// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2010-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * membarrier system call
 */

/*
 * For documentation purposes, here are some membarrier ordering
 * scenarios to keep in mind:
 *
 * A) Userspace thread execution after IPI vs membarrier's memory
 *    barrier before sending the IPI
 *
 * Userspace variables:
 *
 * int x = 0, y = 0;
 *
 * The memory barrier at the start of membarrier() on CPU0 is necessary in
 * order to enforce the guarantee that any writes occurring on CPU0 before
 * the membarrier() is executed will be visible to any code executing on
 * CPU1 after the IPI-induced memory barrier:
 *
 *        CPU0                              CPU1
 *
 *        x = 1
 *        membarrier():
 *          a: smp_mb()
 *          b: send IPI                     IPI-induced mb
 *          c: smp_mb()
 *        r2 = y
 *                                          y = 1
 *                                          barrier()
 *                                          r1 = x
 *
 *                            BUG_ON(r1 == 0 && r2 == 0)
 *
 * The write to y and load from x by CPU1 are unordered by the hardware,
 * so it's possible to have "r1 = x" reordered before "y = 1" at any
 * point after (b).  If the memory barrier at (a) is omitted, then "x = 1"
 * can be reordered after (a) (although not after (c)), so we get r1 == 0
 * and r2 == 0.  This violates the guarantee that membarrier() is
 * supposed to provide.
 *
 * The timing of the memory barrier at (a) has to ensure that it executes
 * before the IPI-induced memory barrier on CPU1.
 *
 * B) Userspace thread execution before IPI vs membarrier's memory
 *    barrier after completing the IPI
 *
 * Userspace variables:
 *
 * int x = 0, y = 0;
 *
 * The memory barrier at the end of membarrier() on CPU0 is necessary in
 * order to enforce the guarantee that any writes occurring on CPU1 before
 * the membarrier() is executed will be visible to any code executing on
 * CPU0 after the membarrier():
 *
 *        CPU0                              CPU1
 *
 *                                          x = 1
 *                                          barrier()
 *                                          y = 1
 *        r2 = y
 *        membarrier():
 *          a: smp_mb()
 *          b: send IPI                     IPI-induced mb
 *          c: smp_mb()
 *        r1 = x
 *
 *                            BUG_ON(r1 == 0 && r2 == 1)
 *
 * The writes to x and y are unordered by the hardware, so it's possible to
 * have "r2 = 1" even though the write to x doesn't execute until (b).  If
 * the memory barrier at (c) is omitted, then "r1 = x" can be reordered
 * before (b) (although not before (a)), so we get "r1 = 0".  This violates
 * the guarantee that membarrier() is supposed to provide.
 *
 * The timing of the memory barrier at (c) has to ensure that it executes
 * after the IPI-induced memory barrier on CPU1.
 *
 * C) Scheduling userspace thread -> kthread -> userspace thread vs membarrier
 *
 *           CPU0                            CPU1
 *
 *           membarrier():
 *           a: smp_mb()
 *                                           d: switch to kthread (includes mb)
 *           b: read rq->curr->mm == NULL
 *                                           e: switch to user (includes mb)
 *           c: smp_mb()
 *
 * Using the scenario from (A), we can show that (a) needs to be paired
 * with (e). Using the scenario from (B), we can show that (c) needs to
 * be paired with (d).
 *
 * D) exit_mm vs membarrier
 *
 * Two thread groups are created, A and B.  Thread group B is created by
 * issuing clone from group A with flag CLONE_VM set, but not CLONE_THREAD.
 * Let's assume we have a single thread within each thread group (Thread A
 * and Thread B).  Thread A runs on CPU0, Thread B runs on CPU1.
 *
 *           CPU0                            CPU1
 *
 *           membarrier():
 *             a: smp_mb()
 *                                           exit_mm():
 *                                             d: smp_mb()
 *                                             e: current->mm = NULL
 *             b: read rq->curr->mm == NULL
 *             c: smp_mb()
 *
 * Using scenario (B), we can show that (c) needs to be paired with (d).
 *
 * E) kthread_{use,unuse}_mm vs membarrier
 *
 *           CPU0                            CPU1
 *
 *           membarrier():
 *           a: smp_mb()
 *                                           kthread_unuse_mm()
 *                                             d: smp_mb()
 *                                             e: current->mm = NULL
 *           b: read rq->curr->mm == NULL
 *                                           kthread_use_mm()
 *                                             f: current->mm = mm
 *                                             g: smp_mb()
 *           c: smp_mb()
 *
 * Using the scenario from (A), we can show that (a) needs to be paired
 * with (g). Using the scenario from (B), we can show that (c) needs to
 * be paired with (d).
 */
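
/*
 * Purely illustrative userspace sketch (not kernel code, and not part of the
 * original comment): the typical calling pattern for the private expedited
 * command, assuming <linux/membarrier.h> and a kernel exposing membarrier(2).
 * Error handling is omitted.
 *
 *	#include <linux/membarrier.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int membarrier(int cmd, unsigned int flags, int cpu_id)
 *	{
 *		return syscall(__NR_membarrier, cmd, flags, cpu_id);
 *	}
 *
 *	// Once per process, before relying on the expedited command:
 *	membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0);
 *
 *	// Slow path of an asymmetric fence; fast paths only need barrier():
 *	membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0);
 */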

/*
 * Bitmask made from an "or" of all commands within enum membarrier_cmd,
 * except MEMBARRIER_CMD_QUERY.
 */
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK		\
	(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE		\
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE)
#else
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK	0
#endif

#ifdef CONFIG_RSEQ
#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK		\
	(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ			\
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ)
#else
#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK	0
#endif

#define MEMBARRIER_CMD_BITMASK						\
	(MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED	\
	| MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED			\
	| MEMBARRIER_CMD_PRIVATE_EXPEDITED				\
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED			\
	| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK		\
	| MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK			\
	| MEMBARRIER_CMD_GET_REGISTRATIONS)
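
/*
 * Illustrative only (not from the original source): how a userspace caller
 * might consume the mask reported by MEMBARRIER_CMD_QUERY, using the
 * hypothetical membarrier() wrapper sketched in the comment above.
 *
 *	int mask = membarrier(MEMBARRIER_CMD_QUERY, 0, 0);
 *
 *	if (mask < 0 || !(mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED))
 *		use_portable_fallback();	// hypothetical fallback path
 *	else
 *		membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0);
 */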

static DEFINE_MUTEX(membarrier_ipi_mutex);
#define SERIALIZE_IPI() guard(mutex)(&membarrier_ipi_mutex)

static void ipi_mb(void *info)
{
	smp_mb();	/* IPIs should be serializing but paranoid. */
}

static void ipi_sync_core(void *info)
{
	/*
	 * The smp_mb() in membarrier after all the IPIs is supposed to
	 * ensure that memory accesses on remote CPUs that occur before
	 * the IPI become visible to membarrier()'s caller -- see
	 * scenario B in the big comment at the top of this file.
	 *
	 * A sync_core() would provide this guarantee, but
	 * sync_core_before_usermode() might end up being deferred until
	 * after membarrier()'s smp_mb().
	 */
	smp_mb();	/* IPIs should be serializing but paranoid. */

	sync_core_before_usermode();
}

static void ipi_rseq(void *info)
{
	/*
	 * Ensure that all stores done by the calling thread are visible
	 * to the current task before the current task resumes.  We could
	 * probably optimize this away on most architectures, but by the
	 * time we've already sent an IPI, the cost of the extra smp_mb()
	 * is negligible.
	 */
	smp_mb();
	rseq_preempt(current);
}

static void ipi_sync_rq_state(void *info)
{
	struct mm_struct *mm = (struct mm_struct *) info;

	if (current->mm != mm)
		return;
	this_cpu_write(runqueues.membarrier_state,
		       atomic_read(&mm->membarrier_state));
	/*
	 * Issue a memory barrier after setting
	 * MEMBARRIER_STATE_GLOBAL_EXPEDITED in the current runqueue to
	 * guarantee that no memory access following registration is reordered
	 * before registration.
	 */
	smp_mb();
}

void membarrier_exec_mmap(struct mm_struct *mm)
{
	/*
	 * Issue a memory barrier before clearing membarrier_state to
	 * guarantee that no memory access prior to exec is reordered after
	 * clearing this state.
	 */
	smp_mb();
	atomic_set(&mm->membarrier_state, 0);
	/*
	 * Keep the runqueue membarrier_state in sync with this mm
	 * membarrier_state.
	 */
	this_cpu_write(runqueues.membarrier_state, 0);
}

void membarrier_update_current_mm(struct mm_struct *next_mm)
{
	struct rq *rq = this_rq();
	int membarrier_state = 0;

	if (next_mm)
		membarrier_state = atomic_read(&next_mm->membarrier_state);
	if (READ_ONCE(rq->membarrier_state) == membarrier_state)
		return;
	WRITE_ONCE(rq->membarrier_state, membarrier_state);
}

static int membarrier_global_expedited(void)
{
	int cpu;
	cpumask_var_t tmpmask;

	if (num_online_cpus() == 1)
		return 0;

	/*
	 * Matches memory barriers after rq->curr modification in
	 * scheduler.
	 */
	smp_mb();	/* system call entry is not a mb. */

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	SERIALIZE_IPI();
	cpus_read_lock();
	rcu_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p;

		/*
		 * Skipping the current CPU is OK even though we can be
		 * migrated at any point. The current CPU, at the point
		 * where we read raw_smp_processor_id(), is ensured to
		 * be in program order with respect to the caller
		 * thread. Therefore, we can skip this CPU from the
		 * iteration.
		 */
		if (cpu == raw_smp_processor_id())
			continue;

		if (!(READ_ONCE(cpu_rq(cpu)->membarrier_state) &
		    MEMBARRIER_STATE_GLOBAL_EXPEDITED))
			continue;

		/*
		 * Skip the CPU if it runs a kernel thread which is not using
		 * a task mm.
		 */
		p = rcu_dereference(cpu_rq(cpu)->curr);
		if (!p->mm)
			continue;

		__cpumask_set_cpu(cpu, tmpmask);
	}
	rcu_read_unlock();

	preempt_disable();
	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
	preempt_enable();

	free_cpumask_var(tmpmask);
	cpus_read_unlock();

	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers before
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a mb */
	return 0;
}

static int membarrier_private_expedited(int flags, int cpu_id)
{
	cpumask_var_t tmpmask;
	struct mm_struct *mm = current->mm;
	smp_call_func_t ipi_func = ipi_mb;

	if (flags == MEMBARRIER_FLAG_SYNC_CORE) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
			return -EINVAL;
		if (!(atomic_read(&mm->membarrier_state) &
		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
			return -EPERM;
		ipi_func = ipi_sync_core;
		prepare_sync_core_cmd(mm);
	} else if (flags == MEMBARRIER_FLAG_RSEQ) {
		if (!IS_ENABLED(CONFIG_RSEQ))
			return -EINVAL;
		if (!(atomic_read(&mm->membarrier_state) &
		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY))
			return -EPERM;
		ipi_func = ipi_rseq;
	} else {
		WARN_ON_ONCE(flags);
		if (!(atomic_read(&mm->membarrier_state) &
		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
			return -EPERM;
	}

	if (flags != MEMBARRIER_FLAG_SYNC_CORE &&
	    (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1))
		return 0;

	/*
	 * Matches memory barriers after rq->curr modification in
	 * scheduler.
	 *
	 * On RISC-V, this barrier pairing is also needed for the
	 * SYNC_CORE command when switching between processes, cf.
	 * the inline comments in membarrier_arch_switch_mm().
	 */
	smp_mb();	/* system call entry is not a mb. */

	if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	SERIALIZE_IPI();
	cpus_read_lock();

	if (cpu_id >= 0) {
		struct task_struct *p;

		if (cpu_id >= nr_cpu_ids || !cpu_online(cpu_id))
			goto out;
		rcu_read_lock();
		p = rcu_dereference(cpu_rq(cpu_id)->curr);
		if (!p || p->mm != mm) {
			rcu_read_unlock();
			goto out;
		}
		rcu_read_unlock();
	} else {
		int cpu;

		rcu_read_lock();
		for_each_online_cpu(cpu) {
			struct task_struct *p;

			p = rcu_dereference(cpu_rq(cpu)->curr);
			if (p && p->mm == mm)
				__cpumask_set_cpu(cpu, tmpmask);
		}
		rcu_read_unlock();
	}

	if (cpu_id >= 0) {
		/*
		 * smp_call_function_single() will call ipi_func() if cpu_id
		 * is the calling CPU.
		 */
		smp_call_function_single(cpu_id, ipi_func, NULL, 1);
	} else {
		/*
		 * For regular membarrier, we can save a few cycles by
		 * skipping the current cpu -- we're about to do smp_mb()
		 * below, and if we migrate to a different cpu, this cpu
		 * and the new cpu will execute a full barrier in the
		 * scheduler.
		 *
		 * For SYNC_CORE, we do need a barrier on the current cpu --
		 * otherwise, if we are migrated and replaced by a different
		 * task in the same mm just before, during, or after
		 * membarrier, we will end up with some thread in the mm
		 * running without a core sync.
		 *
		 * For RSEQ, don't rseq_preempt() the caller.  User code
		 * is not supposed to issue syscalls at all from inside an
		 * rseq critical section.
		 */
		if (flags != MEMBARRIER_FLAG_SYNC_CORE) {
			preempt_disable();
			smp_call_function_many(tmpmask, ipi_func, NULL, true);
			preempt_enable();
		} else {
			on_each_cpu_mask(tmpmask, ipi_func, NULL, true);
		}
	}

out:
	if (cpu_id < 0)
		free_cpumask_var(tmpmask);
	cpus_read_unlock();

	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers before
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a mb */

	return 0;
}

static int sync_runqueues_membarrier_state(struct mm_struct *mm)
{
	int membarrier_state = atomic_read(&mm->membarrier_state);
	cpumask_var_t tmpmask;
	int cpu;

	if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1) {
		this_cpu_write(runqueues.membarrier_state, membarrier_state);

		/*
		 * For single mm user, we can simply issue a memory barrier
		 * after setting MEMBARRIER_STATE_GLOBAL_EXPEDITED in the
		 * mm and in the current runqueue to guarantee that no memory
		 * access following registration is reordered before
		 * registration.
		 */
		smp_mb();
		return 0;
	}

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * For mm with multiple users, we need to ensure all future
	 * scheduler executions will observe @mm's new membarrier
	 * state.
	 */
	synchronize_rcu();

	/*
	 * For each cpu runqueue, if the task's mm matches @mm, ensure that all
	 * @mm's membarrier state set bits are also set in the runqueue's
	 * membarrier state. This ensures that a runqueue scheduling
	 * between threads which are users of @mm has its membarrier state
	 * updated.
	 */
	SERIALIZE_IPI();
	cpus_read_lock();
	rcu_read_lock();
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
		struct task_struct *p;

		p = rcu_dereference(rq->curr);
		if (p && p->mm == mm)
			__cpumask_set_cpu(cpu, tmpmask);
	}
	rcu_read_unlock();

	on_each_cpu_mask(tmpmask, ipi_sync_rq_state, mm, true);

	free_cpumask_var(tmpmask);
	cpus_read_unlock();

	return 0;
}

static int membarrier_register_global_expedited(void)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	int ret;

	if (atomic_read(&mm->membarrier_state) &
	    MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY)
		return 0;
	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state);
	ret = sync_runqueues_membarrier_state(mm);
	if (ret)
		return ret;
	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
		  &mm->membarrier_state);

	return 0;
}

static int membarrier_register_private_expedited(int flags)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	int ready_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
	    set_state = MEMBARRIER_STATE_PRIVATE_EXPEDITED,
	    ret;

	if (flags == MEMBARRIER_FLAG_SYNC_CORE) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
			return -EINVAL;
		ready_state =
			MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY;
	} else if (flags == MEMBARRIER_FLAG_RSEQ) {
		if (!IS_ENABLED(CONFIG_RSEQ))
			return -EINVAL;
		ready_state =
			MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY;
	} else {
		WARN_ON_ONCE(flags);
	}

	/*
	 * We need to consider threads belonging to different thread
	 * groups, which use the same mm. (CLONE_VM but not
	 * CLONE_THREAD).
	 */
	if ((atomic_read(&mm->membarrier_state) & ready_state) == ready_state)
		return 0;
	if (flags & MEMBARRIER_FLAG_SYNC_CORE)
		set_state |= MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE;
	if (flags & MEMBARRIER_FLAG_RSEQ)
		set_state |= MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ;
	atomic_or(set_state, &mm->membarrier_state);
	ret = sync_runqueues_membarrier_state(mm);
	if (ret)
		return ret;
	atomic_or(ready_state, &mm->membarrier_state);

	return 0;
}

static int membarrier_get_registrations(void)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	int registrations_mask = 0, membarrier_state, i;
	static const int states[] = {
		MEMBARRIER_STATE_GLOBAL_EXPEDITED |
			MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
		MEMBARRIER_STATE_PRIVATE_EXPEDITED |
			MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
		MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE |
			MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY,
		MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ |
			MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY
	};
	static const int registration_cmds[] = {
		MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED,
		MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED,
		MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE,
		MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ
	};
	BUILD_BUG_ON(ARRAY_SIZE(states) != ARRAY_SIZE(registration_cmds));

	membarrier_state = atomic_read(&mm->membarrier_state);
	for (i = 0; i < ARRAY_SIZE(states); ++i) {
		if (membarrier_state & states[i]) {
			registrations_mask |= registration_cmds[i];
			membarrier_state &= ~states[i];
		}
	}
	WARN_ON_ONCE(membarrier_state != 0);
	return registrations_mask;
}
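
/*
 * Illustrative only (not from the original source): how userspace might
 * interpret the mask returned by MEMBARRIER_CMD_GET_REGISTRATIONS, again
 * using the hypothetical membarrier() wrapper sketched near the top of
 * this file.
 *
 *	int regs = membarrier(MEMBARRIER_CMD_GET_REGISTRATIONS, 0, 0);
 *
 *	if (regs >= 0 && (regs & MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED)) {
 *		// This mm is already registered, so issuing
 *		// MEMBARRIER_CMD_PRIVATE_EXPEDITED will not fail with -EPERM.
 *	}
 */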

/**
 * sys_membarrier - issue memory barriers on a set of threads
 * @cmd:    Takes command values defined in enum membarrier_cmd.
 * @flags:  Currently needs to be 0 for all commands other than
 *          MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ: in the latter
 *          case it can be MEMBARRIER_CMD_FLAG_CPU, indicating that @cpu_id
 *          contains the CPU on which to interrupt (= restart)
 *          the RSEQ critical section.
 * @cpu_id: if @flags == MEMBARRIER_CMD_FLAG_CPU, indicates the cpu on which
 *          RSEQ CS should be interrupted (@cmd must be
 *          MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ).
 *
 * If this system call is not implemented, -ENOSYS is returned. If the
 * command specified does not exist, is not available on the running
 * kernel, or if the command argument is invalid, this system call
 * returns -EINVAL. For a given command, with flags argument set to 0,
 * if this system call returns -ENOSYS or -EINVAL, it is guaranteed to
 * always return the same value until reboot. In addition, it can return
 * -ENOMEM if there is not enough memory available to perform the system
 * call.
 *
 * All memory accesses performed in program order from each targeted thread
 * are guaranteed to be ordered with respect to sys_membarrier(). If we use
 * the semantic "barrier()" to represent a compiler barrier forcing memory
 * accesses to be performed in program order across the barrier, and
 * smp_mb() to represent explicit memory barriers forcing full memory
 * ordering across the barrier, we have the following ordering table for
 * each pair of barrier(), sys_membarrier() and smp_mb():
 *
 * The pair ordering is detailed as (O: ordered, X: not ordered):
 *
 *                        barrier()   smp_mb() sys_membarrier()
 *        barrier()          X           X            O
 *        smp_mb()           X           O            O
 *        sys_membarrier()   O           O            O
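 *
 * For illustration only (not part of the original documentation): the table
 * above is what makes asymmetric fencing work, where many fast-path threads
 * rely on a plain barrier() while a single slow-path thread issues
 * sys_membarrier() to order itself against all of them:
 *
 *        fast path (any thread)            slow path (one thread)
 *
 *        x = 1
 *        barrier()
 *        r1 = y
 *                                          y = 1
 *                                          sys_membarrier()
 *                                          r2 = x
 *
 * Per the table, the fast path's barrier() is ordered against the slow
 * path's sys_membarrier(), so the slow path obtains full ordering without
 * the fast path ever paying for an smp_mb().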
 */
SYSCALL_DEFINE3(membarrier, int, cmd, unsigned int, flags, int, cpu_id)
{
	switch (cmd) {
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:
		if (unlikely(flags && flags != MEMBARRIER_CMD_FLAG_CPU))
			return -EINVAL;
		break;
	default:
		if (unlikely(flags))
			return -EINVAL;
	}

	if (!(flags & MEMBARRIER_CMD_FLAG_CPU))
		cpu_id = -1;

	switch (cmd) {
	case MEMBARRIER_CMD_QUERY:
	{
		int cmd_mask = MEMBARRIER_CMD_BITMASK;

		if (tick_nohz_full_enabled())
			cmd_mask &= ~MEMBARRIER_CMD_GLOBAL;
		return cmd_mask;
	}
	case MEMBARRIER_CMD_GLOBAL:
		/* MEMBARRIER_CMD_GLOBAL is not compatible with nohz_full. */
		if (tick_nohz_full_enabled())
			return -EINVAL;
		if (num_online_cpus() > 1)
			synchronize_rcu();
		return 0;
	case MEMBARRIER_CMD_GLOBAL_EXPEDITED:
		return membarrier_global_expedited();
	case MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
		return membarrier_register_global_expedited();
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
		return membarrier_private_expedited(0, cpu_id);
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
		return membarrier_register_private_expedited(0);
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
		return membarrier_private_expedited(MEMBARRIER_FLAG_SYNC_CORE, cpu_id);
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
		return membarrier_register_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ:
		return membarrier_private_expedited(MEMBARRIER_FLAG_RSEQ, cpu_id);
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ:
		return membarrier_register_private_expedited(MEMBARRIER_FLAG_RSEQ);
	case MEMBARRIER_CMD_GET_REGISTRATIONS:
		return membarrier_get_registrations();
	default:
		return -EINVAL;
	}
}