/*
 *  linux/arch/m32r/kernel/smp.c
 *
 *  M32R SMP support routines.
 *
 *  Copyright (c) 2001, 2002  Hitoshi Yamamoto
 *
 *  Taken from i386 version.
 *    (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *    (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *  This code is released under the GNU General Public License version 2 or
 *  later.
 */
18 #include <linux/irq.h>
19 #include <linux/interrupt.h>
20 #include <linux/spinlock.h>
22 #include <linux/smp.h>
23 #include <linux/profile.h>
24 #include <linux/cpu.h>
26 #include <asm/cacheflush.h>
27 #include <asm/pgalloc.h>
28 #include <asm/atomic.h>
30 #include <asm/mmu_context.h>
33 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
34 /* Data structures and variables */
35 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
38 * Structure and data for smp_call_function(). This is designed to minimise
39 * static memory requirements. It also looks cleaner.
41 static DEFINE_SPINLOCK(call_lock
);
43 struct call_data_struct
{
44 void (*func
) (void *info
);
49 } __attribute__ ((__aligned__(SMP_CACHE_BYTES
)));
51 static struct call_data_struct
*call_data
;
54 * For flush_cache_all()
56 static DEFINE_SPINLOCK(flushcache_lock
);
57 static volatile unsigned long flushcache_cpumask
= 0;
60 * For flush_tlb_others()
62 static volatile cpumask_t flush_cpumask
;
63 static struct mm_struct
*flush_mm
;
64 static struct vm_area_struct
*flush_vma
;
65 static volatile unsigned long flush_va
;
66 static DEFINE_SPINLOCK(tlbstate_lock
);
67 #define FLUSH_ALL 0xffffffff
69 DECLARE_PER_CPU(int, prof_multiplier
);
70 DECLARE_PER_CPU(int, prof_old_multiplier
);
71 DECLARE_PER_CPU(int, prof_counter
);
73 extern spinlock_t ipi_lock
[];
75 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
76 /* Function Prototypes */
77 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
79 void smp_send_reschedule(int);
80 void smp_reschedule_interrupt(void);
82 void smp_flush_cache_all(void);
83 void smp_flush_cache_all_interrupt(void);
85 void smp_flush_tlb_all(void);
86 static void flush_tlb_all_ipi(void *);
88 void smp_flush_tlb_mm(struct mm_struct
*);
89 void smp_flush_tlb_range(struct vm_area_struct
*, unsigned long, \
91 void smp_flush_tlb_page(struct vm_area_struct
*, unsigned long);
92 static void flush_tlb_others(cpumask_t
, struct mm_struct
*,
93 struct vm_area_struct
*, unsigned long);
94 void smp_invalidate_interrupt(void);
96 void smp_send_stop(void);
97 static void stop_this_cpu(void *);
99 int smp_call_function(void (*) (void *), void *, int, int);
100 void smp_call_function_interrupt(void);
102 void smp_send_timer(void);
103 void smp_ipi_timer_interrupt(struct pt_regs
*);
104 void smp_local_timer_interrupt(struct pt_regs
*);
106 void send_IPI_allbutself(int, int);
107 static void send_IPI_mask(cpumask_t
, int, int);
108 unsigned long send_IPI_mask_phys(cpumask_t
, int, int);
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Rescheduling request Routines                                             */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
114 /*==========================================================================*
115 * Name: smp_send_reschedule
117 * Description: This routine requests other CPU to execute rescheduling.
118 * 1.Send 'RESCHEDULE_IPI' to other CPU.
119 * Request other CPU to execute 'smp_reschedule_interrupt()'.
121 * Born on Date: 2002.02.05
123 * Arguments: cpu_id - Target CPU ID
125 * Returns: void (cannot fail)
128 * Date Who Description
129 * ---------- --- --------------------------------------------------------
131 *==========================================================================*/
132 void smp_send_reschedule(int cpu_id
)
134 WARN_ON(cpu_is_offline(cpu_id
));
135 send_IPI_mask(cpumask_of_cpu(cpu_id
), RESCHEDULE_IPI
, 1);
138 /*==========================================================================*
139 * Name: smp_reschedule_interrupt
141 * Description: This routine executes on CPU which received
143 * Rescheduling is processed at the exit of interrupt
146 * Born on Date: 2002.02.05
150 * Returns: void (cannot fail)
153 * Date Who Description
154 * ---------- --- --------------------------------------------------------
156 *==========================================================================*/
157 void smp_reschedule_interrupt(void)
162 /*==========================================================================*
163 * Name: smp_flush_cache_all
165 * Description: This routine sends a 'INVALIDATE_CACHE_IPI' to all other
166 * CPUs in the system.
168 * Born on Date: 2003-05-28
172 * Returns: void (cannot fail)
175 * Date Who Description
176 * ---------- --- --------------------------------------------------------
178 *==========================================================================*/
179 void smp_flush_cache_all(void)
185 cpumask
= cpu_online_map
;
186 cpu_clear(smp_processor_id(), cpumask
);
187 spin_lock(&flushcache_lock
);
188 mask
=cpus_addr(cpumask
);
189 atomic_set_mask(*mask
, (atomic_t
*)&flushcache_cpumask
);
190 send_IPI_mask(cpumask
, INVALIDATE_CACHE_IPI
, 0);
191 _flush_cache_copyback_all();
192 while (flushcache_cpumask
)
194 spin_unlock(&flushcache_lock
);
198 void smp_flush_cache_all_interrupt(void)
200 _flush_cache_copyback_all();
201 clear_bit(smp_processor_id(), &flushcache_cpumask
);
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* TLB flush request Routines                                                */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
208 /*==========================================================================*
209 * Name: smp_flush_tlb_all
211 * Description: This routine flushes all processes TLBs.
212 * 1.Request other CPU to execute 'flush_tlb_all_ipi()'.
213 * 2.Execute 'do_flush_tlb_all_local()'.
215 * Born on Date: 2002.02.05
219 * Returns: void (cannot fail)
222 * Date Who Description
223 * ---------- --- --------------------------------------------------------
225 *==========================================================================*/
226 void smp_flush_tlb_all(void)
231 local_irq_save(flags
);
233 local_irq_restore(flags
);
234 smp_call_function(flush_tlb_all_ipi
, 0, 1, 1);
238 /*==========================================================================*
239 * Name: flush_tlb_all_ipi
241 * Description: This routine flushes all local TLBs.
242 * 1.Execute 'do_flush_tlb_all_local()'.
244 * Born on Date: 2002.02.05
246 * Arguments: *info - not used
248 * Returns: void (cannot fail)
251 * Date Who Description
252 * ---------- --- --------------------------------------------------------
254 *==========================================================================*/
255 static void flush_tlb_all_ipi(void *info
)
260 /*==========================================================================*
261 * Name: smp_flush_tlb_mm
263 * Description: This routine flushes the specified mm context TLB's.
265 * Born on Date: 2002.02.05
267 * Arguments: *mm - a pointer to the mm struct for flush TLB
269 * Returns: void (cannot fail)
272 * Date Who Description
273 * ---------- --- --------------------------------------------------------
275 *==========================================================================*/
276 void smp_flush_tlb_mm(struct mm_struct
*mm
)
278 int cpu_id
= smp_processor_id();
280 unsigned long *mmc
= &mm
->context
[cpu_id
];
284 cpu_mask
= mm
->cpu_vm_mask
;
285 cpu_clear(cpu_id
, cpu_mask
);
287 if (*mmc
!= NO_CONTEXT
) {
288 local_irq_save(flags
);
290 if (mm
== current
->mm
)
291 activate_context(mm
);
293 cpu_clear(cpu_id
, mm
->cpu_vm_mask
);
294 local_irq_restore(flags
);
296 if (!cpus_empty(cpu_mask
))
297 flush_tlb_others(cpu_mask
, mm
, NULL
, FLUSH_ALL
);
302 /*==========================================================================*
303 * Name: smp_flush_tlb_range
305 * Description: This routine flushes a range of pages.
307 * Born on Date: 2002.02.05
309 * Arguments: *mm - a pointer to the mm struct for flush TLB
313 * Returns: void (cannot fail)
316 * Date Who Description
317 * ---------- --- --------------------------------------------------------
319 *==========================================================================*/
320 void smp_flush_tlb_range(struct vm_area_struct
*vma
, unsigned long start
,
323 smp_flush_tlb_mm(vma
->vm_mm
);
326 /*==========================================================================*
327 * Name: smp_flush_tlb_page
329 * Description: This routine flushes one page.
331 * Born on Date: 2002.02.05
333 * Arguments: *vma - a pointer to the vma struct include va
334 * va - virtual address for flush TLB
336 * Returns: void (cannot fail)
339 * Date Who Description
340 * ---------- --- --------------------------------------------------------
342 *==========================================================================*/
343 void smp_flush_tlb_page(struct vm_area_struct
*vma
, unsigned long va
)
345 struct mm_struct
*mm
= vma
->vm_mm
;
346 int cpu_id
= smp_processor_id();
348 unsigned long *mmc
= &mm
->context
[cpu_id
];
352 cpu_mask
= mm
->cpu_vm_mask
;
353 cpu_clear(cpu_id
, cpu_mask
);
360 if (*mmc
!= NO_CONTEXT
) {
361 local_irq_save(flags
);
363 va
|= (*mmc
& MMU_CONTEXT_ASID_MASK
);
364 __flush_tlb_page(va
);
365 local_irq_restore(flags
);
367 if (!cpus_empty(cpu_mask
))
368 flush_tlb_others(cpu_mask
, mm
, vma
, va
);
373 /*==========================================================================*
374 * Name: flush_tlb_others
376 * Description: This routine requests other CPU to execute flush TLB.
378 * 2.Send 'INVALIDATE_TLB_IPI' to other CPU.
379 * Request other CPU to execute 'smp_invalidate_interrupt()'.
380 * 3.Wait for other CPUs operation finished.
382 * Born on Date: 2002.02.05
384 * Arguments: cpumask - bitmap of target CPUs
385 * *mm - a pointer to the mm struct for flush TLB
386 * *vma - a pointer to the vma struct include va
387 * va - virtual address for flush TLB
389 * Returns: void (cannot fail)
392 * Date Who Description
393 * ---------- --- --------------------------------------------------------
395 *==========================================================================*/
396 static void flush_tlb_others(cpumask_t cpumask
, struct mm_struct
*mm
,
397 struct vm_area_struct
*vma
, unsigned long va
)
403 if (!(flags
& 0x0040)) /* Interrupt Disable NONONO */
405 #endif /* DEBUG_SMP */
408 * A couple of (to be removed) sanity checks:
410 * - we do not send IPIs to not-yet booted CPUs.
411 * - current CPU must not be in mask
412 * - mask must exist :)
414 BUG_ON(cpus_empty(cpumask
));
416 BUG_ON(cpu_isset(smp_processor_id(), cpumask
));
419 /* If a CPU which we ran on has gone down, OK. */
420 cpus_and(cpumask
, cpumask
, cpu_online_map
);
421 if (cpus_empty(cpumask
))
425 * i'm not happy about this global shared spinlock in the
426 * MM hot path, but we'll see how contended it is.
427 * Temporarily this turns IRQs off, so that lockups are
428 * detected by the NMI watchdog.
430 spin_lock(&tlbstate_lock
);
435 mask
=cpus_addr(cpumask
);
436 atomic_set_mask(*mask
, (atomic_t
*)&flush_cpumask
);
439 * We have to send the IPI only to
442 send_IPI_mask(cpumask
, INVALIDATE_TLB_IPI
, 0);
444 while (!cpus_empty(flush_cpumask
)) {
445 /* nothing. lockup detection does not belong here */
452 spin_unlock(&tlbstate_lock
);
455 /*==========================================================================*
456 * Name: smp_invalidate_interrupt
458 * Description: This routine executes on CPU which received
459 * 'INVALIDATE_TLB_IPI'.
461 * 2.Report flush TLB process was finished.
463 * Born on Date: 2002.02.05
467 * Returns: void (cannot fail)
470 * Date Who Description
471 * ---------- --- --------------------------------------------------------
473 *==========================================================================*/
474 void smp_invalidate_interrupt(void)
476 int cpu_id
= smp_processor_id();
477 unsigned long *mmc
= &flush_mm
->context
[cpu_id
];
479 if (!cpu_isset(cpu_id
, flush_cpumask
))
482 if (flush_va
== FLUSH_ALL
) {
484 if (flush_mm
== current
->active_mm
)
485 activate_context(flush_mm
);
487 cpu_clear(cpu_id
, flush_mm
->cpu_vm_mask
);
489 unsigned long va
= flush_va
;
491 if (*mmc
!= NO_CONTEXT
) {
493 va
|= (*mmc
& MMU_CONTEXT_ASID_MASK
);
494 __flush_tlb_page(va
);
497 cpu_clear(cpu_id
, flush_cpumask
);
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Stop CPU request Routines                                                 */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
504 /*==========================================================================*
505 * Name: smp_send_stop
507 * Description: This routine requests stop all CPUs.
508 * 1.Request other CPU to execute 'stop_this_cpu()'.
510 * Born on Date: 2002.02.05
514 * Returns: void (cannot fail)
517 * Date Who Description
518 * ---------- --- --------------------------------------------------------
520 *==========================================================================*/
521 void smp_send_stop(void)
523 smp_call_function(stop_this_cpu
, NULL
, 1, 0);
526 /*==========================================================================*
527 * Name: stop_this_cpu
529 * Description: This routine halt CPU.
531 * Born on Date: 2002.02.05
535 * Returns: void (cannot fail)
538 * Date Who Description
539 * ---------- --- --------------------------------------------------------
541 *==========================================================================*/
542 static void stop_this_cpu(void *dummy
)
544 int cpu_id
= smp_processor_id();
549 cpu_clear(cpu_id
, cpu_online_map
);
557 outl(0, M32R_ICU_IMASK_PORTL
);
558 inl(M32R_ICU_IMASK_PORTL
); /* dummy read */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Call function Routines                                                    */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
568 /*==========================================================================*
569 * Name: smp_call_function
571 * Description: This routine sends a 'CALL_FUNCTION_IPI' to all other CPUs
574 * Born on Date: 2002.02.05
576 * Arguments: *func - The function to run. This must be fast and
578 * *info - An arbitrary pointer to pass to the function.
579 * nonatomic - currently unused.
580 * wait - If true, wait (atomically) until function has
581 * completed on other CPUs.
583 * Returns: 0 on success, else a negative status code. Does not return
584 * until remote CPUs are nearly ready to execute <<func>> or
585 * are or have executed.
587 * Cautions: You must not call this function with disabled interrupts or
588 * from a hardware interrupt handler, you may call it from a
589 * bottom half handler.
592 * Date Who Description
593 * ---------- --- --------------------------------------------------------
595 *==========================================================================*/
596 int smp_call_function(void (*func
) (void *info
), void *info
, int nonatomic
,
599 struct call_data_struct data
;
605 if (!(flags
& 0x0040)) /* Interrupt Disable NONONO */
607 #endif /* DEBUG_SMP */
609 /* Holding any lock stops cpus from going down. */
610 spin_lock(&call_lock
);
611 cpus
= num_online_cpus() - 1;
614 spin_unlock(&call_lock
);
618 /* Can deadlock when called with interrupts disabled */
619 WARN_ON(irqs_disabled());
623 atomic_set(&data
.started
, 0);
626 atomic_set(&data
.finished
, 0);
631 /* Send a message to all other CPUs and wait for them to respond */
632 send_IPI_allbutself(CALL_FUNCTION_IPI
, 0);
634 /* Wait for response */
635 while (atomic_read(&data
.started
) != cpus
)
639 while (atomic_read(&data
.finished
) != cpus
)
641 spin_unlock(&call_lock
);
646 /*==========================================================================*
647 * Name: smp_call_function_interrupt
649 * Description: This routine executes on CPU which received
650 * 'CALL_FUNCTION_IPI'.
652 * Born on Date: 2002.02.05
656 * Returns: void (cannot fail)
659 * Date Who Description
660 * ---------- --- --------------------------------------------------------
662 *==========================================================================*/
663 void smp_call_function_interrupt(void)
665 void (*func
) (void *info
) = call_data
->func
;
666 void *info
= call_data
->info
;
667 int wait
= call_data
->wait
;
670 * Notify initiating CPU that I've grabbed the data and am
671 * about to execute the function
674 atomic_inc(&call_data
->started
);
676 * At this point the info structure may be out of scope unless wait==1
684 atomic_inc(&call_data
->finished
);
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Timer Routines                                                            */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
692 /*==========================================================================*
693 * Name: smp_send_timer
695 * Description: This routine sends a 'LOCAL_TIMER_IPI' to all other CPUs
698 * Born on Date: 2002.02.05
702 * Returns: void (cannot fail)
705 * Date Who Description
706 * ---------- --- --------------------------------------------------------
708 *==========================================================================*/
709 void smp_send_timer(void)
711 send_IPI_allbutself(LOCAL_TIMER_IPI
, 1);
714 /*==========================================================================*
715 * Name: smp_send_timer
717 * Description: This routine executes on CPU which received
720 * Born on Date: 2002.02.05
722 * Arguments: *regs - a pointer to the saved regster info
724 * Returns: void (cannot fail)
727 * Date Who Description
728 * ---------- --- --------------------------------------------------------
730 *==========================================================================*/
731 void smp_ipi_timer_interrupt(struct pt_regs
*regs
)
734 smp_local_timer_interrupt(regs
);
738 /*==========================================================================*
739 * Name: smp_local_timer_interrupt
741 * Description: Local timer interrupt handler. It does both profiling and
742 * process statistics/rescheduling.
743 * We do profiling in every local tick, statistics/rescheduling
744 * happen only every 'profiling multiplier' ticks. The default
745 * multiplier is 1 and it can be changed by writing the new
746 * multiplier value into /proc/profile.
748 * Born on Date: 2002.02.05
750 * Arguments: *regs - a pointer to the saved regster info
752 * Returns: void (cannot fail)
754 * Original: arch/i386/kernel/apic.c
757 * Date Who Description
758 * ---------- --- --------------------------------------------------------
759 * 2003-06-24 hy use per_cpu structure.
760 *==========================================================================*/
761 void smp_local_timer_interrupt(struct pt_regs
*regs
)
763 int user
= user_mode(regs
);
764 int cpu_id
= smp_processor_id();
767 * The profiling function is SMP safe. (nothing can mess
768 * around with "current", and the profiling counters are
769 * updated with atomic operations). This is especially
770 * useful with a profiling multiplier != 1
773 profile_tick(CPU_PROFILING
, regs
);
775 if (--per_cpu(prof_counter
, cpu_id
) <= 0) {
777 * The multiplier may have changed since the last time we got
778 * to this point as a result of the user writing to
779 * /proc/profile. In this case we need to adjust the APIC
782 * Interrupts are already masked off at this point.
784 per_cpu(prof_counter
, cpu_id
)
785 = per_cpu(prof_multiplier
, cpu_id
);
786 if (per_cpu(prof_counter
, cpu_id
)
787 != per_cpu(prof_old_multiplier
, cpu_id
))
789 per_cpu(prof_old_multiplier
, cpu_id
)
790 = per_cpu(prof_counter
, cpu_id
);
793 update_process_times(user
);
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Send IPI Routines                                                         */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
801 /*==========================================================================*
802 * Name: send_IPI_allbutself
804 * Description: This routine sends a IPI to all other CPUs in the system.
806 * Born on Date: 2002.02.05
808 * Arguments: ipi_num - Number of IPI
809 * try - 0 : Send IPI certainly.
810 * !0 : The following IPI is not sended when Target CPU
811 * has not received the before IPI.
813 * Returns: void (cannot fail)
816 * Date Who Description
817 * ---------- --- --------------------------------------------------------
819 *==========================================================================*/
820 void send_IPI_allbutself(int ipi_num
, int try)
824 cpumask
= cpu_online_map
;
825 cpu_clear(smp_processor_id(), cpumask
);
827 send_IPI_mask(cpumask
, ipi_num
, try);
830 /*==========================================================================*
831 * Name: send_IPI_mask
833 * Description: This routine sends a IPI to CPUs in the system.
835 * Born on Date: 2002.02.05
837 * Arguments: cpu_mask - Bitmap of target CPUs logical ID
838 * ipi_num - Number of IPI
839 * try - 0 : Send IPI certainly.
840 * !0 : The following IPI is not sended when Target CPU
841 * has not received the before IPI.
843 * Returns: void (cannot fail)
846 * Date Who Description
847 * ---------- --- --------------------------------------------------------
849 *==========================================================================*/
850 static void send_IPI_mask(cpumask_t cpumask
, int ipi_num
, int try)
852 cpumask_t physid_mask
, tmp
;
854 int num_cpus
= num_online_cpus();
856 if (num_cpus
<= 1) /* NO MP */
859 cpus_and(tmp
, cpumask
, cpu_online_map
);
860 BUG_ON(!cpus_equal(cpumask
, tmp
));
862 physid_mask
= CPU_MASK_NONE
;
863 for_each_cpu_mask(cpu_id
, cpumask
){
864 if ((phys_id
= cpu_to_physid(cpu_id
)) != -1)
865 cpu_set(phys_id
, physid_mask
);
868 send_IPI_mask_phys(physid_mask
, ipi_num
, try);
871 /*==========================================================================*
872 * Name: send_IPI_mask_phys
874 * Description: This routine sends a IPI to other CPUs in the system.
876 * Born on Date: 2002.02.05
878 * Arguments: cpu_mask - Bitmap of target CPUs physical ID
879 * ipi_num - Number of IPI
880 * try - 0 : Send IPI certainly.
881 * !0 : The following IPI is not sended when Target CPU
882 * has not received the before IPI.
884 * Returns: IPICRi regster value.
887 * Date Who Description
888 * ---------- --- --------------------------------------------------------
890 *==========================================================================*/
891 unsigned long send_IPI_mask_phys(cpumask_t physid_mask
, int ipi_num
,
895 unsigned long flags
= 0;
896 volatile unsigned long *ipicr_addr
;
897 unsigned long ipicr_val
;
898 unsigned long my_physid_mask
;
899 unsigned long mask
= cpus_addr(physid_mask
)[0];
902 if (mask
& ~physids_coerce(phys_cpu_present_map
))
904 if (ipi_num
>= NR_IPIS
)
908 ipilock
= &ipi_lock
[ipi_num
];
909 ipicr_addr
= (volatile unsigned long *)(M32R_ICU_IPICR_ADDR
911 my_physid_mask
= ~(1 << smp_processor_id());
916 * write IPICRi (send IPIi)
919 __asm__
__volatile__ (
920 ";; LOCK ipi_lock[i] \n\t"
924 "clrpsw #0x40 -> nop \n\t"
925 DCACHE_CLEAR("r4", "r5", "%2")
928 "unlock r4, @%2 \n\t"
931 LOCK_SECTION_START(".balign 4 \n\t")
938 ";; CHECK IPICRi == 0 \n\t"
946 ";; WRITE IPICRi (send IPIi) \n\t"
950 ";; UNLOCK ipi_lock[i] \n\t"
956 : "r"(flags
), "r"(&ipilock
->slock
), "r"(ipicr_addr
),
957 "r"(mask
), "r"(try), "r"(my_physid_mask
)
959 #ifdef CONFIG_CHIP_M32700_TS1
961 #endif /* CONFIG_CHIP_M32700_TS1 */