/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE SMP support routines.
 */

#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <asm/cacheflush.h>

HV_Topology smp_topology __write_once;
EXPORT_SYMBOL(smp_topology);

#if CHIP_HAS_IPI()
static unsigned long __iomem *ipi_mappings[NR_CPUS];
#endif

/*
 * Top-level send_IPI*() functions to send messages to other cpus.
 */

/* Set by smp_send_stop() to avoid recursive panics. */
static int stopping_cpus;
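
/*
 * Deliver one tagged message to the given recipients, looping until the
 * hypervisor reports that it has been sent to all of them.
 */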
static void __send_IPI_many(HV_Recipient *recip, int nrecip, int tag)
{
	int sent = 0;
	while (sent < nrecip) {
		int rc = hv_send_message(recip, nrecip,
					 (HV_VirtAddr)&tag, sizeof(tag));
		if (rc < 0) {
			if (!stopping_cpus)  /* avoid recursive panic */
				panic("hv_send_message returned %d", rc);
			break;
		}
		WARN_ONCE(rc == 0, "hv_send_message() returned zero\n");
		sent += rc;
	}
}
void send_IPI_single(int cpu, int tag)
{
	HV_Recipient recip = {
		.y = cpu / smp_width,
		.x = cpu % smp_width,
		.state = HV_TO_BE_SENT
	};
	__send_IPI_many(&recip, 1, tag);
}
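
/* Send the message to every cpu in the mask, which must not include the sender. */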
void send_IPI_many(const struct cpumask *mask, int tag)
{
	HV_Recipient recip[NR_CPUS];
	int nrecip = 0;
	int cpu;
	int my_cpu = smp_processor_id();
	for_each_cpu(cpu, mask) {
		HV_Recipient *r = &recip[nrecip++];
		BUG_ON(cpu == my_cpu);
		r->y = cpu / smp_width;
		r->x = cpu % smp_width;
		r->state = HV_TO_BE_SENT;
	}
	__send_IPI_many(recip, nrecip, tag);
}
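
/* Send the message to every online cpu except the current one. */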
void send_IPI_allbutself(int tag)
{
	struct cpumask mask;
	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	send_IPI_many(&mask, tag);
}

/*
 * Functions related to starting/stopping cpus.
 */

/* Handler to start the current cpu. */
static void smp_start_cpu_interrupt(void)
{
	get_irq_regs()->pc = start_cpu_function_addr;
}

/* Handler to stop the current cpu. */
static void smp_stop_cpu_interrupt(void)
{
	set_cpu_online(smp_processor_id(), 0);
	arch_local_irq_disable_all();
	for (;;)
		asm("nap; nop");
}

/* This function calls the 'stop' function on all other CPUs in the system. */
void smp_send_stop(void)
{
	stopping_cpus = 1;
	send_IPI_allbutself(MSG_TAG_STOP_CPU);
}

/* On panic, just wait; we may get an smp_send_stop() later on. */
void panic_smp_self_stop(void)
{
	while (1)
		asm("nap; nop");
}

/*
 * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.
 */
void evaluate_message(int tag)
{
	switch (tag) {
	case MSG_TAG_START_CPU:			/* Start up a cpu */
		smp_start_cpu_interrupt();
		break;

	case MSG_TAG_STOP_CPU:			/* Sent to shut down slave CPU's */
		smp_stop_cpu_interrupt();
		break;

	case MSG_TAG_CALL_FUNCTION_MANY:	/* Call function on cpumask */
		generic_smp_call_function_interrupt();
		break;

	case MSG_TAG_CALL_FUNCTION_SINGLE:	/* Call function on one other CPU */
		generic_smp_call_function_single_interrupt();
		break;

	default:
		panic("Unknown IPI message tag %d", tag);
		break;
	}
}

/*
 * flush_icache_range() code uses smp_call_function().
 */

struct ipi_flush {
	unsigned long start;
	unsigned long end;
};

static void ipi_flush_icache_range(void *info)
{
	struct ipi_flush *flush = (struct ipi_flush *) info;
	__flush_icache_range(flush->start, flush->end);
}
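
/* Flush the instruction cache for the given range on every cpu. */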
void flush_icache_range(unsigned long start, unsigned long end)
{
	struct ipi_flush flush = { start, end };
	on_each_cpu(ipi_flush_icache_range, &flush, 1);
}

/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
static irqreturn_t handle_reschedule_ipi(int irq, void *token)
{
	__get_cpu_var(irq_stat).irq_resched_count++;
	scheduler_ipi();

	return IRQ_HANDLED;
}
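
/* irqaction installed once at boot for the per-cpu reschedule interrupt. */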
static struct irqaction resched_action = {
	.handler = handle_reschedule_ipi,
	.dev_id = handle_reschedule_ipi /* unique token */,
};
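
/*
 * Boot-time setup: map the IPI trigger MMIO pages (if the chip has direct
 * IPIs) and install the reschedule interrupt handler.
 */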
void __init ipi_init(void)
{
#if CHIP_HAS_IPI()
	int cpu;

	/* Map IPI trigger MMIO addresses. */
	for_each_possible_cpu(cpu) {
		HV_Coord tile;
		HV_PTE pte;
		unsigned long offset;

		tile.x = cpu_x(cpu);
		tile.y = cpu_y(cpu);
		if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
			panic("Failed to initialize IPI for cpu %d\n", cpu);

		offset = PFN_PHYS(pte_pfn(pte));
		ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
	}
#endif

	/* Bind handle_reschedule_ipi() to IRQ_RESCHEDULE. */
	tile_irq_activate(IRQ_RESCHEDULE, TILE_IRQ_PERCPU);
	BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action));
}
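
/*
 * smp_send_reschedule() pokes IRQ_RESCHEDULE on the target cpu, either with
 * a direct MMIO store (when the chip supports direct IPIs) or via a
 * hypervisor downcall.
 */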
#if CHIP_HAS_IPI()

void smp_send_reschedule(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));

	/*
	 * We just want to do an MMIO store.  The traditional writeq()
	 * functions aren't really correct here, since they're always
	 * directed at the PCI shim.  For now, just do a raw store,
	 * casting away the __iomem attribute.
	 */
	((unsigned long __force *)ipi_mappings[cpu])[IRQ_RESCHEDULE] = 0;
}

#else

void smp_send_reschedule(int cpu)
{
	HV_Coord coord;

	WARN_ON(cpu_is_offline(cpu));

	coord.y = cpu_y(cpu);
	coord.x = cpu_x(cpu);
	hv_trigger_ipi(coord, IRQ_RESCHEDULE);
}

#endif /* CHIP_HAS_IPI() */