Avoid beyond bounds copy while caching ACL
[zen-stable.git] / arch / mips / kernel / smp-cmp.c
blobfe3095160655755f87fa7d0641d86b4916e3bc5a
1 /*
2 * This program is free software; you can distribute it and/or modify it
3 * under the terms of the GNU General Public License (Version 2) as
4 * published by the Free Software Foundation.
6 * This program is distributed in the hope it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
9 * for more details.
11 * You should have received a copy of the GNU General Public License along
12 * with this program; if not, write to the Free Software Foundation, Inc.,
13 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
15 * Copyright (C) 2007 MIPS Technologies, Inc.
16 * Chris Dearman (chris@mips.com)
19 #undef DEBUG
21 #include <linux/kernel.h>
22 #include <linux/sched.h>
23 #include <linux/smp.h>
24 #include <linux/cpumask.h>
25 #include <linux/interrupt.h>
26 #include <linux/compiler.h>
28 #include <linux/atomic.h>
29 #include <asm/cacheflush.h>
30 #include <asm/cpu.h>
31 #include <asm/processor.h>
32 #include <asm/system.h>
33 #include <asm/hardirq.h>
34 #include <asm/mmu_context.h>
35 #include <asm/smp.h>
36 #include <asm/time.h>
37 #include <asm/mipsregs.h>
38 #include <asm/mipsmtregs.h>
39 #include <asm/mips_mt.h>
40 #include <asm/amon.h>
41 #include <asm/gic.h>
/* Raise the GIC IPI that drives smp_call_function handling on @cpu. */
static void ipi_call_function(unsigned int cpu)
{
	unsigned int intr = plat_ipi_call_int_xlate(cpu);

	pr_debug("CPU%d: %s cpu %d status %08x\n",
		 smp_processor_id(), __func__, cpu, read_c0_status());

	gic_send_ipi(intr);
}
/* Raise the GIC IPI that forces a reschedule on @cpu. */
static void ipi_resched(unsigned int cpu)
{
	unsigned int intr = plat_ipi_resched_int_xlate(cpu);

	pr_debug("CPU%d: %s cpu %d status %08x\n",
		 smp_processor_id(), __func__, cpu, read_c0_status());

	gic_send_ipi(intr);
}
61 * FIXME: This isn't restricted to CMP
62 * The SMVP kernel could use GIC interrupts if available
64 void cmp_send_ipi_single(int cpu, unsigned int action)
66 unsigned long flags;
68 local_irq_save(flags);
70 switch (action) {
71 case SMP_CALL_FUNCTION:
72 ipi_call_function(cpu);
73 break;
75 case SMP_RESCHEDULE_YOURSELF:
76 ipi_resched(cpu);
77 break;
80 local_irq_restore(flags);
83 static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
85 unsigned int i;
87 for_each_cpu(i, mask)
88 cmp_send_ipi_single(i, action);
91 static void cmp_init_secondary(void)
93 struct cpuinfo_mips *c = &current_cpu_data;
95 /* Assume GIC is present */
96 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
97 STATUSF_IP7);
99 /* Enable per-cpu interrupts: platform specific */
101 c->core = (read_c0_ebase() >> 1) & 0xff;
102 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
103 c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
104 #endif
105 #ifdef CONFIG_MIPS_MT_SMTC
106 c->tc_id = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC;
107 #endif
110 static void cmp_smp_finish(void)
112 pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
114 /* CDFIXME: remove this? */
115 write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
117 #ifdef CONFIG_MIPS_MT_FPAFF
118 /* If we have an FPU, enroll ourselves in the FPU-full mask */
119 if (cpu_has_fpu)
120 cpu_set(smp_processor_id(), mt_fpu_cpumask);
121 #endif /* CONFIG_MIPS_MT_FPAFF */
123 local_irq_enable();
/* Hook called once all CPUs are up; nothing to do beyond a trace. */
static void cmp_cpus_done(void)
{
	pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
}
132 * Setup the PC, SP, and GP of a secondary processor and start it running
133 * smp_bootstrap is the place to resume from
134 * __KSTK_TOS(idle) is apparently the stack pointer
135 * (unsigned long)idle->thread_info the gp
137 static void cmp_boot_secondary(int cpu, struct task_struct *idle)
139 struct thread_info *gp = task_thread_info(idle);
140 unsigned long sp = __KSTK_TOS(idle);
141 unsigned long pc = (unsigned long)&smp_bootstrap;
142 unsigned long a0 = 0;
144 pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(),
145 __func__, cpu);
147 #if 0
148 /* Needed? */
149 flush_icache_range((unsigned long)gp,
150 (unsigned long)(gp + sizeof(struct thread_info)));
151 #endif
153 amon_cpu_start(cpu, pc, sp, (unsigned long)gp, a0);
157 * Common setup before any secondaries are started
159 void __init cmp_smp_setup(void)
161 int i;
162 int ncpu = 0;
164 pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
166 #ifdef CONFIG_MIPS_MT_FPAFF
167 /* If we have an FPU, enroll ourselves in the FPU-full mask */
168 if (cpu_has_fpu)
169 cpu_set(0, mt_fpu_cpumask);
170 #endif /* CONFIG_MIPS_MT_FPAFF */
172 for (i = 1; i < NR_CPUS; i++) {
173 if (amon_cpu_avail(i)) {
174 set_cpu_possible(i, true);
175 __cpu_number_map[i] = ++ncpu;
176 __cpu_logical_map[ncpu] = i;
180 if (cpu_has_mipsmt) {
181 unsigned int nvpe, mvpconf0 = read_c0_mvpconf0();
183 nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
184 smp_num_siblings = nvpe;
186 pr_info("Detected %i available secondary CPU(s)\n", ncpu);
189 void __init cmp_prepare_cpus(unsigned int max_cpus)
191 pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n",
192 smp_processor_id(), __func__, max_cpus);
195 * FIXME: some of these options are per-system, some per-core and
196 * some per-cpu
198 mips_mt_set_cpuoptions();
201 struct plat_smp_ops cmp_smp_ops = {
202 .send_ipi_single = cmp_send_ipi_single,
203 .send_ipi_mask = cmp_send_ipi_mask,
204 .init_secondary = cmp_init_secondary,
205 .smp_finish = cmp_smp_finish,
206 .cpus_done = cmp_cpus_done,
207 .boot_secondary = cmp_boot_secondary,
208 .smp_setup = cmp_smp_setup,
209 .prepare_cpus = cmp_prepare_cpus,