/*
 * Accessor routines for the various MMIO register blocks of the CBE
 *
 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
 */
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/cell-regs.h>
/*
 * Current implementation uses "cpu" nodes. We build our own mapping
 * array of cpu numbers to cpu nodes locally for now to allow interrupt
 * time code to have a fast path rather than call of_get_cpu_node(). If
 * we implement cpu hotplug, we'll have to install an appropriate notifier
 * in order to release references to the cpu going away.
 */
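/*
 * One register map per Cell BE chip: the device-tree nodes it was built
 * from, the ioremapped pervasive (PMD), internal interrupt controller
 * (IIC) and MIC-TM register blocks, and a software shadow of some PMD
 * registers.
 */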
static struct cbe_regs_map
{
	struct device_node *cpu_node;
	struct device_node *be_node;
	struct cbe_pmd_regs __iomem *pmd_regs;
	struct cbe_iic_regs __iomem *iic_regs;
	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
	struct cbe_pmd_shadow_regs pmd_shadow_regs;
} cbe_regs_maps[MAX_CBE];
static int cbe_regs_map_count;
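/*
 * Per logical CPU entry of the fast map mentioned above; regs and cbe_id
 * are filled in by cbe_regs_init() so interrupt-time code can reach its
 * chip's registers without a device-tree lookup.
 */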
static struct cbe_thread_map
{
	struct device_node *cpu_node;
	struct device_node *be_node;
	struct cbe_regs_map *regs;
	unsigned int thread_id;
	unsigned int cbe_id;
} cbe_thread_map[NR_CPUS];
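/*
 * cbe_local_mask[n] collects the logical CPUs living on Cell chip n;
 * cbe_first_online_cpu collects hardware thread 0 of every chip.
 */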
static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = CPU_MASK_NONE };
static cpumask_t cbe_first_online_cpu = CPU_MASK_NONE;
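/*
 * Resolve a device-tree node to its register map: cpu and be nodes are
 * looked up directly in cbe_regs_maps, while SPE nodes walk up their
 * parents until a cpu or be node is found, caching the result in np->data.
 */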
static struct cbe_regs_map *cbe_find_map(struct device_node *np)
{
	int i;
	struct device_node *tmp_np;

	if (strcasecmp(np->type, "spe")) {
		for (i = 0; i < cbe_regs_map_count; i++)
			if (cbe_regs_maps[i].cpu_node == np ||
			    cbe_regs_maps[i].be_node == np)
				return &cbe_regs_maps[i];
		return NULL;
	}

	if (np->data)
		return np->data;

	/* walk up path until cpu or be node was found */
	tmp_np = np;
	do {
		tmp_np = tmp_np->parent;
		/* on a correct devicetree we won't get up to root */
		BUG_ON(tmp_np == NULL);
	} while (strcasecmp(tmp_np->type, "cpu") &&
		 strcasecmp(tmp_np->type, "be"));

	np->data = cbe_find_map(tmp_np);

	return np->data;
}
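/*
 * The accessors below come in pairs: one keyed by device-tree node and one
 * keyed by logical cpu number (the fast path for interrupt-time code).
 * They all return NULL when no register map is known for the argument.
 *
 * A hypothetical caller could look like this (the register field name is
 * purely illustrative, not taken from this file):
 *
 *	struct cbe_pmd_regs __iomem *pmd;
 *
 *	pmd = cbe_get_cpu_pmd_regs(smp_processor_id());
 *	if (pmd)
 *		val = in_be64(&pmd->some_pmd_register);
 */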
struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);
struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);
struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return &map->pmd_shadow_regs;
}
struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return &map->pmd_shadow_regs;
}
struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}
struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}
struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->mic_tm_regs;
}
struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->mic_tm_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);
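/*
 * Topology helpers: translate between logical cpu numbers, hardware thread
 * ids and Cell (BE chip) node numbers using the table built at init time.
 */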
u32 cbe_get_hw_thread_id(int cpu)
{
	return cbe_thread_map[cpu].thread_id;
}
EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);
u32 cbe_cpu_to_node(int cpu)
{
	return cbe_thread_map[cpu].cbe_id;
}
EXPORT_SYMBOL_GPL(cbe_cpu_to_node);
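/*
 * Return the first logical CPU on the given Cell node.  Note that
 * find_first_bit() takes its length in bits while sizeof(cpumask_t) is a
 * byte count; searching only the low bits is enough for the small CPU
 * counts seen on Cell systems.
 */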
u32 cbe_node_to_cpu(int node)
{
	return find_first_bit((unsigned long *) &cbe_local_mask[node], sizeof(cpumask_t));
}
EXPORT_SYMBOL_GPL(cbe_node_to_cpu);
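/*
 * Find the "be" device-tree node containing the given logical cpu by
 * matching the phandles in its "cpus" property against the cpu node.
 * Trees without such a property (the non-compliant CAB SLOF case) are
 * assumed to have a single be node.
 */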
static struct device_node *cbe_get_be_node(int cpu_id)
{
	struct device_node *np;

	for_each_node_by_type (np, "be") {
		int len, i;
		const phandle *cpu_handle;

		cpu_handle = of_get_property(np, "cpus", &len);

		/*
		 * the CAB SLOF tree is non compliant, so we just assume
		 * there is only one node
		 */
		if (WARN_ON_ONCE(!cpu_handle))
			return np;

		for (i = 0; i < len; i++)
			if (of_find_node_by_phandle(cpu_handle[i]) ==
			    of_get_cpu_node(cpu_id, NULL))
				return np;
	}

	return NULL;
}
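/*
 * ioremap the register blocks for one chip.  Two device-tree layouts are
 * handled: a compliant one where pervasive, interrupt-controller and
 * mic-tm nodes sit under the "be" node and are mapped with of_iomap(),
 * and a legacy one where the cpu node carries packed address/length
 * properties mapped directly with ioremap().
 */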
void __init cbe_fill_regs_map(struct cbe_regs_map *map)
{
	if (map->be_node) {
		struct device_node *be, *np;

		be = map->be_node;

		for_each_node_by_type(np, "pervasive")
			if (of_get_parent(np) == be)
				map->pmd_regs = of_iomap(np, 0);

		for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller")
			if (of_get_parent(np) == be)
				map->iic_regs = of_iomap(np, 2);

		for_each_node_by_type(np, "mic-tm")
			if (of_get_parent(np) == be)
				map->mic_tm_regs = of_iomap(np, 0);
	} else {
		struct device_node *cpu;
		/* That hack must die die die ! */
		const struct address_prop {
			unsigned long address;
			unsigned int len;
		} __attribute__((packed)) *prop;

		cpu = map->cpu_node;

		prop = of_get_property(cpu, "pervasive", NULL);
		if (prop != NULL)
			map->pmd_regs = ioremap(prop->address, prop->len);

		prop = of_get_property(cpu, "iic", NULL);
		if (prop != NULL)
			map->iic_regs = ioremap(prop->address, prop->len);

		prop = of_get_property(cpu, "mic-tm", NULL);
		if (prop != NULL)
			map->mic_tm_regs = ioremap(prop->address, prop->len);
	}
}
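/*
 * Build the cpu -> register-map tables.  This must run once early at boot,
 * before any of the accessors above are used; on Cell platforms it is
 * typically called from the platform setup code.
 */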
void __init cbe_regs_init(void)
{
	int i;
	unsigned int thread_id;
	struct device_node *cpu;

	/* Build local fast map of CPUs */
	for_each_possible_cpu(i) {
		cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id);
		cbe_thread_map[i].be_node = cbe_get_be_node(i);
		cbe_thread_map[i].thread_id = thread_id;
	}

	/* Find maps for each device tree CPU */
	for_each_node_by_type(cpu, "cpu") {
		struct cbe_regs_map *map;
		unsigned int cbe_id;

		cbe_id = cbe_regs_map_count++;
		map = &cbe_regs_maps[cbe_id];

		if (cbe_regs_map_count > MAX_CBE) {
			printk(KERN_ERR "cbe_regs: More BE chips than supported!\n");
			cbe_regs_map_count--;
			of_node_put(cpu);
			return;
		}
		map->cpu_node = cpu;

		for_each_possible_cpu(i) {
			struct cbe_thread_map *thread = &cbe_thread_map[i];

			if (thread->cpu_node == cpu) {
				thread->regs = map;
				thread->cbe_id = cbe_id;
				map->be_node = thread->be_node;
				cpu_set(i, cbe_local_mask[cbe_id]);
				if (thread->thread_id == 0)
					cpu_set(i, cbe_first_online_cpu);
			}
		}

		cbe_fill_regs_map(map);
	}
}