/*
 * spu management operations for of based platforms
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 * Copyright 2006 Sony Corp.
 * (C) Copyright 2007 TOSHIBA CORPORATION
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 */
22 #include <linux/interrupt.h>
23 #include <linux/list.h>
24 #include <linux/export.h>
25 #include <linux/ptrace.h>
26 #include <linux/wait.h>
29 #include <linux/mutex.h>
30 #include <linux/device.h>
33 #include <asm/spu_priv1.h>
34 #include <asm/firmware.h>
37 #include "spufs/spufs.h"
38 #include "interrupt.h"
40 struct device_node
*spu_devnode(struct spu
*spu
)
45 EXPORT_SYMBOL_GPL(spu_devnode
);
47 static u64 __init
find_spu_unit_number(struct device_node
*spe
)
49 const unsigned int *prop
;
52 /* new device trees should provide the physical-id attribute */
53 prop
= of_get_property(spe
, "physical-id", &proplen
);
57 /* celleb device tree provides the unit-id */
58 prop
= of_get_property(spe
, "unit-id", &proplen
);
62 /* legacy device trees provide the id in the reg attribute */
63 prop
= of_get_property(spe
, "reg", &proplen
);
70 static void spu_unmap(struct spu
*spu
)
72 if (!firmware_has_feature(FW_FEATURE_LPAR
))
75 iounmap(spu
->problem
);
76 iounmap((__force u8 __iomem
*)spu
->local_store
);
79 static int __init
spu_map_interrupts_old(struct spu
*spu
,
80 struct device_node
*np
)
86 /* Get the interrupt source unit from the device-tree */
87 tmp
= of_get_property(np
, "isrc", NULL
);
92 tmp
= of_get_property(np
->parent
->parent
, "node-id", NULL
);
94 printk(KERN_WARNING
"%s: can't find node-id\n", __func__
);
99 /* Add the node number */
100 isrc
|= nid
<< IIC_IRQ_NODE_SHIFT
;
102 /* Now map interrupts of all 3 classes */
103 spu
->irqs
[0] = irq_create_mapping(NULL
, IIC_IRQ_CLASS_0
| isrc
);
104 spu
->irqs
[1] = irq_create_mapping(NULL
, IIC_IRQ_CLASS_1
| isrc
);
105 spu
->irqs
[2] = irq_create_mapping(NULL
, IIC_IRQ_CLASS_2
| isrc
);
107 /* Right now, we only fail if class 2 failed */
108 return spu
->irqs
[2] == NO_IRQ
? -EINVAL
: 0;
111 static void __iomem
* __init
spu_map_prop_old(struct spu
*spu
,
112 struct device_node
*n
,
115 const struct address_prop
{
116 unsigned long address
;
118 } __attribute__((packed
)) *prop
;
121 prop
= of_get_property(n
, name
, &proplen
);
122 if (prop
== NULL
|| proplen
!= sizeof (struct address_prop
))
125 return ioremap(prop
->address
, prop
->len
);
128 static int __init
spu_map_device_old(struct spu
*spu
)
130 struct device_node
*node
= spu
->devnode
;
135 spu
->name
= of_get_property(node
, "name", NULL
);
139 prop
= of_get_property(node
, "local-store", NULL
);
142 spu
->local_store_phys
= *(unsigned long *)prop
;
144 /* we use local store as ram, not io memory */
145 spu
->local_store
= (void __force
*)
146 spu_map_prop_old(spu
, node
, "local-store");
147 if (!spu
->local_store
)
150 prop
= of_get_property(node
, "problem", NULL
);
153 spu
->problem_phys
= *(unsigned long *)prop
;
155 spu
->problem
= spu_map_prop_old(spu
, node
, "problem");
159 spu
->priv2
= spu_map_prop_old(spu
, node
, "priv2");
163 if (!firmware_has_feature(FW_FEATURE_LPAR
)) {
164 spu
->priv1
= spu_map_prop_old(spu
, node
, "priv1");
178 static int __init
spu_map_interrupts(struct spu
*spu
, struct device_node
*np
)
180 struct of_phandle_args oirq
;
184 for (i
=0; i
< 3; i
++) {
185 ret
= of_irq_parse_one(np
, i
, &oirq
);
187 pr_debug("spu_new: failed to get irq %d\n", i
);
191 pr_debug(" irq %d no 0x%x on %s\n", i
, oirq
.args
[0],
193 spu
->irqs
[i
] = irq_create_of_mapping(&oirq
);
194 if (spu
->irqs
[i
] == NO_IRQ
) {
195 pr_debug("spu_new: failed to map it !\n");
202 pr_debug("failed to map irq %x for spu %s\n", *oirq
.args
,
204 for (; i
>= 0; i
--) {
205 if (spu
->irqs
[i
] != NO_IRQ
)
206 irq_dispose_mapping(spu
->irqs
[i
]);
211 static int spu_map_resource(struct spu
*spu
, int nr
,
212 void __iomem
** virt
, unsigned long *phys
)
214 struct device_node
*np
= spu
->devnode
;
215 struct resource resource
= { };
219 ret
= of_address_to_resource(np
, nr
, &resource
);
223 *phys
= resource
.start
;
224 len
= resource_size(&resource
);
225 *virt
= ioremap(resource
.start
, len
);
231 static int __init
spu_map_device(struct spu
*spu
)
233 struct device_node
*np
= spu
->devnode
;
236 spu
->name
= of_get_property(np
, "name", NULL
);
240 ret
= spu_map_resource(spu
, 0, (void __iomem
**)&spu
->local_store
,
241 &spu
->local_store_phys
);
243 pr_debug("spu_new: failed to map %s resource 0\n",
247 ret
= spu_map_resource(spu
, 1, (void __iomem
**)&spu
->problem
,
250 pr_debug("spu_new: failed to map %s resource 1\n",
254 ret
= spu_map_resource(spu
, 2, (void __iomem
**)&spu
->priv2
, NULL
);
256 pr_debug("spu_new: failed to map %s resource 2\n",
260 if (!firmware_has_feature(FW_FEATURE_LPAR
))
261 ret
= spu_map_resource(spu
, 3,
262 (void __iomem
**)&spu
->priv1
, NULL
);
264 pr_debug("spu_new: failed to map %s resource 3\n",
268 pr_debug("spu_new: %s maps:\n", np
->full_name
);
269 pr_debug(" local store : 0x%016lx -> 0x%p\n",
270 spu
->local_store_phys
, spu
->local_store
);
271 pr_debug(" problem state : 0x%016lx -> 0x%p\n",
272 spu
->problem_phys
, spu
->problem
);
273 pr_debug(" priv2 : 0x%p\n", spu
->priv2
);
274 pr_debug(" priv1 : 0x%p\n", spu
->priv1
);
281 pr_debug("failed to map spe %s: %d\n", spu
->name
, ret
);
285 static int __init
of_enumerate_spus(int (*fn
)(void *data
))
288 struct device_node
*node
;
292 for (node
= of_find_node_by_type(NULL
, "spe");
293 node
; node
= of_find_node_by_type(node
, "spe")) {
296 printk(KERN_WARNING
"%s: Error initializing %s\n",
297 __func__
, node
->name
);
302 return ret
? ret
: n
;
305 static int __init
of_create_spu(struct spu
*spu
, void *data
)
308 struct device_node
*spe
= (struct device_node
*)data
;
309 static int legacy_map
= 0, legacy_irq
= 0;
311 spu
->devnode
= of_node_get(spe
);
312 spu
->spe_id
= find_spu_unit_number(spe
);
314 spu
->node
= of_node_to_nid(spe
);
315 if (spu
->node
>= MAX_NUMNODES
) {
316 printk(KERN_WARNING
"SPE %s on node %d ignored,"
317 " node number too big\n", spe
->full_name
, spu
->node
);
318 printk(KERN_WARNING
"Check if CONFIG_NUMA is enabled.\n");
323 ret
= spu_map_device(spu
);
327 printk(KERN_WARNING
"%s: Legacy device tree found, "
328 "trying to map old style\n", __func__
);
330 ret
= spu_map_device_old(spu
);
332 printk(KERN_ERR
"Unable to map %s\n",
338 ret
= spu_map_interrupts(spu
, spe
);
342 printk(KERN_WARNING
"%s: Legacy device tree found, "
343 "trying old style irq\n", __func__
);
345 ret
= spu_map_interrupts_old(spu
, spe
);
347 printk(KERN_ERR
"%s: could not map interrupts\n",
353 pr_debug("Using SPE %s %p %p %p %p %d\n", spu
->name
,
354 spu
->local_store
, spu
->problem
, spu
->priv1
,
355 spu
->priv2
, spu
->number
);
364 static int of_destroy_spu(struct spu
*spu
)
367 of_node_put(spu
->devnode
);
371 static void enable_spu_by_master_run(struct spu_context
*ctx
)
373 ctx
->ops
->master_start(ctx
);
376 static void disable_spu_by_master_run(struct spu_context
*ctx
)
378 ctx
->ops
->master_stop(ctx
);
/* Hardcoded affinity idxs for qs20 */
#define QS20_SPES_PER_BE 8
static int qs20_reg_idxs[QS20_SPES_PER_BE] =   { 0, 2, 4, 6, 7, 5, 3, 1 };
static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 };
386 static struct spu
*spu_lookup_reg(int node
, u32 reg
)
391 list_for_each_entry(spu
, &cbe_spu_info
[node
].spus
, cbe_list
) {
392 spu_reg
= of_get_property(spu_devnode(spu
), "reg", NULL
);
399 static void init_affinity_qs20_harcoded(void)
402 struct spu
*last_spu
, *spu
;
405 for (node
= 0; node
< MAX_NUMNODES
; node
++) {
407 for (i
= 0; i
< QS20_SPES_PER_BE
; i
++) {
408 reg
= qs20_reg_idxs
[i
];
409 spu
= spu_lookup_reg(node
, reg
);
412 spu
->has_mem_affinity
= qs20_reg_memory
[reg
];
414 list_add_tail(&spu
->aff_list
,
415 &last_spu
->aff_list
);
421 static int of_has_vicinity(void)
423 struct device_node
*dn
;
425 for_each_node_by_type(dn
, "spe") {
426 if (of_find_property(dn
, "vicinity", NULL
)) {
434 static struct spu
*devnode_spu(int cbe
, struct device_node
*dn
)
438 list_for_each_entry(spu
, &cbe_spu_info
[cbe
].spus
, cbe_list
)
439 if (spu_devnode(spu
) == dn
)
445 neighbour_spu(int cbe
, struct device_node
*target
, struct device_node
*avoid
)
448 struct device_node
*spu_dn
;
449 const phandle
*vic_handles
;
452 list_for_each_entry(spu
, &cbe_spu_info
[cbe
].spus
, cbe_list
) {
453 spu_dn
= spu_devnode(spu
);
456 vic_handles
= of_get_property(spu_dn
, "vicinity", &lenp
);
457 for (i
=0; i
< (lenp
/ sizeof(phandle
)); i
++) {
458 if (vic_handles
[i
] == target
->phandle
)
465 static void init_affinity_node(int cbe
)
467 struct spu
*spu
, *last_spu
;
468 struct device_node
*vic_dn
, *last_spu_dn
;
470 const phandle
*vic_handles
;
474 last_spu
= list_first_entry(&cbe_spu_info
[cbe
].spus
, struct spu
,
477 for (added
= 1; added
< cbe_spu_info
[cbe
].n_spus
; added
++) {
478 last_spu_dn
= spu_devnode(last_spu
);
479 vic_handles
= of_get_property(last_spu_dn
, "vicinity", &lenp
);
482 * Walk through each phandle in vicinity property of the spu
483 * (tipically two vicinity phandles per spe node)
485 for (i
= 0; i
< (lenp
/ sizeof(phandle
)); i
++) {
486 if (vic_handles
[i
] == avoid_ph
)
489 vic_dn
= of_find_node_by_phandle(vic_handles
[i
]);
493 /* a neighbour might be spe, mic-tm, or bif0 */
494 name
= of_get_property(vic_dn
, "name", NULL
);
498 if (strcmp(name
, "spe") == 0) {
499 spu
= devnode_spu(cbe
, vic_dn
);
500 avoid_ph
= last_spu_dn
->phandle
;
503 * "mic-tm" and "bif0" nodes do not have
504 * vicinity property. So we need to find the
505 * spe which has vic_dn as neighbour, but
506 * skipping the one we came from (last_spu_dn)
508 spu
= neighbour_spu(cbe
, vic_dn
, last_spu_dn
);
511 if (!strcmp(name
, "mic-tm")) {
512 last_spu
->has_mem_affinity
= 1;
513 spu
->has_mem_affinity
= 1;
515 avoid_ph
= vic_dn
->phandle
;
518 list_add_tail(&spu
->aff_list
, &last_spu
->aff_list
);
525 static void init_affinity_fw(void)
529 for (cbe
= 0; cbe
< MAX_NUMNODES
; cbe
++)
530 init_affinity_node(cbe
);
533 static int __init
init_affinity(void)
535 if (of_has_vicinity()) {
538 long root
= of_get_flat_dt_root();
539 if (of_flat_dt_is_compatible(root
, "IBM,CPBW-1.0"))
540 init_affinity_qs20_harcoded();
542 printk("No affinity configuration found\n");
548 const struct spu_management_ops spu_management_of_ops
= {
549 .enumerate_spus
= of_enumerate_spus
,
550 .create_spu
= of_create_spu
,
551 .destroy_spu
= of_destroy_spu
,
552 .enable_spu
= enable_spu_by_master_run
,
553 .disable_spu
= disable_spu_by_master_run
,
554 .init_affinity
= init_affinity
,