/*
 * spu management operations for of based platforms
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 * Copyright 2006 Sony Corp.
 * (C) Copyright 2007 TOSHIBA CORPORATION
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/device.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/firmware.h>
#include <asm/prom.h>

#include "spufs/spufs.h"
#include "interrupt.h"
struct device_node *spu_devnode(struct spu *spu)
{
        return spu->devnode;
}

EXPORT_SYMBOL_GPL(spu_devnode);
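
/*
 * Determine the SPE's unit number from its device tree node, trying the
 * properties used by the different device tree flavours in turn.
 */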
static u64 __init find_spu_unit_number(struct device_node *spe)
{
        const unsigned int *prop;
        int proplen;

        /* new device trees should provide the physical-id attribute */
        prop = of_get_property(spe, "physical-id", &proplen);
        if (proplen == 4)
                return (u64)*prop;

        /* celleb device tree provides the unit-id */
        prop = of_get_property(spe, "unit-id", &proplen);
        if (proplen == 4)
                return (u64)*prop;

        /* legacy device trees provide the id in the reg attribute */
        prop = of_get_property(spe, "reg", &proplen);
        if (proplen == 4)
                return (u64)*prop;

        return 0;
}
static void spu_unmap(struct spu *spu)
{
        if (!firmware_has_feature(FW_FEATURE_LPAR))
                iounmap(spu->priv1);
        iounmap(spu->priv2);
        iounmap(spu->problem);
        iounmap((__force u8 __iomem *)spu->local_store);
}
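
/*
 * Legacy interrupt setup: build the IIC interrupt source number from the
 * SPE's "isrc" property and the BE's "node-id", then create mappings for
 * all three interrupt classes.
 */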
static int __init spu_map_interrupts_old(struct spu *spu,
        struct device_node *np)
{
        unsigned int isrc;
        const u32 *tmp;
        int nid;

        /* Get the interrupt source unit from the device-tree */
        tmp = of_get_property(np, "isrc", NULL);
        if (!tmp)
                return -ENODEV;
        isrc = tmp[0];

        tmp = of_get_property(np->parent->parent, "node-id", NULL);
        if (!tmp) {
                printk(KERN_WARNING "%s: can't find node-id\n", __func__);
                nid = spu->node;
        } else
                nid = tmp[0];

        /* Add the node number */
        isrc |= nid << IIC_IRQ_NODE_SHIFT;

        /* Now map interrupts of all 3 classes */
        spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
        spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
        spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);

        /* Right now, we only fail if class 2 failed */
        return !spu->irqs[2] ? -EINVAL : 0;
}
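
/*
 * Map a register area described by one of the old-style properties
 * ("local-store", "problem", "priv1", "priv2"), which carry a packed
 * { address, len } pair.
 */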
static void __iomem * __init spu_map_prop_old(struct spu *spu,
                                              struct device_node *n,
                                              const char *name)
{
        const struct address_prop {
                unsigned long address;
                unsigned int len;
        } __attribute__((packed)) *prop;
        int proplen;

        prop = of_get_property(n, name, &proplen);
        if (prop == NULL || proplen != sizeof (struct address_prop))
                return NULL;

        return ioremap(prop->address, prop->len);
}
static int __init spu_map_device_old(struct spu *spu)
{
        struct device_node *node = spu->devnode;
        const char *prop;
        int ret;

        ret = -ENODEV;
        spu->name = of_get_property(node, "name", NULL);
        if (!spu->name)
                goto out;

        prop = of_get_property(node, "local-store", NULL);
        if (!prop)
                goto out;
        spu->local_store_phys = *(unsigned long *)prop;

        /* we use local store as ram, not io memory */
        spu->local_store = (void __force *)
                spu_map_prop_old(spu, node, "local-store");
        if (!spu->local_store)
                goto out;

        prop = of_get_property(node, "problem", NULL);
        if (!prop)
                goto out_unmap;
        spu->problem_phys = *(unsigned long *)prop;

        spu->problem = spu_map_prop_old(spu, node, "problem");
        if (!spu->problem)
                goto out_unmap;

        spu->priv2 = spu_map_prop_old(spu, node, "priv2");
        if (!spu->priv2)
                goto out_unmap;

        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                spu->priv1 = spu_map_prop_old(spu, node, "priv1");
                if (!spu->priv1)
                        goto out_unmap;
        }

        ret = 0;
        goto out;

out_unmap:
        spu_unmap(spu);
out:
        return ret;
}
static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
{
        struct of_phandle_args oirq;
        int ret;
        int i;

        for (i = 0; i < 3; i++) {
                ret = of_irq_parse_one(np, i, &oirq);
                if (ret) {
                        pr_debug("spu_new: failed to get irq %d\n", i);
                        goto err;
                }
                ret = -EINVAL;
                pr_debug("  irq %d no 0x%x on %s\n", i, oirq.args[0],
                         oirq.np->full_name);
                spu->irqs[i] = irq_create_of_mapping(&oirq);
                if (!spu->irqs[i]) {
                        pr_debug("spu_new: failed to map it !\n");
                        goto err;
                }
        }
        return 0;

err:
        pr_debug("failed to map irq %x for spu %s\n", *oirq.args,
                 spu->name);
        for (; i >= 0; i--) {
                if (spu->irqs[i])
                        irq_dispose_mapping(spu->irqs[i]);
        }
        return ret;
}
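
/*
 * Map the nr-th address entry of the SPE node and optionally hand back its
 * physical address (needed for the local store and the problem state area).
 */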
static int spu_map_resource(struct spu *spu, int nr,
                            void __iomem **virt, unsigned long *phys)
{
        struct device_node *np = spu->devnode;
        struct resource resource = { };
        unsigned long len;
        int ret;

        ret = of_address_to_resource(np, nr, &resource);
        if (ret)
                return ret;
        if (phys)
                *phys = resource.start;
        len = resource_size(&resource);
        *virt = ioremap(resource.start, len);
        if (!*virt)
                return -EINVAL;
        return 0;
}
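
/*
 * Map all SPE register areas from the node's standard address entries:
 * local store, problem state, priv2 and, when running without a
 * hypervisor, priv1.
 */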
static int __init spu_map_device(struct spu *spu)
{
        struct device_node *np = spu->devnode;
        int ret = -ENODEV;

        spu->name = of_get_property(np, "name", NULL);
        if (!spu->name)
                goto out;

        ret = spu_map_resource(spu, 0, (void __iomem **)&spu->local_store,
                               &spu->local_store_phys);
        if (ret) {
                pr_debug("spu_new: failed to map %s resource 0\n",
                         np->full_name);
                goto out;
        }
        ret = spu_map_resource(spu, 1, (void __iomem **)&spu->problem,
                               &spu->problem_phys);
        if (ret) {
                pr_debug("spu_new: failed to map %s resource 1\n",
                         np->full_name);
                goto out_unmap;
        }
        ret = spu_map_resource(spu, 2, (void __iomem **)&spu->priv2, NULL);
        if (ret) {
                pr_debug("spu_new: failed to map %s resource 2\n",
                         np->full_name);
                goto out_unmap;
        }
        if (!firmware_has_feature(FW_FEATURE_LPAR))
                ret = spu_map_resource(spu, 3,
                                       (void __iomem **)&spu->priv1, NULL);
        if (ret) {
                pr_debug("spu_new: failed to map %s resource 3\n",
                         np->full_name);
                goto out_unmap;
        }

        pr_debug("spu_new: %s maps:\n", np->full_name);
        pr_debug("  local store   : 0x%016lx -> 0x%p\n",
                 spu->local_store_phys, spu->local_store);
        pr_debug("  problem state : 0x%016lx -> 0x%p\n",
                 spu->problem_phys, spu->problem);
        pr_debug("  priv2         : 0x%p\n", spu->priv2);
        pr_debug("  priv1         : 0x%p\n", spu->priv1);

        return 0;

out_unmap:
        spu_unmap(spu);
out:
        pr_debug("failed to map spe %s: %d\n", spu->name, ret);
        return ret;
}
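
/*
 * Walk every device tree node of type "spe" and invoke the callback for
 * each one; returns the number of SPEs enumerated, or an error.
 */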
static int __init of_enumerate_spus(int (*fn)(void *data))
{
        int ret;
        struct device_node *node;
        unsigned int n = 0;

        ret = -ENODEV;
        for (node = of_find_node_by_type(NULL, "spe");
                        node; node = of_find_node_by_type(node, "spe")) {
                ret = fn(node);
                if (ret) {
                        printk(KERN_WARNING "%s: Error initializing %s\n",
                                __func__, node->name);
                        of_node_put(node);
                        break;
                }
                n++;
        }
        return ret ? ret : n;
}
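
/*
 * Fill in a struct spu from its device tree node, preferring the current
 * binding and falling back to the legacy register and interrupt layout
 * when that fails.
 */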
static int __init of_create_spu(struct spu *spu, void *data)
{
        int ret;
        struct device_node *spe = (struct device_node *)data;
        static int legacy_map = 0, legacy_irq = 0;

        spu->devnode = of_node_get(spe);
        spu->spe_id = find_spu_unit_number(spe);

        spu->node = of_node_to_nid(spe);
        if (spu->node >= MAX_NUMNODES) {
                printk(KERN_WARNING "SPE %s on node %d ignored,"
                       " node number too big\n", spe->full_name, spu->node);
                printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
                ret = -ENODEV;
                goto out;
        }

        ret = spu_map_device(spu);
        if (ret) {
                if (!legacy_map) {
                        legacy_map = 1;
                        printk(KERN_WARNING "%s: Legacy device tree found, "
                                "trying to map old style\n", __func__);
                }
                ret = spu_map_device_old(spu);
                if (ret) {
                        printk(KERN_ERR "Unable to map %s\n",
                                spu->name);
                        goto out;
                }
        }

        ret = spu_map_interrupts(spu, spe);
        if (ret) {
                if (!legacy_irq) {
                        legacy_irq = 1;
                        printk(KERN_WARNING "%s: Legacy device tree found, "
                                "trying old style irq\n", __func__);
                }
                ret = spu_map_interrupts_old(spu, spe);
                if (ret) {
                        printk(KERN_ERR "%s: could not map interrupts\n",
                                spu->name);
                        goto out_unmap;
                }
        }

        pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name,
                spu->local_store, spu->problem, spu->priv1,
                spu->priv2, spu->number);
        goto out;

out_unmap:
        spu_unmap(spu);
out:
        return ret;
}
static int of_destroy_spu(struct spu *spu)
{
        spu_unmap(spu);
        of_node_put(spu->devnode);
        return 0;
}
static void enable_spu_by_master_run(struct spu_context *ctx)
{
        ctx->ops->master_start(ctx);
}

static void disable_spu_by_master_run(struct spu_context *ctx)
{
        ctx->ops->master_stop(ctx);
}
/* Hardcoded affinity idxs for qs20 */
#define QS20_SPES_PER_BE 8
static int qs20_reg_idxs[QS20_SPES_PER_BE] =   { 0, 2, 4, 6, 7, 5, 3, 1 };
static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 };
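
/* Find the SPU on 'node' whose device tree "reg" property matches 'reg'. */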
static struct spu *spu_lookup_reg(int node, u32 reg)
{
        struct spu *spu;
        const u32 *spu_reg;

        list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                spu_reg = of_get_property(spu_devnode(spu), "reg", NULL);
                if (*spu_reg == reg)
                        return spu;
        }
        return NULL;
}
static void init_affinity_qs20_harcoded(void)
{
        int node, i;
        struct spu *last_spu, *spu;
        u32 reg;

        for (node = 0; node < MAX_NUMNODES; node++) {
                last_spu = NULL;
                for (i = 0; i < QS20_SPES_PER_BE; i++) {
                        reg = qs20_reg_idxs[i];
                        spu = spu_lookup_reg(node, reg);
                        if (!spu)
                                continue;
                        spu->has_mem_affinity = qs20_reg_memory[reg];
                        if (last_spu)
                                list_add_tail(&spu->aff_list,
                                                &last_spu->aff_list);
                        last_spu = spu;
                }
        }
}
static int of_has_vicinity(void)
{
        struct device_node *dn;

        for_each_node_by_type(dn, "spe") {
                if (of_find_property(dn, "vicinity", NULL)) {
                        of_node_put(dn);
                        return 1;
                }
        }
        return 0;
}
static struct spu *devnode_spu(int cbe, struct device_node *dn)
{
        struct spu *spu;

        list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list)
                if (spu_devnode(spu) == dn)
                        return spu;
        return NULL;
}
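
/*
 * Return the SPU on 'cbe' whose "vicinity" property references 'target',
 * skipping the node we came from ('avoid').
 */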
static struct spu *
neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid)
{
        struct spu *spu;
        struct device_node *spu_dn;
        const phandle *vic_handles;
        int lenp, i;

        list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) {
                spu_dn = spu_devnode(spu);
                if (spu_dn == avoid)
                        continue;
                vic_handles = of_get_property(spu_dn, "vicinity", &lenp);
                for (i = 0; i < (lenp / sizeof(phandle)); i++) {
                        if (vic_handles[i] == target->phandle)
                                return spu;
                }
        }
        return NULL;
}
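
/*
 * Build the affinity list for one BE by following the "vicinity" phandles
 * from SPE to SPE; a memory interface ("mic-tm") neighbour marks memory
 * affinity on the adjacent SPEs.
 */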
static void init_affinity_node(int cbe)
{
        struct spu *spu, *last_spu;
        struct device_node *vic_dn, *last_spu_dn;
        phandle avoid_ph;
        const phandle *vic_handles;
        const char *name;
        int lenp, i, added;

        last_spu = list_first_entry(&cbe_spu_info[cbe].spus, struct spu,
                                                        cbe_list);
        avoid_ph = 0;
        for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) {
                last_spu_dn = spu_devnode(last_spu);
                vic_handles = of_get_property(last_spu_dn, "vicinity", &lenp);

                /*
                 * Walk through each phandle in vicinity property of the spu
                 * (typically two vicinity phandles per spe node)
                 */
                for (i = 0; i < (lenp / sizeof(phandle)); i++) {
                        if (vic_handles[i] == avoid_ph)
                                continue;

                        vic_dn = of_find_node_by_phandle(vic_handles[i]);
                        if (!vic_dn)
                                continue;

                        /* a neighbour might be spe, mic-tm, or bif0 */
                        name = of_get_property(vic_dn, "name", NULL);
                        if (!name)
                                continue;

                        if (strcmp(name, "spe") == 0) {
                                spu = devnode_spu(cbe, vic_dn);
                                avoid_ph = last_spu_dn->phandle;
                        } else {
                                /*
                                 * "mic-tm" and "bif0" nodes do not have
                                 * vicinity property. So we need to find the
                                 * spe which has vic_dn as neighbour, but
                                 * skipping the one we came from (last_spu_dn)
                                 */
                                spu = neighbour_spu(cbe, vic_dn, last_spu_dn);
                                if (!spu)
                                        continue;
                                if (!strcmp(name, "mic-tm")) {
                                        last_spu->has_mem_affinity = 1;
                                        spu->has_mem_affinity = 1;
                                }
                                avoid_ph = vic_dn->phandle;
                        }

                        list_add_tail(&spu->aff_list, &last_spu->aff_list);
                        last_spu = spu;
                        break;
                }
        }
}
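
/*
 * Firmware/device-tree driven affinity setup: derive the affinity lists of
 * every BE from the vicinity information above.
 */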
static void init_affinity_fw(void)
{
        int cbe;

        for (cbe = 0; cbe < MAX_NUMNODES; cbe++)
                init_affinity_node(cbe);
}

static int __init init_affinity(void)
{
        if (of_has_vicinity()) {
                init_affinity_fw();
        } else {
                if (of_machine_is_compatible("IBM,CPBW-1.0"))
                        init_affinity_qs20_harcoded();
                else
                        printk("No affinity configuration found\n");
        }

        return 0;
}
const struct spu_management_ops spu_management_of_ops = {
        .enumerate_spus = of_enumerate_spus,
        .create_spu = of_create_spu,
        .destroy_spu = of_destroy_spu,
        .enable_spu = enable_spu_by_master_run,
        .disable_spu = disable_spu_by_master_run,
        .init_affinity = init_affinity,
};
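
/*
 * A minimal usage sketch (an assumption, not code from this file): platform
 * setup code for native cell hardware is expected to install these ops
 * through the global spu_management_ops pointer declared alongside
 * struct spu_management_ops, e.g.:
 *
 *	spu_management_ops = &spu_management_of_ops;
 */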