// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <linux/syscore_ops.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/kexec.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * The spufs fault-handling code needs to call force_sig_fault to raise signals
 * on DMA errors. Export it here to avoid general kernel-wide access to this
 * function.
 */
EXPORT_SYMBOL_GPL(force_sig_fault);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep. Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);
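
/*
 * Invalidate all SLB entries on one SPU. Only issues the invalidate when
 * translation is enabled (MFC_STATE1_RELOCATE_MASK set), holding the
 * per-SPU register lock around the privileged register access.
 */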
void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	unsigned long flags;

	spin_lock_irqsave(&spu->register_lock, flags);
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
	spin_unlock_irqrestore(&spu->register_lock, flags);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);
}
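
/*
 * Attach (or, with mm == NULL, detach) an mm to an SPU. The assignment to
 * spu->mm is protected by spu_full_list_lock; any non-NULL mm is marked
 * as needing global TLB invalidations.
 */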
void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

int spu_64k_pages_available(void)
{
	return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
}
EXPORT_SYMBOL_GPL(spu_64k_pages_available);
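
/*
 * Restart a stopped MFC DMA queue. If a context switch is pending, only
 * record that a fault still needs handling; the restart is then left to
 * the context-switch code.
 */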
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
	else {
		set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
		mb();
	}
}

static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n",
			__func__, slbe, slb->vsid, slb->esid);

	out_be64(&priv2->slb_index_W, slbe);
	/* set invalid before writing vsid */
	out_be64(&priv2->slb_esid_RW, 0);
	/* now it's safe to write the vsid */
	out_be64(&priv2->slb_vsid_RW, slb->vsid);
	/* setting the new esid makes the entry valid again */
	out_be64(&priv2->slb_esid_RW, slb->esid);
}
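
/*
 * Handle an SPE segment fault: compute the SLB entry for the faulting
 * effective address, install it in the next replacement slot (round-robin
 * over the eight SPU SLB entries) and restart the MFC DMA.
 */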
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct copro_slb slb;
	int ret;

	ret = copro_calculate_slb(spu->mm, ea, &slb);
	if (ret)
		return ret;

	spu_load_slb(spu, spu->slb_replace, &slb);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);
	spu->stats.slb_flt++;
	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access,
		     unsigned long trap, unsigned long dsisr); //XXX
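
/*
 * Handle an SPE storage (hash) fault. Kernel-space hash faults are resolved
 * inline via hash_page(); user-space faults are recorded in class_1_dar and
 * class_1_dsisr and deferred to process context through stop_callback().
 */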
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	int ret;

	pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);

	/*
	 * Handle kernel space hash faults immediately. User hash
	 * faults need to be deferred to process context.
	 */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
	    (get_region_id(ea) != USER_REGION_ID)) {

		spin_unlock(&spu->register_lock);
		ret = hash_page(ea,
				_PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED,
				0x300, dsisr);
		spin_lock(&spu->register_lock);

		if (!ret) {
			spu_restart_dma(spu);
			return 0;
		}
	}

	spu->class_1_dar = ea;
	spu->class_1_dsisr = dsisr;

	spu->stop_callback(spu, 1);

	spu->class_1_dar = 0;
	spu->class_1_dsisr = 0;

	return 0;
}

static void __spu_kernel_slb(void *addr, struct copro_slb *slb)
{
	unsigned long ea = (unsigned long)addr;
	u64 llp;

	if (get_region_id(ea) == LINEAR_MAP_REGION_ID)
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
	else
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;

	slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
		SLB_VSID_KERNEL | llp;
	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}

/**
 * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
 * address @new_addr is present.
 */
static inline int __slb_present(struct copro_slb *slbs, int nr_slbs,
		void *new_addr)
{
	unsigned long ea = (unsigned long)new_addr;
	int i;

	for (i = 0; i < nr_slbs; i++)
		if (!((slbs[i].esid ^ ea) & ESID_MASK))
			return 1;

	return 0;
}

/**
 * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
 * need to map both the context save area, and the save/restore code.
 *
 * Because the lscsa and code may cross segment boundaries, we check to see
 * if mappings are required for the start and end of each range. We currently
 * assume that the mappings are smaller than one segment - if not, something
 * is seriously wrong.
 */
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
		void *code, int code_size)
{
	struct copro_slb slbs[4];
	int i, nr_slbs = 0;
	/* start and end addresses of both mappings */
	void *addrs[] = {
		lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
		code, code + code_size - 1,
	};

	/* check the set of addresses, and create a new entry in the slbs array
	 * if there isn't already an SLB for that address */
	for (i = 0; i < ARRAY_SIZE(addrs); i++) {
		if (__slb_present(slbs, nr_slbs, addrs[i]))
			continue;

		__spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
		nr_slbs++;
	}

	spin_lock_irq(&spu->register_lock);
	/* Add the set of SLBs */
	for (i = 0; i < nr_slbs; i++)
		spu_load_slb(spu, i, &slbs[i]);
	spin_unlock_irq(&spu->register_lock);
}
EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
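
/*
 * Class 0 (error) interrupt handler. The pending status and fault address
 * are latched for the owning context before stop_callback() is invoked.
 */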
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask;

	spu = data;

	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0) & mask;

	spu->class_0_pending |= stat;
	spu->class_0_dar = spu_mfc_dar_get(spu);
	spu->stop_callback(spu, 0);
	spu->class_0_pending = 0;
	spu->class_0_dar = 0;

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock(&spu->register_lock);

	return IRQ_HANDLED;
}
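
/*
 * Class 1 interrupt handler: translation faults (segment and storage
 * faults raised by MFC DMA). Status, DAR and DSISR are read and cleared
 * atomically under the register lock before the faults are handled.
 */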
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & CLASS1_STORAGE_FAULT_INTR)
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);

	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & CLASS1_SEGMENT_FAULT_INTR)
		__spu_trap_data_seg(spu, dar);

	if (stat & CLASS1_STORAGE_FAULT_INTR)
		__spu_trap_data_map(spu, dar, dsisr);

	spu->class_1_dsisr = 0;
	spu->class_1_dar = 0;

	spin_unlock(&spu->register_lock);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
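
/*
 * Class 2 interrupt handler: mailbox, SPU stop/halt and DMA tag-group
 * completion interrupts. Level-triggered mailbox interrupts are masked
 * before being acknowledged, then the per-event callbacks are run.
 */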
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;
	const int mailbox_intrs =
		CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/* mailbox interrupts are level triggered. mask them now before
	 * acknowledging */
	if (stat & mailbox_intrs)
		spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & CLASS2_MAILBOX_INTR)
		spu->ibox_callback(spu);

	if (stat & CLASS2_SPU_STOP_INTR)
		spu->stop_callback(spu, 2);

	if (stat & CLASS2_SPU_HALT_INTR)
		spu->stop_callback(spu, 2);

	if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
		spu->mfc_callback(spu);

	if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
		spu->wbox_callback(spu);

	spu->stats.class2_intr++;

	spin_unlock(&spu->register_lock);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
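
/*
 * Request the three per-SPE interrupt lines (class 0, 1 and 2), naming
 * them "spe%02d.<class>". On failure, any lines already requested are
 * freed again before the error is returned.
 */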
static int __init spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0]) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  0, spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1]) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  0, spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2]) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  0, spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1])
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0])
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0])
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1])
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2])
		free_irq(spu->irqs[2], spu);
}
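
/*
 * Put the SPU channels into a sane initial state: zero the data of the
 * channels in zero_list, then program the channel counts in count_list
 * to meaningful initial values.
 */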
void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static struct bus_type spu_subsys = {
	.name = "spu",
	.dev_name = "spu",
};

int spu_add_dev_attr(struct device_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		device_create_file(&spu->dev, attr);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_dev_attr);

int spu_add_dev_attr_group(const struct attribute_group *attrs)
{
	struct spu *spu;
	int rc = 0;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		rc = sysfs_create_group(&spu->dev.kobj, attrs);

		/* we're in trouble here, but try unwinding anyway */
		if (rc) {
			printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
					__func__, attrs->name);

			list_for_each_entry_continue_reverse(spu,
					&spu_full_list, full_list)
				sysfs_remove_group(&spu->dev.kobj, attrs);
			break;
		}
	}

	mutex_unlock(&spu_full_list_mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(spu_add_dev_attr_group);

void spu_remove_dev_attr(struct device_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		device_remove_file(&spu->dev, attr);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_dev_attr);

void spu_remove_dev_attr_group(const struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->dev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_dev_attr_group);

static int __init spu_create_dev(struct spu *spu)
{
	int ret;

	spu->dev.id = spu->number;
	spu->dev.bus = &spu_subsys;
	ret = device_register(&spu->dev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->dev, spu->node);

	return 0;
}
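
/*
 * Allocate and bring up one SPU: assign its number, do the platform
 * specific setup via spu_create_spu(), program SDR and SR1, request the
 * interrupt lines, register the sysfs device and link the SPU into the
 * per-node and global lists.
 */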
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->alloc_state = SPU_FREE;

	spin_lock_init(&spu->register_lock);
	spin_lock(&spu_lock);
	spu->number = number++;
	spin_unlock(&spu_lock);

	ret = spu_create_spu(spu, data);

	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_dev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&cbe_spu_info[spu->node].list_mutex);
	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
	cbe_spu_info[spu->node].n_spus++;
	mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

	mutex_lock(&spu_full_list_mutex);
	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	mutex_unlock(&spu_full_list_mutex);

	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
	spu->stats.tstamp = ktime_get_ns();

	INIT_LIST_HEAD(&spu->aff_list);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static const char *spu_state_names[] = {
	"user", "system", "iowait", "idle"
};

static unsigned long long spu_acct_time(struct spu *spu,
		enum spu_utilization_state state)
{
	unsigned long long time = spu->stats.times[state];

	/*
	 * If the spu is idle or the context is stopped, utilization
	 * statistics are not updated. Apply the time delta from the
	 * last recorded state of the spu.
	 */
	if (spu->stats.util_state == state)
		time += ktime_get_ns() - spu->stats.tstamp;

	return time / NSEC_PER_MSEC;
}

static ssize_t spu_stat_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct spu *spu = container_of(dev, struct spu, dev);

	return sprintf(buf, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		spu_state_names[spu->stats.util_state],
		spu_acct_time(spu, SPU_UTIL_USER),
		spu_acct_time(spu, SPU_UTIL_SYSTEM),
		spu_acct_time(spu, SPU_UTIL_IOWAIT),
		spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
		spu->stats.vol_ctx_switch,
		spu->stats.invol_ctx_switch,
		spu->stats.slb_flt,
		spu->stats.hash_flt,
		spu->stats.min_flt,
		spu->stats.maj_flt,
		spu->stats.class2_intr,
		spu->stats.libassist);
}

static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL);

#ifdef CONFIG_KEXEC_CORE

struct crash_spu_info {
	struct spu *spu;
	u32 saved_spu_runcntl_RW;
	u32 saved_spu_status_R;
	u32 saved_spu_npc_RW;
	u64 saved_mfc_sr1_RW;
	u64 saved_mfc_dar;
	u64 saved_mfc_dsisr;
};

#define CRASH_NUM_SPUS	16	/* Enough for current hardware */
static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];
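
/*
 * Crash-shutdown hook: save the run control, status, NPC and MFC fault
 * registers of every registered SPU, then clear the master run control
 * bit in SR1 to stop it before the crash kernel takes over.
 */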
static void crash_kexec_stop_spus(void)
{
	struct spu *spu;
	int i;
	u64 tmp;

	for (i = 0; i < CRASH_NUM_SPUS; i++) {
		if (!crash_spu_info[i].spu)
			continue;

		spu = crash_spu_info[i].spu;

		crash_spu_info[i].saved_spu_runcntl_RW =
			in_be32(&spu->problem->spu_runcntl_RW);
		crash_spu_info[i].saved_spu_status_R =
			in_be32(&spu->problem->spu_status_R);
		crash_spu_info[i].saved_spu_npc_RW =
			in_be32(&spu->problem->spu_npc_RW);

		crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu);
		crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu);
		tmp = spu_mfc_sr1_get(spu);
		crash_spu_info[i].saved_mfc_sr1_RW = tmp;

		tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
		spu_mfc_sr1_set(spu, tmp);

		__delay(200);
	}
}

static void __init crash_register_spus(struct list_head *list)
{
	struct spu *spu;
	int ret;

	list_for_each_entry(spu, list, full_list) {
		if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
			continue;

		crash_spu_info[spu->number].spu = spu;
	}

	ret = crash_shutdown_register(&crash_kexec_stop_spus);
	if (ret)
		printk(KERN_ERR "Could not register SPU crash handler");
}

#else
static inline void crash_register_spus(struct list_head *list)
{
}
#endif

static void spu_shutdown(void)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		spu_free_irqs(spu);
		spu_destroy_spu(spu);
	}
	mutex_unlock(&spu_full_list_mutex);
}

static struct syscore_ops spu_syscore_ops = {
	.shutdown = spu_shutdown,
};
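
/*
 * Initcall: register the "spu" subsystem, enumerate and create all SPUs
 * via the platform's management ops, then hook up xmon, kexec crash
 * handling, the stat sysfs attribute and the syscore shutdown op.
 */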
static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&cbe_spu_info[i].list_mutex);
		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
	}

	if (!spu_management_ops)
		goto out;

	/* create system subsystem for spus */
	ret = subsys_system_register(&spu_subsys, NULL);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);

	if (ret < 0) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__func__);
		goto out_unregister_subsys;
	}

	if (ret > 0)
		fb_append_extra_logo(&logo_spe_clut224, ret);

	mutex_lock(&spu_full_list_mutex);
	xmon_register_spus(&spu_full_list);
	crash_register_spus(&spu_full_list);
	mutex_unlock(&spu_full_list_mutex);
	spu_add_dev_attr(&dev_attr_stat);
	register_syscore_ops(&spu_syscore_ops);

	spu_init_affinity();

	return 0;

out_unregister_subsys:
	bus_unregister(&spu_subsys);
out:
	return ret;
}
device_initcall(init_spu_base);