/*
 * PS3 Platform spu routines.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006 Sony Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/lv1call.h>
#include <asm/ps3.h>

#include "../cell/spufs/spufs.h"
#include "platform.h"
/* spu_management_ops */
/**
 * enum spe_type - Type of spe to create.
 * @spe_type_logical: Standard logical spe.
 *
 * For use with lv1_construct_logical_spe().  The current HV does not support
 * any types other than those listed.
 */

enum spe_type {
	SPE_TYPE_LOGICAL = 0,
};
/**
 * struct spe_shadow - logical spe shadow register area.
 *
 * Read-only shadow of spe registers.
 */

struct spe_shadow {
	u8 padding_0140[0x0140];
	u64 int_status_class0_RW;	/* 0x0140 */
	u64 int_status_class1_RW;	/* 0x0148 */
	u64 int_status_class2_RW;	/* 0x0150 */
	u8 padding_0158[0x0610-0x0158];
	u64 mfc_dsisr_RW;		/* 0x0610 */
	u8 padding_0618[0x0620-0x0618];
	u64 mfc_dar_RW;			/* 0x0620 */
	u8 padding_0628[0x0800-0x0628];
	u64 mfc_dsipr_R;		/* 0x0800 */
	u8 padding_0808[0x0810-0x0808];
	u64 mfc_lscrr_R;		/* 0x0810 */
	u8 padding_0818[0x0c00-0x0818];
	u64 mfc_cer_R;			/* 0x0c00 */
	u8 padding_0c08[0x0f00-0x0c08];
	u64 spe_execution_status;	/* 0x0f00 */
	u8 padding_0f08[0x1000-0x0f08];
};
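
/*
 * The shadow is reached through the read-only mapping set up in
 * setup_areas(); mfc_dar_get() and mfc_dsisr_get() below read these
 * registers with in_be64().
 */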
/**
 * enum spe_ex_state - Logical spe execution state.
 * @spe_ex_state_unexecutable: Uninitialized.
 * @spe_ex_state_executable: Enabled, not ready.
 * @spe_ex_state_executed: Ready for use.
 *
 * The execution state (status) of the logical spe as reported in
 * struct spe_shadow:spe_execution_status.
 */

enum spe_ex_state {
	SPE_EX_STATE_UNEXECUTABLE = 0,
	SPE_EX_STATE_EXECUTABLE = 2,
	SPE_EX_STATE_EXECUTED = 3,
};
/**
 * struct priv1_cache - Cached values of priv1 registers.
 * @masks[]: Array of cached spe interrupt masks, indexed by class.
 * @sr1: Cached mfc_sr1 register.
 * @tclass_id: Cached mfc_tclass_id register.
 */

struct priv1_cache {
	u64 masks[3];
	u64 sr1;
	u64 tclass_id;
};
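
/*
 * These values are cached because the priv1 registers are written through
 * lv1 calls and are not read back from the HV; mfc_sr1_get() and
 * mfc_tclass_id_get() below return the cached copies.
 */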
/**
 * struct spu_pdata - Platform state variables.
 * @spe_id: HV spe id returned by lv1_construct_logical_spe().
 * @resource_id: HV spe resource id returned by
 *	ps3_repository_read_spe_resource_id().
 * @priv2_addr: lpar address of spe priv2 area returned by
 *	lv1_construct_logical_spe().
 * @shadow_addr: lpar address of spe register shadow area returned by
 *	lv1_construct_logical_spe().
 * @shadow: Virtual (ioremap) address of spe register shadow area.
 * @cache: Cached values of priv1 registers.
 */

struct spu_pdata {
	u64 spe_id;
	u64 resource_id;
	u64 priv2_addr;
	u64 shadow_addr;
	struct spe_shadow __iomem *shadow;
	struct priv1_cache cache;
};
static struct spu_pdata *spu_pdata(struct spu *spu)
{
	return spu->pdata;
}
#define dump_areas(_a, _b, _c, _d, _e) \
	_dump_areas(_a, _b, _c, _d, _e, __func__, __LINE__)
static void _dump_areas(unsigned int spe_id, unsigned long priv2,
	unsigned long problem, unsigned long ls, unsigned long shadow,
	const char *func, int line)
{
	pr_debug("%s:%d: spe_id:  %xh (%u)\n", func, line, spe_id, spe_id);
	pr_debug("%s:%d: priv2:   %lxh\n", func, line, priv2);
	pr_debug("%s:%d: problem: %lxh\n", func, line, problem);
	pr_debug("%s:%d: ls:      %lxh\n", func, line, ls);
	pr_debug("%s:%d: shadow:  %lxh\n", func, line, shadow);
}
inline u64 ps3_get_spe_id(void *arg)
{
	return spu_pdata(arg)->spe_id;
}
EXPORT_SYMBOL_GPL(ps3_get_spe_id);
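
/*
 * The export lets other PS3 code, possibly built as modules, translate a
 * struct spu into the HV spe id needed for lv1 calls; which callers rely
 * on it is not shown here.
 */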
static unsigned long get_vas_id(void)
{
	u64 id;

	lv1_get_logical_ppe_id(&id);
	lv1_get_virtual_address_space_id_of_ppe(id, &id);

	return id;
}
static int __init construct_spu(struct spu *spu)
{
	int result;
	u64 unused;
	u64 problem_phys;
	u64 local_store_phys;

	result = lv1_construct_logical_spe(PAGE_SHIFT, PAGE_SHIFT, PAGE_SHIFT,
		PAGE_SHIFT, PAGE_SHIFT, get_vas_id(), SPE_TYPE_LOGICAL,
		&spu_pdata(spu)->priv2_addr, &problem_phys,
		&local_store_phys, &unused,
		&spu_pdata(spu)->shadow_addr,
		&spu_pdata(spu)->spe_id);
	spu->problem_phys = problem_phys;
	spu->local_store_phys = local_store_phys;

	if (result) {
		pr_debug("%s:%d: lv1_construct_logical_spe failed: %s\n",
			__func__, __LINE__, ps3_result(result));
	}

	return result;
}
static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->problem);
	iounmap((__force u8 __iomem *)spu->local_store);
	iounmap(spu_pdata(spu)->shadow);
}
/**
 * setup_areas - Map the spu regions into the address space.
 *
 * The current HV requires the spu shadow regs to be mapped with the
 * PTE page protection bits set as read-only (PP=3).  This implementation
 * uses the low level __ioremap() to bypass the page protection settings
 * enforced by ioremap_flags() to get the needed PTE bits set for the
 * shadow regs.
 */
static int __init setup_areas(struct spu *spu)
{
	struct table {char *name; unsigned long addr; unsigned long size;};
	/* PP=3 (read-only), per the HV requirement described above. */
	static const unsigned long shadow_flags = _PAGE_NO_CACHE | 3;

	spu_pdata(spu)->shadow = __ioremap(spu_pdata(spu)->shadow_addr,
		sizeof(struct spe_shadow),
		shadow_flags);

	if (!spu_pdata(spu)->shadow) {
		pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	spu->local_store = (__force void *)ioremap_flags(spu->local_store_phys,
		LS_SIZE, _PAGE_NO_CACHE);

	if (!spu->local_store) {
		pr_debug("%s:%d: ioremap local_store failed\n",
			__func__, __LINE__);
		goto fail_ioremap;
	}

	spu->problem = ioremap(spu->problem_phys,
		sizeof(struct spu_problem));

	if (!spu->problem) {
		pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,
		sizeof(struct spu_priv2));

	if (!spu->priv2) {
		pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	dump_areas(spu_pdata(spu)->spe_id, spu_pdata(spu)->priv2_addr,
		spu->problem_phys, spu->local_store_phys,
		spu_pdata(spu)->shadow_addr);
	dump_areas(spu_pdata(spu)->spe_id, (unsigned long)spu->priv2,
		(unsigned long)spu->problem, (unsigned long)spu->local_store,
		(unsigned long)spu_pdata(spu)->shadow);

	return 0;

fail_ioremap:
	spu_unmap(spu);

	return -ENOMEM;
}
static int __init setup_interrupts(struct spu *spu)
{
	int result;

	/* One virq per spe interrupt class (0, 1 and 2). */

	result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
		0, &spu->irqs[0]);

	if (result)
		goto fail_alloc_0;

	result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
		1, &spu->irqs[1]);

	if (result)
		goto fail_alloc_1;

	result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
		2, &spu->irqs[2]);

	if (result)
		goto fail_alloc_2;

	return result;

fail_alloc_2:
	ps3_spe_irq_destroy(spu->irqs[1]);
fail_alloc_1:
	ps3_spe_irq_destroy(spu->irqs[0]);
fail_alloc_0:
	spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;
	return result;
}
static int __init enable_spu(struct spu *spu)
{
	int result;

	result = lv1_enable_logical_spe(spu_pdata(spu)->spe_id,
		spu_pdata(spu)->resource_id);

	if (result) {
		pr_debug("%s:%d: lv1_enable_logical_spe failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail_enable;
	}

	result = setup_areas(spu);

	if (result)
		goto fail_areas;

	result = setup_interrupts(spu);

	if (result)
		goto fail_interrupts;

	return 0;

fail_interrupts:
	spu_unmap(spu);
fail_areas:
	lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
fail_enable:
	return result;
}
static int ps3_destroy_spu(struct spu *spu)
{
	int result;

	pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);

	result = lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
	BUG_ON(result);

	ps3_spe_irq_destroy(spu->irqs[2]);
	ps3_spe_irq_destroy(spu->irqs[1]);
	ps3_spe_irq_destroy(spu->irqs[0]);

	spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = NO_IRQ;

	spu_unmap(spu);

	result = lv1_destruct_logical_spe(spu_pdata(spu)->spe_id);
	BUG_ON(result);

	kfree(spu->pdata);
	spu->pdata = NULL;

	return 0;
}
static int __init ps3_create_spu(struct spu *spu, void *data)
{
	int result;

	pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);

	spu->pdata = kzalloc(sizeof(struct spu_pdata),
		GFP_KERNEL);

	if (!spu->pdata) {
		result = -ENOMEM;
		goto fail_malloc;
	}

	spu_pdata(spu)->resource_id = (unsigned long)data;

	/* Init cached reg values to HV defaults. */

	spu_pdata(spu)->cache.sr1 = 0x33;

	result = construct_spu(spu);

	if (result)
		goto fail_construct;

	/* For now, just go ahead and enable it. */

	result = enable_spu(spu);

	if (result)
		goto fail_enable;

	/* Make sure the spu is in SPE_EX_STATE_EXECUTED. */

	/* need something better here!!! */
	while (in_be64(&spu_pdata(spu)->shadow->spe_execution_status)
		!= SPE_EX_STATE_EXECUTED)
		(void)0;

	return result;

fail_enable:
fail_construct:
	ps3_destroy_spu(spu);
fail_malloc:
	return result;
}
static int __init ps3_enumerate_spus(int (*fn)(void *data))
{
	int result;
	unsigned int num_resource_id;
	unsigned int i;

	result = ps3_repository_read_num_spu_resource_id(&num_resource_id);

	pr_debug("%s:%d: num_resource_id %u\n", __func__, __LINE__,
		num_resource_id);

	/*
	 * For now, just create logical spus equal to the number
	 * of physical spus reserved for the partition.
	 */

	for (i = 0; i < num_resource_id; i++) {
		enum ps3_spu_resource_type resource_type;
		unsigned int resource_id;

		result = ps3_repository_read_spu_resource_id(i,
			&resource_type, &resource_id);

		if (result)
			break;

		if (resource_type == PS3_SPU_RESOURCE_TYPE_EXCLUSIVE) {
			result = fn((void *)(unsigned long)resource_id);

			if (result)
				break;
		}
	}

	if (result) {
		printk(KERN_WARNING "%s:%d: Error initializing spus\n",
			__func__, __LINE__);
		return result;
	}

	return num_resource_id;
}
static int ps3_init_affinity(void)
{
	return 0;
}
/**
 * ps3_enable_spu - Enable SPU run control.
 *
 * An outstanding enhancement for the PS3 would be to add a guard to check
 * for incorrect access to the spu problem state when the spu context is
 * disabled.  This check could be implemented with a flag added to the spu
 * context that would inhibit mapping problem state pages, and a routine
 * to unmap spu problem state pages.  When the spu is enabled with
 * ps3_enable_spu() the flag would be set allowing pages to be mapped,
 * and when the spu is disabled with ps3_disable_spu() the flag would be
 * cleared and the mapped problem state pages would be unmapped.
 */
static void ps3_enable_spu(struct spu_context *ctx)
{
}
static void ps3_disable_spu(struct spu_context *ctx)
{
	ctx->ops->runcntl_stop(ctx);
}
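
/*
 * A minimal sketch of the guard described above, assuming a hypothetical
 * ps_mappable flag added to struct spu_context and a hypothetical
 * spu_unmap_problem_state() helper (neither exists in this tree):
 *
 *	static void ps3_enable_spu(struct spu_context *ctx)
 *	{
 *		ctx->ps_mappable = 1;		// allow ps page mappings
 *	}
 *
 *	static void ps3_disable_spu(struct spu_context *ctx)
 *	{
 *		ctx->ops->runcntl_stop(ctx);
 *		ctx->ps_mappable = 0;		// inhibit new ps mappings
 *		spu_unmap_problem_state(ctx);	// drop existing mappings
 *	}
 */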
const struct spu_management_ops spu_management_ps3_ops = {
	.enumerate_spus = ps3_enumerate_spus,
	.create_spu = ps3_create_spu,
	.destroy_spu = ps3_destroy_spu,
	.enable_spu = ps3_enable_spu,
	.disable_spu = ps3_disable_spu,
	.init_affinity = ps3_init_affinity,
};

/* spu_priv1_ops */
static void int_mask_and(struct spu *spu, int class, u64 mask)
{
	u64 old_mask;

	/* are these serialized by caller??? */
	old_mask = spu_int_mask_get(spu, class);
	spu_int_mask_set(spu, class, old_mask & mask);
}
static void int_mask_or(struct spu *spu, int class, u64 mask)
{
	u64 old_mask;

	old_mask = spu_int_mask_get(spu, class);
	spu_int_mask_set(spu, class, old_mask | mask);
}
static void int_mask_set(struct spu *spu, int class, u64 mask)
{
	spu_pdata(spu)->cache.masks[class] = mask;
	lv1_set_spe_interrupt_mask(spu_pdata(spu)->spe_id, class,
		spu_pdata(spu)->cache.masks[class]);
}
static u64 int_mask_get(struct spu *spu, int class)
{
	return spu_pdata(spu)->cache.masks[class];
}
static void int_stat_clear(struct spu *spu, int class, u64 stat)
{
	/* Note that MFC_DSISR will be cleared when class1[MF] is set. */

	lv1_clear_spe_interrupt_status(spu_pdata(spu)->spe_id, class,
		stat, 0);
}
static u64 int_stat_get(struct spu *spu, int class)
{
	u64 stat;

	lv1_get_spe_interrupt_status(spu_pdata(spu)->spe_id, class, &stat);
	return stat;
}
static void cpu_affinity_set(struct spu *spu, int cpu)
{
	/* No support. */
}
static u64 mfc_dar_get(struct spu *spu)
{
	return in_be64(&spu_pdata(spu)->shadow->mfc_dar_RW);
}
static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
{
	/* Nothing to do, cleared in int_stat_clear(). */
}
static u64 mfc_dsisr_get(struct spu *spu)
{
	return in_be64(&spu_pdata(spu)->shadow->mfc_dsisr_RW);
}
static void mfc_sdr_setup(struct spu *spu)
{
	/* Nothing to do. */
}
static void mfc_sr1_set(struct spu *spu, u64 sr1)
{
	/* Check bits allowed by HV. */

	static const u64 allowed = ~(MFC_STATE1_LOCAL_STORAGE_DECODE_MASK
		| MFC_STATE1_PROBLEM_STATE_MASK);

	BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed));

	spu_pdata(spu)->cache.sr1 = sr1;
	lv1_set_spe_privilege_state_area_1_register(
		spu_pdata(spu)->spe_id,
		offsetof(struct spu_priv1, mfc_sr1_RW),
		spu_pdata(spu)->cache.sr1);
}
static u64 mfc_sr1_get(struct spu *spu)
{
	return spu_pdata(spu)->cache.sr1;
}
static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
{
	spu_pdata(spu)->cache.tclass_id = tclass_id;
	lv1_set_spe_privilege_state_area_1_register(
		spu_pdata(spu)->spe_id,
		offsetof(struct spu_priv1, mfc_tclass_id_RW),
		spu_pdata(spu)->cache.tclass_id);
}
static u64 mfc_tclass_id_get(struct spu *spu)
{
	return spu_pdata(spu)->cache.tclass_id;
}
static void tlb_invalidate(struct spu *spu)
{
	/* Nothing to do. */
}
static void resource_allocation_groupID_set(struct spu *spu, u64 id)
{
	/* No support. */
}
static u64 resource_allocation_groupID_get(struct spu *spu)
{
	return 0; /* No support. */
}
static void resource_allocation_enable_set(struct spu *spu, u64 enable)
{
	/* No support. */
}
static u64 resource_allocation_enable_get(struct spu *spu)
{
	return 0; /* No support. */
}
const struct spu_priv1_ops spu_priv1_ps3_ops = {
	.int_mask_and = int_mask_and,
	.int_mask_or = int_mask_or,
	.int_mask_set = int_mask_set,
	.int_mask_get = int_mask_get,
	.int_stat_clear = int_stat_clear,
	.int_stat_get = int_stat_get,
	.cpu_affinity_set = cpu_affinity_set,
	.mfc_dar_get = mfc_dar_get,
	.mfc_dsisr_set = mfc_dsisr_set,
	.mfc_dsisr_get = mfc_dsisr_get,
	.mfc_sdr_setup = mfc_sdr_setup,
	.mfc_sr1_set = mfc_sr1_set,
	.mfc_sr1_get = mfc_sr1_get,
	.mfc_tclass_id_set = mfc_tclass_id_set,
	.mfc_tclass_id_get = mfc_tclass_id_get,
	.tlb_invalidate = tlb_invalidate,
	.resource_allocation_groupID_set = resource_allocation_groupID_set,
	.resource_allocation_groupID_get = resource_allocation_groupID_get,
	.resource_allocation_enable_set = resource_allocation_enable_set,
	.resource_allocation_enable_get = resource_allocation_enable_get,
};
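
/*
 * These ops are not called directly; generic cell code reaches them through
 * the spu_priv1_ops indirection (e.g. the spu_int_mask_get() wrapper used
 * in int_mask_and() above), which is what lets these lv1-backed versions
 * stand in for the native register accessors.
 */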
void ps3_spu_set_platform(void)
{
	spu_priv1_ops = &spu_priv1_ps3_ops;
	spu_management_ops = &spu_management_ps3_ops;
}