#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"
/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu, int irq)
{
	struct spu_context *ctx = spu->ctx;
	/*
	 * It should be impossible to preempt a context while an exception
	 * is being processed, since the context switch code is specially
	 * coded to deal with interrupts ... But, just in case, sanity check
	 * the context pointer.  It is OK to return doing nothing since
	 * the exception will be regenerated when the context is resumed.
	 */
	if (ctx) {
		/* Copy exception arguments into module specific structure */
		switch (irq) {
		case 0:
			ctx->csa.class_0_pending = spu->class_0_pending;
			ctx->csa.class_0_dar = spu->class_0_dar;
			break;
		case 1:
			ctx->csa.class_1_dsisr = spu->class_1_dsisr;
			ctx->csa.class_1_dar = spu->class_1_dar;
			break;
		case 2:
			break;
		}

		/* ensure that the exception status has hit memory before a
		 * thread waiting on the context's stop queue is woken */
		smp_wmb();

		wake_up_all(&ctx->stop_wq);
	}
}
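/*
 * Note: spu_stopped() below is the consumer of the class_0_pending and
 * class_1_dsisr values saved into ctx->csa by this callback; the smp_wmb()
 * above is what guarantees those stores are visible before the woken thread
 * re-reads them.
 */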
int spu_stopped(struct spu_context *ctx, u32 *stat)
{
	u64 dsisr;
	u32 stopped;

	stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;

top:
	*stat = ctx->ops->status_read(ctx);
	if (*stat & stopped) {
		/*
		 * If the spu hasn't finished stopping, we need to
		 * re-read the register to get the stopped value.
		 */
		if (*stat & SPU_STATUS_RUNNING)
			goto top;
		return 1;
	}

	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
		return 1;

	dsisr = ctx->csa.class_1_dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
		return 1;

	if (ctx->csa.class_0_pending)
		return 1;

	return 0;
}
static int spu_setup_isolated(struct spu_context *ctx)
{
	int ret;
	u64 __iomem *mfc_cntl;
	u64 sr1;
	u32 status;
	unsigned long timeout;
	const u32 status_loading = SPU_STATUS_RUNNING
		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

	ret = -ENODEV;
	if (!isolated_loader)
		goto out;
	/*
	 * We need to exclude userspace access to the context.
	 *
	 * To protect against memory access we invalidate all ptes
	 * and make sure the pagefault handlers block on the mutex.
	 */
	spu_unmap_mappings(ctx);

	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;
	/* purge the MFC DMA queue to ensure no spurious accesses before we
	 * enter kernel mode */
	timeout = jiffies + HZ;
	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
					__func__);
			ret = -EIO;
			goto out;
		}
		cond_resched();
	}

	/* clear purge status */
	out_be64(mfc_cntl, 0);
	/* put the SPE in kernel mode to allow access to the loader */
	sr1 = spu_mfc_sr1_get(ctx->spu);
	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);
	/* start the loader */
	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
	ctx->ops->signal2_write(ctx,
			(unsigned long)isolated_loader & 0xffffffff);

	ctx->ops->runcntl_write(ctx,
			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
	ret = 0;
	timeout = jiffies + HZ;
	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
			status_loading) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout waiting for loader\n",
					__func__);
			ret = -EIO;
			goto out_drop_priv;
		}
		cond_resched();
	}
	if (!(status & SPU_STATUS_RUNNING)) {
		/* If isolated LOAD has failed: run SPU, we will get a stop-and
		 * signal later. */
		pr_debug("%s: isolated LOAD failed\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
		ret = -EACCES;
		goto out_drop_priv;
	}
	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
		/* This isn't allowed by the CBEA, but check anyway */
		pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
		ret = -EINVAL;
		goto out_drop_priv;
	}
out_drop_priv:
	/* Finished accessing the loader. Drop kernel mode */
	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

out:
	return ret;
}
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
	int ret;

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
	/*
	 * NOSCHED is synchronous scheduling with respect to the caller.
	 * The caller waits for the context to be loaded.
	 */
	if (ctx->flags & SPU_CREATE_NOSCHED) {
		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		}
	}
	/*
	 * Apply special setup as required.
	 */
	if (ctx->flags & SPU_CREATE_ISOLATE) {
		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			ret = spu_setup_isolated(ctx);
			if (ret)
				return ret;
		}

		/*
		 * If userspace has set the runcntrl register (eg, to
		 * issue an isolated exit), we need to re-set it here
		 */
		runcntl = ctx->ops->runcntl_read(ctx) &
			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
		if (runcntl == 0)
			runcntl = SPU_RUNCNTL_RUNNABLE;
	} else {
		unsigned long privcntl;

		if (test_thread_flag(TIF_SINGLESTEP))
			privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
		else
			privcntl = SPU_PRIVCNTL_MODE_NORMAL;

		ctx->ops->privcntl_write(ctx, privcntl);
		ctx->ops->npc_write(ctx, *npc);
	}
	ctx->ops->runcntl_write(ctx, runcntl);

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spuctx_switch_state(ctx, SPU_UTIL_USER);
	} else {
		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		}

		spuctx_switch_state(ctx, SPU_UTIL_USER);
	}

	set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	return 0;
}
static int spu_run_fini(struct spu_context *ctx, u32 *npc,
			       u32 *status)
{
	int ret = 0;

	spu_del_from_rq(ctx);

	*status = ctx->ops->status_read(ctx);
	*npc = ctx->ops->npc_read(ctx);

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
	spu_release(ctx);

	if (signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
}
/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread. Here instead, the handler runs on PowerPC user space code,
 * while the syscall was called from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * defined semantics.
 */
static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
		u32 *npc)
{
	int ret;

	switch (*spu_ret) {
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		/*
		 * Enter the regular syscall restarting for
		 * sys_spu_run, then restart the SPU syscall
		 * callback.
		 */
		*npc -= 8;
		ret = -ERESTARTSYS;
		break;
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * Restart block is too hard for now, just return -EINTR
		 * to the SPU.
		 * ERESTARTNOHAND comes from sys_pause, we also return
		 * -EINTR from there.
		 * Assume that we need to be restarted ourselves though.
		 */
		*spu_ret = -EINTR;
		ret = -ERESTARTSYS;
		break;
	default:
		printk(KERN_WARNING "%s: unexpected return code %ld\n",
			__func__, *spu_ret);
		ret = -EINTR;
		break;
	}
	return ret;
}
static int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	void __iomem *ls;
	long spu_ret;
	int ret;
	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx) & ~3;
	ls = (void __iomem *)ctx->ops->get_ls(ctx);
	ls_pointer = in_be32(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy_fromio(&s, ls + ls_pointer, sizeof(s));
	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	npc += 4;

	if (s.nr_ret < __NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS) {
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		}
		mutex_lock(&ctx->state_mutex);
		if (ret == -ERESTARTSYS)
			return ret;
	}
	/* need to re-get the ls, as it may have changed when we released the
	 * mutex */
	ls = (void __iomem *)ctx->ops->get_ls(ctx);

	/* write result, jump over indirect pointer */
	memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return 0;
}
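/*
 * For reference: spu_process_callback() copies a struct spu_syscall_block
 * out of SPU local store at the offset found at npc.  A minimal sketch of
 * that layout, assuming the definition from <asm/spu.h>: nr_ret holds the
 * syscall number on entry and receives the return value (spu_ret is written
 * back over it above), while parm[] holds up to six 64-bit arguments.
 *
 *	struct spu_syscall_block {
 *		u64 nr_ret;
 *		u64 parm[6];
 *	};
 */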
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
	int ret;
	struct spu *spu;
	u32 status;

	if (mutex_lock_interruptible(&ctx->run_mutex))
		return -ERESTARTSYS;

	ctx->event_return = 0;

	ret = spu_acquire(ctx);
	if (ret)
		goto out_unlock;

	spu_enable_spu(ctx);

	spu_update_sched_info(ctx);

	ret = spu_run_init(ctx, npc);
	if (ret) {
		spu_release(ctx);
		goto out;
	}
	do {
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret)) {
			/*
			 * This is nasty: we need the state_mutex for all the
			 * bookkeeping even if the syscall was interrupted by
			 * a signal.
			 */
			mutex_lock(&ctx->state_mutex);
			break;
		}
		spu = ctx->spu;
		if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
						&ctx->sched_flags))) {
			if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
				spu_switch_notify(spu, ctx);
				continue;
			}
		}
		spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		ret = spufs_handle_class1(ctx);
		if (ret)
			break;

		ret = spufs_handle_class0(ctx);
		if (ret)
			break;

		if (signal_pending(current))
			ret = -ERESTARTSYS;
	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				     SPU_STATUS_STOPPED_BY_HALT |
				     SPU_STATUS_SINGLE_STEP)));
	spu_disable_spu(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	spu_yield(ctx);

	if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	    (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
		ctx->stats.libassist++;
	if ((ret == 0) ||
	    ((ret == -ERESTARTSYS) &&
	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
	      (status & SPU_STATUS_SINGLE_STEP) ||
	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
		ret = status;
	/* Note: we don't need to force_sig SIGTRAP on single-step
	 * since we have TIF_SINGLESTEP set, thus the kernel will do
	 * it upon return from the syscall anyway.
	 */
	if (unlikely(status & SPU_STATUS_SINGLE_STEP))
		ret = -ERESTARTSYS;

	else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP)
	    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) {
		force_sig(SIGTRAP, current);
		ret = -ERESTARTSYS;
	}
out:
	*event = ctx->event_return;
out_unlock:
	mutex_unlock(&ctx->run_mutex);
	return ret;
}
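/*
 * Usage sketch (not part of this file): spufs_run_spu() backs the spu_run(2)
 * system call.  Assuming a context created with spu_create(2) whose local
 * store already holds a program, and a powerpc system where __NR_spu_run is
 * available through <sys/syscall.h>, userspace drives it roughly as below
 * (glibc has no wrapper, so syscall(2) is used; error handling omitted;
 * spu_fd stands for the descriptor returned by spu_create(2)):
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	unsigned int npc = 0;
 *	unsigned int event = 0;
 *	long status;
 *
 *	status = syscall(__NR_spu_run, spu_fd, &npc, &event);
 *
 * npc names the local-store entry point and is updated on return; event
 * receives ctx->event_return; on success the return value mirrors the SPU
 * status register, including any stop-and-signal code above
 * SPU_STOP_STATUS_SHIFT.
 */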