// SPDX-License-Identifier: GPL-2.0
#define DEBUG

#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"

/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu, int irq)
{
	struct spu_context *ctx = spu->ctx;

	/*
	 * It should be impossible to preempt a context while an exception
	 * is being processed, since the context switch code is specially
	 * coded to deal with interrupts ... But, just in case, sanity check
	 * the context pointer.  It is OK to return doing nothing since
	 * the exception will be regenerated when the context is resumed.
	 */
	if (ctx) {
		/* Copy exception arguments into module specific structure */
		switch (irq) {
		case 0:
			ctx->csa.class_0_pending = spu->class_0_pending;
			ctx->csa.class_0_dar = spu->class_0_dar;
			break;
		case 1:
			ctx->csa.class_1_dsisr = spu->class_1_dsisr;
			ctx->csa.class_1_dar = spu->class_1_dar;
			break;
		case 2:
			break;
		}

		/* ensure that the exception status has hit memory before a
		 * thread waiting on the context's stop queue is woken */
		smp_wmb();

		wake_up_all(&ctx->stop_wq);
	}
}
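
/*
 * Poll helper used as the wait condition in spufs_run_spu(): returns
 * nonzero once the SPU status register reports a stop, halt, single-step
 * or invalid-instruction condition, or when a pending class 0/1
 * exception needs to be handled by the controlling thread.
 */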
int spu_stopped(struct spu_context *ctx, u32 *stat)
{
	u64 dsisr;
	u32 stopped;

	stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;

top:
	*stat = ctx->ops->status_read(ctx);
	if (*stat & stopped) {
		/*
		 * If the spu hasn't finished stopping, we need to
		 * re-read the register to get the stopped value.
		 */
		if (*stat & SPU_STATUS_RUNNING)
			goto top;
		return 1;
	}

	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
		return 1;

	dsisr = ctx->csa.class_1_dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
		return 1;

	if (ctx->csa.class_0_pending)
		return 1;

	return 0;
}
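
/*
 * Load the isolation-mode loader into the SPE: purge the MFC DMA queue,
 * temporarily drop the SPE into privileged (kernel) mode, point the
 * signal notification registers at the loader image and wait for the
 * isolated load to complete.
 */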
static int spu_setup_isolated(struct spu_context *ctx)
{
	int ret;
	u64 __iomem *mfc_cntl;
	u64 sr1;
	u32 status;
	unsigned long timeout;
	const u32 status_loading = SPU_STATUS_RUNNING
		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

	ret = -ENODEV;
	if (!isolated_loader)
		goto out;

	/*
	 * We need to exclude userspace access to the context.
	 *
	 * To protect against memory access we invalidate all ptes
	 * and make sure the pagefault handlers block on the mutex.
	 */
	spu_unmap_mappings(ctx);

	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

	/* purge the MFC DMA queue to ensure no spurious accesses before we
	 * enter kernel mode */
	timeout = jiffies + HZ;
	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
					__func__);
			ret = -EIO;
			goto out;
		}
		cond_resched();
	}

	/* clear purge status */
	out_be64(mfc_cntl, 0);

	/* put the SPE in kernel mode to allow access to the loader */
	sr1 = spu_mfc_sr1_get(ctx->spu);
	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

	/* start the loader */
	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
	ctx->ops->signal2_write(ctx,
			(unsigned long)isolated_loader & 0xffffffff);

	ctx->ops->runcntl_write(ctx,
			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

	ret = 0;
	timeout = jiffies + HZ;
	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
				status_loading) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout waiting for loader\n",
					__func__);
			ret = -EIO;
			goto out_drop_priv;
		}
		cond_resched();
	}

	if (!(status & SPU_STATUS_RUNNING)) {
		/* If isolated LOAD has failed: run SPU, we will get a stop-and
		 * signal later. */
		pr_debug("%s: isolated LOAD failed\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
		ret = -EACCES;
		goto out_drop_priv;
	}

	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
		/* This isn't allowed by the CBEA, but check anyway */
		pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
		ret = -EINVAL;
		goto out_drop_priv;
	}

out_drop_priv:
	/* Finished accessing the loader. Drop kernel mode */
	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

out:
	return ret;
}
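
/*
 * Prepare a context for execution: program the privileged control and
 * NPC registers (or re-apply isolated-mode setup), write the run control
 * register and, depending on SPU_CREATE_NOSCHED, either wait for the
 * context to be loaded or let the scheduler activate it.
 */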
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
	int ret;

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	/*
	 * NOSCHED is synchronous scheduling with respect to the caller.
	 * The caller waits for the context to be loaded.
	 */
	if (ctx->flags & SPU_CREATE_NOSCHED) {
		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		}
	}

	/*
	 * Apply special setup as required.
	 */
	if (ctx->flags & SPU_CREATE_ISOLATE) {
		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			ret = spu_setup_isolated(ctx);
			if (ret)
				return ret;
		}

		/*
		 * If userspace has set the runcntrl register (eg, to
		 * issue an isolated exit), we need to re-set it here
		 */
		runcntl = ctx->ops->runcntl_read(ctx) &
			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
		if (runcntl == 0)
			runcntl = SPU_RUNCNTL_RUNNABLE;
	} else {
		unsigned long privcntl;

		if (test_thread_flag(TIF_SINGLESTEP))
			privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
		else
			privcntl = SPU_PRIVCNTL_MODE_NORMAL;

		ctx->ops->privcntl_write(ctx, privcntl);
		ctx->ops->npc_write(ctx, *npc);
	}

	ctx->ops->runcntl_write(ctx, runcntl);

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spuctx_switch_state(ctx, SPU_UTIL_USER);
	} else {
		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		} else {
			spuctx_switch_state(ctx, SPU_UTIL_USER);
		}
	}

	set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	return 0;
}
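
/*
 * Tear-down counterpart of spu_run_init(): read back status and NPC for
 * the caller, update scheduler and statistics state and release the
 * context.
 */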
static int spu_run_fini(struct spu_context *ctx, u32 *npc,
			       u32 *status)
{
	int ret = 0;

	spu_del_from_rq(ctx);

	*status = ctx->ops->status_read(ctx);
	*npc = ctx->ops->npc_read(ctx);

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status);
	spu_release(ctx);

	if (signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
}

/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread. Here instead, the handler runs on PowerPC user space code,
 * while the syscall was called from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
			  u32 *npc)
{
	int ret;

	switch (*spu_ret) {
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		/*
		 * Enter the regular syscall restarting for
		 * sys_spu_run, then restart the SPU syscall
		 * callback.
		 */
		*npc -= 8;
		ret = -ERESTARTSYS;
		break;
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * Restart block is too hard for now, just return -EINTR
		 * to the SPU.
		 * ERESTARTNOHAND comes from sys_pause, we also return
		 * -EINTR from there.
		 * Assume that we need to be restarted ourselves though.
		 */
		*spu_ret = -EINTR;
		ret = -ERESTARTSYS;
		break;
	default:
		printk(KERN_WARNING "%s: unexpected return code %ld\n",
			__func__, *spu_ret);
		ret = 0;
	}
	return ret;
}
static int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	void __iomem *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx) & ~3;
	ls = (void __iomem *)ctx->ops->get_ls(ctx);
	ls_pointer = in_be32(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	npc += 4;

	if (s.nr_ret < NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS) {
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		}
		mutex_lock(&ctx->state_mutex);
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* need to re-get the ls, as it may have changed when we released the
	 * mutex */
	ls = (void __iomem *)ctx->ops->get_ls(ctx);

	/* write result, jump over indirect pointer */
	memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}
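
/*
 * Main run loop behind the spu_run syscall: start the context, then
 * repeatedly wait for it to stop and dispatch syscall callbacks,
 * class 0/1 exception handling and pending signals until the SPU stops
 * for good.
 */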
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
	int ret;
	struct spu *spu;
	u32 status;

	if (mutex_lock_interruptible(&ctx->run_mutex))
		return -ERESTARTSYS;

	ctx->event_return = 0;

	ret = spu_acquire(ctx);
	if (ret)
		goto out_unlock;

	spu_enable_spu(ctx);

	spu_update_sched_info(ctx);

	ret = spu_run_init(ctx, npc);
	if (ret) {
		spu_release(ctx);
		goto out;
	}

	do {
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret)) {
			/*
			 * This is nasty: we need the state_mutex for all the
			 * bookkeeping even if the syscall was interrupted by
			 * a signal.
			 */
			mutex_lock(&ctx->state_mutex);
			break;
		}
		spu = ctx->spu;
		if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
						&ctx->sched_flags))) {
			if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
				spu_switch_notify(spu, ctx);
				continue;
			}
		}

		spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		ret = spufs_handle_class1(ctx);
		if (ret)
			break;

		ret = spufs_handle_class0(ctx);
		if (ret)
			break;

		if (signal_pending(current))
			ret = -ERESTARTSYS;
	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				      SPU_STATUS_STOPPED_BY_HALT |
				       SPU_STATUS_SINGLE_STEP)));

	spu_disable_spu(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	spu_yield(ctx);

	if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	    (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
		ctx->stats.libassist++;

	if ((ret == 0) ||
	    ((ret == -ERESTARTSYS) &&
	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
	      (status & SPU_STATUS_SINGLE_STEP) ||
	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
		ret = status;

	/* Note: we don't need to force_sig SIGTRAP on single-step
	 * since we have TIF_SINGLESTEP set, thus the kernel will do
	 * it upon return from the syscall anyway.
	 */
	if (unlikely(status & SPU_STATUS_SINGLE_STEP))
		ret = -ERESTARTSYS;

	else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP)
	    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) {
		force_sig(SIGTRAP);
		ret = -ERESTARTSYS;
	}

out:
	*event = ctx->event_return;
out_unlock:
	mutex_unlock(&ctx->run_mutex);
	return ret;
}