// SPDX-License-Identifier: GPL-2.0-or-later
/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 */

#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/rt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"
#define CREATE_TRACE_POINTS
#include "sputrace.h"
struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;
/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120
/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)
/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}
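/*
 * Worked example (assuming HZ=1000, so DEF_SPU_TIMESLICE is 10 spu scheduler
 * ticks and MIN_SPU_TIMESLICE is 1):
 *
 *   nice -20 (prio 100): SCALE_PRIO(40, 100) = 40 * 40 / 20 = 80 ticks ~ 800ms
 *   nice   0 (prio 120): SCALE_PRIO(10, 120) = 10 * 20 / 20 = 10 ticks ~ 100ms
 *   nice  19 (prio 139): SCALE_PRIO(10, 139) = 10 *  1 / 20 -> clamped to 1 tick
 *
 * One spu scheduler tick spans SPUSCHED_TICK CPU scheduler ticks, i.e.
 * roughly 10ms with these numbers.
 */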
/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * assert that the context is not on the runqueue, so it is safe
	 * to change its scheduling parameters.
	 */
	BUG_ON(!list_empty(&ctx->rq));

	/*
	 * 32-Bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * per definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with. Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * TO DO: the context may be loaded, so we may need to activate
	 * it again on a different node. But it shouldn't hurt anything
	 * to update its parameters, because we know that the scheduler
	 * is not actively looking at this field, since it is not on the
	 * runqueue. The context will be rescheduled on the proper node
	 * if it is timesliced or preempted.
	 */
	cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);

	/* Save the current cpu id for spu interrupt routing. */
	ctx->last_ran = raw_smp_processor_id();
}
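/*
 * For illustration: a nice-0 SCHED_OTHER thread has ->static_prio 120 and so
 * ends up with ctx->prio == 120, while a SCHED_FIFO thread at realtime
 * priority 50 has ->prio 49, so rt_prio() is true and ctx->prio lands well
 * below NORMAL_PRIO, which selects the larger timeslice scaling above.
 */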
void spu_update_sched_info(struct spu_context *ctx)
{
	int node;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		node = ctx->spu->node;

		/*
		 * Take list_mutex to sync with find_victim().
		 */
		mutex_lock(&cbe_spu_info[node].list_mutex);
		__spu_update_sched_info(ctx);
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	} else {
		__spu_update_sched_info(ctx);
	}
}
static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		const struct cpumask *mask = cpumask_of_node(node);

		if (cpumask_intersects(mask, &ctx->cpus_allowed))
			return 1;
	}

	return 0;
}
static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}
void do_notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see their "notify_active" flag is set,
	 * they will call spu_switch_notify().
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}
/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	spu_context_trace(spu_bind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu_associate_mm(spu, ctx->owner);

	spin_lock_irq(&spu->register_lock);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spin_unlock_irq(&spu->register_lock);

	spu_unmap_mappings(ctx);

	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_USER);
}
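/*
 * Note: the only caller of spu_bind_context() in this file is
 * __spu_schedule(), which holds cbe_spu_info[node].list_mutex across the
 * call, so the spu->ctx / ctx->spu pointers are wired up atomically with
 * respect to the per-node spu list walkers (e.g. find_victim()).
 */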
/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}
static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}
static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}
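/*
 * Example: for a gang whose affinity list holds four contexts c0-c3 with c2
 * as the reference context (gang->aff_ref_ctx), the loops above assign
 * aff_offset values -2, -1, 0, 1 to c0, c1, c2, c3 respectively; negative
 * offsets sit "before" the reference SPU in the chain, positive ones "after"
 * it.
 */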
static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 *       used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		/*
		 * "available_spus" counts how many spus are not potentially
		 * going to be used by other affinity gangs whose reference
		 * context is already in place. Although this code seeks to
		 * avoid having affinity gangs with a summed amount of
		 * contexts bigger than the amount of spus in the node,
		 * this may happen sporadically. In this case, available_spus
		 * becomes negative, which is harmless.
		 */
		int available_spus;

		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		available_spus = 0;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
					&& spu->ctx->gang->aff_ref_spu)
				available_spus -= spu->ctx->gang->contexts;
			available_spus++;
		}
		if (available_spus < ctx->gang->contexts) {
			mutex_unlock(&cbe_spu_info[node].list_mutex);
			continue;
		}

		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}
static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *tmp, *ctx;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
							lowest_offset);
}
static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}
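/*
 * ctx_location() walks the per-node affinity chain starting at the gang's
 * reference SPU and steps |offset| schedulable SPUs forward (offset > 0) or
 * backward (offset < 0), skipping SPUs occupied by NOSCHED contexts, to find
 * the physical SPU a given context's aff_offset maps to.
 */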
/*
 * affinity_check is called each time a context is going to be scheduled.
 * It returns the spu ptr on which the context must run.
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (atomic_read(&ctx->gang->aff_sched_count) == 0)
		ctx->gang->aff_ref_spu = NULL;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}
/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	u32 status;

	spu_context_trace(spu_unbind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang)
		/*
		 * If ctx->gang->aff_sched_count is positive, SPU affinity is
		 * being considered in this gang. Using atomic_dec_if_positive
		 * allows us to skip an explicit check for affinity in this
		 * gang.
		 */
		atomic_dec_if_positive(&ctx->gang->aff_sched_count);

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);

	spin_lock_irq(&spu->register_lock);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;
	spin_unlock_irq(&spu->register_lock);

	spu_associate_mm(spu, NULL);

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;

	if (spu_stopped(ctx, &status))
		wake_up_all(&ctx->stop_wq);
}
/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx:	context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_add_to_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}
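/*
 * The runqueue is a classic priority-bitmap design: one list per priority
 * level in spu_prio->runq[] plus a MAX_PRIO-bit bitmap marking which levels
 * are non-empty (lower numeric values mean higher priority). Arming
 * spusched_timer only when the first waiter shows up keeps the scheduler
 * tick completely off while no context is waiting for an SPU.
 */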
static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_del_from_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}
static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	/*
	 * The caller must explicitly wait for a context to be loaded
	 * if the nosched flag is set.  If NOSCHED is not set, the caller
	 * queues the context and waits for an spu event or error.
	 */
	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}
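/*
 * spu_prio_wait() implements the blocking path for SPU_CREATE_NOSCHED
 * contexts: the waiter parks itself exclusively on ctx->stop_wq, drops both
 * the runqueue lock and ctx->state_mutex around schedule() so that whoever
 * frees an SPU can take them, and removes itself from the runqueue again on
 * wakeup.
 */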
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	spu_context_nospu_trace(spu_get_idle__enter, ctx);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			atomic_dec(&ctx->gang->aff_sched_count);
			goto not_found;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

 not_found:
	spu_context_nospu_trace(spu_get_idle__not_found, ctx);
	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	spu_context_trace(spu_get_idle__found, ctx, spu);
	spu_init_channels(spu);
	return spu;
}
/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	spu_context_nospu_trace(spu_find_victim__enter, ctx);

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
			    (!victim || tmp->prio > victim->prio)) {
				victim = spu->ctx;
			}
		}
		if (victim)
			get_spu_context(victim);
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 *
			 * XXX if the highest priority context is locked,
			 * this can loop a long time.  Might be better to
			 * look at another context or give up after X retries.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu || victim->prio <= ctx->prio) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu_context_trace(__spu_deactivate__unload, ctx, spu);

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
				spu_add_to_rq(victim);

			mutex_unlock(&victim->state_mutex);
			put_spu_context(victim);

			return spu;
		}
	}

	return NULL;
}
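/*
 * Victim selection scans the per-node SPU lists for the loaded context with
 * the largest prio value (i.e. the lowest priority) that is still worse than
 * the candidate's, takes a reference, and then re-validates it under its
 * state_mutex; any race observed after dropping list_mutex simply restarts
 * the search. mutex_trylock() is used so a busy victim never deadlocks the
 * caller, at the cost of potentially looping as the XXX comment above notes.
 */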
static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;
	int success = 0;

	spu_set_timeslice(ctx);

	mutex_lock(&cbe_spu_info[node].list_mutex);
	if (spu->ctx == NULL) {
		spu_bind_context(spu, ctx);
		cbe_spu_info[node].nr_active++;
		spu->alloc_state = SPU_USED;
		success = 1;
	}
	mutex_unlock(&cbe_spu_info[node].list_mutex);

	if (success)
		wake_up_all(&ctx->run_wq);
	else
		spu_add_to_rq(ctx);
}
static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	/* not a candidate for interruptible because it's called either
	   from the scheduler thread or from spu_deactivate */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state == SPU_STATE_SAVED)
		__spu_schedule(spu, ctx);
	spu_release(ctx);
}
/**
 * spu_unschedule - remove a context from a spu, and possibly release it.
 * @spu:	The SPU to unschedule from
 * @ctx:	The context currently scheduled on the SPU
 * @free_spu:	Whether to free the SPU for other contexts
 *
 * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
 * SPU is made available for other contexts (ie, may be returned by
 * spu_get_idle). If this is zero, the caller is expected to schedule another
 * context to this spu.
 *
 * Should be called with ctx->state_mutex held.
 */
static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
		int free_spu)
{
	int node = spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	cbe_spu_info[node].nr_active--;
	if (free_spu)
		spu->alloc_state = SPU_FREE;
	spu_unbind_context(spu, ctx);
	ctx->stats.invol_ctx_switch++;
	spu->stats.invol_ctx_switch++;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}
/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	struct spu *spu;

	/*
	 * If there are multiple threads waiting for a single context
	 * only one actually binds the context while the others will
	 * only be able to acquire the state_mutex once the context
	 * already is in runnable state.
	 */
	if (ctx->spu)
		return 0;

spu_activate_top:
	if (signal_pending(current))
		return -ERESTARTSYS;

	spu = spu_get_idle(ctx);
	/*
	 * If this is a realtime thread we try to get it running by
	 * preempting a lower priority thread.
	 */
	if (!spu && rt_prio(ctx->prio))
		spu = find_victim(ctx);
	if (spu) {
		unsigned long runcntl;

		runcntl = ctx->ops->runcntl_read(ctx);
		__spu_schedule(spu, ctx);
		if (runcntl & SPU_RUNCNTL_RUNNABLE)
			spuctx_switch_state(ctx, SPU_UTIL_USER);

		return 0;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spu_prio_wait(ctx);
		goto spu_activate_top;
	}

	spu_add_to_rq(ctx);

	return 0;
}
/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}
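/*
 * Dequeue example: find_first_bit(spu_prio->bitmap, prio) returns the lowest
 * set bit below 'prio', i.e. the best waiting priority that still beats the
 * caller's limit. Passing prio = ctx->prio + 1 (as spusched_tick() does)
 * therefore yields a waiter of equal or better priority, while MAX_PRIO (as
 * spu_deactivate() and spu_yield() do) accepts any waiter at all.
 */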
static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_unschedule(spu, ctx, new == NULL);
			if (new) {
				if (new->flags & SPU_CREATE_NOSCHED)
					wake_up(&new->stop_wq);
				else {
					spu_release(ctx);
					spu_schedule(spu, new);
					/* this one can't easily be made
					   interruptible */
					mutex_lock(&ctx->state_mutex);
				}
			}
		}
	}

	return new != NULL;
}
/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_deactivate__enter, ctx);
	__spu_deactivate(ctx, 1, MAX_PRIO);
}
/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_yield__enter, ctx);
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}
static noinline void spusched_tick(struct spu_context *ctx)
{
	struct spu_context *new = NULL;
	struct spu *spu = NULL;

	if (spu_acquire(ctx))
		BUG();	/* a kernel thread never has signals pending */

	if (ctx->state != SPU_STATE_RUNNABLE)
		goto out;
	if (ctx->flags & SPU_CREATE_NOSCHED)
		goto out;
	if (ctx->policy == SCHED_FIFO)
		goto out;

	if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		goto out;

	spu = ctx->spu;

	spu_context_trace(spusched_tick__preempt, ctx, spu);

	new = grab_runnable_context(ctx->prio + 1, spu->node);
	if (new) {
		spu_unschedule(spu, ctx, 0);
		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
			spu_add_to_rq(ctx);
	} else {
		spu_context_nospu_trace(spusched_tick__newslice, ctx);
		if (!ctx->time_slice)
			ctx->time_slice++;
	}
out:
	spu_release(ctx);

	if (new)
		spu_schedule(spu, new);
}
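/*
 * Timeslice semantics: SCHED_FIFO and NOSCHED contexts are never rotated
 * here, and an expired context is only descheduled when a waiter of equal
 * or better priority exists. Otherwise it keeps the SPU and, once its slice
 * has hit zero, is granted one further tick at a time until such a waiter
 * shows up, so an idle system never forces needless context saves.
 */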
/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}
/**
 * spu_calc_load - update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(void)
{
	unsigned long active_tasks; /* fixed-point */

	active_tasks = count_active_contexts() * FIXED_1;
	spu_avenrun[0] = calc_load(spu_avenrun[0], EXP_1, active_tasks);
	spu_avenrun[1] = calc_load(spu_avenrun[1], EXP_5, active_tasks);
	spu_avenrun[2] = calc_load(spu_avenrun[2], EXP_15, active_tasks);
}
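/*
 * This mirrors the CPU loadavg machinery: the active context count is scaled
 * into FIXED_1 fixed-point and folded into three exponentially decaying
 * averages, with EXP_1/EXP_5/EXP_15 chosen so that, sampled every LOAD_FREQ
 * (about 5 seconds), they approximate 1-, 5- and 15-minute windows.
 */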
static void spusched_wake(struct timer_list *unused)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
}

static void spuloadavg_wake(struct timer_list *unused)
{
	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
	spu_calc_load();
}
static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			struct mutex *mtx = &cbe_spu_info[node].list_mutex;

			mutex_lock(mtx);
			list_for_each_entry(spu, &cbe_spu_info[node].spus,
					cbe_list) {
				struct spu_context *ctx = spu->ctx;

				if (ctx) {
					get_spu_context(ctx);
					mutex_unlock(mtx);
					spusched_tick(ctx);
					mutex_lock(mtx);
					put_spu_context(ctx);
				}
			}
			mutex_unlock(mtx);
		}
	}

	return 0;
}
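/*
 * The scheduler thread sleeps until spusched_wake() fires, then applies
 * spusched_tick() to every loaded context. list_mutex is dropped around the
 * tick because spusched_tick() takes ctx->state_mutex and may unbind the
 * context; the get/put_spu_context() pair keeps the context alive across
 * that window.
 */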
void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state)
{
	unsigned long long curtime;
	signed long long delta;
	struct spu *spu;
	enum spu_utilization_state old_state;
	int node;

	curtime = ktime_get_ns();
	delta = curtime - ctx->stats.tstamp;

	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
	WARN_ON(delta < 0);

	spu = ctx->spu;
	old_state = ctx->stats.util_state;
	ctx->stats.util_state = new_state;
	ctx->stats.tstamp = curtime;

	/*
	 * Update the physical SPU utilization statistics.
	 */
	if (spu) {
		ctx->stats.times[old_state] += delta;
		spu->stats.times[old_state] += delta;
		spu->stats.util_state = new_state;
		spu->stats.tstamp = curtime;
		node = spu->node;
		if (old_state == SPU_UTIL_USER)
			atomic_dec(&cbe_spu_info[node].busy_spus);
		if (new_state == SPU_UTIL_USER)
			atomic_inc(&cbe_spu_info[node].busy_spus);
	}
}
static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side...),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
	return 0;
}
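/*
 * The resulting /proc/spu_loadavg line follows the /proc/loadavg layout,
 * e.g. (illustrative values only):
 *
 *	0.04 0.10 0.08 2/15 4711
 *
 * i.e. the three load averages, active/total SPU context counts, and the
 * most recently allocated pid in the current pid namespace. The FIXED_1/200
 * term rounds the fixed-point averages to the nearest hundredth for display.
 */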
int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	timer_setup(&spusched_timer, spusched_wake, 0);
	timer_setup(&spuloadavg_timer, spuloadavg_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	mod_timer(&spuloadavg_timer, 0);

	entry = proc_create_single("spu_loadavg", 0, NULL, show_spu_loadavg);
	if (!entry)
		goto out_stop_kthread;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}
void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	del_timer_sync(&spuloadavg_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}