/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/marker.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"
struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};
static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;
/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution.  But even the lowest
 * priority thread gets MIN_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}
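/*
 * Worked example (illustrative only, not from the original source): with
 * HZ == 250 and the stock MAX_PRIO == 140 / MAX_RT_PRIO == 100 values,
 * MAX_USER_PRIO is 40 and one spu scheduler tick is 10 jiffies (40 ms):
 *
 *	DEF_SPU_TIMESLICE = 100 * 250 / (1000 * 10) = 2 ticks
 *	prio 100 (nice -20): SCALE_PRIO(2 * 4, 100) = max(8 * 40 / 20, 1) = 16 ticks
 *	prio 120 (nice   0): SCALE_PRIO(2, 120)     = max(2 * 20 / 20, 1) =  2 ticks
 *	prio 139 (nice +19): SCALE_PRIO(2, 139)     = max(2 *  1 / 20, 1) =  1 tick
 *
 * The millisecond figures quoted in the comment above are nominal; the real
 * granularity is always a whole number of spu scheduler ticks and therefore
 * depends on the configured HZ.
 */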
/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * assert that the context is not on the runqueue, so it is safe
	 * to change its scheduling parameters.
	 */
	BUG_ON(!list_empty(&ctx->rq));

	/*
	 * 32-Bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * per definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with.  Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * TO DO: the context may be loaded, so we may need to activate
	 * it again on a different node.  But it shouldn't hurt anything
	 * to update its parameters, because we know that the scheduler
	 * is not actively looking at this field, since it is not on the
	 * runqueue.  The context will be rescheduled on the proper node
	 * if it is timesliced or preempted.
	 */
	ctx->cpus_allowed = current->cpus_allowed;
}
void spu_update_sched_info(struct spu_context *ctx)
{
	int node;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		node = ctx->spu->node;

		/*
		 * Take list_mutex to sync with find_victim().
		 */
		mutex_lock(&cbe_spu_info[node].list_mutex);
		__spu_update_sched_info(ctx);
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	} else {
		__spu_update_sched_info(ctx);
	}
}
static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}
static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}
void do_notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see their "notify_active" flag is set,
	 * they will call spu_switch_notify().
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}
/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	spu_context_trace(spu_bind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
}
/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}
static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}
static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}
static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 *       used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}
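/*
 * Note on the loop above: the node walk starts at the NUMA node of the CPU
 * the caller is currently running on and wraps around through all other
 * nodes, so a reference SPU close to the submitting thread is preferred.
 * The same "start local, then wrap" idiom is reused by spu_get_idle() and
 * find_victim() below.
 */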
static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
							lowest_offset);
}
static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}
/*
 * affinity_check is called each time a context is going to be scheduled.
 * It returns the spu ptr on which the context must run.
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}
/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	spu_context_trace(spu_unbind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;
}
/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx:	context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for every but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}
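/*
 * Note: the scheduler tick timer is only armed when nr_waiting goes from
 * zero to one above, and __spu_del_from_rq() deletes it again when the last
 * waiter leaves the runqueue.  With nothing waiting there is no periodic
 * tick, since there is no point preempting a running context when no other
 * context could take its place.
 */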
static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_add_to_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}
static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}
void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_del_from_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}
static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	/*
	 * The caller must explicitly wait for a context to be loaded
	 * if the nosched flag is set.  If NOSCHED is not set, the caller
	 * queues the context and waits for an spu event or error.
	 */
	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	spu_context_nospu_trace(spu_get_idle__enter, ctx);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			mutex_lock(&ctx->gang->aff_mutex);
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
			mutex_unlock(&ctx->gang->aff_mutex);
			goto not_found;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

 not_found:
	spu_context_nospu_trace(spu_get_idle__not_found, ctx);
	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	spu_context_trace(spu_get_idle__found, ctx, spu);
	spu_init_channels(spu);
	return spu;
}
/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	spu_context_nospu_trace(spu_find_vitim__enter, ctx);

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 *
			 * XXX if the highest priority context is locked,
			 * this can loop a long time.  Might be better to
			 * look at another context or give up after X retries.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu || victim->prio <= ctx->prio) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}

			spu_context_trace(__spu_deactivate__unload, ctx, spu);

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			spu_add_to_rq(victim);

			mutex_unlock(&victim->state_mutex);

			return spu;
		}
	}

	return NULL;
}
static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;
	int success = 0;

	spu_set_timeslice(ctx);

	mutex_lock(&cbe_spu_info[node].list_mutex);
	if (spu->ctx == NULL) {
		spu_bind_context(spu, ctx);
		cbe_spu_info[node].nr_active++;
		spu->alloc_state = SPU_USED;
		success = 1;
	}
	mutex_unlock(&cbe_spu_info[node].list_mutex);

	if (success)
		wake_up_all(&ctx->run_wq);
	else
		spu_add_to_rq(ctx);
}
static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	/* not a candidate for interruptible because it's called either
	   from the scheduler thread or from spu_deactivate */
	mutex_lock(&ctx->state_mutex);
	__spu_schedule(spu, ctx);
	spu_release(ctx);
}
static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	cbe_spu_info[node].nr_active--;
	spu->alloc_state = SPU_FREE;
	spu_unbind_context(spu, ctx);
	ctx->stats.invol_ctx_switch++;
	spu->stats.invol_ctx_switch++;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}
/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	struct spu *spu;

	/*
	 * If there are multiple threads waiting for a single context
	 * only one actually binds the context while the others will
	 * only be able to acquire the state_mutex once the context
	 * already is in runnable state.
	 */
	if (ctx->spu)
		return 0;

spu_activate_top:
	if (signal_pending(current))
		return -ERESTARTSYS;

	spu = spu_get_idle(ctx);
	/*
	 * If this is a realtime thread we try to get it running by
	 * preempting a lower priority thread.
	 */
	if (!spu && rt_prio(ctx->prio))
		spu = find_victim(ctx);
	if (spu) {
		unsigned long runcntl;

		runcntl = ctx->ops->runcntl_read(ctx);
		__spu_schedule(spu, ctx);
		if (runcntl & SPU_RUNCNTL_RUNNABLE)
			spuctx_switch_state(ctx, SPU_UTIL_USER);

		return 0;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spu_prio_wait(ctx);
		goto spu_activate_top;
	}

	spu_add_to_rq(ctx);

	return 0;
}
/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}
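/*
 * Example of the lookup above (illustrative only): if contexts are queued
 * at priorities 110 and 125 and a prio 120 caller asks for work,
 * find_first_bit() returns 110, the prio 110 list is scanned for a context
 * allowed on @node, and that context is removed from the runqueue.  The
 * context queued at 125 is ignored because only bits below @prio are
 * considered.
 */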
static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_unschedule(spu, ctx);
			if (new) {
				if (new->flags & SPU_CREATE_NOSCHED)
					wake_up(&new->stop_wq);
				else {
					spu_release(ctx);
					spu_schedule(spu, new);
					/* this one can't easily be made
					   interruptible */
					mutex_lock(&ctx->state_mutex);
				}
			}
		}
	}

	return new != NULL;
}
/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_deactivate__enter, ctx);
	__spu_deactivate(ctx, 1, MAX_PRIO);
}
/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_yield__enter, ctx);
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}
static noinline void spusched_tick(struct spu_context *ctx)
{
	struct spu_context *new = NULL;
	struct spu *spu = NULL;
	u32 status;

	if (spu_acquire(ctx))
		BUG();	/* a kernel thread never has signals pending */

	if (ctx->state != SPU_STATE_RUNNABLE)
		goto out;
	if (spu_stopped(ctx, &status))
		goto out;
	if (ctx->flags & SPU_CREATE_NOSCHED)
		goto out;
	if (ctx->policy == SCHED_FIFO)
		goto out;

	if (--ctx->time_slice)
		goto out;

	spu = ctx->spu;

	spu_context_trace(spusched_tick__preempt, ctx, spu);

	new = grab_runnable_context(ctx->prio + 1, spu->node);
	if (new) {
		spu_unschedule(spu, ctx);
		spu_add_to_rq(ctx);
	} else {
		spu_context_nospu_trace(spusched_tick__newslice, ctx);
		ctx->time_slice++;
	}
out:
	spu_release(ctx);

	if (new)
		spu_schedule(spu, new);
}
/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}
/**
 * spu_calc_load - update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(void)
{
	unsigned long active_tasks; /* fixed-point */

	active_tasks = count_active_contexts() * FIXED_1;
	CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
}
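/*
 * For reference (not part of the original file): CALC_LOAD() implements the
 * same fixed-point exponential decay as the CPU loadavg code, roughly
 *
 *	load = (load * exp + active * (FIXED_1 - exp)) >> FSHIFT;
 *
 * with EXP_1/EXP_5/EXP_15 chosen so that the three spu_avenrun[] entries
 * decay with 1, 5 and 15 minute time constants at LOAD_FREQ sampling.
 */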
static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
}
static void spuloadavg_wake(unsigned long data)
{
	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
	spu_calc_load();
}
static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			struct mutex *mtx = &cbe_spu_info[node].list_mutex;

			mutex_lock(mtx);
			list_for_each_entry(spu, &cbe_spu_info[node].spus,
					cbe_list) {
				struct spu_context *ctx = spu->ctx;

				if (ctx) {
					mutex_unlock(mtx);
					spusched_tick(ctx);
					mutex_lock(mtx);
				}
			}
			mutex_unlock(mtx);
		}
	}

	return 0;
}
void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state)
{
	unsigned long long curtime;
	signed long long delta;
	struct timespec ts;
	struct spu *spu;
	enum spu_utilization_state old_state;

	ktime_get_ts(&ts);
	curtime = timespec_to_ns(&ts);
	delta = curtime - ctx->stats.tstamp;

	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
	WARN_ON(delta < 0);

	spu = ctx->spu;
	old_state = ctx->stats.util_state;
	ctx->stats.util_state = new_state;
	ctx->stats.tstamp = curtime;

	/*
	 * Update the physical SPU utilization statistics.
	 */
	if (spu) {
		ctx->stats.times[old_state] += delta;
		spu->stats.times[old_state] += delta;
		spu->stats.util_state = new_state;
		spu->stats.tstamp = curtime;
	}
}
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
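/*
 * Example of the fixed-point conversion (illustrative only): with
 * FSHIFT == 11, FIXED_1 is 2048, so an avenrun value of 3072 prints as
 * "1.50": LOAD_INT(3072) == 1 and LOAD_FRAC(3072) == (1024 * 100) >> 11 == 50.
 */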
static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side...),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}
static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}
static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);
	setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	mod_timer(&spuloadavg_timer, 0);

	entry = create_proc_entry("spu_loadavg", 0, NULL);
	if (!entry)
		goto out_stop_kthread;
	entry->proc_fops = &spu_loadavg_fops;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}
void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	del_timer_sync(&spuloadavg_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}