/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

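/*
 * The runqueue is a classic O(1) priority array: one list per priority
 * level, plus a bitmap with one bit set for every non-empty list, so
 * grab_runnable_context() below can locate the highest priority waiter
 * with a single find_first_bit().
 */
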
static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution.  But even the lowest
 * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
 */
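/*
 * For example (assuming HZ=1000, so one SPU scheduler tick is 10
 * jiffies, i.e. 10 msecs): DEF_SPU_TIMESLICE works out to 10 ticks.
 * A nice 0 thread (prio 120) gets SCALE_PRIO(10, 120) = 10 * 20 / 20
 * = 10 ticks (100 msecs), a nice -20 thread (prio 100) gets
 * SCALE_PRIO(40, 100) = 40 * 40 / 20 = 80 ticks (800 msecs), and a
 * nice +19 thread (prio 139) scales down to zero and is clamped to
 * MIN_SPU_TIMESLICE.
 */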
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * assert that the context is not on the runqueue, so it is safe
	 * to change its scheduling parameters.
	 */
	BUG_ON(!list_empty(&ctx->rq));

	/*
	 * 32-Bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * per definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with.  Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * TODO: the context may be loaded, so we may need to activate
	 * it again on a different node.  But it shouldn't hurt anything
	 * to update its parameters, because we know that the scheduler
	 * is not actively looking at this field, since it is not on the
	 * runqueue.  The context will be rescheduled on the proper node
	 * if it is timesliced or preempted.
	 */
	ctx->cpus_allowed = current->cpus_allowed;
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		node = ctx->spu->node;

		/*
		 * Take list_mutex to sync with find_victim().
		 */
		mutex_lock(&cbe_spu_info[node].list_mutex);
		__spu_update_sched_info(ctx);
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	} else {
		__spu_update_sched_info(ctx);
	}
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

void do_notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see their "notify_active" flag is set,
	 * they will call spu_switch_notify().
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
}

/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}

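/*
 * After aff_set_offsets() every context in the gang carries an
 * aff_offset relative to the reference context: -1, -2, ... for the
 * contexts placed before it in the affinity list and 0, 1, 2, ... for
 * the reference context and the ones after it.  ctx_location() later
 * walks that many schedulable SPUs away from the reference SPU, in the
 * matching direction, to find where each context should run.
 */
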
static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 *	 used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
							lowest_offset);
}

static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}

/*
 * has_affinity is called each time a context is going to be scheduled.
 * It returns 1 if the context's gang has a reference SPU for affinity
 * placement, setting one up first if necessary, and 0 otherwise.
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;

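	/*
	 * The physical SPU's fault and interrupt counters keep running
	 * across context switches; the *_base values snapshotted in
	 * spu_bind_context() let us credit this context with only the
	 * deltas that accumulated while it was actually loaded.
	 */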
	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;
}

/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx:	context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_add_to_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_del_from_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	/*
	 * The caller must explicitly wait for a context to be loaded
	 * if the nosched flag is set.  If NOSCHED is not set, the caller
	 * queues the context and waits for an spu event or error.
	 */
	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

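/*
 * spu_get_idle - find an idle spu for a context, honouring gang
 * affinity when a reference SPU has been set up.  On success the
 * returned SPU is marked SPU_USED and its channels are initialized;
 * returns NULL if no idle SPU (or no suitable affinity slot) exists.
 */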
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			mutex_lock(&ctx->gang->aff_mutex);
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
			mutex_unlock(&ctx->gang->aff_mutex);

			return NULL;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

	return NULL;

found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	pr_debug("Got SPU %d %d\n", spu->number, spu->node);
	spu_init_channels(spu);
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 *
			 * XXX if the highest priority context is locked,
			 * this can loop a long time.  Might be better to
			 * look at another context or give up after X retries.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu || victim->prio <= ctx->prio) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			spu_add_to_rq(victim);

			mutex_unlock(&victim->state_mutex);

			return spu;
		}
	}

	return NULL;
}

static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;
	int success = 0;

	spu_set_timeslice(ctx);

	mutex_lock(&cbe_spu_info[node].list_mutex);
	if (spu->ctx == NULL) {
		spu_bind_context(spu, ctx);
		cbe_spu_info[node].nr_active++;
		spu->alloc_state = SPU_USED;
		success = 1;
	}
	mutex_unlock(&cbe_spu_info[node].list_mutex);

	if (success)
		wake_up_all(&ctx->run_wq);
	else
		spu_add_to_rq(ctx);
}

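/*
 * Note that __spu_schedule() can lose the race for the SPU: if somebody
 * else bound a context to it after our caller picked it, the context is
 * simply put back on the runqueue instead of retrying here, and it will
 * be woken again once another SPU becomes free.
 */
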
static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	/* not a candidate for interruptible because it's called either
	   from the scheduler thread or from spu_deactivate */
	mutex_lock(&ctx->state_mutex);
	__spu_schedule(spu, ctx);
	spu_release(ctx);
}

static void spu_unschedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	cbe_spu_info[node].nr_active--;
	spu->alloc_state = SPU_FREE;
	spu_unbind_context(spu, ctx);
	ctx->stats.invol_ctx_switch++;
	spu->stats.invol_ctx_switch++;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	struct spu *spu;

	/*
	 * If there are multiple threads waiting for a single context
	 * only one actually binds the context while the others will
	 * only be able to acquire the state_mutex once the context
	 * already is in runnable state.
	 */
	if (ctx->spu)
		return 0;

spu_activate_top:
	if (signal_pending(current))
		return -ERESTARTSYS;

	spu = spu_get_idle(ctx);
	/*
	 * If this is a realtime thread we try to get it running by
	 * preempting a lower priority thread.
	 */
	if (!spu && rt_prio(ctx->prio))
		spu = find_victim(ctx);
	if (spu) {
		unsigned long runcntl;

		runcntl = ctx->ops->runcntl_read(ctx);
		__spu_schedule(spu, ctx);
		if (runcntl & SPU_RUNCNTL_RUNNABLE)
			spuctx_switch_state(ctx, SPU_UTIL_USER);

		return 0;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spu_prio_wait(ctx);
		goto spu_activate_top;
	}

	spu_add_to_rq(ctx);

	return 0;
}

/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

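/*
 * Note that @prio is an exclusive upper bound on the priority value:
 * spusched_tick() passes ctx->prio + 1 so only contexts of equal or
 * higher priority can displace the running one, while spu_deactivate()
 * and spu_yield() pass MAX_PRIO to accept any runnable context.
 */
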
static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_unschedule(spu, ctx);
			if (new) {
				if (new->flags & SPU_CREATE_NOSCHED)
					wake_up(&new->stop_wq);
				else {
					spu_release(ctx);
					spu_schedule(spu, new);
					/* this one can't easily be made
					   interruptible */
					mutex_lock(&ctx->state_mutex);
				}
			}
		}
	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

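/*
 * spusched_tick is run by the scheduler thread for every context that
 * is currently loaded on an SPU.  It decrements the context's time
 * slice and, once that hits zero, tries to displace the context with a
 * waiter of equal or higher priority; if nobody qualifies, the context
 * keeps its SPU and gets another tick's worth of time.
 */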
static noinline void spusched_tick(struct spu_context *ctx)
{
	struct spu_context *new = NULL;
	struct spu *spu = NULL;
	u32 status;

	if (spu_acquire(ctx))
		BUG();	/* a kernel thread never has signals pending */

	if (ctx->state != SPU_STATE_RUNNABLE)
		goto out;
	if (spu_stopped(ctx, &status))
		goto out;
	if (ctx->flags & SPU_CREATE_NOSCHED)
		goto out;
	if (ctx->policy == SCHED_FIFO)
		goto out;

	if (--ctx->time_slice)
		goto out;

	spu = ctx->spu;
	new = grab_runnable_context(ctx->prio + 1, spu->node);
	if (new) {
		spu_unschedule(spu, ctx);
		spu_add_to_rq(ctx);
	} else {
		ctx->time_slice++;
	}
out:
	spu_release(ctx);

	if (new)
		spu_schedule(spu, new);
}

/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(void)
{
	unsigned long active_tasks; /* fixed-point */

	active_tasks = count_active_contexts() * FIXED_1;
	CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
	CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
	CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
}

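/*
 * CALC_LOAD() is the same fixed-point exponential average used for the
 * CPU loadavg:  load = (load * exp + active * (FIXED_1 - exp)) >> FSHIFT,
 * evaluated every LOAD_FREQ (5 sec), with EXP_1/EXP_5/EXP_15 chosen so
 * the three averages decay over 1, 5 and 15 minutes respectively.
 */
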
static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
}

static void spuloadavg_wake(unsigned long data)
{
	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
	spu_calc_load();
}

static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			struct mutex *mtx = &cbe_spu_info[node].list_mutex;

			mutex_lock(mtx);
			list_for_each_entry(spu, &cbe_spu_info[node].spus,
					cbe_list) {
				struct spu_context *ctx = spu->ctx;

				if (ctx) {
					mutex_unlock(mtx);
					spusched_tick(ctx);
					mutex_lock(mtx);
				}
			}
			mutex_unlock(mtx);
		}
	}

	return 0;
}

void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state)
{
	unsigned long long curtime;
	signed long long delta;
	struct timespec ts;
	struct spu *spu;
	enum spu_utilization_state old_state;

	ktime_get_ts(&ts);
	curtime = timespec_to_ns(&ts);
	delta = curtime - ctx->stats.tstamp;

	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
	WARN_ON(delta < 0);

	spu = ctx->spu;
	old_state = ctx->stats.util_state;
	ctx->stats.util_state = new_state;
	ctx->stats.tstamp = curtime;

	/*
	 * Update the physical SPU utilization statistics.
	 */
	if (spu) {
		ctx->stats.times[old_state] += delta;
		spu->stats.times[old_state] += delta;
		spu->stats.util_state = new_state;
		spu->stats.tstamp = curtime;
	}
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

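/*
 * LOAD_INT()/LOAD_FRAC() split a fixed-point average into its integral
 * part and a two-digit fractional part; adding FIXED_1/200 (i.e. 0.005)
 * beforehand, as show_spu_loadavg() does, rounds to the nearest
 * hundredth instead of truncating.
 */
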
static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side...),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);
	setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	mod_timer(&spuloadavg_timer, 0);

	entry = create_proc_entry("spu_loadavg", 0, NULL);
	if (!entry)
		goto out_stop_kthread;
	entry->proc_fops = &spu_loadavg_fops;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

out_stop_kthread:
	kthread_stop(spusched_task);
out_free_spu_prio:
	kfree(spu_prio);
out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	del_timer_sync(&spuloadavg_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}