/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * SPU scheduler, based on Linux thread priority.  For now use
 * a simple "cooperative" yield model with no preemption.  SPU
 * scheduling will eventually be preemptive: when a thread with
 * a higher static priority becomes ready to run, an active SPU
 * context will be preempted and returned to the waitq.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"
/* Minimum time (100 ms, in jiffies) a context may hold an SPU before it
 * becomes a candidate for preemption. */
#define SPU_MIN_TIMESLICE	(100 * HZ / 1000)

#define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1)
struct spu_prio_array {
	atomic_t nr_blocked;
	unsigned long bitmap[SPU_BITMAP_SIZE];
	wait_queue_head_t waitq[MAX_PRIO];
};

/* spu_runqueue - This is the main runqueue data structure for SPUs. */
struct spu_runqueue {
	struct semaphore sem;
	unsigned long nr_active;
	unsigned long nr_idle;
	unsigned long nr_switches;
	struct list_head active_list;
	struct list_head idle_list;
	struct spu_prio_array prio;
};

static struct spu_runqueue *spu_runqueues = NULL;
static inline struct spu_runqueue *spu_rq(void)
{
	/* Future: make this a per-NODE array,
	 * and use cpu_to_node(smp_processor_id())
	 */
	return spu_runqueues;
}
static inline struct spu *del_idle(struct spu_runqueue *rq)
{
	struct spu *spu;

	BUG_ON(rq->nr_idle <= 0);
	BUG_ON(list_empty(&rq->idle_list));
	/* Future: Move SPU out of low-power SRI state. */
	spu = list_entry(rq->idle_list.next, struct spu, sched_list);
	list_del_init(&spu->sched_list);
	rq->nr_idle--;
	return spu;
}
static inline void del_active(struct spu_runqueue *rq, struct spu *spu)
{
	BUG_ON(rq->nr_active <= 0);
	BUG_ON(list_empty(&rq->active_list));
	list_del_init(&spu->sched_list);
	rq->nr_active--;
}
static inline void add_idle(struct spu_runqueue *rq, struct spu *spu)
{
	/* Future: Put SPU into low-power SRI state. */
	list_add_tail(&spu->sched_list, &rq->idle_list);
	rq->nr_idle++;
}
static inline void add_active(struct spu_runqueue *rq, struct spu *spu)
{
	rq->nr_active++;
	rq->nr_switches++;
	list_add_tail(&spu->sched_list, &rq->active_list);
}
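
/*
 * prio_wakeup - if any thread is blocked waiting for an SPU and at
 * least one SPU is idle, wake exactly one waiter at the best (lowest
 * numbered) blocked priority.
 */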
static void prio_wakeup(struct spu_runqueue *rq)
{
	if (atomic_read(&rq->prio.nr_blocked) && rq->nr_idle) {
		int best = sched_find_first_bit(rq->prio.bitmap);
		if (best < MAX_PRIO) {
			wait_queue_head_t *wq = &rq->prio.waitq[best];
			wake_up_interruptible_nr(wq, 1);
		}
	}
}
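
/*
 * prio_wait - block the current thread on the wait queue for its
 * priority until an SPU may be available.  Both the runqueue semaphore
 * and the context state semaphore are dropped across the schedule() so
 * that other threads can make progress; both are reacquired before
 * returning.
 */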
static void prio_wait(struct spu_runqueue *rq, struct spu_context *ctx,
		      u64 flags)
{
	int prio = current->prio;
	wait_queue_head_t *wq = &rq->prio.waitq[prio];
	DEFINE_WAIT(wait);

	__set_bit(prio, rq->prio.bitmap);
	atomic_inc(&rq->prio.nr_blocked);
	prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		up(&rq->sem);
		up_write(&ctx->state_sema);
		pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__,
			 current->pid, current->prio);
		schedule();
		down_write(&ctx->state_sema);
		down(&rq->sem);
	}
	finish_wait(wq, &wait);
	atomic_dec(&rq->prio.nr_blocked);
	if (!waitqueue_active(wq))
		__clear_bit(prio, rq->prio.bitmap);
}
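
/*
 * is_best_prio - returns non-zero when the current thread has better
 * (numerically lower) priority than the best currently blocked waiter.
 */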
static inline int is_best_prio(struct spu_runqueue *rq)
{
	int best_prio;

	best_prio = sched_find_first_bit(rq->prio.bitmap);
	return (current->prio < best_prio) ? 1 : 0;
}
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	/* Global TLBIE broadcast required with SPEs. */
#if (NR_CPUS > 1)
	__cpus_setall(&mm->cpu_vm_mask, NR_CPUS);
#else
	__cpus_setall(&mm->cpu_vm_mask, NR_CPUS+1); /* is this ok? */
#endif
}
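
/*
 * bind_context - load a saved context onto a physical SPU: hook up the
 * spufs callbacks, make the owning mm visible to the SPU MMU, and
 * restore the context save area (CSA) into the SPU.
 */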
static inline void bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d\n", __FUNCTION__, current->pid,
		 spu->number);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->prio = current->prio;
	spu->mm = ctx->owner;
	mm_needs_global_tlbie(spu->mm);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
}
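
/*
 * unbind_context - save a running context back into its context save
 * area and detach it from the physical SPU, leaving the SPU with no
 * owner and the lowest possible priority.
 */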
static inline void unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d\n", __FUNCTION__,
		 spu->pid, spu->number);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->mm = NULL;
	spu->pid = 0;
	spu->prio = MAX_PRIO;
	ctx->ops = &spu_backing_ops;
	ctx->spu = NULL;
	ctx->flags = 0;
	spu->flags = 0;
	spu->ctx = NULL;
}
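
/*
 * spu_reaper - deferred-work handler that forcibly stops a preempted
 * context if higher priority threads are still blocked waiting for an
 * SPU; otherwise the preemption flag is simply cleared.
 */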
static void spu_reaper(void *data)
{
	struct spu_context *ctx = data;
	struct spu *spu;

	down_write(&ctx->state_sema);
	spu = ctx->spu;
	if (spu && test_bit(SPU_CONTEXT_PREEMPT, &ctx->flags)) {
		if (atomic_read(&spu->rq->prio.nr_blocked)) {
			pr_debug("%s: spu=%d\n", __FUNCTION__, spu->number);
			ctx->ops->runcntl_stop(ctx);
			spu_deactivate(ctx);
			wake_up_all(&ctx->stop_wq);
		} else {
			clear_bit(SPU_CONTEXT_PREEMPT, &ctx->flags);
		}
	}
	up_write(&ctx->state_sema);
	put_spu_context(ctx);
}
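
/*
 * schedule_spu_reaper - mark the context preempted and queue spu_reaper
 * to run once the context's minimum timeslice has expired, or
 * immediately if it already has.
 */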
static void schedule_spu_reaper(struct spu_runqueue *rq, struct spu *spu)
{
	struct spu_context *ctx = get_spu_context(spu->ctx);
	unsigned long now = jiffies;
	unsigned long expire = spu->timestamp + SPU_MIN_TIMESLICE;

	set_bit(SPU_CONTEXT_PREEMPT, &ctx->flags);
	INIT_WORK(&ctx->reap_work, spu_reaper, ctx);
	if (time_after(now, expire))
		schedule_work(&ctx->reap_work);
	else
		schedule_delayed_work(&ctx->reap_work, expire - now);
}
static void check_preempt_active(struct spu_runqueue *rq)
{
	struct list_head *p;
	struct spu *worst = NULL;

	list_for_each(p, &rq->active_list) {
		struct spu *spu = list_entry(p, struct spu, sched_list);
		struct spu_context *ctx = spu->ctx;
		if (!test_bit(SPU_CONTEXT_PREEMPT, &ctx->flags)) {
			if (!worst || (spu->prio > worst->prio))
				worst = spu;
		}
	}
	if (worst && (current->prio < worst->prio))
		schedule_spu_reaper(rq, worst);
}
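
/*
 * get_idle_spu - claim an idle SPU for the current thread.  If none is
 * idle, try to preempt a lower priority active context and then block
 * on the priority wait queue.  Returns NULL when interrupted by a
 * signal.
 */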
static struct spu *get_idle_spu(struct spu_context *ctx, u64 flags)
{
	struct spu_runqueue *rq;
	struct spu *spu = NULL;

	rq = spu_rq();
	down(&rq->sem);
	for (;;) {
		if (rq->nr_idle > 0) {
			if (is_best_prio(rq)) {
				spu = del_idle(rq);
				break;
			} else {
				/* A better prio waiter exists; let it
				 * claim the SPU and try again. */
				prio_wakeup(rq);
				up(&rq->sem);
				yield();
				if (signal_pending(current))
					return NULL;
				rq = spu_rq();
				down(&rq->sem);
				continue;
			}
		} else {
			check_preempt_active(rq);
			prio_wait(rq, ctx, flags);
			if (signal_pending(current)) {
				prio_wakeup(rq);
				spu = NULL;
				break;
			}
			continue;
		}
	}
	up(&rq->sem);
	return spu;
}
static void put_idle_spu(struct spu *spu)
{
	struct spu_runqueue *rq = spu->rq;

	down(&rq->sem);
	add_idle(rq, spu);
	prio_wakeup(rq);
	up(&rq->sem);
}
static int get_active_spu(struct spu *spu)
{
	struct spu_runqueue *rq = spu->rq;
	struct list_head *p;
	struct spu *tmp;
	int rc = 0;

	down(&rq->sem);
	list_for_each(p, &rq->active_list) {
		tmp = list_entry(p, struct spu, sched_list);
		if (tmp == spu) {
			del_active(rq, spu);
			rc = 1;
			break;
		}
	}
	up(&rq->sem);
	return rc;
}
static void put_active_spu(struct spu *spu)
{
	struct spu_runqueue *rq = spu->rq;

	down(&rq->sem);
	add_active(rq, spu);
	up(&rq->sem);
}
/* Lock ordering:
 *
 *	spu_activate() & spu_deactivate() require the
 *	caller to have down_write(&ctx->state_sema).
 *
 *	The rq->sem is briefly held (inside or outside a
 *	given ctx lock) for list management, but is never
 *	held during save/restore.
 */
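
/*
 * For illustration only (a sketch, not code from this file): a
 * hypothetical caller such as the spufs run path would be expected to
 * follow the locking rule above roughly like this:
 *
 *	down_write(&ctx->state_sema);
 *	ret = spu_activate(ctx, 0);
 *	up_write(&ctx->state_sema);
 *	if (ret)
 *		return ret;
 */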
int spu_activate(struct spu_context *ctx, u64 flags)
{
	struct spu *spu;

	if (ctx->spu)
		return 0;
	spu = get_idle_spu(ctx, flags);
	if (!spu)
		return (signal_pending(current)) ? -ERESTARTSYS : -EAGAIN;
	bind_context(spu, ctx);
	/*
	 * We're likely to wait for interrupts on the same
	 * CPU that we are now on, so send them here.
	 */
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	put_active_spu(spu);
	return 0;
}
void spu_deactivate(struct spu_context *ctx)
{
	struct spu *spu;
	int needs_idle;

	spu = ctx->spu;
	if (!spu)
		return;
	needs_idle = get_active_spu(spu);
	unbind_context(spu, ctx);
	if (needs_idle)
		put_idle_spu(spu);
}
void spu_yield(struct spu_context *ctx)
{
	struct spu *spu;
	int need_yield = 0;

	down_write(&ctx->state_sema);
	spu = ctx->spu;
	if (spu && (sched_find_first_bit(spu->rq->prio.bitmap) < MAX_PRIO)) {
		pr_debug("%s: yielding SPU %d\n", __FUNCTION__, spu->number);
		spu_deactivate(ctx);
		ctx->state = SPU_STATE_SAVED;
		need_yield = 1;
	} else if (spu) {
		spu->prio = MAX_PRIO;
	}
	up_write(&ctx->state_sema);
	if (unlikely(need_yield))
		yield();
}
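
/*
 * spu_sched_init - allocate and initialize the single global runqueue,
 * then place every physical SPU handed out by spu_alloc() on its idle
 * list.  Returns non-zero on failure.
 */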
int __init spu_sched_init(void)
{
	struct spu_runqueue *rq;
	struct spu *spu;
	int i;

	rq = spu_runqueues = kmalloc(sizeof(struct spu_runqueue), GFP_KERNEL);
	if (!rq) {
		printk(KERN_WARNING "%s: Unable to allocate runqueues.\n",
		       __FUNCTION__);
		return 1;
	}
	memset(rq, 0, sizeof(struct spu_runqueue));
	init_MUTEX(&rq->sem);
	INIT_LIST_HEAD(&rq->active_list);
	INIT_LIST_HEAD(&rq->idle_list);
	atomic_set(&rq->prio.nr_blocked, 0);
	for (i = 0; i < MAX_PRIO; i++) {
		init_waitqueue_head(&rq->prio.waitq[i]);
		__clear_bit(i, rq->prio.bitmap);
	}
	__set_bit(MAX_PRIO, rq->prio.bitmap);
	for (;;) {
		spu = spu_alloc();
		if (!spu)
			break;
		pr_debug("%s: adding SPU[%d]\n", __FUNCTION__, spu->number);
		add_idle(rq, spu);
		spu->rq = rq;
		spu->timestamp = jiffies;
	}
	if (!rq->nr_idle) {
		printk(KERN_WARNING "%s: No available SPUs.\n", __FUNCTION__);
		kfree(rq);
		return 1;
	}
	return 0;
}
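
/*
 * spu_sched_exit - return all idle SPUs to the allocator and free the
 * runqueue; contexts are expected to have been deactivated before the
 * module is unloaded, so the active list should be empty by now.
 */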
void __exit spu_sched_exit(void)
{
	struct spu_runqueue *rq = spu_rq();
	struct spu *spu;

	if (!rq) {
		printk(KERN_WARNING "%s: no runqueues!\n", __FUNCTION__);
		return;
	}
	while (rq->nr_idle > 0) {
		spu = del_idle(rq);
		spu_free(spu);
	}
	kfree(rq);
}