#include "libkern.h"
#include "queue.h"

bool thread_system_running = false;

struct thread * thread_select (struct processor *processor);
void thread_dispatch (struct thread *thread);

#define CALC_PREMUL(THREAD) \
  do \
    { \
      (THREAD)->sched_premul = ((THREAD)->sched_priority + 1) \
        << ((THREAD)->sched_policy * 8); \
    } \
  while (0)
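
/* The premultiplied priority folds policy and priority into one value
   that can be compared directly: each policy level is worth a full byte,
   so a thread under a higher-numbered policy always compares above one
   under a lower-numbered policy.  For example, priority 6 under policy 0
   gives (6 + 1) << 0 = 7, while priority 0 under policy 1 gives
   (0 + 1) << 8 = 256. */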

/* Scheduling policies, indexed by the THREAD_POLICY_* constants. */
struct sched_policy sched_policies [4];

/* Macro expression is true if PRI is a valid priority. */
#define INVALID_PRIORITY_P(PRI) ((PRI) < 0 || (PRI) >= NUMBER_OF_RUN_QUEUES)

/* ... */

/* Cache for kernel stacks. */
static struct kmem_cache *stack_cache;


/* Wait semantics.

   ...
   only start with an event code. */

/* Number of wait queues.  Also used for event hashing. */

#define NUMBER_OF_WAIT_QUEUES 64

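/* Events are hashed onto one of these queues (with WAIT_HASH), so several
   unrelated events may share a queue and each waiter is checked against
   its own wait_event before being woken. */
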
/* Array of wait queues. */

/* ... */

  stack = (vm_offset_t) kmem_cache_alloc (stack_cache);
  if (! stack)
    panic ("no kernel stack for thread %p", thread);

  trace_count (kernel_stacks++);
  STACK_ATTACH (thread, stack, continuation);
}

/* Create a new thread in TASK.  The thread is returned in THREADP. */

kern_return_t
/* ... */
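  /* New threads get a default timesharing priority and start out
     suspended. */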
  thread->sched_priority = 6;
  thread->sched_policy = THREAD_POLICY_TIMESHARE;
  thread->sched_state = THREAD_STATE_SUSPEND;
  thread->sched_sp = & sched_policies [THREAD_POLICY_TIMESHARE];

  CALC_PREMUL (thread);

  timer_initialize (&thread->timer_user);
  timer_initialize (&thread->timer_system);

  THREAD_INITIALIZE_PCB (thread);

  /* Create kernel port for thread. */

  thread->thread_port = ipc_port_create_kernel ();

/* ... */

kern_return_t
thread_bind (struct thread *thread, struct processor *processor)
{
  thread->bound_processor = processor;
  return KERN_SUCCESS;
}

/* Called when the current thread is given a new stack. */

static void
  /* ...
     There might not be an old thread, if we are
     the first thread to run on this processor. */
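
  /* Put the thread we switched away from back into circulation;
     thread_dispatch only requeues it if it is still runnable. */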

  if (old_thread != 0)
    thread_dispatch (old_thread);

#if 0
  trace_printf ("new stack ?");
#endif

  SPL0 ();
  (*continuation) ();
/* ...
   Try to hand off the stack to NEW_THREAD if we have a continuation and
   the new thread is swapped out. */

/* ??? we could use `thread_dispatch' in the common path. */

static bool
thread_invoke (struct thread *old_thread, void (*continuation) (void),
               struct thread *new_thread)

  /* ... */

          switch (old_thread->sched_state & ~THREAD_STATE_SWAPPED)
            {
            case THREAD_STATE_RUN:
              thread_setrun (old_thread, false);
              break;
            }
        }
      else
        {
          switch (old_thread->sched_state & ~THREAD_STATE_SWAPPED)
            {
            case THREAD_STATE_RUN:
              thread_setrun (old_thread, false);
              break;
            }
        }
    }

  /* Check if we have to allocate a new stack for the thread. */
  /* ... */
    {
      if (new_thread->sched_state & THREAD_STATE_SWAPPED)
        {
#if 0
          trace_printf ("here! %x", new_thread->sched_state);
#endif

          stack_alloc (new_thread, (void (*)()) thread_continue);
          new_thread->sched_state &= ~THREAD_STATE_SWAPPED;
        }

      switch (old_thread->sched_state & ~THREAD_STATE_SWAPPED)
        {
        case THREAD_STATE_RUN:
          thread_setrun (old_thread, false);
          break;
        }
    }
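
  /* Switch to NEW_THREAD.  Execution resumes here once some other thread
     switches back to us; the value handed back is the thread we came
     from. */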

  trace_count (context_switches++);
  old_thread = SWITCH_CONTEXT (old_thread, continuation, new_thread);

  /* We are back (from the dead??) */
  /* ... */
      void (*new_continuation) () = new_thread->swap_fn;

      new_thread->swap_fn = (void (*)()) 0;
#if 0
      trace_printf ("here");
#endif
      SPL0 ();
      CALL_CONTINUATION (new_continuation);
      /* NOTREACHED */
  /* ... */
}

/* Assert that the current thread is about to go to sleep until the
   specified event occurs. */

static void
assert_wait (int event)
  /* ... */
  SPL_T spl;

  if (thread->wait_event != 0)
    trace_panic ("thread already has wait event");

  spl = SPLOFF ();

  /* ... */
  SPLON (spl);
}

/* Interrupt the waiting thread THREAD.  The wait result is given in
   RESULT. */

static void
clear_wait (struct thread *thread, int result)
{
  /* ... */
      int hash_index = WAIT_HASH (event);

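      /* Take the wait-queue lock before the thread lock; drop the thread
         lock first so both locks are always acquired in the same order. */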
      thread_unlock (thread);
      spin_lock (&wait_locks [hash_index]);
      thread_lock (thread);
      /* ... */

          thread_unlock (thread);
          spin_unlock (&wait_locks [hash_index]);

          thread_setrun (thread, true);
        }
      else
        thread_unlock (thread);

  SPLON (spl);
}

/* Generic wakeup function.  Wake up threads waiting for EVENT and pass
   RESULT to them.  If ONE_P is true wake up at most one thread, otherwise
   wake up every thread waiting for the event. */

static void
thread_wakeup_generic (int event, bool one_p, int result)
{
  /* ... */
      thread_lock (thread);
      if (thread->wait_event == event)
        {
          queue_remove (&wait_queues [hash_index], thread,
                        struct thread *, waitq);

          thread->sched_state &= ~THREAD_STATE_WAIT;
          thread->wait_event = 0;

          /* ??? this is not SMP safe. */
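          /* Drop the thread lock and the wait-queue lock before calling
             thread_setrun, which may take other locks, then re-take the
             wait-queue lock so the scan can continue. */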

          thread_unlock (thread);
          spin_unlock (&wait_locks [hash_index]);
          thread_setrun (thread, true);
          spin_lock (&wait_locks [hash_index]);
        }
      else
        thread_unlock (thread);

      if (one_p)
        break;

      thread = next_thread;
    }
  /* ... */
  SPLON (spl);
}

/*!
   Wake up all threads waiting for an event.
   @param EVENT the event to wake threads up for.
 */
void
thread_wakeup (int event)
  /* ... */
  thread_wakeup_generic (event, true, 0);
}


/* Interrupt THREAD and bring it back from its waiting state. */

void
  /* ... */

      thread->sched_state &= ~THREAD_STATE_SUSPEND;
      thread->sched_state |= THREAD_STATE_RUN;
      thread_unlock (thread);

      spl = SPLOFF ();
      thread_setrun (thread, true);
      SPLON (spl);

      return KERN_SUCCESS;
    }
  thread_unlock (thread);

  /* ... */
  SPLON (spl);
}

/* Will (donate) the current thread's priority to THREAD and to all
   threads that THREAD is blocked on. */
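/* This is priority inheritance: if the current thread outranks a thread
   it is waiting behind, that thread (and everything it in turn is blocked
   on, following blocked_lock) is temporarily raised to the current
   thread's priority, policy and premul so the current thread is not held
   up behind lower-priority work.  The original values are saved the first
   time and restored by thread_waive_priority. */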

void
thread_will_priority (struct thread *thread)
{
  bool runq_p;
  SPL_T spl;

  spl = SPLOFF ();

  while (thread)
    {
      thread_lock (thread);

      if (thread->sched_premul < THREAD_CURRENT()->sched_premul)
        {
          runq_p = !! thread->runq_list;

          if (runq_p)
            (*thread->sched_sp->ops->remove_fn) (thread->sched_sp, thread);

          if (! thread->inherited_priority_p)
            {
              thread->original_priority = thread->sched_priority;
              thread->original_policy = thread->sched_policy;
              thread->original_premul = thread->sched_premul;
              thread->original_sp = thread->sched_sp;

              thread->inherited_priority_p = true;
            }

          thread->sched_priority = THREAD_CURRENT()->sched_priority;
          thread->sched_policy = THREAD_CURRENT()->sched_policy;
          thread->sched_premul = THREAD_CURRENT()->sched_premul;
          thread->sched_sp = THREAD_CURRENT()->sched_sp;

          if (runq_p)
            (*thread->sched_sp->ops->add_fn) (thread->sched_sp, thread, false);
        }
      thread_unlock (thread);

      if (! thread->blocked_lock)
        break;
      thread = thread->blocked_lock->thread;
    }

  SPLON (spl);
}

/* Waive inherited priority.  THREAD should be the current thread. */

void
thread_waive_priority (struct thread *thread)
{
  SPL_T spl;

  ASSERT (thread == THREAD_CURRENT ());

  spl = SPLOFF ();
  thread_lock (thread);

  if (thread->inherited_priority_p)
    {
      thread->sched_priority = thread->original_priority;
      thread->sched_policy = thread->original_policy;
      thread->sched_premul = thread->original_premul;
      thread->sched_sp = thread->original_sp;

      thread->inherited_priority_p = false;
    }

  thread_unlock (thread);
  SPLON (spl);
}

/* Sleep until EVENT occurs.  If LOCK is valid, unlock it before we start
   snoozing.  Block with CONTINUATION. */

/* ... */
      thread->suspend_cnt = 0;
    }

/* Select the next thread to run on PROCESSOR.  This function should be
   called with interrupts disabled. */

struct thread *
thread_select (struct processor *processor)
{
  struct thread *thread = 0;
  int i;

  processor->state = PROCESSOR_STATE_RUNNING;

  /* Loop through all scheduling policies and find the next thread. */
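  /* The policies are scanned from the highest array index down, so a
     policy with a higher index takes precedence whenever both it and a
     lower one have a runnable thread. */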

  for (i = 3; i >= 0; i--)
    {
      thread = (*sched_policies[i].ops->select_fn) (&sched_policies[i],
                                                    processor);
#if 0
      if (thread)
        trace_printf ("got thread %p %d", thread,
                      (int) thread->scratch_area [3]);
#endif
      if (thread)
        return thread;
    }

  /* If no scheduling policy has a runnable thread, see if we can continue
     to run the current thread. */

  thread = THREAD_CURRENT ();
  if (thread->sched_state == THREAD_STATE_RUN)
    return thread;

  /* We have to select the idle thread for this processor. */

  processor->state = PROCESSOR_STATE_IDLE;
  return processor->idle_thread;
}

/* Check whether THREAD should be preempted by some thread on the run
   queues. */

bool
thread_preemption_needed (struct thread *thread)
{
  bool preempt_p;
  SPL_T spl;
  int i;

  spl = SPLOFF ();

  /* Ask every scheduling policy whether it has a thread that should
     preempt THREAD. */

  for (i = 3, preempt_p = false; i >= 0 && !preempt_p; i--)
    {
      preempt_p = (*sched_policies[i].ops->preempt_fn) (&sched_policies[i],
                                                        thread);
    }

  SPLON (spl);
  return preempt_p;
}

|
/* Dispatch THREAD onto idle processor or run queue if no processors |
719 |
|
available. But only if thread is runable. */ |
720 |
|
|
721 |
|
void |
722 |
|
thread_dispatch (struct thread *thread) |
723 |
|
{ |
724 |
|
if ((thread->sched_state & THREAD_SCHED_STATE) == THREAD_STATE_RUN) |
725 |
|
thread_setrun (thread, false); |
726 |
|
} |
727 |
|
|
728 |
|
|
729 |
/* Insert THREAD on run queue and if PREEMP_P is true we check if we |
/* Insert THREAD on run queue and if PREEMP_P is true we check if we |
730 |
should preempt the current CPU. */ |
should preempt the current CPU. */ |
731 |
|
|
732 |
void |
void |
733 |
thread_setrun (struct thread *thread, bool preempt_p) |
thread_setrun (struct thread *thread, bool preempt_p) |
734 |
{ |
{ |
735 |
if (thread->runq_p == false) |
bool invoke_p = false; |
736 |
add_thread_to_run_queue (thread); |
#if NCPUS > 1 |
737 |
|
struct processor *processor; |
738 |
|
int i; |
739 |
|
#endif |
740 |
|
|
741 |
if (! preempt_p) |
assert ((thread->sched_state & THREAD_SCHED_STATE) == THREAD_STATE_RUN); |
|
return; |
|
742 |
|
|
743 |
if (thread->sched_policy == THREAD_POLICY_TIMESHARE |
#if NCPUS > 1 |
744 |
&& (THREAD_CURRENT ()->sched_credits < thread->sched_credits)) |
/* First check if we can continue to execute on processor that last |
745 |
|
executed thread. */ |
746 |
|
|
747 |
|
processor = thread->last_processor; |
748 |
|
if (processor) |
749 |
{ |
{ |
750 |
thread_invoke (THREAD_CURRENT (), 0, thread); |
if (processor->state == PROCESSOR_STATE_IDLE) |
751 |
|
{ |
752 |
|
processor->next_thread = thread; |
753 |
|
processor->state = PROCESSOR_STATE_DISPATCH; |
754 |
|
|
755 |
|
/* ??? send interrupt to processor. */ |
756 |
|
return; |
757 |
|
} |
758 |
} |
} |
759 |
else if (thread->sched_policy == THREAD_POLICY_REALTIME |
|
760 |
&& (THREAD_CURRENT ()->sched_priority < thread->sched_priority)) |
/* No, we can not execute on last processor. Check if there is any free |
761 |
|
processors available at all. We also check if we can preempt thread. */ |
762 |
|
|
763 |
|
for (i = 0; i < ncpus; i++) |
764 |
{ |
{ |
765 |
thread_invoke (THREAD_CURRENT (), 0, thread); |
processor = PROCESSOR_N (i); |
766 |
|
|
767 |
|
if (processor->state == PROCESSOR_STATE_IDLE) |
768 |
|
{ |
769 |
|
processor->next_thread = thread; |
770 |
|
processor->state = PROCESSOR_STATE_DISPATCH; |
771 |
|
|
772 |
|
/* ??? send interrupt to processor. */ |
773 |
|
return; |
774 |
|
} |
775 |
|
else if (processor->state == PROCESSOR_STATE_RUNNING && |
776 |
|
thread->sched_premul > processor->current_thread->sched_premul) |
777 |
|
{ |
778 |
|
/* ??? send interrupt to processor. */ |
779 |
|
return; |
780 |
|
} |
781 |
} |
} |
782 |
|
#endif |
783 |
|
|
784 |
|
/* ??? check if we can make thread run on any other CPU. */ |
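
  /* Hand the thread to its scheduling policy.  The policy's add function
     tells us whether the thread we just queued should preempt the caller;
     if so, block so that thread_select can pick it up. */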

  invoke_p = (*thread->sched_sp->ops->add_fn) (thread->sched_sp,
                                               thread, preempt_p);

  if (invoke_p)
    thread_block (0);
}

kern_return_t
thread_set_priority (struct thread *thread, int policy, int priority)
{
  SPL_T spl;

  if (policy < 0 || policy > 2 || priority < 0 || priority > 127)
    return KERN_FAILURE; /* ??? */

  spl = SPLOFF ();
  thread_lock (thread);

  thread->sched_policy = policy;
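  /* External priorities run from 0 to 127; internally a coarser scale is
     used, so divide by eight (giving 0 to 15). */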
  thread->sched_priority = (priority >> 3);
  thread->sched_sp = & sched_policies [policy];
  CALC_PREMUL (thread);

  thread_unlock (thread);
  SPLON (spl);

  return KERN_SUCCESS;
}


/* Function for bootstrapping the thread system. */

void
/* ... */
{
  int i;

  /* Create memory pools for thread structures and kernel stacks. */

  thread_cache = kmem_cache_create ("thread cache", sizeof (struct thread), 0);
  assert (thread_cache);

  stack_cache = kmem_cache_create ("stack cache", KERNEL_STACK_SIZE, 0);
  assert (stack_cache);

  /* Initialize scheduling policies. */

  sched_policy_fifo_init (&sched_policies [THREAD_POLICY_REALTIME],
                          THREAD_POLICY_REALTIME);
  sched_policy_fifo_init (&sched_policies [THREAD_POLICY_FIFO],
                          THREAD_POLICY_FIFO);
  sched_policy_rr_init (&sched_policies [THREAD_POLICY_RR],
                        THREAD_POLICY_RR);
  sched_policy_ts_init (&sched_policies [THREAD_POLICY_TIMESHARE],
                        THREAD_POLICY_TIMESHARE);

  /* Initialize all wait queues. */
