
Diff of /rtmk/sched-rr.c


Between revision 1.1 (jrydberg, Thu Jan 3 01:32:43 2002 UTC) and revision 1.2 (jrydberg, Mon Jan 7 02:50:17 2002 UTC)
# Line 32 (v1.1) / Line 32 (v1.2)  rr_select (struct sched_policy *sp, stru
 
   /* If RUN_MAP is zero - there is no available threads in this policy.  */
 
+  spin_lock (&sp->interlock);
   if (! sp->run_map)
-    return 0;
+    {
+      spin_unlock (&sp->interlock);
+      return 0;
+    }
 
   /* Remove thread with highest priority from thread, and update the queue
      map if the queue became empty.  */
# Line 45 (v1.1) / Line 49 (v1.2)  rr_select (struct sched_policy *sp, stru
 
   --sp->run_count;
   sp->run_map &= ~((!! empty_p) << whichq);
+  spin_unlock (&sp->interlock);
 
   /* Set thread quantum.  */
 
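What these rr_select hunks are now protecting is a small priority bitmap: sp->run_map keeps one bit per run queue, a bit stays set while the matching run_queues[] entry is non-empty, and whichq = (15 - sched_priority) maps higher priorities to lower queue indices, so ffs (run_map) - 1 (used by rr_preempt_p further down) names the best non-empty queue. The fragment below is a stand-alone sketch of that idiom under those assumptions, with the queues reduced to plain counters; it is not rtmk's code.

#include <stdint.h>
#include <stdio.h>
#include <strings.h>             /* ffs */

#define NQUEUES 16

/* Bit i of run_map is set while queue i holds at least one thread.
   Queue 0 is the most urgent one (whichq = 15 - sched_priority).  */
static uint32_t run_map;
static int run_length[NQUEUES];  /* stand-in for the real run_queues[]  */

static void
enqueue (int sched_priority)
{
  int whichq = 15 - sched_priority;

  run_length[whichq]++;
  run_map |= 1u << whichq;
}

/* Pop from the best non-empty queue; -1 if everything is empty.  The bit
   is cleared only when that queue just drained, as in rr_select.  */
static int
dequeue (void)
{
  int whichq, empty_p;

  if (! run_map)
    return -1;

  whichq = ffs (run_map) - 1;    /* lowest set bit = best queue  */
  run_length[whichq]--;
  empty_p = (run_length[whichq] == 0);
  run_map &= ~((!! empty_p) << whichq);
  return whichq;
}

int
main (void)
{
  int i;

  enqueue (3);
  enqueue (12);
  enqueue (12);

  for (i = 0; i < 4; i++)
    printf ("%d ", dequeue ());
  printf ("\n");                 /* prints "3 3 12 -1"  */
  return 0;
}

The !! empty_p in the clear step normalizes the emptiness test to 0 or 1, so the expression either clears exactly bit whichq or leaves run_map untouched.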
# Line 63 (v1.1) / Line 68 (v1.2)  rr_add (struct sched_policy *sp, struct
 
   /* Insert thread into scheduling policy's run queue and update RUN_MAP.  */
 
+  spin_unlock (&sp->interlock);
   whichq = (15 - thread->sched_priority);
   queue_enter (&sp->run_queues [whichq], thread, struct thread *, runq);
 
# Line 74 (v1.1) / Line 80 (v1.2)  rr_add (struct sched_policy *sp, struct
   if (may_preempt_p)
     {
       if (THREAD_CURRENT()->sched_premul < thread->sched_premul)
-        return may_preempt_p;
+        {
+          spin_unlock (&sp->interlock);
+          return may_preempt_p;
+        }
     }
 
+  spin_unlock (&sp->interlock);
   return false;
 }
 
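queue_enter and queue_remove, as used in rr_add above and rr_remove below, read like the classic Mach queue primitives: the element embeds its own chain field (the runq argument) and the call splices it into or out of a circular doubly linked list headed by the queue. That reading is an assumption from the call sites, not something taken from rtmk's headers, and the sketch below uses plain functions on the embedded link instead of the (queue, elt, type, field) macro form.

#include <stddef.h>
#include <stdio.h>

struct queue_entry
{
  struct queue_entry *next, *prev;
};

struct thread
{
  int sched_priority;
  struct queue_entry runq;       /* embedded chain field, as in the diff  */
};

static void
queue_init (struct queue_entry *q)
{
  q->next = q->prev = q;
}

static int
queue_empty (struct queue_entry *q)
{
  return q->next == q;
}

/* Append E at the tail of Q.  */
static void
queue_enter (struct queue_entry *q, struct queue_entry *e)
{
  e->prev = q->prev;
  e->next = q;
  q->prev->next = e;
  q->prev = e;
}

/* Unlink E from whatever queue it is on.  */
static void
queue_remove (struct queue_entry *e)
{
  e->prev->next = e->next;
  e->next->prev = e->prev;
  e->next = e->prev = NULL;
}

int
main (void)
{
  struct queue_entry runq;
  struct thread a = { 12, { NULL, NULL } };
  struct thread b = { 7, { NULL, NULL } };

  queue_init (&runq);
  queue_enter (&runq, &a.runq);
  queue_enter (&runq, &b.runq);
  queue_remove (&a.runq);

  printf ("%d\n", queue_empty (&runq));   /* prints 0: b is still queued  */
  return 0;
}

Because the links live inside struct thread, enqueue and dequeue never allocate, which is part of what makes it reasonable for rr_select, rr_add and rr_remove to run under a spin lock.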
# Line 90 (v1.1) / Line 100 (v1.2)  rr_remove (struct sched_policy *sp, stru
 
   /* Insert thread into scheduling policy's run queue and update RUN_MAP.  */
 
+  spin_lock (&sp->interlock);
   whichq = thread->sched_priority;
   queue_remove (&sp->run_queues [whichq], thread, struct thread *, runq);
   empty_p = queue_empty (&sp->run_queues [whichq]);
 
   --sp->run_count;
   sp->run_map &= ~((!! empty_p) << whichq);
+  spin_unlock (&sp->interlock);
 }
 
 /* Scheduling decisions at periodic clock tick.  Returns true if we should
# Line 116 (v1.1) / Line 128 (v1.2)  rr_clock (struct sched_policy *sp, struc
 static bool
 rr_preempt_p (struct sched_policy *sp, struct thread *thread)
 {
+  spin_lock (&sp->interlock);
   if (! sp->run_map)
-    return false;
+    {
+      spin_unlock (&sp->interlock);
+      return false;
+    }
 
   if (thread->sched_policy < sp->policy_index)
-    return true;
+    {
+      spin_unlock (&sp->interlock);
+      return true;
+    }
 
   if ((ffs (sp->run_map) - 1) > (15 - thread->sched_priority))
-    return true;
+    {
+      spin_unlock (&sp->interlock);
+      return true;
+    }
 
+  spin_unlock (&sp->interlock);
   return false;
 }
 
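The substance of revision 1.2 is the locking discipline itself: take sp->interlock before touching run_map or the run queues and release it on every exit path, which is why each early return above grew its own spin_unlock. Below is a minimal sketch of the same shape, with a C11 atomic_flag standing in for rtmk's spin lock and a sched_policy trimmed to the fields the preemption check reads; only the field names come from the diff. It also shows the common alternative of accumulating the result in a local so the lock is released at a single point.

#include <stdatomic.h>
#include <stdbool.h>

/* Toy spin lock standing in for rtmk's spin_lock/spin_unlock.  */
struct spin_lock
{
  atomic_flag locked;
};

static void
spin_lock (struct spin_lock *l)
{
  while (atomic_flag_test_and_set_explicit (&l->locked, memory_order_acquire))
    ;                            /* busy-wait until the flag was clear  */
}

static void
spin_unlock (struct spin_lock *l)
{
  atomic_flag_clear_explicit (&l->locked, memory_order_release);
}

/* Only the fields the preemption check reads; everything else omitted.  */
struct sched_policy
{
  struct spin_lock interlock;
  unsigned int run_map;
  int policy_index;
};

/* The first two rr_preempt_p checks, restructured with one exit point:
   the verdict is accumulated in a local and the lock is dropped once.  */
static bool
preempt_p_single_exit (struct sched_policy *sp, int thread_policy_index)
{
  bool preempt = false;

  spin_lock (&sp->interlock);
  if (sp->run_map && thread_policy_index < sp->policy_index)
    preempt = true;
  spin_unlock (&sp->interlock);

  return preempt;
}

int
main (void)
{
  struct sched_policy sp = { { ATOMIC_FLAG_INIT }, 1u << 3, 1 };

  return preempt_p_single_exit (&sp, 0) ? 0 : 1;
}

Whether to unlock at each return or funnel everything through one exit is a style choice; the revision as committed takes the per-return route, which keeps each branch self-contained at the cost of repeating the unlock.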

Legend: lines prefixed with '-' were removed from v.1.1, lines prefixed with '+' were added in v.1.2, and unprefixed lines are unchanged context.
