
Diff of /rtmk/thread.c


revision 1.23 by jrydberg, Wed Mar 27 00:13:45 2002 UTC
revision 1.24 by jrydberg, Tue Apr 9 22:18:16 2002 UTC
# Line 94 (v1.23) / Line 94 (v1.24)  static spin_lock_t wait_locks [NUMBER_OF
 #define WAIT_HASH(event) \
   (((int)((event) < 0 ? ((event) ^ -1) : (event)))%NUMBER_OF_WAIT_QUEUES)
 
+/* List of free (and already allocated) stacks.  This is
+   protected by the stack_list_lock spin-lock.  */
+vm_offset_t stack_list_first;
+spin_lock_t stack_list_lock = SPIN_LOCK_INITIALIZER;
+
 /* Allocate a new kernel stack for THREAD.  */
 static void
 stack_alloc (struct thread *thread, void (*continuation) ())
 {
   vm_offset_t stack;
 
-  stack = (vm_offset_t) kmem_cache_alloc (stack_cache);
-  if (! stack)
-    panic ("no kernel stack for thread %p", thread);
-
-  trace_printf ("allocated stack %p for thread %p",
-                stack, thread);
-
-  trace_count (kernel_stacks++);
+  spin_lock (&stack_list_lock);
+  if (stack_list_first == 0)
+    {
+      spin_unlock (&stack_list_lock);
+      stack = (vm_offset_t) kmem_cache_alloc (stack_cache);
+      assert (stack);
+      spin_lock (&stack_list_lock);
+
+      *(vm_offset_t *) stack = stack_list_first;
+      stack_list_first = stack;
+
+      trace_count (kernel_stacks++);
+      trace_printf ("allocated stack %p for thread %p",
+                    stack, thread);
+    }
+  else
+    trace_printf ("stack hit for thread %p", thread);
+
+  /* When we get here stack list is locked, and
+     there is at least one free stack.  */
+  stack = stack_list_first;
+  stack_list_first = *(vm_offset_t *) stack;
+  spin_unlock (&stack_list_lock);
+
   STACK_ATTACH (thread, stack, continuation);
 }
 
+/* Free stack which was allocated with stack_alloc.  */
+static void
+stack_free (vm_offset_t stack)
+{
+  spin_lock (&stack_list_lock);
+  *(vm_offset_t *) stack = stack_list_first;
+  stack_list_first = stack;
+  spin_unlock (&stack_list_lock);
+}
+
 /* Reaper thread continuation.
    This deallocates memory used by terminated threads.  */
 
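The new allocator keeps released stacks on an intrusive LIFO free list: the first word of each free stack holds the address of the next one, and stack_alloc/stack_free push and pop under stack_list_lock. The fragment below is a minimal user-space sketch of the same idea, not rtmk code: malloc and a pthread mutex stand in for kmem_cache_alloc and the kernel spin lock, and STACK_SIZE plus the *_sketch names are made up for illustration.

/* Minimal user-space sketch of the intrusive LIFO free list used by
   stack_alloc/stack_free above.  malloc and a pthread mutex replace
   kmem_cache_alloc and the kernel spin lock; STACK_SIZE is arbitrary.  */
#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

#define STACK_SIZE (4 * 4096)

typedef unsigned long vm_offset_t;

static vm_offset_t stack_list_first;
static pthread_mutex_t stack_list_lock = PTHREAD_MUTEX_INITIALIZER;

static vm_offset_t
stack_alloc_sketch (void)
{
  vm_offset_t stack;

  pthread_mutex_lock (&stack_list_lock);
  if (stack_list_first == 0)
    {
      /* Free list empty: drop the lock while allocating fresh memory,
         then push the new stack so the common pop below succeeds.  */
      pthread_mutex_unlock (&stack_list_lock);
      stack = (vm_offset_t) malloc (STACK_SIZE);
      assert (stack);
      pthread_mutex_lock (&stack_list_lock);

      *(vm_offset_t *) stack = stack_list_first;
      stack_list_first = stack;
    }

  /* List is locked and non-empty: pop the head entry.  */
  stack = stack_list_first;
  stack_list_first = *(vm_offset_t *) stack;
  pthread_mutex_unlock (&stack_list_lock);
  return stack;
}

static void
stack_free_sketch (vm_offset_t stack)
{
  /* Push the stack back on the head of the free list for reuse.  */
  pthread_mutex_lock (&stack_list_lock);
  *(vm_offset_t *) stack = stack_list_first;
  stack_list_first = stack;
  pthread_mutex_unlock (&stack_list_lock);
}

Like revision 1.24, this sketch never returns memory to the underlying allocator: once allocated, a stack stays on the free list, trading footprint for the "stack hit" fast path on later allocations.
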
# Line 215 (v1.23) / Line 245 (v1.24)  thread_deallocate (struct thread *thread
   ASSERT (thread->sched_state & THREAD_STATE_ZOMBIE);
 
   if (thread->kernel_stack)
-    kmem_cache_free (stack_cache, (void *) thread->kernel_stack);
+    stack_free (thread->kernel_stack);
 
   /* ??? release other resources.  */
 
# Line 486 (v1.23) / Line 516 (v1.24)  thread_invoke (struct thread *old_thread
           if ((old_thread->sched_state & THREAD_SCHED_STATE)
               == THREAD_STATE_RUN)
             thread_setrun (old_thread, false);
+#if 0
+          /* We can release stack for this thread.  */
+          else
+            {
+              trace_printf ("can dealloc stack (%p) for thread %p", old_thread->swap_fn,
+                            old_thread);
+
+              stack_free (old_thread->kernel_stack);
+              old_thread->kernel_stack = 0;
+              old_thread->sched_state |= THREAD_STATE_SWAPPED;
+            }
+#endif
         }
     }
 
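The #if 0 branch added here (together with the similar block removed from thread_setrun below) sketches a continuation-based stack handoff: when the outgoing thread is no longer runnable and has recorded a continuation, its kernel stack can go back on the free list and the thread is marked THREAD_STATE_SWAPPED. The fragment below is only a hypothetical swap-in counterpart, not code from this revision: the name thread_swapin_sketch and the exact flag handling are assumptions, while stack_alloc, thread_setrun, and the struct thread fields are the ones that appear in this diff.

/* Hypothetical swap-in for a stack-less thread: give it a stack from
   the free list and start it in its recorded continuation.  Only a
   sketch; the real resume hook is not part of this revision.  */
static void
thread_swapin_sketch (struct thread *thread)
{
  assert (thread->sched_state & THREAD_STATE_SWAPPED);
  assert (thread->kernel_stack == 0);

  /* Attach a fresh (or recycled) stack and arrange for the thread to
     resume at its continuation instead of where it blocked.  */
  stack_alloc (thread, thread->swap_fn);

  thread->sched_state &= ~THREAD_STATE_SWAPPED;
  thread_setrun (thread, false);
}
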
# Line 997 (v1.23) / Line 1039 (v1.24)  thread_setrun (struct thread *thread, bo
 
   assert ((thread->sched_state & THREAD_SCHED_STATE) == THREAD_STATE_RUN);
 
-  /* Check if we can deallocate the kernel stack for THREAD.  */
-
-#if 0
-  if (thread->swap_fn)
-    {
-      printf ("can dealloc stack (%p) for thread %p\n", thread->swap_fn,
-              thread);
-
-      /* kmem_cache_free (stack_cache, (void *) thread->kernel_stack); */
-      thread->kernel_stack = 0;
-      thread->sched_state |= THREAD_STATE_SWAPPED;
-    }
-#endif
-
 #if NCPUS > 1
   /* First check if we can continue to execute on processor that last
      executed thread.  */
