Diff of /hurd-l4/libhurd-cap-server/bucket-manage-mt.c


Changes from revision 1.8 (marcus, Sat Oct 23 01:07:01 2004 UTC) to revision 1.9 (marcus, Mon Oct 25 15:11:18 2004 UTC).
Lines prefixed with "-" were removed in 1.9, lines prefixed with "+" were added, other lines are unchanged context.
# Line 42 (v1.8) / Line 42 (v1.9)
   l4_xreceive_timeout (from, l4_timeouts (L4_ZERO_TIME, L4_NEVER), fromp)
 
 
-/* FIXME: Throughout this file, for debugging the behaviour could be
+/* FIXME: Throughout this file, for debugging, the behaviour could be
    relaxed to return errors to callers which would otherwise be
    ignored (due to malformed requests etc).  */
 
# Line 345 (v1.8) / Line 345 (v1.9)  manage_demuxer_cleanup
 /* A worker thread for RPC processing.  The behaviour of this function
    is tightly integrated with the behaviour of the manager thread.  */
 static void *
-manage_mt_worker (void *arg)
+manage_mt_worker (void *arg, bool async)
 {
   struct worker_info *info = (struct worker_info *) arg;
   hurd_cap_bucket_t bucket = info->bucket;
# Line 355 (v1.8) / Line 355 (v1.9)  manage_mt_worker
   l4_time_t timeout = info->timeout;
   l4_thread_id_t from;
   l4_msg_tag_t msg_tag;
+  bool current_worker_is_us;
 
-  /* Prepare the worker queue item.  As we are always the current
-     worker thread when we are started up, we do not add ourselves to
-     the free list.  */
+  /* Prepare the worker queue item.  [SYNC: As we are always the
+     current worker thread when we are started up, we do not add
+     ourselves to the free list.]  */
   worker->thread = pthread_self ();
   worker->tid = l4_myself ();
   worker->next = NULL;
   worker->prevp = NULL;
 
-  /* When we are started up, we are supposed to listen as soon as
-     possible to the next incoming message.  The first time, we do
-     this without a timeout.  */
-  msg_tag = l4_xreceive (manager, &from);
+  if (__builtin_expect (async, 0))
+    {
+      /* We have to add ourselves to the free list and inform the
+         worker_alloc_async thread.  */
+      pthread_mutex_lock (&bucket->lock);
+
+      if (bucket->is_manager_waiting && !bucket->free_worker)
+        {
+          /* The manager is starving for worker threads.  */
+          pthread_cond_broadcast (&bucket->cond);
+        }
+      _hurd_cap_list_item_add (&bucket->free_worker, worker);
+
+      /* Notify the worker_alloc_async thread that we have started up
+         and added ourselves to the free list.  */
+      bucket->worker_alloc_state = _HURD_CAP_STATE_RED;
+
+      /* This will wake up the worker_alloc_async thread, but also the
+         manager in case it is blocked on getting a new worker
+         thread.  */
+      pthread_cond_broadcast (&bucket->cond);
+      pthread_mutex_unlock (&bucket->lock);
+
+      /* We do not know if we will be the current worker thread or
+         not, so we must wait with a timeout.  */
+      msg_tag = l4_xreceive_timeout (manager, timeout, &from);
+    }
+  else
+    {
+      /* When we are started up, we are supposed to listen as soon as
+         possible to the next incoming message.  When we know we are the
+         current worker thread, we do this without a timeout.  */
+      msg_tag = l4_xreceive (manager, &from);
+    }
 
   while (1)
     {
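The asynchronous start-up branch above is guarded by __builtin_expect (async, 0), the GCC builtin for branch-prediction hints: the second argument is the value the first expression is expected to have, so the compiler lays the asynchronous path out as the cold one. A stand-alone illustration of the builtin follows (hypothetical code, not taken from bucket-manage-mt.c); the hint only influences code layout, both branches behave identically.

#include <stdbool.h>
#include <stdio.h>

/* Wrapper in the style of the unlikely() macro used by many C projects.  */
#define unlikely(expr)  __builtin_expect ((expr), 0)

static void
startup (bool async)
{
  if (unlikely (async))
    /* Hinted cold: the compiler moves this out of the straight-line path.  */
    puts ("asynchronous start-up");
  else
    puts ("synchronous start-up");
}

int
main (void)
{
  startup (false);
  startup (true);
  return 0;
}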
# Line 377 (v1.8) / Line 408 (v1.9)  manage_mt_worker
 
           l4_word_t err_code = l4_error_code ();
           l4_word_t ipc_err = (err_code >> 1) & 0x7;
-          unsigned int current_worker_is_us;
 
           if (ipc_err == L4_IPC_CANCELED || ipc_err == L4_IPC_ABORTED)
             /* We have been canceled for shutdown.  */
# Line 671 (v1.8) / Line 701 (v1.9)  manage_mt_worker
 }
 
 
+/* A worker thread for RPC processing.  The behaviour of this function
+   is tightly integrated with the behaviour of the manager thread.  */
+static void *
+manage_mt_worker_sync (void *arg)
+{
+  return manage_mt_worker (arg, false);
+}
+
+
+/* A worker thread for RPC processing.  The behaviour of this function
+   is tightly integrated with the behaviour of the manager thread.  */
+static void *
+manage_mt_worker_async (void *arg)
+{
+  return manage_mt_worker (arg, true);
+}
+
+
 /* Return the next free worker thread.  If no free worker thread is
    available, create a new one.  If that fails, block until one
    becomes free.  If we are interrupted while blocking, return
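Since pthread start routines must have the type void *(*)(void *), the new async flag cannot be passed as a second argument directly, and it apparently cannot live in struct worker_info either, because the same info block is shared by every worker of the bucket; hence the two thin entry points above. A hypothetical alternative, sketched below with invented names (worker_arg, worker_trampoline, spawn_worker), would be a per-thread argument block. One reason to prefer the wrapper approach is that it needs no allocation on the thread-creation path.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

/* Stub standing in for the real worker function of bucket-manage-mt.c.  */
static void *
manage_mt_worker (void *info, bool async)
{
  (void) info;
  (void) async;
  return NULL;
}

/* Hypothetical per-thread argument block.  */
struct worker_arg
{
  void *info;                   /* the shared per-bucket information   */
  bool async;                   /* start-up mode for this thread only  */
};

/* Single pthread entry point; unpacks and frees the argument block.  */
static void *
worker_trampoline (void *arg)
{
  struct worker_arg a = *(struct worker_arg *) arg;

  free (arg);
  return manage_mt_worker (a.info, a.async);
}

/* Caller side: pack the flag instead of choosing between two entry points.  */
static int
spawn_worker (pthread_t *tid, void *info, bool async)
{
  struct worker_arg *a = malloc (sizeof *a);

  if (a == NULL)
    return EAGAIN;
  a->info = info;
  a->async = async;
  return pthread_create (tid, NULL, worker_trampoline, a);
}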
# Line 695 (v1.8) / Line 743 (v1.9)  manage_mt_get_next_worker
         err = EAGAIN;
       else
         err = pthread_create_from_l4_tid_np (worker_thread, NULL,
-                                             worker, manage_mt_worker, info);
+                                             worker, manage_mt_worker_sync,
+                                             info);
 
       if (!err)
         {
# Line 744 (v1.8) / Line 793 (v1.9)  manage_mt_get_next_worker
   return worker_item->tid;
 }
 
 
+/* A worker thread for allocating new worker threads.  Only used if
+   asynchronous worker thread allocation is requested.  This is only
+   necessary (and useful) for physmem, to break out of a potential
+   dead-lock with the task server.  */
+static void *
+worker_alloc_async (void *arg)
+{
+  struct worker_info *info = (struct worker_info *) arg;
+  hurd_cap_bucket_t bucket = info->bucket;
+  error_t err;
+
+  pthread_mutex_lock (&bucket->lock);
+  if (bucket->state == _HURD_CAP_STATE_BLACK)
+    {
+      pthread_mutex_unlock (&bucket->lock);
+      return NULL;
+    }
+
+  while (1)
+    {
+      err = hurd_cond_wait (&bucket->cond, &bucket->lock);
+      /* We ignore the error, as the only error that can occur is
+         ECANCELED, and only if the bucket state has gone to black for
+         shutdown.  */
+      if (bucket->state == _HURD_CAP_STATE_BLACK)
+        break;
+
+      if (bucket->worker_alloc_state == _HURD_CAP_STATE_GREEN)
+        {
+          l4_thread_id_t worker = l4_nilthread;
+          pthread_t worker_thread;
+          _hurd_cap_list_item_t worker_item;
+
+          pthread_mutex_unlock (&bucket->lock);
+
+          worker = pthread_pool_get_np ();
+          if (worker == l4_nilthread)
+            err = EAGAIN;
+          else
+            err = pthread_create_from_l4_tid_np (&worker_thread, NULL,
+                                                 worker,
+                                                 manage_mt_worker_async,
+                                                 info);
+          if (!err)
+            {
+              pthread_detach (worker_thread);
+
+              pthread_mutex_lock (&bucket->lock);
+              bucket->worker_alloc_state = _HURD_CAP_STATE_YELLOW;
+              /* We ignore any error, as the only error that can occur
+                 is ECANCELED, and only if the bucket state goes to
+                 black for shutdown.  But particularly in that case we
+                 want to wait until the thread has fully come up and
+                 entered the free list, so it's properly accounted for
+                 and will be canceled at shutdown by the manager.  */
+              while (bucket->worker_alloc_state == _HURD_CAP_STATE_YELLOW)
+                err = hurd_cond_wait (&bucket->cond, &bucket->lock);
+
+              /* Will be set by the started thread.  */
+              assert (bucket->worker_alloc_state == _HURD_CAP_STATE_RED);
+            }
+          else
+            {
+              pthread_mutex_lock (&bucket->lock);
+              bucket->worker_alloc_state = _HURD_CAP_STATE_RED;
+            }
+
+          if (bucket->state == _HURD_CAP_STATE_BLACK)
+            break;
+        }
+    }
+
+  bucket->worker_alloc_state = _HURD_CAP_STATE_BLACK;
+  pthread_mutex_unlock (&bucket->lock);
+
+  return NULL;
+}
+
+
 /* Start managing RPCs on the bucket BUCKET.  The ROOT capability
    object, which must be unlocked and have one reference throughout
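The allocation handshake added here runs through bucket->worker_alloc_state: the manager side (not shown in this diff) presumably sets it to _HURD_CAP_STATE_GREEN to request a thread, worker_alloc_async creates one and parks in _HURD_CAP_STATE_YELLOW, and the freshly started worker flips it back to _HURD_CAP_STATE_RED once it is on the free list (see the async branch of manage_mt_worker above). The stand-alone pthread sketch below mirrors that cycle with invented names, plain ints for the _HURD_CAP_STATE_* constants, and pthread_cond_wait instead of hurd_cond_wait; it also marks the request as in progress before spawning the thread, purely to keep the toy version race-free. It illustrates the idea, it is not the library's code.

#include <pthread.h>
#include <stdio.h>

enum { RED, YELLOW, GREEN };    /* stand-ins for the _HURD_CAP_STATE_* values */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int alloc_state = RED;

/* Plays the role of the freshly created asynchronous worker: register,
   then answer RED and wake whoever is waiting.  */
static void *
worker (void *arg)
{
  (void) arg;
  pthread_mutex_lock (&lock);
  puts ("worker: on the free list");
  alloc_state = RED;
  pthread_cond_broadcast (&cond);
  pthread_mutex_unlock (&lock);
  return NULL;
}

/* Plays the role of worker_alloc_async: wait for GREEN, create the
   worker, then wait in YELLOW until it has come up.  */
static void *
allocator (void *arg)
{
  pthread_t tid;

  (void) arg;
  pthread_mutex_lock (&lock);
  while (alloc_state != GREEN)
    pthread_cond_wait (&cond, &lock);
  /* Mark the request as in progress before the worker can possibly
     answer, so the RED answer is never overwritten.  */
  alloc_state = YELLOW;
  pthread_mutex_unlock (&lock);

  pthread_create (&tid, NULL, worker, NULL);
  pthread_detach (tid);

  pthread_mutex_lock (&lock);
  while (alloc_state == YELLOW)
    pthread_cond_wait (&cond, &lock);
  puts ("allocator: worker is up");
  pthread_mutex_unlock (&lock);
  return NULL;
}

int
main (void)
{
  pthread_t tid;

  pthread_create (&tid, NULL, allocator, NULL);

  /* Plays the role of the manager requesting a new worker thread.  */
  pthread_mutex_lock (&lock);
  alloc_state = GREEN;
  pthread_cond_broadcast (&cond);
  pthread_mutex_unlock (&lock);

  pthread_join (tid, NULL);
  return 0;
}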
# Line 779 (v1.8) / Line 908 (v1.9)  hurd_cap_bucket_manage_mt
   info.timeout = (worker_timeout_sec == 0) ? L4_NEVER
     : l4_time_period (UINT64_C (1000000) * worker_timeout_sec);
 
-  /* We never accept any map or grant items.  FIXME: For now, we also
-     do not accept any string buffer items.  */
-  l4_accept (L4_UNTYPED_WORDS_ACCEPTOR);
-
-  /* Because we do not accept any string items, we do not actually
-     need to set the Xfer timeouts.  But this is what we want to set
-     them to when we eventually do support string items.  */
-  l4_set_xfer_timeouts (l4_timeouts (L4_ZERO_TIME, L4_NEVER));
-
   /* We create the first worker thread ourselves, to catch any
      possible error at this stage and bail out properly if needed.  */
   worker = pthread_pool_get_np ();
   if (worker == l4_nilthread)
     return EAGAIN;
   err = pthread_create_from_l4_tid_np (&worker_thread, NULL,
-                                       worker, manage_mt_worker, &info);
+                                       worker, manage_mt_worker_sync, &info);
   if (err)
     return err;
   pthread_detach (worker_thread);
 
   pthread_mutex_lock (&bucket->lock);
+  if (bucket->is_worker_alloc_async)
+    {
+      /* Prevent creation of new worker threads initially.  */
+      bucket->worker_alloc_state = _HURD_CAP_STATE_RED;
+
+      /* Asynchronous worker thread allocation is requested.  */
+      err = pthread_create (&bucket->worker_alloc, NULL,
+                            worker_alloc_async, &info);
+
+      if (err)
+        {
+          /* Cancel the worker thread.  */
+          pthread_cancel (worker_thread);
+          hurd_cond_wait (&bucket->cond, &bucket->lock);
+          pthread_mutex_unlock (&bucket->lock);
+          return err;
+        }
+    }
   bucket->manager = pthread_self ();
   bucket->is_managed = true;
   bucket->is_manager_waiting = false;
# Line 810 (v1.8) / Line 948 (v1.9)  hurd_cap_bucket_manage_mt
       l4_thread_id_t from = l4_anythread;
       l4_msg_tag_t msg_tag;
 
+      /* We never accept any map or grant items.  FIXME: For now, we
+         also do not accept any string buffer items.  */
+      l4_accept (L4_UNTYPED_WORDS_ACCEPTOR);
+
+      /* Because we do not accept any string items, we do not actually
+         need to set the Xfer timeouts.  But this is what we want to set
+         them to when we eventually do support string items.  */
+      l4_set_xfer_timeouts (l4_timeouts (L4_ZERO_TIME, L4_ZERO_TIME));
+
       /* FIXME: Make sure we have enabled deferred cancellation, and
          use an L4 ipc() stub that supports that.  In fact, this must
          be true for most of the IPC operations in this file.  */
# Line 861 (v1.8) / Line 1008 (v1.9)  hurd_cap_bucket_manage_mt
           /* FIXME: Make sure to use a non-cancellable l4_lcall that
              does preserve any pending cancellation flag for this
              thread.  Alternatively, we can handle cancellation here
-             (reply ECANCEL to user, and enter shutdown sequence.  */
+             (reply ECANCELED to user, and enter shutdown
+             sequence.  */
           msg_tag = l4_lcall (worker);
           assert (l4_ipc_succeeded (msg_tag));
 
# Line 885 (v1.8) / Line 1033 (v1.9)  hurd_cap_bucket_manage_mt
             }
         }
     }
 
   /* At this point, bucket->lock is held.  Start the shutdown
      sequence.  */
   assert (!bucket->pending_rpcs);
 
-  /* First force all the waiting rpcs onto the free list.  They will
+  /* First shutdown the allocator thread, if any.  */
+  if (bucket->is_worker_alloc_async)
+    {
+      pthread_cancel (bucket->worker_alloc);
+      pthread_join (bucket->worker_alloc, NULL);
+    }
+
+  /* Now force all the waiting rpcs onto the free list.  They will
      have noticed the state change to _HURD_CAP_STATE_BLACK already,
      we just have to block until the last one wakes us up.  */
   while (bucket->waiting_rpcs)
     hurd_cond_wait (&bucket->cond, &bucket->lock);
 
   /* Cancel the free workers.  */
   item = bucket->free_worker;
   while (item)
# Line 904 (v1.8) / Line 1059 (v1.9)  hurd_cap_bucket_manage_mt
       item = item->next;
     }
 
+  /* Request the condition to be broadcasted.  */
   bucket->is_manager_waiting = true;
 
   while (bucket->free_worker)
# Line 919 (v1.8) / Line 1075 (v1.9)  hurd_cap_bucket_manage_mt
   if (worker != l4_nilthread)
     {
       pthread_cancel (worker_thread);
-      while (bucket->free_worker)
-        {
-          /* We ignore cancellations at this point, because we are already
-             shutting down.  */
-          hurd_cond_wait (&bucket->cond, &bucket->lock);
-        }
+      hurd_cond_wait (&bucket->cond, &bucket->lock);
     }
 
   bucket->is_managed = false;
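The shutdown path keeps reusing one pattern: pthread_cancel a thread that is parked in a cancellable wait, then sleep on bucket->cond until that thread has taken itself off the relevant list (the allocator thread is the exception; it is simply joined). A stand-alone pthread illustration of that cancel-and-drain pattern, with invented names and pthread_cond_wait standing in for hurd_cond_wait:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool registered;         /* stands in for membership on a free list */

/* Cancellation cleanup handler.  POSIX re-acquires the mutex before
   cleanup handlers run when a thread is canceled inside
   pthread_cond_wait, so it is safe to touch the shared state here.  */
static void
unregister (void *arg)
{
  (void) arg;
  registered = false;
  pthread_cond_broadcast (&cond);       /* wake the thread driving shutdown */
  pthread_mutex_unlock (&lock);
}

static void *
idle_worker (void *arg)
{
  (void) arg;
  pthread_mutex_lock (&lock);
  registered = true;
  pthread_cond_broadcast (&cond);
  pthread_cleanup_push (unregister, NULL);
  while (1)                             /* idle until canceled */
    pthread_cond_wait (&cond, &lock);
  pthread_cleanup_pop (1);              /* not reached; keeps push/pop paired */
  return NULL;
}

int
main (void)
{
  pthread_t tid;

  pthread_create (&tid, NULL, idle_worker, NULL);

  /* Wait until the worker has registered itself ...  */
  pthread_mutex_lock (&lock);
  while (!registered)
    pthread_cond_wait (&cond, &lock);
  pthread_mutex_unlock (&lock);

  /* ... then cancel it and drain, much like the manager does with the
     free workers above.  */
  pthread_cancel (tid);
  pthread_mutex_lock (&lock);
  while (registered)
    pthread_cond_wait (&cond, &lock);
  pthread_mutex_unlock (&lock);

  pthread_join (tid, NULL);
  puts ("worker gone");
  return 0;
}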
