10098 task_alloc() in libfakekernel gets KM_NOSLEEP test wrong
The current task_alloc() in libfakekernel, with the incorrect flag test at line 103:

  83         int             tq_nthreads;
  84         int             tq_nalloc;
  85         int             tq_minalloc;
  86         int             tq_maxalloc;
  87         kcondvar_t      tq_maxalloc_cv;
  88         int             tq_maxalloc_wait;
  89         taskq_ent_t     *tq_freelist;
  90         taskq_ent_t     tq_task;
  91 };
  92 
  93 static taskq_ent_t *
  94 task_alloc(taskq_t *tq, int tqflags)
  95 {
  96         taskq_ent_t *t;
  97         int rv;
  98 
  99 again:  if ((t = tq->tq_freelist) != NULL && tq->tq_nalloc >= tq->tq_minalloc) {
 100                 tq->tq_freelist = t->tqent_next;
 101         } else {
 102                 if (tq->tq_nalloc >= tq->tq_maxalloc) {
 103                         if (!(tqflags & KM_SLEEP))
 104                                 return (NULL);
 105 
 106                         /*
 107                          * We don't want to exceed tq_maxalloc, but we can't
 108                          * wait for other tasks to complete (and thus free up
 109                          * task structures) without risking deadlock with
 110                          * the caller.  So, we just delay for one second
 111                          * to throttle the allocation rate. If we have tasks
 112                          * complete before one second timeout expires then
 113                          * taskq_ent_free will signal us and we will
 114                          * immediately retry the allocation.
 115                          */
 116                         tq->tq_maxalloc_wait++;
 117                         rv = cv_timedwait(&tq->tq_maxalloc_cv,
 118                             &tq->tq_lock, ddi_get_lbolt() + hz);
 119                         tq->tq_maxalloc_wait--;
 120                         if (rv > 0)
 121                                 goto again;             /* signaled */
 122                 }
 123                 mutex_exit(&tq->tq_lock);
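For reference, a minimal standalone sketch of the flag test, not taken from the fix itself; it assumes the usual <sys/kmem.h> values, KM_SLEEP == 0x0000 and KM_NOSLEEP == 0x0001. Because KM_SLEEP is zero, the existing test !(tqflags & KM_SLEEP) is true for every caller, so task_alloc() gives up at the tq_maxalloc limit even when the caller asked to sleep:

        /* Standalone illustration only -- not libfakekernel source. */
        #include <stdio.h>

        #define KM_SLEEP        0x0000  /* assumed value: caller may block */
        #define KM_NOSLEEP      0x0001  /* assumed value: caller must not block */

        /* The test as it stands: always true, since KM_SLEEP is 0. */
        static const char *
        old_test(int tqflags)
        {
                return (!(tqflags & KM_SLEEP) ? "return NULL" : "wait");
        }

        /* The corrected test: only KM_NOSLEEP callers give up. */
        static const char *
        new_test(int tqflags)
        {
                return ((tqflags & KM_NOSLEEP) ? "return NULL" : "wait");
        }

        int
        main(void)
        {
                printf("KM_SLEEP caller:   old=%s  new=%s\n",
                    old_test(KM_SLEEP), new_test(KM_SLEEP));
                printf("KM_NOSLEEP caller: old=%s  new=%s\n",
                    old_test(KM_NOSLEEP), new_test(KM_NOSLEEP));
                return (0);
        }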

The fix is to test KM_NOSLEEP directly, so that only callers that cannot block get the NULL return once tq_nalloc reaches tq_maxalloc; KM_SLEEP callers fall through to the one-second throttle below. With the fix applied:

  83         int             tq_nthreads;
  84         int             tq_nalloc;
  85         int             tq_minalloc;
  86         int             tq_maxalloc;
  87         kcondvar_t      tq_maxalloc_cv;
  88         int             tq_maxalloc_wait;
  89         taskq_ent_t     *tq_freelist;
  90         taskq_ent_t     tq_task;
  91 };
  92 
  93 static taskq_ent_t *
  94 task_alloc(taskq_t *tq, int tqflags)
  95 {
  96         taskq_ent_t *t;
  97         int rv;
  98 
  99 again:  if ((t = tq->tq_freelist) != NULL && tq->tq_nalloc >= tq->tq_minalloc) {
 100                 tq->tq_freelist = t->tqent_next;
 101         } else {
 102                 if (tq->tq_nalloc >= tq->tq_maxalloc) {
 103                         if (tqflags & KM_NOSLEEP)
 104                                 return (NULL);
 105 
 106                         /*
 107                          * We don't want to exceed tq_maxalloc, but we can't
 108                          * wait for other tasks to complete (and thus free up
 109                          * task structures) without risking deadlock with
 110                          * the caller.  So, we just delay for one second
 111                          * to throttle the allocation rate. If we have tasks
 112                          * complete before one second timeout expires then
 113                          * taskq_ent_free will signal us and we will
 114                          * immediately retry the allocation.
 115                          */
 116                         tq->tq_maxalloc_wait++;
 117                         rv = cv_timedwait(&tq->tq_maxalloc_cv,
 118                             &tq->tq_lock, ddi_get_lbolt() + hz);
 119                         tq->tq_maxalloc_wait--;
 120                         if (rv > 0)
 121                                 goto again;             /* signaled */
 122                 }
 123                 mutex_exit(&tq->tq_lock);
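
For completeness, a hedged sketch of what the corrected behaviour means for dispatchers. It assumes the standard taskq interface (taskq_dispatch() returning a taskqid_t that is 0 on failure, with TQ_SLEEP/TQ_NOSLEEP mapping to the kmem flags); the helper and task names below are made up for illustration:

        /* Illustrative caller -- assumes the standard <sys/taskq.h> interface. */
        #include <sys/taskq.h>

        static void
        noop_task(void *arg)                    /* hypothetical task function */
        {
                (void) arg;
        }

        static void
        try_dispatch(taskq_t *tq)               /* hypothetical helper */
        {
                /*
                 * A TQ_NOSLEEP dispatch may legitimately fail once tq_nalloc
                 * has reached tq_maxalloc, so the caller must check the return.
                 */
                if (taskq_dispatch(tq, noop_task, NULL, TQ_NOSLEEP) == 0) {
                        /* back off, queue the work elsewhere, or retry later */
                }

                /*
                 * With the fix, a TQ_SLEEP dispatch throttles in task_alloc()
                 * instead of spuriously returning NULL, so it is expected to
                 * succeed (possibly after waiting).
                 */
                (void) taskq_dispatch(tq, noop_task, NULL, TQ_SLEEP);
        }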