10092 sysevent_evc_control() dereferences pointer before checking for NULL
    
--- old/usr/src/uts/common/os/evchannels.c
+++ new/usr/src/uts/common/os/evchannels.c
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  23   23   */
  24   24  
  25   25  /*
       26 + * Copyright (c) 2018, Joyent, Inc.
       27 + */
       28 +
       29 +/*
  26   30   * This file contains the source of the general purpose event channel extension
  27   31   * to the sysevent framework. This implementation is made up mainly of four
  28   32   * layers of functionality: the event queues (evch_evq_*()), the handling of
  29   33   * channels (evch_ch*()), the kernel interface (sysevent_evc_*()) and the
  30   34   * interface for the sysevent pseudo driver (evch_usr*()).
  31   35   * Libsysevent.so uses the pseudo driver sysevent's ioctl to access the event
  32   36   * channel extensions. The driver in turn uses the evch_usr*() functions below.
  33   37   *
  34   38   * The interfaces for user land and kernel are declared in sys/sysevent.h
  35   39   * Internal data structures for event channels are defined in
  36   40   * sys/sysevent_impl.h.
  37   41   *
  38   42   * The basic data structure for an event channel is of type evch_chan_t.
  39   43   * All channels are maintained by a list named evch_list. The list head
  40   44   * is of type evch_dlist_t.
  41   45   */
  42   46  
  43   47  #include <sys/types.h>
  44   48  #include <sys/errno.h>
  45   49  #include <sys/stropts.h>
  46   50  #include <sys/debug.h>
  47   51  #include <sys/ddi.h>
  48   52  #include <sys/vmem.h>
  49   53  #include <sys/cmn_err.h>
  50   54  #include <sys/callb.h>
  51   55  #include <sys/sysevent.h>
  52   56  #include <sys/sysevent_impl.h>
  53   57  #include <sys/sysmacros.h>
  54   58  #include <sys/disp.h>
  55   59  #include <sys/atomic.h>
  56   60  #include <sys/door.h>
  57   61  #include <sys/zone.h>
  58   62  #include <sys/sdt.h>
  59   63  
  60   64  /* Back-off delay for door_ki_upcall */
  61   65  #define EVCH_MIN_PAUSE  8
  62   66  #define EVCH_MAX_PAUSE  128
  63   67  
  64   68  #define GEVENT(ev)      ((evch_gevent_t *)((char *)ev - \
  65   69                              offsetof(evch_gevent_t, ge_payload)))
  66   70  
  67   71  #define EVCH_EVQ_EVCOUNT(x)     ((&(x)->eq_eventq)->sq_count)
  68   72  #define EVCH_EVQ_HIGHWM(x)      ((&(x)->eq_eventq)->sq_highwm)
  69   73  
  70   74  #define CH_HOLD_PEND            1
  71   75  #define CH_HOLD_PEND_INDEF      2
  72   76  
  73   77  struct evch_globals {
  74   78          evch_dlist_t evch_list;
  75   79          kmutex_t evch_list_lock;
  76   80  };
  77   81  
  78   82  /* Variables used by event channel routines */
  79   83  static int              evq_initcomplete = 0;
  80   84  static zone_key_t       evch_zone_key;
  81   85  static uint32_t         evch_channels_max;
  82   86  static uint32_t         evch_bindings_max = EVCH_MAX_BINDS_PER_CHANNEL;
  83   87  static uint32_t         evch_events_max;
  84   88  
  85   89  static void evch_evq_unsub(evch_eventq_t *, evch_evqsub_t *);
  86   90  static void evch_evq_destroy(evch_eventq_t *);
  87   91  
  88   92  /*
  89   93   * List handling. These functions handle a doubly linked list. The list has
  90   94   * to be protected by the calling functions. evch_dlist_t is the list head.
   91   95   * Every node of the list has to embed an evch_dlelem_t data type in its data
  92   96   * structure as its first element.
  93   97   *
  94   98   * evch_dl_init         - Initialize list head
  95   99   * evch_dl_fini         - Terminate list handling
  96  100   * evch_dl_is_init      - Returns one if list is initialized
  97  101   * evch_dl_add          - Add element to end of list
  98  102   * evch_dl_del          - Remove given element from list
  99  103   * evch_dl_search       - Lookup element in list
 100  104   * evch_dl_getnum       - Get number of elements in list
  101  105   * evch_dl_next         - Get next element of list
 102  106   */
 103  107  
 104  108  static void
 105  109  evch_dl_init(evch_dlist_t *hp)
 106  110  {
 107  111          hp->dh_head.dl_prev = hp->dh_head.dl_next = &hp->dh_head;
 108  112          hp->dh_count = 0;
 109  113  }
 110  114  
 111  115  /*
 112  116   * Assumes that list is empty.
 113  117   */
 114  118  static void
 115  119  evch_dl_fini(evch_dlist_t *hp)
 116  120  {
 117  121          hp->dh_head.dl_prev = hp->dh_head.dl_next = NULL;
 118  122  }
 119  123  
 120  124  static int
 121  125  evch_dl_is_init(evch_dlist_t *hp)
 122  126  {
 123  127          return (hp->dh_head.dl_next != NULL ? 1 : 0);
 124  128  }
 125  129  
 126  130  /*
 127  131   * Add an element at the end of the list.
 128  132   */
 129  133  static void
 130  134  evch_dl_add(evch_dlist_t *hp, evch_dlelem_t *el)
 131  135  {
 132  136          evch_dlelem_t   *x = hp->dh_head.dl_prev;
 133  137          evch_dlelem_t   *y = &hp->dh_head;
 134  138  
 135  139          x->dl_next = el;
 136  140          y->dl_prev = el;
 137  141          el->dl_next = y;
 138  142          el->dl_prev = x;
 139  143          hp->dh_count++;
 140  144  }
 141  145  
 142  146  /*
 143  147   * Remove arbitrary element out of dlist.
 144  148   */
 145  149  static void
 146  150  evch_dl_del(evch_dlist_t *hp, evch_dlelem_t *p)
 147  151  {
 148  152          ASSERT(hp->dh_count > 0 && p != &hp->dh_head);
 149  153          p->dl_prev->dl_next = p->dl_next;
 150  154          p->dl_next->dl_prev = p->dl_prev;
 151  155          p->dl_prev = NULL;
 152  156          p->dl_next = NULL;
 153  157          hp->dh_count--;
 154  158  }
 155  159  
 156  160  /*
 157  161   * Search an element in a list. Caller provides comparison callback function.
 158  162   */
 159  163  static evch_dlelem_t *
 160  164  evch_dl_search(evch_dlist_t *hp, int (*cmp)(evch_dlelem_t *, char *), char *s)
 161  165  {
 162  166          evch_dlelem_t *p;
 163  167  
 164  168          for (p = hp->dh_head.dl_next; p != &hp->dh_head; p = p->dl_next) {
 165  169                  if (cmp(p, s) == 0) {
 166  170                          return (p);
 167  171                  }
 168  172          }
 169  173          return (NULL);
 170  174  }
 171  175  
 172  176  /*
 173  177   * Return number of elements in the list.
 174  178   */
 175  179  static int
 176  180  evch_dl_getnum(evch_dlist_t *hp)
 177  181  {
 178  182          return (hp->dh_count);
 179  183  }
 180  184  
 181  185  /*
  182  186   * Find next element of an evch_dlist_t list. Find first element if el == NULL.
 183  187   * Returns NULL if end of list is reached.
 184  188   */
 185  189  static void *
 186  190  evch_dl_next(evch_dlist_t *hp, void *el)
 187  191  {
 188  192          evch_dlelem_t *ep = (evch_dlelem_t *)el;
 189  193  
 190  194          if (hp->dh_count == 0) {
 191  195                  return (NULL);
 192  196          }
 193  197          if (ep == NULL) {
 194  198                  return (hp->dh_head.dl_next);
 195  199          }
 196  200          if ((ep = ep->dl_next) == (evch_dlelem_t *)hp) {
 197  201                  return (NULL);
 198  202          }
 199  203          return ((void *)ep);
 200  204  }
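
A minimal usage sketch of the list routines above (illustrative only; the my_node_t client type and its members are hypothetical): a client structure embeds the evch_dlelem_t as its first member, exactly as evch_chan_t and evch_subd_t do with their ch_link/sd_link members later in this file.

    typedef struct my_node {
            evch_dlelem_t   mn_link;        /* must be the first member */
            int             mn_data;
    } my_node_t;

    evch_dlist_t    head;
    my_node_t       *np, *it;

    evch_dl_init(&head);
    np = kmem_zalloc(sizeof (my_node_t), KM_SLEEP);
    evch_dl_add(&head, &np->mn_link);

    /* evch_dl_next() returns the first element when passed NULL */
    for (it = evch_dl_next(&head, NULL); it != NULL;
        it = evch_dl_next(&head, it)) {
            /* use it->mn_data */
    }

    evch_dl_del(&head, &np->mn_link);
    kmem_free(np, sizeof (my_node_t));
    evch_dl_fini(&head);                    /* list must be empty here */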
 201  205  
 202  206  /*
 203  207   * Queue handling routines. Mutexes have to be entered previously.
 204  208   *
 205  209   * evch_q_init  - Initialize queue head
 206  210   * evch_q_in    - Put element into queue
 207  211   * evch_q_out   - Get element out of queue
 208  212   * evch_q_next  - Iterate over the elements of a queue
 209  213   */
 210  214  static void
 211  215  evch_q_init(evch_squeue_t *q)
 212  216  {
 213  217          q->sq_head = NULL;
 214  218          q->sq_tail = (evch_qelem_t *)q;
 215  219          q->sq_count = 0;
 216  220          q->sq_highwm = 0;
 217  221  }
 218  222  
 219  223  /*
 220  224   * Put element into the queue q
 221  225   */
 222  226  static void
 223  227  evch_q_in(evch_squeue_t *q, evch_qelem_t *el)
 224  228  {
 225  229          q->sq_tail->q_next = el;
 226  230          el->q_next = NULL;
 227  231          q->sq_tail = el;
 228  232          q->sq_count++;
 229  233          if (q->sq_count > q->sq_highwm) {
 230  234                  q->sq_highwm = q->sq_count;
 231  235          }
 232  236  }
 233  237  
 234  238  /*
 235  239   * Returns NULL if queue is empty.
 236  240   */
 237  241  static evch_qelem_t *
 238  242  evch_q_out(evch_squeue_t *q)
 239  243  {
 240  244          evch_qelem_t *el;
 241  245  
 242  246          if ((el = q->sq_head) != NULL) {
 243  247                  q->sq_head = el->q_next;
 244  248                  q->sq_count--;
 245  249                  if (q->sq_head == NULL) {
 246  250                          q->sq_tail = (evch_qelem_t *)q;
 247  251                  }
 248  252          }
 249  253          return (el);
 250  254  }
 251  255  
 252  256  /*
 253  257   * Returns element after *el or first if el == NULL. NULL is returned
 254  258   * if queue is empty or *el points to the last element in the queue.
 255  259   */
 256  260  static evch_qelem_t *
 257  261  evch_q_next(evch_squeue_t *q, evch_qelem_t *el)
 258  262  {
 259  263          if (el == NULL)
 260  264                  return (q->sq_head);
 261  265          return (el->q_next);
 262  266  }
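
In short, evch_squeue_t is a singly linked FIFO: evch_q_in() appends at sq_tail, evch_q_out() removes from sq_head, and sq_highwm records the deepest the queue has been. A small sketch (the queued object is hypothetical; the real callers below queue evch_gevent_t references):

    evch_squeue_t   q;
    evch_qelem_t    *qep;

    evch_q_init(&q);

    qep = kmem_alloc(sizeof (evch_qelem_t), KM_SLEEP);
    qep->q_objref = my_object;              /* hypothetical object pointer */
    qep->q_objsize = sizeof (evch_qelem_t);
    evch_q_in(&q, qep);                     /* append at the tail */

    while ((qep = evch_q_out(&q)) != NULL) {        /* drain from the head */
            /* process qep->q_objref */
            kmem_free(qep, qep->q_objsize);
    }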
 263  267  
 264  268  /*
 265  269   * Event queue handling functions. An event queue is the basic building block
 266  270   * of an event channel. One event queue makes up the publisher-side event queue.
 267  271   * Further event queues build the per-subscriber queues of an event channel.
  268  272   * Each queue is associated with an event delivery thread.
 269  273   * These functions support a two-step initialization. First step, when kernel
 270  274   * memory is ready and second when threads are ready.
  271  275   * Events consist of an administrative evch_gevent_t structure with the event
 272  276   * data appended as variable length payload.
 273  277   * The internal interface functions for the event queue handling are:
 274  278   *
 275  279   * evch_evq_create      - create an event queue
 276  280   * evch_evq_thrcreate   - create thread for an event queue.
 277  281   * evch_evq_destroy     - delete an event queue
 278  282   * evch_evq_sub         - Subscribe to event delivery from an event queue
 279  283   * evch_evq_unsub       - Unsubscribe
 280  284   * evch_evq_pub         - Post an event into an event queue
 281  285   * evch_evq_stop        - Put delivery thread on hold
 282  286   * evch_evq_continue    - Resume event delivery thread
 283  287   * evch_evq_status      - Return status of delivery thread, running or on hold
 284  288   * evch_evq_evzalloc    - Allocate an event structure
 285  289   * evch_evq_evfree      - Free an event structure
 286  290   * evch_evq_evadd_dest  - Add a destructor function to an event structure
  287  291   * evch_evq_evnext      - Iterate over events non-destructively
 288  292   */
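
Taken together, these routines form a small publish/subscribe engine. A condensed sketch of one life cycle (illustrative only; my_deliver and my_payload_t are hypothetical, everything else is listed above):

    static int
    my_deliver(void *ev, void *cookie)      /* deliver_f-style callback */
    {
            /* consume the payload published below */
            return (EVQ_CONT);
    }

    evch_eventq_t   *eqp;
    evch_evqsub_t   *sub;
    void            *ev;

    eqp = evch_evq_create();                /* queue plus delivery thread */
    sub = evch_evq_sub(eqp, NULL, NULL, my_deliver, NULL);

    if ((ev = evch_evq_evzalloc(sizeof (my_payload_t), EVCH_NOSLEEP))
        != NULL) {
            /* fill in the payload at *ev, then hand it to the queue */
            if (evch_evq_pub(eqp, ev, EVCH_NOSLEEP) != 0)
                    evch_evq_evfree(ev);    /* never queued; free it here */
    }

    evch_evq_unsub(eqp, sub);
    evch_evq_destroy(eqp);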
 289  293  
 290  294  /*ARGSUSED*/
 291  295  static void *
 292  296  evch_zoneinit(zoneid_t zoneid)
 293  297  {
 294  298          struct evch_globals *eg;
 295  299  
 296  300          eg = kmem_zalloc(sizeof (*eg), KM_SLEEP);
 297  301          evch_dl_init(&eg->evch_list);
 298  302          return (eg);
 299  303  }
 300  304  
 301  305  /*ARGSUSED*/
 302  306  static void
 303  307  evch_zonefree(zoneid_t zoneid, void *arg)
 304  308  {
 305  309          struct evch_globals *eg = arg;
 306  310          evch_chan_t *chp;
 307  311          evch_subd_t *sdp;
 308  312  
 309  313          mutex_enter(&eg->evch_list_lock);
 310  314  
 311  315          /*
 312  316           * Keep picking the head element off the list until there are no
 313  317           * more.
 314  318           */
 315  319          while ((chp = evch_dl_next(&eg->evch_list, NULL)) != NULL) {
 316  320  
 317  321                  /*
 318  322                   * Since all processes are gone, all bindings should be gone,
 319  323                   * and only channels with SUB_KEEP subscribers should remain.
 320  324                   */
 321  325                  mutex_enter(&chp->ch_mutex);
 322  326                  ASSERT(chp->ch_bindings == 0);
 323  327                  ASSERT(evch_dl_getnum(&chp->ch_subscr) != 0 ||
 324  328                      chp->ch_holdpend == CH_HOLD_PEND_INDEF);
 325  329  
 326  330                  /* Forcibly unsubscribe each remaining subscription */
 327  331                  while ((sdp = evch_dl_next(&chp->ch_subscr, NULL)) != NULL) {
 328  332                          /*
 329  333                           * We should only be tearing down persistent
 330  334                           * subscribers at this point, since all processes
 331  335                           * from this zone are gone.
 332  336                           */
 333  337                          ASSERT(sdp->sd_active == 0);
 334  338                          ASSERT((sdp->sd_persist & EVCH_SUB_KEEP) != 0);
 335  339                          /*
 336  340                           * Disconnect subscriber queue from main event queue.
 337  341                           */
 338  342                          evch_evq_unsub(chp->ch_queue, sdp->sd_msub);
 339  343  
 340  344                          /* Destruct per subscriber queue */
 341  345                          evch_evq_unsub(sdp->sd_queue, sdp->sd_ssub);
 342  346                          evch_evq_destroy(sdp->sd_queue);
 343  347                          /*
 344  348                           * Eliminate the subscriber data from channel list.
 345  349                           */
 346  350                          evch_dl_del(&chp->ch_subscr, &sdp->sd_link);
 347  351                          kmem_free(sdp->sd_classname, sdp->sd_clnsize);
 348  352                          kmem_free(sdp->sd_ident, strlen(sdp->sd_ident) + 1);
 349  353                          kmem_free(sdp, sizeof (evch_subd_t));
 350  354                  }
 351  355  
 352  356                  /* Channel must now have no subscribers */
 353  357                  ASSERT(evch_dl_getnum(&chp->ch_subscr) == 0);
 354  358  
 355  359                  /* Just like unbind */
 356  360                  mutex_exit(&chp->ch_mutex);
 357  361                  evch_dl_del(&eg->evch_list, &chp->ch_link);
 358  362                  evch_evq_destroy(chp->ch_queue);
 359  363                  mutex_destroy(&chp->ch_mutex);
 360  364                  mutex_destroy(&chp->ch_pubmx);
 361  365                  cv_destroy(&chp->ch_pubcv);
 362  366                  kmem_free(chp->ch_name, chp->ch_namelen);
 363  367                  kmem_free(chp, sizeof (evch_chan_t));
 364  368          }
 365  369  
 366  370          mutex_exit(&eg->evch_list_lock);
 367  371          /* all channels should now be gone */
 368  372          ASSERT(evch_dl_getnum(&eg->evch_list) == 0);
 369  373          kmem_free(eg, sizeof (*eg));
 370  374  }
 371  375  
 372  376  /*
 373  377   * Frees evch_gevent_t structure including the payload, if the reference count
 374  378   * drops to or below zero. Below zero happens when the event is freed
  375  379   * without being queued into a queue.
 376  380   */
 377  381  static void
 378  382  evch_gevent_free(evch_gevent_t *evp)
 379  383  {
 380  384          int32_t refcnt;
 381  385  
 382  386          refcnt = (int32_t)atomic_dec_32_nv(&evp->ge_refcount);
 383  387          if (refcnt <= 0) {
 384  388                  if (evp->ge_destruct != NULL) {
 385  389                          evp->ge_destruct((void *)&(evp->ge_payload),
 386  390                              evp->ge_dstcookie);
 387  391                  }
 388  392                  kmem_free(evp, evp->ge_size);
 389  393          }
 390  394  }
 391  395  
 392  396  /*
  393  397   * Deliver is called for every subscription to the current event.
 394  398   * It calls the registered filter function and then the registered delivery
 395  399   * callback routine. Returns 0 on success. The callback routine returns
 396  400   * EVQ_AGAIN or EVQ_SLEEP in case the event could not be delivered.
 397  401   */
 398  402  static int
 399  403  evch_deliver(evch_evqsub_t *sp, evch_gevent_t *ep)
 400  404  {
 401  405          void            *uep = &ep->ge_payload;
 402  406          int             res = EVQ_DELIVER;
 403  407  
 404  408          if (sp->su_filter != NULL) {
 405  409                  res = sp->su_filter(uep, sp->su_fcookie);
 406  410          }
 407  411          if (res == EVQ_DELIVER) {
 408  412                  return (sp->su_callb(uep, sp->su_cbcookie));
 409  413          }
 410  414          return (0);
 411  415  }
 412  416  
 413  417  /*
 414  418   * Holds event delivery in case of eq_holdmode set or in case the
 415  419   * event queue is empty. Mutex must be held when called.
 416  420   * Wakes up a thread waiting for the delivery thread reaching the hold mode.
  417  421   * Wakes up a thread waiting for the delivery thread to reach the hold mode.
 418  422  static void
 419  423  evch_delivery_hold(evch_eventq_t *eqp, callb_cpr_t *cpip)
 420  424  {
 421  425          if (eqp->eq_tabortflag == 0) {
 422  426                  do {
 423  427                          if (eqp->eq_holdmode) {
 424  428                                  cv_signal(&eqp->eq_onholdcv);
 425  429                          }
 426  430                          CALLB_CPR_SAFE_BEGIN(cpip);
 427  431                          cv_wait(&eqp->eq_thrsleepcv, &eqp->eq_queuemx);
 428  432                          CALLB_CPR_SAFE_END(cpip, &eqp->eq_queuemx);
 429  433                  } while (eqp->eq_holdmode);
 430  434          }
 431  435  }
 432  436  
 433  437  /*
 434  438   * Event delivery thread. Enumerates all subscribers and calls evch_deliver()
 435  439   * for each one.
 436  440   */
 437  441  static void
 438  442  evch_delivery_thr(evch_eventq_t *eqp)
 439  443  {
 440  444          evch_qelem_t    *qep;
 441  445          callb_cpr_t     cprinfo;
 442  446          int             res;
 443  447          evch_evqsub_t   *sub;
 444  448          int             deltime;
 445  449          int             repeatcount;
 446  450          char            thnam[32];
 447  451  
 448  452          (void) snprintf(thnam, sizeof (thnam), "sysevent_chan-%d",
 449  453              (int)eqp->eq_thrid);
 450  454          CALLB_CPR_INIT(&cprinfo, &eqp->eq_queuemx, callb_generic_cpr, thnam);
 451  455          mutex_enter(&eqp->eq_queuemx);
 452  456          while (eqp->eq_tabortflag == 0) {
 453  457                  while (eqp->eq_holdmode == 0 && eqp->eq_tabortflag == 0 &&
 454  458                      (qep = evch_q_out(&eqp->eq_eventq)) != NULL) {
 455  459  
 456  460                          /* Filter and deliver event to all subscribers */
 457  461                          deltime = EVCH_MIN_PAUSE;
 458  462                          repeatcount = EVCH_MAX_TRY_DELIVERY;
 459  463                          eqp->eq_curevent = qep->q_objref;
 460  464                          sub = evch_dl_next(&eqp->eq_subscr, NULL);
 461  465                          while (sub != NULL) {
 462  466                                  eqp->eq_dactive = 1;
 463  467                                  mutex_exit(&eqp->eq_queuemx);
 464  468                                  res = evch_deliver(sub, qep->q_objref);
 465  469                                  mutex_enter(&eqp->eq_queuemx);
 466  470                                  eqp->eq_dactive = 0;
 467  471                                  cv_signal(&eqp->eq_dactivecv);
 468  472                                  switch (res) {
 469  473                                  case EVQ_SLEEP:
 470  474                                          /*
 471  475                                           * Wait for subscriber to return.
 472  476                                           */
 473  477                                          eqp->eq_holdmode = 1;
 474  478                                          evch_delivery_hold(eqp, &cprinfo);
 475  479                                          if (eqp->eq_tabortflag) {
 476  480                                                  break;
 477  481                                          }
 478  482                                          continue;
 479  483                                  case EVQ_AGAIN:
 480  484                                          CALLB_CPR_SAFE_BEGIN(&cprinfo);
 481  485                                          mutex_exit(&eqp->eq_queuemx);
 482  486                                          delay(deltime);
 483  487                                          deltime =
 484  488                                              deltime > EVCH_MAX_PAUSE ?
 485  489                                              deltime : deltime << 1;
 486  490                                          mutex_enter(&eqp->eq_queuemx);
 487  491                                          CALLB_CPR_SAFE_END(&cprinfo,
 488  492                                              &eqp->eq_queuemx);
 489  493                                          if (repeatcount-- > 0) {
 490  494                                                  continue;
 491  495                                          }
 492  496                                          break;
 493  497                                  }
 494  498                                  if (eqp->eq_tabortflag) {
 495  499                                          break;
 496  500                                  }
 497  501                                  sub = evch_dl_next(&eqp->eq_subscr, sub);
 498  502                                  repeatcount = EVCH_MAX_TRY_DELIVERY;
 499  503                          }
 500  504                          eqp->eq_curevent = NULL;
 501  505  
 502  506                          /* Free event data and queue element */
 503  507                          evch_gevent_free((evch_gevent_t *)qep->q_objref);
 504  508                          kmem_free(qep, qep->q_objsize);
 505  509                  }
 506  510  
 507  511                  /* Wait for next event or end of hold mode if set */
 508  512                  evch_delivery_hold(eqp, &cprinfo);
 509  513          }
 510  514          CALLB_CPR_EXIT(&cprinfo);       /* Does mutex_exit of eqp->eq_queuemx */
 511  515          thread_exit();
 512  516  }
 513  517  
 514  518  /*
 515  519   * Create the event delivery thread for an existing event queue.
 516  520   */
 517  521  static void
 518  522  evch_evq_thrcreate(evch_eventq_t *eqp)
 519  523  {
 520  524          kthread_t *thp;
 521  525  
 522  526          thp = thread_create(NULL, 0, evch_delivery_thr, (char *)eqp, 0, &p0,
 523  527              TS_RUN, minclsyspri);
 524  528          eqp->eq_thrid = thp->t_did;
 525  529  }
 526  530  
 527  531  /*
 528  532   * Create event queue.
 529  533   */
 530  534  static evch_eventq_t *
 531  535  evch_evq_create()
 532  536  {
 533  537          evch_eventq_t *p;
 534  538  
 535  539          /* Allocate and initialize event queue descriptor */
 536  540          p = kmem_zalloc(sizeof (evch_eventq_t), KM_SLEEP);
 537  541          mutex_init(&p->eq_queuemx, NULL, MUTEX_DEFAULT, NULL);
 538  542          cv_init(&p->eq_thrsleepcv, NULL, CV_DEFAULT, NULL);
 539  543          evch_q_init(&p->eq_eventq);
 540  544          evch_dl_init(&p->eq_subscr);
 541  545          cv_init(&p->eq_dactivecv, NULL, CV_DEFAULT, NULL);
 542  546          cv_init(&p->eq_onholdcv, NULL, CV_DEFAULT, NULL);
 543  547  
 544  548          /* Create delivery thread */
 545  549          if (evq_initcomplete) {
 546  550                  evch_evq_thrcreate(p);
 547  551          }
 548  552          return (p);
 549  553  }
 550  554  
 551  555  /*
 552  556   * Destroy an event queue. All subscribers have to be unsubscribed prior to
 553  557   * this call.
 554  558   */
 555  559  static void
 556  560  evch_evq_destroy(evch_eventq_t *eqp)
 557  561  {
 558  562          evch_qelem_t *qep;
 559  563  
 560  564          ASSERT(evch_dl_getnum(&eqp->eq_subscr) == 0);
 561  565          /* Kill delivery thread */
 562  566          if (eqp->eq_thrid != NULL) {
 563  567                  mutex_enter(&eqp->eq_queuemx);
 564  568                  eqp->eq_tabortflag = 1;
 565  569                  eqp->eq_holdmode = 0;
 566  570                  cv_signal(&eqp->eq_thrsleepcv);
 567  571                  mutex_exit(&eqp->eq_queuemx);
 568  572                  thread_join(eqp->eq_thrid);
 569  573          }
 570  574  
 571  575          /* Get rid of stale events in the event queue */
 572  576          while ((qep = (evch_qelem_t *)evch_q_out(&eqp->eq_eventq)) != NULL) {
 573  577                  evch_gevent_free((evch_gevent_t *)qep->q_objref);
 574  578                  kmem_free(qep, qep->q_objsize);
 575  579          }
 576  580  
 577  581          /* Wrap up event queue structure */
 578  582          cv_destroy(&eqp->eq_onholdcv);
 579  583          cv_destroy(&eqp->eq_dactivecv);
 580  584          cv_destroy(&eqp->eq_thrsleepcv);
 581  585          evch_dl_fini(&eqp->eq_subscr);
 582  586          mutex_destroy(&eqp->eq_queuemx);
 583  587  
 584  588          /* Free descriptor structure */
 585  589          kmem_free(eqp, sizeof (evch_eventq_t));
 586  590  }
 587  591  
 588  592  /*
 589  593   * Subscribe to an event queue. Every subscriber provides a filter callback
 590  594   * routine and an event delivery callback routine.
 591  595   */
 592  596  static evch_evqsub_t *
 593  597  evch_evq_sub(evch_eventq_t *eqp, filter_f filter, void *fcookie,
 594  598      deliver_f callb, void *cbcookie)
 595  599  {
 596  600          evch_evqsub_t *sp = kmem_zalloc(sizeof (evch_evqsub_t), KM_SLEEP);
 597  601  
 598  602          /* Initialize subscriber structure */
 599  603          sp->su_filter = filter;
 600  604          sp->su_fcookie = fcookie;
 601  605          sp->su_callb = callb;
 602  606          sp->su_cbcookie = cbcookie;
 603  607  
 604  608          /* Add subscription to queue */
 605  609          mutex_enter(&eqp->eq_queuemx);
 606  610          evch_dl_add(&eqp->eq_subscr, &sp->su_link);
 607  611          mutex_exit(&eqp->eq_queuemx);
 608  612          return (sp);
 609  613  }
 610  614  
 611  615  /*
 612  616   * Unsubscribe from an event queue.
 613  617   */
 614  618  static void
 615  619  evch_evq_unsub(evch_eventq_t *eqp, evch_evqsub_t *sp)
 616  620  {
 617  621          mutex_enter(&eqp->eq_queuemx);
 618  622  
 619  623          /* Wait if delivery is just in progress */
 620  624          if (eqp->eq_dactive) {
 621  625                  cv_wait(&eqp->eq_dactivecv, &eqp->eq_queuemx);
 622  626          }
 623  627          evch_dl_del(&eqp->eq_subscr, &sp->su_link);
 624  628          mutex_exit(&eqp->eq_queuemx);
 625  629          kmem_free(sp, sizeof (evch_evqsub_t));
 626  630  }
 627  631  
 628  632  /*
 629  633   * Publish an event. Returns 0 on success and -1 if memory alloc failed.
 630  634   */
 631  635  static int
 632  636  evch_evq_pub(evch_eventq_t *eqp, void *ev, int flags)
 633  637  {
 634  638          size_t size;
 635  639          evch_qelem_t    *qep;
 636  640          evch_gevent_t   *evp = GEVENT(ev);
 637  641  
 638  642          size = sizeof (evch_qelem_t);
 639  643          if (flags & EVCH_TRYHARD) {
 640  644                  qep = kmem_alloc_tryhard(size, &size, KM_NOSLEEP);
 641  645          } else {
 642  646                  qep = kmem_alloc(size, flags & EVCH_NOSLEEP ?
 643  647                      KM_NOSLEEP : KM_SLEEP);
 644  648          }
 645  649          if (qep == NULL) {
 646  650                  return (-1);
 647  651          }
 648  652          qep->q_objref = (void *)evp;
 649  653          qep->q_objsize = size;
 650  654          atomic_inc_32(&evp->ge_refcount);
 651  655          mutex_enter(&eqp->eq_queuemx);
 652  656          evch_q_in(&eqp->eq_eventq, qep);
 653  657  
 654  658          /* Wakeup delivery thread */
 655  659          cv_signal(&eqp->eq_thrsleepcv);
 656  660          mutex_exit(&eqp->eq_queuemx);
 657  661          return (0);
 658  662  }
 659  663  
 660  664  /*
 661  665   * Enter hold mode of an event queue. Event delivery thread stops event
 662  666   * handling after delivery of current event (if any).
 663  667   */
 664  668  static void
 665  669  evch_evq_stop(evch_eventq_t *eqp)
 666  670  {
 667  671          mutex_enter(&eqp->eq_queuemx);
 668  672          eqp->eq_holdmode = 1;
 669  673          if (evq_initcomplete) {
 670  674                  cv_signal(&eqp->eq_thrsleepcv);
 671  675                  cv_wait(&eqp->eq_onholdcv, &eqp->eq_queuemx);
 672  676          }
 673  677          mutex_exit(&eqp->eq_queuemx);
 674  678  }
 675  679  
 676  680  /*
 677  681   * Continue event delivery.
 678  682   */
 679  683  static void
 680  684  evch_evq_continue(evch_eventq_t *eqp)
 681  685  {
 682  686          mutex_enter(&eqp->eq_queuemx);
 683  687          eqp->eq_holdmode = 0;
 684  688          cv_signal(&eqp->eq_thrsleepcv);
 685  689          mutex_exit(&eqp->eq_queuemx);
 686  690  }
 687  691  
 688  692  /*
 689  693   * Returns status of delivery thread. 0 if running and 1 if on hold.
 690  694   */
 691  695  static int
 692  696  evch_evq_status(evch_eventq_t *eqp)
 693  697  {
 694  698          return (eqp->eq_holdmode);
 695  699  }
 696  700  
 697  701  /*
 698  702   * Add a destructor function to an event structure.
 699  703   */
 700  704  static void
 701  705  evch_evq_evadd_dest(void *ev, destr_f destructor, void *cookie)
 702  706  {
 703  707          evch_gevent_t *evp = GEVENT(ev);
 704  708  
 705  709          evp->ge_destruct = destructor;
 706  710          evp->ge_dstcookie = cookie;
 707  711  }
 708  712  
 709  713  /*
 710  714   * Allocate evch_gevent_t structure. Return address of payload offset of
 711  715   * evch_gevent_t.  If EVCH_TRYHARD allocation is requested, we use
 712  716   * kmem_alloc_tryhard to alloc memory of at least paylsize bytes.
 713  717   *
 714  718   * If either memory allocation is unsuccessful, we return NULL.
 715  719   */
 716  720  static void *
 717  721  evch_evq_evzalloc(size_t paylsize, int flag)
 718  722  {
 719  723          evch_gevent_t   *evp;
 720  724          size_t          rsize, evsize, ge_size;
 721  725  
 722  726          rsize = offsetof(evch_gevent_t, ge_payload) + paylsize;
 723  727          if (flag & EVCH_TRYHARD) {
 724  728                  evp = kmem_alloc_tryhard(rsize, &evsize, KM_NOSLEEP);
 725  729                  ge_size = evsize;
 726  730          } else {
 727  731                  evp = kmem_alloc(rsize, flag & EVCH_NOSLEEP ? KM_NOSLEEP :
 728  732                      KM_SLEEP);
 729  733                  ge_size = rsize;
 730  734          }
 731  735  
 732  736          if (evp) {
 733  737                  bzero(evp, rsize);
 734  738                  evp->ge_size = ge_size;
 735  739                  return (&evp->ge_payload);
 736  740          }
 737  741          return (evp);
 738  742  }
 739  743  
 740  744  /*
 741  745   * Free event structure. Argument ev is address of payload offset.
 742  746   */
 743  747  static void
 744  748  evch_evq_evfree(void *ev)
 745  749  {
 746  750          evch_gevent_free(GEVENT(ev));
 747  751  }
 748  752  
 749  753  /*
 750  754   * Iterate over all events in the event queue. Begin with an event
 751  755   * which is currently being delivered. No mutexes are grabbed and no
 752  756   * resources allocated so that this function can be called in panic
 753  757   * context too. This function has to be called with ev == NULL initially.
 754  758   * Actually argument ev is only a flag. Internally the member eq_nextev
 755  759   * is used to determine the next event. But ev allows for the convenient
 756  760   * use like
 757  761   *      ev = NULL;
 758  762   *      while ((ev = evch_evq_evnext(evp, ev)) != NULL) ...
 759  763   */
 760  764  static void *
 761  765  evch_evq_evnext(evch_eventq_t *evq, void *ev)
 762  766  {
 763  767          if (ev == NULL) {
 764  768                  evq->eq_nextev = NULL;
 765  769                  if (evq->eq_curevent != NULL)
 766  770                          return (&evq->eq_curevent->ge_payload);
 767  771          }
 768  772          evq->eq_nextev = evch_q_next(&evq->eq_eventq, evq->eq_nextev);
 769  773          if (evq->eq_nextev == NULL)
 770  774                  return (NULL);
 771  775          return (&((evch_gevent_t *)evq->eq_nextev->q_objref)->ge_payload);
 772  776  }
 773  777  
 774  778  /*
 775  779   * Channel handling functions. First some support functions. Functions belonging
 776  780   * to the channel handling interface start with evch_ch. The following functions
 777  781   * make up the channel handling internal interfaces:
 778  782   *
 779  783   * evch_chinit          - Initialize channel handling
 780  784   * evch_chinitthr       - Second step init: initialize threads
 781  785   * evch_chbind          - Bind to a channel
 782  786   * evch_chunbind        - Unbind from a channel
 783  787   * evch_chsubscribe     - Subscribe to a sysevent class
 784  788   * evch_chunsubscribe   - Unsubscribe
 785  789   * evch_chpublish       - Publish an event
 786  790   * evch_chgetnames      - Get names of all channels
 787  791   * evch_chgetchdata     - Get data of a channel
 788  792   * evch_chrdevent_init  - Init event q traversal
 789  793   * evch_chgetnextev     - Read out events queued for a subscriber
 790  794   * evch_chrdevent_fini  - Finish event q traversal
 791  795   */
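
A condensed sketch of how a kernel client typically exercises this interface (illustrative only; my_handler, the channel name and the exact kerndlv_f signature are assumptions, while the evch_ch*() calls and flags appear in this file):

    static int
    my_handler(sysevent_impl_t *ev, void *cookie)   /* kerndlv_f-style */
    {
            /* handle the delivered sysevent */
            return (0);
    }

    evch_bind_t     *bp;
    int             err;

    /* Bind to the channel, creating it if it does not exist yet */
    err = evch_chbind("com.example:mychan", &bp, EVCH_CREAT);

    /* Kernel subscriber for all classes matching "EC_*" */
    if (err == 0)
            err = evch_chsubscribe(bp, EVCH_DELKERN, "my_sub", "EC_*",
                (void *)my_handler, NULL, 0, curproc->p_pid);

    /* ... events published to the channel are now delivered ... */

    evch_chunsubscribe(bp, "my_sub", 0);
    evch_chunbind(bp);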
 792  796  
 793  797  /*
 794  798   * Compare channel name. Used for evch_dl_search to find a channel with the
 795  799   * name s.
 796  800   */
 797  801  static int
 798  802  evch_namecmp(evch_dlelem_t *ep, char *s)
 799  803  {
 800  804          return (strcmp(((evch_chan_t *)ep)->ch_name, s));
 801  805  }
 802  806  
 803  807  /*
 804  808   * Simple wildcarded match test of event class string 'class' to
 805  809   * wildcarded subscription string 'pat'.  Recursive only if
 806  810   * 'pat' includes a wildcard, otherwise essentially just strcmp.
 807  811   */
 808  812  static int
 809  813  evch_clsmatch(char *class, const char *pat)
 810  814  {
 811  815          char c;
 812  816  
 813  817          do {
 814  818                  if ((c = *pat++) == '\0')
 815  819                          return (*class == '\0');
 816  820  
 817  821                  if (c == '*') {
 818  822                          while (*pat == '*')
 819  823                                  pat++; /* consecutive *'s can be collapsed */
 820  824  
 821  825                          if (*pat == '\0')
 822  826                                  return (1);
 823  827  
 824  828                          while (*class != '\0') {
 825  829                                  if (evch_clsmatch(class++, pat) != 0)
 826  830                                          return (1);
 827  831                          }
 828  832  
 829  833                          return (0);
 830  834                  }
 831  835          } while (c == *class++);
 832  836  
 833  837          return (0);
 834  838  }
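
A few concrete cases of the matcher above, as an illustration (return values shown as comments):

    evch_clsmatch("EC_zfs", "EC_zfs");              /* 1: exact match */
    evch_clsmatch("EC_zfs", "EC_*");                /* 1: trailing wildcard */
    evch_clsmatch("EC_dev_status", "*status");      /* 1: wildcard recursion */
    evch_clsmatch("EC_zfs", "EC_dev");              /* 0: classes differ */
    evch_clsmatch("EC_zfs", "");                    /* 0: empty pattern matches only "" */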
 835  839  
 836  840  /*
 837  841   * Sysevent filter callback routine. Enables event delivery only if it matches
 838  842   * the event class pattern string given by parameter cookie.
 839  843   */
 840  844  static int
 841  845  evch_class_filter(void *ev, void *cookie)
 842  846  {
 843  847          const char *pat = (const char *)cookie;
 844  848  
 845  849          if (pat == NULL || evch_clsmatch(SE_CLASS_NAME(ev), pat))
 846  850                  return (EVQ_DELIVER);
 847  851  
 848  852          return (EVQ_IGNORE);
 849  853  }
 850  854  
 851  855  /*
 852  856   * Callback routine to propagate the event into a per subscriber queue.
 853  857   */
 854  858  static int
 855  859  evch_subq_deliver(void *evp, void *cookie)
 856  860  {
 857  861          evch_subd_t *p = (evch_subd_t *)cookie;
 858  862  
 859  863          (void) evch_evq_pub(p->sd_queue, evp, EVCH_SLEEP);
 860  864          return (EVQ_CONT);
 861  865  }
 862  866  
 863  867  /*
 864  868   * Call kernel callback routine for sysevent kernel delivery.
 865  869   */
 866  870  static int
 867  871  evch_kern_deliver(void *evp, void *cookie)
 868  872  {
 869  873          sysevent_impl_t *ev = (sysevent_impl_t *)evp;
 870  874          evch_subd_t     *sdp = (evch_subd_t *)cookie;
 871  875  
 872  876          return (sdp->sd_callback(ev, sdp->sd_cbcookie));
 873  877  }
 874  878  
 875  879  /*
 876  880   * Door upcall for user land sysevent delivery.
 877  881   */
 878  882  static int
 879  883  evch_door_deliver(void *evp, void *cookie)
 880  884  {
 881  885          int             error;
 882  886          size_t          size;
 883  887          sysevent_impl_t *ev = (sysevent_impl_t *)evp;
 884  888          door_arg_t      darg;
 885  889          evch_subd_t     *sdp = (evch_subd_t *)cookie;
 886  890          int             nticks = EVCH_MIN_PAUSE;
 887  891          uint32_t        retval;
 888  892          int             retry = 20;
 889  893  
 890  894          /* Initialize door args */
 891  895          size = sizeof (sysevent_impl_t) + SE_PAYLOAD_SZ(ev);
 892  896  
 893  897          darg.rbuf = (char *)&retval;
 894  898          darg.rsize = sizeof (retval);
 895  899          darg.data_ptr = (char *)ev;
 896  900          darg.data_size = size;
 897  901          darg.desc_ptr = NULL;
 898  902          darg.desc_num = 0;
 899  903  
 900  904          for (;;) {
 901  905                  if ((error = door_ki_upcall_limited(sdp->sd_door, &darg,
 902  906                      NULL, SIZE_MAX, 0)) == 0) {
 903  907                          break;
 904  908                  }
 905  909                  switch (error) {
 906  910                  case EAGAIN:
 907  911                          /* Cannot deliver event - process may be forking */
 908  912                          delay(nticks);
 909  913                          nticks <<= 1;
 910  914                          if (nticks > EVCH_MAX_PAUSE) {
 911  915                                  nticks = EVCH_MAX_PAUSE;
 912  916                          }
 913  917                          if (retry-- <= 0) {
 914  918                                  cmn_err(CE_CONT, "event delivery thread: "
 915  919                                      "door_ki_upcall error EAGAIN\n");
 916  920                                  return (EVQ_CONT);
 917  921                          }
 918  922                          break;
 919  923                  case EINTR:
 920  924                  case EBADF:
 921  925                          /* Process died */
 922  926                          return (EVQ_SLEEP);
 923  927                  default:
 924  928                          cmn_err(CE_CONT,
 925  929                              "event delivery thread: door_ki_upcall error %d\n",
 926  930                              error);
 927  931                          return (EVQ_CONT);
 928  932                  }
 929  933          }
 930  934          if (retval == EAGAIN) {
 931  935                  return (EVQ_AGAIN);
 932  936          }
 933  937          return (EVQ_CONT);
 934  938  }
 935  939  
 936  940  /*
 937  941   * Callback routine for evch_dl_search() to compare subscriber id's. Used by
 938  942   * evch_subscribe() and evch_chrdevent_init().
 939  943   */
 940  944  static int
 941  945  evch_subidcmp(evch_dlelem_t *ep, char *s)
 942  946  {
 943  947          return (strcmp(((evch_subd_t *)ep)->sd_ident, s));
 944  948  }
 945  949  
 946  950  /*
 947  951   * Callback routine for evch_dl_search() to find a subscriber with EVCH_SUB_DUMP
 948  952   * set (indicated by sub->sd_dump != 0). Used by evch_chrdevent_init() and
  949  953   * evch_subscribe(). Needs to return 0 if subscriber with sd_dump set is
 950  954   * found.
 951  955   */
 952  956  /*ARGSUSED1*/
 953  957  static int
 954  958  evch_dumpflgcmp(evch_dlelem_t *ep, char *s)
 955  959  {
 956  960          return (((evch_subd_t *)ep)->sd_dump ? 0 : 1);
 957  961  }
 958  962  
 959  963  /*
 960  964   * Event destructor function. Used to maintain the number of events per channel.
 961  965   */
 962  966  /*ARGSUSED*/
 963  967  static void
 964  968  evch_destr_event(void *ev, void *ch)
 965  969  {
 966  970          evch_chan_t *chp = (evch_chan_t *)ch;
 967  971  
 968  972          mutex_enter(&chp->ch_pubmx);
 969  973          chp->ch_nevents--;
 970  974          cv_signal(&chp->ch_pubcv);
 971  975          mutex_exit(&chp->ch_pubmx);
 972  976  }
 973  977  
 974  978  /*
 975  979   * Integer square root according to Newton's iteration.
 976  980   */
 977  981  static uint32_t
 978  982  evch_isqrt(uint64_t n)
 979  983  {
 980  984          uint64_t        x = n >> 1;
 981  985          uint64_t        xn = x - 1;
 982  986          static uint32_t lowval[] = { 0, 1, 1, 2 };
 983  987  
 984  988          if (n < 4) {
 985  989                  return (lowval[n]);
 986  990          }
 987  991          while (xn < x) {
 988  992                  x = xn;
 989  993                  xn = (x + n / x) / 2;
 990  994          }
 991  995          return ((uint32_t)xn);
 992  996  }
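
For example, evch_isqrt(128) starts at x = 64 and produces the iterates 63, 32, 18, 12, 11; the next candidate is again 11, so the loop terminates and 11 = floor(sqrt(128)) is returned.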
 993  997  
 994  998  /*
 995  999   * First step sysevent channel initialization. Called when kernel memory
 996 1000   * allocator is initialized.
 997 1001   */
 998 1002  static void
 999 1003  evch_chinit()
1000 1004  {
1001 1005          size_t k;
1002 1006  
1003 1007          /*
1004 1008           * Calculate limits: max no of channels and max no of events per
1005 1009           * channel. The smallest machine with 128 MByte will allow for
1006 1010           * >= 8 channels and an upper limit of 2048 events per channel.
1007 1011           * The event limit is the number of channels times 256 (hence
 1008 1012   * the shift factor of 8). These numbers were selected arbitrarily.
1009 1013           */
1010 1014          k = kmem_maxavail() >> 20;
1011 1015          evch_channels_max = min(evch_isqrt(k), EVCH_MAX_CHANNELS);
1012 1016          evch_events_max = evch_channels_max << 8;
1013 1017  
1014 1018          /*
1015 1019           * Will trigger creation of the global zone's evch state.
1016 1020           */
1017 1021          zone_key_create(&evch_zone_key, evch_zoneinit, NULL, evch_zonefree);
1018 1022  }
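
As a worked example of the sizing above: with roughly 1 GByte of available kernel memory, k = 1024, so evch_channels_max = min(evch_isqrt(1024), EVCH_MAX_CHANNELS) permits up to 32 channels (unless EVCH_MAX_CHANNELS is smaller), and evch_events_max = 32 << 8 = 8192; each channel's ch_maxev is further capped by EVCH_DEFAULT_EVENTS when the channel is created.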
1019 1023  
1020 1024  /*
1021 1025   * Second step sysevent channel initialization. Called when threads are ready.
1022 1026   */
1023 1027  static void
1024 1028  evch_chinitthr()
1025 1029  {
1026 1030          struct evch_globals *eg;
1027 1031          evch_chan_t     *chp;
1028 1032          evch_subd_t     *sdp;
1029 1033  
1030 1034          /*
1031 1035           * We're early enough in boot that we know that only the global
1032 1036           * zone exists; we only need to initialize its threads.
1033 1037           */
1034 1038          eg = zone_getspecific(evch_zone_key, global_zone);
1035 1039          ASSERT(eg != NULL);
1036 1040  
1037 1041          for (chp = evch_dl_next(&eg->evch_list, NULL); chp != NULL;
1038 1042              chp = evch_dl_next(&eg->evch_list, chp)) {
1039 1043                  for (sdp = evch_dl_next(&chp->ch_subscr, NULL); sdp;
1040 1044                      sdp = evch_dl_next(&chp->ch_subscr, sdp)) {
1041 1045                          evch_evq_thrcreate(sdp->sd_queue);
1042 1046                  }
1043 1047                  evch_evq_thrcreate(chp->ch_queue);
1044 1048          }
1045 1049          evq_initcomplete = 1;
1046 1050  }
1047 1051  
1048 1052  /*
1049 1053   * Sysevent channel bind. Create channel and allocate binding structure.
1050 1054   */
1051 1055  static int
1052 1056  evch_chbind(const char *chnam, evch_bind_t **scpp, uint32_t flags)
1053 1057  {
1054 1058          struct evch_globals *eg;
1055 1059          evch_bind_t     *bp;
1056 1060          evch_chan_t     *p;
1057 1061          char            *chn;
1058 1062          size_t          namlen;
1059 1063          int             rv;
1060 1064  
1061 1065          eg = zone_getspecific(evch_zone_key, curproc->p_zone);
1062 1066          ASSERT(eg != NULL);
1063 1067  
1064 1068          /* Create channel if it does not exist */
1065 1069          ASSERT(evch_dl_is_init(&eg->evch_list));
1066 1070          if ((namlen = strlen(chnam) + 1) > MAX_CHNAME_LEN) {
1067 1071                  return (EINVAL);
1068 1072          }
1069 1073          mutex_enter(&eg->evch_list_lock);
1070 1074          if ((p = (evch_chan_t *)evch_dl_search(&eg->evch_list, evch_namecmp,
1071 1075              (char *)chnam)) == NULL) {
1072 1076                  if (flags & EVCH_CREAT) {
1073 1077                          if (evch_dl_getnum(&eg->evch_list) >=
1074 1078                              evch_channels_max) {
1075 1079                                  mutex_exit(&eg->evch_list_lock);
1076 1080                                  return (ENOMEM);
1077 1081                          }
1078 1082                          chn = kmem_alloc(namlen, KM_SLEEP);
1079 1083                          bcopy(chnam, chn, namlen);
1080 1084  
1081 1085                          /* Allocate and initialize channel descriptor */
1082 1086                          p = kmem_zalloc(sizeof (evch_chan_t), KM_SLEEP);
1083 1087                          p->ch_name = chn;
1084 1088                          p->ch_namelen = namlen;
1085 1089                          mutex_init(&p->ch_mutex, NULL, MUTEX_DEFAULT, NULL);
1086 1090                          p->ch_queue = evch_evq_create();
1087 1091                          evch_dl_init(&p->ch_subscr);
1088 1092                          if (evq_initcomplete) {
1089 1093                                  p->ch_uid = crgetuid(curthread->t_cred);
1090 1094                                  p->ch_gid = crgetgid(curthread->t_cred);
1091 1095                          }
1092 1096                          cv_init(&p->ch_pubcv, NULL, CV_DEFAULT, NULL);
1093 1097                          mutex_init(&p->ch_pubmx, NULL, MUTEX_DEFAULT, NULL);
1094 1098                          p->ch_maxev = min(EVCH_DEFAULT_EVENTS, evch_events_max);
1095 1099                          p->ch_maxsubscr = EVCH_MAX_SUBSCRIPTIONS;
1096 1100                          p->ch_maxbinds = evch_bindings_max;
1097 1101                          p->ch_ctime = gethrestime_sec();
1098 1102  
1099 1103                          if (flags & (EVCH_HOLD_PEND | EVCH_HOLD_PEND_INDEF)) {
1100 1104                                  if (flags & EVCH_HOLD_PEND_INDEF)
1101 1105                                          p->ch_holdpend = CH_HOLD_PEND_INDEF;
1102 1106                                  else
1103 1107                                          p->ch_holdpend = CH_HOLD_PEND;
1104 1108  
1105 1109                                  evch_evq_stop(p->ch_queue);
1106 1110                          }
1107 1111  
1108 1112                          /* Put new descriptor into channel list */
1109 1113                          evch_dl_add(&eg->evch_list, (evch_dlelem_t *)p);
1110 1114                  } else {
1111 1115                          mutex_exit(&eg->evch_list_lock);
1112 1116                          return (ENOENT);
1113 1117                  }
1114 1118          }
1115 1119  
1116 1120          /* Check for max binds and create binding */
1117 1121          mutex_enter(&p->ch_mutex);
1118 1122          if (p->ch_bindings >= p->ch_maxbinds) {
1119 1123                  rv = ENOMEM;
1120 1124                  /*
1121 1125                   * No need to destroy the channel because this call did not
1122 1126                   * create it. Other bindings will be present if ch_maxbinds
1123 1127                   * is exceeded.
1124 1128                   */
1125 1129                  goto errorexit;
1126 1130          }
1127 1131          bp = kmem_alloc(sizeof (evch_bind_t), KM_SLEEP);
1128 1132          bp->bd_channel = p;
1129 1133          bp->bd_sublst = NULL;
1130 1134          p->ch_bindings++;
1131 1135          rv = 0;
1132 1136          *scpp = bp;
1133 1137  errorexit:
1134 1138          mutex_exit(&p->ch_mutex);
1135 1139          mutex_exit(&eg->evch_list_lock);
1136 1140          return (rv);
1137 1141  }
1138 1142  
1139 1143  /*
1140 1144   * Unbind: Free bind structure. Remove channel if last binding was freed.
1141 1145   */
1142 1146  static void
1143 1147  evch_chunbind(evch_bind_t *bp)
1144 1148  {
1145 1149          struct evch_globals *eg;
1146 1150          evch_chan_t *chp = bp->bd_channel;
1147 1151  
1148 1152          eg = zone_getspecific(evch_zone_key, curproc->p_zone);
1149 1153          ASSERT(eg != NULL);
1150 1154  
1151 1155          mutex_enter(&eg->evch_list_lock);
1152 1156          mutex_enter(&chp->ch_mutex);
1153 1157          ASSERT(chp->ch_bindings > 0);
1154 1158          chp->ch_bindings--;
1155 1159          kmem_free(bp, sizeof (evch_bind_t));
1156 1160          if (chp->ch_bindings == 0 && evch_dl_getnum(&chp->ch_subscr) == 0 &&
1157 1161              (chp->ch_nevents == 0 || chp->ch_holdpend != CH_HOLD_PEND_INDEF)) {
1158 1162                  /*
1159 1163                   * No more bindings and no persistent subscriber(s).  If there
1160 1164                   * are no events in the channel then destroy the channel;
1161 1165                   * otherwise destroy the channel only if we're not holding
1162 1166                   * pending events indefinitely.
1163 1167                   */
1164 1168                  mutex_exit(&chp->ch_mutex);
1165 1169                  evch_dl_del(&eg->evch_list, &chp->ch_link);
1166 1170                  evch_evq_destroy(chp->ch_queue);
1167 1171                  nvlist_free(chp->ch_propnvl);
1168 1172                  mutex_destroy(&chp->ch_mutex);
1169 1173                  mutex_destroy(&chp->ch_pubmx);
1170 1174                  cv_destroy(&chp->ch_pubcv);
1171 1175                  kmem_free(chp->ch_name, chp->ch_namelen);
1172 1176                  kmem_free(chp, sizeof (evch_chan_t));
1173 1177          } else
1174 1178                  mutex_exit(&chp->ch_mutex);
1175 1179          mutex_exit(&eg->evch_list_lock);
1176 1180  }
1177 1181  
1178 1182  static int
1179 1183  wildcard_count(const char *class)
1180 1184  {
1181 1185          int count = 0;
1182 1186          char c;
1183 1187  
1184 1188          if (class == NULL)
1185 1189                  return (0);
1186 1190  
1187 1191          while ((c = *class++) != '\0') {
1188 1192                  if (c == '*')
1189 1193                          count++;
1190 1194          }
1191 1195  
1192 1196          return (count);
1193 1197  }
1194 1198  
1195 1199  /*
1196 1200   * Subscribe to a channel. dtype is either EVCH_DELKERN for kernel callbacks
1197 1201   * or EVCH_DELDOOR for door upcall delivery to user land. Depending on dtype
 1198 1202   * dinfo gives the callback routine address or the door handle.
1199 1203   */
1200 1204  static int
1201 1205  evch_chsubscribe(evch_bind_t *bp, int dtype, const char *sid, const char *class,
1202 1206      void *dinfo, void *cookie, int flags, pid_t pid)
1203 1207  {
1204 1208          evch_chan_t     *chp = bp->bd_channel;
1205 1209          evch_eventq_t   *eqp = chp->ch_queue;
1206 1210          evch_subd_t     *sdp;
1207 1211          evch_subd_t     *esp;
1208 1212          int             (*delivfkt)();
1209 1213          char            *clb = NULL;
1210 1214          int             clblen = 0;
1211 1215          char            *subid;
1212 1216          int             subidblen;
1213 1217  
1214 1218          /*
1215 1219           * Check if only known flags are set.
1216 1220           */
1217 1221          if (flags & ~(EVCH_SUB_KEEP | EVCH_SUB_DUMP))
1218 1222                  return (EINVAL);
1219 1223  
1220 1224          /*
1221 1225           * Enforce a limit on the number of wildcards allowed in the class
1222 1226           * subscription string (limits recursion in pattern matching).
1223 1227           */
1224 1228          if (wildcard_count(class) > EVCH_WILDCARD_MAX)
1225 1229                  return (EINVAL);
1226 1230  
1227 1231          /*
 1228 1232   * Check if we already have a subscription with that name and if we
1229 1233           * have to reconnect the subscriber to a persistent subscription.
1230 1234           */
1231 1235          mutex_enter(&chp->ch_mutex);
1232 1236          if ((esp = (evch_subd_t *)evch_dl_search(&chp->ch_subscr,
1233 1237              evch_subidcmp, (char *)sid)) != NULL) {
1234 1238                  int error = 0;
1235 1239                  if ((flags & EVCH_SUB_KEEP) && (esp->sd_active == 0)) {
1236 1240                          /*
1237 1241                           * Subscription with the name on hold, reconnect to
1238 1242                           * existing queue.
1239 1243                           */
1240 1244                          ASSERT(dtype == EVCH_DELDOOR);
1241 1245                          esp->sd_subnxt = bp->bd_sublst;
1242 1246                          bp->bd_sublst = esp;
1243 1247                          esp->sd_pid = pid;
1244 1248                          esp->sd_door = (door_handle_t)dinfo;
1245 1249                          esp->sd_active++;
1246 1250                          evch_evq_continue(esp->sd_queue);
1247 1251                  } else {
1248 1252                          /* Subscriber with given name already exists */
1249 1253                          error = EEXIST;
1250 1254                  }
1251 1255                  mutex_exit(&chp->ch_mutex);
1252 1256                  return (error);
1253 1257          }
1254 1258  
1255 1259          if (evch_dl_getnum(&chp->ch_subscr) >= chp->ch_maxsubscr) {
1256 1260                  mutex_exit(&chp->ch_mutex);
1257 1261                  return (ENOMEM);
1258 1262          }
1259 1263  
1260 1264          if (flags & EVCH_SUB_DUMP && evch_dl_search(&chp->ch_subscr,
1261 1265              evch_dumpflgcmp, NULL) != NULL) {
1262 1266                  /*
1263 1267                   * Subscription with EVCH_SUB_DUMP flagged already exists.
1264 1268                   * Only one subscription with EVCH_SUB_DUMP possible. Return
1265 1269                   * error.
1266 1270                   */
1267 1271                  mutex_exit(&chp->ch_mutex);
1268 1272                  return (EINVAL);
1269 1273          }
1270 1274  
1271 1275          if (class != NULL) {
1272 1276                  clblen = strlen(class) + 1;
1273 1277                  clb = kmem_alloc(clblen, KM_SLEEP);
1274 1278                  bcopy(class, clb, clblen);
1275 1279          }
1276 1280  
1277 1281          subidblen = strlen(sid) + 1;
1278 1282          subid = kmem_alloc(subidblen, KM_SLEEP);
1279 1283          bcopy(sid, subid, subidblen);
1280 1284  
1281 1285          /* Create per subscriber queue */
1282 1286          sdp = kmem_zalloc(sizeof (evch_subd_t), KM_SLEEP);
1283 1287          sdp->sd_queue = evch_evq_create();
1284 1288  
1285 1289          /* Subscribe to subscriber queue */
1286 1290          sdp->sd_persist = flags & EVCH_SUB_KEEP ? 1 : 0;
1287 1291          sdp->sd_dump = flags & EVCH_SUB_DUMP ? 1 : 0;
1288 1292          sdp->sd_type = dtype;
1289 1293          sdp->sd_cbcookie = cookie;
1290 1294          sdp->sd_ident = subid;
1291 1295          if (dtype == EVCH_DELKERN) {
1292 1296                  sdp->sd_callback = (kerndlv_f)dinfo;
1293 1297                  delivfkt = evch_kern_deliver;
1294 1298          } else {
1295 1299                  sdp->sd_door = (door_handle_t)dinfo;
1296 1300                  delivfkt = evch_door_deliver;
1297 1301          }
1298 1302          sdp->sd_ssub =
1299 1303              evch_evq_sub(sdp->sd_queue, NULL, NULL, delivfkt, (void *)sdp);
1300 1304  
1301 1305          /* Connect per subscriber queue to main event queue */
1302 1306          sdp->sd_msub = evch_evq_sub(eqp, evch_class_filter, clb,
1303 1307              evch_subq_deliver, (void *)sdp);
1304 1308          sdp->sd_classname = clb;
1305 1309          sdp->sd_clnsize = clblen;
1306 1310          sdp->sd_pid = pid;
1307 1311          sdp->sd_active++;
1308 1312  
1309 1313          /* Add subscription to binding */
1310 1314          sdp->sd_subnxt = bp->bd_sublst;
1311 1315          bp->bd_sublst = sdp;
1312 1316  
1313 1317          /* Add subscription to channel */
1314 1318          evch_dl_add(&chp->ch_subscr, &sdp->sd_link);
1315 1319          if (chp->ch_holdpend && evch_dl_getnum(&chp->ch_subscr) == 1) {
1316 1320  
1317 1321                  /* Let main event queue run in case of HOLDPEND */
1318 1322                  evch_evq_continue(eqp);
1319 1323          }
1320 1324          mutex_exit(&chp->ch_mutex);
1321 1325  
1322 1326          return (0);
1323 1327  }
1324 1328  
1325 1329  /*
1326 1330   * If flag == EVCH_SUB_KEEP only non-persistent subscriptions are deleted.
1327 1331   * When sid == NULL all subscriptions except the ones with EVCH_SUB_KEEP set
1328 1332   * are removed.
1329 1333   */
1330 1334  static void
1331 1335  evch_chunsubscribe(evch_bind_t *bp, const char *sid, uint32_t flags)
1332 1336  {
1333 1337          evch_subd_t     *sdp;
1334 1338          evch_subd_t     *next;
1335 1339          evch_subd_t     *prev;
1336 1340          evch_chan_t     *chp = bp->bd_channel;
1337 1341  
1338 1342          mutex_enter(&chp->ch_mutex);
1339 1343          if (chp->ch_holdpend) {
1340 1344                  evch_evq_stop(chp->ch_queue);   /* Hold main event queue */
1341 1345          }
1342 1346          prev = NULL;
1343 1347          for (sdp = bp->bd_sublst; sdp; sdp = next) {
1344 1348                  if (sid == NULL || strcmp(sid, sdp->sd_ident) == 0) {
1345 1349                          if (flags == 0 || sdp->sd_persist == 0) {
1346 1350                                  /*
1347 1351                                   * Disconnect subscriber queue from main event
1348 1352                                   * queue.
1349 1353                                   */
1350 1354                                  evch_evq_unsub(chp->ch_queue, sdp->sd_msub);
1351 1355  
1352 1356                                  /* Destruct per subscriber queue */
1353 1357                                  evch_evq_unsub(sdp->sd_queue, sdp->sd_ssub);
1354 1358                                  evch_evq_destroy(sdp->sd_queue);
1355 1359                                  /*
1356 1360                                   * Eliminate the subscriber data from channel
1357 1361                                   * list.
1358 1362                                   */
1359 1363                                  evch_dl_del(&chp->ch_subscr, &sdp->sd_link);
1360 1364                                  kmem_free(sdp->sd_classname, sdp->sd_clnsize);
1361 1365                                  if (sdp->sd_type == EVCH_DELDOOR) {
1362 1366                                          door_ki_rele(sdp->sd_door);
1363 1367                                  }
1364 1368                                  next = sdp->sd_subnxt;
1365 1369                                  if (prev) {
1366 1370                                          prev->sd_subnxt = next;
1367 1371                                  } else {
1368 1372                                          bp->bd_sublst = next;
1369 1373                                  }
1370 1374                                  kmem_free(sdp->sd_ident,
1371 1375                                      strlen(sdp->sd_ident) + 1);
1372 1376                                  kmem_free(sdp, sizeof (evch_subd_t));
1373 1377                          } else {
1374 1378                                  /*
1375 1379                                   * EVCH_SUB_KEEP case
1376 1380                                   */
1377 1381                                  evch_evq_stop(sdp->sd_queue);
1378 1382                                  if (sdp->sd_type == EVCH_DELDOOR) {
1379 1383                                          door_ki_rele(sdp->sd_door);
1380 1384                                  }
1381 1385                                  sdp->sd_active--;
1382 1386                                  ASSERT(sdp->sd_active == 0);
1383 1387                                  next = sdp->sd_subnxt;
1384 1388                                  prev = sdp;
1385 1389                          }
1386 1390                          if (sid != NULL) {
1387 1391                                  break;
1388 1392                          }
1389 1393                  } else {
1390 1394                          next = sdp->sd_subnxt;
1391 1395                          prev = sdp;
1392 1396                  }
1393 1397          }
1394 1398          if (!(chp->ch_holdpend && evch_dl_getnum(&chp->ch_subscr) == 0)) {
1395 1399                  /*
1396 1400                   * Continue dispatch thread except if no subscribers are present
1397 1401                   * in HOLDPEND mode.
1398 1402                   */
1399 1403                  evch_evq_continue(chp->ch_queue);
1400 1404          }
1401 1405          mutex_exit(&chp->ch_mutex);
1402 1406  }
1403 1407  
1404 1408  /*
1405 1409   * Publish an event. Returns zero on success and an error code else.
1406 1410   */
1407 1411  static int
1408 1412  evch_chpublish(evch_bind_t *bp, sysevent_impl_t *ev, int flags)
1409 1413  {
1410 1414          evch_chan_t *chp = bp->bd_channel;
1411 1415  
1412 1416          DTRACE_SYSEVENT2(post, evch_bind_t *, bp, sysevent_impl_t *, ev);
1413 1417  
1414 1418          mutex_enter(&chp->ch_pubmx);
1415 1419          if (chp->ch_nevents >= chp->ch_maxev) {
1416 1420                  if (!(flags & EVCH_QWAIT)) {
1417 1421                          evch_evq_evfree(ev);
1418 1422                          mutex_exit(&chp->ch_pubmx);
1419 1423                          return (EAGAIN);
1420 1424                  } else {
1421 1425                          while (chp->ch_nevents >= chp->ch_maxev) {
1422 1426                                  if (cv_wait_sig(&chp->ch_pubcv,
1423 1427                                      &chp->ch_pubmx) == 0) {
1424 1428  
1425 1429                                          /* Got Signal, return EINTR */
1426 1430                                          evch_evq_evfree(ev);
1427 1431                                          mutex_exit(&chp->ch_pubmx);
1428 1432                                          return (EINTR);
1429 1433                                  }
1430 1434                          }
1431 1435                  }
1432 1436          }
1433 1437          chp->ch_nevents++;
1434 1438          mutex_exit(&chp->ch_pubmx);
1435 1439          SE_TIME(ev) = gethrtime();
1436 1440          SE_SEQ(ev) = log_sysevent_new_id();
1437 1441          /*
1438 1442           * Add the destructor function to the event structure, now that the
1439 1443           * event is accounted for. The only task of the destructor is to
1440 1444           * decrement the channel event count. The evq_*() routines (including
1441 1445           * the event delivery thread) do not have knowledge of the channel
1442 1446           * data. So the anonymous destructor handles the channel data for it.
1443 1447           */
1444 1448          evch_evq_evadd_dest(ev, evch_destr_event, (void *)chp);
1445 1449          return (evch_evq_pub(chp->ch_queue, ev, flags) == 0 ? 0 : EAGAIN);
1446 1450  }
1447 1451  
1448 1452  /*
1449 1453   * Fills a buffer consecutively with the names of all available channels.
1450 1454   * Returns the length of all name strings or -1 if the buffer size was insufficient.
1451 1455   */
1452 1456  static int
1453 1457  evch_chgetnames(char *buf, size_t size)
1454 1458  {
1455 1459          struct evch_globals *eg;
1456 1460          int             len = 0;
1457 1461          char            *addr = buf;
1458 1462          int             max = size;
1459 1463          evch_chan_t     *chp;
1460 1464  
1461 1465          eg = zone_getspecific(evch_zone_key, curproc->p_zone);
1462 1466          ASSERT(eg != NULL);
1463 1467  
1464 1468          mutex_enter(&eg->evch_list_lock);
1465 1469          for (chp = evch_dl_next(&eg->evch_list, NULL); chp != NULL;
1466 1470              chp = evch_dl_next(&eg->evch_list, chp)) {
1467 1471                  len += chp->ch_namelen;
1468 1472                  if (len >= max) {
1469 1473                          mutex_exit(&eg->evch_list_lock);
1470 1474                          return (-1);
1471 1475                  }
1472 1476                  bcopy(chp->ch_name, addr, chp->ch_namelen);
1473 1477                  addr += chp->ch_namelen;
1474 1478          }
1475 1479          mutex_exit(&eg->evch_list_lock);
1476 1480          addr[0] = 0;
1477 1481          return (len + 1);
1478 1482  }
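
The buffer filled by evch_chgetnames() is a simple sequence of NUL-terminated names followed by an empty string. A minimal, purely illustrative sketch of a hypothetical consumer (walk_channel_names() is not part of this file; "len" is assumed to be the value returned by evch_chgetnames()):

/*
 * Illustrative only: walk the name buffer produced by evch_chgetnames().
 * The terminating empty string ends the loop.
 */
static void
walk_channel_names(const char *buf, int len)
{
        const char *p = buf;

        while (p < buf + len && *p != '\0') {
                cmn_err(CE_CONT, "channel: %s\n", p);
                p += strlen(p) + 1;
        }
}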
1479 1483  
1480 1484  /*
1481 1485   * Fills the data of one channel and all subscribers of that channel into
1482 1486   * a buffer. Returns -1 if the channel name is invalid and 0 on buffer overflow.
1483 1487   */
1484 1488  static int
1485 1489  evch_chgetchdata(char *chname, void *buf, size_t size)
1486 1490  {
1487 1491          struct evch_globals *eg;
1488 1492          char            *cpaddr;
1489 1493          int             bufmax;
1490 1494          int             buflen;
1491 1495          evch_chan_t     *chp;
1492 1496          sev_chinfo_t    *p = (sev_chinfo_t *)buf;
1493 1497          int             chdlen;
1494 1498          evch_subd_t     *sdp;
1495 1499          sev_subinfo_t   *subp;
1496 1500          int             idlen;
1497 1501          int             len;
1498 1502  
1499 1503          eg = zone_getspecific(evch_zone_key, curproc->p_zone);
1500 1504          ASSERT(eg != NULL);
1501 1505  
1502 1506          mutex_enter(&eg->evch_list_lock);
1503 1507          chp = (evch_chan_t *)evch_dl_search(&eg->evch_list, evch_namecmp,
1504 1508              chname);
1505 1509          if (chp == NULL) {
1506 1510                  mutex_exit(&eg->evch_list_lock);
1507 1511                  return (-1);
1508 1512          }
1509 1513          chdlen = offsetof(sev_chinfo_t, cd_subinfo);
1510 1514          if (size < chdlen) {
1511 1515                  mutex_exit(&eg->evch_list_lock);
1512 1516                  return (0);
1513 1517          }
1514 1518          p->cd_version = 0;
1515 1519          p->cd_suboffs = chdlen;
1516 1520          p->cd_uid = chp->ch_uid;
1517 1521          p->cd_gid = chp->ch_gid;
1518 1522          p->cd_perms = 0;
1519 1523          p->cd_ctime = chp->ch_ctime;
1520 1524          p->cd_maxev = chp->ch_maxev;
1521 1525          p->cd_evhwm = EVCH_EVQ_HIGHWM(chp->ch_queue);
1522 1526          p->cd_nevents = EVCH_EVQ_EVCOUNT(chp->ch_queue);
1523 1527          p->cd_maxsub = chp->ch_maxsubscr;
1524 1528          p->cd_nsub = evch_dl_getnum(&chp->ch_subscr);
1525 1529          p->cd_maxbinds = chp->ch_maxbinds;
1526 1530          p->cd_nbinds = chp->ch_bindings;
1527 1531          p->cd_holdpend = chp->ch_holdpend;
1528 1532          p->cd_limev = evch_events_max;
1529 1533          cpaddr = (char *)p + chdlen;
1530 1534          bufmax = size - chdlen;
1531 1535          buflen = 0;
1532 1536  
1533 1537          for (sdp = evch_dl_next(&chp->ch_subscr, NULL); sdp != NULL;
1534 1538              sdp = evch_dl_next(&chp->ch_subscr, sdp)) {
1535 1539                  idlen = strlen(sdp->sd_ident) + 1;
1536 1540                  len = SE_ALIGN(offsetof(sev_subinfo_t, sb_strings) + idlen +
1537 1541                      sdp->sd_clnsize);
1538 1542                  buflen += len;
1539 1543                  if (buflen >= bufmax) {
1540 1544                          mutex_exit(&eg->evch_list_lock);
1541 1545                          return (0);
1542 1546                  }
1543 1547                  subp = (sev_subinfo_t *)cpaddr;
1544 1548                  subp->sb_nextoff = len;
1545 1549                  subp->sb_stroff = offsetof(sev_subinfo_t, sb_strings);
1546 1550                  if (sdp->sd_classname) {
1547 1551                          bcopy(sdp->sd_classname, subp->sb_strings + idlen,
1548 1552                              sdp->sd_clnsize);
1549 1553                          subp->sb_clnamoff = idlen;
1550 1554                  } else {
1551 1555                          subp->sb_clnamoff = idlen - 1;
1552 1556                  }
1553 1557                  subp->sb_pid = sdp->sd_pid;
1554 1558                  subp->sb_nevents = EVCH_EVQ_EVCOUNT(sdp->sd_queue);
1555 1559                  subp->sb_evhwm = EVCH_EVQ_HIGHWM(sdp->sd_queue);
1556 1560                  subp->sb_persist = sdp->sd_persist;
1557 1561                  subp->sb_status = evch_evq_status(sdp->sd_queue);
1558 1562                  subp->sb_active = sdp->sd_active;
1559 1563                  subp->sb_dump = sdp->sd_dump;
1560 1564                  bcopy(sdp->sd_ident, subp->sb_strings, idlen);
1561 1565                  cpaddr += len;
1562 1566          }
1563 1567          mutex_exit(&eg->evch_list_lock);
1564 1568          return (chdlen + buflen);
1565 1569  }
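
Because each subscriber record carries its own length in sb_nextoff, the buffer returned by evch_chgetchdata() can be walked without further bookkeeping. A hypothetical sketch (walk_chdata() is illustrative only; "len" is assumed to be the function's non-zero return value):

/*
 * Illustrative only: iterate the sev_subinfo_t records that
 * evch_chgetchdata() packs after the sev_chinfo_t header.
 */
static void
walk_chdata(void *buf, int len)
{
        sev_chinfo_t    *ci = buf;
        char            *p = (char *)buf + ci->cd_suboffs;
        char            *end = (char *)buf + len;

        while (p < end) {
                sev_subinfo_t *si = (sev_subinfo_t *)p;
                char *strs = (char *)si + si->sb_stroff;

                /* Subscriber id first; class name is at sb_clnamoff. */
                cmn_err(CE_CONT, "subscriber %s, class %s, pid %d\n",
                    strs, strs + si->sb_clnamoff, (int)si->sb_pid);
                p += si->sb_nextoff;
        }
}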
1566 1570  
1567 1571  static void
1568 1572  evch_chsetpropnvl(evch_bind_t *bp, nvlist_t *nvl)
1569 1573  {
1570 1574          evch_chan_t *chp = bp->bd_channel;
1571 1575  
1572 1576          mutex_enter(&chp->ch_mutex);
1573 1577  
1574 1578          nvlist_free(chp->ch_propnvl);
1575 1579  
1576 1580          chp->ch_propnvl = nvl;
1577 1581          chp->ch_propnvlgen++;
1578 1582  
1579 1583          mutex_exit(&chp->ch_mutex);
1580 1584  }
1581 1585  
1582 1586  static int
1583 1587  evch_chgetpropnvl(evch_bind_t *bp, nvlist_t **nvlp, int64_t *genp)
1584 1588  {
1585 1589          evch_chan_t *chp = bp->bd_channel;
1586 1590          int rc = 0;
1587 1591  
1588 1592          mutex_enter(&chp->ch_mutex);
1589 1593  
1590 1594          if (chp->ch_propnvl != NULL)
1591 1595                  rc = (nvlist_dup(chp->ch_propnvl, nvlp, 0) == 0) ? 0 : ENOMEM;
1592 1596          else
1593 1597                  *nvlp = NULL;   /* rc still 0 */
1594 1598  
1595 1599          if (genp)
1596 1600                  *genp = chp->ch_propnvlgen;
1597 1601  
1598 1602          mutex_exit(&chp->ch_mutex);
1599 1603  
1600 1604          if (rc != 0)
1601 1605                  *nvlp = NULL;
1602 1606  
1603 1607          return (rc);
1604 1608  
1605 1609  }
1606 1610  
1607 1611  /*
1608 1612   * Init iteration of all events of a channel. This function creates a new
1609 1613   * event queue and puts all events from the channel into that queue.
1610 1614   * Subsequent calls to evch_chgetnextev will deliver the events from that
1611 1615   * queue. Only one thread per channel is allowed to read through the events.
1612 1616   * Returns 0 on success and 1 if there is already someone reading the
1613 1617   * events.
1614 1618   * If argument subid == NULL, we look for a subscriber which has
1615 1619   * flag EVCH_SUB_DUMP set.
1616 1620   */
1617 1621  /*
1618 1622   * Static variables that are used to traverse events of a channel in panic case.
1619 1623   */
1620 1624  static evch_chan_t      *evch_chan;
1621 1625  static evch_eventq_t    *evch_subq;
1622 1626  static sysevent_impl_t  *evch_curev;
1623 1627  
1624 1628  static evchanq_t *
1625 1629  evch_chrdevent_init(evch_chan_t *chp, char *subid)
1626 1630  {
1627 1631          evch_subd_t     *sdp;
1628 1632          void            *ev;
1629 1633          int             pmqstat;        /* Prev status of main queue */
1630 1634          int             psqstat;        /* Prev status of subscriber queue */
1631 1635          evchanq_t       *snp;           /* Pointer to q with snapshot of ev */
1632 1636          compare_f       compfunc;
1633 1637  
1634 1638          compfunc = subid == NULL ? evch_dumpflgcmp : evch_subidcmp;
1635 1639          if (panicstr != NULL) {
1636 1640                  evch_chan = chp;
1637 1641                  evch_subq = NULL;
1638 1642                  evch_curev = NULL;
1639 1643                  if ((sdp = (evch_subd_t *)evch_dl_search(&chp->ch_subscr,
1640 1644                      compfunc, subid)) != NULL) {
1641 1645                          evch_subq = sdp->sd_queue;
1642 1646                  }
1643 1647                  return (NULL);
1644 1648          }
1645 1649          mutex_enter(&chp->ch_mutex);
1646 1650          sdp = (evch_subd_t *)evch_dl_search(&chp->ch_subscr, compfunc, subid);
1647 1651          /*
1648 1652           * Stop main event queue and subscriber queue if not already
1649 1653           * in stop mode.
1650 1654           */
1651 1655          pmqstat = evch_evq_status(chp->ch_queue);
1652 1656          if (pmqstat == 0)
1653 1657                  evch_evq_stop(chp->ch_queue);
1654 1658          if (sdp != NULL) {
1655 1659                  psqstat = evch_evq_status(sdp->sd_queue);
1656 1660                  if (psqstat == 0)
1657 1661                          evch_evq_stop(sdp->sd_queue);
1658 1662          }
1659 1663          /*
1660 1664           * Create event queue to make a snapshot of all events in the
1661 1665           * channel.
1662 1666           */
1663 1667          snp = kmem_alloc(sizeof (evchanq_t), KM_SLEEP);
1664 1668          snp->sn_queue = evch_evq_create();
1665 1669          evch_evq_stop(snp->sn_queue);
1666 1670          /*
1667 1671           * Make a snapshot of the subscriber queue and the main event queue.
1668 1672           */
1669 1673          if (sdp != NULL) {
1670 1674                  ev = NULL;
1671 1675                  while ((ev = evch_evq_evnext(sdp->sd_queue, ev)) != NULL) {
1672 1676                          (void) evch_evq_pub(snp->sn_queue, ev, EVCH_SLEEP);
1673 1677                  }
1674 1678          }
1675 1679          ev = NULL;
1676 1680          while ((ev = evch_evq_evnext(chp->ch_queue, ev)) != NULL) {
1677 1681                  (void) evch_evq_pub(snp->sn_queue, ev, EVCH_SLEEP);
1678 1682          }
1679 1683          snp->sn_nxtev = NULL;
1680 1684          /*
1681 1685           * Restart main and subscriber queue if previously stopped
1682 1686           */
1683 1687          if (sdp != NULL && psqstat == 0)
1684 1688                  evch_evq_continue(sdp->sd_queue);
1685 1689          if (pmqstat == 0)
1686 1690                  evch_evq_continue(chp->ch_queue);
1687 1691          mutex_exit(&chp->ch_mutex);
1688 1692          return (snp);
1689 1693  }
1690 1694  
1691 1695  /*
1692 1696   * Free all resources of the event queue snapshot. In case of panic
1693 1697   * context snp must be NULL and no resources need to be freed.
1694 1698   */
1695 1699  static void
1696 1700  evch_chrdevent_fini(evchanq_t *snp)
1697 1701  {
1698 1702          if (snp != NULL) {
1699 1703                  evch_evq_destroy(snp->sn_queue);
1700 1704                  kmem_free(snp, sizeof (evchanq_t));
1701 1705          }
1702 1706  }
1703 1707  
1704 1708  /*
1705 1709   * Get address of next event from an event channel.
1706 1710   * This function might be called in a panic context. In that case
1707 1711   * no resources will be allocated and no locks grabbed.
1708 1712   * In normal operation context a snapshot of the event queues of the
1709 1713   * specified event channel will be taken.
1710 1714   */
1711 1715  static sysevent_impl_t *
1712 1716  evch_chgetnextev(evchanq_t *snp)
1713 1717  {
1714 1718          if (panicstr != NULL) {
1715 1719                  if (evch_chan == NULL)
1716 1720                          return (NULL);
1717 1721                  if (evch_subq != NULL) {
1718 1722                          /*
1719 1723                           * We have a subscriber queue. Traverse this queue
1720 1724                           * first.
1721 1725                           */
1722 1726                          if ((evch_curev = (sysevent_impl_t *)
1723 1727                              evch_evq_evnext(evch_subq, evch_curev)) != NULL) {
1724 1728                                  return (evch_curev);
1725 1729                          } else {
1726 1730                                  /*
1727 1731                                   * All subscriber events traversed. evch_subq
1728 1732                                   * == NULL indicates to take the main event
1729 1733                                   * queue now.
1730 1734                                   */
1731 1735                                  evch_subq = NULL;
1732 1736                          }
1733 1737                  }
1734 1738                  /*
1735 1739                   * Traverse the main event queue.
1736 1740                   */
1737 1741                  if ((evch_curev = (sysevent_impl_t *)
1738 1742                      evch_evq_evnext(evch_chan->ch_queue, evch_curev)) ==
1739 1743                      NULL) {
1740 1744                          evch_chan = NULL;
1741 1745                  }
1742 1746                  return (evch_curev);
1743 1747          }
1744 1748          ASSERT(snp != NULL);
1745 1749          snp->sn_nxtev = (sysevent_impl_t *)evch_evq_evnext(snp->sn_queue,
1746 1750              snp->sn_nxtev);
1747 1751          return (snp->sn_nxtev);
1748 1752  }
1749 1753  
1750 1754  /*
1751 1755   * The functions below build up the interface for the kernel to bind/unbind,
1752 1756   * subscribe/unsubscribe and publish to event channels. It consists of the
1753 1757   * following functions:
1754 1758   *
1755 1759   * sysevent_evc_bind        - Bind to a channel. Create a channel if required
1756 1760   * sysevent_evc_unbind      - Unbind from a channel. Destroy ch. if last unbind
1757 1761   * sysevent_evc_subscribe   - Subscribe to events from a channel
1758 1762   * sysevent_evc_unsubscribe - Unsubscribe from an event class
1759 1763   * sysevent_evc_publish     - Publish an event to an event channel
1760 1764   * sysevent_evc_control     - Various control operation on event channel
1761 1765   * sysevent_evc_setpropnvl  - Set channel property nvlist
1762 1766   * sysevent_evc_getpropnvl  - Get channel property nvlist
1763 1767   *
1764 1768   * The functions below are for evaluating a sysevent:
1765 1769   *
1766 1770   * sysevent_get_class_name  - Get pointer to event class string
1767 1771   * sysevent_get_subclass_name - Get pointer to event subclass string
1768 1772   * sysevent_get_seq         - Get unique event sequence number
1769 1773   * sysevent_get_time        - Get hrtime of event publish
1770 1774   * sysevent_get_size        - Get size of event structure
1771 1775   * sysevent_get_pub         - Get publisher string
1772 1776   * sysevent_get_attr_list   - Get copy of attribute list
1773 1777   *
1774 1778   * The following interfaces have stability level Project Private and
1775 1779   * allow saving the events of an event channel even in a panic case.
1776 1780   *
1777 1781   * sysevent_evc_walk_init   - Take a snapshot of the events in a channel
1778 1782   * sysevent_evc_walk_step   - Read next event from snapshot
1779 1783   * sysevent_evc_walk_fini   - Free resources from event channel snapshot
1780 1784   * sysevent_evc_event_attr  - Get event payload address and size
1781 1785   */
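
Taken together, a typical kernel consumer binds once, subscribes a callback, publishes, and tears everything down in reverse order. The sketch below is hypothetical: the channel name, subscriber id, class/subclass/vendor strings and the demo_* function names are made up, and it assumes the EVCH_CREAT bind flag and EVCH_SLEEP publish flag from sys/sysevent.h.

/* Hypothetical consumer sketch; all names and strings are made up. */
static int
demo_callback(sysevent_t *ev, void *cookie)
{
        cmn_err(CE_CONT, "event %s/%s seq %llu\n",
            sysevent_get_class_name(ev), sysevent_get_subclass_name(ev),
            (u_longlong_t)sysevent_get_seq(ev));
        return (0);
}

static int
demo_publish_one(void)
{
        evchan_t        *ch;
        nvlist_t        *attr;
        int             rc;

        if ((rc = sysevent_evc_bind("com.example:demo", &ch,
            EVCH_CREAT)) != 0)
                return (rc);

        if ((rc = sysevent_evc_subscribe(ch, "demo_sub", EC_ALL,
            demo_callback, NULL, 0)) != 0) {
                (void) sysevent_evc_unbind(ch);
                return (rc);
        }

        (void) nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP);
        (void) nvlist_add_int32(attr, "value", 42);

        /* Attributes are packed into the event; attr remains ours. */
        rc = sysevent_evc_publish(ch, "class_demo", "subclass_demo",
            "com.example", "demo_module", attr, EVCH_SLEEP);
        nvlist_free(attr);

        (void) sysevent_evc_unsubscribe(ch, "demo_sub");
        (void) sysevent_evc_unbind(ch);
        return (rc);
}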
1782 1786  /*
1783 1787   * allocate sysevent structure with optional space for attributes
1784 1788   */
1785 1789  static sysevent_impl_t *
1786 1790  sysevent_evc_alloc(const char *class, const char *subclass, const char *pub,
1787 1791      size_t pub_sz, size_t atsz, uint32_t flag)
1788 1792  {
1789 1793          int             payload_sz;
1790 1794          int             class_sz, subclass_sz;
1791 1795          int             aligned_class_sz, aligned_subclass_sz, aligned_pub_sz;
1792 1796          sysevent_impl_t *ev;
1793 1797  
1794 1798          /*
1795 1799           * Calculate and reserve space for the class, subclass and
1796 1800           * publisher strings in the event buffer
1797 1801           */
1798 1802          class_sz = strlen(class) + 1;
1799 1803          subclass_sz = strlen(subclass) + 1;
1800 1804  
1801 1805          ASSERT((class_sz <= MAX_CLASS_LEN) && (subclass_sz <=
1802 1806              MAX_SUBCLASS_LEN) && (pub_sz <= MAX_PUB_LEN));
1803 1807  
1804 1808          /* String sizes must be 64-bit aligned in the event buffer */
1805 1809          aligned_class_sz = SE_ALIGN(class_sz);
1806 1810          aligned_subclass_sz = SE_ALIGN(subclass_sz);
1807 1811          aligned_pub_sz = SE_ALIGN(pub_sz);
1808 1812  
1809 1813          /*
1810 1814           * Calculate payload size. Consider the space needed for alignment
1811 1815           * and subtract the size of the uint64_t placeholder variables of
1812 1816           * sysevent_impl_t.
1813 1817           */
1814 1818          payload_sz = (aligned_class_sz - sizeof (uint64_t)) +
1815 1819              (aligned_subclass_sz - sizeof (uint64_t)) +
1816 1820              (aligned_pub_sz - sizeof (uint64_t)) - sizeof (uint64_t) +
1817 1821              atsz;
1818 1822  
1819 1823          /*
1820 1824           * Allocate event buffer plus additional payload overhead
1821 1825           */
1822 1826          if ((ev = evch_evq_evzalloc(sizeof (sysevent_impl_t) +
1823 1827              payload_sz, flag)) == NULL) {
1824 1828                  return (NULL);
1825 1829          }
1826 1830  
1827 1831          /* Initialize the event buffer data */
1828 1832          SE_VERSION(ev) = SYS_EVENT_VERSION;
1829 1833          bcopy(class, SE_CLASS_NAME(ev), class_sz);
1830 1834  
1831 1835          SE_SUBCLASS_OFF(ev) = SE_ALIGN(offsetof(sysevent_impl_t,
1832 1836              se_class_name)) + aligned_class_sz;
1833 1837          bcopy(subclass, SE_SUBCLASS_NAME(ev), subclass_sz);
1834 1838  
1835 1839          SE_PUB_OFF(ev) = SE_SUBCLASS_OFF(ev) + aligned_subclass_sz;
1836 1840          bcopy(pub, SE_PUB_NAME(ev), pub_sz);
1837 1841  
1838 1842          SE_ATTR_PTR(ev) = (uint64_t)0;
1839 1843          SE_PAYLOAD_SZ(ev) = payload_sz;
1840 1844  
1841 1845          return (ev);
1842 1846  }
1843 1847  
1844 1848  /*
1845 1849   * Initialize event channel handling queues.
1846 1850   */
1847 1851  void
1848 1852  sysevent_evc_init()
1849 1853  {
1850 1854          evch_chinit();
1851 1855  }
1852 1856  
1853 1857  /*
1854 1858   * Second initialization step: create threads, if event channels are already
1855 1859   * created
1856 1860   */
1857 1861  void
1858 1862  sysevent_evc_thrinit()
1859 1863  {
1860 1864          evch_chinitthr();
1861 1865  }
1862 1866  
1863 1867  int
1864 1868  sysevent_evc_bind(const char *ch_name, evchan_t **scpp, uint32_t flags)
1865 1869  {
1866 1870          ASSERT(ch_name != NULL && scpp != NULL);
1867 1871          ASSERT((flags & ~EVCH_B_FLAGS) == 0);
1868 1872          return (evch_chbind(ch_name, (evch_bind_t **)scpp, flags));
1869 1873  }
1870 1874  
1871 1875  int
1872 1876  sysevent_evc_unbind(evchan_t *scp)
1873 1877  {
1874 1878          evch_bind_t *bp = (evch_bind_t *)scp;
1875 1879  
1876 1880          ASSERT(scp != NULL);
1877 1881          evch_chunsubscribe(bp, NULL, 0);
1878 1882          evch_chunbind(bp);
1879 1883  
1880 1884          return (0);
1881 1885  }
1882 1886  
1883 1887  int
1884 1888  sysevent_evc_subscribe(evchan_t *scp, const char *sid, const char *class,
1885 1889      int (*callb)(sysevent_t *ev, void *cookie),
1886 1890      void *cookie, uint32_t flags)
1887 1891  {
1888 1892          ASSERT(scp != NULL && sid != NULL && class != NULL && callb != NULL);
1889 1893          ASSERT(flags == 0);
1890 1894          if (strlen(sid) > MAX_SUBID_LEN) {
1891 1895                  return (EINVAL);
1892 1896          }
1893 1897          if (strcmp(class, EC_ALL) == 0) {
1894 1898                  class = NULL;
1895 1899          }
1896 1900          return (evch_chsubscribe((evch_bind_t *)scp, EVCH_DELKERN, sid, class,
1897 1901              (void *)callb, cookie, 0, 0));
1898 1902  }
1899 1903  
1900 1904  int
1901 1905  sysevent_evc_unsubscribe(evchan_t *scp, const char *sid)
1902 1906  {
1903 1907          ASSERT(scp != NULL && sid != NULL);
1904 1908          if (strcmp(sid, EVCH_ALLSUB) == 0) {
1905 1909                  sid = NULL;
1906 1910          }
1907 1911          evch_chunsubscribe((evch_bind_t *)scp, sid, 0);
1908 1912  
1909 1913          return (0);
1910 1914  }
1911 1915  
1912 1916  /*
1913 1917   * Publish kernel event. Returns 0 on success, error code else.
1914 1918   * Optional attribute data is packed into the event structure.
1915 1919   */
1916 1920  int
1917 1921  sysevent_evc_publish(evchan_t *scp, const char *class, const char *subclass,
1918 1922      const char *vendor, const char *pubs, nvlist_t *attr, uint32_t flags)
1919 1923  {
1920 1924          sysevent_impl_t *evp;
1921 1925          char            pub[MAX_PUB_LEN];
1922 1926          int             pub_sz;         /* includes terminating 0 */
1923 1927          int             km_flags;
1924 1928          size_t          asz = 0;
1925 1929          uint64_t        attr_offset;
1926 1930          caddr_t         patt;
1927 1931          int             err;
1928 1932  
1929 1933          ASSERT(scp != NULL && class != NULL && subclass != NULL &&
1930 1934              vendor != NULL && pubs != NULL);
1931 1935  
1932 1936          ASSERT((flags & ~(EVCH_SLEEP | EVCH_NOSLEEP | EVCH_TRYHARD |
1933 1937              EVCH_QWAIT)) == 0);
1934 1938  
1935 1939          km_flags = flags & (EVCH_SLEEP | EVCH_NOSLEEP | EVCH_TRYHARD);
1936 1940          ASSERT(km_flags == EVCH_SLEEP || km_flags == EVCH_NOSLEEP ||
1937 1941              km_flags == EVCH_TRYHARD);
1938 1942  
1939 1943          pub_sz = snprintf(pub, MAX_PUB_LEN, "%s:kern:%s", vendor, pubs) + 1;
1940 1944          if (pub_sz > MAX_PUB_LEN)
1941 1945                  return (EINVAL);
1942 1946  
1943 1947          if (attr != NULL) {
1944 1948                  if ((err = nvlist_size(attr, &asz, NV_ENCODE_NATIVE)) != 0) {
1945 1949                          return (err);
1946 1950                  }
1947 1951          }
1948 1952          evp = sysevent_evc_alloc(class, subclass, pub, pub_sz, asz, km_flags);
1949 1953          if (evp == NULL) {
1950 1954                  return (ENOMEM);
1951 1955          }
1952 1956          if (attr != NULL) {
1953 1957                  /*
1954 1958                   * Pack attributes into event buffer. Event buffer already
1955 1959                   * has enough room for the packed nvlist.
1956 1960                   */
1957 1961                  attr_offset = SE_ATTR_OFF(evp);
1958 1962                  patt = (caddr_t)evp + attr_offset;
1959 1963  
1960 1964                  err = nvlist_pack(attr, &patt, &asz, NV_ENCODE_NATIVE,
1961 1965                      km_flags & EVCH_SLEEP ? KM_SLEEP : KM_NOSLEEP);
1962 1966  
1963 1967                  ASSERT(err != ENOMEM);
1964 1968  
1965 1969                  if (err != 0) {
1966 1970                          return (EINVAL);
1967 1971                  }
1968 1972  
1969 1973                  evp->seh_attr_off = attr_offset;
1970 1974                  SE_FLAG(evp) = SE_PACKED_BUF;
1971 1975          }
1972 1976          return (evch_chpublish((evch_bind_t *)scp, evp, flags));
1973 1977  }
1974 1978  
1975 1979  int
1976 1980  sysevent_evc_control(evchan_t *scp, int cmd, ...)
1977 1981  {
1978 1982          va_list         ap;
1979      -        evch_chan_t     *chp = ((evch_bind_t *)scp)->bd_channel;
     1983 +        evch_chan_t     *chp;
1980 1984          uint32_t        *chlenp;
1981 1985          uint32_t        chlen;
1982 1986          uint32_t        ochlen;
1983 1987          int             rc = 0;
1984 1988  
1985 1989          if (scp == NULL) {
1986 1990                  return (EINVAL);
1987 1991          }
1988 1992  
     1993 +        chp = ((evch_bind_t *)scp)->bd_channel;
     1994 +
1989 1995          va_start(ap, cmd);
1990 1996          mutex_enter(&chp->ch_mutex);
1991 1997          switch (cmd) {
1992 1998          case EVCH_GET_CHAN_LEN:
1993 1999                  chlenp = va_arg(ap, uint32_t *);
1994 2000                  *chlenp = chp->ch_maxev;
1995 2001                  break;
1996 2002          case EVCH_SET_CHAN_LEN:
1997 2003                  chlen = va_arg(ap, uint32_t);
1998 2004                  ochlen = chp->ch_maxev;
1999 2005                  chp->ch_maxev = min(chlen, evch_events_max);
2000 2006                  if (ochlen < chp->ch_maxev) {
2001 2007                          cv_signal(&chp->ch_pubcv);
2002 2008                  }
2003 2009                  break;
2004 2010          case EVCH_GET_CHAN_LEN_MAX:
2005 2011                  *va_arg(ap, uint32_t *) = evch_events_max;
2006 2012                  break;
2007 2013          default:
2008 2014                  rc = EINVAL;
2009 2015          }
2010 2016  
2011 2017          mutex_exit(&chp->ch_mutex);
2012 2018          va_end(ap);
2013 2019          return (rc);
2014 2020  }
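
This is the function changed by this fix: the channel pointer is now taken from scp only after the NULL check, so a NULL handle fails cleanly instead of being dereferenced. A small hypothetical sketch (demo_control() is illustrative only):

/* Hypothetical sketch: exercise sysevent_evc_control(). */
static void
demo_control(evchan_t *ch)
{
        uint32_t len, maxlen;

        /* With this fix a NULL handle returns EINVAL, no dereference. */
        VERIFY(sysevent_evc_control(NULL, EVCH_GET_CHAN_LEN, &len) ==
            EINVAL);

        (void) sysevent_evc_control(ch, EVCH_GET_CHAN_LEN_MAX, &maxlen);
        (void) sysevent_evc_control(ch, EVCH_SET_CHAN_LEN, maxlen);
        (void) sysevent_evc_control(ch, EVCH_GET_CHAN_LEN, &len);
}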
2015 2021  
2016 2022  int
2017 2023  sysevent_evc_setpropnvl(evchan_t *scp, nvlist_t *nvl)
2018 2024  {
2019 2025          nvlist_t *nvlcp = nvl;
2020 2026  
2021 2027          if (nvl != NULL && nvlist_dup(nvl, &nvlcp, 0) != 0)
2022 2028                  return (ENOMEM);
2023 2029  
2024 2030          evch_chsetpropnvl((evch_bind_t *)scp, nvlcp);
2025 2031  
2026 2032          return (0);
2027 2033  }
2028 2034  
2029 2035  int
2030 2036  sysevent_evc_getpropnvl(evchan_t *scp, nvlist_t **nvlp)
2031 2037  {
2032 2038          return (evch_chgetpropnvl((evch_bind_t *)scp, nvlp, NULL));
2033 2039  }
2034 2040  
2035 2041  /*
2036 2042   * Project private interface to take a snapshot of all events of the
2037 2043   * specified event channel. Argument subscr may be a subscriber id, the empty
2038 2044   * string "", or NULL. The empty string indicates that no subscriber is
2039 2045   * selected, for example if a previous subscriber died. sysevent_evc_walk_next()
2040 2046   * will deliver events from the main event queue in this case. If subscr is
2041 2047   * NULL, the subscriber with the EVCH_SUB_DUMP flag set (subd->sd_dump != 0)
2042 2048   * will be selected.
2043 2049   *
2044 2050   * In panic case this function returns NULL. This is legal. The NULL has
2045 2051   * to be delivered to sysevent_evc_walk_step() and sysevent_evc_walk_fini().
2046 2052   */
2047 2053  evchanq_t *
2048 2054  sysevent_evc_walk_init(evchan_t *scp, char *subscr)
2049 2055  {
2050 2056          if (panicstr != NULL && scp == NULL)
2051 2057                  return (NULL);
2052 2058          ASSERT(scp != NULL);
2053 2059          return (evch_chrdevent_init(((evch_bind_t *)scp)->bd_channel, subscr));
2054 2060  }
2055 2061  
2056 2062  /*
2057 2063   * Project private interface to read events from a previously taken
2058 2064   * snapshot (with sysevent_evc_walk_init). In case of panic events
2059 2065   * are retrieved directly from the channel data structures. No resources
2060 2066   * are allocated and no mutexes are grabbed in panic context.
2061 2067   */
2062 2068  sysevent_t *
2063 2069  sysevent_evc_walk_step(evchanq_t *evcq)
2064 2070  {
2065 2071          return ((sysevent_t *)evch_chgetnextev(evcq));
2066 2072  }
2067 2073  
2068 2074  /*
2069 2075   * Project private interface to free a previously taken snapshot.
2070 2076   */
2071 2077  void
2072 2078  sysevent_evc_walk_fini(evchanq_t *evcq)
2073 2079  {
2074 2080          evch_chrdevent_fini(evcq);
2075 2081  }
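
The three walk functions are intended to be used as a unit, including in panic context where sysevent_evc_walk_init() may legitimately return NULL. A hypothetical sketch (demo_walk() is not part of this file):

/* Hypothetical sketch: snapshot a channel and step through its events. */
static void
demo_walk(evchan_t *ch)
{
        evchanq_t       *snap;
        sysevent_t      *ev;
        size_t          plsz;
        char            *payload;

        /* subscr == NULL selects the EVCH_SUB_DUMP subscriber, if any. */
        snap = sysevent_evc_walk_init(ch, NULL);
        while ((ev = sysevent_evc_walk_step(snap)) != NULL) {
                payload = sysevent_evc_event_attr(ev, &plsz);
                cmn_err(CE_CONT, "%s/%s, %lu payload bytes\n",
                    sysevent_get_class_name(ev),
                    sysevent_get_subclass_name(ev),
                    payload != NULL ? (ulong_t)plsz : 0UL);
        }
        sysevent_evc_walk_fini(snap);
}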
2076 2082  
2077 2083  /*
2078 2084   * Get address and size of an event payload. Returns NULL when no
2079 2085   * payload present.
2080 2086   */
2081 2087  char *
2082 2088  sysevent_evc_event_attr(sysevent_t *ev, size_t *plsize)
2083 2089  {
2084 2090          char    *attrp;
2085 2091          size_t  aoff;
2086 2092          size_t  asz;
2087 2093  
2088 2094          aoff = SE_ATTR_OFF(ev);
2089 2095          attrp = (char *)ev + aoff;
2090 2096          asz = *plsize = SE_SIZE(ev) - aoff;
2091 2097          return (asz ? attrp : NULL);
2092 2098  }
2093 2099  
2094 2100  /*
2095 2101   * sysevent_get_class_name - Get class name string
2096 2102   */
2097 2103  char *
2098 2104  sysevent_get_class_name(sysevent_t *ev)
2099 2105  {
2100 2106          return (SE_CLASS_NAME(ev));
2101 2107  }
2102 2108  
2103 2109  /*
2104 2110   * sysevent_get_subclass_name - Get subclass name string
2105 2111   */
2106 2112  char *
2107 2113  sysevent_get_subclass_name(sysevent_t *ev)
2108 2114  {
2109 2115          return (SE_SUBCLASS_NAME(ev));
2110 2116  }
2111 2117  
2112 2118  /*
2113 2119   * sysevent_get_seq - Get event sequence id
2114 2120   */
2115 2121  uint64_t
2116 2122  sysevent_get_seq(sysevent_t *ev)
2117 2123  {
2118 2124          return (SE_SEQ(ev));
2119 2125  }
2120 2126  
2121 2127  /*
2122 2128   * sysevent_get_time - Get event timestamp
2123 2129   */
2124 2130  void
2125 2131  sysevent_get_time(sysevent_t *ev, hrtime_t *etime)
2126 2132  {
2127 2133          *etime = SE_TIME(ev);
2128 2134  }
2129 2135  
2130 2136  /*
2131 2137   * sysevent_get_size - Get event buffer size
2132 2138   */
2133 2139  size_t
2134 2140  sysevent_get_size(sysevent_t *ev)
2135 2141  {
2136 2142          return ((size_t)SE_SIZE(ev));
2137 2143  }
2138 2144  
2139 2145  /*
2140 2146   * sysevent_get_pub - Get publisher name string
2141 2147   */
2142 2148  char *
2143 2149  sysevent_get_pub(sysevent_t *ev)
2144 2150  {
2145 2151          return (SE_PUB_NAME(ev));
2146 2152  }
2147 2153  
2148 2154  /*
2149 2155   * sysevent_get_attr_list - stores address of a copy of the attribute list
2150 2156   * associated with the given sysevent buffer. The list must be freed by the
2151 2157   * caller.
2152 2158   */
2153 2159  int
2154 2160  sysevent_get_attr_list(sysevent_t *ev, nvlist_t **nvlist)
2155 2161  {
2156 2162          int             error;
2157 2163          caddr_t         attr;
2158 2164          size_t          attr_len;
2159 2165          uint64_t        attr_offset;
2160 2166  
2161 2167          *nvlist = NULL;
2162 2168          if (SE_FLAG(ev) != SE_PACKED_BUF) {
2163 2169                  return (EINVAL);
2164 2170          }
2165 2171          attr_offset = SE_ATTR_OFF(ev);
2166 2172          if (SE_SIZE(ev) == attr_offset) {
2167 2173                  return (EINVAL);
2168 2174          }
2169 2175  
2170 2176          /* unpack nvlist */
2171 2177          attr = (caddr_t)ev + attr_offset;
2172 2178          attr_len = SE_SIZE(ev) - attr_offset;
2173 2179          if ((error = nvlist_unpack(attr, attr_len, nvlist, 0)) != 0) {
2174 2180                  error = error != ENOMEM ? EINVAL : error;
2175 2181                  return (error);
2176 2182          }
2177 2183          return (0);
2178 2184  }
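
The nvlist stored by sysevent_get_attr_list() is a copy, so the subscriber owns it and must release it with nvlist_free(). A hypothetical callback sketch (demo_attr_cb() and the "value" attribute name are illustrative):

/* Hypothetical subscriber callback: unpack and release attributes. */
static int
demo_attr_cb(sysevent_t *ev, void *cookie)
{
        nvlist_t        *attr = NULL;
        int32_t         val;

        if (sysevent_get_attr_list(ev, &attr) == 0) {
                if (nvlist_lookup_int32(attr, "value", &val) == 0)
                        cmn_err(CE_CONT, "value = %d\n", val);
                nvlist_free(attr);
        }
        return (0);
}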
2179 2185  
2180 2186  /*
2181 2187   * Functions called by the sysevent driver for general purpose event channels
2182 2188   *
2183 2189   * evch_usrchanopen     - Create/Bind to an event channel
2184 2190   * evch_usrchanclose    - Unbind/Destroy event channel
2185 2191   * evch_usrallocev      - Allocate event data structure
2186 2192   * evch_usrfreeev       - Free event data structure
2187 2193   * evch_usrpostevent    - Publish event
2188 2194   * evch_usrsubscribe    - Subscribe (register callback function)
2189 2195   * evch_usrunsubscribe  - Unsubscribe
2190 2196   * evch_usrcontrol_set  - Set channel properties
2191 2197   * evch_usrcontrol_get  - Get channel properties
2192 2198   * evch_usrgetchnames   - Get list of channel names
2193 2199   * evch_usrgetchdata    - Get data of an event channel
2194 2200   * evch_usrsetpropnvl   - Set channel properties nvlist
2195 2201   * evch_usrgetpropnvl   - Get channel properties nvlist
2196 2202   */
2197 2203  evchan_t *
2198 2204  evch_usrchanopen(const char *name, uint32_t flags, int *err)
2199 2205  {
2200 2206          evch_bind_t *bp = NULL;
2201 2207  
2202 2208          *err = evch_chbind(name, &bp, flags);
2203 2209          return ((evchan_t *)bp);
2204 2210  }
2205 2211  
2206 2212  /*
2207 2213   * Unbind from the channel.
2208 2214   */
2209 2215  void
2210 2216  evch_usrchanclose(evchan_t *cbp)
2211 2217  {
2212 2218          evch_chunbind((evch_bind_t *)cbp);
2213 2219  }
2214 2220  
2215 2221  /*
2216 2222   * Allocates log_evch_eventq_t structure but returns the pointer of the embedded
2217 2223   * sysevent_impl_t structure as the opaque sysevent_t * data type
2218 2224   */
2219 2225  sysevent_impl_t *
2220 2226  evch_usrallocev(size_t evsize, uint32_t flags)
2221 2227  {
2222 2228          return ((sysevent_impl_t *)evch_evq_evzalloc(evsize, flags));
2223 2229  }
2224 2230  
2225 2231  /*
2226 2232   * Free evch_eventq_t structure
2227 2233   */
2228 2234  void
2229 2235  evch_usrfreeev(sysevent_impl_t *ev)
2230 2236  {
2231 2237          evch_evq_evfree((void *)ev);
2232 2238  }
2233 2239  
2234 2240  /*
2235 2241   * Posts an event to the given channel. The event structure has to be
2236 2242   * allocated by evch_usrallocev(). Returns zero on success, otherwise an
2237 2243   * error code. Attributes have to be packed and included in the event structure.
2238 2244   *
2239 2245   */
2240 2246  int
2241 2247  evch_usrpostevent(evchan_t *bp, sysevent_impl_t *ev, uint32_t flags)
2242 2248  {
2243 2249          return (evch_chpublish((evch_bind_t *)bp, ev, flags));
2244 2250  }
2245 2251  
2246 2252  /*
2247 2253   * Subscribe function for user land subscriptions
2248 2254   */
2249 2255  int
2250 2256  evch_usrsubscribe(evchan_t *bp, const char *sid, const char *class,
2251 2257      int d, uint32_t flags)
2252 2258  {
2253 2259          door_handle_t   dh = door_ki_lookup(d);
2254 2260          int             rv;
2255 2261  
2256 2262          if (dh == NULL) {
2257 2263                  return (EINVAL);
2258 2264          }
2259 2265          if ((rv = evch_chsubscribe((evch_bind_t *)bp, EVCH_DELDOOR, sid, class,
2260 2266              (void *)dh, NULL, flags, curproc->p_pid)) != 0) {
2261 2267                  door_ki_rele(dh);
2262 2268          }
2263 2269          return (rv);
2264 2270  }
2265 2271  
2266 2272  /*
2267 2273   * Flag can be EVCH_SUB_KEEP or 0. EVCH_SUB_KEEP preserves persistent
2268 2274   * subscribers
2269 2275   */
2270 2276  void
2271 2277  evch_usrunsubscribe(evchan_t *bp, const char *subid, uint32_t flags)
2272 2278  {
2273 2279          evch_chunsubscribe((evch_bind_t *)bp, subid, flags);
2274 2280  }
2275 2281  
2276 2282  /*ARGSUSED*/
2277 2283  int
2278 2284  evch_usrcontrol_set(evchan_t *bp, int cmd, uint32_t value)
2279 2285  {
2280 2286          evch_chan_t     *chp = ((evch_bind_t *)bp)->bd_channel;
2281 2287          uid_t           uid = crgetuid(curthread->t_cred);
2282 2288          int             rc = 0;
2283 2289  
2284 2290          mutex_enter(&chp->ch_mutex);
2285 2291          switch (cmd) {
2286 2292          case EVCH_SET_CHAN_LEN:
2287 2293                  if (uid && uid != chp->ch_uid) {
2288 2294                          rc = EACCES;
2289 2295                          break;
2290 2296                  }
2291 2297                  chp->ch_maxev = min(value, evch_events_max);
2292 2298                  break;
2293 2299          default:
2294 2300                  rc = EINVAL;
2295 2301          }
2296 2302          mutex_exit(&chp->ch_mutex);
2297 2303          return (rc);
2298 2304  }
2299 2305  
2300 2306  /*ARGSUSED*/
2301 2307  int
2302 2308  evch_usrcontrol_get(evchan_t *bp, int cmd, uint32_t *value)
2303 2309  {
2304 2310          evch_chan_t     *chp = ((evch_bind_t *)bp)->bd_channel;
2305 2311          int             rc = 0;
2306 2312  
2307 2313          mutex_enter(&chp->ch_mutex);
2308 2314          switch (cmd) {
2309 2315          case EVCH_GET_CHAN_LEN:
2310 2316                  *value = chp->ch_maxev;
2311 2317                  break;
2312 2318          case EVCH_GET_CHAN_LEN_MAX:
2313 2319                  *value = evch_events_max;
2314 2320                  break;
2315 2321          default:
2316 2322                  rc = EINVAL;
2317 2323          }
2318 2324          mutex_exit(&chp->ch_mutex);
2319 2325          return (rc);
2320 2326  }
2321 2327  
2322 2328  int
2323 2329  evch_usrgetchnames(char *buf, size_t size)
2324 2330  {
2325 2331          return (evch_chgetnames(buf, size));
2326 2332  }
2327 2333  
2328 2334  int
2329 2335  evch_usrgetchdata(char *chname, void *buf, size_t size)
2330 2336  {
2331 2337          return (evch_chgetchdata(chname, buf, size));
2332 2338  }
2333 2339  
2334 2340  void
2335 2341  evch_usrsetpropnvl(evchan_t *bp, nvlist_t *nvl)
2336 2342  {
2337 2343          evch_chsetpropnvl((evch_bind_t *)bp, nvl);
2338 2344  }
2339 2345  
2340 2346  int
2341 2347  evch_usrgetpropnvl(evchan_t *bp, nvlist_t **nvlp, int64_t *genp)
2342 2348  {
2343 2349          return (evch_chgetpropnvl((evch_bind_t *)bp, nvlp, genp));
2344 2350  }