1 /*
   2  * sppp.c - Solaris STREAMS PPP multiplexing pseudo-driver
   3  *
   4  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
   5  * Use is subject to license terms.
   6  * Copyright (c) 2016 by Delphix. All rights reserved.
   7  *
   8  * Permission to use, copy, modify, and distribute this software and its
   9  * documentation is hereby granted, provided that the above copyright
  10  * notice appears in all copies.
  11  *
  12  * SUN MAKES NO REPRESENTATION OR WARRANTIES ABOUT THE SUITABILITY OF
  13  * THE SOFTWARE, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
  14  * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
  15  * PARTICULAR PURPOSE, OR NON-INFRINGEMENT.  SUN SHALL NOT BE LIABLE FOR
  16  * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
  17  * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES
  18  *
  19  * Copyright (c) 1994 The Australian National University.
  20  * All rights reserved.
  21  *
  22  * Permission to use, copy, modify, and distribute this software and its
  23  * documentation is hereby granted, provided that the above copyright
  24  * notice appears in all copies.  This software is provided without any
  25  * warranty, express or implied. The Australian National University
  26  * makes no representations about the suitability of this software for
  27  * any purpose.
  28  *
  29  * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
  30  * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
  31  * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
  32  * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY
  33  * OF SUCH DAMAGE.
  34  *
  35  * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
  36  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
  37  * AND FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
  38  * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
  39  * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
  40  * OR MODIFICATIONS.
  41  *
  42  * This driver is derived from the original SVR4 STREAMS PPP driver
  43  * originally written by Paul Mackerras <paul.mackerras@cs.anu.edu.au>.
  44  *
  45  * Adi Masputra <adi.masputra@sun.com> rewrote and restructured the code
  46  * for improved performance and scalability.
  47  */
  48 
  49 #define RCSID   "$Id: sppp.c,v 1.0 2000/05/08 01:10:12 masputra Exp $"
  50 
  51 #include <sys/types.h>
  52 #include <sys/debug.h>
  53 #include <sys/param.h>
  54 #include <sys/stat.h>
  55 #include <sys/stream.h>
  56 #include <sys/stropts.h>
  57 #include <sys/sysmacros.h>
  58 #include <sys/errno.h>
  59 #include <sys/time.h>
  60 #include <sys/cmn_err.h>
  61 #include <sys/kmem.h>
  62 #include <sys/conf.h>
  63 #include <sys/dlpi.h>
  64 #include <sys/ddi.h>
  65 #include <sys/kstat.h>
  66 #include <sys/strsun.h>
  67 #include <sys/ethernet.h>
  68 #include <sys/policy.h>
  69 #include <sys/zone.h>
  70 #include <net/ppp_defs.h>
  71 #include <net/pppio.h>
  72 #include "sppp.h"
  73 #include "s_common.h"
  74 
  75 /*
  76  * This is used to tag official Solaris sources.  Please do not define
  77  * "INTERNAL_BUILD" when building this software outside of Sun Microsystems.
  78  */
  79 #ifdef INTERNAL_BUILD
  80 /* MODINFO is limited to 32 characters. */
  81 const char sppp_module_description[] = "PPP 4.0 mux";
  82 #else /* INTERNAL_BUILD */
  83 const char sppp_module_description[] = "ANU PPP mux";
  84 
  85 /* LINTED */
  86 static const char buildtime[] = "Built " __DATE__ " at " __TIME__
  87 #ifdef DEBUG
  88 " DEBUG"
  89 #endif
  90 "\n";
  91 #endif /* INTERNAL_BUILD */
  92 
  93 static void     sppp_inner_ioctl(queue_t *, mblk_t *);
  94 static void     sppp_outer_ioctl(queue_t *, mblk_t *);
  95 static queue_t  *sppp_send(queue_t *, mblk_t **, spppstr_t *);
  96 static queue_t  *sppp_recv(queue_t *, mblk_t **, spppstr_t *);
  97 static void     sppp_recv_nondata(queue_t *, mblk_t *, spppstr_t *);
  98 static queue_t  *sppp_outpkt(queue_t *, mblk_t **, int, spppstr_t *);
  99 static spppstr_t *sppp_inpkt(queue_t *, mblk_t *, spppstr_t *);
 100 static int      sppp_kstat_update(kstat_t *, int);
 101 static void     sppp_release_pkts(sppa_t *, uint16_t);
 102 
 103 /*
 104  * sps_list contains the list of active per-stream instance state structures
 105  * ordered on the minor device number (see sppp.h for details). All streams
 106  * opened to this driver are threaded together in this list.
 107  */
 108 static spppstr_t *sps_list = NULL;
 109 /*
 110  * ppa_list contains the list of active per-attachment instance state
 111  * structures ordered on the ppa id number (see sppp.h for details). All of
 112  * the ppa structures created once per PPPIO_NEWPPA ioctl are threaded together
 113  * in this list. There is exactly one ppa structure for a given PPP interface,
 114  * and multiple sps streams (upper streams) may share a ppa by performing
 115  * an attachment explicitly (PPPIO_ATTACH) or implicitly (DL_ATTACH_REQ).
 116  */
 117 static sppa_t *ppa_list = NULL;
 118 
 119 static const char *kstats_names[] = { SPPP_KSTATS_NAMES };
 120 static const char *kstats64_names[] = { SPPP_KSTATS64_NAMES };
 121 
 122 /*
 123  * map proto (which is an IANA defined ppp network protocol) to
 124  * a bit position indicated by NP_* in ppa_npflag
 125  */
 126 static uint32_t
 127 sppp_ppp2np(uint16_t proto)
 128 {
 129         switch (proto) {
 130         case PPP_IP:
 131                 return (NP_IP);
 132         case PPP_IPV6:
 133                 return (NP_IPV6);
 134         default:
 135                 return (0);
 136         }
 137 }
 138 
 139 /*
 140  * sppp_open()
 141  *
 142  * MT-Perimeters:
 143  *    exclusive inner, exclusive outer.
 144  *
 145  * Description:
 146  *    Common open procedure for module.
 147  */
 148 /* ARGSUSED */
 149 int
 150 sppp_open(queue_t *q, dev_t *devp, int oflag, int sflag, cred_t *credp)
 151 {
 152         spppstr_t       *sps;
 153         spppstr_t       **nextmn;
 154         minor_t         mn;
 155 
 156         ASSERT(q != NULL && devp != NULL);
 157         ASSERT(sflag != MODOPEN);
 158 
 159         if (q->q_ptr != NULL) {
 160                 return (0);             /* already open */
 161         }
 162         if (sflag != CLONEOPEN) {
 163                 return (OPENFAIL);
 164         }
 165         /*
 166          * The sps list is sorted using the minor number as the key. The
 167          * following code walks the list to find the lowest valued minor
 168          * number available to be used.
 169          */
 170         mn = 0;
 171         for (nextmn = &sps_list; (sps = *nextmn) != NULL;
 172             nextmn = &sps->sps_nextmn) {
 173                 if (sps->sps_mn_id != mn) {
 174                         break;
 175                 }
 176                 ++mn;
 177         }
 178         sps = (spppstr_t *)kmem_zalloc(sizeof (spppstr_t), KM_SLEEP);
 179         ASSERT(sps != NULL);            /* KM_SLEEP must never return NULL */
 180         sps->sps_nextmn = *nextmn;   /* insert stream in global list */
 181         *nextmn = sps;
 182         sps->sps_mn_id = mn;         /* save minor id for this stream */
 183         sps->sps_rq = q;             /* save read queue pointer */
 184         sps->sps_sap = -1;           /* no sap bound to stream */
 185         sps->sps_dlstate = DL_UNATTACHED; /* dlpi state is unattached */
 186         sps->sps_npmode = NPMODE_DROP;       /* drop all packets initially */
 187         sps->sps_zoneid = crgetzoneid(credp);
 188         q->q_ptr = WR(q)->q_ptr = (caddr_t)sps;
 189         /*
 190          * We explicitly disable the automatic queue scheduling for the
 191          * write-side to obtain complete control over queuing during transmit.
 192          * Packets will be queued at the upper write queue and the service
 193          * routine will not be called until it gets scheduled by having the
  194  * lower write service routine call qenable(WR(uq)) for all streams
 195          * attached to the same ppa instance.
 196          */
 197         noenable(WR(q));
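              /* Hand the chosen minor number back to the opener via the clone device. */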
 198         *devp = makedevice(getmajor(*devp), mn);
 199         qprocson(q);
 200         return (0);
 201 }
 202 
 203 /*
 204  * Free storage used by a PPA.  This is not called until the last PPA
 205  * user closes their connection or reattaches to a different PPA.
 206  */
 207 static void
 208 sppp_free_ppa(sppa_t *ppa)
 209 {
 210         sppa_t **nextppa;
 211 
 212         ASSERT(ppa->ppa_refcnt == 1);
 213         if (ppa->ppa_kstats != NULL) {
 214                 kstat_delete(ppa->ppa_kstats);
 215                 ppa->ppa_kstats = NULL;
 216         }
 217         mutex_destroy(&ppa->ppa_sta_lock);
 218         mutex_destroy(&ppa->ppa_npmutex);
 219         rw_destroy(&ppa->ppa_sib_lock);
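              /* Unlink this ppa from the global ppa_list before freeing it. */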
 220         nextppa = &ppa_list;
 221         while (*nextppa != NULL) {
 222                 if (*nextppa == ppa) {
 223                         *nextppa = ppa->ppa_nextppa;
 224                         break;
 225                 }
 226                 nextppa = &(*nextppa)->ppa_nextppa;
 227         }
 228         kmem_free(ppa, sizeof (*ppa));
 229 }
 230 
 231 /*
 232  * Create a new PPA.  Caller must be exclusive on outer perimeter.
 233  */
 234 sppa_t *
 235 sppp_create_ppa(uint32_t ppa_id, zoneid_t zoneid)
 236 {
 237         sppa_t *ppa;
 238         sppa_t *curppa;
 239         sppa_t **availppa;
 240         char unit[32];          /* Unit name */
 241         const char **cpp;
 242         kstat_t *ksp;
 243         kstat_named_t *knt;
 244 
 245         /*
 246          * NOTE: unit *must* be named for the driver
 247          * name plus the ppa number so that netstat
 248          * can find the statistics.
 249          */
 250         (void) sprintf(unit, "%s" "%d", PPP_DRV_NAME, ppa_id);
 251         /*
 252          * Make sure we can allocate a buffer to
 253          * contain the ppa to be sent upstream, as
 254          * well as the actual ppa structure and its
 255          * associated kstat structure.
 256          */
 257         ppa = (sppa_t *)kmem_zalloc(sizeof (sppa_t),
 258             KM_NOSLEEP);
 259         ksp = kstat_create(PPP_DRV_NAME, ppa_id, unit, "net", KSTAT_TYPE_NAMED,
 260             sizeof (sppp_kstats_t) / sizeof (kstat_named_t), 0);
 261 
 262         if (ppa == NULL || ksp == NULL) {
 263                 if (ppa != NULL) {
 264                         kmem_free(ppa, sizeof (sppa_t));
 265                 }
 266                 if (ksp != NULL) {
 267                         kstat_delete(ksp);
 268                 }
 269                 return (NULL);
 270         }
 271         ppa->ppa_kstats = ksp;               /* chain kstat structure */
 272         ppa->ppa_ppa_id = ppa_id;    /* record ppa id */
 273         ppa->ppa_zoneid = zoneid;    /* zone that owns this PPA */
 274         ppa->ppa_mtu = PPP_MAXMTU;   /* 65535-(PPP_HDRLEN+PPP_FCSLEN) */
 275         ppa->ppa_mru = PPP_MAXMRU;   /* 65000 */
 276 
 277         mutex_init(&ppa->ppa_sta_lock, NULL, MUTEX_DRIVER, NULL);
 278         mutex_init(&ppa->ppa_npmutex, NULL, MUTEX_DRIVER, NULL);
 279         rw_init(&ppa->ppa_sib_lock, NULL, RW_DRIVER, NULL);
 280 
 281         /*
 282          * Prepare and install kstat counters.  Note that for netstat
 283          * -i to work, there needs to be "ipackets", "opackets",
 284          * "ierrors", and "oerrors" kstat named variables.
 285          */
 286         knt = (kstat_named_t *)ksp->ks_data;
 287         for (cpp = kstats_names; cpp < kstats_names + Dim(kstats_names);
 288             cpp++) {
 289                 kstat_named_init(knt, *cpp, KSTAT_DATA_UINT32);
 290                 knt++;
 291         }
 292         for (cpp = kstats64_names; cpp < kstats64_names + Dim(kstats64_names);
 293             cpp++) {
 294                 kstat_named_init(knt, *cpp, KSTAT_DATA_UINT64);
 295                 knt++;
 296         }
 297         ksp->ks_update = sppp_kstat_update;
 298         ksp->ks_private = (void *)ppa;
 299         kstat_install(ksp);
 300 
 301         /* link to the next ppa and insert into global list */
 302         availppa = &ppa_list;
 303         while ((curppa = *availppa) != NULL) {
 304                 if (ppa_id < curppa->ppa_ppa_id)
 305                         break;
 306                 availppa = &curppa->ppa_nextppa;
 307         }
 308         ppa->ppa_nextppa = *availppa;
 309         *availppa = ppa;
 310         return (ppa);
 311 }
 312 
 313 /*
 314  * sppp_close()
 315  *
 316  * MT-Perimeters:
 317  *    exclusive inner, exclusive outer.
 318  *
 319  * Description:
 320  *    Common close procedure for module.
 321  */
 322 /* ARGSUSED */
 323 int
 324 sppp_close(queue_t *q, int flags __unused, cred_t *credp __unused)
 325 {
 326         spppstr_t       *sps;
 327         spppstr_t       **nextmn;
 328         spppstr_t       *sib;
 329         sppa_t          *ppa;
 330         mblk_t          *mp;
 331 
 332         ASSERT(q != NULL && q->q_ptr != NULL);
 333         sps = (spppstr_t *)q->q_ptr;
 334         qprocsoff(q);
 335 
 336         ppa = sps->sps_ppa;
 337         if (ppa == NULL) {
 338                 ASSERT(!IS_SPS_CONTROL(sps));
 339                 goto close_unattached;
 340         }
 341         if (IS_SPS_CONTROL(sps)) {
 342                 uint32_t        cnt = 0;
 343 
 344                 ASSERT(ppa != NULL);
 345                 ASSERT(ppa->ppa_ctl == sps);
 346                 ppa->ppa_ctl = NULL;
 347                 /*
 348                  * STREAMS framework always issues I_UNLINK prior to close,
 349                  * since we only allow I_LINK under the control stream.
 350                  * A given ppa structure has at most one lower stream pointed
 351                  * by the ppa_lower_wq field, because we only allow a single
 352                  * linkage (I_LINK) to be done on the control stream.
 353                  */
 354                 ASSERT(ppa->ppa_lower_wq == NULL);
 355                 /*
 356                  * Walk through all of sibling streams attached to this ppa,
 357                  * and remove all references to this ppa. We have exclusive
 358                  * access for the entire driver here, so there's no need
 359                  * to hold ppa_sib_lock.
 360                  */
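                      /* Count the control stream's own reference to the ppa. */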
 361                 cnt++;
 362                 sib = ppa->ppa_streams;
 363                 while (sib != NULL) {
 364                         ASSERT(ppa == sib->sps_ppa);
 365                         sib->sps_npmode = NPMODE_DROP;
 366                         sib->sps_flags &= ~(SPS_PIOATTACH | SPS_CACHED);
 367                         /*
 368                          * There should be a preallocated hangup
 369                          * message here.  Fetch it and send it up to
 370                          * the stream head.  This will cause IP to
 371                          * mark the interface as "down."
 372                          */
 373                         if ((mp = sib->sps_hangup) != NULL) {
 374                                 sib->sps_hangup = NULL;
 375                                 /*
 376                                  * M_HANGUP works with IP, but snoop
 377                                  * is lame and requires M_ERROR.  Send
 378                                  * up a clean error code instead.
 379                                  *
 380                                  * XXX if snoop is fixed, fix this, too.
 381                                  */
 382                                 MTYPE(mp) = M_ERROR;
 383                                 *mp->b_wptr++ = ENXIO;
 384                                 putnext(sib->sps_rq, mp);
 385                         }
 386                         qenable(WR(sib->sps_rq));
 387                         cnt++;
 388                         sib = sib->sps_nextsib;
 389                 }
 390                 ASSERT(ppa->ppa_refcnt == cnt);
 391         } else {
 392                 ASSERT(ppa->ppa_streams != NULL);
 393                 ASSERT(ppa->ppa_ctl != sps);
 394                 mp = NULL;
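                      /*
                       * A stream bound to IP or IPv6 is being closed; drop
                       * the ppa's cached pointer to it and build an "unbound"
                       * link-status message for the control stream.
                       */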
 395                 if (sps->sps_sap == PPP_IP) {
 396                         ppa->ppa_ip_cache = NULL;
 397                         mp = create_lsmsg(PPP_LINKSTAT_IPV4_UNBOUND);
 398                 } else if (sps->sps_sap == PPP_IPV6) {
 399                         ppa->ppa_ip6_cache = NULL;
 400                         mp = create_lsmsg(PPP_LINKSTAT_IPV6_UNBOUND);
 401                 }
 402                 /* Tell the daemon the bad news. */
 403                 if (mp != NULL && ppa->ppa_ctl != NULL &&
 404                     (sps->sps_npmode == NPMODE_PASS ||
 405                     sps->sps_npmode == NPMODE_QUEUE)) {
 406                         putnext(ppa->ppa_ctl->sps_rq, mp);
 407                 } else {
 408                         freemsg(mp);
 409                 }
 410                 /*
 411                  * Walk through all of sibling streams attached to the
 412                  * same ppa, and remove this stream from the sibling
 413                  * streams list. We have exclusive access for the
 414                  * entire driver here, so there's no need to hold
 415                  * ppa_sib_lock.
 416                  */
 417                 sib = ppa->ppa_streams;
 418                 if (sib == sps) {
 419                         ppa->ppa_streams = sps->sps_nextsib;
 420                 } else {
 421                         while (sib->sps_nextsib != NULL) {
 422                                 if (sib->sps_nextsib == sps) {
 423                                         sib->sps_nextsib = sps->sps_nextsib;
 424                                         break;
 425                                 }
 426                                 sib = sib->sps_nextsib;
 427                         }
 428                 }
 429                 sps->sps_nextsib = NULL;
 430                 freemsg(sps->sps_hangup);
 431                 sps->sps_hangup = NULL;
 432                 /*
  433                  * Check if this is a promiscuous stream. If the SPS_PROMISC bit
 434                  * is still set, it means that the stream is closed without
 435                  * ever having issued DL_DETACH_REQ or DL_PROMISCOFF_REQ.
  436                  * In this case, we simply decrement the promiscuous counter,
 437                  * and it's safe to do it without holding ppa_sib_lock since
 438                  * we're exclusive (inner and outer) at this point.
 439                  */
 440                 if (IS_SPS_PROMISC(sps)) {
 441                         ASSERT(ppa->ppa_promicnt > 0);
 442                         ppa->ppa_promicnt--;
 443                 }
 444         }
 445         /* If we're the only one left, then delete now. */
 446         if (ppa->ppa_refcnt <= 1)
 447                 sppp_free_ppa(ppa);
 448         else
 449                 ppa->ppa_refcnt--;
 450 close_unattached:
 451         q->q_ptr = WR(q)->q_ptr = NULL;
 452         for (nextmn = &sps_list; *nextmn != NULL;
 453             nextmn = &(*nextmn)->sps_nextmn) {
 454                 if (*nextmn == sps) {
 455                         *nextmn = sps->sps_nextmn;
 456                         break;
 457                 }
 458         }
 459         kmem_free(sps, sizeof (spppstr_t));
 460         return (0);
 461 }
 462 
 463 static void
 464 sppp_ioctl(struct queue *q, mblk_t *mp)
 465 {
 466         spppstr_t       *sps;
 467         spppstr_t       *nextsib;
 468         sppa_t          *ppa;
 469         struct iocblk   *iop;
 470         mblk_t          *nmp;
 471         enum NPmode     npmode;
 472         struct ppp_idle *pip;
 473         struct ppp_stats64 *psp;
 474         struct ppp_comp_stats *pcsp;
 475         hrtime_t        hrtime;
 476         int             sap;
 477         int             count = 0;
 478         int             error = EINVAL;
 479 
 480         sps = (spppstr_t *)q->q_ptr;
 481         ppa = sps->sps_ppa;
 482 
 483         iop = (struct iocblk *)mp->b_rptr;
 484         switch (iop->ioc_cmd) {
 485         case PPPIO_NPMODE:
 486                 if (!IS_SPS_CONTROL(sps)) {
 487                         break;          /* return EINVAL */
 488                 } else if (iop->ioc_count != 2 * sizeof (uint32_t) ||
 489                     (mp->b_cont == NULL)) {
 490                         error = EPROTO;
 491                         break;
 492                 }
 493                 ASSERT(ppa != NULL);
 494                 ASSERT(mp->b_cont->b_rptr != NULL);
 495                 ASSERT(sps->sps_npmode == NPMODE_PASS);
 496                 sap = ((uint32_t *)mp->b_cont->b_rptr)[0];
 497                 npmode = (enum NPmode)((uint32_t *)mp->b_cont->b_rptr)[1];
 498                 /*
 499                  * Walk the sibling streams which belong to the same
 500                  * ppa, and try to find a stream with matching sap
 501                  * number.
 502                  */
 503                 rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
 504                 for (nextsib = ppa->ppa_streams; nextsib != NULL;
 505                     nextsib = nextsib->sps_nextsib) {
 506                         if (nextsib->sps_sap == sap) {
 507                                 break;  /* found it */
 508                         }
 509                 }
 510                 if (nextsib == NULL) {
 511                         rw_exit(&ppa->ppa_sib_lock);
 512                         break;          /* return EINVAL */
 513                 } else {
 514                         nextsib->sps_npmode = npmode;
 515                         if ((nextsib->sps_npmode != NPMODE_QUEUE) &&
 516                             (WR(nextsib->sps_rq)->q_first != NULL)) {
 517                                 qenable(WR(nextsib->sps_rq));
 518                         }
 519                 }
 520                 rw_exit(&ppa->ppa_sib_lock);
 521                 error = 0;      /* return success */
 522                 break;
 523         case PPPIO_GIDLE:
 524                 if (ppa == NULL) {
 525                         ASSERT(!IS_SPS_CONTROL(sps));
 526                         error = ENOLINK;
 527                         break;
 528                 } else if (!IS_PPA_TIMESTAMP(ppa)) {
 529                         break;          /* return EINVAL */
 530                 }
 531                 if ((nmp = allocb(sizeof (struct ppp_idle),
 532                     BPRI_MED)) == NULL) {
 533                         mutex_enter(&ppa->ppa_sta_lock);
 534                         ppa->ppa_allocbfail++;
 535                         mutex_exit(&ppa->ppa_sta_lock);
 536                         error = ENOSR;
 537                         break;
 538                 }
 539                 if (mp->b_cont != NULL) {
 540                         freemsg(mp->b_cont);
 541                 }
 542                 mp->b_cont = nmp;
 543                 pip = (struct ppp_idle *)nmp->b_wptr;
 544                 nmp->b_wptr += sizeof (struct ppp_idle);
 545                 /*
 546                  * Get current timestamp and subtract the tx and rx
 547                  * timestamps to get the actual idle time to be
 548                  * returned.
 549                  */
 550                 hrtime = gethrtime();
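                      /* gethrtime() is in nanoseconds; report the idle times in seconds. */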
 551                 pip->xmit_idle = (hrtime - ppa->ppa_lasttx) / 1000000000ul;
 552                 pip->recv_idle = (hrtime - ppa->ppa_lastrx) / 1000000000ul;
 553                 count = msgsize(nmp);
 554                 error = 0;
 555                 break;          /* return success (error is 0) */
 556         case PPPIO_GTYPE:
 557                 nmp = allocb(sizeof (uint32_t), BPRI_MED);
 558                 if (nmp == NULL) {
 559                         error = ENOSR;
 560                         break;
 561                 }
 562                 if (mp->b_cont != NULL) {
 563                         freemsg(mp->b_cont);
 564                 }
 565                 mp->b_cont = nmp;
 566                 /*
 567                  * Let the requestor know that we are the PPP
 568                  * multiplexer (PPPTYP_MUX).
 569                  */
 570                 *(uint32_t *)nmp->b_wptr = PPPTYP_MUX;
 571                 nmp->b_wptr += sizeof (uint32_t);
 572                 count = msgsize(nmp);
 573                 error = 0;              /* return success */
 574                 break;
 575         case PPPIO_GETSTAT64:
 576                 if (ppa == NULL) {
 577                         break;          /* return EINVAL */
 578                 } else if ((ppa->ppa_lower_wq != NULL) &&
 579                     !IS_PPA_LASTMOD(ppa)) {
 580                         mutex_enter(&ppa->ppa_sta_lock);
 581                         /*
 582                          * We match sps_ioc_id on the M_IOC{ACK,NAK},
 583                          * so if the response hasn't come back yet,
 584                          * new ioctls must be queued instead.
 585                          */
 586                         if (IS_SPS_IOCQ(sps)) {
 587                                 mutex_exit(&ppa->ppa_sta_lock);
 588                                 if (!putq(q, mp)) {
 589                                         error = EAGAIN;
 590                                         break;
 591                                 }
 592                                 return;
 593                         } else {
 594                                 ppa->ppa_ioctlsfwd++;
 595                                 /*
 596                                  * Record the ioctl CMD & ID - this will be
 597                                  * used to check the ACK or NAK responses
 598                                  * coming from below.
 599                                  */
 600                                 sps->sps_ioc_id = iop->ioc_id;
 601                                 sps->sps_flags |= SPS_IOCQ;
 602                                 mutex_exit(&ppa->ppa_sta_lock);
 603                         }
 604                         putnext(ppa->ppa_lower_wq, mp);
 605                         return; /* don't ack or nak the request */
 606                 }
 607                 nmp = allocb(sizeof (*psp), BPRI_MED);
 608                 if (nmp == NULL) {
 609                         mutex_enter(&ppa->ppa_sta_lock);
 610                         ppa->ppa_allocbfail++;
 611                         mutex_exit(&ppa->ppa_sta_lock);
 612                         error = ENOSR;
 613                         break;
 614                 }
 615                 if (mp->b_cont != NULL) {
 616                         freemsg(mp->b_cont);
 617                 }
 618                 mp->b_cont = nmp;
 619                 psp = (struct ppp_stats64 *)nmp->b_wptr;
 620                 /*
 621                  * Copy the contents of ppp_stats64 structure for this
 622                  * ppa and return them to the caller.
 623                  */
 624                 mutex_enter(&ppa->ppa_sta_lock);
 625                 bcopy(&ppa->ppa_stats, psp, sizeof (*psp));
 626                 mutex_exit(&ppa->ppa_sta_lock);
 627                 nmp->b_wptr += sizeof (*psp);
 628                 count = sizeof (*psp);
 629                 error = 0;              /* return success */
 630                 break;
 631         case PPPIO_GETCSTAT:
 632                 if (ppa == NULL) {
 633                         break;          /* return EINVAL */
 634                 } else if ((ppa->ppa_lower_wq != NULL) &&
 635                     !IS_PPA_LASTMOD(ppa)) {
 636                         mutex_enter(&ppa->ppa_sta_lock);
 637                         /*
 638                          * See comments in PPPIO_GETSTAT64 case
 639                          * in sppp_ioctl().
 640                          */
 641                         if (IS_SPS_IOCQ(sps)) {
 642                                 mutex_exit(&ppa->ppa_sta_lock);
 643                                 if (!putq(q, mp)) {
 644                                         error = EAGAIN;
 645                                         break;
 646                                 }
 647                                 return;
 648                         } else {
 649                                 ppa->ppa_ioctlsfwd++;
 650                                 /*
 651                                  * Record the ioctl CMD & ID - this will be
 652                                  * used to check the ACK or NAK responses
 653                                  * coming from below.
 654                                  */
 655                                 sps->sps_ioc_id = iop->ioc_id;
 656                                 sps->sps_flags |= SPS_IOCQ;
 657                                 mutex_exit(&ppa->ppa_sta_lock);
 658                         }
 659                         putnext(ppa->ppa_lower_wq, mp);
 660                         return; /* don't ack or nak the request */
 661                 }
 662                 nmp = allocb(sizeof (struct ppp_comp_stats), BPRI_MED);
 663                 if (nmp == NULL) {
 664                         mutex_enter(&ppa->ppa_sta_lock);
 665                         ppa->ppa_allocbfail++;
 666                         mutex_exit(&ppa->ppa_sta_lock);
 667                         error = ENOSR;
 668                         break;
 669                 }
 670                 if (mp->b_cont != NULL) {
 671                         freemsg(mp->b_cont);
 672                 }
 673                 mp->b_cont = nmp;
 674                 pcsp = (struct ppp_comp_stats *)nmp->b_wptr;
 675                 nmp->b_wptr += sizeof (struct ppp_comp_stats);
 676                 bzero((caddr_t)pcsp, sizeof (struct ppp_comp_stats));
 677                 count = msgsize(nmp);
 678                 error = 0;              /* return success */
 679                 break;
 680         }
 681 
 682         if (error == 0) {
 683                 /* Success; tell the user. */
 684                 miocack(q, mp, count, 0);
 685         } else {
 686                 /* Failure; send error back upstream. */
 687                 miocnak(q, mp, 0, error);
 688         }
 689 }
 690 
 691 /*
 692  * sppp_uwput()
 693  *
 694  * MT-Perimeters:
 695  *    shared inner, shared outer.
 696  *
 697  * Description:
 698  *    Upper write-side put procedure. Messages from above arrive here.
 699  */
 700 int
 701 sppp_uwput(queue_t *q, mblk_t *mp)
 702 {
 703         queue_t         *nextq;
 704         spppstr_t       *sps;
 705         sppa_t          *ppa;
 706         struct iocblk   *iop;
 707         int             error;
 708 
 709         ASSERT(q != NULL && q->q_ptr != NULL);
 710         ASSERT(mp != NULL && mp->b_rptr != NULL);
 711         sps = (spppstr_t *)q->q_ptr;
 712         ppa = sps->sps_ppa;
 713 
 714         switch (MTYPE(mp)) {
 715         case M_PCPROTO:
 716         case M_PROTO:
 717                 if (IS_SPS_CONTROL(sps)) {
 718                         ASSERT(ppa != NULL);
 719                         /*
 720                          * Intentionally change this to a high priority
 721                          * message so it doesn't get queued up. M_PROTO is
 722                          * specifically used for signalling between pppd and its
 723                          * kernel-level component(s), such as ppptun, so we
 724                          * make sure that it doesn't get queued up behind
 725                          * data messages.
 726                          */
 727                         MTYPE(mp) = M_PCPROTO;
 728                         if ((ppa->ppa_lower_wq != NULL) &&
 729                             canputnext(ppa->ppa_lower_wq)) {
 730                                 mutex_enter(&ppa->ppa_sta_lock);
 731                                 ppa->ppa_mctlsfwd++;
 732                                 mutex_exit(&ppa->ppa_sta_lock);
 733                                 putnext(ppa->ppa_lower_wq, mp);
 734                         } else {
 735                                 mutex_enter(&ppa->ppa_sta_lock);
 736                                 ppa->ppa_mctlsfwderr++;
 737                                 mutex_exit(&ppa->ppa_sta_lock);
 738                                 freemsg(mp);
 739                         }
 740                 } else {
 741                         (void) sppp_mproto(q, mp, sps);
 742                         return (0);
 743                 }
 744                 break;
 745         case M_DATA:
 746                 if ((nextq = sppp_send(q, &mp, sps)) != NULL)
 747                         putnext(nextq, mp);
 748                 break;
 749         case M_IOCTL:
 750                 error = EINVAL;
 751                 iop = (struct iocblk *)mp->b_rptr;
 752                 switch (iop->ioc_cmd) {
 753                 case DLIOCRAW:
 754                 case DL_IOC_HDR_INFO:
 755                 case PPPIO_ATTACH:
 756                 case PPPIO_DEBUG:
 757                 case PPPIO_DETACH:
 758                 case PPPIO_LASTMOD:
 759                 case PPPIO_MRU:
 760                 case PPPIO_MTU:
 761                 case PPPIO_USETIMESTAMP:
 762                 case PPPIO_BLOCKNP:
 763                 case PPPIO_UNBLOCKNP:
 764                         qwriter(q, mp, sppp_inner_ioctl, PERIM_INNER);
 765                         return (0);
 766                 case I_LINK:
 767                 case I_UNLINK:
 768                 case PPPIO_NEWPPA:
 769                         qwriter(q, mp, sppp_outer_ioctl, PERIM_OUTER);
 770                         return (0);
 771                 case PPPIO_NPMODE:
 772                 case PPPIO_GIDLE:
 773                 case PPPIO_GTYPE:
 774                 case PPPIO_GETSTAT64:
 775                 case PPPIO_GETCSTAT:
 776                         /*
 777                          * These require additional auto variables to
 778                          * handle, so (for optimization reasons)
 779                          * they're moved off to a separate function.
 780                          */
 781                         sppp_ioctl(q, mp);
 782                         return (0);
 783                 case PPPIO_GETSTAT:
 784                         break;                  /* 32 bit interface gone */
 785                 default:
 786                         if (iop->ioc_cr == NULL ||
 787                             secpolicy_ppp_config(iop->ioc_cr) != 0) {
 788                                 error = EPERM;
 789                                 break;
 790                         } else if ((ppa == NULL) ||
 791                             (ppa->ppa_lower_wq == NULL)) {
 792                                 break;          /* return EINVAL */
 793                         }
 794                         mutex_enter(&ppa->ppa_sta_lock);
 795                         /*
 796                          * See comments in PPPIO_GETSTAT64 case
 797                          * in sppp_ioctl().
 798                          */
 799                         if (IS_SPS_IOCQ(sps)) {
 800                                 mutex_exit(&ppa->ppa_sta_lock);
 801                                 if (!putq(q, mp)) {
 802                                         error = EAGAIN;
 803                                         break;
 804                                 }
 805                                 return (0);
 806                         } else {
 807                                 ppa->ppa_ioctlsfwd++;
 808                                 /*
 809                                  * Record the ioctl CMD & ID -
 810                                  * this will be used to check the
 811                                  * ACK or NAK responses coming from below.
 812                                  */
 813                                 sps->sps_ioc_id = iop->ioc_id;
 814                                 sps->sps_flags |= SPS_IOCQ;
 815                                 mutex_exit(&ppa->ppa_sta_lock);
 816                         }
 817                         putnext(ppa->ppa_lower_wq, mp);
 818                         return (0);     /* don't ack or nak the request */
 819                 }
 820                 /* Failure; send error back upstream. */
 821                 miocnak(q, mp, 0, error);
 822                 break;
 823         case M_FLUSH:
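                      /*
                       * Standard flush handling: flush our write-side data,
                       * then either turn the message around for the read side
                       * or free it.
                       */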
 824                 if (*mp->b_rptr & FLUSHW) {
 825                         flushq(q, FLUSHDATA);
 826                 }
 827                 if (*mp->b_rptr & FLUSHR) {
 828                         *mp->b_rptr &= ~FLUSHW;
 829                         qreply(q, mp);
 830                 } else {
 831                         freemsg(mp);
 832                 }
 833                 break;
 834         default:
 835                 freemsg(mp);
 836                 break;
 837         }
 838         return (0);
 839 }
 840 
 841 /*
 842  * sppp_uwsrv()
 843  *
 844  * MT-Perimeters:
 845  *    exclusive inner, shared outer.
 846  *
 847  * Description:
 848  *    Upper write-side service procedure. Note that this procedure does
 849  *    not get called when a message is placed on our write-side queue, since
 850  *    automatic queue scheduling has been turned off by noenable() when
 851  *    the queue was opened. We do this on purpose, as we explicitly control
 852  *    the write-side queue. Therefore, this procedure gets called when
  853  *    the lower write service procedure calls qenable() on the upper write stream queue.
 854  */
 855 int
 856 sppp_uwsrv(queue_t *q)
 857 {
 858         spppstr_t       *sps;
 859         sppa_t          *ppa;
 860         mblk_t          *mp;
 861         queue_t         *nextq;
 862         struct iocblk   *iop;
 863 
 864         ASSERT(q != NULL && q->q_ptr != NULL);
 865         sps = (spppstr_t *)q->q_ptr;
 866 
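              /*
               * Drain the queue.  M_IOCTL messages found here were deferred
               * because an earlier ioctl was still outstanding on the lower
               * stream; anything else is data awaiting transmission.
               */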
 867         while ((mp = getq(q)) != NULL) {
 868                 if (MTYPE(mp) == M_IOCTL) {
 869                         ppa = sps->sps_ppa;
 870                         if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) {
 871                                 miocnak(q, mp, 0, EINVAL);
 872                                 continue;
 873                         }
 874 
 875                         iop = (struct iocblk *)mp->b_rptr;
 876                         mutex_enter(&ppa->ppa_sta_lock);
 877                         /*
 878                          * See comments in PPPIO_GETSTAT64 case
 879                          * in sppp_ioctl().
 880                          */
 881                         if (IS_SPS_IOCQ(sps)) {
 882                                 mutex_exit(&ppa->ppa_sta_lock);
 883                                 if (putbq(q, mp) == 0)
 884                                         miocnak(q, mp, 0, EAGAIN);
 885                                 break;
 886                         } else {
 887                                 ppa->ppa_ioctlsfwd++;
 888                                 sps->sps_ioc_id = iop->ioc_id;
 889                                 sps->sps_flags |= SPS_IOCQ;
 890                                 mutex_exit(&ppa->ppa_sta_lock);
 891                                 putnext(ppa->ppa_lower_wq, mp);
 892                         }
 893                 } else if ((nextq =
 894                     sppp_outpkt(q, &mp, msgdsize(mp), sps)) == NULL) {
 895                         if (mp != NULL) {
 896                                 if (putbq(q, mp) == 0)
 897                                         freemsg(mp);
 898                                 break;
 899                         }
 900                 } else {
 901                         putnext(nextq, mp);
 902                 }
 903         }
 904         return (0);
 905 }
 906 
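      /*
       * Detach a stream from its ppa: free the ppa if this stream holds the
       * last reference, otherwise unlink the stream from the ppa's sibling
       * list and drop its reference.
       */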
 907 void
 908 sppp_remove_ppa(spppstr_t *sps)
 909 {
 910         spppstr_t *nextsib;
 911         sppa_t *ppa = sps->sps_ppa;
 912 
 913         rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
 914         if (ppa->ppa_refcnt <= 1) {
 915                 rw_exit(&ppa->ppa_sib_lock);
 916                 sppp_free_ppa(ppa);
 917         } else {
 918                 nextsib = ppa->ppa_streams;
 919                 if (nextsib == sps) {
 920                         ppa->ppa_streams = sps->sps_nextsib;
 921                 } else {
 922                         while (nextsib->sps_nextsib != NULL) {
 923                                 if (nextsib->sps_nextsib == sps) {
 924                                         nextsib->sps_nextsib =
 925                                             sps->sps_nextsib;
 926                                         break;
 927                                 }
 928                                 nextsib = nextsib->sps_nextsib;
 929                         }
 930                 }
 931                 ppa->ppa_refcnt--;
 932                 /*
 933                  * And if this stream was marked as promiscuous
 934                  * (SPS_PROMISC), then we need to update the
 935                  * promiscuous streams count. This should only happen
 936                  * when DL_DETACH_REQ is issued prior to marking the
 937                  * stream as non-promiscuous, through
  938                  * a DL_PROMISCOFF_REQ request.
 939                  */
 940                 if (IS_SPS_PROMISC(sps)) {
 941                         ASSERT(ppa->ppa_promicnt > 0);
 942                         ppa->ppa_promicnt--;
 943                 }
 944                 rw_exit(&ppa->ppa_sib_lock);
 945         }
 946         sps->sps_nextsib = NULL;
 947         sps->sps_ppa = NULL;
 948         freemsg(sps->sps_hangup);
 949         sps->sps_hangup = NULL;
 950 }
 951 
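      /*
       * Look up a ppa instance by its id on the global ppa_list; returns
       * NULL if no matching ppa exists.
       */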
 952 sppa_t *
 953 sppp_find_ppa(uint32_t ppa_id)
 954 {
 955         sppa_t *ppa;
 956 
 957         for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
 958                 if (ppa->ppa_ppa_id == ppa_id) {
 959                         break;  /* found the ppa */
 960                 }
 961         }
 962         return (ppa);
 963 }
 964 
 965 /*
 966  * sppp_inner_ioctl()
 967  *
 968  * MT-Perimeters:
 969  *    exclusive inner, shared outer
 970  *
 971  * Description:
 972  *    Called by sppp_uwput as a result of receiving ioctls which require
  973  *    exclusive access at the inner perimeter.
 974  */
 975 static void
 976 sppp_inner_ioctl(queue_t *q, mblk_t *mp)
 977 {
 978         spppstr_t       *sps;
 979         sppa_t          *ppa;
 980         struct iocblk   *iop;
 981         mblk_t          *nmp;
 982         int             error = EINVAL;
 983         int             count = 0;
 984         int             dbgcmd;
 985         int             mru, mtu;
 986         uint32_t        ppa_id;
 987         hrtime_t        hrtime;
 988         uint16_t        proto;
 989 
 990         ASSERT(q != NULL && q->q_ptr != NULL);
 991         ASSERT(mp != NULL && mp->b_rptr != NULL);
 992 
 993         sps = (spppstr_t *)q->q_ptr;
 994         ppa = sps->sps_ppa;
 995         iop = (struct iocblk *)mp->b_rptr;
 996         switch (iop->ioc_cmd) {
 997         case DLIOCRAW:
 998                 if (IS_SPS_CONTROL(sps)) {
 999                         break;          /* return EINVAL */
1000                 }
1001                 sps->sps_flags |= SPS_RAWDATA;
1002                 error = 0;              /* return success */
1003                 break;
1004         case DL_IOC_HDR_INFO:
1005                 if (IS_SPS_CONTROL(sps)) {
1006                         break;          /* return EINVAL */
1007                 } else if ((mp->b_cont == NULL) ||
1008                     *((t_uscalar_t *)mp->b_cont->b_rptr) != DL_UNITDATA_REQ ||
1009                     (MBLKL(mp->b_cont) < (sizeof (dl_unitdata_req_t) +
1010                     SPPP_ADDRL))) {
1011                         error = EPROTO;
1012                         break;
1013                 } else if (ppa == NULL) {
1014                         error = ENOLINK;
1015                         break;
1016                 }
1017                 if ((nmp = allocb(PPP_HDRLEN, BPRI_MED)) == NULL) {
1018                         mutex_enter(&ppa->ppa_sta_lock);
1019                         ppa->ppa_allocbfail++;
1020                         mutex_exit(&ppa->ppa_sta_lock);
1021                         error = ENOMEM;
1022                         break;
1023                 }
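                      /*
                       * Build the 4-byte PPP header used by the fast path:
                       * all-stations address, UI control byte, and the 16-bit
                       * protocol taken from the sap bound to this stream.
                       */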
1024                 *(uchar_t *)nmp->b_wptr++ = PPP_ALLSTATIONS;
1025                 *(uchar_t *)nmp->b_wptr++ = PPP_UI;
1026                 *(uchar_t *)nmp->b_wptr++ = sps->sps_sap >> 8;
1027                 *(uchar_t *)nmp->b_wptr++ = sps->sps_sap & 0xff;
1028                 ASSERT(MBLKL(nmp) == PPP_HDRLEN);
1029 
1030                 linkb(mp, nmp);
1031                 sps->sps_flags |= SPS_FASTPATH;
1032                 error = 0;              /* return success */
1033                 count = msgsize(nmp);
1034                 break;
1035         case PPPIO_ATTACH:
1036                 if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
1037                     (sps->sps_dlstate != DL_UNATTACHED) ||
1038                     (iop->ioc_count != sizeof (uint32_t))) {
1039                         break;          /* return EINVAL */
1040                 } else if (mp->b_cont == NULL) {
1041                         error = EPROTO;
1042                         break;
1043                 }
1044                 ASSERT(mp->b_cont->b_rptr != NULL);
1045                 /* If there's something here, it's detached. */
1046                 if (ppa != NULL) {
1047                         sppp_remove_ppa(sps);
1048                 }
1049                 ppa_id = *(uint32_t *)mp->b_cont->b_rptr;
1050                 ppa = sppp_find_ppa(ppa_id);
1051                 /*
1052                  * If we can't find it, then it's either because the requestor
1053                  * has supplied a wrong ppa_id to be attached to, or because
1054                  * the control stream for the specified ppa_id has been closed
1055                  * before we get here.
1056                  */
1057                 if (ppa == NULL) {
1058                         error = ENOENT;
1059                         break;
1060                 }
1061                 if (iop->ioc_cr == NULL ||
1062                     ppa->ppa_zoneid != crgetzoneid(iop->ioc_cr)) {
1063                         error = EPERM;
1064                         break;
1065                 }
1066                 /*
1067                  * Preallocate the hangup message so that we're always
1068                  * able to send this upstream in the event of a
1069                  * catastrophic failure.
1070                  */
1071                 if ((sps->sps_hangup = allocb(1, BPRI_MED)) == NULL) {
1072                         error = ENOSR;
1073                         break;
1074                 }
1075                 /*
1076                  * There are two ways to attach a stream to a ppa: one is
1077                  * through DLPI (DL_ATTACH_REQ) and the other is through
1078                  * PPPIO_ATTACH. This is why we need to distinguish whether or
1079                  * not a stream was allocated via PPPIO_ATTACH, so that we can
1080                  * properly detach it when we receive PPPIO_DETACH ioctl
1081                  * request.
1082                  */
1083                 sps->sps_flags |= SPS_PIOATTACH;
1084                 sps->sps_ppa = ppa;
1085                 /*
1086                  * Add this stream to the head of the list of sibling streams
1087                  * which belong to the same ppa as specified.
1088                  */
1089                 rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
1090                 ppa->ppa_refcnt++;
1091                 sps->sps_nextsib = ppa->ppa_streams;
1092                 ppa->ppa_streams = sps;
1093                 rw_exit(&ppa->ppa_sib_lock);
1094                 error = 0;              /* return success */
1095                 break;
1096         case PPPIO_BLOCKNP:
1097         case PPPIO_UNBLOCKNP:
1098                 if (iop->ioc_cr == NULL ||
1099                     secpolicy_ppp_config(iop->ioc_cr) != 0) {
1100                         error = EPERM;
1101                         break;
1102                 }
1103                 error = miocpullup(mp, sizeof (uint16_t));
1104                 if (error != 0)
1105                         break;
1106                 ASSERT(mp->b_cont->b_rptr != NULL);
1107                 proto = *(uint16_t *)mp->b_cont->b_rptr;
1108                 if (iop->ioc_cmd == PPPIO_BLOCKNP) {
1109                         uint32_t npflagpos = sppp_ppp2np(proto);
1110                         /*
1111                          * Mark proto as blocked in ppa_npflag until the
1112                          * corresponding queues for proto have been plumbed.
1113                          */
1114                         if (npflagpos != 0) {
1115                                 mutex_enter(&ppa->ppa_npmutex);
1116                                 ppa->ppa_npflag |= (1 << npflagpos);
1117                                 mutex_exit(&ppa->ppa_npmutex);
1118                         } else {
1119                                 error = EINVAL;
1120                         }
1121                 } else {
1122                         /*
1123                          * reset ppa_npflag and release proto
 1124                          * packets that were being held in the control queue.
1125                          */
1126                         sppp_release_pkts(ppa, proto);
1127                 }
1128                 break;
1129         case PPPIO_DEBUG:
1130                 if (iop->ioc_cr == NULL ||
1131                     secpolicy_ppp_config(iop->ioc_cr) != 0) {
1132                         error = EPERM;
1133                         break;
1134                 } else if (iop->ioc_count != sizeof (uint32_t)) {
1135                         break;          /* return EINVAL */
1136                 } else if (mp->b_cont == NULL) {
1137                         error = EPROTO;
1138                         break;
1139                 }
1140                 ASSERT(mp->b_cont->b_rptr != NULL);
1141                 dbgcmd = *(uint32_t *)mp->b_cont->b_rptr;
1142                 /*
 1143                  * We accept the PPPDBG_LOG + PPPDBG_DRIVER value as an indication
1144                  * that SPS_KDEBUG needs to be enabled for this upper stream.
1145                  */
1146                 if (dbgcmd == PPPDBG_LOG + PPPDBG_DRIVER) {
1147                         sps->sps_flags |= SPS_KDEBUG;
1148                         error = 0;      /* return success */
1149                         break;
1150                 }
1151                 /*
1152                  * Otherwise, for any other values, we send them down only if
1153                  * there is an attachment and if the attachment has something
1154                  * linked underneath it.
1155                  */
1156                 if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) {
1157                         error = ENOLINK;
1158                         break;
1159                 }
1160                 mutex_enter(&ppa->ppa_sta_lock);
1161                 /*
1162                  * See comments in PPPIO_GETSTAT64 case
1163                  * in sppp_ioctl().
1164                  */
1165                 if (IS_SPS_IOCQ(sps)) {
1166                         mutex_exit(&ppa->ppa_sta_lock);
1167                         if (!putq(q, mp)) {
1168                                 error = EAGAIN;
1169                                 break;
1170                         }
1171                         return;
1172                 } else {
1173                         ppa->ppa_ioctlsfwd++;
1174                         /*
1175                          * Record the ioctl CMD & ID -
1176                          * this will be used to check the
1177                          * ACK or NAK responses coming from below.
1178                          */
1179                         sps->sps_ioc_id = iop->ioc_id;
1180                         sps->sps_flags |= SPS_IOCQ;
1181                         mutex_exit(&ppa->ppa_sta_lock);
1182                 }
1183                 putnext(ppa->ppa_lower_wq, mp);
1184                 return;                 /* don't ack or nak the request */
1185         case PPPIO_DETACH:
1186                 if (!IS_SPS_PIOATTACH(sps)) {
1187                         break;          /* return EINVAL */
1188                 }
1189                 /*
1190                  * The SPS_PIOATTACH flag set on the stream tells us that
1191                  * the ppa field is still valid. In the event that the control
 1192                  * stream is closed prior to this stream's detachment, the
1193                  * SPS_PIOATTACH flag would have been cleared from this stream
1194                  * during close; in that case we won't get here.
1195                  */
1196                 ASSERT(ppa != NULL);
1197                 ASSERT(ppa->ppa_ctl != sps);
1198                 ASSERT(sps->sps_dlstate == DL_UNATTACHED);
1199 
1200                 /*
1201                  * We don't actually detach anything until the stream is
1202                  * closed or reattached.
1203                  */
1204 
1205                 sps->sps_flags &= ~SPS_PIOATTACH;
1206                 error = 0;              /* return success */
1207                 break;
1208         case PPPIO_LASTMOD:
1209                 if (!IS_SPS_CONTROL(sps)) {
1210                         break;          /* return EINVAL */
1211                 }
1212                 ASSERT(ppa != NULL);
1213                 ppa->ppa_flags |= PPA_LASTMOD;
1214                 error = 0;              /* return success */
1215                 break;
1216         case PPPIO_MRU:
1217                 if (!IS_SPS_CONTROL(sps) ||
1218                     (iop->ioc_count != sizeof (uint32_t))) {
1219                         break;          /* return EINVAL */
1220                 } else if (mp->b_cont == NULL) {
1221                         error = EPROTO;
1222                         break;
1223                 }
1224                 ASSERT(ppa != NULL);
1225                 ASSERT(mp->b_cont->b_rptr != NULL);
1226                 mru = *(uint32_t *)mp->b_cont->b_rptr;
1227                 if ((mru <= 0) || (mru > PPP_MAXMRU)) {
1228                         error = EPROTO;
1229                         break;
1230                 }
1231                 if (mru < PPP_MRU) {
1232                         mru = PPP_MRU;
1233                 }
1234                 ppa->ppa_mru = (uint16_t)mru;
1235                 /*
1236                  * If there's something beneath this driver for the ppa, then
 1237                  * inform it (or them) of the MRU size. Only do this if we
1238                  * are not the last PPP module on the stream.
1239                  */
1240                 if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) {
1241                         (void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MRU,
1242                             mru);
1243                 }
1244                 error = 0;              /* return success */
1245                 break;
1246         case PPPIO_MTU:
1247                 if (!IS_SPS_CONTROL(sps) ||
1248                     (iop->ioc_count != sizeof (uint32_t))) {
1249                         break;          /* return EINVAL */
1250                 } else if (mp->b_cont == NULL) {
1251                         error = EPROTO;
1252                         break;
1253                 }
1254                 ASSERT(ppa != NULL);
1255                 ASSERT(mp->b_cont->b_rptr != NULL);
1256                 mtu = *(uint32_t *)mp->b_cont->b_rptr;
1257                 if ((mtu <= 0) || (mtu > PPP_MAXMTU)) {
1258                         error = EPROTO;
1259                         break;
1260                 }
1261                 ppa->ppa_mtu = (uint16_t)mtu;
1262                 /*
1263                  * If there's something beneath this driver for the ppa, then
1264                  * inform it (or them) of the MTU size. Only do this if we
1265                  * are not the last PPP module on the stream.
1266                  */
1267                 if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) {
1268                         (void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MTU,
1269                             mtu);
1270                 }
1271                 error = 0;              /* return success */
1272                 break;
1273         case PPPIO_USETIMESTAMP:
1274                 if (!IS_SPS_CONTROL(sps)) {
1275                         break;          /* return EINVAL */
1276                 }
1277                 if (!IS_PPA_TIMESTAMP(ppa)) {
1278                         hrtime = gethrtime();
1279                         ppa->ppa_lasttx = ppa->ppa_lastrx = hrtime;
1280                         ppa->ppa_flags |= PPA_TIMESTAMP;
1281                 }
1282                 error = 0;
1283                 break;
1284         }
1285 
1286         if (error == 0) {
1287                 /* Success; tell the user */
1288                 miocack(q, mp, count, 0);
1289         } else {
1290                 /* Failure; send error back upstream */
1291                 miocnak(q, mp, 0, error);
1292         }
1293 }
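
     /*
      * For illustration only (not part of the driver): a control process
      * such as pppd would typically drive the PPPIO_MRU/PPPIO_MTU cases
      * above with an I_STR ioctl on the control stream.  A minimal,
      * hypothetical user-space sketch (assuming "ctlfd" is an open
      * descriptor on this driver's control stream):
      *
      *     #include <unistd.h>
      *     #include <stropts.h>
      *     #include <net/pppio.h>
      *
      *     uint32_t mtu = 1500;
      *     struct strioctl str;
      *
      *     str.ic_cmd = PPPIO_MTU;
      *     str.ic_timout = 0;
      *     str.ic_len = sizeof (mtu);
      *     str.ic_dp = (char *)&mtu;
      *     if (ioctl(ctlfd, I_STR, &str) < 0)
      *             perror("PPPIO_MTU");
      *
      * PPPIO_MRU is issued the same way; as shown above, both values are
      * also propagated to any modules linked below via M_CTL messages
      * (PPPCTL_MTU and PPPCTL_MRU).
      */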
1294 
1295 /*
1296  * sppp_outer_ioctl()
1297  *
1298  * MT-Perimeters:
1299  *    exclusive inner, exclusive outer
1300  *
1301  * Description:
1302  *    Called by sppp_uwput as a result of receiving ioctls which require
1303  *    an exclusive access at the outer perimeter.
1304  */
1305 static void
1306 sppp_outer_ioctl(queue_t *q, mblk_t *mp)
1307 {
1308         spppstr_t       *sps;
1309         spppstr_t       *nextsib;
1310         queue_t         *lwq;
1311         sppa_t          *ppa;
1312         struct iocblk   *iop;
1313         int             error = EINVAL;
1314         int             count = 0;
1315         uint32_t        ppa_id;
1316         mblk_t          *nmp;
1317         zoneid_t        zoneid;
1318 
1319         sps = (spppstr_t *)q->q_ptr;
1320         ppa = sps->sps_ppa;
1321         iop = (struct iocblk *)mp->b_rptr;
1322         switch (iop->ioc_cmd) {
1323         case I_LINK:
1324                 if (!IS_SPS_CONTROL(sps)) {
1325                         break;          /* return EINVAL */
1326                 } else if (ppa->ppa_lower_wq != NULL) {
1327                         error = EEXIST;
1328                         break;
1329                 }
1330                 ASSERT(ppa->ppa_ctl != NULL);
1331                 ASSERT(sps->sps_npmode == NPMODE_PASS);
1332                 ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);
1333 
1334                 lwq = ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot;
1335                 ASSERT(lwq != NULL);
1336 
1337                 ppa->ppa_lower_wq = lwq;
1338                 lwq->q_ptr = RD(lwq)->q_ptr = (caddr_t)ppa;
1339                 /*
1340                  * Unblock upper network streams which now feed this lower
1341                  * stream. We don't need to hold ppa_sib_lock here, since we
1342                  * are writer at the outer perimeter.
1343                  */
1344                 if (WR(sps->sps_rq)->q_first != NULL)
1345                         qenable(WR(sps->sps_rq));
1346                 for (nextsib = ppa->ppa_streams; nextsib != NULL;
1347                     nextsib = nextsib->sps_nextsib) {
1348                         nextsib->sps_npmode = NPMODE_PASS;
1349                         if (WR(nextsib->sps_rq)->q_first != NULL) {
1350                                 qenable(WR(nextsib->sps_rq));
1351                         }
1352                 }
1353 
1354                 /*
1355                  * Also unblock (run once) our lower read-side queue.  This is
1356                  * where packets received while doing the I_LINK may be
1357                  * languishing; see sppp_lrsrv.
1358                  */
1359                 qenable(RD(lwq));
1360 
1361                 /*
1362                  * Send useful information down to the modules which are now
1363                  * linked below this driver (for this particular ppa). Only
1364                  * do this if we are not the last PPP module on the stream.
1365                  */
1366                 if (!IS_PPA_LASTMOD(ppa)) {
1367                         (void) putctl8(lwq, M_CTL, PPPCTL_UNIT,
1368                             ppa->ppa_ppa_id);
1369                         (void) putctl4(lwq, M_CTL, PPPCTL_MRU, ppa->ppa_mru);
1370                         (void) putctl4(lwq, M_CTL, PPPCTL_MTU, ppa->ppa_mtu);
1371                 }
1372 
1373                 if (IS_SPS_KDEBUG(sps)) {
1374                         SPDEBUG(PPP_DRV_NAME
1375                             "/%d: I_LINK lwq=0x%p sps=0x%p flags=0x%b ppa=0x%p "
1376                             "flags=0x%b\n", sps->sps_mn_id,
1377                             (void *)ppa->ppa_lower_wq, (void *)sps,
1378                             sps->sps_flags, SPS_FLAGS_STR,
1379                             (void *)ppa, ppa->ppa_flags,
1380                             PPA_FLAGS_STR);
1381                 }
1382                 error = 0;              /* return success */
1383                 break;
1384         case I_UNLINK:
1385                 ASSERT(IS_SPS_CONTROL(sps));
1386                 ASSERT(ppa != NULL);
1387                 lwq = ppa->ppa_lower_wq;
1388                 ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);
1389                 ASSERT(lwq == ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot);
1390 
1391                 if (IS_SPS_KDEBUG(sps)) {
1392                         SPDEBUG(PPP_DRV_NAME
1393                             "/%d: I_UNLINK lwq=0x%p sps=0x%p flags=0x%b "
1394                             "ppa=0x%p flags=0x%b\n", sps->sps_mn_id,
1395                             (void *)lwq, (void *)sps, sps->sps_flags,
1396                             SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
1397                             PPA_FLAGS_STR);
1398                 }
1399                 /*
1400                  * While accessing the outer perimeter exclusively, we
1401                  * disassociate our ppa's lower_wq from the lower stream linked
1402                  * beneath us, and we also disassociate our control stream from
1403                  * the q_ptr of the lower stream.
1404                  */
1405                 lwq->q_ptr = RD(lwq)->q_ptr = NULL;
1406                 ppa->ppa_lower_wq = NULL;
1407                 /*
1408                  * Unblock streams which now feed back up the control stream,
1409                  * and acknowledge the request. We don't need to hold
1410                  * ppa_sib_lock here, since we are writer at the outer
1411                  * perimeter.
1412                  */
1413                 if (WR(sps->sps_rq)->q_first != NULL)
1414                         qenable(WR(sps->sps_rq));
1415                 for (nextsib = ppa->ppa_streams; nextsib != NULL;
1416                     nextsib = nextsib->sps_nextsib) {
1417                         if (WR(nextsib->sps_rq)->q_first != NULL) {
1418                                 qenable(WR(nextsib->sps_rq));
1419                         }
1420                 }
1421                 error = 0;              /* return success */
1422                 break;
1423         case PPPIO_NEWPPA:
1424                 /*
1425                  * Do a sanity check to ensure that we don't accept PPPIO_NEWPPA
1426                  * on a stream on which DLPI is used (since certain DLPI messages
1427                  * will cause state transition reflected in sps_dlstate,
1428                  * changing it from its default DL_UNATTACHED value). In other
1429                  * words, we won't allow a network/snoop stream to become
1430                  * a control stream.
1431                  */
1432                 if (iop->ioc_cr == NULL ||
1433                     secpolicy_ppp_config(iop->ioc_cr) != 0) {
1434                         error = EPERM;
1435                         break;
1436                 } else if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
1437                     (ppa != NULL) || (sps->sps_dlstate != DL_UNATTACHED)) {
1438                         break;          /* return EINVAL */
1439                 }
1440                 /* Get requested unit number (if any) */
1441                 if (iop->ioc_count == sizeof (uint32_t) && mp->b_cont != NULL)
1442                         ppa_id = *(uint32_t *)mp->b_cont->b_rptr;
1443                 else
1444                         ppa_id = 0;
1445                 /* Get mblk to use for response message */
1446                 nmp = allocb(sizeof (uint32_t), BPRI_MED);
1447                 if (nmp == NULL) {
1448                         error = ENOSR;
1449                         break;
1450                 }
1451                 if (mp->b_cont != NULL) {
1452                         freemsg(mp->b_cont);
1453                 }
1454                 mp->b_cont = nmp;            /* chain our response mblk */
1455                 /*
1456                  * Walk the global ppa list and determine the lowest
1457                  * available ppa_id number to be used.
1458                  */
1459                 if (ppa_id == (uint32_t)-1)
1460                         ppa_id = 0;
1461                 zoneid = crgetzoneid(iop->ioc_cr);
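                     /*
                      * A requested id of (uint32_t)-2 asks us to reuse an
                      * existing ppa in the caller's zone that currently has
                      * no control stream, rather than allocating a new unit
                      * number.
                      */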
1462                 for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
1463                         if (ppa_id == (uint32_t)-2) {
1464                                 if (ppa->ppa_ctl == NULL &&
1465                                     ppa->ppa_zoneid == zoneid)
1466                                         break;
1467                         } else {
1468                                 if (ppa_id < ppa->ppa_ppa_id)
1469                                         break;
1470                                 if (ppa_id == ppa->ppa_ppa_id)
1471                                         ++ppa_id;
1472                         }
1473                 }
1474                 if (ppa_id == (uint32_t)-2) {
1475                         if (ppa == NULL) {
1476                                 error = ENXIO;
1477                                 break;
1478                         }
1479                         /* Clear timestamp and lastmod flags */
1480                         ppa->ppa_flags = 0;
1481                 } else {
1482                         ppa = sppp_create_ppa(ppa_id, zoneid);
1483                         if (ppa == NULL) {
1484                                 error = ENOMEM;
1485                                 break;
1486                         }
1487                 }
1488 
1489                 sps->sps_ppa = ppa;          /* chain the ppa structure */
1490                 sps->sps_npmode = NPMODE_PASS;       /* network packets may travel */
1491                 sps->sps_flags |= SPS_CONTROL;       /* this is the control stream */
1492 
1493                 ppa->ppa_refcnt++;           /* new PPA reference */
1494                 ppa->ppa_ctl = sps;          /* back ptr to upper stream */
1495                 /*
1496                  * Return the newly created ppa_id to the requestor and
1497                  * acknowledge the request.
1498                  */
1499                 *(uint32_t *)nmp->b_wptr = ppa->ppa_ppa_id;
1500                 nmp->b_wptr += sizeof (uint32_t);
1501 
1502                 if (IS_SPS_KDEBUG(sps)) {
1503                         SPDEBUG(PPP_DRV_NAME
1504                             "/%d: PPPIO_NEWPPA ppa_id=%d sps=0x%p flags=0x%b "
1505                             "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, ppa_id,
1506                             (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
1507                             (void *)ppa, ppa->ppa_flags,
1508                             PPA_FLAGS_STR);
1509                 }
1510                 count = msgsize(nmp);
1511                 error = 0;
1512                 break;
1513         }
1514 
1515         if (error == 0) {
1516                 /* Success; tell the user. */
1517                 miocack(q, mp, count, 0);
1518         } else {
1519                 /* Failure; send error back upstream. */
1520                 miocnak(q, mp, 0, error);
1521         }
1522 }
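
     /*
      * For illustration only (not part of the driver): the PPPIO_NEWPPA
      * and I_LINK cases above are normally exercised from user space
      * roughly as in this hypothetical sketch, where "ctlfd" is a newly
      * opened stream on this driver and "devfd" is the lower stream
      * (e.g. an async or tunneling stream) to be linked beneath it.
      * Passing (uint32_t)-1 (or omitting the argument) asks the driver
      * for the lowest free unit; the assigned unit is copied back by
      * the I_STR ioctl:
      *
      *     uint32_t unit = (uint32_t)-1;
      *     struct strioctl str;
      *     int muxid;
      *
      *     str.ic_cmd = PPPIO_NEWPPA;
      *     str.ic_timout = 0;
      *     str.ic_len = sizeof (unit);
      *     str.ic_dp = (char *)&unit;
      *     if (ioctl(ctlfd, I_STR, &str) < 0)
      *             perror("PPPIO_NEWPPA");
      *     muxid = ioctl(ctlfd, I_LINK, devfd);
      *
      * ctlfd is then the control stream for that unit, and the muxid
      * returned by I_LINK is what a later I_UNLINK would use to undo
      * the link.
      */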
1523 
1524 /*
1525  * sppp_send()
1526  *
1527  * MT-Perimeters:
1528  *    shared inner, shared outer.
1529  *
1530  * Description:
1531  *    Called by sppp_uwput to handle M_DATA message type.  Returns
1532  *    queue_t for putnext, or NULL to mean that the packet was
1533  *    handled internally.
1534  */
1535 static queue_t *
1536 sppp_send(queue_t *q, mblk_t **mpp, spppstr_t *sps)
1537 {
1538         mblk_t  *mp;
1539         sppa_t  *ppa;
1540         int     is_promisc;
1541         int     msize;
1542         int     error = 0;
1543         queue_t *nextq;
1544 
1545         ASSERT(mpp != NULL);
1546         mp = *mpp;
1547         ASSERT(q != NULL && q->q_ptr != NULL);
1548         ASSERT(mp != NULL && mp->b_rptr != NULL);
1549         ASSERT(sps != NULL);
1550         ASSERT(q->q_ptr == sps);
1551         /*
1552          * We only let M_DATA through if the sender is either the control
1553          * stream (for PPP control packets) or one of the network streams
1554          * (for IP packets) in IP fastpath mode. If this stream is not attached
1555          * to any ppas, then discard data coming down through this stream.
1556          */
1557         ppa = sps->sps_ppa;
1558         if (ppa == NULL) {
1559                 ASSERT(!IS_SPS_CONTROL(sps));
1560                 error = ENOLINK;
1561         } else if (!IS_SPS_CONTROL(sps) && !IS_SPS_FASTPATH(sps)) {
1562                 error = EPROTO;
1563         }
1564         if (error != 0) {
1565                 merror(q, mp, error);
1566                 return (NULL);
1567         }
1568         msize = msgdsize(mp);
1569         if (msize > (ppa->ppa_mtu + PPP_HDRLEN)) {
1570                 /* Log, and send it anyway */
1571                 mutex_enter(&ppa->ppa_sta_lock);
1572                 ppa->ppa_otoolongs++;
1573                 mutex_exit(&ppa->ppa_sta_lock);
1574         } else if (msize < PPP_HDRLEN) {
1575                 /*
1576                  * Log, and send it anyway. We log it because we get things
1577                  * in M_DATA form here, which tells us that the sender is
1578                  * either IP in fastpath transmission mode, or pppd. In both
1579                          * cases, they are currently expected to send the 4-byte
1580                          * PPP header in front of any possible payloads.
1581                  */
1582                 mutex_enter(&ppa->ppa_sta_lock);
1583                 ppa->ppa_orunts++;
1584                 mutex_exit(&ppa->ppa_sta_lock);
1585         }
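             /*
              * (For reference, that 4-byte header is the all-stations
              * address 0xff, the UI control field 0x03, and the 16-bit
              * PPP protocol, e.g. 0x0021 for an IPv4 datagram.)
              */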
1586 
1587         if (IS_SPS_KDEBUG(sps)) {
1588                 SPDEBUG(PPP_DRV_NAME
1589                     "/%d: M_DATA send (%d bytes) sps=0x%p flags=0x%b "
1590                     "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, msize,
1591                     (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
1592                     (void *)ppa, ppa->ppa_flags, PPA_FLAGS_STR);
1593         }
1594         /*
1595          * Should there be any promiscuous stream(s), send the data up
1596          * for each promiscuous stream that we recognize. Make sure that
1597          * for fastpath, we skip the PPP header in the M_DATA mblk. We skip
1598          * the control stream as we obviously never allow the control stream
1599          * to become promiscuous and bind to PPP_ALLSAP.
1600          */
1601         rw_enter(&ppa->ppa_sib_lock, RW_READER);
1602         is_promisc = sps->sps_ppa->ppa_promicnt;
1603         if (is_promisc) {
1604                 ASSERT(ppa->ppa_streams != NULL);
1605                 sppp_dlprsendup(ppa->ppa_streams, mp, sps->sps_sap, B_TRUE);
1606         }
1607         rw_exit(&ppa->ppa_sib_lock);
1608         /*
1609          * Only time-stamp the packet with hrtime if the upper stream
1610          * is configured to do so.  PPP control (negotiation) messages
1611          * are never considered link activity; only data is activity.
1612          */
1613         if (!IS_SPS_CONTROL(sps) && IS_PPA_TIMESTAMP(ppa)) {
1614                 ppa->ppa_lasttx = gethrtime();
1615         }
1616         /*
1617          * If there's already a message in the write-side service queue,
1618          * then queue this message there as well, otherwise, try to send
1619          * it down to the module immediately below us.
1620          */
1621         if (q->q_first != NULL ||
1622             (nextq = sppp_outpkt(q, mpp, msize, sps)) == NULL) {
1623                 mp = *mpp;
1624                 if (mp != NULL && putq(q, mp) == 0) {
1625                         mutex_enter(&ppa->ppa_sta_lock);
1626                         ppa->ppa_oqdropped++;
1627                         mutex_exit(&ppa->ppa_sta_lock);
1628                         freemsg(mp);
1629                 }
1630                 return (NULL);
1631         }
1632         return (nextq);
1633 }
1634 
1635 /*
1636  * sppp_outpkt()
1637  *
1638  * MT-Perimeters:
1639  *    shared inner, shared outer (if called from sppp_wput, sppp_dlunitdatareq).
1640  *    exclusive inner, shared outer (if called from sppp_wsrv).
1641  *
1642  * Description:
1643  *    Called from 1) sppp_uwput when processing a M_DATA fastpath message,
1644  *    or 2) sppp_uwsrv when processing the upper write-side service queue.
1645  *    For both cases, it prepares to send the data to the module below
1646  *    this driver if there is a lower stream linked underneath. If none, then
1647  *    the data will be sent upstream via the control channel to pppd.
1648  *
1649  * Returns:
1650  *      A non-NULL queue_t if the message should be sent now.  Otherwise
1651  *      NULL is returned: *mpp == NULL means the message was consumed,
1652  *      while a non-NULL *mpp should be put (back) on the queue.  (Does
1653  *      not do putq/putbq, since it's called from both srv and put.)
1654  */
1655 static queue_t *
1656 sppp_outpkt(queue_t *q, mblk_t **mpp, int msize, spppstr_t *sps)
1657 {
1658         mblk_t          *mp;
1659         sppa_t          *ppa;
1660         enum NPmode     npmode;
1661         mblk_t          *mpnew;
1662 
1663         ASSERT(mpp != NULL);
1664         mp = *mpp;
1665         ASSERT(q != NULL && q->q_ptr != NULL);
1666         ASSERT(mp != NULL && mp->b_rptr != NULL);
1667         ASSERT(sps != NULL);
1668 
1669         ppa = sps->sps_ppa;
1670         npmode = sps->sps_npmode;
1671 
1672         if (npmode == NPMODE_QUEUE) {
1673                 ASSERT(!IS_SPS_CONTROL(sps));
1674                 return (NULL);  /* queue it for later */
1675         } else if (ppa == NULL || ppa->ppa_ctl == NULL ||
1676             npmode == NPMODE_DROP || npmode == NPMODE_ERROR) {
1677                 /*
1678                  * This cannot be the control stream, as it must always have
1679                  * a valid ppa, and its npmode must always be NPMODE_PASS.
1680                  */
1681                 ASSERT(!IS_SPS_CONTROL(sps));
1682                 if (npmode == NPMODE_DROP) {
1683                         freemsg(mp);
1684                 } else {
1685                         /*
1686                          * If we no longer have the control stream, or if the
1687                          * mode is set to NPMODE_ERROR, then we need to tell IP
1688                          * that the interface need to be marked as down. In
1689                          * that the interface needs to be marked as down. In
1690                          */
1691                         merror(q, mp, EPROTO);
1692                 }
1693                 *mpp = NULL;
1694                 return (NULL);  /* don't queue it */
1695         }
1696         /*
1697          * Do we have a driver stream linked underneath? If not, we need to
1698          * notify pppd that the link needs to be brought up and configure
1699          * this upper stream to drop subsequent outgoing packets. This is
1700          * for demand-dialing, in which case pppd has done the IP plumbing
1701          * but hasn't linked the driver stream underneath us. Therefore, when
1702          * a packet is sent down the IP interface, a notification message
1703          * will be sent up the control stream to pppd in order for it to
1704          * establish the physical link. The driver stream is then expected
1705          * to be linked underneath after physical link establishment is done.
1706          */
1707         if (ppa->ppa_lower_wq == NULL) {
1708                 ASSERT(ppa->ppa_ctl != NULL);
1709                 ASSERT(ppa->ppa_ctl->sps_rq != NULL);
1710 
1711                 *mpp = NULL;
1712                 mpnew = create_lsmsg(PPP_LINKSTAT_NEEDUP);
1713                 if (mpnew == NULL) {
1714                         freemsg(mp);
1715                         mutex_enter(&ppa->ppa_sta_lock);
1716                         ppa->ppa_allocbfail++;
1717                         mutex_exit(&ppa->ppa_sta_lock);
1718                         return (NULL);  /* don't queue it */
1719                 }
1720                 /* Include the data in the message for logging. */
1721                 mpnew->b_cont = mp;
1722                 mutex_enter(&ppa->ppa_sta_lock);
1723                 ppa->ppa_lsneedup++;
1724                 mutex_exit(&ppa->ppa_sta_lock);
1725                 /*
1726                  * We need to set the mode to NPMODE_DROP, but should only
1727                  * do so when this stream is not the control stream.
1728                  */
1729                 if (!IS_SPS_CONTROL(sps)) {
1730                         sps->sps_npmode = NPMODE_DROP;
1731                 }
1732                 putnext(ppa->ppa_ctl->sps_rq, mpnew);
1733                 return (NULL);  /* don't queue it */
1734         }
1735         /*
1736          * If so, then try to send it down. The lower queue is only ever
1737          * detached while holding an exclusive lock on the whole driver,
1738          * so we can be confident that the lower queue is still there.
1739          */
1740         if (bcanputnext(ppa->ppa_lower_wq, mp->b_band)) {
1741                 mutex_enter(&ppa->ppa_sta_lock);
1742                 ppa->ppa_stats.p.ppp_opackets++;
1743                 if (IS_SPS_CONTROL(sps)) {
1744                         ppa->ppa_opkt_ctl++;
1745                 }
1746                 ppa->ppa_stats.p.ppp_obytes += msize;
1747                 mutex_exit(&ppa->ppa_sta_lock);
1748                 return (ppa->ppa_lower_wq);  /* don't queue it */
1749         }
1750         return (NULL);  /* queue it for later */
1751 }
1752 
1753 /*
1754  * sppp_lwsrv()
1755  *
1756  * MT-Perimeters:
1757  *    exclusive inner, shared outer.
1758  *
1759  * Description:
1760  *    Lower write-side service procedure. No messages are ever placed on
1761  *    the write queue here; this just back-enables all upper write side
1762  *    service procedures.
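      *    This is the back-pressure path: when sppp_outpkt() finds that
      *    bcanputnext() fails on the lower write queue, packets remain
      *    queued on the upper write queues, and this routine restarts
      *    those upper streams once the lower stream drains.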
1763  */
1764 int
1765 sppp_lwsrv(queue_t *q)
1766 {
1767         sppa_t          *ppa;
1768         spppstr_t       *nextsib;
1769 
1770         ASSERT(q != NULL && q->q_ptr != NULL);
1771         ppa = (sppa_t *)q->q_ptr;
1772         ASSERT(ppa != NULL);
1773 
1774         rw_enter(&ppa->ppa_sib_lock, RW_READER);
1775         if ((nextsib = ppa->ppa_ctl) != NULL &&
1776             WR(nextsib->sps_rq)->q_first != NULL)
1777                 qenable(WR(nextsib->sps_rq));
1778         for (nextsib = ppa->ppa_streams; nextsib != NULL;
1779             nextsib = nextsib->sps_nextsib) {
1780                 if (WR(nextsib->sps_rq)->q_first != NULL) {
1781                         qenable(WR(nextsib->sps_rq));
1782                 }
1783         }
1784         rw_exit(&ppa->ppa_sib_lock);
1785         return (0);
1786 }
1787 
1788 /*
1789  * sppp_lrput()
1790  *
1791  * MT-Perimeters:
1792  *    shared inner, shared outer.
1793  *
1794  * Description:
1795  *    Lower read-side put procedure. Messages from below get here.
1796  *    Data messages are handled separately to limit stack usage
1797  *    going into IP.
1798  *
1799  *    Note that during I_UNLINK processing, it's possible for a downstream
1800  *    message to enable upstream data (due to pass_wput() removing the
1801  *    SQ_BLOCKED flag), and thus we must protect against a NULL sppa pointer.
1802  *    In this case, the only thing above us is passthru, and we might as well
1803  *    discard.
1804  */
1805 int
1806 sppp_lrput(queue_t *q, mblk_t *mp)
1807 {
1808         sppa_t          *ppa;
1809         spppstr_t       *sps;
1810 
1811         if ((ppa = q->q_ptr) == NULL) {
1812                 freemsg(mp);
1813                 return (0);
1814         }
1815 
1816         sps = ppa->ppa_ctl;
1817 
1818         if (MTYPE(mp) != M_DATA) {
1819                 sppp_recv_nondata(q, mp, sps);
1820         } else if (sps == NULL) {
1821                 freemsg(mp);
1822         } else if ((q = sppp_recv(q, &mp, sps)) != NULL) {
1823                 putnext(q, mp);
1824         }
1825         return (0);
1826 }
1827 
1828 /*
1829  * sppp_lrsrv()
1830  *
1831  * MT-Perimeters:
1832  *    exclusive inner, shared outer.
1833  *
1834  * Description:
1835  *    Lower read-side service procedure.  This is run once after the I_LINK
1836  *    occurs in order to clean up any packets that came in while we were
1837  *    transferring in the lower stream.  Otherwise, it's not used.
1838  */
1839 int
1840 sppp_lrsrv(queue_t *q)
1841 {
1842         mblk_t *mp;
1843 
1844         while ((mp = getq(q)) != NULL)
1845                 sppp_lrput(q, mp);
1846         return (0);
1847 }
1848 
1849 /*
1850  * sppp_recv_nondata()
1851  *
1852  * MT-Perimeters:
1853  *    shared inner, shared outer.
1854  *
1855  * Description:
1856  *    All received non-data messages come through here.
1857  */
1858 static void
1859 sppp_recv_nondata(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)
1860 {
1861         sppa_t          *ppa;
1862         spppstr_t       *destsps;
1863         struct iocblk   *iop;
1864 
1865         ppa = (sppa_t *)q->q_ptr;
1866         ctlsps = ppa->ppa_ctl;
1867 
1868         switch (MTYPE(mp)) {
1869         case M_CTL:
1870                 mutex_enter(&ppa->ppa_sta_lock);
1871                 if (*mp->b_rptr == PPPCTL_IERROR) {
1872                         ppa->ppa_stats.p.ppp_ierrors++;
1873                         ppa->ppa_ierr_low++;
1874                         ppa->ppa_mctlsknown++;
1875                 } else if (*mp->b_rptr == PPPCTL_OERROR) {
1876                         ppa->ppa_stats.p.ppp_oerrors++;
1877                         ppa->ppa_oerr_low++;
1878                         ppa->ppa_mctlsknown++;
1879                 } else {
1880                         ppa->ppa_mctlsunknown++;
1881                 }
1882                 mutex_exit(&ppa->ppa_sta_lock);
1883                 freemsg(mp);
1884                 break;
1885         case M_IOCTL:
1886                 miocnak(q, mp, 0, EINVAL);
1887                 break;
1888         case M_IOCACK:
1889         case M_IOCNAK:
1890                 iop = (struct iocblk *)mp->b_rptr;
1891                 ASSERT(iop != NULL);
1892                 /*
1893                  * Attempt to match up the response with the stream that the
1894                  * request came from. If ioc_id doesn't match the one that we
1895                  * recorded, then discard this message.
1896                  */
1897                 rw_enter(&ppa->ppa_sib_lock, RW_READER);
1898                 if ((destsps = ctlsps) == NULL ||
1899                     destsps->sps_ioc_id != iop->ioc_id) {
1900                         destsps = ppa->ppa_streams;
1901                         while (destsps != NULL) {
1902                                 if (destsps->sps_ioc_id == iop->ioc_id) {
1903                                         break;  /* found the upper stream */
1904                                 }
1905                                 destsps = destsps->sps_nextsib;
1906                         }
1907                 }
1908                 rw_exit(&ppa->ppa_sib_lock);
1909                 if (destsps == NULL) {
1910                         mutex_enter(&ppa->ppa_sta_lock);
1911                         ppa->ppa_ioctlsfwderr++;
1912                         mutex_exit(&ppa->ppa_sta_lock);
1913                         freemsg(mp);
1914                         break;
1915                 }
1916                 mutex_enter(&ppa->ppa_sta_lock);
1917                 ppa->ppa_ioctlsfwdok++;
1918 
1919                 /*
1920                  * Clear SPS_IOCQ and enable the upper stream's write-side
1921                  * queue; this allows its service routine (sppp_uwsrv) to
1922                  * resume processing any messages pending on that queue.
1923                  * sppp_lwsrv -> sppp_uwsrv.
1924                  */
1925                 destsps->sps_flags &= ~SPS_IOCQ;
1926                 mutex_exit(&ppa->ppa_sta_lock);
1927                 qenable(WR(destsps->sps_rq));
1928 
1929                 putnext(destsps->sps_rq, mp);
1930                 break;
1931         case M_HANGUP:
1932                 /*
1933                  * Free the original mblk_t. We don't really want to send
1934                  * a M_HANGUP message upstream, so we need to translate this
1935                  * message into something else.
1936                  */
1937                 freemsg(mp);
1938                 if (ctlsps == NULL)
1939                         break;
1940                 mp = create_lsmsg(PPP_LINKSTAT_HANGUP);
1941                 if (mp == NULL) {
1942                         mutex_enter(&ppa->ppa_sta_lock);
1943                         ppa->ppa_allocbfail++;
1944                         mutex_exit(&ppa->ppa_sta_lock);
1945                         break;
1946                 }
1947                 mutex_enter(&ppa->ppa_sta_lock);
1948                 ppa->ppa_lsdown++;
1949                 mutex_exit(&ppa->ppa_sta_lock);
1950                 putnext(ctlsps->sps_rq, mp);
1951                 break;
1952         case M_FLUSH:
1953                 if (*mp->b_rptr & FLUSHR) {
1954                         flushq(q, FLUSHDATA);
1955                 }
1956                 if (*mp->b_rptr & FLUSHW) {
1957                         *mp->b_rptr &= ~FLUSHR;
1958                         qreply(q, mp);
1959                 } else {
1960                         freemsg(mp);
1961                 }
1962                 break;
1963         default:
1964                 if (ctlsps != NULL &&
1965                     ((queclass(mp) == QPCTL) || canputnext(ctlsps->sps_rq))) {
1966                         putnext(ctlsps->sps_rq, mp);
1967                 } else {
1968                         mutex_enter(&ppa->ppa_sta_lock);
1969                         ppa->ppa_iqdropped++;
1970                         mutex_exit(&ppa->ppa_sta_lock);
1971                         freemsg(mp);
1972                 }
1973                 break;
1974         }
1975 }
1976 
1977 /*
1978  * sppp_recv()
1979  *
1980  * MT-Perimeters:
1981  *    shared inner, shared outer.
1982  *
1983  * Description:
1984  *    Receive function called by sppp_lrput.  Finds appropriate
1985  *    receive stream and does accounting.
1986  */
1987 static queue_t *
1988 sppp_recv(queue_t *q, mblk_t **mpp, spppstr_t *ctlsps)
1989 {
1990         mblk_t          *mp;
1991         int             len;
1992         sppa_t          *ppa;
1993         spppstr_t       *destsps;
1994         mblk_t          *zmp;
1995         uint32_t        npflagpos;
1996 
1997         ASSERT(mpp != NULL);
1998         mp = *mpp;
1999         ASSERT(q != NULL && q->q_ptr != NULL);
2000         ASSERT(mp != NULL && mp->b_rptr != NULL);
2001         ASSERT(ctlsps != NULL);
2002         ASSERT(IS_SPS_CONTROL(ctlsps));
2003         ppa = ctlsps->sps_ppa;
2004         ASSERT(ppa != NULL && ppa->ppa_ctl != NULL);
2005 
2006         len = msgdsize(mp);
2007         mutex_enter(&ppa->ppa_sta_lock);
2008         ppa->ppa_stats.p.ppp_ibytes += len;
2009         mutex_exit(&ppa->ppa_sta_lock);
2010         /*
2011          * If the entire data size of the mblk is less than the length of the
2012          * PPP header, then free it. We can't do much with such a message anyway,
2013          * since we can't really determine what the PPP protocol type is.
2014          */
2015         if (len < PPP_HDRLEN) {
2016                 /* Log, and free it */
2017                 mutex_enter(&ppa->ppa_sta_lock);
2018                 ppa->ppa_irunts++;
2019                 mutex_exit(&ppa->ppa_sta_lock);
2020                 freemsg(mp);
2021                 return (NULL);
2022         } else if (len > (ppa->ppa_mru + PPP_HDRLEN)) {
2023                 /* Log, and accept it anyway */
2024                 mutex_enter(&ppa->ppa_sta_lock);
2025                 ppa->ppa_itoolongs++;
2026                 mutex_exit(&ppa->ppa_sta_lock);
2027         }
2028         /*
2029          * We need at least to be able to read the PPP protocol from the header,
2030          * so if the first message block is too small, then we concatenate the
2031          * rest of the following blocks into one message.
2032          */
2033         if (MBLKL(mp) < PPP_HDRLEN) {
2034                 zmp = msgpullup(mp, PPP_HDRLEN);
2035                 freemsg(mp);
2036                 mp = zmp;
2037                 if (mp == NULL) {
2038                         mutex_enter(&ppa->ppa_sta_lock);
2039                         ppa->ppa_allocbfail++;
2040                         mutex_exit(&ppa->ppa_sta_lock);
2041                         return (NULL);
2042                 }
2043                 *mpp = mp;
2044         }
2045         /*
2046          * Hold this packet on the control queue for as long as the matching
2047          * network-layer upper stream for this PPP protocol (sap) has not yet
2048          * been plumbed and configured.
2049          */
2050         npflagpos = sppp_ppp2np(PPP_PROTOCOL(mp->b_rptr));
2051         mutex_enter(&ppa->ppa_npmutex);
2052         if (npflagpos != 0 && (ppa->ppa_npflag & (1 << npflagpos))) {
2053                 /*
2054                  * proto is currently blocked; hold up to 4 packets
2055                  * in the kernel.
2056                  */
2057                 if (ppa->ppa_holdpkts[npflagpos] > 3 ||
2058                     putq(ctlsps->sps_rq, mp) == 0)
2059                         freemsg(mp);
2060                 else
2061                         ppa->ppa_holdpkts[npflagpos]++;
2062                 mutex_exit(&ppa->ppa_npmutex);
2063                 return (NULL);
2064         }
2065         mutex_exit(&ppa->ppa_npmutex);
2066         /*
2067          * Try to find a matching network-layer upper stream for the specified
2068          * PPP protocol (sap), and if none is found, send this frame up the
2069          * control stream.
2070          */
2071         destsps = sppp_inpkt(q, mp, ctlsps);
2072         if (destsps == NULL) {
2073                 mutex_enter(&ppa->ppa_sta_lock);
2074                 ppa->ppa_ipkt_ctl++;
2075                 mutex_exit(&ppa->ppa_sta_lock);
2076                 if (canputnext(ctlsps->sps_rq)) {
2077                         if (IS_SPS_KDEBUG(ctlsps)) {
2078                                 SPDEBUG(PPP_DRV_NAME
2079                                     "/%d: M_DATA recv (%d bytes) sps=0x%p "
2080                                     "flags=0x%b ppa=0x%p flags=0x%b\n",
2081                                     ctlsps->sps_mn_id, len, (void *)ctlsps,
2082                                     ctlsps->sps_flags, SPS_FLAGS_STR,
2083                                     (void *)ppa, ppa->ppa_flags,
2084                                     PPA_FLAGS_STR);
2085                         }
2086                         return (ctlsps->sps_rq);
2087                 } else {
2088                         mutex_enter(&ppa->ppa_sta_lock);
2089                         ppa->ppa_iqdropped++;
2090                         mutex_exit(&ppa->ppa_sta_lock);
2091                         freemsg(mp);
2092                         return (NULL);
2093                 }
2094         }
2095         if (canputnext(destsps->sps_rq)) {
2096                 if (IS_SPS_KDEBUG(destsps)) {
2097                         SPDEBUG(PPP_DRV_NAME
2098                             "/%d: M_DATA recv (%d bytes) sps=0x%p flags=0x%b "
2099                             "ppa=0x%p flags=0x%b\n", destsps->sps_mn_id, len,
2100                             (void *)destsps, destsps->sps_flags,
2101                             SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
2102                             PPA_FLAGS_STR);
2103                 }
2104                 /*
2105                  * If fastpath is enabled on the network-layer stream, then
2106                  * make sure we skip over the PPP header, otherwise, we wrap
2107                  * the message in a DLPI message.
2108                  */
2109                 if (IS_SPS_FASTPATH(destsps)) {
2110                         mp->b_rptr += PPP_HDRLEN;
2111                         return (destsps->sps_rq);
2112                 } else {
2113                         spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
2114                         ASSERT(uqs != NULL);
2115                         mp->b_rptr += PPP_HDRLEN;
2116                         mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
2117                         if (mp != NULL) {
2118                                 *mpp = mp;
2119                                 return (destsps->sps_rq);
2120                         } else {
2121                                 mutex_enter(&ppa->ppa_sta_lock);
2122                                 ppa->ppa_allocbfail++;
2123                                 mutex_exit(&ppa->ppa_sta_lock);
2124                                 /* mp already freed by sppp_dladdud */
2125                                 return (NULL);
2126                         }
2127                 }
2128         } else {
2129                 mutex_enter(&ppa->ppa_sta_lock);
2130                 ppa->ppa_iqdropped++;
2131                 mutex_exit(&ppa->ppa_sta_lock);
2132                 freemsg(mp);
2133                 return (NULL);
2134         }
2135 }
2136 
2137 /*
2138  * sppp_inpkt()
2139  *
2140  * MT-Perimeters:
2141  *    shared inner, shared outer.
2142  *
2143  * Description:
2144  *    Find the destination upper stream for the received packet, called
2145  *    from sppp_recv.
2146  *
2147  * Returns:
2148  *    ptr to destination upper network stream, or NULL for control stream.
2149  */
2150 /* ARGSUSED */
2151 static spppstr_t *
2152 sppp_inpkt(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)
2153 {
2154         spppstr_t       *destsps = NULL;
2155         sppa_t          *ppa;
2156         uint16_t        proto;
2157         int             is_promisc;
2158 
2159         ASSERT(q != NULL && q->q_ptr != NULL);
2160         ASSERT(mp != NULL && mp->b_rptr != NULL);
2161         ASSERT(IS_SPS_CONTROL(ctlsps));
2162         ppa = ctlsps->sps_ppa;
2163         ASSERT(ppa != NULL);
2164         /*
2165          * From RFC 1661 (Section 2):
2166          *
2167          * The Protocol field is one or two octets, and its value identifies
2168          * the datagram encapsulated in the Information field of the packet.
2169          * The field is transmitted and received most significant octet first.
2170          *
2171          * The structure of this field is consistent with the ISO 3309
2172          * extension mechanism for address fields.  All Protocols MUST be odd;
2173          * the least significant bit of the least significant octet MUST equal
2174          * "1".  Also, all Protocols MUST be assigned such that the least
2175          * significant bit of the most significant octet equals "0". Frames
2176          * received which don't comply with these rules MUST be treated as
2177          * having an unrecognized Protocol.
2178          *
2179          * Protocol field values in the "0***" to "3***" range identify the
2180          * network-layer protocol of specific packets, and values in the
2181          * "8***" to "b***" range identify packets belonging to the associated
2182          * Network Control Protocols (NCPs), if any.
2183          *
2184          * Protocol field values in the "4***" to "7***" range are used for
2185          * protocols with low volume traffic which have no associated NCP.
2186          * Protocol field values in the "c***" to "f***" range identify packets
2187          * as link-layer Control Protocols (such as LCP).
2188          */
2189         proto = PPP_PROTOCOL(mp->b_rptr);
2190         mutex_enter(&ppa->ppa_sta_lock);
2191         ppa->ppa_stats.p.ppp_ipackets++;
2192         mutex_exit(&ppa->ppa_sta_lock);
2193         /*
2194          * We check if this is not a network-layer protocol, and if so,
2195          * then send this packet up the control stream.
2196          */
2197         if (proto > 0x7fff) {
2198                 goto inpkt_done;        /* send it up the control stream */
2199         }
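             /*
              * (For example, the NCPs such as IPCP (0x8021) and the
              * link-layer control protocols such as LCP (0xc021) and
              * CHAP (0xc223) all lie above 0x7fff and are delivered to
              * pppd on the control stream.)
              */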
2200         /*
2201          * Try to grab the destination upper stream from the network-layer
2202          * stream cache for this ppa for PPP_IP (0x0021) or PPP_IPV6 (0x0057)
2203          * protocol types. Otherwise, if the type is not known to the cache,
2204          * or if its sap can't be matched with any of the upper streams, then
2205          * send this packet up the control stream so that it can be rejected.
2206          */
2207         if (proto == PPP_IP) {
2208                 destsps = ppa->ppa_ip_cache;
2209         } else if (proto == PPP_IPV6) {
2210                 destsps = ppa->ppa_ip6_cache;
2211         }
2212         /*
2213          * Toss this one away up the control stream if there's no matching sap;
2214          * this way the protocol can be rejected (destsps is NULL).
2215          */
2216 
2217 inpkt_done:
2218         /*
2219          * Only time-stamp the packet with hrtime if the upper stream
2220          * is configured to do so.  PPP control (negotiation) messages
2221          * are never considered link activity; only data is activity.
2222          */
2223         if (destsps != NULL && IS_PPA_TIMESTAMP(ppa)) {
2224                 ppa->ppa_lastrx = gethrtime();
2225         }
2226         /*
2227          * Should there be any promiscuous stream(s), send the data up for
2228          * each promiscuous stream that we recognize. We skip the control
2229          * stream as we obviously never allow the control stream to become
2230          * promiscuous and bind to PPP_ALLSAP.
2231          */
2232         rw_enter(&ppa->ppa_sib_lock, RW_READER);
2233         is_promisc = ppa->ppa_promicnt;
2234         if (is_promisc) {
2235                 ASSERT(ppa->ppa_streams != NULL);
2236                 sppp_dlprsendup(ppa->ppa_streams, mp, proto, B_TRUE);
2237         }
2238         rw_exit(&ppa->ppa_sib_lock);
2239         return (destsps);
2240 }
2241 
2242 /*
2243  * sppp_kstat_update()
2244  *
2245  * Description:
2246  *    Update per-ppa kstat interface statistics.
2247  */
2248 static int
2249 sppp_kstat_update(kstat_t *ksp, int rw)
2250 {
2251         register sppa_t         *ppa;
2252         register sppp_kstats_t  *pppkp;
2253         register struct pppstat64 *sp;
2254 
2255         if (rw == KSTAT_WRITE) {
2256                 return (EACCES);
2257         }
2258 
2259         ppa = (sppa_t *)ksp->ks_private;
2260         ASSERT(ppa != NULL);
2261 
2262         pppkp = (sppp_kstats_t *)ksp->ks_data;
2263         sp = &ppa->ppa_stats.p;
2264 
2265         mutex_enter(&ppa->ppa_sta_lock);
2266         pppkp->allocbfail.value.ui32 = ppa->ppa_allocbfail;
2267         pppkp->mctlsfwd.value.ui32   = ppa->ppa_mctlsfwd;
2268         pppkp->mctlsfwderr.value.ui32        = ppa->ppa_mctlsfwderr;
2269         pppkp->rbytes.value.ui32     = sp->ppp_ibytes;
2270         pppkp->rbytes64.value.ui64   = sp->ppp_ibytes;
2271         pppkp->ierrors.value.ui32    = sp->ppp_ierrors;
2272         pppkp->ierrors_lower.value.ui32      = ppa->ppa_ierr_low;
2273         pppkp->ioctlsfwd.value.ui32  = ppa->ppa_ioctlsfwd;
2274         pppkp->ioctlsfwdok.value.ui32        = ppa->ppa_ioctlsfwdok;
2275         pppkp->ioctlsfwderr.value.ui32       = ppa->ppa_ioctlsfwderr;
2276         pppkp->ipackets.value.ui32   = sp->ppp_ipackets;
2277         pppkp->ipackets64.value.ui64 = sp->ppp_ipackets;
2278         pppkp->ipackets_ctl.value.ui32       = ppa->ppa_ipkt_ctl;
2279         pppkp->iqdropped.value.ui32  = ppa->ppa_iqdropped;
2280         pppkp->irunts.value.ui32     = ppa->ppa_irunts;
2281         pppkp->itoolongs.value.ui32  = ppa->ppa_itoolongs;
2282         pppkp->lsneedup.value.ui32   = ppa->ppa_lsneedup;
2283         pppkp->lsdown.value.ui32     = ppa->ppa_lsdown;
2284         pppkp->mctlsknown.value.ui32 = ppa->ppa_mctlsknown;
2285         pppkp->mctlsunknown.value.ui32       = ppa->ppa_mctlsunknown;
2286         pppkp->obytes.value.ui32     = sp->ppp_obytes;
2287         pppkp->obytes64.value.ui64   = sp->ppp_obytes;
2288         pppkp->oerrors.value.ui32    = sp->ppp_oerrors;
2289         pppkp->oerrors_lower.value.ui32      = ppa->ppa_oerr_low;
2290         pppkp->opackets.value.ui32   = sp->ppp_opackets;
2291         pppkp->opackets64.value.ui64 = sp->ppp_opackets;
2292         pppkp->opackets_ctl.value.ui32       = ppa->ppa_opkt_ctl;
2293         pppkp->oqdropped.value.ui32  = ppa->ppa_oqdropped;
2294         pppkp->otoolongs.value.ui32  = ppa->ppa_otoolongs;
2295         pppkp->orunts.value.ui32     = ppa->ppa_orunts;
2296         mutex_exit(&ppa->ppa_sta_lock);
2297 
2298         return (0);
2299 }
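
     /*
      * These per-unit counters can be read from user space with the
      * kstat facility (e.g. "kstat -m sppp", or programmatically via
      * libkstat), assuming the kstats are registered under the driver
      * name; the module/instance naming is established where the kstat
      * is created, outside this function.
      */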
2300 
2301 /*
2302  * Turn off proto in ppa_npflag to indicate that
2303  * the corresponding network protocol has been plumbed.
2304  * Release proto packets that were being held in the control
2305  * queue in anticipation of this event.
2306  */
2307 static void
2308 sppp_release_pkts(sppa_t *ppa, uint16_t proto)
2309 {
2310         uint32_t npflagpos = sppp_ppp2np(proto);
2311         int count;
2312         mblk_t *mp;
2313         uint16_t mp_proto;
2314         queue_t *q;
2315         spppstr_t *destsps;
2316 
2317         ASSERT(ppa != NULL);
2318 
2319         if (npflagpos == 0 || (ppa->ppa_npflag & (1 << npflagpos)) == 0)
2320                 return;
2321 
2322         mutex_enter(&ppa->ppa_npmutex);
2323         ppa->ppa_npflag &= ~(1 << npflagpos);
2324         count = ppa->ppa_holdpkts[npflagpos];
2325         ppa->ppa_holdpkts[npflagpos] = 0;
2326         mutex_exit(&ppa->ppa_npmutex);
2327 
2328         q = ppa->ppa_ctl->sps_rq;
2329 
2330         while (count > 0) {
2331                 mp = getq(q);
2332                 ASSERT(mp != NULL);
2333 
2334                 mp_proto = PPP_PROTOCOL(mp->b_rptr);
2335                 if (mp_proto != proto) {
2336                         (void) putq(q, mp);
2337                         continue;
2338                 }
2339                 count--;
2340                 destsps = NULL;
2341                 if (mp_proto == PPP_IP) {
2342                         destsps = ppa->ppa_ip_cache;
2343                 } else if (mp_proto == PPP_IPV6) {
2344                         destsps = ppa->ppa_ip6_cache;
2345                 }
2346                 ASSERT(destsps != NULL);
2347 
2348                 if (IS_SPS_FASTPATH(destsps)) {
2349                         mp->b_rptr += PPP_HDRLEN;
2350                 } else {
2351                         spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
2352                         ASSERT(uqs != NULL);
2353                         mp->b_rptr += PPP_HDRLEN;
2354                         mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
2355                         if (mp == NULL) {
2356                                 mutex_enter(&ppa->ppa_sta_lock);
2357                                 ppa->ppa_allocbfail++;
2358                                 mutex_exit(&ppa->ppa_sta_lock);
2359                                 /* mp already freed by sppp_dladdud */
2360                                 continue;
2361                         }
2362                 }
2363 
2364                 if (canputnext(destsps->sps_rq)) {
2365                         putnext(destsps->sps_rq, mp);
2366                 } else {
2367                         mutex_enter(&ppa->ppa_sta_lock);
2368                         ppa->ppa_iqdropped++;
2369                         mutex_exit(&ppa->ppa_sta_lock);
2370                         freemsg(mp);
2371                         continue;
2372                 }
2373         }
2374 }