Print this page
10687 Service routine cast changes need smatch fixes
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/ppp/sppp/sppp.c
+++ new/usr/src/uts/common/io/ppp/sppp/sppp.c
1 1 /*
2 2 * sppp.c - Solaris STREAMS PPP multiplexing pseudo-driver
3 3 *
4 4 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
5 5 * Use is subject to license terms.
6 6 * Copyright (c) 2016 by Delphix. All rights reserved.
7 + * Copyright 2019, Joyent, Inc.
7 8 *
8 9 * Permission to use, copy, modify, and distribute this software and its
9 10 * documentation is hereby granted, provided that the above copyright
10 11 * notice appears in all copies.
11 12 *
12 13 * SUN MAKES NO REPRESENTATION OR WARRANTIES ABOUT THE SUITABILITY OF
13 14 * THE SOFTWARE, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
14 15 * TO THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
15 16 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT. SUN SHALL NOT BE LIABLE FOR
16 17 * ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
17 18 * DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES
18 19 *
19 20 * Copyright (c) 1994 The Australian National University.
20 21 * All rights reserved.
21 22 *
22 23 * Permission to use, copy, modify, and distribute this software and its
23 24 * documentation is hereby granted, provided that the above copyright
24 25 * notice appears in all copies. This software is provided without any
25 26 * warranty, express or implied. The Australian National University
26 27 * makes no representations about the suitability of this software for
27 28 * any purpose.
28 29 *
29 30 * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY
30 31 * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
31 32 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
32 33 * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY
33 34 * OF SUCH DAMAGE.
34 35 *
35 36 * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES,
36 37 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
37 38 * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
38 39 * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO
39 40 * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,
40 41 * OR MODIFICATIONS.
41 42 *
42 43 * This driver is derived from the original SVR4 STREAMS PPP driver
43 44 * originally written by Paul Mackerras <paul.mackerras@cs.anu.edu.au>.
44 45 *
45 46 * Adi Masputra <adi.masputra@sun.com> rewrote and restructured the code
46 47 * for improved performance and scalability.
47 48 */
48 49
49 50 #define RCSID "$Id: sppp.c,v 1.0 2000/05/08 01:10:12 masputra Exp $"
50 51
51 52 #include <sys/types.h>
52 53 #include <sys/debug.h>
53 54 #include <sys/param.h>
54 55 #include <sys/stat.h>
55 56 #include <sys/stream.h>
56 57 #include <sys/stropts.h>
57 58 #include <sys/sysmacros.h>
58 59 #include <sys/errno.h>
59 60 #include <sys/time.h>
60 61 #include <sys/cmn_err.h>
61 62 #include <sys/kmem.h>
62 63 #include <sys/conf.h>
63 64 #include <sys/dlpi.h>
64 65 #include <sys/ddi.h>
65 66 #include <sys/kstat.h>
66 67 #include <sys/strsun.h>
67 68 #include <sys/ethernet.h>
68 69 #include <sys/policy.h>
69 70 #include <sys/zone.h>
70 71 #include <net/ppp_defs.h>
71 72 #include <net/pppio.h>
72 73 #include "sppp.h"
73 74 #include "s_common.h"
74 75
75 76 /*
76 77 * This is used to tag official Solaris sources. Please do not define
77 78 * "INTERNAL_BUILD" when building this software outside of Sun Microsystems.
78 79 */
79 80 #ifdef INTERNAL_BUILD
80 81 /* MODINFO is limited to 32 characters. */
81 82 const char sppp_module_description[] = "PPP 4.0 mux";
82 83 #else /* INTERNAL_BUILD */
83 84 const char sppp_module_description[] = "ANU PPP mux";
84 85
85 86 /* LINTED */
86 87 static const char buildtime[] = "Built " __DATE__ " at " __TIME__
87 88 #ifdef DEBUG
88 89 " DEBUG"
89 90 #endif
90 91 "\n";
91 92 #endif /* INTERNAL_BUILD */
92 93
93 94 static void sppp_inner_ioctl(queue_t *, mblk_t *);
94 95 static void sppp_outer_ioctl(queue_t *, mblk_t *);
95 96 static queue_t *sppp_send(queue_t *, mblk_t **, spppstr_t *);
96 97 static queue_t *sppp_recv(queue_t *, mblk_t **, spppstr_t *);
97 98 static void sppp_recv_nondata(queue_t *, mblk_t *, spppstr_t *);
98 99 static queue_t *sppp_outpkt(queue_t *, mblk_t **, int, spppstr_t *);
99 100 static spppstr_t *sppp_inpkt(queue_t *, mblk_t *, spppstr_t *);
100 101 static int sppp_kstat_update(kstat_t *, int);
101 102 static void sppp_release_pkts(sppa_t *, uint16_t);
102 103
103 104 /*
104 105 * sps_list contains the list of active per-stream instance state structures
105 106 * ordered on the minor device number (see sppp.h for details). All streams
106 107 * opened to this driver are threaded together in this list.
107 108 */
108 109 static spppstr_t *sps_list = NULL;
109 110 /*
110 111 * ppa_list contains the list of active per-attachment instance state
111 112 * structures ordered on the ppa id number (see sppp.h for details). All of
112 113 * the ppa structures created once per PPPIO_NEWPPA ioctl are threaded together
113 114 * in this list. There is exactly one ppa structure for a given PPP interface,
114 115 * and multiple sps streams (upper streams) may share a ppa by performing
115 116 * an attachment explicitly (PPPIO_ATTACH) or implicitly (DL_ATTACH_REQ).
116 117 */
117 118 static sppa_t *ppa_list = NULL;
118 119
119 120 static const char *kstats_names[] = { SPPP_KSTATS_NAMES };
120 121 static const char *kstats64_names[] = { SPPP_KSTATS64_NAMES };
121 122
122 123 /*
123 124 * map proto (which is an IANA defined ppp network protocol) to
124 125 * a bit position indicated by NP_* in ppa_npflag
125 126 */
126 127 static uint32_t
127 128 sppp_ppp2np(uint16_t proto)
128 129 {
129 130 switch (proto) {
130 131 case PPP_IP:
131 132 return (NP_IP);
132 133 case PPP_IPV6:
133 134 return (NP_IPV6);
134 135 default:
135 136 return (0);
136 137 }
137 138 }
138 139
139 140 /*
140 141 * sppp_open()
141 142 *
142 143 * MT-Perimeters:
143 144 * exclusive inner, exclusive outer.
144 145 *
145 146 * Description:
146 147 * Common open procedure for module.
147 148 */
148 149 /* ARGSUSED */
149 150 int
150 151 sppp_open(queue_t *q, dev_t *devp, int oflag, int sflag, cred_t *credp)
151 152 {
152 153 spppstr_t *sps;
153 154 spppstr_t **nextmn;
154 155 minor_t mn;
155 156
156 157 ASSERT(q != NULL && devp != NULL);
157 158 ASSERT(sflag != MODOPEN);
158 159
159 160 if (q->q_ptr != NULL) {
160 161 return (0); /* already open */
161 162 }
162 163 if (sflag != CLONEOPEN) {
163 164 return (OPENFAIL);
164 165 }
165 166 /*
166 167 * The sps list is sorted using the minor number as the key. The
167 168 * following code walks the list to find the lowest valued minor
168 169 * number available to be used.
169 170 */
170 171 mn = 0;
171 172 for (nextmn = &sps_list; (sps = *nextmn) != NULL;
172 173 nextmn = &sps->sps_nextmn) {
173 174 if (sps->sps_mn_id != mn) {
174 175 break;
175 176 }
176 177 ++mn;
177 178 }
178 179 sps = (spppstr_t *)kmem_zalloc(sizeof (spppstr_t), KM_SLEEP);
179 180 ASSERT(sps != NULL); /* KM_SLEEP must never return NULL */
180 181 sps->sps_nextmn = *nextmn; /* insert stream in global list */
181 182 *nextmn = sps;
182 183 sps->sps_mn_id = mn; /* save minor id for this stream */
183 184 sps->sps_rq = q; /* save read queue pointer */
184 185 sps->sps_sap = -1; /* no sap bound to stream */
185 186 sps->sps_dlstate = DL_UNATTACHED; /* dlpi state is unattached */
186 187 sps->sps_npmode = NPMODE_DROP; /* drop all packets initially */
187 188 sps->sps_zoneid = crgetzoneid(credp);
188 189 q->q_ptr = WR(q)->q_ptr = (caddr_t)sps;
189 190 /*
190 191 * We explicitly disable the automatic queue scheduling for the
191 192 * write-side to obtain complete control over queuing during transmit.
192 193 * Packets will be queued at the upper write queue and the service
193 194 * routine will not be called until it gets scheduled by having the
194 195 * lower write service routine call the qenable(WR(uq)) for all streams
195 196 * attached to the same ppa instance.
196 197 */
197 198 noenable(WR(q));
198 199 *devp = makedevice(getmajor(*devp), mn);
199 200 qprocson(q);
200 201 return (0);
201 202 }
202 203
203 204 /*
204 205 * Free storage used by a PPA. This is not called until the last PPA
205 206 * user closes their connection or reattaches to a different PPA.
206 207 */
207 208 static void
208 209 sppp_free_ppa(sppa_t *ppa)
209 210 {
210 211 sppa_t **nextppa;
211 212
212 213 ASSERT(ppa->ppa_refcnt == 1);
213 214 if (ppa->ppa_kstats != NULL) {
214 215 kstat_delete(ppa->ppa_kstats);
215 216 ppa->ppa_kstats = NULL;
216 217 }
217 218 mutex_destroy(&ppa->ppa_sta_lock);
218 219 mutex_destroy(&ppa->ppa_npmutex);
219 220 rw_destroy(&ppa->ppa_sib_lock);
220 221 nextppa = &ppa_list;
221 222 while (*nextppa != NULL) {
222 223 if (*nextppa == ppa) {
223 224 *nextppa = ppa->ppa_nextppa;
224 225 break;
225 226 }
226 227 nextppa = &(*nextppa)->ppa_nextppa;
227 228 }
228 229 kmem_free(ppa, sizeof (*ppa));
229 230 }
230 231
231 232 /*
232 233 * Create a new PPA. Caller must be exclusive on outer perimeter.
233 234 */
234 235 sppa_t *
235 236 sppp_create_ppa(uint32_t ppa_id, zoneid_t zoneid)
236 237 {
237 238 sppa_t *ppa;
238 239 sppa_t *curppa;
239 240 sppa_t **availppa;
240 241 char unit[32]; /* Unit name */
241 242 const char **cpp;
242 243 kstat_t *ksp;
243 244 kstat_named_t *knt;
244 245
245 246 /*
246 247 * NOTE: unit *must* be named for the driver
247 248 * name plus the ppa number so that netstat
248 249 * can find the statistics.
249 250 */
250 251 (void) sprintf(unit, "%s" "%d", PPP_DRV_NAME, ppa_id);
251 252 /*
252 253 * Make sure we can allocate a buffer to
253 254 * contain the ppa to be sent upstream, as
254 255 * well as the actual ppa structure and its
255 256 * associated kstat structure.
256 257 */
257 258 ppa = (sppa_t *)kmem_zalloc(sizeof (sppa_t),
258 259 KM_NOSLEEP);
259 260 ksp = kstat_create(PPP_DRV_NAME, ppa_id, unit, "net", KSTAT_TYPE_NAMED,
260 261 sizeof (sppp_kstats_t) / sizeof (kstat_named_t), 0);
261 262
262 263 if (ppa == NULL || ksp == NULL) {
263 264 if (ppa != NULL) {
264 265 kmem_free(ppa, sizeof (sppa_t));
265 266 }
266 267 if (ksp != NULL) {
267 268 kstat_delete(ksp);
268 269 }
269 270 return (NULL);
270 271 }
271 272 ppa->ppa_kstats = ksp; /* chain kstat structure */
272 273 ppa->ppa_ppa_id = ppa_id; /* record ppa id */
273 274 ppa->ppa_zoneid = zoneid; /* zone that owns this PPA */
274 275 ppa->ppa_mtu = PPP_MAXMTU; /* 65535-(PPP_HDRLEN+PPP_FCSLEN) */
275 276 ppa->ppa_mru = PPP_MAXMRU; /* 65000 */
276 277
277 278 mutex_init(&ppa->ppa_sta_lock, NULL, MUTEX_DRIVER, NULL);
278 279 mutex_init(&ppa->ppa_npmutex, NULL, MUTEX_DRIVER, NULL);
279 280 rw_init(&ppa->ppa_sib_lock, NULL, RW_DRIVER, NULL);
280 281
281 282 /*
282 283 * Prepare and install kstat counters. Note that for netstat
283 284 * -i to work, there needs to be "ipackets", "opackets",
284 285 * "ierrors", and "oerrors" kstat named variables.
285 286 */
286 287 knt = (kstat_named_t *)ksp->ks_data;
287 288 for (cpp = kstats_names; cpp < kstats_names + Dim(kstats_names);
288 289 cpp++) {
289 290 kstat_named_init(knt, *cpp, KSTAT_DATA_UINT32);
290 291 knt++;
291 292 }
292 293 for (cpp = kstats64_names; cpp < kstats64_names + Dim(kstats64_names);
293 294 cpp++) {
294 295 kstat_named_init(knt, *cpp, KSTAT_DATA_UINT64);
295 296 knt++;
296 297 }
297 298 ksp->ks_update = sppp_kstat_update;
298 299 ksp->ks_private = (void *)ppa;
299 300 kstat_install(ksp);
300 301
301 302 /* link to the next ppa and insert into global list */
302 303 availppa = &ppa_list;
303 304 while ((curppa = *availppa) != NULL) {
304 305 if (ppa_id < curppa->ppa_ppa_id)
305 306 break;
306 307 availppa = &curppa->ppa_nextppa;
307 308 }
308 309 ppa->ppa_nextppa = *availppa;
309 310 *availppa = ppa;
310 311 return (ppa);
311 312 }
312 313
313 314 /*
314 315 * sppp_close()
315 316 *
316 317 * MT-Perimeters:
317 318 * exclusive inner, exclusive outer.
318 319 *
319 320 * Description:
320 321 * Common close procedure for module.
321 322 */
322 323 /* ARGSUSED */
323 324 int
324 325 sppp_close(queue_t *q, int flags __unused, cred_t *credp __unused)
325 326 {
326 327 spppstr_t *sps;
327 328 spppstr_t **nextmn;
328 329 spppstr_t *sib;
329 330 sppa_t *ppa;
330 331 mblk_t *mp;
331 332
332 333 ASSERT(q != NULL && q->q_ptr != NULL);
333 334 sps = (spppstr_t *)q->q_ptr;
334 335 qprocsoff(q);
335 336
336 337 ppa = sps->sps_ppa;
337 338 if (ppa == NULL) {
338 339 ASSERT(!IS_SPS_CONTROL(sps));
339 340 goto close_unattached;
340 341 }
341 342 if (IS_SPS_CONTROL(sps)) {
342 343 uint32_t cnt = 0;
343 344
344 345 ASSERT(ppa != NULL);
345 346 ASSERT(ppa->ppa_ctl == sps);
346 347 ppa->ppa_ctl = NULL;
347 348 /*
348 349 * STREAMS framework always issues I_UNLINK prior to close,
349 350 * since we only allow I_LINK under the control stream.
350 351 * A given ppa structure has at most one lower stream pointed
351 352 * by the ppa_lower_wq field, because we only allow a single
352 353 * linkage (I_LINK) to be done on the control stream.
353 354 */
354 355 ASSERT(ppa->ppa_lower_wq == NULL);
355 356 /*
356 357 * Walk through all of sibling streams attached to this ppa,
357 358 * and remove all references to this ppa. We have exclusive
358 359 * access for the entire driver here, so there's no need
359 360 * to hold ppa_sib_lock.
360 361 */
361 362 cnt++;
362 363 sib = ppa->ppa_streams;
363 364 while (sib != NULL) {
364 365 ASSERT(ppa == sib->sps_ppa);
365 366 sib->sps_npmode = NPMODE_DROP;
366 367 sib->sps_flags &= ~(SPS_PIOATTACH | SPS_CACHED);
367 368 /*
368 369 * There should be a preallocated hangup
369 370 * message here. Fetch it and send it up to
370 371 * the stream head. This will cause IP to
371 372 * mark the interface as "down."
372 373 */
373 374 if ((mp = sib->sps_hangup) != NULL) {
374 375 sib->sps_hangup = NULL;
375 376 /*
376 377 * M_HANGUP works with IP, but snoop
377 378 * is lame and requires M_ERROR. Send
378 379 * up a clean error code instead.
379 380 *
380 381 * XXX if snoop is fixed, fix this, too.
381 382 */
382 383 MTYPE(mp) = M_ERROR;
383 384 *mp->b_wptr++ = ENXIO;
384 385 putnext(sib->sps_rq, mp);
385 386 }
386 387 qenable(WR(sib->sps_rq));
387 388 cnt++;
388 389 sib = sib->sps_nextsib;
389 390 }
390 391 ASSERT(ppa->ppa_refcnt == cnt);
391 392 } else {
392 393 ASSERT(ppa->ppa_streams != NULL);
393 394 ASSERT(ppa->ppa_ctl != sps);
394 395 mp = NULL;
395 396 if (sps->sps_sap == PPP_IP) {
396 397 ppa->ppa_ip_cache = NULL;
397 398 mp = create_lsmsg(PPP_LINKSTAT_IPV4_UNBOUND);
398 399 } else if (sps->sps_sap == PPP_IPV6) {
399 400 ppa->ppa_ip6_cache = NULL;
400 401 mp = create_lsmsg(PPP_LINKSTAT_IPV6_UNBOUND);
401 402 }
402 403 /* Tell the daemon the bad news. */
403 404 if (mp != NULL && ppa->ppa_ctl != NULL &&
404 405 (sps->sps_npmode == NPMODE_PASS ||
405 406 sps->sps_npmode == NPMODE_QUEUE)) {
406 407 putnext(ppa->ppa_ctl->sps_rq, mp);
407 408 } else {
408 409 freemsg(mp);
409 410 }
410 411 /*
411 412 * Walk through all of sibling streams attached to the
412 413 * same ppa, and remove this stream from the sibling
413 414 * streams list. We have exclusive access for the
414 415 * entire driver here, so there's no need to hold
415 416 * ppa_sib_lock.
416 417 */
417 418 sib = ppa->ppa_streams;
418 419 if (sib == sps) {
419 420 ppa->ppa_streams = sps->sps_nextsib;
420 421 } else {
421 422 while (sib->sps_nextsib != NULL) {
422 423 if (sib->sps_nextsib == sps) {
423 424 sib->sps_nextsib = sps->sps_nextsib;
424 425 break;
425 426 }
426 427 sib = sib->sps_nextsib;
427 428 }
428 429 }
429 430 sps->sps_nextsib = NULL;
430 431 freemsg(sps->sps_hangup);
431 432 sps->sps_hangup = NULL;
432 433 /*
433 434 * Check if this is a promiscous stream. If the SPS_PROMISC bit
434 435 * is still set, it means that the stream is closed without
435 436 * ever having issued DL_DETACH_REQ or DL_PROMISCOFF_REQ.
436 437 * In this case, we simply decrement the promiscous counter,
437 438 * and it's safe to do it without holding ppa_sib_lock since
438 439 * we're exclusive (inner and outer) at this point.
439 440 */
440 441 if (IS_SPS_PROMISC(sps)) {
441 442 ASSERT(ppa->ppa_promicnt > 0);
442 443 ppa->ppa_promicnt--;
443 444 }
444 445 }
445 446 /* If we're the only one left, then delete now. */
446 447 if (ppa->ppa_refcnt <= 1)
447 448 sppp_free_ppa(ppa);
448 449 else
449 450 ppa->ppa_refcnt--;
450 451 close_unattached:
451 452 q->q_ptr = WR(q)->q_ptr = NULL;
452 453 for (nextmn = &sps_list; *nextmn != NULL;
453 454 nextmn = &(*nextmn)->sps_nextmn) {
454 455 if (*nextmn == sps) {
455 456 *nextmn = sps->sps_nextmn;
456 457 break;
457 458 }
458 459 }
459 460 kmem_free(sps, sizeof (spppstr_t));
460 461 return (0);
461 462 }
462 463
463 464 static void
464 465 sppp_ioctl(struct queue *q, mblk_t *mp)
465 466 {
466 467 spppstr_t *sps;
467 468 spppstr_t *nextsib;
468 469 sppa_t *ppa;
469 470 struct iocblk *iop;
470 471 mblk_t *nmp;
471 472 enum NPmode npmode;
472 473 struct ppp_idle *pip;
473 474 struct ppp_stats64 *psp;
474 475 struct ppp_comp_stats *pcsp;
475 476 hrtime_t hrtime;
476 477 int sap;
477 478 int count = 0;
478 479 int error = EINVAL;
479 480
480 481 sps = (spppstr_t *)q->q_ptr;
481 482 ppa = sps->sps_ppa;
482 483
483 484 iop = (struct iocblk *)mp->b_rptr;
484 485 switch (iop->ioc_cmd) {
485 486 case PPPIO_NPMODE:
486 487 if (!IS_SPS_CONTROL(sps)) {
487 488 break; /* return EINVAL */
488 489 } else if (iop->ioc_count != 2 * sizeof (uint32_t) ||
489 490 (mp->b_cont == NULL)) {
490 491 error = EPROTO;
491 492 break;
492 493 }
493 494 ASSERT(ppa != NULL);
494 495 ASSERT(mp->b_cont->b_rptr != NULL);
495 496 ASSERT(sps->sps_npmode == NPMODE_PASS);
496 497 sap = ((uint32_t *)mp->b_cont->b_rptr)[0];
497 498 npmode = (enum NPmode)((uint32_t *)mp->b_cont->b_rptr)[1];
498 499 /*
499 500 * Walk the sibling streams which belong to the same
500 501 * ppa, and try to find a stream with matching sap
501 502 * number.
502 503 */
503 504 rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
504 505 for (nextsib = ppa->ppa_streams; nextsib != NULL;
505 506 nextsib = nextsib->sps_nextsib) {
506 507 if (nextsib->sps_sap == sap) {
507 508 break; /* found it */
508 509 }
509 510 }
510 511 if (nextsib == NULL) {
511 512 rw_exit(&ppa->ppa_sib_lock);
512 513 break; /* return EINVAL */
513 514 } else {
514 515 nextsib->sps_npmode = npmode;
515 516 if ((nextsib->sps_npmode != NPMODE_QUEUE) &&
516 517 (WR(nextsib->sps_rq)->q_first != NULL)) {
517 518 qenable(WR(nextsib->sps_rq));
518 519 }
519 520 }
520 521 rw_exit(&ppa->ppa_sib_lock);
521 522 error = 0; /* return success */
522 523 break;
523 524 case PPPIO_GIDLE:
524 525 if (ppa == NULL) {
525 526 ASSERT(!IS_SPS_CONTROL(sps));
526 527 error = ENOLINK;
527 528 break;
528 529 } else if (!IS_PPA_TIMESTAMP(ppa)) {
529 530 break; /* return EINVAL */
530 531 }
531 532 if ((nmp = allocb(sizeof (struct ppp_idle),
532 533 BPRI_MED)) == NULL) {
533 534 mutex_enter(&ppa->ppa_sta_lock);
534 535 ppa->ppa_allocbfail++;
535 536 mutex_exit(&ppa->ppa_sta_lock);
536 537 error = ENOSR;
537 538 break;
538 539 }
539 540 if (mp->b_cont != NULL) {
540 541 freemsg(mp->b_cont);
541 542 }
542 543 mp->b_cont = nmp;
543 544 pip = (struct ppp_idle *)nmp->b_wptr;
544 545 nmp->b_wptr += sizeof (struct ppp_idle);
545 546 /*
546 547 * Get current timestamp and subtract the tx and rx
547 548 * timestamps to get the actual idle time to be
548 549 * returned.
549 550 */
550 551 hrtime = gethrtime();
551 552 pip->xmit_idle = (hrtime - ppa->ppa_lasttx) / 1000000000ul;
552 553 pip->recv_idle = (hrtime - ppa->ppa_lastrx) / 1000000000ul;
553 554 count = msgsize(nmp);
554 555 error = 0;
555 556 break; /* return success (error is 0) */
556 557 case PPPIO_GTYPE:
557 558 nmp = allocb(sizeof (uint32_t), BPRI_MED);
558 559 if (nmp == NULL) {
559 560 error = ENOSR;
560 561 break;
561 562 }
562 563 if (mp->b_cont != NULL) {
563 564 freemsg(mp->b_cont);
564 565 }
565 566 mp->b_cont = nmp;
566 567 /*
567 568 * Let the requestor know that we are the PPP
568 569 * multiplexer (PPPTYP_MUX).
569 570 */
570 571 *(uint32_t *)nmp->b_wptr = PPPTYP_MUX;
571 572 nmp->b_wptr += sizeof (uint32_t);
572 573 count = msgsize(nmp);
573 574 error = 0; /* return success */
574 575 break;
575 576 case PPPIO_GETSTAT64:
576 577 if (ppa == NULL) {
577 578 break; /* return EINVAL */
578 579 } else if ((ppa->ppa_lower_wq != NULL) &&
579 580 !IS_PPA_LASTMOD(ppa)) {
580 581 mutex_enter(&ppa->ppa_sta_lock);
581 582 /*
582 583 * We match sps_ioc_id on the M_IOC{ACK,NAK},
583 584 * so if the response hasn't come back yet,
584 585 * new ioctls must be queued instead.
585 586 */
586 587 if (IS_SPS_IOCQ(sps)) {
587 588 mutex_exit(&ppa->ppa_sta_lock);
588 589 if (!putq(q, mp)) {
589 590 error = EAGAIN;
590 591 break;
591 592 }
592 593 return;
593 594 } else {
594 595 ppa->ppa_ioctlsfwd++;
595 596 /*
596 597 * Record the ioctl CMD & ID - this will be
597 598 * used to check the ACK or NAK responses
598 599 * coming from below.
599 600 */
600 601 sps->sps_ioc_id = iop->ioc_id;
601 602 sps->sps_flags |= SPS_IOCQ;
602 603 mutex_exit(&ppa->ppa_sta_lock);
603 604 }
604 605 putnext(ppa->ppa_lower_wq, mp);
605 606 return; /* don't ack or nak the request */
606 607 }
607 608 nmp = allocb(sizeof (*psp), BPRI_MED);
608 609 if (nmp == NULL) {
609 610 mutex_enter(&ppa->ppa_sta_lock);
610 611 ppa->ppa_allocbfail++;
611 612 mutex_exit(&ppa->ppa_sta_lock);
612 613 error = ENOSR;
613 614 break;
614 615 }
615 616 if (mp->b_cont != NULL) {
616 617 freemsg(mp->b_cont);
617 618 }
618 619 mp->b_cont = nmp;
619 620 psp = (struct ppp_stats64 *)nmp->b_wptr;
620 621 /*
621 622 * Copy the contents of ppp_stats64 structure for this
622 623 * ppa and return them to the caller.
623 624 */
624 625 mutex_enter(&ppa->ppa_sta_lock);
625 626 bcopy(&ppa->ppa_stats, psp, sizeof (*psp));
626 627 mutex_exit(&ppa->ppa_sta_lock);
627 628 nmp->b_wptr += sizeof (*psp);
628 629 count = sizeof (*psp);
629 630 error = 0; /* return success */
630 631 break;
631 632 case PPPIO_GETCSTAT:
632 633 if (ppa == NULL) {
633 634 break; /* return EINVAL */
634 635 } else if ((ppa->ppa_lower_wq != NULL) &&
635 636 !IS_PPA_LASTMOD(ppa)) {
636 637 mutex_enter(&ppa->ppa_sta_lock);
637 638 /*
638 639 * See comments in PPPIO_GETSTAT64 case
639 640 * in sppp_ioctl().
640 641 */
641 642 if (IS_SPS_IOCQ(sps)) {
642 643 mutex_exit(&ppa->ppa_sta_lock);
643 644 if (!putq(q, mp)) {
644 645 error = EAGAIN;
645 646 break;
646 647 }
647 648 return;
648 649 } else {
649 650 ppa->ppa_ioctlsfwd++;
650 651 /*
651 652 * Record the ioctl CMD & ID - this will be
652 653 * used to check the ACK or NAK responses
653 654 * coming from below.
654 655 */
655 656 sps->sps_ioc_id = iop->ioc_id;
656 657 sps->sps_flags |= SPS_IOCQ;
657 658 mutex_exit(&ppa->ppa_sta_lock);
658 659 }
659 660 putnext(ppa->ppa_lower_wq, mp);
660 661 return; /* don't ack or nak the request */
661 662 }
662 663 nmp = allocb(sizeof (struct ppp_comp_stats), BPRI_MED);
663 664 if (nmp == NULL) {
664 665 mutex_enter(&ppa->ppa_sta_lock);
665 666 ppa->ppa_allocbfail++;
666 667 mutex_exit(&ppa->ppa_sta_lock);
667 668 error = ENOSR;
668 669 break;
669 670 }
670 671 if (mp->b_cont != NULL) {
671 672 freemsg(mp->b_cont);
672 673 }
673 674 mp->b_cont = nmp;
674 675 pcsp = (struct ppp_comp_stats *)nmp->b_wptr;
675 676 nmp->b_wptr += sizeof (struct ppp_comp_stats);
676 677 bzero((caddr_t)pcsp, sizeof (struct ppp_comp_stats));
677 678 count = msgsize(nmp);
678 679 error = 0; /* return success */
679 680 break;
680 681 }
681 682
682 683 if (error == 0) {
683 684 /* Success; tell the user. */
684 685 miocack(q, mp, count, 0);
685 686 } else {
686 687 /* Failure; send error back upstream. */
687 688 miocnak(q, mp, 0, error);
688 689 }
689 690 }
690 691
691 692 /*
692 693 * sppp_uwput()
693 694 *
694 695 * MT-Perimeters:
695 696 * shared inner, shared outer.
696 697 *
697 698 * Description:
698 699 * Upper write-side put procedure. Messages from above arrive here.
699 700 */
700 701 int
701 702 sppp_uwput(queue_t *q, mblk_t *mp)
702 703 {
703 704 queue_t *nextq;
704 705 spppstr_t *sps;
705 706 sppa_t *ppa;
706 707 struct iocblk *iop;
707 708 int error;
708 709
709 710 ASSERT(q != NULL && q->q_ptr != NULL);
710 711 ASSERT(mp != NULL && mp->b_rptr != NULL);
711 712 sps = (spppstr_t *)q->q_ptr;
712 713 ppa = sps->sps_ppa;
713 714
714 715 switch (MTYPE(mp)) {
715 716 case M_PCPROTO:
716 717 case M_PROTO:
717 718 if (IS_SPS_CONTROL(sps)) {
718 719 ASSERT(ppa != NULL);
719 720 /*
720 721 * Intentionally change this to a high priority
721 722 * message so it doesn't get queued up. M_PROTO is
722 723 * specifically used for signalling between pppd and its
723 724 * kernel-level component(s), such as ppptun, so we
724 725 * make sure that it doesn't get queued up behind
725 726 * data messages.
726 727 */
727 728 MTYPE(mp) = M_PCPROTO;
728 729 if ((ppa->ppa_lower_wq != NULL) &&
729 730 canputnext(ppa->ppa_lower_wq)) {
730 731 mutex_enter(&ppa->ppa_sta_lock);
731 732 ppa->ppa_mctlsfwd++;
732 733 mutex_exit(&ppa->ppa_sta_lock);
733 734 putnext(ppa->ppa_lower_wq, mp);
734 735 } else {
735 736 mutex_enter(&ppa->ppa_sta_lock);
736 737 ppa->ppa_mctlsfwderr++;
737 738 mutex_exit(&ppa->ppa_sta_lock);
738 739 freemsg(mp);
739 740 }
740 741 } else {
741 742 (void) sppp_mproto(q, mp, sps);
742 743 return (0);
743 744 }
744 745 break;
745 746 case M_DATA:
746 747 if ((nextq = sppp_send(q, &mp, sps)) != NULL)
747 748 putnext(nextq, mp);
748 749 break;
749 750 case M_IOCTL:
750 751 error = EINVAL;
751 752 iop = (struct iocblk *)mp->b_rptr;
752 753 switch (iop->ioc_cmd) {
753 754 case DLIOCRAW:
754 755 case DL_IOC_HDR_INFO:
755 756 case PPPIO_ATTACH:
756 757 case PPPIO_DEBUG:
757 758 case PPPIO_DETACH:
758 759 case PPPIO_LASTMOD:
759 760 case PPPIO_MRU:
760 761 case PPPIO_MTU:
761 762 case PPPIO_USETIMESTAMP:
762 763 case PPPIO_BLOCKNP:
763 764 case PPPIO_UNBLOCKNP:
764 765 qwriter(q, mp, sppp_inner_ioctl, PERIM_INNER);
765 766 return (0);
766 767 case I_LINK:
767 768 case I_UNLINK:
768 769 case PPPIO_NEWPPA:
769 770 qwriter(q, mp, sppp_outer_ioctl, PERIM_OUTER);
770 771 return (0);
771 772 case PPPIO_NPMODE:
772 773 case PPPIO_GIDLE:
773 774 case PPPIO_GTYPE:
774 775 case PPPIO_GETSTAT64:
775 776 case PPPIO_GETCSTAT:
776 777 /*
777 778 * These require additional auto variables to
778 779 * handle, so (for optimization reasons)
779 780 * they're moved off to a separate function.
780 781 */
781 782 sppp_ioctl(q, mp);
782 783 return (0);
783 784 case PPPIO_GETSTAT:
784 785 break; /* 32 bit interface gone */
785 786 default:
786 787 if (iop->ioc_cr == NULL ||
787 788 secpolicy_ppp_config(iop->ioc_cr) != 0) {
788 789 error = EPERM;
789 790 break;
790 791 } else if ((ppa == NULL) ||
791 792 (ppa->ppa_lower_wq == NULL)) {
792 793 break; /* return EINVAL */
793 794 }
794 795 mutex_enter(&ppa->ppa_sta_lock);
795 796 /*
796 797 * See comments in PPPIO_GETSTAT64 case
797 798 * in sppp_ioctl().
798 799 */
799 800 if (IS_SPS_IOCQ(sps)) {
800 801 mutex_exit(&ppa->ppa_sta_lock);
801 802 if (!putq(q, mp)) {
802 803 error = EAGAIN;
803 804 break;
804 805 }
805 806 return (0);
806 807 } else {
807 808 ppa->ppa_ioctlsfwd++;
808 809 /*
809 810 * Record the ioctl CMD & ID -
810 811 * this will be used to check the
811 812 * ACK or NAK responses coming from below.
812 813 */
813 814 sps->sps_ioc_id = iop->ioc_id;
814 815 sps->sps_flags |= SPS_IOCQ;
815 816 mutex_exit(&ppa->ppa_sta_lock);
816 817 }
817 818 putnext(ppa->ppa_lower_wq, mp);
818 819 return (0); /* don't ack or nak the request */
819 820 }
820 821 /* Failure; send error back upstream. */
821 822 miocnak(q, mp, 0, error);
822 823 break;
823 824 case M_FLUSH:
824 825 if (*mp->b_rptr & FLUSHW) {
825 826 flushq(q, FLUSHDATA);
826 827 }
827 828 if (*mp->b_rptr & FLUSHR) {
828 829 *mp->b_rptr &= ~FLUSHW;
829 830 qreply(q, mp);
830 831 } else {
831 832 freemsg(mp);
832 833 }
833 834 break;
834 835 default:
835 836 freemsg(mp);
836 837 break;
837 838 }
838 839 return (0);
839 840 }
840 841
841 842 /*
842 843 * sppp_uwsrv()
843 844 *
844 845 * MT-Perimeters:
845 846 * exclusive inner, shared outer.
846 847 *
847 848 * Description:
848 849 * Upper write-side service procedure. Note that this procedure does
849 850 * not get called when a message is placed on our write-side queue, since
850 851 * automatic queue scheduling has been turned off by noenable() when
851 852 * the queue was opened. We do this on purpose, as we explicitly control
852 853 * the write-side queue. Therefore, this procedure gets called when
853 854 * the lower write service procedure qenable() the upper write stream queue.
854 855 */
855 856 int
856 857 sppp_uwsrv(queue_t *q)
857 858 {
858 859 spppstr_t *sps;
859 860 sppa_t *ppa;
860 861 mblk_t *mp;
861 862 queue_t *nextq;
862 863 struct iocblk *iop;
863 864
864 865 ASSERT(q != NULL && q->q_ptr != NULL);
865 866 sps = (spppstr_t *)q->q_ptr;
866 867
867 868 while ((mp = getq(q)) != NULL) {
868 869 if (MTYPE(mp) == M_IOCTL) {
869 870 ppa = sps->sps_ppa;
870 871 if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) {
871 872 miocnak(q, mp, 0, EINVAL);
872 873 continue;
873 874 }
874 875
875 876 iop = (struct iocblk *)mp->b_rptr;
876 877 mutex_enter(&ppa->ppa_sta_lock);
877 878 /*
878 879 * See comments in PPPIO_GETSTAT64 case
879 880 * in sppp_ioctl().
880 881 */
881 882 if (IS_SPS_IOCQ(sps)) {
882 883 mutex_exit(&ppa->ppa_sta_lock);
883 884 if (putbq(q, mp) == 0)
884 885 miocnak(q, mp, 0, EAGAIN);
885 886 break;
886 887 } else {
887 888 ppa->ppa_ioctlsfwd++;
888 889 sps->sps_ioc_id = iop->ioc_id;
889 890 sps->sps_flags |= SPS_IOCQ;
890 891 mutex_exit(&ppa->ppa_sta_lock);
891 892 putnext(ppa->ppa_lower_wq, mp);
892 893 }
893 894 } else if ((nextq =
894 895 sppp_outpkt(q, &mp, msgdsize(mp), sps)) == NULL) {
895 896 if (mp != NULL) {
896 897 if (putbq(q, mp) == 0)
897 898 freemsg(mp);
898 899 break;
899 900 }
900 901 } else {
901 902 putnext(nextq, mp);
902 903 }
903 904 }
904 905 return (0);
905 906 }
906 907
907 908 void
908 909 sppp_remove_ppa(spppstr_t *sps)
909 910 {
910 911 spppstr_t *nextsib;
911 912 sppa_t *ppa = sps->sps_ppa;
912 913
913 914 rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
914 915 if (ppa->ppa_refcnt <= 1) {
915 916 rw_exit(&ppa->ppa_sib_lock);
916 917 sppp_free_ppa(ppa);
917 918 } else {
918 919 nextsib = ppa->ppa_streams;
919 920 if (nextsib == sps) {
920 921 ppa->ppa_streams = sps->sps_nextsib;
921 922 } else {
922 923 while (nextsib->sps_nextsib != NULL) {
923 924 if (nextsib->sps_nextsib == sps) {
924 925 nextsib->sps_nextsib =
925 926 sps->sps_nextsib;
926 927 break;
927 928 }
928 929 nextsib = nextsib->sps_nextsib;
929 930 }
930 931 }
931 932 ppa->ppa_refcnt--;
932 933 /*
933 934 * And if this stream was marked as promiscuous
934 935 * (SPS_PROMISC), then we need to update the
935 936 * promiscuous streams count. This should only happen
936 937 * when DL_DETACH_REQ is issued prior to marking the
937 938 * stream as non-promiscuous, through
938 939 * DL_PROMISCOFF_REQ request.
939 940 */
940 941 if (IS_SPS_PROMISC(sps)) {
941 942 ASSERT(ppa->ppa_promicnt > 0);
942 943 ppa->ppa_promicnt--;
943 944 }
944 945 rw_exit(&ppa->ppa_sib_lock);
945 946 }
946 947 sps->sps_nextsib = NULL;
947 948 sps->sps_ppa = NULL;
948 949 freemsg(sps->sps_hangup);
949 950 sps->sps_hangup = NULL;
950 951 }
951 952
952 953 sppa_t *
953 954 sppp_find_ppa(uint32_t ppa_id)
954 955 {
955 956 sppa_t *ppa;
956 957
957 958 for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
958 959 if (ppa->ppa_ppa_id == ppa_id) {
959 960 break; /* found the ppa */
960 961 }
961 962 }
962 963 return (ppa);
963 964 }
964 965
/*
 * sppp_inner_ioctl()
 *
 * MT-Perimeters:
 *    exclusive inner, shared outer
 *
 * Description:
 *    Called by sppp_uwput as a result of receiving ioctls which require
 *    an exclusive access at the inner perimeter.  The default disposition
 *    is a NAK with EINVAL; each case either overrides `error' (0 means
 *    ACK) or returns early when the message is forwarded downstream or
 *    queued, in which case no reply is generated here.
 */
static void
sppp_inner_ioctl(queue_t *q, mblk_t *mp)
{
	spppstr_t	*sps;
	sppa_t		*ppa;
	struct iocblk	*iop;
	mblk_t		*nmp;
	int		error = EINVAL;	/* default reply: NAK with EINVAL */
	int		count = 0;	/* payload byte count for miocack */
	int		dbgcmd;
	int		mru, mtu;
	uint32_t	ppa_id;
	hrtime_t	hrtime;
	uint16_t	proto;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);

	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;	/* NULL when this stream is not attached */
	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case DLIOCRAW:
		/* Raw mode is only meaningful on non-control streams. */
		if (IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		}
		sps->sps_flags |= SPS_RAWDATA;
		error = 0;		/* return success */
		break;
	case DL_IOC_HDR_INFO:
		/*
		 * IP fastpath negotiation: validate the DL_UNITDATA_REQ
		 * payload, then append a prebuilt 4-byte PPP header which
		 * IP will prepend to outbound M_DATA from now on.
		 */
		if (IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		} else if ((mp->b_cont == NULL) ||
		    *((t_uscalar_t *)mp->b_cont->b_rptr) != DL_UNITDATA_REQ ||
		    (MBLKL(mp->b_cont) < (sizeof (dl_unitdata_req_t) +
		    SPPP_ADDRL))) {
			error = EPROTO;
			break;
		} else if (ppa == NULL) {
			error = ENOLINK;
			break;
		}
		if ((nmp = allocb(PPP_HDRLEN, BPRI_MED)) == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			error = ENOMEM;
			break;
		}
		/* Canned header: ff 03 <sap-hi> <sap-lo>. */
		*(uchar_t *)nmp->b_wptr++ = PPP_ALLSTATIONS;
		*(uchar_t *)nmp->b_wptr++ = PPP_UI;
		*(uchar_t *)nmp->b_wptr++ = sps->sps_sap >> 8;
		*(uchar_t *)nmp->b_wptr++ = sps->sps_sap & 0xff;
		ASSERT(MBLKL(nmp) == PPP_HDRLEN);

		linkb(mp, nmp);
		sps->sps_flags |= SPS_FASTPATH;
		error = 0;		/* return success */
		count = msgsize(nmp);
		break;
	case PPPIO_ATTACH:
		if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
		    (sps->sps_dlstate != DL_UNATTACHED) ||
		    (iop->ioc_count != sizeof (uint32_t))) {
			break;		/* return EINVAL */
		} else if (mp->b_cont == NULL) {
			error = EPROTO;
			break;
		}
		ASSERT(mp->b_cont->b_rptr != NULL);
		/* If there's something here, it's detached. */
		if (ppa != NULL) {
			sppp_remove_ppa(sps);
		}
		ppa_id = *(uint32_t *)mp->b_cont->b_rptr;
		ppa = sppp_find_ppa(ppa_id);
		/*
		 * If we can't find it, then it's either because the requestor
		 * has supplied a wrong ppa_id to be attached to, or because
		 * the control stream for the specified ppa_id has been closed
		 * before we get here.
		 */
		if (ppa == NULL) {
			error = ENOENT;
			break;
		}
		/* Attaching across zones is not permitted. */
		if (iop->ioc_cr == NULL ||
		    ppa->ppa_zoneid != crgetzoneid(iop->ioc_cr)) {
			error = EPERM;
			break;
		}
		/*
		 * Preallocate the hangup message so that we're always
		 * able to send this upstream in the event of a
		 * catastrophic failure.
		 */
		if ((sps->sps_hangup = allocb(1, BPRI_MED)) == NULL) {
			error = ENOSR;
			break;
		}
		/*
		 * There are two ways to attach a stream to a ppa: one is
		 * through DLPI (DL_ATTACH_REQ) and the other is through
		 * PPPIO_ATTACH. This is why we need to distinguish whether or
		 * not a stream was allocated via PPPIO_ATTACH, so that we can
		 * properly detach it when we receive PPPIO_DETACH ioctl
		 * request.
		 */
		sps->sps_flags |= SPS_PIOATTACH;
		sps->sps_ppa = ppa;
		/*
		 * Add this stream to the head of the list of sibling streams
		 * which belong to the same ppa as specified.
		 */
		rw_enter(&ppa->ppa_sib_lock, RW_WRITER);
		ppa->ppa_refcnt++;
		sps->sps_nextsib = ppa->ppa_streams;
		ppa->ppa_streams = sps;
		rw_exit(&ppa->ppa_sib_lock);
		error = 0;	/* return success */
		break;
	case PPPIO_BLOCKNP:
	case PPPIO_UNBLOCKNP:
		/*
		 * NOTE(review): `ppa' is dereferenced below without a NULL
		 * check; presumably these ioctls only arrive on streams that
		 * are already attached -- confirm against callers (pppd).
		 */
		if (iop->ioc_cr == NULL ||
		    secpolicy_ppp_config(iop->ioc_cr) != 0) {
			error = EPERM;
			break;
		}
		error = miocpullup(mp, sizeof (uint16_t));
		if (error != 0)
			break;
		ASSERT(mp->b_cont->b_rptr != NULL);
		proto = *(uint16_t *)mp->b_cont->b_rptr;
		if (iop->ioc_cmd == PPPIO_BLOCKNP) {
			uint32_t npflagpos = sppp_ppp2np(proto);
			/*
			 * Mark proto as blocked in ppa_npflag until the
			 * corresponding queues for proto have been plumbed.
			 */
			if (npflagpos != 0) {
				mutex_enter(&ppa->ppa_npmutex);
				ppa->ppa_npflag |= (1 << npflagpos);
				mutex_exit(&ppa->ppa_npmutex);
			} else {
				error = EINVAL;
			}
		} else {
			/*
			 * reset ppa_npflag and release proto
			 * packets that were being held in control queue.
			 */
			sppp_release_pkts(ppa, proto);
		}
		break;
	case PPPIO_DEBUG:
		if (iop->ioc_cr == NULL ||
		    secpolicy_ppp_config(iop->ioc_cr) != 0) {
			error = EPERM;
			break;
		} else if (iop->ioc_count != sizeof (uint32_t)) {
			break;		/* return EINVAL */
		} else if (mp->b_cont == NULL) {
			error = EPROTO;
			break;
		}
		ASSERT(mp->b_cont->b_rptr != NULL);
		dbgcmd = *(uint32_t *)mp->b_cont->b_rptr;
		/*
		 * We accept PPPDBG_LOG + PPPDBG_DRIVER value as an indication
		 * that SPS_KDEBUG needs to be enabled for this upper stream.
		 */
		if (dbgcmd == PPPDBG_LOG + PPPDBG_DRIVER) {
			sps->sps_flags |= SPS_KDEBUG;
			error = 0;	/* return success */
			break;
		}
		/*
		 * Otherwise, for any other values, we send them down only if
		 * there is an attachment and if the attachment has something
		 * linked underneath it.
		 */
		if ((ppa == NULL) || (ppa->ppa_lower_wq == NULL)) {
			error = ENOLINK;
			break;
		}
		mutex_enter(&ppa->ppa_sta_lock);
		/*
		 * See comments in PPPIO_GETSTAT64 case
		 * in sppp_ioctl().
		 */
		if (IS_SPS_IOCQ(sps)) {
			/* An ioctl is already in flight; park this one. */
			mutex_exit(&ppa->ppa_sta_lock);
			if (!putq(q, mp)) {
				error = EAGAIN;
				break;
			}
			return;
		} else {
			ppa->ppa_ioctlsfwd++;
			/*
			 * Record the ioctl CMD & ID -
			 * this will be used to check the
			 * ACK or NAK responses coming from below.
			 */
			sps->sps_ioc_id = iop->ioc_id;
			sps->sps_flags |= SPS_IOCQ;
			mutex_exit(&ppa->ppa_sta_lock);
		}
		putnext(ppa->ppa_lower_wq, mp);
		return;		/* don't ack or nak the request */
	case PPPIO_DETACH:
		if (!IS_SPS_PIOATTACH(sps)) {
			break;		/* return EINVAL */
		}
		/*
		 * The SPS_PIOATTACH flag set on the stream tells us that
		 * the ppa field is still valid. In the event that the control
		 * stream be closed prior to this stream's detachment, the
		 * SPS_PIOATTACH flag would have been cleared from this stream
		 * during close; in that case we won't get here.
		 */
		ASSERT(ppa != NULL);
		ASSERT(ppa->ppa_ctl != sps);
		ASSERT(sps->sps_dlstate == DL_UNATTACHED);

		/*
		 * We don't actually detach anything until the stream is
		 * closed or reattached.
		 */

		sps->sps_flags &= ~SPS_PIOATTACH;
		error = 0;	/* return success */
		break;
	case PPPIO_LASTMOD:
		/* Only the control stream may declare us the last module. */
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		}
		ASSERT(ppa != NULL);
		ppa->ppa_flags |= PPA_LASTMOD;
		error = 0;	/* return success */
		break;
	case PPPIO_MRU:
		if (!IS_SPS_CONTROL(sps) ||
		    (iop->ioc_count != sizeof (uint32_t))) {
			break;		/* return EINVAL */
		} else if (mp->b_cont == NULL) {
			error = EPROTO;
			break;
		}
		ASSERT(ppa != NULL);
		ASSERT(mp->b_cont->b_rptr != NULL);
		mru = *(uint32_t *)mp->b_cont->b_rptr;
		if ((mru <= 0) || (mru > PPP_MAXMRU)) {
			error = EPROTO;
			break;
		}
		/* Clamp to the protocol-defined minimum MRU. */
		if (mru < PPP_MRU) {
			mru = PPP_MRU;
		}
		ppa->ppa_mru = (uint16_t)mru;
		/*
		 * If there's something beneath this driver for the ppa, then
		 * inform it (or them) of the MRU size. Only do this is we
		 * are not the last PPP module on the stream.
		 */
		if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) {
			(void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MRU,
			    mru);
		}
		error = 0;	/* return success */
		break;
	case PPPIO_MTU:
		if (!IS_SPS_CONTROL(sps) ||
		    (iop->ioc_count != sizeof (uint32_t))) {
			break;		/* return EINVAL */
		} else if (mp->b_cont == NULL) {
			error = EPROTO;
			break;
		}
		ASSERT(ppa != NULL);
		ASSERT(mp->b_cont->b_rptr != NULL);
		mtu = *(uint32_t *)mp->b_cont->b_rptr;
		if ((mtu <= 0) || (mtu > PPP_MAXMTU)) {
			error = EPROTO;
			break;
		}
		ppa->ppa_mtu = (uint16_t)mtu;
		/*
		 * If there's something beneath this driver for the ppa, then
		 * inform it (or them) of the MTU size. Only do this if we
		 * are not the last PPP module on the stream.
		 */
		if (!IS_PPA_LASTMOD(ppa) && (ppa->ppa_lower_wq != NULL)) {
			(void) putctl4(ppa->ppa_lower_wq, M_CTL, PPPCTL_MTU,
			    mtu);
		}
		error = 0;	/* return success */
		break;
	case PPPIO_USETIMESTAMP:
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		}
		if (!IS_PPA_TIMESTAMP(ppa)) {
			/* Seed both stamps so idle time starts from now. */
			hrtime = gethrtime();
			ppa->ppa_lasttx = ppa->ppa_lastrx = hrtime;
			ppa->ppa_flags |= PPA_TIMESTAMP;
		}
		error = 0;
		break;
	}

	if (error == 0) {
		/* Success; tell the user */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream */
		miocnak(q, mp, 0, error);
	}
}
1294 1295
/*
 * sppp_outer_ioctl()
 *
 * MT-Perimeters:
 *    exclusive inner, exclusive outer
 *
 * Description:
 *    Called by sppp_uwput as a result of receiving ioctls which require
 *    an exclusive access at the outer perimeter: I_LINK/I_UNLINK of the
 *    lower (driver) stream and PPPIO_NEWPPA control-stream creation.
 *    Ends by ACKing (error == 0) or NAKing the message.
 */
static void
sppp_outer_ioctl(queue_t *q, mblk_t *mp)
{
	spppstr_t	*sps = q->q_ptr;
	spppstr_t	*nextsib;
	queue_t		*lwq;
	sppa_t		*ppa;
	struct iocblk	*iop;
	int		error = EINVAL;	/* default reply: NAK with EINVAL */
	int		count = 0;	/* payload byte count for miocack */
	uint32_t	ppa_id;
	mblk_t		*nmp;
	zoneid_t	zoneid;

	/* NOTE(review): redundant; sps was already initialized above. */
	sps = (spppstr_t *)q->q_ptr;
	ppa = sps->sps_ppa;
	iop = (struct iocblk *)mp->b_rptr;
	switch (iop->ioc_cmd) {
	case I_LINK:
		/* Only the control stream may link a driver beneath us. */
		if (!IS_SPS_CONTROL(sps)) {
			break;		/* return EINVAL */
		} else if (ppa->ppa_lower_wq != NULL) {
			error = EEXIST;
			break;
		}
		ASSERT(ppa->ppa_ctl != NULL);
		ASSERT(sps->sps_npmode == NPMODE_PASS);
		ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);

		lwq = ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot;
		ASSERT(lwq != NULL);

		ppa->ppa_lower_wq = lwq;
		lwq->q_ptr = RD(lwq)->q_ptr = (caddr_t)ppa;
		/*
		 * Unblock upper network streams which now feed this lower
		 * stream. We don't need to hold ppa_sib_lock here, since we
		 * are writer at the outer perimeter.
		 */
		if (WR(sps->sps_rq)->q_first != NULL)
			qenable(WR(sps->sps_rq));
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			nextsib->sps_npmode = NPMODE_PASS;
			if (WR(nextsib->sps_rq)->q_first != NULL) {
				qenable(WR(nextsib->sps_rq));
			}
		}

		/*
		 * Also unblock (run once) our lower read-side queue.  This is
		 * where packets received while doing the I_LINK may be
		 * languishing; see sppp_lrsrv.
		 */
		qenable(RD(lwq));

		/*
		 * Send useful information down to the modules which are now
		 * linked below this driver (for this particular ppa). Only
		 * do this if we are not the last PPP module on the stream.
		 */
		if (!IS_PPA_LASTMOD(ppa)) {
			(void) putctl8(lwq, M_CTL, PPPCTL_UNIT,
			    ppa->ppa_ppa_id);
			(void) putctl4(lwq, M_CTL, PPPCTL_MRU, ppa->ppa_mru);
			(void) putctl4(lwq, M_CTL, PPPCTL_MTU, ppa->ppa_mtu);
		}

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: I_LINK lwq=0x%p sps=0x%p flags=0x%b ppa=0x%p "
			    "flags=0x%b\n", sps->sps_mn_id,
			    (void *)ppa->ppa_lower_wq, (void *)sps,
			    sps->sps_flags, SPS_FLAGS_STR,
			    (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		error = 0;	/* return success */
		break;
	case I_UNLINK:
		ASSERT(IS_SPS_CONTROL(sps));
		ASSERT(ppa != NULL);
		lwq = ppa->ppa_lower_wq;
		ASSERT(mp->b_cont != NULL && mp->b_cont->b_rptr != NULL);
		ASSERT(lwq == ((struct linkblk *)mp->b_cont->b_rptr)->l_qbot);

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: I_UNLINK lwq=0x%p sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id,
			    (void *)lwq, (void *)sps, sps->sps_flags,
			    SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		/*
		 * While accessing the outer perimeter exclusively, we
		 * disassociate our ppa's lower_wq from the lower stream linked
		 * beneath us, and we also disassociate our control stream from
		 * the q_ptr of the lower stream.
		 */
		lwq->q_ptr = RD(lwq)->q_ptr = NULL;
		ppa->ppa_lower_wq = NULL;
		/*
		 * Unblock streams which now feed back up the control stream,
		 * and acknowledge the request. We don't need to hold
		 * ppa_sib_lock here, since we are writer at the outer
		 * perimeter.
		 */
		if (WR(sps->sps_rq)->q_first != NULL)
			qenable(WR(sps->sps_rq));
		for (nextsib = ppa->ppa_streams; nextsib != NULL;
		    nextsib = nextsib->sps_nextsib) {
			if (WR(nextsib->sps_rq)->q_first != NULL) {
				qenable(WR(nextsib->sps_rq));
			}
		}
		error = 0;	/* return success */
		break;
	case PPPIO_NEWPPA:
		/*
		 * Do sanity check to ensure that we don't accept PPPIO_NEWPPA
		 * on a stream which DLPI is used (since certain DLPI messages
		 * will cause state transition reflected in sps_dlstate,
		 * changing it from its default DL_UNATTACHED value). In other
		 * words, we won't allow a network/snoop stream to become
		 * a control stream.
		 */
		if (iop->ioc_cr == NULL ||
		    secpolicy_ppp_config(iop->ioc_cr) != 0) {
			error = EPERM;
			break;
		} else if (IS_SPS_CONTROL(sps) || IS_SPS_PIOATTACH(sps) ||
		    (ppa != NULL) || (sps->sps_dlstate != DL_UNATTACHED)) {
			break;		/* return EINVAL */
		}
		/* Get requested unit number (if any) */
		if (iop->ioc_count == sizeof (uint32_t) && mp->b_cont != NULL)
			ppa_id = *(uint32_t *)mp->b_cont->b_rptr;
		else
			ppa_id = 0;
		/* Get mblk to use for response message */
		nmp = allocb(sizeof (uint32_t), BPRI_MED);
		if (nmp == NULL) {
			error = ENOSR;
			break;
		}
		if (mp->b_cont != NULL) {
			freemsg(mp->b_cont);
		}
		mp->b_cont = nmp;	/* chain our response mblk */
		/*
		 * Walk the global ppa list and determine the lowest
		 * available ppa_id number to be used.
		 */
		if (ppa_id == (uint32_t)-1)
			ppa_id = 0;
		zoneid = crgetzoneid(iop->ioc_cr);
		for (ppa = ppa_list; ppa != NULL; ppa = ppa->ppa_nextppa) {
			if (ppa_id == (uint32_t)-2) {
				/* -2 requests reuse of an orphaned ppa. */
				if (ppa->ppa_ctl == NULL &&
				    ppa->ppa_zoneid == zoneid)
					break;
			} else {
				if (ppa_id < ppa->ppa_ppa_id)
					break;
				if (ppa_id == ppa->ppa_ppa_id)
					++ppa_id;
			}
		}
		if (ppa_id == (uint32_t)-2) {
			if (ppa == NULL) {
				error = ENXIO;
				break;
			}
			/* Clear timestamp and lastmod flags */
			ppa->ppa_flags = 0;
		} else {
			ppa = sppp_create_ppa(ppa_id, zoneid);
			if (ppa == NULL) {
				error = ENOMEM;
				break;
			}
		}

		sps->sps_ppa = ppa;		/* chain the ppa structure */
		sps->sps_npmode = NPMODE_PASS;	/* network packets may travel */
		sps->sps_flags |= SPS_CONTROL;	/* this is the control stream */

		ppa->ppa_refcnt++;		/* new PPA reference */
		ppa->ppa_ctl = sps;		/* back ptr to upper stream */
		/*
		 * Return the newly created ppa_id to the requestor and
		 * acnowledge the request.
		 */
		*(uint32_t *)nmp->b_wptr = ppa->ppa_ppa_id;
		nmp->b_wptr += sizeof (uint32_t);

		if (IS_SPS_KDEBUG(sps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: PPPIO_NEWPPA ppa_id=%d sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, ppa_id,
			    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
			    (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		count = msgsize(nmp);
		error = 0;
		break;
	}

	if (error == 0) {
		/* Success; tell the user. */
		miocack(q, mp, count, 0);
	} else {
		/* Failure; send error back upstream. */
		miocnak(q, mp, 0, error);
	}
}
1523 1524
/*
 * sppp_send()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Called by sppp_uwput to handle M_DATA message type.  Returns
 *    queue_t for putnext, or NULL to mean that the packet was
 *    handled internally (dropped, errored, queued for the service
 *    routine, or consumed by a promiscuous fan-out).
 */
static queue_t *
sppp_send(queue_t *q, mblk_t **mpp, spppstr_t *sps)
{
	mblk_t	*mp;
	sppa_t	*ppa;
	int	is_promisc;
	int	msize;
	int	error = 0;
	queue_t	*nextq;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(sps != NULL);
	ASSERT(q->q_ptr == sps);
	/*
	 * We only let M_DATA through if the sender is either the control
	 * stream (for PPP control packets) or one of the network streams
	 * (for IP packets) in IP fastpath mode. If this stream is not attached
	 * to any ppas, then discard data coming down through this stream.
	 */
	ppa = sps->sps_ppa;
	if (ppa == NULL) {
		ASSERT(!IS_SPS_CONTROL(sps));
		error = ENOLINK;
	} else if (!IS_SPS_CONTROL(sps) && !IS_SPS_FASTPATH(sps)) {
		error = EPROTO;
	}
	if (error != 0) {
		/* merror() frees the message and sends M_ERROR upstream. */
		merror(q, mp, error);
		return (NULL);
	}
	msize = msgdsize(mp);
	if (msize > (ppa->ppa_mtu + PPP_HDRLEN)) {
		/* Log, and send it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_otoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	} else if (msize < PPP_HDRLEN) {
		/*
		 * Log, and send it anyway. We log it because we get things
		 * in M_DATA form here, which tells us that the sender is
		 * either IP in fastpath transmission mode, or pppd. In both
		 * cases, they are currently expected to send the 4-bytes
		 * PPP header in front of any possible payloads.
		 */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_orunts++;
		mutex_exit(&ppa->ppa_sta_lock);
	}

	if (IS_SPS_KDEBUG(sps)) {
		SPDEBUG(PPP_DRV_NAME
		    "/%d: M_DATA send (%d bytes) sps=0x%p flags=0x%b "
		    "ppa=0x%p flags=0x%b\n", sps->sps_mn_id, msize,
		    (void *)sps, sps->sps_flags, SPS_FLAGS_STR,
		    (void *)ppa, ppa->ppa_flags, PPA_FLAGS_STR);
	}
	/*
	 * Should there be any promiscuous stream(s), send the data up
	 * for each promiscuous stream that we recognize. Make sure that
	 * for fastpath, we skip the PPP header in the M_DATA mblk. We skip
	 * the control stream as we obviously never allow the control stream
	 * to become promiscous and bind to PPP_ALLSAP.
	 */
	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	is_promisc = sps->sps_ppa->ppa_promicnt;
	if (is_promisc) {
		ASSERT(ppa->ppa_streams != NULL);
		sppp_dlprsendup(ppa->ppa_streams, mp, sps->sps_sap, B_TRUE);
	}
	rw_exit(&ppa->ppa_sib_lock);
	/*
	 * Only time-stamp the packet with hrtime if the upper stream
	 * is configured to do so.  PPP control (negotiation) messages
	 * are never considered link activity; only data is activity.
	 */
	if (!IS_SPS_CONTROL(sps) && IS_PPA_TIMESTAMP(ppa)) {
		ppa->ppa_lasttx = gethrtime();
	}
	/*
	 * If there's already a message in the write-side service queue,
	 * then queue this message there as well, otherwise, try to send
	 * it down to the module immediately below us.
	 */
	if (q->q_first != NULL ||
	    (nextq = sppp_outpkt(q, mpp, msize, sps)) == NULL) {
		mp = *mpp;
		/* putq() failure means the queue is disabled: count & drop. */
		if (mp != NULL && putq(q, mp) == 0) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_oqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
		}
		return (NULL);
	}
	return (nextq);
}
1634 1635
/*
 * sppp_outpkt()
 *
 * MT-Perimeters:
 *    shared inner, shared outer (if called from sppp_wput, sppp_dlunitdatareq).
 *    exclusive inner, shared outer (if called from sppp_wsrv).
 *
 * Description:
 *    Called from 1) sppp_uwput when processing a M_DATA fastpath message,
 *    or 2) sppp_uwsrv when processing the upper write-side service queue.
 *    For both cases, it prepares to send the data to the module below
 *    this driver if there is a lower stream linked underneath. If none, then
 *    the data will be sent upstream via the control channel to pppd.
 *
 * Returns:
 *    Non-NULL queue_t if message should be sent now, otherwise
 *    if *mpp == NULL, then message was freed, otherwise put *mpp
 *    (back) on the queue.  (Does not do putq/putbq, since it's
 *    called both from srv and put procedures.)
 */
static queue_t *
sppp_outpkt(queue_t *q, mblk_t **mpp, int msize, spppstr_t *sps)
{
	mblk_t		*mp;
	sppa_t		*ppa;
	enum NPmode	npmode;
	mblk_t		*mpnew;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(sps != NULL);

	ppa = sps->sps_ppa;
	npmode = sps->sps_npmode;

	if (npmode == NPMODE_QUEUE) {
		/* *mpp left intact: caller re-queues the message. */
		ASSERT(!IS_SPS_CONTROL(sps));
		return (NULL);	/* queue it for later */
	} else if (ppa == NULL || ppa->ppa_ctl == NULL ||
	    npmode == NPMODE_DROP || npmode == NPMODE_ERROR) {
		/*
		 * This can not be the control stream, as it must always have
		 * a valid ppa, and its npmode must always be NPMODE_PASS.
		 */
		ASSERT(!IS_SPS_CONTROL(sps));
		if (npmode == NPMODE_DROP) {
			freemsg(mp);
		} else {
			/*
			 * If we no longer have the control stream, or if the
			 * mode is set to NPMODE_ERROR, then we need to tell IP
			 * that the interface need to be marked as down. In
			 * other words, we tell IP to be quiescent.
			 */
			merror(q, mp, EPROTO);
		}
		*mpp = NULL;
		return (NULL);	/* don't queue it */
	}
	/*
	 * Do we have a driver stream linked underneath ? If not, we need to
	 * notify pppd that the link needs to be brought up and configure
	 * this upper stream to drop subsequent outgoing packets. This is
	 * for demand-dialing, in which case pppd has done the IP plumbing
	 * but hasn't linked the driver stream underneath us. Therefore, when
	 * a packet is sent down the IP interface, a notification message
	 * will be sent up the control stream to pppd in order for it to
	 * establish the physical link. The driver stream is then expected
	 * to be linked underneath after physical link establishment is done.
	 */
	if (ppa->ppa_lower_wq == NULL) {
		ASSERT(ppa->ppa_ctl != NULL);
		ASSERT(ppa->ppa_ctl->sps_rq != NULL);

		*mpp = NULL;
		mpnew = create_lsmsg(PPP_LINKSTAT_NEEDUP);
		if (mpnew == NULL) {
			freemsg(mp);
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			return (NULL);	/* don't queue it */
		}
		/* Include the data in the message for logging. */
		mpnew->b_cont = mp;
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_lsneedup++;
		mutex_exit(&ppa->ppa_sta_lock);
		/*
		 * We need to set the mode to NPMODE_DROP, but should only
		 * do so when this stream is not the control stream.
		 */
		if (!IS_SPS_CONTROL(sps)) {
			sps->sps_npmode = NPMODE_DROP;
		}
		putnext(ppa->ppa_ctl->sps_rq, mpnew);
		return (NULL);	/* don't queue it */
	}
	/*
	 * If so, then try to send it down.  The lower queue is only ever
	 * detached while holding an exclusive lock on the whole driver,
	 * so we can be confident that the lower queue is still there.
	 */
	if (bcanputnext(ppa->ppa_lower_wq, mp->b_band)) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_stats.p.ppp_opackets++;
		if (IS_SPS_CONTROL(sps)) {
			ppa->ppa_opkt_ctl++;
		}
		ppa->ppa_stats.p.ppp_obytes += msize;
		mutex_exit(&ppa->ppa_sta_lock);
		return (ppa->ppa_lower_wq);	/* don't queue it */
	}
	return (NULL);	/* queue it for later */
}
1752 1753
1753 1754 /*
1754 1755 * sppp_lwsrv()
1755 1756 *
1756 1757 * MT-Perimeters:
1757 1758 * exclusive inner, shared outer.
1758 1759 *
1759 1760 * Description:
1760 1761 * Lower write-side service procedure. No messages are ever placed on
1761 1762 * the write queue here, this just back-enables all upper write side
1762 1763 * service procedures.
1763 1764 */
1764 1765 int
1765 1766 sppp_lwsrv(queue_t *q)
1766 1767 {
1767 1768 sppa_t *ppa;
1768 1769 spppstr_t *nextsib;
1769 1770
1770 1771 ASSERT(q != NULL && q->q_ptr != NULL);
1771 1772 ppa = (sppa_t *)q->q_ptr;
1772 1773 ASSERT(ppa != NULL);
1773 1774
1774 1775 rw_enter(&ppa->ppa_sib_lock, RW_READER);
1775 1776 if ((nextsib = ppa->ppa_ctl) != NULL &&
1776 1777 WR(nextsib->sps_rq)->q_first != NULL)
1777 1778 qenable(WR(nextsib->sps_rq));
1778 1779 for (nextsib = ppa->ppa_streams; nextsib != NULL;
1779 1780 nextsib = nextsib->sps_nextsib) {
1780 1781 if (WR(nextsib->sps_rq)->q_first != NULL) {
1781 1782 qenable(WR(nextsib->sps_rq));
1782 1783 }
1783 1784 }
1784 1785 rw_exit(&ppa->ppa_sib_lock);
1785 1786 return (0);
1786 1787 }
1787 1788
1788 1789 /*
1789 1790 * sppp_lrput()
1790 1791 *
1791 1792 * MT-Perimeters:
1792 1793 * shared inner, shared outer.
1793 1794 *
1794 1795 * Description:
1795 1796 * Lower read-side put procedure. Messages from below get here.
1796 1797 * Data messages are handled separately to limit stack usage
1797 1798 * going into IP.
1798 1799 *
1799 1800 * Note that during I_UNLINK processing, it's possible for a downstream
1800 1801 * message to enable upstream data (due to pass_wput() removing the
1801 1802 * SQ_BLOCKED flag), and thus we must protect against a NULL sppa pointer.
1802 1803 * In this case, the only thing above us is passthru, and we might as well
1803 1804 * discard.
1804 1805 */
1805 1806 int
1806 1807 sppp_lrput(queue_t *q, mblk_t *mp)
1807 1808 {
1808 1809 sppa_t *ppa;
1809 1810 spppstr_t *sps;
1810 1811
1811 1812 if ((ppa = q->q_ptr) == NULL) {
1812 1813 freemsg(mp);
1813 1814 return (0);
1814 1815 }
1815 1816
1816 1817 sps = ppa->ppa_ctl;
1817 1818
1818 1819 if (MTYPE(mp) != M_DATA) {
1819 1820 sppp_recv_nondata(q, mp, sps);
1820 1821 } else if (sps == NULL) {
1821 1822 freemsg(mp);
1822 1823 } else if ((q = sppp_recv(q, &mp, sps)) != NULL) {
1823 1824 putnext(q, mp);
1824 1825 }
1825 1826 return (0);
1826 1827 }
1827 1828
1828 1829 /*
1829 1830 * sppp_lrsrv()
1830 1831 *
1831 1832 * MT-Perimeters:
1832 1833 * exclusive inner, shared outer.
1833 1834 *
1834 1835 * Description:
↓ open down ↓ |
1818 lines elided |
↑ open up ↑ |
1835 1836 * Lower read-side service procedure. This is run once after the I_LINK
1836 1837 * occurs in order to clean up any packets that came in while we were
1837 1838 * transferring in the lower stream. Otherwise, it's not used.
1838 1839 */
1839 1840 int
1840 1841 sppp_lrsrv(queue_t *q)
1841 1842 {
1842 1843 mblk_t *mp;
1843 1844
1844 1845 while ((mp = getq(q)) != NULL)
1845 - sppp_lrput(q, mp);
1846 + (void) sppp_lrput(q, mp);
1846 1847 return (0);
1847 1848 }
1848 1849
1849 1850 /*
1850 1851 * sppp_recv_nondata()
1851 1852 *
1852 1853 * MT-Perimeters:
1853 1854 * shared inner, shared outer.
1854 1855 *
1855 1856 * Description:
1856 1857 * All received non-data messages come through here.
1857 1858 */
1858 1859 static void
1859 1860 sppp_recv_nondata(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)
1860 1861 {
1861 1862 sppa_t *ppa;
1862 1863 spppstr_t *destsps;
1863 1864 struct iocblk *iop;
1864 1865
1865 1866 ppa = (sppa_t *)q->q_ptr;
1866 1867 ctlsps = ppa->ppa_ctl;
1867 1868
1868 1869 switch (MTYPE(mp)) {
1869 1870 case M_CTL:
1870 1871 mutex_enter(&ppa->ppa_sta_lock);
1871 1872 if (*mp->b_rptr == PPPCTL_IERROR) {
1872 1873 ppa->ppa_stats.p.ppp_ierrors++;
1873 1874 ppa->ppa_ierr_low++;
1874 1875 ppa->ppa_mctlsknown++;
1875 1876 } else if (*mp->b_rptr == PPPCTL_OERROR) {
1876 1877 ppa->ppa_stats.p.ppp_oerrors++;
1877 1878 ppa->ppa_oerr_low++;
1878 1879 ppa->ppa_mctlsknown++;
1879 1880 } else {
1880 1881 ppa->ppa_mctlsunknown++;
1881 1882 }
1882 1883 mutex_exit(&ppa->ppa_sta_lock);
1883 1884 freemsg(mp);
1884 1885 break;
1885 1886 case M_IOCTL:
1886 1887 miocnak(q, mp, 0, EINVAL);
1887 1888 break;
1888 1889 case M_IOCACK:
1889 1890 case M_IOCNAK:
1890 1891 iop = (struct iocblk *)mp->b_rptr;
1891 1892 ASSERT(iop != NULL);
1892 1893 /*
1893 1894 * Attempt to match up the response with the stream that the
1894 1895 * request came from. If ioc_id doesn't match the one that we
1895 1896 * recorded, then discard this message.
1896 1897 */
1897 1898 rw_enter(&ppa->ppa_sib_lock, RW_READER);
1898 1899 if ((destsps = ctlsps) == NULL ||
1899 1900 destsps->sps_ioc_id != iop->ioc_id) {
1900 1901 destsps = ppa->ppa_streams;
1901 1902 while (destsps != NULL) {
1902 1903 if (destsps->sps_ioc_id == iop->ioc_id) {
1903 1904 break; /* found the upper stream */
1904 1905 }
1905 1906 destsps = destsps->sps_nextsib;
1906 1907 }
1907 1908 }
1908 1909 rw_exit(&ppa->ppa_sib_lock);
1909 1910 if (destsps == NULL) {
1910 1911 mutex_enter(&ppa->ppa_sta_lock);
1911 1912 ppa->ppa_ioctlsfwderr++;
1912 1913 mutex_exit(&ppa->ppa_sta_lock);
1913 1914 freemsg(mp);
1914 1915 break;
1915 1916 }
1916 1917 mutex_enter(&ppa->ppa_sta_lock);
1917 1918 ppa->ppa_ioctlsfwdok++;
1918 1919
1919 1920 /*
1920 1921 * Clear SPS_IOCQ and enable the lower write side queue,
1921 1922 * this would allow the upper stream service routine
1922 1923 * to start processing the queue for pending messages.
1923 1924 * sppp_lwsrv -> sppp_uwsrv.
1924 1925 */
1925 1926 destsps->sps_flags &= ~SPS_IOCQ;
1926 1927 mutex_exit(&ppa->ppa_sta_lock);
1927 1928 qenable(WR(destsps->sps_rq));
1928 1929
1929 1930 putnext(destsps->sps_rq, mp);
1930 1931 break;
1931 1932 case M_HANGUP:
1932 1933 /*
1933 1934 * Free the original mblk_t. We don't really want to send
1934 1935 * a M_HANGUP message upstream, so we need to translate this
1935 1936 * message into something else.
1936 1937 */
1937 1938 freemsg(mp);
1938 1939 if (ctlsps == NULL)
1939 1940 break;
1940 1941 mp = create_lsmsg(PPP_LINKSTAT_HANGUP);
1941 1942 if (mp == NULL) {
1942 1943 mutex_enter(&ppa->ppa_sta_lock);
1943 1944 ppa->ppa_allocbfail++;
1944 1945 mutex_exit(&ppa->ppa_sta_lock);
1945 1946 break;
1946 1947 }
1947 1948 mutex_enter(&ppa->ppa_sta_lock);
1948 1949 ppa->ppa_lsdown++;
1949 1950 mutex_exit(&ppa->ppa_sta_lock);
1950 1951 putnext(ctlsps->sps_rq, mp);
1951 1952 break;
1952 1953 case M_FLUSH:
1953 1954 if (*mp->b_rptr & FLUSHR) {
1954 1955 flushq(q, FLUSHDATA);
1955 1956 }
1956 1957 if (*mp->b_rptr & FLUSHW) {
1957 1958 *mp->b_rptr &= ~FLUSHR;
1958 1959 qreply(q, mp);
1959 1960 } else {
1960 1961 freemsg(mp);
1961 1962 }
1962 1963 break;
1963 1964 default:
1964 1965 if (ctlsps != NULL &&
1965 1966 (queclass(mp) == QPCTL) || canputnext(ctlsps->sps_rq)) {
1966 1967 putnext(ctlsps->sps_rq, mp);
1967 1968 } else {
1968 1969 mutex_enter(&ppa->ppa_sta_lock);
1969 1970 ppa->ppa_iqdropped++;
1970 1971 mutex_exit(&ppa->ppa_sta_lock);
1971 1972 freemsg(mp);
1972 1973 }
1973 1974 break;
1974 1975 }
1975 1976 }
1976 1977
/*
 * sppp_recv()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Receive function called by sppp_lrput. Finds appropriate
 *    receive stream and does accounting.
 *
 * Returns:
 *    The queue to putnext() the (possibly re-allocated) message to, or
 *    NULL if the message has already been consumed (freed or queued).
 *    On a non-NULL return, *mpp points at the message to forward.
 */
static queue_t *
sppp_recv(queue_t *q, mblk_t **mpp, spppstr_t *ctlsps)
{
	mblk_t		*mp;
	int		len;
	sppa_t		*ppa;
	spppstr_t	*destsps;
	mblk_t		*zmp;
	uint32_t	npflagpos;

	ASSERT(mpp != NULL);
	mp = *mpp;
	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(ctlsps != NULL);
	ASSERT(IS_SPS_CONTROL(ctlsps));
	ppa = ctlsps->sps_ppa;
	ASSERT(ppa != NULL && ppa->ppa_ctl != NULL);

	/* Account for received bytes before any validity checks. */
	len = msgdsize(mp);
	mutex_enter(&ppa->ppa_sta_lock);
	ppa->ppa_stats.p.ppp_ibytes += len;
	mutex_exit(&ppa->ppa_sta_lock);
	/*
	 * If the entire data size of the mblk is less than the length of the
	 * PPP header, then free it. We can't do much with such message anyway,
	 * since we can't really determine what the PPP protocol type is.
	 */
	if (len < PPP_HDRLEN) {
		/* Log, and free it */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_irunts++;
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
		return (NULL);
	} else if (len > (ppa->ppa_mru + PPP_HDRLEN)) {
		/* Log, and accept it anyway */
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_itoolongs++;
		mutex_exit(&ppa->ppa_sta_lock);
	}
	/*
	 * We need at least be able to read the PPP protocol from the header,
	 * so if the first message block is too small, then we concatenate the
	 * rest of the following blocks into one message.
	 */
	if (MBLKL(mp) < PPP_HDRLEN) {
		zmp = msgpullup(mp, PPP_HDRLEN);
		/* msgpullup copies; the original must be freed either way. */
		freemsg(mp);
		mp = zmp;
		if (mp == NULL) {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_allocbfail++;
			mutex_exit(&ppa->ppa_sta_lock);
			return (NULL);
		}
		/* Propagate the replacement mblk back to the caller. */
		*mpp = mp;
	}
	/*
	 * Hold this packet in the control-queue until
	 * the matching network-layer upper stream for the PPP protocol (sap)
	 * has not been plumbed and configured
	 */
	npflagpos = sppp_ppp2np(PPP_PROTOCOL(mp->b_rptr));
	mutex_enter(&ppa->ppa_npmutex);
	if (npflagpos != 0 && (ppa->ppa_npflag & (1 << npflagpos))) {
		/*
		 * proto is currently blocked; Hold up to 4 packets
		 * in the kernel.
		 */
		if (ppa->ppa_holdpkts[npflagpos] > 3 ||
		    putq(ctlsps->sps_rq, mp) == 0)
			freemsg(mp);
		else
			ppa->ppa_holdpkts[npflagpos]++;
		mutex_exit(&ppa->ppa_npmutex);
		return (NULL);
	}
	mutex_exit(&ppa->ppa_npmutex);
	/*
	 * Try to find a matching network-layer upper stream for the specified
	 * PPP protocol (sap), and if none is found, send this frame up the
	 * control stream.
	 */
	destsps = sppp_inpkt(q, mp, ctlsps);
	if (destsps == NULL) {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_ipkt_ctl++;
		mutex_exit(&ppa->ppa_sta_lock);
		if (canputnext(ctlsps->sps_rq)) {
			if (IS_SPS_KDEBUG(ctlsps)) {
				SPDEBUG(PPP_DRV_NAME
				    "/%d: M_DATA recv (%d bytes) sps=0x%p "
				    "flags=0x%b ppa=0x%p flags=0x%b\n",
				    ctlsps->sps_mn_id, len, (void *)ctlsps,
				    ctlsps->sps_flags, SPS_FLAGS_STR,
				    (void *)ppa, ppa->ppa_flags,
				    PPA_FLAGS_STR);
			}
			return (ctlsps->sps_rq);
		} else {
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_iqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
			return (NULL);
		}
	}
	if (canputnext(destsps->sps_rq)) {
		if (IS_SPS_KDEBUG(destsps)) {
			SPDEBUG(PPP_DRV_NAME
			    "/%d: M_DATA recv (%d bytes) sps=0x%p flags=0x%b "
			    "ppa=0x%p flags=0x%b\n", destsps->sps_mn_id, len,
			    (void *)destsps, destsps->sps_flags,
			    SPS_FLAGS_STR, (void *)ppa, ppa->ppa_flags,
			    PPA_FLAGS_STR);
		}
		/*
		 * If fastpath is enabled on the network-layer stream, then
		 * make sure we skip over the PPP header, otherwise, we wrap
		 * the message in a DLPI message.
		 */
		if (IS_SPS_FASTPATH(destsps)) {
			mp->b_rptr += PPP_HDRLEN;
			return (destsps->sps_rq);
		} else {
			spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
			ASSERT(uqs != NULL);
			mp->b_rptr += PPP_HDRLEN;
			mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
			if (mp != NULL) {
				*mpp = mp;
				return (destsps->sps_rq);
			} else {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_allocbfail++;
				mutex_exit(&ppa->ppa_sta_lock);
				/* mp already freed by sppp_dladdud */
				return (NULL);
			}
		}
	} else {
		mutex_enter(&ppa->ppa_sta_lock);
		ppa->ppa_iqdropped++;
		mutex_exit(&ppa->ppa_sta_lock);
		freemsg(mp);
		return (NULL);
	}
}
2136 2137
/*
 * sppp_inpkt()
 *
 * MT-Perimeters:
 *    shared inner, shared outer.
 *
 * Description:
 *    Find the destination upper stream for the received packet, called
 *    from sppp_recv.
 *
 * Returns:
 *    ptr to destination upper network stream, or NULL for control stream.
 */
/* ARGSUSED */
static spppstr_t *
sppp_inpkt(queue_t *q, mblk_t *mp, spppstr_t *ctlsps)
{
	spppstr_t	*destsps = NULL;
	sppa_t		*ppa;
	uint16_t	proto;
	int		is_promisc;

	ASSERT(q != NULL && q->q_ptr != NULL);
	ASSERT(mp != NULL && mp->b_rptr != NULL);
	ASSERT(IS_SPS_CONTROL(ctlsps));
	ppa = ctlsps->sps_ppa;
	ASSERT(ppa != NULL);
	/*
	 * From RFC 1661 (Section 2):
	 *
	 * The Protocol field is one or two octets, and its value identifies
	 * the datagram encapsulated in the Information field of the packet.
	 * The field is transmitted and received most significant octet first.
	 *
	 * The structure of this field is consistent with the ISO 3309
	 * extension mechanism for address fields. All Protocols MUST be odd;
	 * the least significant bit of the least significant octet MUST equal
	 * "1". Also, all Protocols MUST be assigned such that the least
	 * significant bit of the most significant octet equals "0". Frames
	 * received which don't comply with these rules MUST be treated as
	 * having an unrecognized Protocol.
	 *
	 * Protocol field values in the "0***" to "3***" range identify the
	 * network-layer protocol of specific packets, and values in the
	 * "8***" to "b***" range identify packets belonging to the associated
	 * Network Control Protocols (NCPs), if any.
	 *
	 * Protocol field values in the "4***" to "7***" range are used for
	 * protocols with low volume traffic which have no associated NCP.
	 * Protocol field values in the "c***" to "f***" range identify packets
	 * as link-layer Control Protocols (such as LCP).
	 */
	proto = PPP_PROTOCOL(mp->b_rptr);
	mutex_enter(&ppa->ppa_sta_lock);
	ppa->ppa_stats.p.ppp_ipackets++;
	mutex_exit(&ppa->ppa_sta_lock);
	/*
	 * We check if this is not a network-layer protocol, and if so,
	 * then send this packet up the control stream.
	 */
	if (proto > 0x7fff) {
		goto inpkt_done;	/* send it up the control stream */
	}
	/*
	 * Try to grab the destination upper stream from the network-layer
	 * stream cache for this ppa for PPP_IP (0x0021) or PPP_IPV6 (0x0057)
	 * protocol types. Otherwise, if the type is not known to the cache,
	 * or if its sap can't be matched with any of the upper streams, then
	 * send this packet up the control stream so that it can be rejected.
	 */
	if (proto == PPP_IP) {
		destsps = ppa->ppa_ip_cache;
	} else if (proto == PPP_IPV6) {
		destsps = ppa->ppa_ip6_cache;
	}
	/*
	 * Toss this one away up the control stream if there's no matching sap;
	 * this way the protocol can be rejected (destsps is NULL).
	 */

inpkt_done:
	/*
	 * Only time-stamp the packet with hrtime if the upper stream
	 * is configured to do so. PPP control (negotiation) messages
	 * are never considered link activity; only data is activity.
	 */
	if (destsps != NULL && IS_PPA_TIMESTAMP(ppa)) {
		ppa->ppa_lastrx = gethrtime();
	}
	/*
	 * Should there be any promiscuous stream(s), send the data up for
	 * each promiscuous stream that we recognize. We skip the control
	 * stream as we obviously never allow the control stream to become
	 * promiscuous and bind to PPP_ALLSAP.
	 */
	rw_enter(&ppa->ppa_sib_lock, RW_READER);
	is_promisc = ppa->ppa_promicnt;
	if (is_promisc) {
		ASSERT(ppa->ppa_streams != NULL);
		/* Sends duplicates; original mp is still owned by caller. */
		sppp_dlprsendup(ppa->ppa_streams, mp, proto, B_TRUE);
	}
	rw_exit(&ppa->ppa_sib_lock);
	return (destsps);
}
2241 2242
2242 2243 /*
2243 2244 * sppp_kstat_update()
2244 2245 *
2245 2246 * Description:
2246 2247 * Update per-ppa kstat interface statistics.
2247 2248 */
2248 2249 static int
2249 2250 sppp_kstat_update(kstat_t *ksp, int rw)
2250 2251 {
2251 2252 register sppa_t *ppa;
2252 2253 register sppp_kstats_t *pppkp;
2253 2254 register struct pppstat64 *sp;
2254 2255
2255 2256 if (rw == KSTAT_WRITE) {
2256 2257 return (EACCES);
2257 2258 }
2258 2259
2259 2260 ppa = (sppa_t *)ksp->ks_private;
2260 2261 ASSERT(ppa != NULL);
2261 2262
2262 2263 pppkp = (sppp_kstats_t *)ksp->ks_data;
2263 2264 sp = &ppa->ppa_stats.p;
2264 2265
2265 2266 mutex_enter(&ppa->ppa_sta_lock);
2266 2267 pppkp->allocbfail.value.ui32 = ppa->ppa_allocbfail;
2267 2268 pppkp->mctlsfwd.value.ui32 = ppa->ppa_mctlsfwd;
2268 2269 pppkp->mctlsfwderr.value.ui32 = ppa->ppa_mctlsfwderr;
2269 2270 pppkp->rbytes.value.ui32 = sp->ppp_ibytes;
2270 2271 pppkp->rbytes64.value.ui64 = sp->ppp_ibytes;
2271 2272 pppkp->ierrors.value.ui32 = sp->ppp_ierrors;
2272 2273 pppkp->ierrors_lower.value.ui32 = ppa->ppa_ierr_low;
2273 2274 pppkp->ioctlsfwd.value.ui32 = ppa->ppa_ioctlsfwd;
2274 2275 pppkp->ioctlsfwdok.value.ui32 = ppa->ppa_ioctlsfwdok;
2275 2276 pppkp->ioctlsfwderr.value.ui32 = ppa->ppa_ioctlsfwderr;
2276 2277 pppkp->ipackets.value.ui32 = sp->ppp_ipackets;
2277 2278 pppkp->ipackets64.value.ui64 = sp->ppp_ipackets;
2278 2279 pppkp->ipackets_ctl.value.ui32 = ppa->ppa_ipkt_ctl;
2279 2280 pppkp->iqdropped.value.ui32 = ppa->ppa_iqdropped;
2280 2281 pppkp->irunts.value.ui32 = ppa->ppa_irunts;
2281 2282 pppkp->itoolongs.value.ui32 = ppa->ppa_itoolongs;
2282 2283 pppkp->lsneedup.value.ui32 = ppa->ppa_lsneedup;
2283 2284 pppkp->lsdown.value.ui32 = ppa->ppa_lsdown;
2284 2285 pppkp->mctlsknown.value.ui32 = ppa->ppa_mctlsknown;
2285 2286 pppkp->mctlsunknown.value.ui32 = ppa->ppa_mctlsunknown;
2286 2287 pppkp->obytes.value.ui32 = sp->ppp_obytes;
2287 2288 pppkp->obytes64.value.ui64 = sp->ppp_obytes;
2288 2289 pppkp->oerrors.value.ui32 = sp->ppp_oerrors;
2289 2290 pppkp->oerrors_lower.value.ui32 = ppa->ppa_oerr_low;
2290 2291 pppkp->opackets.value.ui32 = sp->ppp_opackets;
2291 2292 pppkp->opackets64.value.ui64 = sp->ppp_opackets;
2292 2293 pppkp->opackets_ctl.value.ui32 = ppa->ppa_opkt_ctl;
2293 2294 pppkp->oqdropped.value.ui32 = ppa->ppa_oqdropped;
2294 2295 pppkp->otoolongs.value.ui32 = ppa->ppa_otoolongs;
2295 2296 pppkp->orunts.value.ui32 = ppa->ppa_orunts;
2296 2297 mutex_exit(&ppa->ppa_sta_lock);
2297 2298
2298 2299 return (0);
2299 2300 }
2300 2301
/*
 * sppp_release_pkts()
 *
 * Turn off proto in ppa_npflag to indicate that
 * the corresponding network protocol has been plumbed.
 * Release proto packets that were being held in the control
 * queue in anticipation of this event.
 */
static void
sppp_release_pkts(sppa_t *ppa, uint16_t proto)
{
	uint32_t npflagpos = sppp_ppp2np(proto);
	int count;
	mblk_t *mp;
	uint16_t mp_proto;
	queue_t *q;
	spppstr_t *destsps;

	ASSERT(ppa != NULL);

	/* Nothing to do if this proto was never blocked. */
	if (npflagpos == 0 || (ppa->ppa_npflag & (1 << npflagpos)) == 0)
		return;

	/*
	 * Clear the blocked bit and claim the held-packet count before
	 * draining, so concurrent receivers stop queueing for this proto.
	 */
	mutex_enter(&ppa->ppa_npmutex);
	ppa->ppa_npflag &= ~(1 << npflagpos);
	count = ppa->ppa_holdpkts[npflagpos];
	ppa->ppa_holdpkts[npflagpos] = 0;
	mutex_exit(&ppa->ppa_npmutex);

	q = ppa->ppa_ctl->sps_rq;

	/*
	 * Drain exactly `count' packets of this proto.  Packets for other
	 * (still-blocked) protocols are requeued without decrementing the
	 * count; ppa_holdpkts accounting guarantees getq() won't run dry.
	 */
	while (count > 0) {
		mp = getq(q);
		ASSERT(mp != NULL);

		mp_proto = PPP_PROTOCOL(mp->b_rptr);
		if (mp_proto !=  proto) {
			(void) putq(q, mp);
			continue;
		}
		count--;
		destsps = NULL;
		if (mp_proto == PPP_IP) {
			destsps = ppa->ppa_ip_cache;
		} else if (mp_proto == PPP_IPV6) {
			destsps = ppa->ppa_ip6_cache;
		}
		ASSERT(destsps != NULL);

		if (IS_SPS_FASTPATH(destsps)) {
			/* Fastpath: strip the PPP header and send raw. */
			mp->b_rptr += PPP_HDRLEN;
		} else {
			spppstr_t *uqs = (spppstr_t *)destsps->sps_rq->q_ptr;
			ASSERT(uqs != NULL);
			mp->b_rptr += PPP_HDRLEN;
			/* Wrap in a DLPI unitdata indication. */
			mp = sppp_dladdud(uqs, mp, uqs->sps_sap, B_FALSE);
			if (mp == NULL) {
				mutex_enter(&ppa->ppa_sta_lock);
				ppa->ppa_allocbfail++;
				mutex_exit(&ppa->ppa_sta_lock);
				/* mp already freed by sppp_dladdud */
				continue;
			}
		}

		if (canputnext(destsps->sps_rq)) {
			putnext(destsps->sps_rq, mp);
		} else {
			/* Upper stream is flow-controlled; drop and count. */
			mutex_enter(&ppa->ppa_sta_lock);
			ppa->ppa_iqdropped++;
			mutex_exit(&ppa->ppa_sta_lock);
			freemsg(mp);
			continue;
		}
	}
}
↓ open down ↓ |
519 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX