Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/intel/io/dktp/drvobj/strategy.c
+++ new/usr/src/uts/intel/io/dktp/drvobj/strategy.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License, Version 1.0 only
6 6 * (the "License"). You may not use this file except in compliance
7 7 * with the License.
8 8 *
9 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 10 * or http://www.opensolaris.org/os/licensing.
11 11 * See the License for the specific language governing permissions
12 12 * and limitations under the License.
13 13 *
14 14 * When distributing Covered Code, include this CDDL HEADER in each
15 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 16 * If applicable, add the following below this CDDL HEADER, with the
17 17 * fields enclosed by brackets "[]" replaced with your own identifying
18 18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 19 *
20 20 * CDDL HEADER END
21 21 */
22 22 /*
23 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Device Strategy
29 29 */
30 30 #include <sys/dktp/cm.h>
31 31 #include <sys/kstat.h>
32 32
33 33 #include <sys/dktp/quetypes.h>
34 34 #include <sys/dktp/queue.h>
35 35 #include <sys/dktp/tgcom.h>
36 36 #include <sys/dktp/fctypes.h>
37 37 #include <sys/dktp/flowctrl.h>
38 38 #include <sys/param.h>
39 39 #include <vm/page.h>
40 40 #include <sys/modctl.h>
41 41
42 42 /*
43 43 * Object Management
44 44 */
45 45
↓ open down ↓ |
45 lines elided |
↑ open up ↑ |
46 46 static struct buf *qmerge_nextbp(struct que_data *qfp, struct buf *bp_merge,
47 47 int *can_merge);
48 48
49 49 static struct modlmisc modlmisc = {
50 50 &mod_miscops, /* Type of module */
51 51 "Device Strategy Objects"
52 52 };
53 53
54 54 static struct modlinkage modlinkage = {
55 55 MODREV_1,
56 - &modlmisc,
57 - NULL
56 + { &modlmisc, NULL }
58 57 };
59 58
60 59 int
61 60 _init(void)
62 61 {
63 62 return (mod_install(&modlinkage));
64 63 }
65 64
66 65 int
67 66 _fini(void)
68 67 {
69 68 return (mod_remove(&modlinkage));
70 69 }
71 70
72 71 int
73 72 _info(struct modinfo *modinfop)
74 73 {
75 74 return (mod_info(&modlinkage, modinfop));
76 75 }
77 76
78 77
79 78 /*
80 79 * Common Flow Control functions
81 80 */
82 81
83 82 /*
84 83 * Local static data
85 84 */
86 85 #ifdef FLC_DEBUG
87 86 #define DENT 0x0001
88 87 #define DERR 0x0002
89 88 #define DIO 0x0004
90 89 static int flc_debug = DENT|DERR|DIO;
91 90
92 91 #include <sys/thread.h>
93 92 static int flc_malloc_intr = 0;
94 93 #endif /* FLC_DEBUG */
95 94
96 95 static int flc_kstat = 1;
97 96
98 97 static struct flc_obj *fc_create(struct flc_objops *fcopsp);
99 98 static int fc_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp,
100 99 void *lkarg);
101 100 static int fc_free(struct flc_obj *flcobjp);
102 101 static int fc_start_kstat(opaque_t queuep, char *devtype, int instance);
103 102 static int fc_stop_kstat(opaque_t queuep);
104 103
105 104 static struct flc_obj *
106 105 fc_create(struct flc_objops *fcopsp)
107 106 {
108 107 struct flc_obj *flcobjp;
109 108 struct fc_data *fcdp;
110 109
111 110 flcobjp = kmem_zalloc((sizeof (*flcobjp) + sizeof (*fcdp)), KM_NOSLEEP);
112 111 if (!flcobjp)
113 112 return (NULL);
114 113
115 114 fcdp = (struct fc_data *)(flcobjp+1);
116 115 flcobjp->flc_data = (opaque_t)fcdp;
117 116 flcobjp->flc_ops = fcopsp;
118 117
119 118 return ((opaque_t)flcobjp);
120 119 }
121 120
122 121 static int dmult_maxcnt = DMULT_MAXCNT;
123 122
124 123 static int
125 124 fc_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp, void *lkarg)
126 125 {
127 126 struct fc_data *fcdp = (struct fc_data *)queuep;
128 127
129 128 mutex_init(&fcdp->ds_mutex, NULL, MUTEX_DRIVER, lkarg);
130 129
131 130 fcdp->ds_queobjp = que_objp;
132 131 fcdp->ds_tgcomobjp = tgcom_objp;
133 132 fcdp->ds_waitcnt = dmult_maxcnt;
134 133
135 134 QUE_INIT(que_objp, lkarg);
136 135 TGCOM_INIT(tgcom_objp);
137 136 return (DDI_SUCCESS);
138 137 }
139 138
140 139 static int
141 140 fc_free(struct flc_obj *flcobjp)
142 141 {
143 142 struct fc_data *fcdp;
144 143
145 144 fcdp = (struct fc_data *)flcobjp->flc_data;
146 145 if (fcdp->ds_queobjp)
147 146 QUE_FREE(fcdp->ds_queobjp);
148 147 if (fcdp->ds_tgcomobjp) {
149 148 TGCOM_FREE(fcdp->ds_tgcomobjp);
150 149 mutex_destroy(&fcdp->ds_mutex);
151 150 }
152 151 kmem_free(flcobjp, (sizeof (*flcobjp) + sizeof (*fcdp)));
153 152 return (0);
154 153 }
155 154
156 155 /*ARGSUSED*/
157 156 static int
158 157 fc_start_kstat(opaque_t queuep, char *devtype, int instance)
159 158 {
160 159 struct fc_data *fcdp = (struct fc_data *)queuep;
161 160 if (!flc_kstat)
162 161 return (0);
163 162
164 163 if (!fcdp->ds_kstat) {
165 164 if (fcdp->ds_kstat = kstat_create("cmdk", instance, NULL,
166 165 "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT)) {
167 166 kstat_install(fcdp->ds_kstat);
168 167 }
169 168 }
170 169 return (0);
171 170 }
172 171
173 172 static int
174 173 fc_stop_kstat(opaque_t queuep)
175 174 {
176 175 struct fc_data *fcdp = (struct fc_data *)queuep;
177 176
178 177 if (fcdp->ds_kstat) {
179 178 kstat_delete(fcdp->ds_kstat);
180 179 fcdp->ds_kstat = NULL;
181 180 }
182 181 return (0);
183 182 }
184 183
185 184
186 185 /*
187 186 * Single Command per Device
188 187 */
189 188 /*
190 189 * Local Function Prototypes
191 190 */
192 191 static int dsngl_restart();
193 192
↓ open down ↓ |
126 lines elided |
↑ open up ↑ |
194 193 static int dsngl_enque(opaque_t, struct buf *);
195 194 static int dsngl_deque(opaque_t, struct buf *);
196 195
197 196 struct flc_objops dsngl_ops = {
198 197 fc_init,
199 198 fc_free,
200 199 dsngl_enque,
201 200 dsngl_deque,
202 201 fc_start_kstat,
203 202 fc_stop_kstat,
204 - 0, 0
203 + { NULL, NULL }
205 204 };
206 205
207 206 struct flc_obj *
208 207 dsngl_create()
209 208 {
210 209 return (fc_create((struct flc_objops *)&dsngl_ops));
211 210 }
212 211
213 212 static int
214 213 dsngl_enque(opaque_t queuep, struct buf *in_bp)
215 214 {
216 215 struct fc_data *dsnglp = (struct fc_data *)queuep;
217 216 opaque_t tgcom_objp;
218 217 opaque_t que_objp;
219 218
220 219 que_objp = dsnglp->ds_queobjp;
221 220 tgcom_objp = dsnglp->ds_tgcomobjp;
222 221
223 222 if (!in_bp)
224 223 return (0);
225 224 mutex_enter(&dsnglp->ds_mutex);
226 225 if (dsnglp->ds_bp || dsnglp->ds_outcnt) {
227 226 QUE_ADD(que_objp, in_bp);
228 227 if (dsnglp->ds_kstat) {
229 228 kstat_waitq_enter(KSTAT_IO_PTR(dsnglp->ds_kstat));
230 229 }
231 230 mutex_exit(&dsnglp->ds_mutex);
232 231 return (0);
233 232 }
234 233 if (dsnglp->ds_kstat) {
235 234 kstat_waitq_enter(KSTAT_IO_PTR(dsnglp->ds_kstat));
236 235 }
237 236 if (TGCOM_PKT(tgcom_objp, in_bp, dsngl_restart,
238 237 (caddr_t)dsnglp) != DDI_SUCCESS) {
239 238
240 239 dsnglp->ds_bp = in_bp;
241 240 mutex_exit(&dsnglp->ds_mutex);
242 241 return (0);
243 242 }
244 243 dsnglp->ds_outcnt++;
245 244 if (dsnglp->ds_kstat)
246 245 kstat_waitq_to_runq(KSTAT_IO_PTR(dsnglp->ds_kstat));
247 246 mutex_exit(&dsnglp->ds_mutex);
248 247 TGCOM_TRANSPORT(tgcom_objp, in_bp);
249 248 return (0);
250 249 }
251 250
252 251 static int
253 252 dsngl_deque(opaque_t queuep, struct buf *in_bp)
254 253 {
255 254 struct fc_data *dsnglp = (struct fc_data *)queuep;
256 255 opaque_t tgcom_objp;
257 256 opaque_t que_objp;
258 257 struct buf *bp;
259 258
260 259 que_objp = dsnglp->ds_queobjp;
261 260 tgcom_objp = dsnglp->ds_tgcomobjp;
262 261
263 262 mutex_enter(&dsnglp->ds_mutex);
264 263 if (in_bp) {
265 264 dsnglp->ds_outcnt--;
266 265 if (dsnglp->ds_kstat) {
267 266 if (in_bp->b_flags & B_READ) {
268 267 KSTAT_IO_PTR(dsnglp->ds_kstat)->reads++;
269 268 KSTAT_IO_PTR(dsnglp->ds_kstat)->nread +=
270 269 (in_bp->b_bcount - in_bp->b_resid);
271 270 } else {
272 271 KSTAT_IO_PTR(dsnglp->ds_kstat)->writes++;
273 272 KSTAT_IO_PTR(dsnglp->ds_kstat)->nwritten +=
274 273 (in_bp->b_bcount - in_bp->b_resid);
275 274 }
276 275 kstat_runq_exit(KSTAT_IO_PTR(dsnglp->ds_kstat));
277 276 }
278 277 }
279 278 for (;;) {
280 279 if (!dsnglp->ds_bp)
281 280 dsnglp->ds_bp = QUE_DEL(que_objp);
282 281 if (!dsnglp->ds_bp ||
283 282 (TGCOM_PKT(tgcom_objp, dsnglp->ds_bp, dsngl_restart,
284 283 (caddr_t)dsnglp) != DDI_SUCCESS) ||
285 284 dsnglp->ds_outcnt) {
286 285 mutex_exit(&dsnglp->ds_mutex);
287 286 return (0);
288 287 }
289 288 dsnglp->ds_outcnt++;
290 289 bp = dsnglp->ds_bp;
291 290 dsnglp->ds_bp = QUE_DEL(que_objp);
292 291 if (dsnglp->ds_kstat)
293 292 kstat_waitq_to_runq(KSTAT_IO_PTR(dsnglp->ds_kstat));
294 293 mutex_exit(&dsnglp->ds_mutex);
295 294
296 295 TGCOM_TRANSPORT(tgcom_objp, bp);
297 296
298 297 if (!mutex_tryenter(&dsnglp->ds_mutex))
299 298 return (0);
300 299 }
301 300 }
302 301
303 302 static int
304 303 dsngl_restart(struct fc_data *dsnglp)
305 304 {
306 305 (void) dsngl_deque(dsnglp, NULL);
307 306 return (-1);
308 307 }
309 308
310 309
311 310 /*
312 311 * Multiple Commands per Device
313 312 */
314 313 /*
315 314 * Local Function Prototypes
316 315 */
317 316 static int dmult_restart();
318 317
↓ open down ↓ |
104 lines elided |
↑ open up ↑ |
319 318 static int dmult_enque(opaque_t, struct buf *);
320 319 static int dmult_deque(opaque_t, struct buf *);
321 320
322 321 struct flc_objops dmult_ops = {
323 322 fc_init,
324 323 fc_free,
325 324 dmult_enque,
326 325 dmult_deque,
327 326 fc_start_kstat,
328 327 fc_stop_kstat,
329 - 0, 0
328 + { NULL, NULL }
330 329 };
331 330
332 331 struct flc_obj *
333 332 dmult_create()
334 333 {
335 334 return (fc_create((struct flc_objops *)&dmult_ops));
336 335
337 336 }
338 337
339 338
340 339 /*
341 340 * Some of the object management functions QUE_ADD() and QUE_DEL()
342 341 * do not acquire lock.
343 342 * They depend on dmult_enque(), dmult_deque() to do all locking.
344 343 * If this changes we have to grab locks in qmerge_add() and qmerge_del().
345 344 */
346 345 static int
347 346 dmult_enque(opaque_t queuep, struct buf *in_bp)
348 347 {
349 348 struct fc_data *dmultp = (struct fc_data *)queuep;
350 349 opaque_t tgcom_objp;
351 350 opaque_t que_objp;
352 351
353 352 que_objp = dmultp->ds_queobjp;
354 353 tgcom_objp = dmultp->ds_tgcomobjp;
355 354
356 355 if (!in_bp)
357 356 return (0);
358 357 mutex_enter(&dmultp->ds_mutex);
359 358 if ((dmultp->ds_outcnt >= dmultp->ds_waitcnt) || dmultp->ds_bp) {
360 359 QUE_ADD(que_objp, in_bp);
361 360 if (dmultp->ds_kstat) {
362 361 kstat_waitq_enter(KSTAT_IO_PTR(dmultp->ds_kstat));
363 362 }
364 363 mutex_exit(&dmultp->ds_mutex);
365 364 return (0);
366 365 }
367 366 if (dmultp->ds_kstat) {
368 367 kstat_waitq_enter(KSTAT_IO_PTR(dmultp->ds_kstat));
369 368 }
370 369
371 370 if (TGCOM_PKT(tgcom_objp, in_bp, dmult_restart,
372 371 (caddr_t)dmultp) != DDI_SUCCESS) {
373 372
374 373 dmultp->ds_bp = in_bp;
375 374 mutex_exit(&dmultp->ds_mutex);
376 375 return (0);
377 376 }
378 377 dmultp->ds_outcnt++;
379 378 if (dmultp->ds_kstat)
380 379 kstat_waitq_to_runq(KSTAT_IO_PTR(dmultp->ds_kstat));
381 380 mutex_exit(&dmultp->ds_mutex);
382 381
383 382 TGCOM_TRANSPORT(tgcom_objp, in_bp);
384 383 return (0);
385 384 }
386 385
387 386 static int
388 387 dmult_deque(opaque_t queuep, struct buf *in_bp)
389 388 {
390 389 struct fc_data *dmultp = (struct fc_data *)queuep;
391 390 opaque_t tgcom_objp;
392 391 opaque_t que_objp;
393 392 struct buf *bp;
394 393
395 394 que_objp = dmultp->ds_queobjp;
396 395 tgcom_objp = dmultp->ds_tgcomobjp;
397 396
398 397 mutex_enter(&dmultp->ds_mutex);
399 398 if (in_bp) {
400 399 dmultp->ds_outcnt--;
401 400 if (dmultp->ds_kstat) {
402 401 if (in_bp->b_flags & B_READ) {
403 402 KSTAT_IO_PTR(dmultp->ds_kstat)->reads++;
404 403 KSTAT_IO_PTR(dmultp->ds_kstat)->nread +=
405 404 (in_bp->b_bcount - in_bp->b_resid);
406 405 } else {
407 406 KSTAT_IO_PTR(dmultp->ds_kstat)->writes++;
408 407 KSTAT_IO_PTR(dmultp->ds_kstat)->nwritten +=
409 408 (in_bp->b_bcount - in_bp->b_resid);
410 409 }
411 410 kstat_runq_exit(KSTAT_IO_PTR(dmultp->ds_kstat));
412 411 }
413 412 }
414 413
415 414 for (;;) {
416 415
417 416 #ifdef FLC_DEBUG
418 417 if ((curthread->t_intr) && (!dmultp->ds_bp) &&
419 418 (!dmultp->ds_outcnt))
420 419 flc_malloc_intr++;
421 420 #endif
422 421
423 422 if (!dmultp->ds_bp)
424 423 dmultp->ds_bp = QUE_DEL(que_objp);
425 424 if (!dmultp->ds_bp ||
426 425 (TGCOM_PKT(tgcom_objp, dmultp->ds_bp, dmult_restart,
427 426 (caddr_t)dmultp) != DDI_SUCCESS) ||
428 427 (dmultp->ds_outcnt >= dmultp->ds_waitcnt)) {
429 428 mutex_exit(&dmultp->ds_mutex);
430 429 return (0);
431 430 }
432 431 dmultp->ds_outcnt++;
433 432 bp = dmultp->ds_bp;
434 433 dmultp->ds_bp = QUE_DEL(que_objp);
435 434
436 435 if (dmultp->ds_kstat)
437 436 kstat_waitq_to_runq(KSTAT_IO_PTR(dmultp->ds_kstat));
438 437
439 438 mutex_exit(&dmultp->ds_mutex);
440 439
441 440 TGCOM_TRANSPORT(tgcom_objp, bp);
442 441
443 442 if (!mutex_tryenter(&dmultp->ds_mutex))
444 443 return (0);
445 444 }
446 445 }
447 446
448 447 static int
449 448 dmult_restart(struct fc_data *dmultp)
450 449 {
451 450 (void) dmult_deque(dmultp, NULL);
452 451 return (-1);
453 452 }
454 453
455 454 /*
456 455 * Duplexed Commands per Device: Read Queue and Write Queue
457 456 */
458 457 /*
459 458 * Local Function Prototypes
460 459 */
461 460 static int duplx_restart();
462 461
463 462 static int duplx_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp,
464 463 void *lkarg);
465 464 static int duplx_free(struct flc_obj *flcobjp);
↓ open down ↓ |
126 lines elided |
↑ open up ↑ |
466 465 static int duplx_enque(opaque_t queuep, struct buf *bp);
467 466 static int duplx_deque(opaque_t queuep, struct buf *bp);
468 467
469 468 struct flc_objops duplx_ops = {
470 469 duplx_init,
471 470 duplx_free,
472 471 duplx_enque,
473 472 duplx_deque,
474 473 fc_start_kstat,
475 474 fc_stop_kstat,
476 - 0, 0
475 + { NULL, NULL }
477 476 };
478 477
479 478 struct flc_obj *
480 479 duplx_create()
481 480 {
482 481 struct flc_obj *flcobjp;
483 482 struct duplx_data *fcdp;
484 483
485 484 flcobjp = kmem_zalloc((sizeof (*flcobjp) + sizeof (*fcdp)), KM_NOSLEEP);
486 485 if (!flcobjp)
487 486 return (NULL);
488 487
489 488 fcdp = (struct duplx_data *)(flcobjp+1);
490 489 flcobjp->flc_data = (opaque_t)fcdp;
491 490 flcobjp->flc_ops = &duplx_ops;
492 491
493 492 fcdp->ds_writeq.fc_qobjp = qfifo_create();
494 493 if (!(fcdp->ds_writeq.fc_qobjp = qfifo_create())) {
495 494 kmem_free(flcobjp, (sizeof (*flcobjp) + sizeof (*fcdp)));
496 495 return (NULL);
497 496 }
498 497 return (flcobjp);
499 498 }
500 499
501 500 static int
502 501 duplx_free(struct flc_obj *flcobjp)
503 502 {
504 503 struct duplx_data *fcdp;
505 504
506 505 fcdp = (struct duplx_data *)flcobjp->flc_data;
507 506 if (fcdp->ds_writeq.fc_qobjp) {
508 507 QUE_FREE(fcdp->ds_writeq.fc_qobjp);
509 508 }
510 509 if (fcdp->ds_readq.fc_qobjp)
511 510 QUE_FREE(fcdp->ds_readq.fc_qobjp);
512 511 if (fcdp->ds_tgcomobjp) {
513 512 TGCOM_FREE(fcdp->ds_tgcomobjp);
514 513 mutex_destroy(&fcdp->ds_mutex);
515 514 }
516 515 kmem_free(flcobjp, (sizeof (*flcobjp) + sizeof (*fcdp)));
517 516 return (0);
518 517 }
519 518
520 519 static int
521 520 duplx_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp, void *lkarg)
522 521 {
523 522 struct duplx_data *fcdp = (struct duplx_data *)queuep;
524 523 fcdp->ds_tgcomobjp = tgcom_objp;
525 524 fcdp->ds_readq.fc_qobjp = que_objp;
526 525
527 526 QUE_INIT(que_objp, lkarg);
528 527 QUE_INIT(fcdp->ds_writeq.fc_qobjp, lkarg);
529 528 TGCOM_INIT(tgcom_objp);
530 529
531 530 mutex_init(&fcdp->ds_mutex, NULL, MUTEX_DRIVER, lkarg);
532 531
533 532 fcdp->ds_writeq.fc_maxcnt = DUPLX_MAXCNT;
534 533 fcdp->ds_readq.fc_maxcnt = DUPLX_MAXCNT;
535 534
536 535 /* queues point to each other for round robin */
537 536 fcdp->ds_readq.next = &fcdp->ds_writeq;
538 537 fcdp->ds_writeq.next = &fcdp->ds_readq;
539 538
540 539 return (DDI_SUCCESS);
541 540 }
542 541
543 542 static int
544 543 duplx_enque(opaque_t queuep, struct buf *in_bp)
545 544 {
546 545 struct duplx_data *duplxp = (struct duplx_data *)queuep;
547 546 opaque_t tgcom_objp;
548 547 struct fc_que *activeq;
549 548 struct buf *bp;
550 549
551 550 mutex_enter(&duplxp->ds_mutex);
552 551 if (in_bp) {
553 552 if (duplxp->ds_kstat) {
554 553 kstat_waitq_enter(KSTAT_IO_PTR(duplxp->ds_kstat));
555 554 }
556 555 if (in_bp->b_flags & B_READ)
557 556 activeq = &duplxp->ds_readq;
558 557 else
559 558 activeq = &duplxp->ds_writeq;
560 559
561 560 QUE_ADD(activeq->fc_qobjp, in_bp);
562 561 } else {
563 562 activeq = &duplxp->ds_readq;
564 563 }
565 564
566 565 tgcom_objp = duplxp->ds_tgcomobjp;
567 566
568 567 for (;;) {
569 568 if (!activeq->fc_bp)
570 569 activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);
571 570 if (!activeq->fc_bp ||
572 571 (TGCOM_PKT(tgcom_objp, activeq->fc_bp, duplx_restart,
573 572 (caddr_t)duplxp) != DDI_SUCCESS) ||
574 573 (activeq->fc_outcnt >= activeq->fc_maxcnt)) {
575 574
576 575 /* switch read/write queues */
577 576 activeq = activeq->next;
578 577 if (!activeq->fc_bp)
579 578 activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);
580 579 if (!activeq->fc_bp ||
581 580 (TGCOM_PKT(tgcom_objp, activeq->fc_bp,
582 581 duplx_restart, (caddr_t)duplxp) != DDI_SUCCESS) ||
583 582 (activeq->fc_outcnt >= activeq->fc_maxcnt)) {
584 583 mutex_exit(&duplxp->ds_mutex);
585 584 return (0);
586 585 }
587 586 }
588 587
589 588 activeq->fc_outcnt++;
590 589 bp = activeq->fc_bp;
591 590 activeq->fc_bp = NULL;
592 591
593 592 if (duplxp->ds_kstat)
594 593 kstat_waitq_to_runq(KSTAT_IO_PTR(duplxp->ds_kstat));
595 594 mutex_exit(&duplxp->ds_mutex);
596 595
597 596 TGCOM_TRANSPORT(tgcom_objp, bp);
598 597
599 598 if (!mutex_tryenter(&duplxp->ds_mutex))
600 599 return (0);
601 600
602 601 activeq = activeq->next;
603 602 }
604 603 }
605 604
606 605 static int
607 606 duplx_deque(opaque_t queuep, struct buf *in_bp)
608 607 {
609 608 struct duplx_data *duplxp = (struct duplx_data *)queuep;
610 609 opaque_t tgcom_objp;
611 610 struct fc_que *activeq;
612 611 struct buf *bp;
613 612
614 613 mutex_enter(&duplxp->ds_mutex);
615 614
616 615 tgcom_objp = duplxp->ds_tgcomobjp;
617 616
618 617 if (in_bp->b_flags & B_READ)
619 618 activeq = &duplxp->ds_readq;
620 619 else
621 620 activeq = &duplxp->ds_writeq;
622 621 activeq->fc_outcnt--;
623 622
624 623 if (duplxp->ds_kstat) {
625 624 if (in_bp->b_flags & B_READ) {
626 625 KSTAT_IO_PTR(duplxp->ds_kstat)->reads++;
627 626 KSTAT_IO_PTR(duplxp->ds_kstat)->nread +=
628 627 (in_bp->b_bcount - in_bp->b_resid);
629 628 } else {
630 629 KSTAT_IO_PTR(duplxp->ds_kstat)->writes++;
631 630 KSTAT_IO_PTR(duplxp->ds_kstat)->nwritten +=
632 631 (in_bp->b_bcount - in_bp->b_resid);
633 632 }
634 633 kstat_runq_exit(KSTAT_IO_PTR(duplxp->ds_kstat));
635 634 }
636 635
637 636 for (;;) {
638 637
639 638 /* if needed, try to pull request off a queue */
640 639 if (!activeq->fc_bp)
641 640 activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);
642 641
643 642 if (!activeq->fc_bp ||
644 643 (TGCOM_PKT(tgcom_objp, activeq->fc_bp, duplx_restart,
645 644 (caddr_t)duplxp) != DDI_SUCCESS) ||
646 645 (activeq->fc_outcnt >= activeq->fc_maxcnt)) {
647 646
648 647 activeq = activeq->next;
649 648 if (!activeq->fc_bp)
650 649 activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);
651 650
652 651 if (!activeq->fc_bp ||
653 652 (TGCOM_PKT(tgcom_objp, activeq->fc_bp,
654 653 duplx_restart, (caddr_t)duplxp) != DDI_SUCCESS) ||
655 654 (activeq->fc_outcnt >= activeq->fc_maxcnt)) {
656 655 mutex_exit(&duplxp->ds_mutex);
657 656 return (0);
658 657 }
659 658 }
660 659
661 660 activeq->fc_outcnt++;
662 661 bp = activeq->fc_bp;
663 662 activeq->fc_bp = NULL;
664 663
665 664 if (duplxp->ds_kstat)
666 665 kstat_waitq_to_runq(KSTAT_IO_PTR(duplxp->ds_kstat));
667 666
668 667 mutex_exit(&duplxp->ds_mutex);
669 668
670 669 TGCOM_TRANSPORT(tgcom_objp, bp);
671 670
672 671 if (!mutex_tryenter(&duplxp->ds_mutex))
673 672 return (0);
674 673
675 674 activeq = activeq->next;
676 675 }
677 676 }
678 677
679 678 static int
680 679 duplx_restart(struct duplx_data *duplxp)
681 680 {
682 681 (void) duplx_enque(duplxp, NULL);
683 682 return (-1);
684 683 }
685 684
686 685 /*
687 686 * Tagged queueing flow control
688 687 */
689 688 /*
↓ open down ↓ |
203 lines elided |
↑ open up ↑ |
690 689 * Local Function Prototypes
691 690 */
692 691
693 692 struct flc_objops adapt_ops = {
694 693 fc_init,
695 694 fc_free,
696 695 dmult_enque,
697 696 dmult_deque,
698 697 fc_start_kstat,
699 698 fc_stop_kstat,
700 - 0, 0
699 + { NULL, NULL }
701 700 };
702 701
703 702 struct flc_obj *
704 703 adapt_create()
705 704 {
706 705 return (fc_create((struct flc_objops *)&adapt_ops));
707 706
708 707 }
709 708
710 709 /*
711 710 * Common Queue functions
712 711 */
713 712
714 713 /*
715 714 * Local static data
716 715 */
717 716 #ifdef Q_DEBUG
718 717 #define DENT 0x0001
719 718 #define DERR 0x0002
720 719 #define DIO 0x0004
721 720 static int que_debug = DENT|DERR|DIO;
722 721
723 722 #endif /* Q_DEBUG */
724 723 /*
725 724 * Local Function Prototypes
726 725 */
727 726 static struct que_obj *que_create(struct que_objops *qopsp);
728 727 static int que_init(struct que_data *qfp, void *lkarg);
729 728 static int que_free(struct que_obj *queobjp);
730 729 static struct buf *que_del(struct que_data *qfp);
731 730
732 731 static struct que_obj *
733 732 que_create(struct que_objops *qopsp)
734 733 {
735 734 struct que_data *qfp;
736 735 struct que_obj *queobjp;
737 736
738 737 queobjp = kmem_zalloc((sizeof (*queobjp) + sizeof (*qfp)), KM_NOSLEEP);
739 738 if (!queobjp)
740 739 return (NULL);
741 740
742 741 queobjp->que_ops = qopsp;
743 742 qfp = (struct que_data *)(queobjp+1);
744 743 queobjp->que_data = (opaque_t)qfp;
745 744
746 745 return ((opaque_t)queobjp);
747 746 }
748 747
749 748 static int
750 749 que_init(struct que_data *qfp, void *lkarg)
751 750 {
752 751 mutex_init(&qfp->q_mutex, NULL, MUTEX_DRIVER, lkarg);
753 752 return (DDI_SUCCESS);
754 753 }
755 754
756 755 static int
757 756 que_free(struct que_obj *queobjp)
758 757 {
759 758 struct que_data *qfp;
760 759
761 760 qfp = (struct que_data *)queobjp->que_data;
762 761 mutex_destroy(&qfp->q_mutex);
763 762 kmem_free(queobjp, (sizeof (*queobjp) + sizeof (struct que_data)));
764 763 return (0);
765 764 }
766 765
767 766 static struct buf *
768 767 que_del(struct que_data *qfp)
769 768 {
770 769 struct buf *bp;
771 770
772 771 bp = qfp->q_tab.b_actf;
773 772 if (bp) {
774 773 qfp->q_tab.b_actf = bp->av_forw;
775 774 if (!qfp->q_tab.b_actf)
776 775 qfp->q_tab.b_actl = NULL;
777 776 bp->av_forw = 0;
778 777 }
779 778 return (bp);
780 779 }
781 780
782 781
783 782
784 783 /*
785 784 * Qmerge
↓ open down ↓ |
75 lines elided |
↑ open up ↑ |
786 785 * Local Function Prototypes
787 786 */
788 787 static int qmerge_add(), qmerge_free();
789 788 static struct buf *qmerge_del(struct que_data *qfp);
790 789
791 790 struct que_objops qmerge_ops = {
792 791 que_init,
793 792 qmerge_free,
794 793 qmerge_add,
795 794 qmerge_del,
796 - 0, 0
795 + { NULL, NULL }
797 796 };
798 797
799 798 /* fields in diskhd */
800 799 #define hd_cnt b_back
801 800 #define hd_private b_forw
802 801 #define hd_flags b_flags
803 802 #define hd_sync_next av_forw
804 803 #define hd_async_next av_back
805 804
806 805 #define hd_sync2async sync_async_ratio
807 806
808 807 #define QNEAR_FORWARD 0x01
809 808 #define QNEAR_BACKWARD 0x02
810 809 #define QNEAR_ASYNCONLY 0x04
811 810 #define QNEAR_ASYNCALSO 0x08
812 811
813 812 #define DBLK(bp) ((unsigned long)(bp)->b_private)
814 813
815 814 #define BP_LT_BP(a, b) (DBLK(a) < DBLK(b))
816 815 #define BP_GT_BP(a, b) (DBLK(a) > DBLK(b))
817 816 #define BP_LT_HD(a, b) (DBLK(a) < (unsigned long)((b)->hd_private))
818 817 #define BP_GT_HD(a, b) (DBLK(a) > (unsigned long)((b)->hd_private))
819 818 #define QNEAR_ASYNC (QNEAR_ASYNCONLY|QNEAR_ASYNCALSO)
820 819
821 820 #define SYNC2ASYNC(a) ((a)->q_tab.hd_cnt)
822 821
823 822
824 823 /*
825 824 * qmerge implements a two priority queue, the low priority queue holding ASYNC
826 825 * write requests, while the rest are queued in the high priority sync queue.
827 826 * Requests on the async queue would be merged if possible.
828 827 * By default qmerge2wayscan is 1, indicating an elevator algorithm. When
829 828 * this variable is set to zero, it has the following side effects.
830 829 * 1. We assume fairness is the number one issue.
831 830 * 2. The next request to be picked indicates current head position.
832 831 *
833 832 * qmerge_sync2async indicates the ratio of scans of high priority
834 833 * sync queue to low priority async queue.
835 834 *
836 835 * When qmerge variables have the following values it defaults to qsort
837 836 *
838 837 * qmerge1pri = 1, qmerge2wayscan = 0, qmerge_max_merge = 0
839 838 *
840 839 */
841 840 static int qmerge_max_merge = 128 * 1024;
842 841 static intptr_t qmerge_sync2async = 4;
843 842 static int qmerge2wayscan = 1;
844 843 static int qmerge1pri = 0;
845 844 static int qmerge_merge = 0;
846 845
847 846 /*
848 847 * Local static data
849 848 */
850 849 struct que_obj *
851 850 qmerge_create()
852 851 {
853 852 struct que_data *qfp;
854 853 struct que_obj *queobjp;
855 854
856 855 queobjp = kmem_zalloc((sizeof (*queobjp) + sizeof (*qfp)), KM_NOSLEEP);
857 856 if (!queobjp)
858 857 return (NULL);
859 858
860 859 queobjp->que_ops = &qmerge_ops;
861 860 qfp = (struct que_data *)(queobjp+1);
862 861 qfp->q_tab.hd_private = 0;
863 862 qfp->q_tab.hd_sync_next = qfp->q_tab.hd_async_next = NULL;
864 863 qfp->q_tab.hd_cnt = (void *)qmerge_sync2async;
865 864 queobjp->que_data = (opaque_t)qfp;
866 865
867 866 return ((opaque_t)queobjp);
868 867 }
869 868
870 869 static int
871 870 qmerge_free(struct que_obj *queobjp)
872 871 {
873 872 struct que_data *qfp;
874 873
875 874 qfp = (struct que_data *)queobjp->que_data;
876 875 mutex_destroy(&qfp->q_mutex);
877 876 kmem_free(queobjp, (sizeof (*queobjp) + sizeof (*qfp)));
878 877 return (0);
879 878 }
880 879
881 880 static int
882 881 qmerge_can_merge(bp1, bp2)
883 882 struct buf *bp1, *bp2;
884 883 {
885 884 const int paw_flags = B_PAGEIO | B_ASYNC | B_WRITE;
886 885
887 886 if ((bp1->b_un.b_addr != 0) || (bp2->b_un.b_addr != 0) ||
888 887 ((bp1->b_flags & (paw_flags | B_REMAPPED)) != paw_flags) ||
889 888 ((bp2->b_flags & (paw_flags | B_REMAPPED)) != paw_flags) ||
890 889 (bp1->b_bcount & PAGEOFFSET) || (bp2->b_bcount & PAGEOFFSET) ||
891 890 (bp1->b_bcount + bp2->b_bcount > qmerge_max_merge))
892 891 return (0);
893 892
894 893 if ((DBLK(bp2) + bp2->b_bcount / DEV_BSIZE == DBLK(bp1)) ||
895 894 (DBLK(bp1) + bp1->b_bcount / DEV_BSIZE == DBLK(bp2)))
896 895 return (1);
897 896 else
898 897 return (0);
899 898 }
900 899
901 900 static void
902 901 qmerge_mergesetup(bp_merge, bp)
903 902 struct buf *bp_merge, *bp;
904 903 {
905 904 struct buf *bp1;
906 905 struct page *pp, *pp_merge, *pp_merge_prev;
907 906 int forward;
908 907
909 908 qmerge_merge++;
910 909 forward = DBLK(bp_merge) < DBLK(bp);
911 910
912 911 bp_merge->b_bcount += bp->b_bcount;
913 912
914 913 pp = bp->b_pages;
915 914 pp_merge = bp_merge->b_pages;
916 915
917 916 pp_merge_prev = pp_merge->p_prev;
918 917
919 918 pp_merge->p_prev->p_next = pp;
920 919 pp_merge->p_prev = pp->p_prev;
921 920 pp->p_prev->p_next = pp_merge;
922 921 pp->p_prev = pp_merge_prev;
923 922
924 923 bp1 = bp_merge->b_forw;
925 924
926 925 bp1->av_back->av_forw = bp;
927 926 bp->av_back = bp1->av_back;
928 927 bp1->av_back = bp;
929 928 bp->av_forw = bp1;
930 929
931 930 if (!forward) {
932 931 bp_merge->b_forw = bp;
933 932 bp_merge->b_pages = pp;
934 933 bp_merge->b_private = bp->b_private;
935 934 }
936 935 }
937 936
938 937 static void
939 938 que_insert(struct que_data *qfp, struct buf *bp)
940 939 {
941 940 struct buf *bp1, *bp_start, *lowest_bp, *highest_bp;
942 941 uintptr_t highest_blk, lowest_blk;
943 942 struct buf **async_bpp, **sync_bpp, **bpp;
944 943 struct diskhd *dp = &qfp->q_tab;
945 944
946 945 sync_bpp = &dp->hd_sync_next;
947 946 async_bpp = &dp->hd_async_next;
948 947 /*
949 948 * The ioctl used by the format utility requires that bp->av_back be
950 949 * preserved.
951 950 */
952 951 if (bp->av_back)
953 952 bp->b_error = (intptr_t)bp->av_back;
954 953 if (!qmerge1pri &&
955 954 ((bp->b_flags & (B_ASYNC|B_READ|B_FREE)) == B_ASYNC)) {
956 955 bpp = &dp->hd_async_next;
957 956 } else {
958 957 bpp = &dp->hd_sync_next;
959 958 }
960 959
961 960
962 961 if ((bp1 = *bpp) == NULL) {
963 962 *bpp = bp;
964 963 bp->av_forw = bp->av_back = bp;
965 964 if ((bpp == async_bpp) && (*sync_bpp == NULL)) {
966 965 dp->hd_flags |= QNEAR_ASYNCONLY;
967 966 } else if (bpp == sync_bpp) {
968 967 dp->hd_flags &= ~QNEAR_ASYNCONLY;
969 968 if (*async_bpp) {
970 969 dp->hd_flags |= QNEAR_ASYNCALSO;
971 970 }
972 971 }
973 972 return;
974 973 }
975 974 bp_start = bp1;
976 975 if (DBLK(bp) < DBLK(bp1)) {
977 976 lowest_blk = DBLK(bp1);
978 977 lowest_bp = bp1;
979 978 do {
980 979 if (DBLK(bp) > DBLK(bp1)) {
981 980 bp->av_forw = bp1->av_forw;
982 981 bp1->av_forw->av_back = bp;
983 982 bp1->av_forw = bp;
984 983 bp->av_back = bp1;
985 984
986 985 if (((bpp == async_bpp) &&
987 986 (dp->hd_flags & QNEAR_ASYNC)) ||
988 987 (bpp == sync_bpp)) {
989 988 if (!(dp->hd_flags & QNEAR_BACKWARD) &&
990 989 BP_GT_HD(bp, dp)) {
991 990 *bpp = bp;
992 991 }
993 992 }
994 993 return;
995 994 } else if (DBLK(bp1) < lowest_blk) {
996 995 lowest_bp = bp1;
997 996 lowest_blk = DBLK(bp1);
998 997 }
999 998 } while ((DBLK(bp1->av_back) < DBLK(bp1)) &&
1000 999 ((bp1 = bp1->av_back) != bp_start));
1001 1000 bp->av_forw = lowest_bp;
1002 1001 lowest_bp->av_back->av_forw = bp;
1003 1002 bp->av_back = lowest_bp->av_back;
1004 1003 lowest_bp->av_back = bp;
1005 1004 if ((bpp == async_bpp) && !(dp->hd_flags & QNEAR_ASYNC)) {
1006 1005 *bpp = bp;
1007 1006 } else if (!(dp->hd_flags & QNEAR_BACKWARD) &&
1008 1007 BP_GT_HD(bp, dp)) {
1009 1008 *bpp = bp;
1010 1009 }
1011 1010 } else {
1012 1011 highest_blk = DBLK(bp1);
1013 1012 highest_bp = bp1;
1014 1013 do {
1015 1014 if (DBLK(bp) < DBLK(bp1)) {
1016 1015 bp->av_forw = bp1;
1017 1016 bp1->av_back->av_forw = bp;
1018 1017 bp->av_back = bp1->av_back;
1019 1018 bp1->av_back = bp;
1020 1019 if (((bpp == async_bpp) &&
1021 1020 (dp->hd_flags & QNEAR_ASYNC)) ||
1022 1021 (bpp == sync_bpp)) {
1023 1022 if ((dp->hd_flags & QNEAR_BACKWARD) &&
1024 1023 BP_LT_HD(bp, dp)) {
1025 1024 *bpp = bp;
1026 1025 }
1027 1026 }
1028 1027 return;
1029 1028 } else if (DBLK(bp1) > highest_blk) {
1030 1029 highest_bp = bp1;
1031 1030 highest_blk = DBLK(bp1);
1032 1031 }
1033 1032 } while ((DBLK(bp1->av_forw) > DBLK(bp1)) &&
1034 1033 ((bp1 = bp1->av_forw) != bp_start));
1035 1034 bp->av_back = highest_bp;
1036 1035 highest_bp->av_forw->av_back = bp;
1037 1036 bp->av_forw = highest_bp->av_forw;
1038 1037 highest_bp->av_forw = bp;
1039 1038
1040 1039 if (((bpp == sync_bpp) ||
1041 1040 ((bpp == async_bpp) && (dp->hd_flags & QNEAR_ASYNC))) &&
1042 1041 (dp->hd_flags & QNEAR_BACKWARD) && (BP_LT_HD(bp, dp)))
1043 1042 *bpp = bp;
1044 1043 }
1045 1044 }
1046 1045
1047 1046 /*
1048 1047 * dmult_enque() holds dmultp->ds_mutex lock, so we dont grab
1049 1048 * lock here. If dmult_enque() changes we will have to visit
1050 1049 * this function again
1051 1050 */
1052 1051 static int
1053 1052 qmerge_add(struct que_data *qfp, struct buf *bp)
1054 1053 {
1055 1054
1056 1055 que_insert(qfp, bp);
1057 1056 return (++qfp->q_cnt);
1058 1057 }
1059 1058
/*
 * qmerge_iodone() - b_iodone callback for a merged request built by
 * qmerge_del().
 *
 * bp is the merge buf: its b_forw points into a circular
 * av_forw/av_back ring of the original bufs, and the originals' page
 * lists were chained into one circular list for the single transfer
 * (presumably by qmerge_mergesetup() -- TODO confirm, not visible
 * here).  Walk the ring: unhook each original buf, split the shared
 * circular page list back so each buf again owns only its own pages,
 * propagate any error recorded on the merge buf, and complete each
 * original with biodone().  Finally free the merge buf itself.
 *
 * Returns 0, per the b_iodone convention.
 */
static int
qmerge_iodone(struct buf *bp)
{
	struct buf *bp1;
	struct page *pp, *pp1, *tmp_pp;

	if (bp->b_flags & B_REMAPPED)
		bp_mapout(bp);

	bp1 = bp->b_forw;
	do {
		/* unlink bp1 from the circular ring of originals */
		bp->b_forw = bp1->av_forw;
		bp1->av_forw->av_back = bp1->av_back;
		bp1->av_back->av_forw = bp1->av_forw;
		pp = (page_t *)bp1->b_pages;
		pp1 = bp->b_forw->b_pages;

		/*
		 * Split the combined circular page list between bp1's
		 * pages (pp) and the next buf's pages (pp1).
		 */
		tmp_pp = pp->p_prev;
		pp->p_prev = pp1->p_prev;
		pp->p_prev->p_next = pp;

		pp1->p_prev = tmp_pp;
		pp1->p_prev->p_next = pp1;

		/* merged transfer failed: reflect error into each original */
		if (bp->b_flags & B_ERROR) {
			bp1->b_error = bp->b_error;
			bp1->b_flags |= B_ERROR;
		}

		biodone(bp1);
	} while ((bp1 = bp->b_forw) != bp->b_forw->av_forw);

	/* last remaining original, now self-linked */
	biodone(bp1);
	kmem_free(bp, sizeof (*bp));
	return (0);
}
1096 1095
1097 1096
1098 1097
1099 1098
/*
 * qmerge_nextbp() - remove and return the next buf from the qmerge
 * queue, advancing the elevator-scan state kept in qfp->q_tab.
 *
 * The queue maintains two circular, disk-block-sorted lists rooted at
 * hd_sync_next and hd_async_next, scan mode in hd_flags
 * (QNEAR_ASYNCONLY / QNEAR_ASYNCALSO / QNEAR_BACKWARD) and the last
 * dispatched block number in hd_private.
 *
 * If bp_merge is non-NULL, only a buf that qmerge_can_merge() accepts
 * against bp_merge may be returned; otherwise NULL is returned and the
 * queue is left untouched.  On success, *can_merge is set non-zero
 * when the chosen buf is a write whose neighbour in the scan direction
 * is physically contiguous (block-adjacent), i.e. a merge candidate.
 *
 * NOTE(review): no locking here -- the caller is assumed to hold the
 * queue lock (see the dmult_enque() comment above qmerge_add());
 * confirm against qmerge_del()'s callers.
 */
static struct buf *
qmerge_nextbp(struct que_data *qfp, struct buf *bp_merge, int *can_merge)
{
	intptr_t private, cnt;
	int flags;
	struct buf *sync_bp, *async_bp, *bp;
	struct buf **sync_bpp, **async_bpp, **bpp;
	struct diskhd *dp = &qfp->q_tab;

	if (qfp->q_cnt == 0) {
		return (NULL);
	}
	flags = qfp->q_tab.hd_flags;
	sync_bpp = &qfp->q_tab.hd_sync_next;
	async_bpp = &qfp->q_tab.hd_async_next;

begin_nextbp:
	if (flags & QNEAR_ASYNCONLY) {
		/* only the async list has requests left */
		bp = *async_bpp;
		private = DBLK(bp);
		if (bp_merge && !qmerge_can_merge(bp, bp_merge)) {
			return (NULL);
		} else if (bp->av_forw == bp) {
			/* last buf on the list; reset scan state */
			bp->av_forw = bp->av_back = NULL;
			flags &= ~(QNEAR_ASYNCONLY | QNEAR_BACKWARD);
			private = 0;
		} else if (flags & QNEAR_BACKWARD) {
			if (DBLK(bp) < DBLK(bp->av_back)) {
				/* reached the low end; reverse direction */
				flags &= ~QNEAR_BACKWARD;
				private = 0;
			}
		} else if (DBLK(bp) > DBLK(bp->av_forw)) {
			/* reached the high end of the sorted ring */
			if (qmerge2wayscan) {
				flags |= QNEAR_BACKWARD;
			} else {
				private = 0;
			}
		} else if (qmerge2wayscan == 0) {
			private = DBLK(bp->av_forw);
		}
		bpp = async_bpp;

	} else if (flags & QNEAR_ASYNCALSO) {
		/* scanning both the sync and async lists together */
		sync_bp = *sync_bpp;
		async_bp = *async_bpp;
		if (flags & QNEAR_BACKWARD) {
			if (BP_GT_HD(sync_bp, dp) && BP_GT_HD(async_bp, dp)) {
				/* both heads passed the scan point */
				flags &= ~(QNEAR_BACKWARD|QNEAR_ASYNCALSO);
				*sync_bpp = sync_bp->av_forw;
				*async_bpp = async_bp->av_forw;
				SYNC2ASYNC(qfp) = (void *)qmerge_sync2async;
				qfp->q_tab.hd_private = 0;
				goto begin_nextbp;
			}
			if (BP_LT_HD(async_bp, dp) && BP_LT_HD(sync_bp, dp)) {
				/* pick whichever is closer going backward */
				if (BP_GT_BP(async_bp, sync_bp)) {
					bpp = async_bpp;
					bp = *async_bpp;
				} else {
					bpp = sync_bpp;
					bp = *sync_bpp;
				}
			} else if (BP_LT_HD(async_bp, dp)) {
				bpp = async_bpp;
				bp = *async_bpp;
			} else {
				bpp = sync_bpp;
				bp = *sync_bpp;
			}
		} else {
			if (BP_LT_HD(sync_bp, dp) && BP_LT_HD(async_bp, dp)) {
				if (qmerge2wayscan) {
					/* sweep back down */
					flags |= QNEAR_BACKWARD;
					*sync_bpp = sync_bp->av_back;
					*async_bpp = async_bp->av_back;
					goto begin_nextbp;
				} else {
					flags &= ~QNEAR_ASYNCALSO;
					SYNC2ASYNC(qfp) =
					    (void *)qmerge_sync2async;
					qfp->q_tab.hd_private = 0;
					goto begin_nextbp;
				}
			}
			if (BP_GT_HD(async_bp, dp) && BP_GT_HD(sync_bp, dp)) {
				/* pick whichever is closer going forward */
				if (BP_LT_BP(async_bp, sync_bp)) {
					bpp = async_bpp;
					bp = *async_bpp;
				} else {
					bpp = sync_bpp;
					bp = *sync_bpp;
				}
			} else if (BP_GT_HD(async_bp, dp)) {
				bpp = async_bpp;
				bp = *async_bpp;
			} else {
				bpp = sync_bpp;
				bp = *sync_bpp;
			}
		}
		if (bp_merge && !qmerge_can_merge(bp, bp_merge)) {
			return (NULL);
		} else if (bp->av_forw == bp) {
			/* chosen list is now empty */
			bp->av_forw = bp->av_back = NULL;
			flags &= ~QNEAR_ASYNCALSO;
			if (bpp == async_bpp) {
				SYNC2ASYNC(qfp) = (void *)qmerge_sync2async;
			} else {
				flags |= QNEAR_ASYNCONLY;
			}
		}
		private = DBLK(bp);
	} else {
		/* default: scan the sync list */
		bp = *sync_bpp;
		private = DBLK(bp);
		if (bp_merge && !qmerge_can_merge(bp, bp_merge)) {
			return (NULL);
		} else if (bp->av_forw == bp) {
			/* last sync buf; fall over to async if any */
			private = 0;
			SYNC2ASYNC(qfp) = (void *)qmerge_sync2async;
			bp->av_forw = bp->av_back = NULL;
			flags &= ~QNEAR_BACKWARD;
			if (*async_bpp)
				flags |= QNEAR_ASYNCONLY;
		} else if (flags & QNEAR_BACKWARD) {
			if (DBLK(bp) < DBLK(bp->av_back)) {
				/*
				 * End of the backward sweep.  SYNC2ASYNC
				 * counts down sweeps before the async list
				 * is admitted into the scan.
				 */
				flags &= ~QNEAR_BACKWARD;
				cnt = (intptr_t)SYNC2ASYNC(qfp);
				if (cnt > 0) {
					cnt--;
					SYNC2ASYNC(qfp) = (void *)cnt;
				} else {
					if (*async_bpp)
						flags |= QNEAR_ASYNCALSO;
					SYNC2ASYNC(qfp) =
					    (void *)qmerge_sync2async;
				}
				private = 0;
			}
		} else if (DBLK(bp) > DBLK(bp->av_forw)) {
			/* end of the forward sweep */
			private = 0;
			if (qmerge2wayscan) {
				flags |= QNEAR_BACKWARD;
				private = DBLK(bp);
			} else {
				cnt = (intptr_t)SYNC2ASYNC(qfp);
				if (cnt > 0) {
					cnt--;
					SYNC2ASYNC(qfp) = (void *)cnt;
				} else {
					if (*async_bpp)
						flags |= QNEAR_ASYNCALSO;
					SYNC2ASYNC(qfp) =
					    (void *)qmerge_sync2async;
				}
			}
		} else if (qmerge2wayscan == 0) {
			private = DBLK(bp->av_forw);
		}
		bpp = sync_bpp;
	}

	if (bp->av_forw) {
		/*
		 * Unhook bp from its circular list and decide whether
		 * its neighbour in scan direction is block-contiguous
		 * (mergeable).  Only writes are merged.
		 */
		*can_merge = !(bp->b_flags & B_READ);
		if (flags & QNEAR_BACKWARD) {
			*bpp = bp->av_back;
			if ((DBLK(bp->av_back) +
			    bp->av_back->b_bcount / DEV_BSIZE) != DBLK(bp))
				*can_merge = 0;
		} else {
			*bpp = bp->av_forw;
			if ((DBLK(bp) + bp->b_bcount / DEV_BSIZE) !=
			    DBLK(bp->av_forw))
				*can_merge = 0;
		}
		bp->av_forw->av_back = bp->av_back;
		bp->av_back->av_forw = bp->av_forw;
		bp->av_forw = bp->av_back = NULL;
	} else {
		*bpp = NULL;
		*can_merge = 0;
	}
	qfp->q_tab.hd_private = (void *)private;
	qfp->q_cnt--;
	qfp->q_tab.hd_flags = flags;
	if (bp->b_error) {
		/*
		 * Stash a deferred error in av_back for the consumer;
		 * NOTE(review): confirm which caller decodes this.
		 */
		bp->av_back = (void *)(intptr_t)bp->b_error;
		bp->b_error = 0;
	}
	return (bp);
}
1291 1290
/*
 * qmerge_del() - dequeue the next request, merging physically
 * contiguous requests into one larger transfer when possible.
 *
 * The first buf comes from qmerge_nextbp(); while it reports further
 * mergeable bufs, a separate "merge buf" is allocated (KM_NOSLEEP) to
 * head the merged request: b_iodone is set to qmerge_iodone, b_forw
 * anchors the circular list of original bufs, and b_back stashes the
 * owning que_data.  If the allocation fails, the single buf already
 * removed is returned as-is.
 *
 * Returns NULL when the queue is empty.
 *
 * NOTE(review): the allocation-failure path calls
 * mutex_exit(&qfp->q_mutex) before returning while the other return
 * paths do not; the lock is apparently held by the caller (see the
 * dmult_enque() comment above qmerge_add()) -- confirm who is
 * responsible for the unlock on each path.
 */
static struct buf *
qmerge_del(struct que_data *qfp)
{
	struct buf *bp, *next_bp, *bp_merge;
	int alloc_mergebp, merge;

	if (qfp->q_cnt == 0) {
		return (NULL);
	}

	bp_merge = bp = qmerge_nextbp(qfp, NULL, &merge);
	alloc_mergebp = 1;
	while (merge && (next_bp = qmerge_nextbp(qfp, bp_merge, &merge))) {
		if (alloc_mergebp) {
			/* first merge: build the merge buf lazily */
			bp_merge = kmem_alloc(sizeof (*bp_merge), KM_NOSLEEP);
			if (bp_merge == NULL) {
				mutex_exit(&qfp->q_mutex);
				return (bp);
			}
			bcopy(bp, bp_merge, sizeof (*bp_merge));
			bp_merge->b_iodone = qmerge_iodone;
			bp_merge->b_forw = bp;
			bp_merge->b_back = (struct buf *)qfp;
			bp->av_forw = bp->av_back = bp;
			alloc_mergebp = 0;
		}
		qmerge_mergesetup(bp_merge, next_bp);
	}
	return (bp_merge);
}
1322 1321
1323 1322
1324 1323 /*
1325 1324 * FIFO Queue functions
1326 1325 */
↓ open down ↓ |
520 lines elided |
↑ open up ↑ |
1327 1326 /*
1328 1327 * Local Function Prototypes
1329 1328 */
1330 1329 static int qfifo_add();
1331 1330
1332 1331 struct que_objops qfifo_ops = {
1333 1332 que_init,
1334 1333 que_free,
1335 1334 qfifo_add,
1336 1335 que_del,
1337 - 0, 0
1336 + { NULL, NULL }
1338 1337 };
1339 1338
1340 1339 /*
1341 1340 * Local static data
1342 1341 */
1343 1342 struct que_obj *
1344 1343 qfifo_create()
1345 1344 {
1346 1345 return (que_create((struct que_objops *)&qfifo_ops));
1347 1346 }
1348 1347
1349 1348 static int
1350 1349 qfifo_add(struct que_data *qfp, struct buf *bp)
1351 1350 {
1352 1351
1353 1352 if (!qfp->q_tab.b_actf)
1354 1353 qfp->q_tab.b_actf = bp;
1355 1354 else
1356 1355 qfp->q_tab.b_actl->av_forw = bp;
1357 1356 qfp->q_tab.b_actl = bp;
1358 1357 bp->av_forw = NULL;
1359 1358 return (0);
1360 1359 }
1361 1360
1362 1361 /*
1363 1362 * One-Way-Scan Queue functions
1364 1363 */
1365 1364 /*
1366 1365 * Local Function Prototypes
↓ open down ↓ |
19 lines elided |
↑ open up ↑ |
1367 1366 */
/* Full prototypes instead of empty-parens K&R declarations. */
static int qsort_add(struct que_data *qfp, struct buf *bp);
static struct buf *qsort_del(struct que_data *qfp);
static void oneway_scan_binary(struct diskhd *dp, struct buf *bp);
1371 1370
1372 1371 struct que_objops qsort_ops = {
1373 1372 que_init,
1374 1373 que_free,
1375 1374 qsort_add,
1376 1375 qsort_del,
1377 - 0, 0
1376 + { NULL, NULL }
1378 1377 };
1379 1378
1380 1379 /*
1381 1380 * Local static data
1382 1381 */
1383 1382 struct que_obj *
1384 1383 qsort_create()
1385 1384 {
1386 1385 return (que_create((struct que_objops *)&qsort_ops));
1387 1386 }
1388 1387
1389 1388 static int
1390 1389 qsort_add(struct que_data *qfp, struct buf *bp)
1391 1390 {
1392 1391 qfp->q_cnt++;
1393 1392 oneway_scan_binary(&qfp->q_tab, bp);
1394 1393 return (0);
1395 1394 }
1396 1395
1397 1396
/* Alias diskhd's b_forw/b_back as the head/tail of the "pass" list. */
#define	b_pasf	b_forw
#define	b_pasl	b_back
/*
 * oneway_scan_binary() - insert bp in disk-block order for a one-way
 * (C-SCAN style) sweep.
 *
 * dp->b_actf is the active sorted list being serviced; requests whose
 * block is below the current list head go onto the secondary "pass"
 * list (dp->b_pasf), which qsort_del() promotes once the active list
 * drains.  Despite the name, the insertion scan below is linear, not
 * binary.
 */
static void
oneway_scan_binary(struct diskhd *dp, struct buf *bp)
{
	struct buf *ap;

	ap = dp->b_actf;
	if (ap == NULL) {
		/* empty queue: bp becomes the active list */
		dp->b_actf = bp;
		bp->av_forw = NULL;
		return;
	}
	if (DBLK(bp) < DBLK(ap)) {
		/* behind the head: consider the pass list instead */
		ap = dp->b_pasf;
		if ((ap == NULL) || (DBLK(bp) < DBLK(ap))) {
			dp->b_pasf = bp;
			bp->av_forw = ap;
			return;
		}
	}
	/* linear scan for the insertion point in block order */
	while (ap->av_forw) {
		if (DBLK(bp) < DBLK(ap->av_forw))
			break;
		ap = ap->av_forw;
	}
	bp->av_forw = ap->av_forw;
	ap->av_forw = bp;
}
1427 1426
1428 1427 static struct buf *
1429 1428 qsort_del(struct que_data *qfp)
1430 1429 {
1431 1430 struct buf *bp;
1432 1431
1433 1432 if (qfp->q_cnt == 0) {
1434 1433 return (NULL);
1435 1434 }
1436 1435 qfp->q_cnt--;
1437 1436 bp = qfp->q_tab.b_actf;
1438 1437 qfp->q_tab.b_actf = bp->av_forw;
1439 1438 bp->av_forw = 0;
1440 1439 if (!qfp->q_tab.b_actf && qfp->q_tab.b_pasf) {
1441 1440 qfp->q_tab.b_actf = qfp->q_tab.b_pasf;
1442 1441 qfp->q_tab.b_pasf = NULL;
1443 1442 }
1444 1443 return (bp);
1445 1444 }
1446 1445
1447 1446 /*
1448 1447 * Tagged queueing
↓ open down ↓ |
61 lines elided |
↑ open up ↑ |
1449 1448 */
1450 1449 /*
1451 1450 * Local Function Prototypes
1452 1451 */
1453 1452
1454 1453 struct que_objops qtag_ops = {
1455 1454 que_init,
1456 1455 que_free,
1457 1456 qsort_add,
1458 1457 qsort_del,
1459 - 0, 0
1458 + { NULL, NULL }
1460 1459 };
1461 1460
1462 1461 /*
1463 1462 * Local static data
1464 1463 */
1465 1464 struct que_obj *
1466 1465 qtag_create()
1467 1466 {
1468 1467 return (que_create((struct que_objops *)&qtag_ops));
1469 1468 }
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX