Print this page
5083 avoid undefined order of operations in assignments
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/intel/io/dktp/drvobj/strategy.c
+++ new/usr/src/uts/intel/io/dktp/drvobj/strategy.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License, Version 1.0 only
6 6 * (the "License"). You may not use this file except in compliance
7 7 * with the License.
8 8 *
9 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 10 * or http://www.opensolaris.org/os/licensing.
11 11 * See the License for the specific language governing permissions
12 12 * and limitations under the License.
13 13 *
14 14 * When distributing Covered Code, include this CDDL HEADER in each
15 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 16 * If applicable, add the following below this CDDL HEADER, with the
17 17 * fields enclosed by brackets "[]" replaced with your own identifying
18 18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 19 *
20 20 * CDDL HEADER END
21 21 */
22 22 /*
23 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Device Strategy
29 29 */
30 30 #include <sys/dktp/cm.h>
31 31 #include <sys/kstat.h>
32 32
33 33 #include <sys/dktp/quetypes.h>
34 34 #include <sys/dktp/queue.h>
35 35 #include <sys/dktp/tgcom.h>
36 36 #include <sys/dktp/fctypes.h>
37 37 #include <sys/dktp/flowctrl.h>
38 38 #include <sys/param.h>
39 39 #include <vm/page.h>
40 40 #include <sys/modctl.h>
41 41
42 42 /*
43 43 * Object Management
44 44 */
45 45
/* forward declaration: scanner used by qmerge_del() to pick/merge bufs */
static struct buf *qmerge_nextbp(struct que_data *qfp, struct buf *bp_merge,
	int *can_merge);

/* loadable-module linkage: misc module, no device entry points */
static struct modlmisc modlmisc = {
	&mod_miscops,		/* Type of module */
	"Device Strategy Objects"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modlmisc,
	NULL
};
59 59
/* Module load entry point: register this misc module. */
int
_init(void)
{
	return (mod_install(&modlinkage));
}
65 65
/* Module unload entry point: unregister this misc module. */
int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
71 71
/* Module information entry point. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
77 77
78 78
79 79 /*
80 80 * Common Flow Control functions
81 81 */
82 82
83 83 /*
84 84 * Local static data
85 85 */
#ifdef FLC_DEBUG
#define	DENT	0x0001		/* trace entry points */
#define	DERR	0x0002		/* trace errors */
#define	DIO	0x0004		/* trace I/O */
static int flc_debug = DENT|DERR|DIO;

#include <sys/thread.h>
/* counts restart callbacks taken in interrupt context with nothing pending */
static int flc_malloc_intr = 0;
#endif	/* FLC_DEBUG */

/* non-zero enables creation of per-disk I/O kstats */
static int flc_kstat = 1;

/* common flow-control helpers shared by the dsngl/dmult/adapt policies */
static struct flc_obj *fc_create(struct flc_objops *fcopsp);
static int fc_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp,
	void *lkarg);
static int fc_free(struct flc_obj *flcobjp);
static int fc_start_kstat(opaque_t queuep, char *devtype, int instance);
static int fc_stop_kstat(opaque_t queuep);
104 104
105 105 static struct flc_obj *
106 106 fc_create(struct flc_objops *fcopsp)
107 107 {
108 108 struct flc_obj *flcobjp;
109 109 struct fc_data *fcdp;
110 110
111 111 flcobjp = kmem_zalloc((sizeof (*flcobjp) + sizeof (*fcdp)), KM_NOSLEEP);
112 112 if (!flcobjp)
113 113 return (NULL);
114 114
115 115 fcdp = (struct fc_data *)(flcobjp+1);
116 116 flcobjp->flc_data = (opaque_t)fcdp;
117 117 flcobjp->flc_ops = fcopsp;
118 118
119 119 return ((opaque_t)flcobjp);
120 120 }
121 121
/* default cap on concurrently outstanding commands (dmult/adapt policies) */
static int dmult_maxcnt = DMULT_MAXCNT;

/*
 * Initialize a flow-control object: set up its lock, attach the queue
 * and target-communication objects, and initialize both of them.
 */
static int
fc_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp, void *lkarg)
{
	struct fc_data *fcdp = (struct fc_data *)queuep;

	mutex_init(&fcdp->ds_mutex, NULL, MUTEX_DRIVER, lkarg);

	fcdp->ds_queobjp = que_objp;
	fcdp->ds_tgcomobjp = tgcom_objp;
	fcdp->ds_waitcnt = dmult_maxcnt;

	QUE_INIT(que_objp, lkarg);
	TGCOM_INIT(tgcom_objp);
	return (DDI_SUCCESS);
}
139 139
140 140 static int
141 141 fc_free(struct flc_obj *flcobjp)
142 142 {
143 143 struct fc_data *fcdp;
144 144
145 145 fcdp = (struct fc_data *)flcobjp->flc_data;
146 146 if (fcdp->ds_queobjp)
147 147 QUE_FREE(fcdp->ds_queobjp);
148 148 if (fcdp->ds_tgcomobjp) {
149 149 TGCOM_FREE(fcdp->ds_tgcomobjp);
150 150 mutex_destroy(&fcdp->ds_mutex);
151 151 }
152 152 kmem_free(flcobjp, (sizeof (*flcobjp) + sizeof (*fcdp)));
153 153 return (0);
154 154 }
155 155
156 156 /*ARGSUSED*/
157 157 static int
158 158 fc_start_kstat(opaque_t queuep, char *devtype, int instance)
159 159 {
160 160 struct fc_data *fcdp = (struct fc_data *)queuep;
161 161 if (!flc_kstat)
162 162 return (0);
163 163
164 164 if (!fcdp->ds_kstat) {
165 165 if (fcdp->ds_kstat = kstat_create("cmdk", instance, NULL,
166 166 "disk", KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT)) {
167 167 kstat_install(fcdp->ds_kstat);
168 168 }
169 169 }
170 170 return (0);
171 171 }
172 172
173 173 static int
174 174 fc_stop_kstat(opaque_t queuep)
175 175 {
176 176 struct fc_data *fcdp = (struct fc_data *)queuep;
177 177
178 178 if (fcdp->ds_kstat) {
179 179 kstat_delete(fcdp->ds_kstat);
180 180 fcdp->ds_kstat = NULL;
181 181 }
182 182 return (0);
183 183 }
184 184
185 185
186 186 /*
187 187 * Single Command per Device
188 188 */
189 189 /*
190 190 * Local Function Prototypes
191 191 */
/* restart callback handed to TGCOM_PKT(); receives the fc_data pointer */
static int dsngl_restart();

static int dsngl_enque(opaque_t, struct buf *);
static int dsngl_deque(opaque_t, struct buf *);

/* operations vector for the single-command-per-device policy */
struct flc_objops dsngl_ops = {
	fc_init,
	fc_free,
	dsngl_enque,
	dsngl_deque,
	fc_start_kstat,
	fc_stop_kstat,
	0, 0
};
206 206
207 207 struct flc_obj *
208 208 dsngl_create()
209 209 {
210 210 return (fc_create((struct flc_objops *)&dsngl_ops));
211 211 }
212 212
/*
 * Enqueue a request under the single-command policy: at most one
 * command is outstanding on the device at a time.  If the device is
 * busy the request is parked on the queue object; otherwise a packet
 * is built and transported immediately.  Always returns 0.
 */
static int
dsngl_enque(opaque_t queuep, struct buf *in_bp)
{
	struct fc_data *dsnglp = (struct fc_data *)queuep;
	opaque_t tgcom_objp;
	opaque_t que_objp;

	que_objp = dsnglp->ds_queobjp;
	tgcom_objp = dsnglp->ds_tgcomobjp;

	if (!in_bp)
		return (0);
	mutex_enter(&dsnglp->ds_mutex);
	if (dsnglp->ds_bp || dsnglp->ds_outcnt) {
		/* device busy or a request is held: park on the queue */
		QUE_ADD(que_objp, in_bp);
		if (dsnglp->ds_kstat) {
			kstat_waitq_enter(KSTAT_IO_PTR(dsnglp->ds_kstat));
		}
		mutex_exit(&dsnglp->ds_mutex);
		return (0);
	}
	if (dsnglp->ds_kstat) {
		kstat_waitq_enter(KSTAT_IO_PTR(dsnglp->ds_kstat));
	}
	if (TGCOM_PKT(tgcom_objp, in_bp, dsngl_restart,
	    (caddr_t)dsnglp) != DDI_SUCCESS) {

		/* no packet resources: hold bp; dsngl_restart() retries */
		dsnglp->ds_bp = in_bp;
		mutex_exit(&dsnglp->ds_mutex);
		return (0);
	}
	dsnglp->ds_outcnt++;
	if (dsnglp->ds_kstat)
		kstat_waitq_to_runq(KSTAT_IO_PTR(dsnglp->ds_kstat));
	mutex_exit(&dsnglp->ds_mutex);
	/* transport is done outside the lock */
	TGCOM_TRANSPORT(tgcom_objp, in_bp);
	return (0);
}
251 251
/*
 * Complete a request (when in_bp != NULL) and then keep issuing queued
 * requests while the device is free and packet resources are
 * available.  Called with in_bp == NULL from dsngl_restart() when
 * resources become available again.  Always returns 0.
 */
static int
dsngl_deque(opaque_t queuep, struct buf *in_bp)
{
	struct fc_data *dsnglp = (struct fc_data *)queuep;
	opaque_t tgcom_objp;
	opaque_t que_objp;
	struct buf *bp;

	que_objp = dsnglp->ds_queobjp;
	tgcom_objp = dsnglp->ds_tgcomobjp;

	mutex_enter(&dsnglp->ds_mutex);
	if (in_bp) {
		/* account the completed transfer in the kstat */
		dsnglp->ds_outcnt--;
		if (dsnglp->ds_kstat) {
			if (in_bp->b_flags & B_READ) {
				KSTAT_IO_PTR(dsnglp->ds_kstat)->reads++;
				KSTAT_IO_PTR(dsnglp->ds_kstat)->nread +=
				    (in_bp->b_bcount - in_bp->b_resid);
			} else {
				KSTAT_IO_PTR(dsnglp->ds_kstat)->writes++;
				KSTAT_IO_PTR(dsnglp->ds_kstat)->nwritten +=
				    (in_bp->b_bcount - in_bp->b_resid);
			}
			kstat_runq_exit(KSTAT_IO_PTR(dsnglp->ds_kstat));
		}
	}
	for (;;) {
		if (!dsnglp->ds_bp)
			dsnglp->ds_bp = QUE_DEL(que_objp);
		/*
		 * Stop when there is nothing to issue, packet resources
		 * are exhausted, or a command is already outstanding.
		 */
		if (!dsnglp->ds_bp ||
		    (TGCOM_PKT(tgcom_objp, dsnglp->ds_bp, dsngl_restart,
		    (caddr_t)dsnglp) != DDI_SUCCESS) ||
		    dsnglp->ds_outcnt) {
			mutex_exit(&dsnglp->ds_mutex);
			return (0);
		}
		dsnglp->ds_outcnt++;
		bp = dsnglp->ds_bp;
		dsnglp->ds_bp = QUE_DEL(que_objp);
		if (dsnglp->ds_kstat)
			kstat_waitq_to_runq(KSTAT_IO_PTR(dsnglp->ds_kstat));
		mutex_exit(&dsnglp->ds_mutex);

		TGCOM_TRANSPORT(tgcom_objp, bp);

		/* if someone else holds the lock, they continue the loop */
		if (!mutex_tryenter(&dsnglp->ds_mutex))
			return (0);
	}
}
302 302
/*
 * TGCOM resource-restart callback: resources are available again, so
 * try to issue the held/queued request.  Returns -1 per the restart
 * callback convention.
 */
static int
dsngl_restart(struct fc_data *dsnglp)
{
	(void) dsngl_deque(dsnglp, NULL);
	return (-1);
}
309 309
310 310
/*
 * Multiple Commands per Device
 */
/*
 * Local Function Prototypes
 */
/* restart callback handed to TGCOM_PKT(); receives the fc_data pointer */
static int dmult_restart();

static int dmult_enque(opaque_t, struct buf *);
static int dmult_deque(opaque_t, struct buf *);

/* operations vector for the multiple-commands-per-device policy */
struct flc_objops dmult_ops = {
	fc_init,
	fc_free,
	dmult_enque,
	dmult_deque,
	fc_start_kstat,
	fc_stop_kstat,
	0, 0
};
331 331
332 332 struct flc_obj *
333 333 dmult_create()
334 334 {
335 335 return (fc_create((struct flc_objops *)&dmult_ops));
336 336
337 337 }
338 338
339 339
/*
 * Some of the object management functions QUE_ADD() and QUE_DEL()
 * do not accquire lock.
 * They depend on dmult_enque(), dmult_deque() to do all locking.
 * If this changes we have to grab locks in qmerge_add() and qmerge_del().
 */
/*
 * Enqueue a request under the multiple-command policy: up to
 * ds_waitcnt commands may be outstanding at once.  If the limit is
 * reached or a request is already held for resources, the request is
 * parked on the queue; otherwise it is packeted and transported
 * immediately.  Always returns 0.
 */
static int
dmult_enque(opaque_t queuep, struct buf *in_bp)
{
	struct fc_data *dmultp = (struct fc_data *)queuep;
	opaque_t tgcom_objp;
	opaque_t que_objp;

	que_objp = dmultp->ds_queobjp;
	tgcom_objp = dmultp->ds_tgcomobjp;

	if (!in_bp)
		return (0);
	mutex_enter(&dmultp->ds_mutex);
	if ((dmultp->ds_outcnt >= dmultp->ds_waitcnt) || dmultp->ds_bp) {
		/* at the outstanding-command limit: wait on the queue */
		QUE_ADD(que_objp, in_bp);
		if (dmultp->ds_kstat) {
			kstat_waitq_enter(KSTAT_IO_PTR(dmultp->ds_kstat));
		}
		mutex_exit(&dmultp->ds_mutex);
		return (0);
	}
	if (dmultp->ds_kstat) {
		kstat_waitq_enter(KSTAT_IO_PTR(dmultp->ds_kstat));
	}

	if (TGCOM_PKT(tgcom_objp, in_bp, dmult_restart,
	    (caddr_t)dmultp) != DDI_SUCCESS) {

		/* no packet resources: hold bp; dmult_restart() retries */
		dmultp->ds_bp = in_bp;
		mutex_exit(&dmultp->ds_mutex);
		return (0);
	}
	dmultp->ds_outcnt++;
	if (dmultp->ds_kstat)
		kstat_waitq_to_runq(KSTAT_IO_PTR(dmultp->ds_kstat));
	mutex_exit(&dmultp->ds_mutex);

	/* transport is done outside the lock */
	TGCOM_TRANSPORT(tgcom_objp, in_bp);
	return (0);
}
386 386
/*
 * Complete a request (when in_bp != NULL) and then keep issuing queued
 * requests while under the ds_waitcnt limit and packet resources are
 * available.  Called with in_bp == NULL from dmult_restart() when
 * resources become available again.  Always returns 0.
 */
static int
dmult_deque(opaque_t queuep, struct buf *in_bp)
{
	struct fc_data *dmultp = (struct fc_data *)queuep;
	opaque_t tgcom_objp;
	opaque_t que_objp;
	struct buf *bp;

	que_objp = dmultp->ds_queobjp;
	tgcom_objp = dmultp->ds_tgcomobjp;

	mutex_enter(&dmultp->ds_mutex);
	if (in_bp) {
		/* account the completed transfer in the kstat */
		dmultp->ds_outcnt--;
		if (dmultp->ds_kstat) {
			if (in_bp->b_flags & B_READ) {
				KSTAT_IO_PTR(dmultp->ds_kstat)->reads++;
				KSTAT_IO_PTR(dmultp->ds_kstat)->nread +=
				    (in_bp->b_bcount - in_bp->b_resid);
			} else {
				KSTAT_IO_PTR(dmultp->ds_kstat)->writes++;
				KSTAT_IO_PTR(dmultp->ds_kstat)->nwritten +=
				    (in_bp->b_bcount - in_bp->b_resid);
			}
			kstat_runq_exit(KSTAT_IO_PTR(dmultp->ds_kstat));
		}
	}

	for (;;) {

#ifdef FLC_DEBUG
		/* count restarts taken at interrupt level with idle state */
		if ((curthread->t_intr) && (!dmultp->ds_bp) &&
		    (!dmultp->ds_outcnt))
			flc_malloc_intr++;
#endif

		if (!dmultp->ds_bp)
			dmultp->ds_bp = QUE_DEL(que_objp);
		/*
		 * Stop when there is nothing to issue, packet resources
		 * are exhausted, or the outstanding-command limit is hit.
		 */
		if (!dmultp->ds_bp ||
		    (TGCOM_PKT(tgcom_objp, dmultp->ds_bp, dmult_restart,
		    (caddr_t)dmultp) != DDI_SUCCESS) ||
		    (dmultp->ds_outcnt >= dmultp->ds_waitcnt)) {
			mutex_exit(&dmultp->ds_mutex);
			return (0);
		}
		dmultp->ds_outcnt++;
		bp = dmultp->ds_bp;
		dmultp->ds_bp = QUE_DEL(que_objp);

		if (dmultp->ds_kstat)
			kstat_waitq_to_runq(KSTAT_IO_PTR(dmultp->ds_kstat));

		mutex_exit(&dmultp->ds_mutex);

		TGCOM_TRANSPORT(tgcom_objp, bp);

		/* if someone else holds the lock, they continue the loop */
		if (!mutex_tryenter(&dmultp->ds_mutex))
			return (0);
	}
}
447 447
/*
 * TGCOM resource-restart callback: resources are available again, so
 * try to issue the held/queued requests.  Returns -1 per the restart
 * callback convention.
 */
static int
dmult_restart(struct fc_data *dmultp)
{
	(void) dmult_deque(dmultp, NULL);
	return (-1);
}
454 454
/*
 * Duplexed Commands per Device: Read Queue and Write Queue
 */
/*
 * Local Function Prototypes
 */
/* restart callback handed to TGCOM_PKT(); receives the duplx_data pointer */
static int duplx_restart();

static int duplx_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp,
	void *lkarg);
static int duplx_free(struct flc_obj *flcobjp);
static int duplx_enque(opaque_t queuep, struct buf *bp);
static int duplx_deque(opaque_t queuep, struct buf *bp);

/* operations vector for the duplexed read-queue/write-queue policy */
struct flc_objops duplx_ops = {
	duplx_init,
	duplx_free,
	duplx_enque,
	duplx_deque,
	fc_start_kstat,
	fc_stop_kstat,
	0, 0
};
478 478
479 479 struct flc_obj *
480 480 duplx_create()
481 481 {
482 482 struct flc_obj *flcobjp;
483 483 struct duplx_data *fcdp;
484 484
485 485 flcobjp = kmem_zalloc((sizeof (*flcobjp) + sizeof (*fcdp)), KM_NOSLEEP);
486 486 if (!flcobjp)
487 487 return (NULL);
488 488
489 489 fcdp = (struct duplx_data *)(flcobjp+1);
490 490 flcobjp->flc_data = (opaque_t)fcdp;
491 491 flcobjp->flc_ops = &duplx_ops;
492 492
493 493 fcdp->ds_writeq.fc_qobjp = qfifo_create();
494 494 if (!(fcdp->ds_writeq.fc_qobjp = qfifo_create())) {
495 495 kmem_free(flcobjp, (sizeof (*flcobjp) + sizeof (*fcdp)));
496 496 return (NULL);
497 497 }
498 498 return (flcobjp);
499 499 }
500 500
501 501 static int
502 502 duplx_free(struct flc_obj *flcobjp)
503 503 {
504 504 struct duplx_data *fcdp;
505 505
506 506 fcdp = (struct duplx_data *)flcobjp->flc_data;
507 507 if (fcdp->ds_writeq.fc_qobjp) {
508 508 QUE_FREE(fcdp->ds_writeq.fc_qobjp);
509 509 }
510 510 if (fcdp->ds_readq.fc_qobjp)
511 511 QUE_FREE(fcdp->ds_readq.fc_qobjp);
512 512 if (fcdp->ds_tgcomobjp) {
513 513 TGCOM_FREE(fcdp->ds_tgcomobjp);
514 514 mutex_destroy(&fcdp->ds_mutex);
515 515 }
516 516 kmem_free(flcobjp, (sizeof (*flcobjp) + sizeof (*fcdp)));
517 517 return (0);
518 518 }
519 519
/*
 * Initialize a duplexed flow-control object: the supplied queue object
 * becomes the read queue, the FIFO made by duplx_create() is the write
 * queue, and the two queues are linked for round-robin scanning.
 */
static int
duplx_init(opaque_t queuep, opaque_t tgcom_objp, opaque_t que_objp, void *lkarg)
{
	struct duplx_data *fcdp = (struct duplx_data *)queuep;
	fcdp->ds_tgcomobjp = tgcom_objp;
	fcdp->ds_readq.fc_qobjp = que_objp;

	QUE_INIT(que_objp, lkarg);
	QUE_INIT(fcdp->ds_writeq.fc_qobjp, lkarg);
	TGCOM_INIT(tgcom_objp);

	mutex_init(&fcdp->ds_mutex, NULL, MUTEX_DRIVER, lkarg);

	fcdp->ds_writeq.fc_maxcnt = DUPLX_MAXCNT;
	fcdp->ds_readq.fc_maxcnt = DUPLX_MAXCNT;

	/* queues point to each other for round robin */
	fcdp->ds_readq.next = &fcdp->ds_writeq;
	fcdp->ds_writeq.next = &fcdp->ds_readq;

	return (DDI_SUCCESS);
}
542 542
/*
 * Enqueue a request on the read or write queue (by B_READ flag), then
 * issue as many requests as possible, alternating between the two
 * queues round-robin.  Called with in_bp == NULL from duplx_restart()
 * to resume issuing only.  Always returns 0.
 */
static int
duplx_enque(opaque_t queuep, struct buf *in_bp)
{
	struct duplx_data *duplxp = (struct duplx_data *)queuep;
	opaque_t tgcom_objp;
	struct fc_que *activeq;
	struct buf *bp;

	mutex_enter(&duplxp->ds_mutex);
	if (in_bp) {
		if (duplxp->ds_kstat) {
			kstat_waitq_enter(KSTAT_IO_PTR(duplxp->ds_kstat));
		}
		if (in_bp->b_flags & B_READ)
			activeq = &duplxp->ds_readq;
		else
			activeq = &duplxp->ds_writeq;

		QUE_ADD(activeq->fc_qobjp, in_bp);
	} else {
		/* restart path: begin scanning with the read queue */
		activeq = &duplxp->ds_readq;
	}

	tgcom_objp = duplxp->ds_tgcomobjp;

	for (;;) {
		if (!activeq->fc_bp)
			activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);
		/*
		 * If this queue is empty, out of packet resources, or at
		 * its per-queue limit, try the other queue once before
		 * giving up.
		 */
		if (!activeq->fc_bp ||
		    (TGCOM_PKT(tgcom_objp, activeq->fc_bp, duplx_restart,
		    (caddr_t)duplxp) != DDI_SUCCESS) ||
		    (activeq->fc_outcnt >= activeq->fc_maxcnt)) {

			/* switch read/write queues */
			activeq = activeq->next;
			if (!activeq->fc_bp)
				activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);
			if (!activeq->fc_bp ||
			    (TGCOM_PKT(tgcom_objp, activeq->fc_bp,
			    duplx_restart, (caddr_t)duplxp) != DDI_SUCCESS) ||
			    (activeq->fc_outcnt >= activeq->fc_maxcnt)) {
				mutex_exit(&duplxp->ds_mutex);
				return (0);
			}
		}

		activeq->fc_outcnt++;
		bp = activeq->fc_bp;
		activeq->fc_bp = NULL;

		if (duplxp->ds_kstat)
			kstat_waitq_to_runq(KSTAT_IO_PTR(duplxp->ds_kstat));
		mutex_exit(&duplxp->ds_mutex);

		TGCOM_TRANSPORT(tgcom_objp, bp);

		/* if someone else holds the lock, they continue the loop */
		if (!mutex_tryenter(&duplxp->ds_mutex))
			return (0);

		activeq = activeq->next;
	}
}
605 605
/*
 * Complete a request on its (read or write) queue, account it in the
 * kstat, then resume issuing from both queues round-robin, exactly as
 * duplx_enque() does.  Always returns 0.
 */
static int
duplx_deque(opaque_t queuep, struct buf *in_bp)
{
	struct duplx_data *duplxp = (struct duplx_data *)queuep;
	opaque_t tgcom_objp;
	struct fc_que *activeq;
	struct buf *bp;

	mutex_enter(&duplxp->ds_mutex);

	tgcom_objp = duplxp->ds_tgcomobjp;

	if (in_bp->b_flags & B_READ)
		activeq = &duplxp->ds_readq;
	else
		activeq = &duplxp->ds_writeq;
	activeq->fc_outcnt--;

	if (duplxp->ds_kstat) {
		if (in_bp->b_flags & B_READ) {
			KSTAT_IO_PTR(duplxp->ds_kstat)->reads++;
			KSTAT_IO_PTR(duplxp->ds_kstat)->nread +=
			    (in_bp->b_bcount - in_bp->b_resid);
		} else {
			KSTAT_IO_PTR(duplxp->ds_kstat)->writes++;
			KSTAT_IO_PTR(duplxp->ds_kstat)->nwritten +=
			    (in_bp->b_bcount - in_bp->b_resid);
		}
		kstat_runq_exit(KSTAT_IO_PTR(duplxp->ds_kstat));
	}

	for (;;) {

		/* if needed, try to pull request off a queue */
		if (!activeq->fc_bp)
			activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);

		/*
		 * If this queue is empty, out of packet resources, or at
		 * its per-queue limit, try the other queue once before
		 * giving up.
		 */
		if (!activeq->fc_bp ||
		    (TGCOM_PKT(tgcom_objp, activeq->fc_bp, duplx_restart,
		    (caddr_t)duplxp) != DDI_SUCCESS) ||
		    (activeq->fc_outcnt >= activeq->fc_maxcnt)) {

			activeq = activeq->next;
			if (!activeq->fc_bp)
				activeq->fc_bp = QUE_DEL(activeq->fc_qobjp);

			if (!activeq->fc_bp ||
			    (TGCOM_PKT(tgcom_objp, activeq->fc_bp,
			    duplx_restart, (caddr_t)duplxp) != DDI_SUCCESS) ||
			    (activeq->fc_outcnt >= activeq->fc_maxcnt)) {
				mutex_exit(&duplxp->ds_mutex);
				return (0);
			}
		}

		activeq->fc_outcnt++;
		bp = activeq->fc_bp;
		activeq->fc_bp = NULL;

		if (duplxp->ds_kstat)
			kstat_waitq_to_runq(KSTAT_IO_PTR(duplxp->ds_kstat));

		mutex_exit(&duplxp->ds_mutex);

		TGCOM_TRANSPORT(tgcom_objp, bp);

		/* if someone else holds the lock, they continue the loop */
		if (!mutex_tryenter(&duplxp->ds_mutex))
			return (0);

		activeq = activeq->next;
	}
}
678 678
/*
 * TGCOM resource-restart callback: resume issuing from both queues.
 * Returns -1 per the restart callback convention.
 */
static int
duplx_restart(struct duplx_data *duplxp)
{
	(void) duplx_enque(duplxp, NULL);
	return (-1);
}
685 685
686 686 /*
687 687 * Tagged queueing flow control
688 688 */
689 689 /*
690 690 * Local Function Prototypes
691 691 */
692 692
/* tagged queueing reuses the dmult enqueue/dequeue machinery */
struct flc_objops adapt_ops = {
	fc_init,
	fc_free,
	dmult_enque,
	dmult_deque,
	fc_start_kstat,
	fc_stop_kstat,
	0, 0
};
702 702
703 703 struct flc_obj *
704 704 adapt_create()
705 705 {
706 706 return (fc_create((struct flc_objops *)&adapt_ops));
707 707
708 708 }
709 709
710 710 /*
711 711 * Common Queue functions
712 712 */
713 713
714 714 /*
715 715 * Local static data
716 716 */
#ifdef Q_DEBUG
#define	DENT	0x0001		/* trace entry points */
#define	DERR	0x0002		/* trace errors */
#define	DIO	0x0004		/* trace I/O */
static int que_debug = DENT|DERR|DIO;

#endif	/* Q_DEBUG */
/*
 * Local Function Prototypes
 */
/* common queue helpers shared by the concrete queue implementations */
static struct que_obj *que_create(struct que_objops *qopsp);
static int que_init(struct que_data *qfp, void *lkarg);
static int que_free(struct que_obj *queobjp);
static struct buf *que_del(struct que_data *qfp);
731 731
732 732 static struct que_obj *
733 733 que_create(struct que_objops *qopsp)
734 734 {
735 735 struct que_data *qfp;
736 736 struct que_obj *queobjp;
737 737
738 738 queobjp = kmem_zalloc((sizeof (*queobjp) + sizeof (*qfp)), KM_NOSLEEP);
739 739 if (!queobjp)
740 740 return (NULL);
741 741
742 742 queobjp->que_ops = qopsp;
743 743 qfp = (struct que_data *)(queobjp+1);
744 744 queobjp->que_data = (opaque_t)qfp;
745 745
746 746 return ((opaque_t)queobjp);
747 747 }
748 748
/* Initialize a queue object's lock. */
static int
que_init(struct que_data *qfp, void *lkarg)
{
	mutex_init(&qfp->q_mutex, NULL, MUTEX_DRIVER, lkarg);
	return (DDI_SUCCESS);
}
755 755
756 756 static int
757 757 que_free(struct que_obj *queobjp)
758 758 {
759 759 struct que_data *qfp;
760 760
761 761 qfp = (struct que_data *)queobjp->que_data;
762 762 mutex_destroy(&qfp->q_mutex);
763 763 kmem_free(queobjp, (sizeof (*queobjp) + sizeof (struct que_data)));
764 764 return (0);
765 765 }
766 766
767 767 static struct buf *
768 768 que_del(struct que_data *qfp)
769 769 {
770 770 struct buf *bp;
771 771
772 772 bp = qfp->q_tab.b_actf;
773 773 if (bp) {
774 774 qfp->q_tab.b_actf = bp->av_forw;
775 775 if (!qfp->q_tab.b_actf)
776 776 qfp->q_tab.b_actl = NULL;
777 777 bp->av_forw = 0;
778 778 }
779 779 return (bp);
780 780 }
781 781
782 782
783 783
784 784 /*
785 785 * Qmerge
786 786 * Local Function Prototypes
787 787 */
/* qmerge queue: two-priority sorted queue with adjacent-write merging */
static int qmerge_add(), qmerge_free();
static struct buf *qmerge_del(struct que_data *qfp);

struct que_objops qmerge_ops = {
	que_init,
	qmerge_free,
	qmerge_add,
	qmerge_del,
	0, 0
};
798 798
/* fields in diskhd */
#define	hd_cnt			b_back		/* sync2async scan counter */
#define	hd_private		b_forw		/* last dispatched block no. */
#define	hd_flags		b_flags		/* QNEAR_* scan state */
#define	hd_sync_next		av_forw		/* head of sync (high-pri) queue */
#define	hd_async_next		av_back		/* head of async (low-pri) queue */

#define	hd_sync2async		sync_async_ratio

#define	QNEAR_FORWARD		0x01	/* scanning toward higher blocks */
#define	QNEAR_BACKWARD		0x02	/* scanning toward lower blocks */
#define	QNEAR_ASYNCONLY		0x04	/* only the async queue has work */
#define	QNEAR_ASYNCALSO		0x08	/* async queue has work besides sync */

/* disk block number of a request (stashed in b_private) */
#define	DBLK(bp) ((unsigned long)(bp)->b_private)

#define	BP_LT_BP(a, b) (DBLK(a) < DBLK(b))
#define	BP_GT_BP(a, b) (DBLK(a) > DBLK(b))
#define	BP_LT_HD(a, b) (DBLK(a) < (unsigned long)((b)->hd_private))
#define	BP_GT_HD(a, b) (DBLK(a) > (unsigned long)((b)->hd_private))
#define	QNEAR_ASYNC	(QNEAR_ASYNCONLY|QNEAR_ASYNCALSO)

#define	SYNC2ASYNC(a) ((a)->q_tab.hd_cnt)
822 822
823 823
824 824 /*
825 825 * qmerge implements a two priority queue, the low priority queue holding ASYNC
826 826 * write requests, while the rest are queued in the high priority sync queue.
827 827 * Requests on the async queue would be merged if possible.
828 828 * By default qmerge2wayscan is 1, indicating an elevator algorithm. When
829 829 * this variable is set to zero, it has the following side effects.
830 830 * 1. We assume fairness is the number one issue.
831 831 * 2. The next request to be picked indicates current head position.
832 832 *
833 833 * qmerge_sync2async indicates the ratio of scans of high prioriy
834 834 * sync queue to low priority async queue.
835 835 *
836 836 * When qmerge variables have the following values it defaults to qsort
837 837 *
838 838 * qmerge1pri = 1, qmerge2wayscan = 0, qmerge_max_merge = 0
839 839 *
840 840 */
/* largest merged transfer we will build, in bytes */
static int qmerge_max_merge = 128 * 1024;
/* ratio of sync-queue scans to async-queue scans */
static intptr_t qmerge_sync2async = 4;
/* non-zero selects two-way (elevator) scanning */
static int qmerge2wayscan = 1;
/* non-zero forces all requests onto the single sync queue */
static int qmerge1pri = 0;
/* statistic: number of merges performed */
static int qmerge_merge = 0;
846 846
847 847 /*
848 848 * Local static data
849 849 */
850 850 struct que_obj *
851 851 qmerge_create()
↓ open down ↓ |
469 lines elided |
↑ open up ↑ |
852 852 {
853 853 struct que_data *qfp;
854 854 struct que_obj *queobjp;
855 855
856 856 queobjp = kmem_zalloc((sizeof (*queobjp) + sizeof (*qfp)), KM_NOSLEEP);
857 857 if (!queobjp)
858 858 return (NULL);
859 859
860 860 queobjp->que_ops = &qmerge_ops;
861 861 qfp = (struct que_data *)(queobjp+1);
862 - qfp->q_tab.hd_private = qfp->q_tab.hd_private = 0;
862 + qfp->q_tab.hd_private = 0;
863 863 qfp->q_tab.hd_sync_next = qfp->q_tab.hd_async_next = NULL;
864 864 qfp->q_tab.hd_cnt = (void *)qmerge_sync2async;
865 865 queobjp->que_data = (opaque_t)qfp;
866 866
867 867 return ((opaque_t)queobjp);
868 868 }
869 869
870 870 static int
871 871 qmerge_free(struct que_obj *queobjp)
872 872 {
873 873 struct que_data *qfp;
874 874
875 875 qfp = (struct que_data *)queobjp->que_data;
876 876 mutex_destroy(&qfp->q_mutex);
877 877 kmem_free(queobjp, (sizeof (*queobjp) + sizeof (*qfp)));
878 878 return (0);
879 879 }
880 880
881 881 static int
882 882 qmerge_can_merge(bp1, bp2)
883 883 struct buf *bp1, *bp2;
884 884 {
885 885 const int paw_flags = B_PAGEIO | B_ASYNC | B_WRITE;
886 886
887 887 if ((bp1->b_un.b_addr != 0) || (bp2->b_un.b_addr != 0) ||
888 888 ((bp1->b_flags & (paw_flags | B_REMAPPED)) != paw_flags) ||
889 889 ((bp2->b_flags & (paw_flags | B_REMAPPED)) != paw_flags) ||
890 890 (bp1->b_bcount & PAGEOFFSET) || (bp2->b_bcount & PAGEOFFSET) ||
891 891 (bp1->b_bcount + bp2->b_bcount > qmerge_max_merge))
892 892 return (0);
893 893
894 894 if ((DBLK(bp2) + bp2->b_bcount / DEV_BSIZE == DBLK(bp1)) ||
895 895 (DBLK(bp1) + bp1->b_bcount / DEV_BSIZE == DBLK(bp2)))
896 896 return (1);
897 897 else
898 898 return (0);
899 899 }
900 900
901 901 static void
902 902 qmerge_mergesetup(bp_merge, bp)
903 903 struct buf *bp_merge, *bp;
904 904 {
905 905 struct buf *bp1;
906 906 struct page *pp, *pp_merge, *pp_merge_prev;
907 907 int forward;
908 908
909 909 qmerge_merge++;
910 910 forward = DBLK(bp_merge) < DBLK(bp);
911 911
912 912 bp_merge->b_bcount += bp->b_bcount;
913 913
914 914 pp = bp->b_pages;
915 915 pp_merge = bp_merge->b_pages;
916 916
917 917 pp_merge_prev = pp_merge->p_prev;
918 918
919 919 pp_merge->p_prev->p_next = pp;
920 920 pp_merge->p_prev = pp->p_prev;
921 921 pp->p_prev->p_next = pp_merge;
922 922 pp->p_prev = pp_merge_prev;
923 923
924 924 bp1 = bp_merge->b_forw;
925 925
926 926 bp1->av_back->av_forw = bp;
927 927 bp->av_back = bp1->av_back;
928 928 bp1->av_back = bp;
929 929 bp->av_forw = bp1;
930 930
931 931 if (!forward) {
932 932 bp_merge->b_forw = bp;
933 933 bp_merge->b_pages = pp;
934 934 bp_merge->b_private = bp->b_private;
935 935 }
936 936 }
937 937
/*
 * Insert bp into the sync (high-priority) or async (low-priority)
 * circular list, keeping each list sorted by disk block number.  The
 * list head pointer (*bpp) marks the next request to dispatch and is
 * advanced to bp when bp falls on the current scan direction between
 * the head position (hd_private) and the end of the sweep.
 */
static void
que_insert(struct que_data *qfp, struct buf *bp)
{
	struct buf *bp1, *bp_start, *lowest_bp, *highest_bp;
	uintptr_t highest_blk, lowest_blk;
	struct buf **async_bpp, **sync_bpp, **bpp;
	struct diskhd *dp = &qfp->q_tab;

	sync_bpp = &dp->hd_sync_next;
	async_bpp = &dp->hd_async_next;
	/*
	 * The ioctl used by the format utility requires that bp->av_back be
	 * preserved.
	 */
	if (bp->av_back)
		bp->b_error = (intptr_t)bp->av_back;
	/* async writes go to the low-priority queue unless qmerge1pri */
	if (!qmerge1pri &&
	    ((bp->b_flags & (B_ASYNC|B_READ|B_FREE)) == B_ASYNC)) {
		bpp = &dp->hd_async_next;
	} else {
		bpp = &dp->hd_sync_next;
	}


	if ((bp1 = *bpp) == NULL) {
		/* empty list: bp becomes a one-element circular list */
		*bpp = bp;
		bp->av_forw = bp->av_back = bp;
		if ((bpp == async_bpp) && (*sync_bpp == NULL)) {
			dp->hd_flags |= QNEAR_ASYNCONLY;
		} else if (bpp == sync_bpp) {
			dp->hd_flags &= ~QNEAR_ASYNCONLY;
			if (*async_bpp) {
				dp->hd_flags |= QNEAR_ASYNCALSO;
			}
		}
		return;
	}
	bp_start = bp1;
	if (DBLK(bp) < DBLK(bp1)) {
		/* walk backward from the head looking for bp's slot */
		lowest_blk = DBLK(bp1);
		lowest_bp = bp1;
		do {
			if (DBLK(bp) > DBLK(bp1)) {
				/* insert bp just after bp1 */
				bp->av_forw = bp1->av_forw;
				bp1->av_forw->av_back = bp;
				bp1->av_forw = bp;
				bp->av_back = bp1;

				if (((bpp == async_bpp) &&
				    (dp->hd_flags & QNEAR_ASYNC)) ||
				    (bpp == sync_bpp)) {
					if (!(dp->hd_flags & QNEAR_BACKWARD) &&
					    BP_GT_HD(bp, dp)) {
						*bpp = bp;
					}
				}
				return;
			} else if (DBLK(bp1) < lowest_blk) {
				lowest_bp = bp1;
				lowest_blk = DBLK(bp1);
			}
		} while ((DBLK(bp1->av_back) < DBLK(bp1)) &&
		    ((bp1 = bp1->av_back) != bp_start));
		/* bp is the new lowest block: insert before lowest_bp */
		bp->av_forw = lowest_bp;
		lowest_bp->av_back->av_forw = bp;
		bp->av_back = lowest_bp->av_back;
		lowest_bp->av_back = bp;
		if ((bpp == async_bpp) && !(dp->hd_flags & QNEAR_ASYNC)) {
			*bpp = bp;
		} else if (!(dp->hd_flags & QNEAR_BACKWARD) &&
		    BP_GT_HD(bp, dp)) {
			*bpp = bp;
		}
	} else {
		/* walk forward from the head looking for bp's slot */
		highest_blk = DBLK(bp1);
		highest_bp = bp1;
		do {
			if (DBLK(bp) < DBLK(bp1)) {
				/* insert bp just before bp1 */
				bp->av_forw = bp1;
				bp1->av_back->av_forw = bp;
				bp->av_back = bp1->av_back;
				bp1->av_back = bp;
				if (((bpp == async_bpp) &&
				    (dp->hd_flags & QNEAR_ASYNC)) ||
				    (bpp == sync_bpp)) {
					if ((dp->hd_flags & QNEAR_BACKWARD) &&
					    BP_LT_HD(bp, dp)) {
						*bpp = bp;
					}
				}
				return;
			} else if (DBLK(bp1) > highest_blk) {
				highest_bp = bp1;
				highest_blk = DBLK(bp1);
			}
		} while ((DBLK(bp1->av_forw) > DBLK(bp1)) &&
		    ((bp1 = bp1->av_forw) != bp_start));
		/* bp is the new highest block: insert after highest_bp */
		bp->av_back = highest_bp;
		highest_bp->av_forw->av_back = bp;
		bp->av_forw = highest_bp->av_forw;
		highest_bp->av_forw = bp;

		if (((bpp == sync_bpp) ||
		    ((bpp == async_bpp) && (dp->hd_flags & QNEAR_ASYNC))) &&
		    (dp->hd_flags & QNEAR_BACKWARD) && (BP_LT_HD(bp, dp)))
			*bpp = bp;
	}
}
1046 1046
/*
 * dmult_enque() holds dmultp->ds_mutex lock, so we don't grab the
 * lock here. If dmult_enque() changes we will have to visit
 * this function again.
 */
/* Add bp to the qmerge queue; returns the new queue count. */
static int
qmerge_add(struct que_data *qfp, struct buf *bp)
{

	que_insert(qfp, bp);
	return (++qfp->q_cnt);
}
1059 1059
/*
 * Completion routine for a merged request: walk the chain of original
 * bufs (headed by bp->b_forw), unsplice each one's pages from the
 * merged page list, propagate any error, and biodone() each original
 * buf.  The merge header bp itself was allocated by the merge code
 * and is freed here.
 */
static int
qmerge_iodone(struct buf *bp)
{
	struct buf *bp1;
	struct page *pp, *pp1, *tmp_pp;

	if (bp->b_flags & B_REMAPPED)
		bp_mapout(bp);

	bp1 = bp->b_forw;
	do {
		/* detach bp1 from the merged buf chain */
		bp->b_forw = bp1->av_forw;
		bp1->av_forw->av_back = bp1->av_back;
		bp1->av_back->av_forw = bp1->av_forw;
		pp = (page_t *)bp1->b_pages;
		pp1 = bp->b_forw->b_pages;

		/* restore bp1's own circular page list */
		tmp_pp = pp->p_prev;
		pp->p_prev = pp1->p_prev;
		pp->p_prev->p_next = pp;

		pp1->p_prev = tmp_pp;
		pp1->p_prev->p_next = pp1;

		if (bp->b_flags & B_ERROR) {
			bp1->b_error = bp->b_error;
			bp1->b_flags |= B_ERROR;
		}

		biodone(bp1);
	} while ((bp1 = bp->b_forw) != bp->b_forw->av_forw);

	/* the last remaining original buf */
	biodone(bp1);
	kmem_free(bp, sizeof (*bp));
	return (0);
}
1096 1096
1097 1097
1098 1098
1099 1099
/*
 * Select and dequeue the next buf from a qmerge queue.
 *
 * The queue keeps two circular lists sorted by block number: a sync list
 * (hd_sync_next) and an async list (hd_async_next).  The scan behaves as
 * an elevator: QNEAR_BACKWARD selects scan direction, QNEAR_ASYNCONLY /
 * QNEAR_ASYNCALSO select which list(s) feed the scan, and qmerge2wayscan
 * chooses between one-way and two-way (C-SCAN vs SCAN style) sweeps.
 * SYNC2ASYNC() — the hd_private slot — doubles as a countdown of sync
 * sweeps before the async list is serviced.
 *
 * If bp_merge is non-NULL, only a buf mergeable with it (per
 * qmerge_can_merge()) is returned; otherwise NULL.  *can_merge is set to
 * nonzero when the returned (write) buf is block-contiguous with its
 * neighbor in scan direction, i.e. a further merge is possible.
 *
 * Caller must hold the queue lock (see the locking note above
 * qmerge_add()).  Returns NULL when the queue is empty or merging must
 * stop; otherwise the dequeued buf, with any pending b_error stashed in
 * av_back for the caller.
 */
static struct buf *
qmerge_nextbp(struct que_data *qfp, struct buf *bp_merge, int *can_merge)
{
	intptr_t private, cnt;
	int flags;
	struct buf *sync_bp, *async_bp, *bp;
	struct buf **sync_bpp, **async_bpp, **bpp;
	struct diskhd *dp = &qfp->q_tab;

	if (qfp->q_cnt == 0) {
		return (NULL);
	}
	flags = qfp->q_tab.hd_flags;
	sync_bpp = &qfp->q_tab.hd_sync_next;
	async_bpp = &qfp->q_tab.hd_async_next;

begin_nextbp:
	if (flags & QNEAR_ASYNCONLY) {
		/* Only the async list has work; scan it alone. */
		bp = *async_bpp;
		private = DBLK(bp);
		if (bp_merge && !qmerge_can_merge(bp, bp_merge)) {
			return (NULL);
		} else if (bp->av_forw == bp) {
			/* last buf on the async list */
			bp->av_forw = bp->av_back = NULL;
			flags &= ~(QNEAR_ASYNCONLY | QNEAR_BACKWARD);
			private = 0;
		} else if (flags & QNEAR_BACKWARD) {
			if (DBLK(bp) < DBLK(bp->av_back)) {
				/* lowest block reached; end backward sweep */
				flags &= ~QNEAR_BACKWARD;
				private = 0;
			}
		} else if (DBLK(bp) > DBLK(bp->av_forw)) {
			/* highest block reached; reverse or restart */
			if (qmerge2wayscan) {
				flags |= QNEAR_BACKWARD;
			} else {
				private = 0;
			}
		} else if (qmerge2wayscan == 0) {
			private = DBLK(bp->av_forw);
		}
		bpp = async_bpp;

	} else if (flags & QNEAR_ASYNCALSO) {
		/* Both lists feed the sweep; pick whichever is nearer. */
		sync_bp = *sync_bpp;
		async_bp = *async_bpp;
		if (flags & QNEAR_BACKWARD) {
			if (BP_GT_HD(sync_bp, dp) && BP_GT_HD(async_bp, dp)) {
				/* both lists passed the head; go forward */
				flags &= ~(QNEAR_BACKWARD|QNEAR_ASYNCALSO);
				*sync_bpp = sync_bp->av_forw;
				*async_bpp = async_bp->av_forw;
				SYNC2ASYNC(qfp) = (void *)qmerge_sync2async;
				qfp->q_tab.hd_private = 0;
				goto begin_nextbp;
			}
			if (BP_LT_HD(async_bp, dp) && BP_LT_HD(sync_bp, dp)) {
				if (BP_GT_BP(async_bp, sync_bp)) {
					bpp = async_bpp;
					bp = *async_bpp;
				} else {
					bpp = sync_bpp;
					bp = *sync_bpp;
				}
			} else if (BP_LT_HD(async_bp, dp)) {
				bpp = async_bpp;
				bp = *async_bpp;
			} else {
				bpp = sync_bpp;
				bp = *sync_bpp;
			}
		} else {
			if (BP_LT_HD(sync_bp, dp) && BP_LT_HD(async_bp, dp)) {
				/* both lists are behind the head */
				if (qmerge2wayscan) {
					flags |= QNEAR_BACKWARD;
					*sync_bpp = sync_bp->av_back;
					*async_bpp = async_bp->av_back;
					goto begin_nextbp;
				} else {
					flags &= ~QNEAR_ASYNCALSO;
					SYNC2ASYNC(qfp) =
					    (void *)qmerge_sync2async;
					qfp->q_tab.hd_private = 0;
					goto begin_nextbp;
				}
			}
			if (BP_GT_HD(async_bp, dp) && BP_GT_HD(sync_bp, dp)) {
				if (BP_LT_BP(async_bp, sync_bp)) {
					bpp = async_bpp;
					bp = *async_bpp;
				} else {
					bpp = sync_bpp;
					bp = *sync_bpp;
				}
			} else if (BP_GT_HD(async_bp, dp)) {
				bpp = async_bpp;
				bp = *async_bpp;
			} else {
				bpp = sync_bpp;
				bp = *sync_bpp;
			}
		}
		if (bp_merge && !qmerge_can_merge(bp, bp_merge)) {
			return (NULL);
		} else if (bp->av_forw == bp) {
			/* last buf on the chosen list */
			bp->av_forw = bp->av_back = NULL;
			flags &= ~QNEAR_ASYNCALSO;
			if (bpp == async_bpp) {
				SYNC2ASYNC(qfp) = (void *)qmerge_sync2async;
			} else {
				flags |= QNEAR_ASYNCONLY;
			}
		}
		private = DBLK(bp);
	} else {
		/* Default: scan the sync list only. */
		bp = *sync_bpp;
		private = DBLK(bp);
		if (bp_merge && !qmerge_can_merge(bp, bp_merge)) {
			return (NULL);
		} else if (bp->av_forw == bp) {
			/* last sync buf; fall back to async work if any */
			private = 0;
			SYNC2ASYNC(qfp) = (void *)qmerge_sync2async;
			bp->av_forw = bp->av_back = NULL;
			flags &= ~QNEAR_BACKWARD;
			if (*async_bpp)
				flags |= QNEAR_ASYNCONLY;
		} else if (flags & QNEAR_BACKWARD) {
			if (DBLK(bp) < DBLK(bp->av_back)) {
				/*
				 * Backward sweep done.  Count down the
				 * sync sweeps; when exhausted, admit the
				 * async list to the next sweep.
				 */
				flags &= ~QNEAR_BACKWARD;
				cnt = (intptr_t)SYNC2ASYNC(qfp);
				if (cnt > 0) {
					cnt--;
					SYNC2ASYNC(qfp) = (void *)cnt;
				} else {
					if (*async_bpp)
						flags |= QNEAR_ASYNCALSO;
					SYNC2ASYNC(qfp) =
					    (void *)qmerge_sync2async;
				}
				private = 0;
			}
		} else if (DBLK(bp) > DBLK(bp->av_forw)) {
			/* forward sweep done; reverse, or count down */
			private = 0;
			if (qmerge2wayscan) {
				flags |= QNEAR_BACKWARD;
				private = DBLK(bp);
			} else {
				cnt = (intptr_t)SYNC2ASYNC(qfp);
				if (cnt > 0) {
					cnt--;
					SYNC2ASYNC(qfp) = (void *)cnt;
				} else {
					if (*async_bpp)
						flags |= QNEAR_ASYNCALSO;
					SYNC2ASYNC(qfp) =
					    (void *)qmerge_sync2async;
				}
			}
		} else if (qmerge2wayscan == 0) {
			private = DBLK(bp->av_forw);
		}
		bpp = sync_bpp;
	}

	if (bp->av_forw) {
		/*
		 * More bufs remain: advance the scan head past bp,
		 * unlink bp, and report whether the neighbor in scan
		 * direction is block-contiguous (mergeable writes only).
		 */
		*can_merge = !(bp->b_flags & B_READ);
		if (flags & QNEAR_BACKWARD) {
			*bpp = bp->av_back;
			if ((DBLK(bp->av_back) +
			    bp->av_back->b_bcount / DEV_BSIZE) != DBLK(bp))
				*can_merge = 0;
		} else {
			*bpp = bp->av_forw;
			if ((DBLK(bp) + bp->b_bcount / DEV_BSIZE) !=
			    DBLK(bp->av_forw))
				*can_merge = 0;
		}
		bp->av_forw->av_back = bp->av_back;
		bp->av_back->av_forw = bp->av_forw;
		bp->av_forw = bp->av_back = NULL;
	} else {
		*bpp = NULL;
		*can_merge = 0;
	}
	qfp->q_tab.hd_private = (void *)private;
	qfp->q_cnt--;
	qfp->q_tab.hd_flags = flags;
	if (bp->b_error) {
		/* stash the error in av_back for the caller; clear it here */
		bp->av_back = (void *)(intptr_t)bp->b_error;
		bp->b_error = 0;
	}
	return (bp);
}
1291 1291
/*
 * Dequeue work from a qmerge queue, coalescing block-contiguous bufs.
 *
 * The first buf comes from qmerge_nextbp(); while it reports further
 * mergeable neighbors, a merge header is allocated (once, lazily) and
 * each neighbor is folded in via qmerge_mergesetup().  The header's
 * b_iodone is qmerge_iodone(), which later splits the pieces back apart
 * and frees the header.  Returns the merged buf (or the single buf when
 * nothing merged, or NULL when the queue is empty).
 */
static struct buf *
qmerge_del(struct que_data *qfp)
{
	struct buf *bp, *next_bp, *bp_merge;
	int alloc_mergebp, merge;

	if (qfp->q_cnt == 0) {
		return (NULL);
	}

	bp_merge = bp = qmerge_nextbp(qfp, NULL, &merge);
	alloc_mergebp = 1;
	while (merge && (next_bp = qmerge_nextbp(qfp, bp_merge, &merge))) {
		if (alloc_mergebp) {
			bp_merge = kmem_alloc(sizeof (*bp_merge), KM_NOSLEEP);
			if (bp_merge == NULL) {
				/*
				 * NOTE(review): this drops q_mutex on the
				 * allocation-failure path only, implying the
				 * caller entered with it held and expects it
				 * released here — asymmetric with the
				 * success path; verify against the caller.
				 */
				mutex_exit(&qfp->q_mutex);
				return (bp);
			}
			/* clone the first buf as the merge header */
			bcopy(bp, bp_merge, sizeof (*bp_merge));
			bp_merge->b_iodone = qmerge_iodone;
			bp_merge->b_forw = bp;
			bp_merge->b_back = (struct buf *)qfp;
			/* bp becomes a singleton circular list of pieces */
			bp->av_forw = bp->av_back = bp;
			alloc_mergebp = 0;
		}
		qmerge_mergesetup(bp_merge, next_bp);
	}
	return (bp_merge);
}
1322 1322
1323 1323
1324 1324 /*
1325 1325 * FIFO Queue functions
1326 1326 */
1327 1327 /*
1328 1328 * Local Function Prototypes
1329 1329 */
1330 1330 static int qfifo_add();
1331 1331
1332 1332 struct que_objops qfifo_ops = {
1333 1333 que_init,
1334 1334 que_free,
1335 1335 qfifo_add,
1336 1336 que_del,
1337 1337 0, 0
1338 1338 };
1339 1339
1340 1340 /*
1341 1341 * Local static data
1342 1342 */
1343 1343 struct que_obj *
1344 1344 qfifo_create()
1345 1345 {
1346 1346 return (que_create((struct que_objops *)&qfifo_ops));
1347 1347 }
1348 1348
1349 1349 static int
1350 1350 qfifo_add(struct que_data *qfp, struct buf *bp)
1351 1351 {
1352 1352
1353 1353 if (!qfp->q_tab.b_actf)
1354 1354 qfp->q_tab.b_actf = bp;
1355 1355 else
1356 1356 qfp->q_tab.b_actl->av_forw = bp;
1357 1357 qfp->q_tab.b_actl = bp;
1358 1358 bp->av_forw = NULL;
1359 1359 return (0);
1360 1360 }
1361 1361
1362 1362 /*
1363 1363 * One-Way-Scan Queue functions
1364 1364 */
1365 1365 /*
1366 1366 * Local Function Prototypes
1367 1367 */
1368 1368 static int qsort_add();
1369 1369 static struct buf *qsort_del();
1370 1370 static void oneway_scan_binary(struct diskhd *dp, struct buf *bp);
1371 1371
1372 1372 struct que_objops qsort_ops = {
1373 1373 que_init,
1374 1374 que_free,
1375 1375 qsort_add,
1376 1376 qsort_del,
1377 1377 0, 0
1378 1378 };
1379 1379
1380 1380 /*
1381 1381 * Local static data
1382 1382 */
1383 1383 struct que_obj *
1384 1384 qsort_create()
1385 1385 {
1386 1386 return (que_create((struct que_objops *)&qsort_ops));
1387 1387 }
1388 1388
1389 1389 static int
1390 1390 qsort_add(struct que_data *qfp, struct buf *bp)
1391 1391 {
1392 1392 qfp->q_cnt++;
1393 1393 oneway_scan_binary(&qfp->q_tab, bp);
1394 1394 return (0);
1395 1395 }
1396 1396
1397 1397
1398 1398 #define b_pasf b_forw
1399 1399 #define b_pasl b_back
1400 1400 static void
1401 1401 oneway_scan_binary(struct diskhd *dp, struct buf *bp)
1402 1402 {
1403 1403 struct buf *ap;
1404 1404
1405 1405 ap = dp->b_actf;
1406 1406 if (ap == NULL) {
1407 1407 dp->b_actf = bp;
1408 1408 bp->av_forw = NULL;
1409 1409 return;
1410 1410 }
1411 1411 if (DBLK(bp) < DBLK(ap)) {
1412 1412 ap = dp->b_pasf;
1413 1413 if ((ap == NULL) || (DBLK(bp) < DBLK(ap))) {
1414 1414 dp->b_pasf = bp;
1415 1415 bp->av_forw = ap;
1416 1416 return;
1417 1417 }
1418 1418 }
1419 1419 while (ap->av_forw) {
1420 1420 if (DBLK(bp) < DBLK(ap->av_forw))
1421 1421 break;
1422 1422 ap = ap->av_forw;
1423 1423 }
1424 1424 bp->av_forw = ap->av_forw;
1425 1425 ap->av_forw = bp;
1426 1426 }
1427 1427
1428 1428 static struct buf *
1429 1429 qsort_del(struct que_data *qfp)
1430 1430 {
1431 1431 struct buf *bp;
1432 1432
1433 1433 if (qfp->q_cnt == 0) {
1434 1434 return (NULL);
1435 1435 }
1436 1436 qfp->q_cnt--;
1437 1437 bp = qfp->q_tab.b_actf;
1438 1438 qfp->q_tab.b_actf = bp->av_forw;
1439 1439 bp->av_forw = 0;
1440 1440 if (!qfp->q_tab.b_actf && qfp->q_tab.b_pasf) {
1441 1441 qfp->q_tab.b_actf = qfp->q_tab.b_pasf;
1442 1442 qfp->q_tab.b_pasf = NULL;
1443 1443 }
1444 1444 return (bp);
1445 1445 }
1446 1446
1447 1447 /*
1448 1448 * Tagged queueing
1449 1449 */
1450 1450 /*
1451 1451 * Local Function Prototypes
1452 1452 */
1453 1453
/* Tagged queueing reuses the one-way-scan add/del routines. */
struct que_objops qtag_ops = {
	que_init,
	que_free,
	qsort_add,
	qsort_del,
	0, 0
};
1461 1461
1462 1462 /*
1463 1463 * Local static data
1464 1464 */
1465 1465 struct que_obj *
1466 1466 qtag_create()
1467 1467 {
1468 1468 return (que_create((struct que_objops *)&qtag_ops));
1469 1469 }
↓ open down ↓ |
206 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX