Print this page
2976 remove useless offsetof() macros
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/avs/ns/rdc/rdc_io.c
+++ new/usr/src/uts/common/avs/ns/rdc/rdc_io.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
↓ open down ↓ |
21 lines elided |
↑ open up ↑ |
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 #include <sys/types.h>
27 27 #include <sys/ksynch.h>
28 28 #include <sys/cmn_err.h>
29 29 #include <sys/kmem.h>
30 30 #include <sys/conf.h>
31 31 #include <sys/errno.h>
32 +#include <sys/sysmacros.h>
32 33
33 34 #ifdef _SunOS_5_6
34 35 /*
35 36 * on 2.6 both dki_lock.h and rpc/types.h define bool_t so we
36 37 * define enum_t here as it is all we need from rpc/types.h
37 38 * anyway and make it look like we included it. Yuck.
38 39 */
39 40 #define _RPC_TYPES_H
40 41 typedef int enum_t;
41 42 #else
42 43 #ifndef DS_DDICT
43 44 #include <rpc/types.h>
44 45 #endif
45 46 #endif /* _SunOS_5_6 */
46 47
47 48 #include <sys/ddi.h>
48 49
49 50 #include <sys/nsc_thread.h>
50 51 #include <sys/nsctl/nsctl.h>
51 52
52 53 #include <sys/sdt.h> /* dtrace is S10 or later */
53 54
54 55 #include "rdc_io.h"
55 56 #include "rdc_bitmap.h"
56 57 #include "rdc_update.h"
57 58 #include "rdc_ioctl.h"
58 59 #include "rdcsrv.h"
59 60 #include "rdc_diskq.h"
60 61
61 62 #include <sys/unistat/spcs_s.h>
62 63 #include <sys/unistat/spcs_s_k.h>
63 64 #include <sys/unistat/spcs_errors.h>
64 65
65 66 volatile int net_exit;
66 67 nsc_size_t MAX_RDC_FBAS;
67 68
68 69 #ifdef DEBUG
69 70 int RDC_MAX_SYNC_THREADS = 8;
70 71 int rdc_maxthreads_last = 8;
71 72 #endif
72 73
73 74 kmutex_t rdc_ping_lock; /* Ping lock */
74 75 static kmutex_t net_blk_lock;
75 76
76 77 /*
77 78 * rdc_conf_lock is used as a global device configuration lock.
78 79 * It is also used by enable/resume and disable/suspend code to ensure that
79 80 * the transition of an rdc set between configured and unconfigured is
80 81 * atomic.
81 82 *
82 83 * krdc->group->lock is used to protect state changes of a configured rdc
83 84 * set (e.g. changes to urdc->flags), such as enabled to disabled and vice
84 85 * versa.
85 86 *
86 87 * rdc_many_lock is also used to protect changes in group membership. A group
87 88 * linked list cannot change while this lock is held. The many list and the
88 89 * multi-hop list are both protected by rdc_many_lock.
89 90 */
90 91 kmutex_t rdc_conf_lock;
91 92 kmutex_t rdc_many_lock; /* Many/multi-list lock */
92 93
93 94 static kmutex_t rdc_net_hnd_id_lock; /* Network handle id lock */
94 95 int rdc_debug = 0;
95 96 int rdc_debug_sleep = 0;
96 97
97 98 static int rdc_net_hnd_id = 1;
98 99
99 100 extern kmutex_t rdc_clnt_lock;
100 101
101 102 static void rdc_ditemsfree(rdc_net_dataset_t *);
102 103 void rdc_clnt_destroy(void);
103 104
104 105 rdc_k_info_t *rdc_k_info;
105 106 rdc_u_info_t *rdc_u_info;
106 107
107 108 unsigned long rdc_async_timeout;
108 109
109 110 nsc_size_t rdc_maxthres_queue = RDC_MAXTHRES_QUEUE;
110 111 int rdc_max_qitems = RDC_MAX_QITEMS;
111 112 int rdc_asyncthr = RDC_ASYNCTHR;
112 113 static nsc_svc_t *rdc_volume_update;
113 114 static int rdc_prealloc_handle = 1;
114 115
115 116 extern int _rdc_rsrv_diskq(rdc_group_t *group);
116 117 extern void _rdc_rlse_diskq(rdc_group_t *group);
117 118
118 119 /*
119 120 * Forward declare all statics that are used before defined
120 121 * to enforce parameter checking
121 122 *
122 123 * Some (if not all) of these could be removed if the code were reordered
123 124 */
124 125
125 126 static void rdc_volume_update_svc(intptr_t);
126 127 static void halt_sync(rdc_k_info_t *krdc);
127 128 void rdc_kstat_create(int index);
128 129 void rdc_kstat_delete(int index);
129 130 static int rdc_checkforbitmap(int, nsc_off_t);
130 131 static int rdc_installbitmap(int, void *, int, nsc_off_t, int, int *, int);
131 132 static rdc_group_t *rdc_newgroup();
132 133
133 134 int rdc_enable_diskq(rdc_k_info_t *krdc);
134 135 void rdc_close_diskq(rdc_group_t *group);
135 136 int rdc_suspend_diskq(rdc_k_info_t *krdc);
136 137 int rdc_resume_diskq(rdc_k_info_t *krdc);
137 138 void rdc_init_diskq_header(rdc_group_t *grp, dqheader *header);
138 139 void rdc_fail_diskq(rdc_k_info_t *krdc, int wait, int dolog);
139 140 void rdc_unfail_diskq(rdc_k_info_t *krdc);
140 141 void rdc_unintercept_diskq(rdc_group_t *grp);
141 142 int rdc_stamp_diskq(rdc_k_info_t *krdc, int rsrvd, int flags);
142 143 void rdc_qfiller_thr(rdc_k_info_t *krdc);
143 144
144 145 nstset_t *_rdc_ioset;
145 146 nstset_t *_rdc_flset;
146 147
147 148 /*
148 149 * RDC threadset tunables
149 150 */
150 151 int rdc_threads = 64; /* default number of threads */
151 152 int rdc_threads_inc = 8; /* increment for changing the size of the set */
152 153
153 154 /*
154 155 * Private threadset manipulation variables
155 156 */
156 157 static int rdc_threads_hysteresis = 2;
157 158 /* hysteresis for threadset resizing */
158 159 static int rdc_sets_active; /* number of sets currently enabled */
159 160
160 161 #ifdef DEBUG
161 162 kmutex_t rdc_cntlock;
162 163 #endif
163 164
164 165 /*
165 166 * rdc_thread_deconfigure - rdc is being deconfigured, stop any
166 167 * thread activity.
167 168 *
168 169 * Inherently single-threaded by the Solaris module unloading code.
169 170 */
170 171 static void
171 172 rdc_thread_deconfigure(void)
172 173 {
173 174 nst_destroy(_rdc_ioset);
174 175 _rdc_ioset = NULL;
175 176
176 177 nst_destroy(_rdc_flset);
177 178 _rdc_flset = NULL;
178 179
179 180 nst_destroy(sync_info.rdc_syncset);
180 181 sync_info.rdc_syncset = NULL;
181 182 }
182 183
183 184 /*
184 185 * rdc_thread_configure - rdc is being configured, initialize the
185 186 * threads we need for flushing aync volumes.
186 187 *
187 188 * Must be called with rdc_conf_lock held.
188 189 */
189 190 static int
190 191 rdc_thread_configure(void)
191 192 {
192 193 ASSERT(MUTEX_HELD(&rdc_conf_lock));
193 194
194 195 if ((_rdc_ioset = nst_init("rdc_thr", rdc_threads)) == NULL)
195 196 return (EINVAL);
196 197
197 198 if ((_rdc_flset = nst_init("rdc_flushthr", 2)) == NULL)
198 199 return (EINVAL);
199 200
200 201 if ((sync_info.rdc_syncset =
201 202 nst_init("rdc_syncthr", RDC_MAX_SYNC_THREADS)) == NULL)
202 203 return (EINVAL);
203 204
204 205 return (0);
205 206 }
206 207
207 208
208 209 /*
209 210 * rdc_thread_tune - called to tune the size of the rdc threadset.
210 211 *
211 212 * Called from the config code when an rdc_set has been enabled or disabled.
212 213 * 'sets' is the increment to the number of active rdc_sets.
213 214 *
214 215 * Must be called with rdc_conf_lock held.
215 216 */
216 217 static void
217 218 rdc_thread_tune(int sets)
218 219 {
219 220 int incr = (sets > 0) ? 1 : -1;
220 221 int change = 0;
221 222 int nthreads;
222 223
223 224 ASSERT(MUTEX_HELD(&rdc_conf_lock));
224 225
225 226 if (sets < 0)
226 227 sets = -sets;
227 228
228 229 while (sets--) {
229 230 nthreads = nst_nthread(_rdc_ioset);
230 231 rdc_sets_active += incr;
231 232
232 233 if (rdc_sets_active >= nthreads)
233 234 change += nst_add_thread(_rdc_ioset, rdc_threads_inc);
234 235 else if ((rdc_sets_active <
235 236 (nthreads - (rdc_threads_inc + rdc_threads_hysteresis))) &&
236 237 ((nthreads - rdc_threads_inc) >= rdc_threads))
237 238 change -= nst_del_thread(_rdc_ioset, rdc_threads_inc);
238 239 }
239 240
240 241 #ifdef DEBUG
241 242 if (change) {
242 243 cmn_err(CE_NOTE, "!rdc_thread_tune: "
243 244 "nsets %d, nthreads %d, nthreads change %d",
244 245 rdc_sets_active, nst_nthread(_rdc_ioset), change);
245 246 }
246 247 #endif
247 248 }
248 249
249 250
/*
 * _rdc_unload() - cache is being unloaded,
 * deallocate any dual copy structures allocated during cache
 * loading.
 *
 * Mirror of _rdc_load(): unregisters the volume-update service,
 * destroys the threadsets, then tears down all per-set and global
 * synchronization objects before freeing the info arrays.
 */
void
_rdc_unload(void)
{
	int i;
	rdc_k_info_t *krdc;

	/* Unregister the "RDCVolumeUpdated" service registered in _rdc_load */
	if (rdc_volume_update) {
		(void) nsc_unregister_svc(rdc_volume_update);
		rdc_volume_update = NULL;
	}

	rdc_thread_deconfigure();

	/* Destroy the per-set mutexes and condition variables. */
	if (rdc_k_info != NULL) {
		for (i = 0; i < rdc_max_sets; i++) {
			krdc = &rdc_k_info[i];
			mutex_destroy(&krdc->dc_sleep);
			mutex_destroy(&krdc->bmapmutex);
			mutex_destroy(&krdc->kstat_mutex);
			mutex_destroy(&krdc->bmp_kstat_mutex);
			mutex_destroy(&krdc->syncbitmutex);
			cv_destroy(&krdc->busycv);
			cv_destroy(&krdc->closingcv);
			cv_destroy(&krdc->haltcv);
			cv_destroy(&krdc->synccv);
		}
	}

	/* Destroy the global locks created in _rdc_load. */
	mutex_destroy(&sync_info.lock);
	mutex_destroy(&rdc_ping_lock);
	mutex_destroy(&net_blk_lock);
	mutex_destroy(&rdc_conf_lock);
	mutex_destroy(&rdc_many_lock);
	mutex_destroy(&rdc_net_hnd_id_lock);
	mutex_destroy(&rdc_clnt_lock);
#ifdef DEBUG
	mutex_destroy(&rdc_cntlock);
#endif
	net_exit = ATM_EXIT;

	/* Release the kernel and user info arrays and reset globals. */
	if (rdc_k_info != NULL)
		kmem_free(rdc_k_info, sizeof (*rdc_k_info) * rdc_max_sets);
	if (rdc_u_info != NULL)
		kmem_free(rdc_u_info, sizeof (*rdc_u_info) * rdc_max_sets);
	rdc_k_info = NULL;
	rdc_u_info = NULL;
	rdc_max_sets = 0;
}
303 304
304 305
/*
 * _rdc_load() - rdc is being loaded, Allocate anything
 * that will be needed while the cache is loaded but doesn't really
 * depend on configuration parameters.
 *
 * Returns 0 on success, ENOMEM if the info arrays cannot be allocated.
 */
int
_rdc_load(void)
{
	int i;
	rdc_k_info_t *krdc;

	/* Global locks, destroyed again in _rdc_unload(). */
	mutex_init(&rdc_ping_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&net_blk_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&rdc_conf_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&rdc_many_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&rdc_net_hnd_id_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&rdc_clnt_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sync_info.lock, NULL, MUTEX_DRIVER, NULL);

#ifdef DEBUG
	mutex_init(&rdc_cntlock, NULL, MUTEX_DRIVER, NULL);
#endif

	/* Cap the number of sets at the nsctl device limit. */
	if ((i = nsc_max_devices()) < rdc_max_sets)
		rdc_max_sets = i;
	/* following case for partial installs that may fail */
	if (!rdc_max_sets)
		rdc_max_sets = 1024;

	rdc_k_info = kmem_zalloc(sizeof (*rdc_k_info) * rdc_max_sets, KM_SLEEP);
	if (!rdc_k_info)
		return (ENOMEM);

	rdc_u_info = kmem_zalloc(sizeof (*rdc_u_info) * rdc_max_sets, KM_SLEEP);
	if (!rdc_u_info) {
		kmem_free(rdc_k_info, sizeof (*rdc_k_info) * rdc_max_sets);
		return (ENOMEM);
	}

	/* Initialize each set's index and synchronization objects. */
	net_exit = ATM_NONE;
	for (i = 0; i < rdc_max_sets; i++) {
		krdc = &rdc_k_info[i];
		bzero(krdc, sizeof (*krdc));
		krdc->index = i;
		mutex_init(&krdc->dc_sleep, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&krdc->bmapmutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&krdc->kstat_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&krdc->bmp_kstat_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&krdc->syncbitmutex, NULL, MUTEX_DRIVER, NULL);
		cv_init(&krdc->busycv, NULL, CV_DRIVER, NULL);
		cv_init(&krdc->closingcv, NULL, CV_DRIVER, NULL);
		cv_init(&krdc->haltcv, NULL, CV_DRIVER, NULL);
		cv_init(&krdc->synccv, NULL, CV_DRIVER, NULL);
	}

	/* Service callback used when a volume is updated underneath us. */
	rdc_volume_update = nsc_register_svc("RDCVolumeUpdated",
	    rdc_volume_update_svc);

	return (0);
}
366 367
367 368 static void
368 369 rdc_u_init(rdc_u_info_t *urdc)
369 370 {
370 371 const int index = (int)(urdc - &rdc_u_info[0]);
371 372
372 373 if (urdc->secondary.addr.maxlen)
373 374 free_rdc_netbuf(&urdc->secondary.addr);
374 375 if (urdc->primary.addr.maxlen)
375 376 free_rdc_netbuf(&urdc->primary.addr);
376 377
377 378 bzero(urdc, sizeof (rdc_u_info_t));
378 379
379 380 urdc->index = index;
380 381 urdc->maxqfbas = rdc_maxthres_queue;
381 382 urdc->maxqitems = rdc_max_qitems;
382 383 urdc->asyncthr = rdc_asyncthr;
383 384 }
384 385
385 386 /*
386 387 * _rdc_configure() - cache is being configured.
387 388 *
388 389 * Initialize dual copy structures
389 390 */
390 391 int
391 392 _rdc_configure(void)
392 393 {
393 394 int index;
394 395 rdc_k_info_t *krdc;
395 396
396 397 for (index = 0; index < rdc_max_sets; index++) {
397 398 krdc = &rdc_k_info[index];
398 399
399 400 krdc->remote_index = -1;
400 401 krdc->dcio_bitmap = NULL;
401 402 krdc->bitmap_ref = NULL;
402 403 krdc->bitmap_size = 0;
403 404 krdc->bitmap_write = 0;
404 405 krdc->disk_status = 0;
405 406 krdc->many_next = krdc;
406 407
407 408 rdc_u_init(&rdc_u_info[index]);
408 409 }
409 410
410 411 rdc_async_timeout = 120 * HZ; /* Seconds * HZ */
411 412 MAX_RDC_FBAS = FBA_LEN(RDC_MAXDATA);
412 413 if (net_exit != ATM_INIT) {
413 414 net_exit = ATM_INIT;
414 415 return (0);
415 416 }
416 417 return (0);
417 418 }
418 419
/*
 * _rdc_deconfigure - rdc is being deconfigured, shut down any
 * dual copy operations and return to an unconfigured state.
 *
 * Inverse of _rdc_configure(): resets every set's kernel state,
 * frees any network address buffers and wipes the user structures.
 */
void
_rdc_deconfigure(void)
{
	rdc_k_info_t *krdc;
	rdc_u_info_t *urdc;
	int index;

	for (index = 0; index < rdc_max_sets; index++) {
		krdc = &rdc_k_info[index];
		urdc = &rdc_u_info[index];

		/* Return the kernel info to its unconfigured defaults. */
		krdc->remote_index = -1;
		krdc->dcio_bitmap = NULL;
		krdc->bitmap_ref = NULL;
		krdc->bitmap_size = 0;
		krdc->bitmap_write = 0;
		krdc->disk_status = 0;
		krdc->many_next = krdc;

		/* Free network buffers before zeroing the user info. */
		if (urdc->primary.addr.maxlen)
			free_rdc_netbuf(&(urdc->primary.addr));

		if (urdc->secondary.addr.maxlen)
			free_rdc_netbuf(&(urdc->secondary.addr));

		bzero(urdc, sizeof (rdc_u_info_t));
		urdc->index = index;
	}
	net_exit = ATM_EXIT;
	rdc_clnt_destroy();

}
455 456
456 457
457 458 /*
458 459 * Lock primitives, containing checks that lock ordering isn't broken
459 460 */
460 461 /*ARGSUSED*/
461 462 void
462 463 rdc_many_enter(rdc_k_info_t *krdc)
463 464 {
464 465 ASSERT(!MUTEX_HELD(&krdc->bmapmutex));
465 466
466 467 mutex_enter(&rdc_many_lock);
467 468 }
468 469
469 470 /* ARGSUSED */
470 471 void
471 472 rdc_many_exit(rdc_k_info_t *krdc)
472 473 {
473 474 mutex_exit(&rdc_many_lock);
474 475 }
475 476
476 477 void
477 478 rdc_group_enter(rdc_k_info_t *krdc)
478 479 {
479 480 ASSERT(!MUTEX_HELD(&rdc_many_lock));
480 481 ASSERT(!MUTEX_HELD(&rdc_conf_lock));
481 482 ASSERT(!MUTEX_HELD(&krdc->bmapmutex));
482 483
483 484 mutex_enter(&krdc->group->lock);
484 485 }
485 486
486 487 void
487 488 rdc_group_exit(rdc_k_info_t *krdc)
488 489 {
489 490 mutex_exit(&krdc->group->lock);
490 491 }
491 492
492 493 /*
493 494 * Suspend and disable operations use this function to wait until it is safe
494 495 * to do continue, without trashing data structures used by other ioctls.
495 496 */
496 497 static void
497 498 wait_busy(rdc_k_info_t *krdc)
498 499 {
499 500 ASSERT(MUTEX_HELD(&rdc_conf_lock));
500 501
501 502 while (krdc->busy_count > 0)
502 503 cv_wait(&krdc->busycv, &rdc_conf_lock);
503 504 }
504 505
505 506
506 507 /*
507 508 * Other ioctls use this function to hold off disable and suspend.
508 509 */
509 510 void
510 511 set_busy(rdc_k_info_t *krdc)
511 512 {
512 513 ASSERT(MUTEX_HELD(&rdc_conf_lock));
513 514
514 515 wait_busy(krdc);
515 516
516 517 krdc->busy_count++;
517 518 }
518 519
519 520
520 521 /*
521 522 * Other ioctls use this function to allow disable and suspend to continue.
522 523 */
523 524 void
524 525 wakeup_busy(rdc_k_info_t *krdc)
525 526 {
526 527 ASSERT(MUTEX_HELD(&rdc_conf_lock));
527 528
528 529 if (krdc->busy_count <= 0)
529 530 return;
530 531
531 532 krdc->busy_count--;
532 533 cv_broadcast(&krdc->busycv);
533 534 }
534 535
535 536
/*
 * Remove the rdc set from its group, and destroy the group if no longer in
 * use.
 *
 * Must be called with rdc_conf_lock held; takes rdc_many_lock and the
 * group's queue/diskq locks internally.
 */
static void
remove_from_group(rdc_k_info_t *krdc)
{
	rdc_k_info_t *p;
	rdc_group_t *group;

	ASSERT(MUTEX_HELD(&rdc_conf_lock));

	rdc_many_enter(krdc);
	group = krdc->group;

	group->count--;

	/*
	 * lock queue while looking at thrnum
	 */
	mutex_enter(&group->ra_queue.net_qlock);
	if ((group->rdc_thrnum == 0) && (group->count == 0)) {

		/*
		 * Assure the we've stopped and the flusher thread has not
		 * fallen back to sleep
		 */
		if (krdc->group->ra_queue.qfill_sleeping != RDC_QFILL_DEAD) {
			group->ra_queue.qfflags |= RDC_QFILLSTOP;
			/*
			 * Poll until the qfiller thread has noticed
			 * RDC_QFILLSTOP and cleared it; drop the queue
			 * lock around each delay so it can make progress.
			 */
			while (krdc->group->ra_queue.qfflags & RDC_QFILLSTOP) {
				if (krdc->group->ra_queue.qfill_sleeping ==
				    RDC_QFILL_ASLEEP)
					cv_broadcast(&group->ra_queue.qfcv);
				mutex_exit(&group->ra_queue.net_qlock);
				delay(2);
				mutex_enter(&group->ra_queue.net_qlock);
			}
		}
		mutex_exit(&group->ra_queue.net_qlock);

		/* Last member: close the disk queue and free the group. */
		mutex_enter(&group->diskqmutex);
		rdc_close_diskq(group);
		mutex_exit(&group->diskqmutex);
		rdc_delgroup(group);
		rdc_many_exit(krdc);
		krdc->group = NULL;
		return;
	}
	mutex_exit(&group->ra_queue.net_qlock);
	/*
	 * Always clear the group field.
	 * no, you need it set in rdc_flush_memq().
	 * to call rdc_group_log()
	 * krdc->group = NULL;
	 */

	/* Take this rdc structure off the group list */

	for (p = krdc->group_next; p->group_next != krdc; p = p->group_next)
		;
	p->group_next = krdc->group_next;

	rdc_many_exit(krdc);
}
600 601
601 602
602 603 /*
603 604 * Add the rdc set to its group, setting up a new group if it's the first one.
604 605 */
605 606 static int
606 607 add_to_group(rdc_k_info_t *krdc, int options, int cmd)
607 608 {
608 609 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
609 610 rdc_u_info_t *utmp;
610 611 rdc_k_info_t *ktmp;
611 612 int index;
612 613 rdc_group_t *group;
613 614 int rc = 0;
614 615 nsthread_t *trc;
615 616
616 617 ASSERT(MUTEX_HELD(&rdc_conf_lock));
617 618
618 619 /*
619 620 * Look for matching group name, primary host name and secondary
620 621 * host name.
621 622 */
622 623
623 624 rdc_many_enter(krdc);
624 625 for (index = 0; index < rdc_max_sets; index++) {
625 626 utmp = &rdc_u_info[index];
626 627 ktmp = &rdc_k_info[index];
627 628
628 629 if (urdc->group_name[0] == 0)
629 630 break;
630 631
631 632 if (!IS_CONFIGURED(ktmp))
632 633 continue;
633 634
634 635 if (strncmp(utmp->group_name, urdc->group_name,
635 636 NSC_MAXPATH) != 0)
636 637 continue;
637 638 if (strncmp(utmp->primary.intf, urdc->primary.intf,
638 639 MAX_RDC_HOST_SIZE) != 0) {
639 640 /* Same group name, different primary interface */
640 641 rdc_many_exit(krdc);
641 642 return (-1);
642 643 }
643 644 if (strncmp(utmp->secondary.intf, urdc->secondary.intf,
644 645 MAX_RDC_HOST_SIZE) != 0) {
645 646 /* Same group name, different secondary interface */
646 647 rdc_many_exit(krdc);
647 648 return (-1);
648 649 }
649 650
650 651 /* Group already exists, so add this set to the group */
651 652
652 653 if (((options & RDC_OPT_ASYNC) == 0) &&
653 654 ((ktmp->type_flag & RDC_ASYNCMODE) != 0)) {
654 655 /* Must be same mode as existing group members */
655 656 rdc_many_exit(krdc);
656 657 return (-1);
657 658 }
658 659 if (((options & RDC_OPT_ASYNC) != 0) &&
659 660 ((ktmp->type_flag & RDC_ASYNCMODE) == 0)) {
660 661 /* Must be same mode as existing group members */
661 662 rdc_many_exit(krdc);
662 663 return (-1);
663 664 }
664 665
665 666 /* cannont reconfigure existing group into new queue this way */
666 667 if ((cmd != RDC_CMD_RESUME) &&
667 668 !RDC_IS_DISKQ(ktmp->group) && urdc->disk_queue[0] != '\0') {
668 669 rdc_many_exit(krdc);
669 670 return (RDC_EQNOADD);
670 671 }
671 672
672 673 ktmp->group->count++;
673 674 krdc->group = ktmp->group;
674 675 krdc->group_next = ktmp->group_next;
675 676 ktmp->group_next = krdc;
676 677
677 678 urdc->autosync = utmp->autosync; /* Same as rest */
678 679
679 680 (void) strncpy(urdc->disk_queue, utmp->disk_queue, NSC_MAXPATH);
680 681
681 682 rdc_many_exit(krdc);
682 683 return (0);
683 684 }
684 685
685 686 /* This must be a new group */
686 687 group = rdc_newgroup();
687 688 krdc->group = group;
688 689 krdc->group_next = krdc;
689 690 urdc->autosync = -1; /* Unknown */
690 691
691 692 /*
692 693 * Tune the thread set by one for each thread created
693 694 */
694 695 rdc_thread_tune(1);
695 696
696 697 trc = nst_create(_rdc_ioset, rdc_qfiller_thr, (void *)krdc, NST_SLEEP);
697 698 if (trc == NULL) {
698 699 rc = -1;
699 700 cmn_err(CE_NOTE, "!unable to create queue filler daemon");
700 701 goto fail;
701 702 }
702 703
703 704 if (urdc->disk_queue[0] == '\0') {
704 705 krdc->group->flags |= RDC_MEMQUE;
705 706 } else {
706 707 krdc->group->flags |= RDC_DISKQUE;
707 708
708 709 /* XXX check here for resume or enable and act accordingly */
709 710
710 711 if (cmd == RDC_CMD_RESUME) {
711 712 rc = rdc_resume_diskq(krdc);
712 713
713 714 } else if (cmd == RDC_CMD_ENABLE) {
714 715 rc = rdc_enable_diskq(krdc);
715 716 if ((rc == RDC_EQNOADD) && (cmd != RDC_CMD_ENABLE)) {
716 717 cmn_err(CE_WARN, "!disk queue %s enable failed,"
717 718 " enabling memory queue",
718 719 urdc->disk_queue);
719 720 krdc->group->flags &= ~RDC_DISKQUE;
720 721 krdc->group->flags |= RDC_MEMQUE;
721 722 bzero(urdc->disk_queue, NSC_MAXPATH);
722 723 }
723 724 }
724 725 }
725 726 fail:
726 727 rdc_many_exit(krdc);
727 728 return (rc);
728 729 }
729 730
730 731
/*
 * Move the set to a new group if possible
 *
 * Returns 0 on success, -1 (or a negative diskq error) on failure, in
 * which case the set's disk queue name is restored and the existing
 * group membership is left alone.
 *
 * Must be called with rdc_conf_lock held.
 */
static int
change_group(rdc_k_info_t *krdc, int options)
{
	rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
	rdc_u_info_t *utmp;
	rdc_k_info_t *ktmp;
	rdc_k_info_t *next;
	char tmpq[NSC_MAXPATH];		/* saved disk queue name for rollback */
	int index;
	int rc = -1;
	rdc_group_t *group, *old_group;
	nsthread_t *trc;

	ASSERT(MUTEX_HELD(&rdc_conf_lock));

	/*
	 * Look for matching group name, primary host name and secondary
	 * host name.
	 */

	bzero(&tmpq, sizeof (tmpq));
	rdc_many_enter(krdc);

	old_group = krdc->group;
	next = krdc->group_next;

	if (RDC_IS_DISKQ(old_group)) { /* can't keep your own queue */
		(void) strncpy(tmpq, urdc->disk_queue, NSC_MAXPATH);
		bzero(urdc->disk_queue, sizeof (urdc->disk_queue));
	}
	for (index = 0; index < rdc_max_sets; index++) {
		utmp = &rdc_u_info[index];
		ktmp = &rdc_k_info[index];

		if (ktmp == krdc)
			continue;

		/* An unnamed set always forms a group of its own. */
		if (urdc->group_name[0] == 0)
			break;

		if (!IS_CONFIGURED(ktmp))
			continue;

		if (strncmp(utmp->group_name, urdc->group_name,
		    NSC_MAXPATH) != 0)
			continue;
		if (strncmp(utmp->primary.intf, urdc->primary.intf,
		    MAX_RDC_HOST_SIZE) != 0)
			goto bad;
		if (strncmp(utmp->secondary.intf, urdc->secondary.intf,
		    MAX_RDC_HOST_SIZE) != 0)
			goto bad;

		/* Group already exists, so add this set to the group */

		if (((options & RDC_OPT_ASYNC) == 0) &&
		    ((ktmp->type_flag & RDC_ASYNCMODE) != 0)) {
			/* Must be same mode as existing group members */
			goto bad;
		}
		if (((options & RDC_OPT_ASYNC) != 0) &&
		    ((ktmp->type_flag & RDC_ASYNCMODE) == 0)) {
			/* Must be same mode as existing group members */
			goto bad;
		}

		/* Link this set into the existing group's circular list. */
		ktmp->group->count++;
		krdc->group = ktmp->group;
		krdc->group_next = ktmp->group_next;
		ktmp->group_next = krdc;
		bzero(urdc->disk_queue, sizeof (urdc->disk_queue));
		(void) strncpy(urdc->disk_queue, utmp->disk_queue, NSC_MAXPATH);

		goto good;
	}

	/* This must be a new group */
	group = rdc_newgroup();
	krdc->group = group;
	krdc->group_next = krdc;

	/*
	 * NOTE(review): on the failure paths below the freshly created
	 * group is not torn down and krdc still points at it while the
	 * set remains on the old group's list — confirm intended.
	 */
	trc = nst_create(_rdc_ioset, rdc_qfiller_thr, (void *)krdc, NST_SLEEP);
	if (trc == NULL) {
		rc = -1;
		cmn_err(CE_NOTE, "!unable to create queue filler daemon");
		goto bad;
	}

	if (urdc->disk_queue[0] == 0) {
		krdc->group->flags |= RDC_MEMQUE;
	} else {
		krdc->group->flags |= RDC_DISKQUE;
		if ((rc = rdc_enable_diskq(krdc)) < 0)
			goto bad;
	}
good:
	/* Record the set's new sync/async mode. */
	if (options & RDC_OPT_ASYNC) {
		krdc->type_flag |= RDC_ASYNCMODE;
		rdc_set_flags(urdc, RDC_ASYNC);
	} else {
		krdc->type_flag &= ~RDC_ASYNCMODE;
		rdc_clr_flags(urdc, RDC_ASYNC);
	}

	old_group->count--;
	if (!old_group->rdc_writer && old_group->count == 0) {
		/* Group now empty, so destroy */
		if (RDC_IS_DISKQ(old_group)) {
			rdc_unintercept_diskq(old_group);
			mutex_enter(&old_group->diskqmutex);
			rdc_close_diskq(old_group);
			mutex_exit(&old_group->diskqmutex);
		}

		mutex_enter(&old_group->ra_queue.net_qlock);

		/*
		 * Assure the we've stopped and the flusher thread has not
		 * fallen back to sleep
		 */
		if (old_group->ra_queue.qfill_sleeping != RDC_QFILL_DEAD) {
			old_group->ra_queue.qfflags |= RDC_QFILLSTOP;
			/* Poll until the qfiller acknowledges the stop. */
			while (old_group->ra_queue.qfflags & RDC_QFILLSTOP) {
				if (old_group->ra_queue.qfill_sleeping ==
				    RDC_QFILL_ASLEEP)
					cv_broadcast(&old_group->ra_queue.qfcv);
				mutex_exit(&old_group->ra_queue.net_qlock);
				delay(2);
				mutex_enter(&old_group->ra_queue.net_qlock);
			}
		}
		mutex_exit(&old_group->ra_queue.net_qlock);

		rdc_delgroup(old_group);
		rdc_many_exit(krdc);
		return (0);
	}

	/* Take this rdc structure off the old group list */

	for (ktmp = next; ktmp->group_next != krdc; ktmp = ktmp->group_next)
		;
	ktmp->group_next = next;

	rdc_many_exit(krdc);
	return (0);

bad:
	/* Leave existing group status alone */
	(void) strncpy(urdc->disk_queue, tmpq, NSC_MAXPATH);
	rdc_many_exit(krdc);
	return (rc);
}
887 888
888 889
889 890 /*
890 891 * Set flags for an rdc set, setting the group flags as necessary.
891 892 */
892 893 void
893 894 rdc_set_flags(rdc_u_info_t *urdc, int flags)
894 895 {
895 896 rdc_k_info_t *krdc = &rdc_k_info[urdc->index];
896 897 int vflags, sflags, bflags, ssflags;
897 898
898 899 DTRACE_PROBE2(rdc_set_flags, int, krdc->index, int, flags);
899 900 vflags = flags & RDC_VFLAGS;
900 901 sflags = flags & RDC_SFLAGS;
901 902 bflags = flags & RDC_BFLAGS;
902 903 ssflags = flags & RDC_SYNC_STATE_FLAGS;
903 904
904 905 if (vflags) {
905 906 /* normal volume flags */
906 907 ASSERT(MUTEX_HELD(&rdc_conf_lock) ||
907 908 MUTEX_HELD(&krdc->group->lock));
908 909 if (ssflags)
909 910 mutex_enter(&krdc->bmapmutex);
910 911
911 912 urdc->flags |= vflags;
912 913
913 914 if (ssflags)
914 915 mutex_exit(&krdc->bmapmutex);
915 916 }
916 917
917 918 if (sflags) {
918 919 /* Sync state flags that are protected by a different lock */
919 920 ASSERT(MUTEX_HELD(&rdc_many_lock));
920 921 urdc->sync_flags |= sflags;
921 922 }
922 923
923 924 if (bflags) {
924 925 /* Bmap state flags that are protected by a different lock */
925 926 ASSERT(MUTEX_HELD(&krdc->bmapmutex));
926 927 urdc->bmap_flags |= bflags;
927 928 }
928 929
929 930 }
930 931
931 932
932 933 /*
933 934 * Clear flags for an rdc set, clearing the group flags as necessary.
934 935 */
935 936 void
936 937 rdc_clr_flags(rdc_u_info_t *urdc, int flags)
937 938 {
938 939 rdc_k_info_t *krdc = &rdc_k_info[urdc->index];
939 940 int vflags, sflags, bflags;
940 941
941 942 DTRACE_PROBE2(rdc_clr_flags, int, krdc->index, int, flags);
942 943 vflags = flags & RDC_VFLAGS;
943 944 sflags = flags & RDC_SFLAGS;
944 945 bflags = flags & RDC_BFLAGS;
945 946
946 947 if (vflags) {
947 948 /* normal volume flags */
948 949 ASSERT(MUTEX_HELD(&rdc_conf_lock) ||
949 950 MUTEX_HELD(&krdc->group->lock));
950 951 urdc->flags &= ~vflags;
951 952
952 953 }
953 954
954 955 if (sflags) {
955 956 /* Sync state flags that are protected by a different lock */
956 957 ASSERT(MUTEX_HELD(&rdc_many_lock));
957 958 urdc->sync_flags &= ~sflags;
958 959 }
959 960
960 961 if (bflags) {
961 962 /* Bmap state flags that are protected by a different lock */
962 963 ASSERT(MUTEX_HELD(&krdc->bmapmutex));
963 964 urdc->bmap_flags &= ~bflags;
964 965 }
965 966 }
966 967
967 968
968 969 /*
969 970 * Get the flags for an rdc set.
970 971 */
971 972 int
972 973 rdc_get_vflags(rdc_u_info_t *urdc)
973 974 {
974 975 return (urdc->flags | urdc->sync_flags | urdc->bmap_flags);
975 976 }
976 977
977 978
978 979 /*
979 980 * Initialise flags for an rdc set.
980 981 */
981 982 static void
982 983 rdc_init_flags(rdc_u_info_t *urdc)
983 984 {
984 985 urdc->flags = 0;
985 986 urdc->mflags = 0;
986 987 urdc->sync_flags = 0;
987 988 urdc->bmap_flags = 0;
988 989 }
989 990
990 991
991 992 /*
992 993 * Set flags for a many group.
993 994 */
994 995 void
995 996 rdc_set_mflags(rdc_u_info_t *urdc, int flags)
996 997 {
997 998 rdc_k_info_t *krdc = &rdc_k_info[urdc->index];
998 999 rdc_k_info_t *this = krdc;
999 1000
1000 1001 ASSERT(!(flags & ~RDC_MFLAGS));
1001 1002
1002 1003 if (flags == 0)
1003 1004 return;
1004 1005
1005 1006 ASSERT(MUTEX_HELD(&rdc_many_lock));
1006 1007
1007 1008 rdc_set_flags(urdc, flags); /* set flags on local urdc */
1008 1009
1009 1010 urdc->mflags |= flags;
1010 1011 for (krdc = krdc->many_next; krdc != this; krdc = krdc->many_next) {
1011 1012 urdc = &rdc_u_info[krdc->index];
1012 1013 if (!IS_ENABLED(urdc))
1013 1014 continue;
1014 1015 urdc->mflags |= flags;
1015 1016 }
1016 1017 }
1017 1018
1018 1019
/*
 * Clear flags for a many group.
 *
 * Clears the given flags on the local set, then removes from the
 * many-group flag word only those bits that no other enabled member
 * of the chain still has set.
 */
void
rdc_clr_mflags(rdc_u_info_t *urdc, int flags)
{
	rdc_k_info_t *krdc = &rdc_k_info[urdc->index];
	rdc_k_info_t *this = krdc;
	rdc_u_info_t *utmp;

	ASSERT(!(flags & ~RDC_MFLAGS));

	if (flags == 0)
		return;

	ASSERT(MUTEX_HELD(&rdc_many_lock));

	rdc_clr_flags(urdc, flags); /* clear flags on local urdc */

	/*
	 * We must maintain the mflags based on the set of flags for
	 * all the urdc's that are chained up.
	 */

	/*
	 * First look through all the urdc's and remove bits from
	 * the 'flags' variable that are in use elsewhere.
	 */

	for (krdc = krdc->many_next; krdc != this; krdc = krdc->many_next) {
		utmp = &rdc_u_info[krdc->index];
		if (!IS_ENABLED(utmp))
			continue;
		flags &= ~(rdc_get_vflags(utmp) & RDC_MFLAGS);
		/* Nothing left to clear; stop scanning early. */
		if (flags == 0)
			break;
	}

	/*
	 * Now clear flags as necessary.
	 */

	if (flags != 0) {
		urdc->mflags &= ~flags;
		for (krdc = krdc->many_next; krdc != this;
		    krdc = krdc->many_next) {
			utmp = &rdc_u_info[krdc->index];
			if (!IS_ENABLED(utmp))
				continue;
			utmp->mflags &= ~flags;
		}
	}
}
1072 1073
1073 1074
1074 1075 int
1075 1076 rdc_get_mflags(rdc_u_info_t *urdc)
1076 1077 {
1077 1078 return (urdc->mflags);
1078 1079 }
1079 1080
1080 1081
1081 1082 void
1082 1083 rdc_set_flags_log(rdc_u_info_t *urdc, int flags, char *why)
1083 1084 {
1084 1085 DTRACE_PROBE2(rdc_set_flags_log, int, urdc->index, int, flags);
1085 1086
1086 1087 rdc_set_flags(urdc, flags);
1087 1088
1088 1089 if (why == NULL)
1089 1090 return;
1090 1091
1091 1092 if (flags & RDC_LOGGING)
1092 1093 cmn_err(CE_NOTE, "!sndr: %s:%s entered logging mode: %s",
1093 1094 urdc->secondary.intf, urdc->secondary.file, why);
1094 1095 if (flags & RDC_VOL_FAILED)
1095 1096 cmn_err(CE_NOTE, "!sndr: %s:%s volume failed: %s",
1096 1097 urdc->secondary.intf, urdc->secondary.file, why);
1097 1098 if (flags & RDC_BMP_FAILED)
1098 1099 cmn_err(CE_NOTE, "!sndr: %s:%s bitmap failed: %s",
1099 1100 urdc->secondary.intf, urdc->secondary.file, why);
1100 1101 }
1101 1102 /*
1102 1103 * rdc_lor(source, dest, len)
1103 1104 * logically OR memory pointed to by source and dest, copying result into dest.
1104 1105 */
1105 1106 void
1106 1107 rdc_lor(const uchar_t *source, uchar_t *dest, int len)
1107 1108 {
1108 1109 int i;
1109 1110
1110 1111 if (source == NULL)
1111 1112 return;
1112 1113
1113 1114 for (i = 0; i < len; i++)
1114 1115 *dest++ |= *source++;
1115 1116 }
1116 1117
1117 1118
/*
 * Check that the remote volume is at least as large as the local
 * volume_size of the set at 'index'.
 *
 * Returns 0 on success, or RDC_EGETSIZE (size query failed) /
 * RDC_ESIZE (remote too small).  On either failure the details are
 * added to kstatus and the set is switched to logging mode via
 * rdc_net_state(index, CCIO_ENABLELOG).
 */
static int
check_filesize(int index, spcs_s_info_t kstatus)
{
	uint64_t remote_size;
	char tmp1[16], tmp2[16];
	rdc_u_info_t *urdc = &rdc_u_info[index];
	int status;

	status = rdc_net_getsize(index, &remote_size);
	if (status) {
		(void) spcs_s_inttostring(status, tmp1, sizeof (tmp1), 0);
		spcs_s_add(kstatus, RDC_EGETSIZE, urdc->secondary.intf,
		    urdc->secondary.file, tmp1);
		(void) rdc_net_state(index, CCIO_ENABLELOG);
		return (RDC_EGETSIZE);
	}
	if (remote_size < (unsigned long long)urdc->volume_size) {
		(void) spcs_s_inttostring(
		    urdc->volume_size, tmp1, sizeof (tmp1), 0);
		/*
		 * Cheat, and covert to int, until we have
		 * spcs_s_unsignedlonginttostring().
		 */
		status = (int)remote_size;
		(void) spcs_s_inttostring(status, tmp2, sizeof (tmp2), 0);
		spcs_s_add(kstatus, RDC_ESIZE, urdc->primary.intf,
		    urdc->primary.file, tmp1, urdc->secondary.intf,
		    urdc->secondary.file, tmp2);
		(void) rdc_net_state(index, CCIO_ENABLELOG);
		return (RDC_ESIZE);
	}
	return (0);
}
1151 1152
1152 1153
/*
 * Service an update request from II for the named volume: verify the
 * protocol, require every set of a 1-to-many group to be in logging
 * mode, then OR the caller-supplied bitmap into the set's bitmap(s).
 * On refusal, update->denied is set and a code added to update->status.
 */
static void
rdc_volume_update_svc(intptr_t arg)
{
	rdc_update_t *update = (rdc_update_t *)arg;
	rdc_k_info_t *krdc;
	rdc_k_info_t *this;
	rdc_u_info_t *urdc;
	struct net_bdata6 bd;
	int index;
	int rc;

#ifdef DEBUG_IIUPDATE
	cmn_err(CE_NOTE, "!SNDR received update request for %s",
	    update->volume);
#endif

	if ((update->protocol != RDC_SVC_ONRETURN) &&
	    (update->protocol != RDC_SVC_VOL_ENABLED)) {
		/* don't understand what the client intends to do */
		update->denied = 1;
		spcs_s_add(update->status, RDC_EVERSION);
		return;
	}

	index = rdc_lookup_enabled(update->volume, 0);
	if (index < 0)
		return;

	/*
	 * warn II that this volume is in use by sndr so
	 * II can validate the sizes of the master vs shadow
	 * and avoid trouble later down the line with
	 * size mis-matches between urdc->volume_size and
	 * what is returned from nsc_partsize() which may
	 * be the size of the master when replicating the shadow
	 */
	if (update->protocol == RDC_SVC_VOL_ENABLED) {
		if (index >= 0)
			update->denied = 1;
		return;
	}

	krdc = &rdc_k_info[index];
	urdc = &rdc_u_info[index];
	this = krdc;

	/*
	 * First pass: every enabled set on the 1-to-many ring must be in
	 * logging mode, otherwise the whole request is refused.
	 */
	do {
		if (!(rdc_get_vflags(urdc) & RDC_LOGGING)) {
#ifdef DEBUG_IIUPDATE
			cmn_err(CE_NOTE, "!SNDR refused update request for %s",
			    update->volume);
#endif
			update->denied = 1;
			spcs_s_add(update->status, RDC_EMIRRORUP);
			return;
		}
		/* 1->many - all must be logging */
		if (IS_MANY(krdc) && IS_STATE(urdc, RDC_PRIMARY)) {
			rdc_many_enter(krdc);
			/* advance to the next enabled set on the ring */
			for (krdc = krdc->many_next; krdc != this;
			    krdc = krdc->many_next) {
				urdc = &rdc_u_info[krdc->index];
				if (!IS_ENABLED(urdc))
					continue;
				break;
			}
			rdc_many_exit(krdc);
		}
	} while (krdc != this);

#ifdef DEBUG_IIUPDATE
	cmn_err(CE_NOTE, "!SNDR allowed update request for %s", update->volume);
#endif
	urdc = &rdc_u_info[krdc->index];
	/*
	 * Second pass: OR the supplied bitmap into each set's bitmap and
	 * recount the bits set.
	 */
	do {

		bd.size = min(krdc->bitmap_size, (nsc_size_t)update->size);
		bd.data.data_val = (char *)update->bitmap;
		bd.offset = 0;
		bd.cd = index;

		if ((rc = RDC_OR_BITMAP(&bd)) != 0) {
			update->denied = 1;
			spcs_s_add(update->status, rc);
			return;
		}
		urdc = &rdc_u_info[index];
		urdc->bits_set = RDC_COUNT_BITMAP(krdc);
		if (IS_MANY(krdc) && IS_STATE(urdc, RDC_PRIMARY)) {
			rdc_many_enter(krdc);
			for (krdc = krdc->many_next; krdc != this;
			    krdc = krdc->many_next) {
				index = krdc->index;
				/*
				 * NOTE(review): urdc is not re-fetched for
				 * this krdc, so this tests the previous
				 * set's enabled state; presumably the
				 * chained set's urdc was intended — confirm
				 * before changing.
				 */
				if (!IS_ENABLED(urdc))
					continue;
				break;
			}
			rdc_many_exit(krdc);
		}
	} while (krdc != this);


	/* II (or something else) has updated us, so no need for a sync */
	if (rdc_get_vflags(urdc) & (RDC_SYNC_NEEDED | RDC_RSYNC_NEEDED)) {
		rdc_many_enter(krdc);
		rdc_clr_flags(urdc, RDC_SYNC_NEEDED | RDC_RSYNC_NEEDED);
		rdc_many_exit(krdc);
	}

	/* flush the updated bitmap to its backing store if one is open */
	if (krdc->bitmap_write > 0)
		(void) rdc_write_bitmap(krdc);
}
1265 1266
1266 1267
1267 1268 /*
1268 1269 * rdc_check()
1269 1270 *
1270 1271 * Return 0 if the set is configured, enabled and the supplied
1271 1272 * addressing information matches the in-kernel config, otherwise
1272 1273 * return 1.
1273 1274 */
static int
rdc_check(rdc_k_info_t *krdc, rdc_set_t *rdc_set)
{
	rdc_u_info_t *urdc = &rdc_u_info[krdc->index];

	ASSERT(MUTEX_HELD(&krdc->group->lock));

	/* a disabled set can never match */
	if (!IS_ENABLED(urdc))
		return (1);

	/* primary volume pathname must match */
	if (strncmp(urdc->primary.file, rdc_set->primary.file,
	    NSC_MAXPATH) != 0) {
#ifdef DEBUG
		cmn_err(CE_WARN, "!rdc_check: primary file mismatch %s vs %s",
		    urdc->primary.file, rdc_set->primary.file);
#endif
		return (1);
	}

	/* primary address is only compared when the caller supplied one */
	if (rdc_set->primary.addr.len != 0 &&
	    bcmp(urdc->primary.addr.buf, rdc_set->primary.addr.buf,
	    urdc->primary.addr.len) != 0) {
#ifdef DEBUG
		cmn_err(CE_WARN, "!rdc_check: primary address mismatch for %s",
		    urdc->primary.file);
#endif
		return (1);
	}

	/* secondary volume pathname must match */
	if (strncmp(urdc->secondary.file, rdc_set->secondary.file,
	    NSC_MAXPATH) != 0) {
#ifdef DEBUG
		cmn_err(CE_WARN, "!rdc_check: secondary file mismatch %s vs %s",
		    urdc->secondary.file, rdc_set->secondary.file);
#endif
		return (1);
	}

	/* secondary address is only compared when the caller supplied one */
	if (rdc_set->secondary.addr.len != 0 &&
	    bcmp(urdc->secondary.addr.buf, rdc_set->secondary.addr.buf,
	    urdc->secondary.addr.len) != 0) {
#ifdef DEBUG
		cmn_err(CE_WARN, "!rdc_check: secondary addr mismatch for %s",
		    urdc->secondary.file);
#endif
		return (1);
	}

	return (0);
}
1324 1325
1325 1326
1326 1327 /*
1327 1328 * Lookup enabled sets for a bitmap match
1328 1329 */
1329 1330
1330 1331 int
1331 1332 rdc_lookup_bitmap(char *pathname)
1332 1333 {
1333 1334 rdc_u_info_t *urdc;
1334 1335 #ifdef DEBUG
1335 1336 rdc_k_info_t *krdc;
1336 1337 #endif
1337 1338 int index;
1338 1339
1339 1340 for (index = 0; index < rdc_max_sets; index++) {
1340 1341 urdc = &rdc_u_info[index];
1341 1342 #ifdef DEBUG
1342 1343 krdc = &rdc_k_info[index];
1343 1344 #endif
1344 1345 ASSERT(krdc->index == index);
1345 1346 ASSERT(urdc->index == index);
1346 1347
1347 1348 if (!IS_ENABLED(urdc))
1348 1349 continue;
1349 1350
1350 1351 if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
1351 1352 if (strncmp(pathname, urdc->primary.bitmap,
1352 1353 NSC_MAXPATH) == 0)
1353 1354 return (index);
1354 1355 } else {
1355 1356 if (strncmp(pathname, urdc->secondary.bitmap,
1356 1357 NSC_MAXPATH) == 0)
1357 1358 return (index);
1358 1359 }
1359 1360 }
1360 1361
1361 1362 return (-1);
1362 1363 }
1363 1364
1364 1365
1365 1366 /*
1366 1367 * Translate a pathname to index into rdc_k_info[].
1367 1368 * Returns first match that is enabled.
1368 1369 */
1369 1370
1370 1371 int
1371 1372 rdc_lookup_enabled(char *pathname, int allow_disabling)
1372 1373 {
1373 1374 rdc_u_info_t *urdc;
1374 1375 rdc_k_info_t *krdc;
1375 1376 int index;
1376 1377
1377 1378 restart:
1378 1379 for (index = 0; index < rdc_max_sets; index++) {
1379 1380 urdc = &rdc_u_info[index];
1380 1381 krdc = &rdc_k_info[index];
1381 1382
1382 1383 ASSERT(krdc->index == index);
1383 1384 ASSERT(urdc->index == index);
1384 1385
1385 1386 if (!IS_ENABLED(urdc))
1386 1387 continue;
1387 1388
1388 1389 if (allow_disabling == 0 && krdc->type_flag & RDC_UNREGISTER)
1389 1390 continue;
1390 1391
1391 1392 if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
1392 1393 if (strncmp(pathname, urdc->primary.file,
1393 1394 NSC_MAXPATH) == 0)
1394 1395 return (index);
1395 1396 } else {
1396 1397 if (strncmp(pathname, urdc->secondary.file,
1397 1398 NSC_MAXPATH) == 0)
1398 1399 return (index);
1399 1400 }
1400 1401 }
1401 1402
1402 1403 if (allow_disabling == 0) {
1403 1404 /* None found, or only a disabling one found, so try again */
1404 1405 allow_disabling = 1;
1405 1406 goto restart;
1406 1407 }
1407 1408
1408 1409 return (-1);
1409 1410 }
1410 1411
1411 1412
1412 1413 /*
1413 1414 * Translate a pathname to index into rdc_k_info[].
1414 1415 * Returns first match that is configured.
1415 1416 *
1416 1417 * Used by enable & resume code.
1417 1418 * Must be called with rdc_conf_lock held.
1418 1419 */
1419 1420
1420 1421 int
1421 1422 rdc_lookup_configured(char *pathname)
1422 1423 {
1423 1424 rdc_u_info_t *urdc;
1424 1425 rdc_k_info_t *krdc;
1425 1426 int index;
1426 1427
1427 1428 ASSERT(MUTEX_HELD(&rdc_conf_lock));
1428 1429
1429 1430 for (index = 0; index < rdc_max_sets; index++) {
1430 1431 urdc = &rdc_u_info[index];
1431 1432 krdc = &rdc_k_info[index];
1432 1433
1433 1434 ASSERT(krdc->index == index);
1434 1435 ASSERT(urdc->index == index);
1435 1436
1436 1437 if (!IS_CONFIGURED(krdc))
1437 1438 continue;
1438 1439
1439 1440 if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
1440 1441 if (strncmp(pathname, urdc->primary.file,
1441 1442 NSC_MAXPATH) == 0)
1442 1443 return (index);
1443 1444 } else {
1444 1445 if (strncmp(pathname, urdc->secondary.file,
1445 1446 NSC_MAXPATH) == 0)
1446 1447 return (index);
1447 1448 }
1448 1449 }
1449 1450
1450 1451 return (-1);
1451 1452 }
1452 1453
1453 1454
1454 1455 /*
1455 1456 * Looks up a configured set with matching secondary interface:volume
1456 1457 * to check for illegal many-to-one volume configs. To be used during
1457 1458 * enable and resume processing.
1458 1459 *
1459 1460 * Must be called with rdc_conf_lock held.
1460 1461 */
1461 1462
1462 1463 static int
1463 1464 rdc_lookup_many2one(rdc_set_t *rdc_set)
1464 1465 {
1465 1466 rdc_u_info_t *urdc;
1466 1467 rdc_k_info_t *krdc;
1467 1468 int index;
1468 1469
1469 1470 ASSERT(MUTEX_HELD(&rdc_conf_lock));
1470 1471
1471 1472 for (index = 0; index < rdc_max_sets; index++) {
1472 1473 urdc = &rdc_u_info[index];
1473 1474 krdc = &rdc_k_info[index];
1474 1475
1475 1476 if (!IS_CONFIGURED(krdc))
1476 1477 continue;
1477 1478
1478 1479 if (strncmp(urdc->secondary.file,
1479 1480 rdc_set->secondary.file, NSC_MAXPATH) != 0)
1480 1481 continue;
1481 1482 if (strncmp(urdc->secondary.intf,
1482 1483 rdc_set->secondary.intf, MAX_RDC_HOST_SIZE) != 0)
1483 1484 continue;
1484 1485
1485 1486 break;
1486 1487 }
1487 1488
1488 1489 if (index < rdc_max_sets)
1489 1490 return (index);
1490 1491 else
1491 1492 return (-1);
1492 1493 }
1493 1494
1494 1495
1495 1496 /*
1496 1497 * Looks up an rdc set to check if it is already configured, to be used from
1497 1498 * functions called from the config ioctl where the interface names can be
1498 1499 * used for comparison.
1499 1500 *
1500 1501 * Must be called with rdc_conf_lock held.
1501 1502 */
1502 1503
1503 1504 int
1504 1505 rdc_lookup_byname(rdc_set_t *rdc_set)
1505 1506 {
1506 1507 rdc_u_info_t *urdc;
1507 1508 rdc_k_info_t *krdc;
1508 1509 int index;
1509 1510
1510 1511 ASSERT(MUTEX_HELD(&rdc_conf_lock));
1511 1512
1512 1513 for (index = 0; index < rdc_max_sets; index++) {
1513 1514 urdc = &rdc_u_info[index];
1514 1515 krdc = &rdc_k_info[index];
1515 1516
1516 1517 ASSERT(krdc->index == index);
1517 1518 ASSERT(urdc->index == index);
1518 1519
1519 1520 if (!IS_CONFIGURED(krdc))
1520 1521 continue;
1521 1522
1522 1523 if (strncmp(urdc->primary.file, rdc_set->primary.file,
1523 1524 NSC_MAXPATH) != 0)
1524 1525 continue;
1525 1526 if (strncmp(urdc->primary.intf, rdc_set->primary.intf,
1526 1527 MAX_RDC_HOST_SIZE) != 0)
1527 1528 continue;
1528 1529 if (strncmp(urdc->secondary.file, rdc_set->secondary.file,
1529 1530 NSC_MAXPATH) != 0)
1530 1531 continue;
1531 1532 if (strncmp(urdc->secondary.intf, rdc_set->secondary.intf,
1532 1533 MAX_RDC_HOST_SIZE) != 0)
1533 1534 continue;
1534 1535
1535 1536 break;
1536 1537 }
1537 1538
1538 1539 if (index < rdc_max_sets)
1539 1540 return (index);
1540 1541 else
1541 1542 return (-1);
1542 1543 }
1543 1544
1544 1545 /*
1545 1546 * Looks up a secondary hostname and device, to be used from
1546 1547 * functions called from the config ioctl where the interface names can be
1547 1548 * used for comparison.
1548 1549 *
1549 1550 * Must be called with rdc_conf_lock held.
1550 1551 */
1551 1552
1552 1553 int
1553 1554 rdc_lookup_byhostdev(char *intf, char *file)
1554 1555 {
1555 1556 rdc_u_info_t *urdc;
1556 1557 rdc_k_info_t *krdc;
1557 1558 int index;
1558 1559
1559 1560 ASSERT(MUTEX_HELD(&rdc_conf_lock));
1560 1561
1561 1562 for (index = 0; index < rdc_max_sets; index++) {
1562 1563 urdc = &rdc_u_info[index];
1563 1564 krdc = &rdc_k_info[index];
1564 1565
1565 1566 ASSERT(krdc->index == index);
1566 1567 ASSERT(urdc->index == index);
1567 1568
1568 1569 if (!IS_CONFIGURED(krdc))
1569 1570 continue;
1570 1571
1571 1572 if (strncmp(urdc->secondary.file, file,
1572 1573 NSC_MAXPATH) != 0)
1573 1574 continue;
1574 1575 if (strncmp(urdc->secondary.intf, intf,
1575 1576 MAX_RDC_HOST_SIZE) != 0)
1576 1577 continue;
1577 1578 break;
1578 1579 }
1579 1580
1580 1581 if (index < rdc_max_sets)
1581 1582 return (index);
1582 1583 else
1583 1584 return (-1);
1584 1585 }
1585 1586
1586 1587
1587 1588 /*
1588 1589 * Looks up an rdc set to see if it is currently enabled, to be used on the
1589 1590 * server so that the interface addresses must be used for comparison, as
1590 1591 * the interface names may differ from those used on the client.
1591 1592 *
1592 1593 */
1593 1594
1594 1595 int
1595 1596 rdc_lookup_byaddr(rdc_set_t *rdc_set)
1596 1597 {
1597 1598 rdc_u_info_t *urdc;
1598 1599 #ifdef DEBUG
1599 1600 rdc_k_info_t *krdc;
1600 1601 #endif
1601 1602 int index;
1602 1603
1603 1604 for (index = 0; index < rdc_max_sets; index++) {
1604 1605 urdc = &rdc_u_info[index];
1605 1606 #ifdef DEBUG
1606 1607 krdc = &rdc_k_info[index];
1607 1608 #endif
1608 1609 ASSERT(krdc->index == index);
1609 1610 ASSERT(urdc->index == index);
1610 1611
1611 1612 if (!IS_ENABLED(urdc))
1612 1613 continue;
1613 1614
1614 1615 if (strcmp(urdc->primary.file, rdc_set->primary.file) != 0)
1615 1616 continue;
1616 1617
1617 1618 if (strcmp(urdc->secondary.file, rdc_set->secondary.file) != 0)
1618 1619 continue;
1619 1620
1620 1621 if (bcmp(urdc->primary.addr.buf, rdc_set->primary.addr.buf,
1621 1622 urdc->primary.addr.len) != 0) {
1622 1623 continue;
1623 1624 }
1624 1625
1625 1626 if (bcmp(urdc->secondary.addr.buf, rdc_set->secondary.addr.buf,
1626 1627 urdc->secondary.addr.len) != 0) {
1627 1628 continue;
1628 1629 }
1629 1630
1630 1631 break;
1631 1632 }
1632 1633
1633 1634 if (index < rdc_max_sets)
1634 1635 return (index);
1635 1636 else
1636 1637 return (-1);
1637 1638 }
1638 1639
1639 1640
1640 1641 /*
1641 1642 * Return index of first multihop or 1-to-many
1642 1643 * Behavior controlled by setting ismany.
1643 1644 * ismany TRUE (one-to-many)
1644 1645 * ismany FALSE (multihops)
1645 1646 *
1646 1647 */
/*
 * Returns the index of the first companion set found, or -1 if none.
 * Caller must hold rdc_conf_lock and rdc_many_lock.
 */
static int
rdc_lookup_multimany(rdc_k_info_t *krdc, const int ismany)
{
	rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
	rdc_u_info_t *utmp;
	rdc_k_info_t *ktmp;
	char *pathname;	/* local data volume of the krdc set */
	int index;
	int role;	/* role the companion must have: RDC_PRIMARY or 0 */

	ASSERT(MUTEX_HELD(&rdc_conf_lock));
	ASSERT(MUTEX_HELD(&rdc_many_lock));

	if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
		/* this host is the primary of the krdc set */
		pathname = urdc->primary.file;
		if (ismany) {
			/*
			 * 1-many sets are linked by primary :
			 * look for matching primary on this host
			 */
			role = RDC_PRIMARY;
		} else {
			/*
			 * multihop sets link primary to secondary :
			 * look for matching secondary on this host
			 */
			role = 0;
		}
	} else {
		/* this host is the secondary of the krdc set */
		pathname = urdc->secondary.file;
		if (ismany) {
			/*
			 * 1-many sets are linked by primary, so if
			 * this host is the secondary of the set this
			 * cannot require 1-many linkage.
			 */
			return (-1);
		} else {
			/*
			 * multihop sets link primary to secondary :
			 * look for matching primary on this host
			 */
			role = RDC_PRIMARY;
		}
	}

	for (index = 0; index < rdc_max_sets; index++) {
		utmp = &rdc_u_info[index];
		ktmp = &rdc_k_info[index];

		if (!IS_CONFIGURED(ktmp)) {
			continue;
		}

		if (role == RDC_PRIMARY) {
			/*
			 * Find a primary that is this host and is not
			 * krdc but shares the same data volume as krdc.
			 */
			if ((rdc_get_vflags(utmp) & RDC_PRIMARY) &&
			    strncmp(utmp->primary.file, pathname,
			    NSC_MAXPATH) == 0 && (krdc != ktmp)) {
				break;
			}
		} else {
			/*
			 * Find a secondary that is this host and is not
			 * krdc but shares the same data volume as krdc.
			 */
			if (!(rdc_get_vflags(utmp) & RDC_PRIMARY) &&
			    strncmp(utmp->secondary.file, pathname,
			    NSC_MAXPATH) == 0 && (krdc != ktmp)) {
				break;
			}
		}
	}

	if (index < rdc_max_sets)
		return (index);
	else
		return (-1);
}
1731 1732
1732 1733 /*
1733 1734 * Returns secondary match that is configured.
1734 1735 *
1735 1736 * Used by enable & resume code.
1736 1737 * Must be called with rdc_conf_lock held.
1737 1738 */
1738 1739
1739 1740 static int
1740 1741 rdc_lookup_secondary(char *pathname)
1741 1742 {
1742 1743 rdc_u_info_t *urdc;
1743 1744 rdc_k_info_t *krdc;
1744 1745 int index;
1745 1746
1746 1747 ASSERT(MUTEX_HELD(&rdc_conf_lock));
1747 1748
1748 1749 for (index = 0; index < rdc_max_sets; index++) {
1749 1750 urdc = &rdc_u_info[index];
1750 1751 krdc = &rdc_k_info[index];
1751 1752
1752 1753 ASSERT(krdc->index == index);
1753 1754 ASSERT(urdc->index == index);
1754 1755
1755 1756 if (!IS_CONFIGURED(krdc))
1756 1757 continue;
1757 1758
1758 1759 if (!IS_STATE(urdc, RDC_PRIMARY)) {
1759 1760 if (strncmp(pathname, urdc->secondary.file,
1760 1761 NSC_MAXPATH) == 0)
1761 1762 return (index);
1762 1763 }
1763 1764 }
1764 1765
1765 1766 return (-1);
1766 1767 }
1767 1768
1768 1769
1769 1770 static nsc_fd_t *
1770 1771 rdc_open_direct(rdc_k_info_t *krdc)
1771 1772 {
1772 1773 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
1773 1774 int rc;
1774 1775
1775 1776 if (krdc->remote_fd == NULL)
1776 1777 krdc->remote_fd = nsc_open(urdc->direct_file,
1777 1778 NSC_RDCHR_ID|NSC_DEVICE|NSC_RDWR, 0, 0, &rc);
1778 1779 return (krdc->remote_fd);
1779 1780 }
1780 1781
1781 1782 static void
1782 1783 rdc_close_direct(rdc_k_info_t *krdc)
1783 1784 {
1784 1785 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
1785 1786
1786 1787 urdc->direct_file[0] = 0;
1787 1788 if (krdc->remote_fd) {
1788 1789 if (nsc_close(krdc->remote_fd) == 0) {
1789 1790 krdc->remote_fd = NULL;
1790 1791 }
1791 1792 }
1792 1793 }
1793 1794
1794 1795
#ifdef DEBUG_MANY
/*
 * Debug helper: walk the 1-to-many ring starting at 'start' and print
 * each set's volumes along with its many/multi chain pointers.
 */
static void
print_many(rdc_k_info_t *start)
{
	rdc_k_info_t *p = start;
	rdc_u_info_t *q = &rdc_u_info[p->index];

	do {
		cmn_err(CE_CONT, "!krdc %p, %s %s (many_nxt %p multi_nxt %p)\n",
		    p, q->primary.file, q->secondary.file, p->many_next,
		    p->multi_next);
		delay(10);	/* pace the output */
		p = p->many_next;
		q = &rdc_u_info[p->index];
	} while (p && p != start);
}
#endif /* DEBUG_MANY */
1812 1813
1813 1814
/*
 * Link krdc into a multihop pair if a companion set sharing the same
 * data volume exists on this host.
 *
 * Returns 0 on success (including the no-companion case), or -1 if
 * the linkage is refused because the relevant leg uses direct file
 * I/O.  Caller must hold rdc_conf_lock and rdc_many_lock.
 */
static int
add_to_multi(rdc_k_info_t *krdc)
{
	rdc_u_info_t *urdc;
	rdc_k_info_t *ktmp;
	rdc_u_info_t *utmp;
	int mindex;
	int domulti;

	urdc = &rdc_u_info[krdc->index];

	ASSERT(MUTEX_HELD(&rdc_conf_lock));
	ASSERT(MUTEX_HELD(&rdc_many_lock));

	/* Now find companion krdc */
	mindex = rdc_lookup_multimany(krdc, FALSE);

#ifdef DEBUG_MANY
	cmn_err(CE_NOTE,
	    "!add_to_multi: lookup_multimany: mindex %d prim %s sec %s",
	    mindex, urdc->primary.file, urdc->secondary.file);
#endif

	if (mindex >= 0) {
		ktmp = &rdc_k_info[mindex];
		utmp = &rdc_u_info[mindex];

		domulti = 1;

		if ((rdc_get_vflags(urdc) & RDC_PRIMARY) &&
		    ktmp->multi_next != NULL) {
			/*
			 * We are adding a new primary to a many
			 * group that is the target of a multihop, just
			 * ignore it since we are linked in elsewhere.
			 */
			domulti = 0;
		}

		if (domulti) {
			if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
				/* Is previous leg using direct file I/O? */
				if (utmp->direct_file[0] != 0) {
					/* It is, so cannot proceed */
					return (-1);
				}
			} else {
				/* Is this leg using direct file I/O? */
				if (urdc->direct_file[0] != 0) {
					/* It is, so cannot proceed */
					return (-1);
				}
			}
			/* cross-link the two sets */
			krdc->multi_next = ktmp;
			ktmp->multi_next = krdc;
		}
	} else {
		/* no companion: krdc stands alone */
		krdc->multi_next = NULL;
#ifdef DEBUG_MANY
		cmn_err(CE_NOTE, "!add_to_multi: NULL multi_next index %d",
		    krdc->index);
#endif
	}

	return (0);
}
1880 1881
1881 1882
1882 1883 /*
1883 1884 * Add a new set to the circular list of 1-to-many primaries and chain
1884 1885 * up any multihop as well.
1885 1886 */
/*
 * Returns 0 on success, -1 if the multihop linkage was refused by
 * add_to_multi().  Caller must hold rdc_conf_lock.
 */
static int
add_to_many(rdc_k_info_t *krdc)
{
	rdc_k_info_t *okrdc;
	int oindex;

	ASSERT(MUTEX_HELD(&rdc_conf_lock));

	rdc_many_enter(krdc);

	/* chain up any multihop linkage first */
	if (add_to_multi(krdc) < 0) {
		rdc_many_exit(krdc);
		return (-1);
	}

	/* look for another set sharing the same primary on this host */
	oindex = rdc_lookup_multimany(krdc, TRUE);
	if (oindex < 0) {
#ifdef DEBUG_MANY
		print_many(krdc);
#endif
		rdc_many_exit(krdc);
		return (0);
	}

	okrdc = &rdc_k_info[oindex];

#ifdef DEBUG_MANY
	print_many(okrdc);
#endif
	/* insert krdc into the circular list directly after okrdc */
	krdc->many_next = okrdc->many_next;
	okrdc->many_next = krdc;

#ifdef DEBUG_MANY
	print_many(okrdc);
#endif
	rdc_many_exit(krdc);
	return (0);
}
1924 1925
1925 1926
1926 1927 /*
1927 1928 * Remove a set from the circular list of 1-to-many primaries.
1928 1929 */
/*
 * Unlinks 'old' from its circular 1-to-many list and from any multihop
 * pairing, rehoming the multihop pointers onto a surviving ring member
 * when one exists.  Caller must hold rdc_conf_lock.
 */
static void
remove_from_many(rdc_k_info_t *old)
{
	rdc_u_info_t *uold = &rdc_u_info[old->index];
	rdc_k_info_t *p, *q;

	ASSERT(MUTEX_HELD(&rdc_conf_lock));

	rdc_many_enter(old);

#ifdef DEBUG_MANY
	cmn_err(CE_NOTE, "!rdc: before remove_from_many");
	print_many(old);
#endif

	/* singleton ring: only the multihop pairing needs undoing */
	if (old->many_next == old) {
		/* remove from multihop */
		if ((q = old->multi_next) != NULL) {
			ASSERT(q->multi_next == old);
			q->multi_next = NULL;
			old->multi_next = NULL;
		}

		rdc_many_exit(old);
		return;
	}

	/* search */
	for (p = old->many_next; p->many_next != old; p = p->many_next)
	;

	/* p is old's predecessor: splice old out of the ring */
	p->many_next = old->many_next;
	old->many_next = old;

	if ((q = old->multi_next) != NULL) {
		/*
		 * old was part of a multihop, so switch multi pointers
		 * to someone remaining on the many chain
		 */
		ASSERT(p->multi_next == NULL);

		q->multi_next = p;
		p->multi_next = q;
		old->multi_next = NULL;
	}

#ifdef DEBUG_MANY
	if (p == old) {
		cmn_err(CE_NOTE, "!rdc: after remove_from_many empty");
	} else {
		cmn_err(CE_NOTE, "!rdc: after remove_from_many");
		print_many(p);
	}
#endif

	/* re-evaluate the group mflags now that 'old' has left the ring */
	rdc_clr_mflags(&rdc_u_info[p->index],
	    (rdc_get_vflags(uold) & RDC_MFLAGS));

	rdc_many_exit(old);
}
1989 1990
1990 1991
1991 1992 static int
1992 1993 _rdc_enable(rdc_set_t *rdc_set, int options, spcs_s_info_t kstatus)
1993 1994 {
1994 1995 int index;
1995 1996 char *rhost;
1996 1997 struct netbuf *addrp;
1997 1998 rdc_k_info_t *krdc;
1998 1999 rdc_u_info_t *urdc;
1999 2000 rdc_srv_t *svp = NULL;
2000 2001 char *local_file;
2001 2002 char *local_bitmap;
2002 2003 char *diskq;
2003 2004 int rc;
2004 2005 nsc_size_t maxfbas;
2005 2006 rdc_group_t *grp;
2006 2007
2007 2008 if ((rdc_set->primary.intf[0] == 0) ||
2008 2009 (rdc_set->primary.addr.len == 0) ||
2009 2010 (rdc_set->primary.file[0] == 0) ||
2010 2011 (rdc_set->primary.bitmap[0] == 0) ||
2011 2012 (rdc_set->secondary.intf[0] == 0) ||
2012 2013 (rdc_set->secondary.addr.len == 0) ||
2013 2014 (rdc_set->secondary.file[0] == 0) ||
2014 2015 (rdc_set->secondary.bitmap[0] == 0)) {
2015 2016 spcs_s_add(kstatus, RDC_EEMPTY);
2016 2017 return (RDC_EEMPTY);
2017 2018 }
2018 2019
2019 2020 /* Next check there aren't any enabled rdc sets which match. */
2020 2021
2021 2022 mutex_enter(&rdc_conf_lock);
2022 2023
2023 2024 if (rdc_lookup_byname(rdc_set) >= 0) {
2024 2025 mutex_exit(&rdc_conf_lock);
2025 2026 spcs_s_add(kstatus, RDC_EENABLED, rdc_set->primary.intf,
2026 2027 rdc_set->primary.file, rdc_set->secondary.intf,
2027 2028 rdc_set->secondary.file);
2028 2029 return (RDC_EENABLED);
2029 2030 }
2030 2031
2031 2032 if (rdc_lookup_many2one(rdc_set) >= 0) {
2032 2033 mutex_exit(&rdc_conf_lock);
2033 2034 spcs_s_add(kstatus, RDC_EMANY2ONE, rdc_set->primary.intf,
2034 2035 rdc_set->primary.file, rdc_set->secondary.intf,
2035 2036 rdc_set->secondary.file);
2036 2037 return (RDC_EMANY2ONE);
2037 2038 }
2038 2039
2039 2040 if (rdc_set->netconfig->knc_proto == NULL) {
2040 2041 mutex_exit(&rdc_conf_lock);
2041 2042 spcs_s_add(kstatus, RDC_ENETCONFIG);
2042 2043 return (RDC_ENETCONFIG);
2043 2044 }
2044 2045
2045 2046 if (rdc_set->primary.addr.len == 0) {
2046 2047 mutex_exit(&rdc_conf_lock);
2047 2048 spcs_s_add(kstatus, RDC_ENETBUF, rdc_set->primary.file);
2048 2049 return (RDC_ENETBUF);
2049 2050 }
2050 2051
2051 2052 if (rdc_set->secondary.addr.len == 0) {
2052 2053 mutex_exit(&rdc_conf_lock);
2053 2054 spcs_s_add(kstatus, RDC_ENETBUF, rdc_set->secondary.file);
2054 2055 return (RDC_ENETBUF);
2055 2056 }
2056 2057
2057 2058 /* Check that the local data volume isn't in use as a bitmap */
2058 2059 if (options & RDC_OPT_PRIMARY)
2059 2060 local_file = rdc_set->primary.file;
2060 2061 else
2061 2062 local_file = rdc_set->secondary.file;
2062 2063 if (rdc_lookup_bitmap(local_file) >= 0) {
2063 2064 mutex_exit(&rdc_conf_lock);
2064 2065 spcs_s_add(kstatus, RDC_EVOLINUSE, local_file);
2065 2066 return (RDC_EVOLINUSE);
2066 2067 }
2067 2068
2068 2069 /* check that the secondary data volume isn't in use */
2069 2070 if (!(options & RDC_OPT_PRIMARY)) {
2070 2071 local_file = rdc_set->secondary.file;
2071 2072 if (rdc_lookup_secondary(local_file) >= 0) {
2072 2073 mutex_exit(&rdc_conf_lock);
2073 2074 spcs_s_add(kstatus, RDC_EVOLINUSE, local_file);
2074 2075 return (RDC_EVOLINUSE);
2075 2076 }
2076 2077 }
2077 2078
2078 2079 /* check that the local data vol is not in use as a diskqueue */
2079 2080 if (options & RDC_OPT_PRIMARY) {
2080 2081 if (rdc_lookup_diskq(rdc_set->primary.file) >= 0) {
2081 2082 mutex_exit(&rdc_conf_lock);
2082 2083 spcs_s_add(kstatus,
2083 2084 RDC_EVOLINUSE, rdc_set->primary.file);
2084 2085 return (RDC_EVOLINUSE);
2085 2086 }
2086 2087 }
2087 2088
2088 2089 /* Check that the bitmap isn't in use as a data volume */
2089 2090 if (options & RDC_OPT_PRIMARY)
2090 2091 local_bitmap = rdc_set->primary.bitmap;
2091 2092 else
2092 2093 local_bitmap = rdc_set->secondary.bitmap;
2093 2094 if (rdc_lookup_configured(local_bitmap) >= 0) {
2094 2095 mutex_exit(&rdc_conf_lock);
2095 2096 spcs_s_add(kstatus, RDC_EBMPINUSE, local_bitmap);
2096 2097 return (RDC_EBMPINUSE);
2097 2098 }
2098 2099
2099 2100 /* Check that the bitmap isn't already in use as a bitmap */
2100 2101 if (rdc_lookup_bitmap(local_bitmap) >= 0) {
2101 2102 mutex_exit(&rdc_conf_lock);
2102 2103 spcs_s_add(kstatus, RDC_EBMPINUSE, local_bitmap);
2103 2104 return (RDC_EBMPINUSE);
2104 2105 }
2105 2106
2106 2107 /* check that the diskq (if here) is not in use */
2107 2108 diskq = rdc_set->disk_queue;
2108 2109 if (diskq[0] && rdc_diskq_inuse(rdc_set, diskq)) {
2109 2110 mutex_exit(&rdc_conf_lock);
2110 2111 spcs_s_add(kstatus, RDC_EDISKQINUSE, diskq);
2111 2112 return (RDC_EDISKQINUSE);
2112 2113 }
2113 2114
2114 2115
2115 2116 /* Set urdc->volume_size */
2116 2117 index = rdc_dev_open(rdc_set, options);
2117 2118 if (index < 0) {
2118 2119 mutex_exit(&rdc_conf_lock);
2119 2120 if (options & RDC_OPT_PRIMARY)
2120 2121 spcs_s_add(kstatus, RDC_EOPEN, rdc_set->primary.intf,
2121 2122 rdc_set->primary.file);
2122 2123 else
2123 2124 spcs_s_add(kstatus, RDC_EOPEN, rdc_set->secondary.intf,
2124 2125 rdc_set->secondary.file);
2125 2126 return (RDC_EOPEN);
2126 2127 }
2127 2128
2128 2129 urdc = &rdc_u_info[index];
2129 2130 krdc = &rdc_k_info[index];
2130 2131
2131 2132 /* copy relevant parts of rdc_set to urdc field by field */
2132 2133
2133 2134 (void) strncpy(urdc->primary.intf, rdc_set->primary.intf,
2134 2135 MAX_RDC_HOST_SIZE);
2135 2136 (void) strncpy(urdc->secondary.intf, rdc_set->secondary.intf,
2136 2137 MAX_RDC_HOST_SIZE);
2137 2138
2138 2139 (void) strncpy(urdc->group_name, rdc_set->group_name, NSC_MAXPATH);
2139 2140 (void) strncpy(urdc->disk_queue, rdc_set->disk_queue, NSC_MAXPATH);
2140 2141
2141 2142 dup_rdc_netbuf(&rdc_set->primary.addr, &urdc->primary.addr);
2142 2143 (void) strncpy(urdc->primary.file, rdc_set->primary.file, NSC_MAXPATH);
2143 2144 (void) strncpy(urdc->primary.bitmap, rdc_set->primary.bitmap,
2144 2145 NSC_MAXPATH);
2145 2146
2146 2147 dup_rdc_netbuf(&rdc_set->secondary.addr, &urdc->secondary.addr);
2147 2148 (void) strncpy(urdc->secondary.file, rdc_set->secondary.file,
2148 2149 NSC_MAXPATH);
2149 2150 (void) strncpy(urdc->secondary.bitmap, rdc_set->secondary.bitmap,
2150 2151 NSC_MAXPATH);
2151 2152
2152 2153 urdc->setid = rdc_set->setid;
2153 2154
2154 2155 /*
2155 2156 * before we try to add to group, or create one, check out
2156 2157 * if we are doing the wrong thing with the diskq
2157 2158 */
2158 2159
2159 2160 if (urdc->disk_queue[0] && (options & RDC_OPT_SYNC)) {
2160 2161 mutex_exit(&rdc_conf_lock);
2161 2162 rdc_dev_close(krdc);
2162 2163 spcs_s_add(kstatus, RDC_EQWRONGMODE);
2163 2164 return (RDC_EQWRONGMODE);
2164 2165 }
2165 2166
2166 2167 if ((rc = add_to_group(krdc, options, RDC_CMD_ENABLE)) != 0) {
2167 2168 mutex_exit(&rdc_conf_lock);
2168 2169 rdc_dev_close(krdc);
2169 2170 if (rc == RDC_EQNOADD) {
2170 2171 spcs_s_add(kstatus, RDC_EQNOADD, rdc_set->disk_queue);
2171 2172 return (RDC_EQNOADD);
2172 2173 } else {
2173 2174 spcs_s_add(kstatus, RDC_EGROUP,
2174 2175 rdc_set->primary.intf, rdc_set->primary.file,
2175 2176 rdc_set->secondary.intf, rdc_set->secondary.file,
2176 2177 rdc_set->group_name);
2177 2178 return (RDC_EGROUP);
2178 2179 }
2179 2180 }
2180 2181
2181 2182 /*
2182 2183 * maxfbas was set in rdc_dev_open as primary's maxfbas.
2183 2184 * If diskq's maxfbas is smaller, then use diskq's.
2184 2185 */
2185 2186 grp = krdc->group;
2186 2187 if (grp && RDC_IS_DISKQ(grp) && (grp->diskqfd != 0)) {
2187 2188 rc = _rdc_rsrv_diskq(grp);
2188 2189 if (RDC_SUCCESS(rc)) {
2189 2190 rc = nsc_maxfbas(grp->diskqfd, 0, &maxfbas);
2190 2191 if (rc == 0) {
2191 2192 #ifdef DEBUG
2192 2193 if (krdc->maxfbas != maxfbas)
2193 2194 cmn_err(CE_NOTE,
2194 2195 "!_rdc_enable: diskq maxfbas = %"
2195 2196 NSC_SZFMT ", primary maxfbas = %"
2196 2197 NSC_SZFMT, maxfbas, krdc->maxfbas);
2197 2198 #endif
2198 2199 krdc->maxfbas = min(krdc->maxfbas, maxfbas);
2199 2200 } else {
2200 2201 cmn_err(CE_WARN,
2201 2202 "!_rdc_enable: diskq maxfbas failed (%d)",
2202 2203 rc);
2203 2204 }
2204 2205 _rdc_rlse_diskq(grp);
2205 2206 } else {
2206 2207 cmn_err(CE_WARN,
2207 2208 "!_rdc_enable: diskq reserve failed (%d)", rc);
2208 2209 }
2209 2210 }
2210 2211
2211 2212 rdc_init_flags(urdc);
2212 2213 (void) strncpy(urdc->direct_file, rdc_set->direct_file, NSC_MAXPATH);
2213 2214 if ((options & RDC_OPT_PRIMARY) && rdc_set->direct_file[0]) {
2214 2215 if (rdc_open_direct(krdc) == NULL)
2215 2216 rdc_set_flags(urdc, RDC_FCAL_FAILED);
2216 2217 }
2217 2218
2218 2219 krdc->many_next = krdc;
2219 2220
2220 2221 ASSERT(krdc->type_flag == 0);
2221 2222 krdc->type_flag = RDC_CONFIGURED;
2222 2223
2223 2224 if (options & RDC_OPT_PRIMARY)
2224 2225 rdc_set_flags(urdc, RDC_PRIMARY);
2225 2226
2226 2227 if (options & RDC_OPT_ASYNC)
2227 2228 krdc->type_flag |= RDC_ASYNCMODE;
2228 2229
2229 2230 set_busy(krdc);
2230 2231 urdc->syshostid = rdc_set->syshostid;
2231 2232
2232 2233 if (add_to_many(krdc) < 0) {
2233 2234 mutex_exit(&rdc_conf_lock);
2234 2235
2235 2236 rdc_group_enter(krdc);
2236 2237
2237 2238 spcs_s_add(kstatus, RDC_EMULTI);
2238 2239 rc = RDC_EMULTI;
2239 2240 goto fail;
2240 2241 }
2241 2242
2242 2243 /* Configured but not enabled */
2243 2244 ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));
2244 2245
2245 2246 mutex_exit(&rdc_conf_lock);
2246 2247
2247 2248 rdc_group_enter(krdc);
2248 2249
2249 2250 /* Configured but not enabled */
2250 2251 ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));
2251 2252
2252 2253 /*
2253 2254 * The rdc set is configured but not yet enabled. Other operations must
2254 2255 * ignore this set until it is enabled.
2255 2256 */
2256 2257
2257 2258 urdc->sync_pos = 0;
2258 2259
2259 2260 if (rdc_set->maxqfbas > 0)
2260 2261 urdc->maxqfbas = rdc_set->maxqfbas;
2261 2262 else
2262 2263 urdc->maxqfbas = rdc_maxthres_queue;
2263 2264
2264 2265 if (rdc_set->maxqitems > 0)
2265 2266 urdc->maxqitems = rdc_set->maxqitems;
2266 2267 else
2267 2268 urdc->maxqitems = rdc_max_qitems;
2268 2269
2269 2270 if (rdc_set->asyncthr > 0)
2270 2271 urdc->asyncthr = rdc_set->asyncthr;
2271 2272 else
2272 2273 urdc->asyncthr = rdc_asyncthr;
2273 2274
2274 2275 if (urdc->autosync == -1) {
2275 2276 /* Still unknown */
2276 2277 if (rdc_set->autosync > 0)
2277 2278 urdc->autosync = 1;
2278 2279 else
2279 2280 urdc->autosync = 0;
2280 2281 }
2281 2282
2282 2283 urdc->netconfig = rdc_set->netconfig;
2283 2284
2284 2285 if (options & RDC_OPT_PRIMARY) {
2285 2286 rhost = rdc_set->secondary.intf;
2286 2287 addrp = &rdc_set->secondary.addr;
2287 2288 } else {
2288 2289 rhost = rdc_set->primary.intf;
2289 2290 addrp = &rdc_set->primary.addr;
2290 2291 }
2291 2292
2292 2293 if (options & RDC_OPT_ASYNC)
2293 2294 rdc_set_flags(urdc, RDC_ASYNC);
2294 2295
2295 2296 svp = rdc_create_svinfo(rhost, addrp, urdc->netconfig);
2296 2297 if (svp == NULL) {
2297 2298 spcs_s_add(kstatus, ENOMEM);
2298 2299 rc = ENOMEM;
2299 2300 goto fail;
2300 2301 }
2301 2302 urdc->netconfig = NULL; /* This will be no good soon */
2302 2303
2303 2304 rdc_kstat_create(index);
2304 2305
2305 2306 /* Don't set krdc->intf here */
2306 2307
2307 2308 if (rdc_enable_bitmap(krdc, options & RDC_OPT_SETBMP) < 0)
2308 2309 goto bmpfail;
2309 2310
2310 2311 RDC_ZERO_BITREF(krdc);
2311 2312 if (krdc->lsrv == NULL)
2312 2313 krdc->lsrv = svp;
2313 2314 else {
2314 2315 #ifdef DEBUG
2315 2316 cmn_err(CE_WARN, "!_rdc_enable: krdc->lsrv already set: %p",
2316 2317 (void *) krdc->lsrv);
2317 2318 #endif
2318 2319 rdc_destroy_svinfo(svp);
2319 2320 }
2320 2321 svp = NULL;
2321 2322
2322 2323 /* Configured but not enabled */
2323 2324 ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));
2324 2325
2325 2326 /* And finally */
2326 2327
2327 2328 krdc->remote_index = -1;
2328 2329 /* Should we set the whole group logging? */
2329 2330 rdc_set_flags(urdc, RDC_ENABLED | RDC_LOGGING);
2330 2331
2331 2332 rdc_group_exit(krdc);
2332 2333
2333 2334 if (rdc_intercept(krdc) != 0) {
2334 2335 rdc_group_enter(krdc);
2335 2336 rdc_clr_flags(urdc, RDC_ENABLED);
2336 2337 if (options & RDC_OPT_PRIMARY)
2337 2338 spcs_s_add(kstatus, RDC_EREGISTER, urdc->primary.file);
2338 2339 else
2339 2340 spcs_s_add(kstatus, RDC_EREGISTER,
2340 2341 urdc->secondary.file);
2341 2342 #ifdef DEBUG
2342 2343 cmn_err(CE_NOTE, "!nsc_register_path failed %s",
2343 2344 urdc->primary.file);
2344 2345 #endif
2345 2346 rc = RDC_EREGISTER;
2346 2347 goto bmpfail;
2347 2348 }
2348 2349 #ifdef DEBUG
2349 2350 cmn_err(CE_NOTE, "!SNDR: enabled %s %s", urdc->primary.file,
2350 2351 urdc->secondary.file);
2351 2352 #endif
2352 2353
2353 2354 rdc_write_state(urdc);
2354 2355
2355 2356 mutex_enter(&rdc_conf_lock);
2356 2357 wakeup_busy(krdc);
2357 2358 mutex_exit(&rdc_conf_lock);
2358 2359
2359 2360 return (0);
2360 2361
2361 2362 bmpfail:
2362 2363 if (options & RDC_OPT_PRIMARY)
2363 2364 spcs_s_add(kstatus, RDC_EBITMAP, rdc_set->primary.bitmap);
2364 2365 else
2365 2366 spcs_s_add(kstatus, RDC_EBITMAP, rdc_set->secondary.bitmap);
2366 2367 rc = RDC_EBITMAP;
2367 2368 if (rdc_get_vflags(urdc) & RDC_ENABLED) {
2368 2369 rdc_group_exit(krdc);
2369 2370 (void) rdc_unintercept(krdc);
2370 2371 rdc_group_enter(krdc);
2371 2372 }
2372 2373
2373 2374 fail:
2374 2375 rdc_kstat_delete(index);
2375 2376 rdc_group_exit(krdc);
2376 2377 if (krdc->intf) {
2377 2378 rdc_if_t *ip = krdc->intf;
2378 2379 mutex_enter(&rdc_conf_lock);
2379 2380 krdc->intf = NULL;
2380 2381 rdc_remove_from_if(ip);
2381 2382 mutex_exit(&rdc_conf_lock);
2382 2383 }
2383 2384 rdc_group_enter(krdc);
2384 2385 /* Configured but not enabled */
2385 2386 ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));
2386 2387
2387 2388 rdc_dev_close(krdc);
2388 2389 rdc_close_direct(krdc);
2389 2390 rdc_destroy_svinfo(svp);
2390 2391
2391 2392 /* Configured but not enabled */
2392 2393 ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));
2393 2394
2394 2395 rdc_group_exit(krdc);
2395 2396
2396 2397 mutex_enter(&rdc_conf_lock);
2397 2398
2398 2399 /* Configured but not enabled */
2399 2400 ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));
2400 2401
2401 2402 remove_from_group(krdc);
2402 2403
2403 2404 if (IS_MANY(krdc) || IS_MULTI(krdc))
2404 2405 remove_from_many(krdc);
2405 2406
2406 2407 rdc_u_init(urdc);
2407 2408
2408 2409 ASSERT(krdc->type_flag & RDC_CONFIGURED);
2409 2410 krdc->type_flag = 0;
2410 2411 wakeup_busy(krdc);
2411 2412
2412 2413 mutex_exit(&rdc_conf_lock);
2413 2414
2414 2415 return (rc);
2415 2416 }
2416 2417
2417 2418 static int
2418 2419 rdc_enable(rdc_config_t *uparms, spcs_s_info_t kstatus)
2419 2420 {
2420 2421 int rc;
2421 2422 char itmp[10];
2422 2423
2423 2424 if (!(uparms->options & RDC_OPT_SYNC) &&
2424 2425 !(uparms->options & RDC_OPT_ASYNC)) {
2425 2426 rc = RDC_EEINVAL;
2426 2427 (void) spcs_s_inttostring(
2427 2428 uparms->options, itmp, sizeof (itmp), 1);
2428 2429 spcs_s_add(kstatus, RDC_EEINVAL, itmp);
2429 2430 goto done;
2430 2431 }
2431 2432
2432 2433 if (!(uparms->options & RDC_OPT_PRIMARY) &&
2433 2434 !(uparms->options & RDC_OPT_SECONDARY)) {
2434 2435 rc = RDC_EEINVAL;
2435 2436 (void) spcs_s_inttostring(
2436 2437 uparms->options, itmp, sizeof (itmp), 1);
2437 2438 spcs_s_add(kstatus, RDC_EEINVAL, itmp);
2438 2439 goto done;
2439 2440 }
2440 2441
2441 2442 if (!(uparms->options & RDC_OPT_SETBMP) &&
2442 2443 !(uparms->options & RDC_OPT_CLRBMP)) {
2443 2444 rc = RDC_EEINVAL;
2444 2445 (void) spcs_s_inttostring(
2445 2446 uparms->options, itmp, sizeof (itmp), 1);
2446 2447 spcs_s_add(kstatus, RDC_EEINVAL, itmp);
2447 2448 goto done;
2448 2449 }
2449 2450
2450 2451 rc = _rdc_enable(uparms->rdc_set, uparms->options, kstatus);
2451 2452 done:
2452 2453 return (rc);
2453 2454 }
2454 2455
/* ARGSUSED */
/*
 * _rdc_disable: tear down one configured SNDR set.
 *
 * Entered with krdc->type_flag RDC_DISABLEPEND already set by the caller
 * (rdc_disable).  Verifies the request matches the configured set, halts
 * any sync in progress, drains or abandons the async queue, unregisters
 * the io-provider intercept and interface, releases the bitmap and data
 * devices, and finally clears the krdc/urdc state under rdc_conf_lock.
 *
 * Returns 0 on success, RDC_EALREADY if the request does not match the
 * configured set, or RDC_EQNOTEMPTY if a disk queue still holds data.
 */
static int
_rdc_disable(rdc_k_info_t *krdc, rdc_config_t *uap, spcs_s_info_t kstatus)
{
	rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
	rdc_if_t *ip;
	int index = krdc->index;
	disk_queue *q;
	rdc_set_t *rdc_set = uap->rdc_set;

	ASSERT(krdc->group != NULL);
	rdc_group_enter(krdc);
#ifdef DEBUG
	ASSERT(rdc_check(krdc, rdc_set) == 0);
#else
	/* non-DEBUG: mismatched set is an error unless force-disable */
	if (((uap->options & RDC_OPT_FORCE_DISABLE) == 0) &&
	    rdc_check(krdc, rdc_set)) {
		rdc_group_exit(krdc);
		spcs_s_add(kstatus, RDC_EALREADY, rdc_set->primary.file,
		    rdc_set->secondary.file);
		return (RDC_EALREADY);
	}
#endif

	if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
		halt_sync(krdc);
		ASSERT(IS_ENABLED(urdc));
	}
	q = &krdc->group->diskq;

	/*
	 * Refuse to disable an async diskq set that is still replicating
	 * with queued data; clear our DISABLEPEND so the set stays usable.
	 */
	if (IS_ASYNC(urdc) && RDC_IS_DISKQ(krdc->group) &&
	    ((!IS_STATE(urdc, RDC_LOGGING)) && (!QEMPTY(q)))) {
		krdc->type_flag &= ~RDC_DISABLEPEND;
		rdc_group_exit(krdc);
		spcs_s_add(kstatus, RDC_EQNOTEMPTY, urdc->disk_queue);
		return (RDC_EQNOTEMPTY);
	}
	rdc_group_exit(krdc);
	(void) rdc_unintercept(krdc);

#ifdef DEBUG
	cmn_err(CE_NOTE, "!SNDR: disabled %s %s", urdc->primary.file,
	    urdc->secondary.file);
#endif

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	/*
	 * No new io can come in through the io provider.
	 * Wait for the async flusher to finish.
	 */

	if (IS_ASYNC(urdc) && !RDC_IS_DISKQ(krdc->group)) {
		int tries = 2; /* in case of hopelessly stuck flusher threads */
#ifdef DEBUG
		net_queue *qp = &krdc->group->ra_queue;
#endif
		do {
			if (!krdc->group->rdc_writer)
				(void) rdc_writer(krdc->index);

			(void) rdc_drain_queue(krdc->index);

		} while (krdc->group->rdc_writer && tries--);

		/* ok, force it to happen... */
		if (rdc_drain_queue(krdc->index) != 0) {
			do {
				/*
				 * asyncdis tells the flushers to abandon
				 * the queue; loop until every group thread
				 * has gone away.
				 */
				mutex_enter(&krdc->group->ra_queue.net_qlock);
				krdc->group->asyncdis = 1;
				cv_broadcast(&krdc->group->asyncqcv);
				mutex_exit(&krdc->group->ra_queue.net_qlock);
				cmn_err(CE_WARN,
				    "!SNDR: async I/O pending and not flushed "
				    "for %s during disable",
				    urdc->primary.file);
#ifdef DEBUG
				cmn_err(CE_WARN,
				    "!nitems: %" NSC_SZFMT " nblocks: %"
				    NSC_SZFMT " head: 0x%p tail: 0x%p",
				    qp->nitems, qp->blocks,
				    (void *)qp->net_qhead,
				    (void *)qp->net_qtail);
#endif
			} while (krdc->group->rdc_thrnum > 0);
		}
	}

	/* detach from the network interface under the config lock */
	mutex_enter(&rdc_conf_lock);
	ip = krdc->intf;
	krdc->intf = 0;

	if (ip) {
		rdc_remove_from_if(ip);
	}

	mutex_exit(&rdc_conf_lock);

	rdc_group_enter(krdc);

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	/* Must not hold group lock during this function */
	rdc_group_exit(krdc);
	while (rdc_dump_alloc_bufs_cd(krdc->index) == EAGAIN)
		delay(2);
	rdc_group_enter(krdc);

	(void) rdc_clear_state(krdc);

	rdc_free_bitmap(krdc, RDC_CMD_DISABLE);
	rdc_close_bitmap(krdc);

	rdc_dev_close(krdc);
	rdc_close_direct(krdc);

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	rdc_group_exit(krdc);

	/*
	 * we should now unregister the queue, with no conflicting
	 * locks held. This is the last(only) member of the group
	 */
	if (krdc->group && RDC_IS_DISKQ(krdc->group) &&
	    krdc->group->count == 1) { /* stop protecting queue */
		rdc_unintercept_diskq(krdc->group);
	}

	mutex_enter(&rdc_conf_lock);

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	wait_busy(krdc);

	if (IS_MANY(krdc) || IS_MULTI(krdc))
		remove_from_many(krdc);

	remove_from_group(krdc);

	/* scrub the per-set kernel state back to "unconfigured" */
	krdc->remote_index = -1;
	ASSERT(krdc->type_flag & RDC_CONFIGURED);
	ASSERT(krdc->type_flag & RDC_DISABLEPEND);
	krdc->type_flag = 0;
#ifdef DEBUG
	if (krdc->dcio_bitmap)
		cmn_err(CE_WARN, "!_rdc_disable: possible mem leak, "
		    "dcio_bitmap");
#endif
	krdc->dcio_bitmap = NULL;
	krdc->bitmap_ref = NULL;
	krdc->bitmap_size = 0;
	krdc->maxfbas = 0;
	krdc->bitmap_write = 0;
	krdc->disk_status = 0;
	rdc_destroy_svinfo(krdc->lsrv);
	krdc->lsrv = NULL;
	krdc->multi_next = NULL;

	rdc_u_init(urdc);

	mutex_exit(&rdc_conf_lock);
	rdc_kstat_delete(index);

	return (0);
}
2625 2626
2626 2627 static int
2627 2628 rdc_disable(rdc_config_t *uparms, spcs_s_info_t kstatus)
2628 2629 {
2629 2630 rdc_k_info_t *krdc;
2630 2631 int index;
2631 2632 int rc;
2632 2633
2633 2634 mutex_enter(&rdc_conf_lock);
2634 2635
2635 2636 index = rdc_lookup_byname(uparms->rdc_set);
2636 2637 if (index >= 0)
2637 2638 krdc = &rdc_k_info[index];
2638 2639 if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
2639 2640 mutex_exit(&rdc_conf_lock);
2640 2641 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
2641 2642 uparms->rdc_set->secondary.file);
2642 2643 return (RDC_EALREADY);
2643 2644 }
2644 2645
2645 2646 krdc->type_flag |= RDC_DISABLEPEND;
2646 2647 wait_busy(krdc);
2647 2648 if (krdc->type_flag == 0) {
2648 2649 /* A resume or enable failed */
2649 2650 mutex_exit(&rdc_conf_lock);
2650 2651 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
2651 2652 uparms->rdc_set->secondary.file);
2652 2653 return (RDC_EALREADY);
2653 2654 }
2654 2655 mutex_exit(&rdc_conf_lock);
2655 2656
2656 2657 rc = _rdc_disable(krdc, uparms, kstatus);
2657 2658 return (rc);
2658 2659 }
2659 2660
2660 2661
/*
 * Checks whether the state of one of the other sets in the 1-many or
 * multi-hop config should prevent a sync from starting on this one.
 * Return NULL if no just cause or impediment is found, otherwise return
 * a pointer to the offending set.
 *
 * urdc must be a primary (asserted below); options carries the
 * RDC_OPT_FORWARD / RDC_OPT_REVERSE direction of the requested sync.
 * As a side effect, on a permitted reverse sync this clears stale
 * SYNC_NEEDED / RSYNC_NEEDED flags on sets this sync will supersede.
 * The whole walk runs under the many lock.
 */
static rdc_u_info_t *
rdc_allow_pri_sync(rdc_u_info_t *urdc, int options)
{
	rdc_k_info_t *krdc = &rdc_k_info[urdc->index];
	rdc_k_info_t *ktmp;
	rdc_u_info_t *utmp;
	rdc_k_info_t *kmulti = NULL;

	ASSERT(rdc_get_vflags(urdc) & RDC_PRIMARY);

	rdc_many_enter(krdc);

	/*
	 * In the reverse sync case we need to check the previous leg of
	 * the multi-hop config. The link to that set can be from any of
	 * the 1-many list, so as we go through we keep an eye open for it.
	 */
	if ((options & RDC_OPT_REVERSE) && (IS_MULTI(krdc))) {
		/* This set links to the first leg */
		ktmp = krdc->multi_next;
		utmp = &rdc_u_info[ktmp->index];
		if (IS_ENABLED(utmp))
			kmulti = ktmp;
	}

	if (IS_MANY(krdc)) {
		for (ktmp = krdc->many_next; ktmp != krdc;
		    ktmp = ktmp->many_next) {
			utmp = &rdc_u_info[ktmp->index];

			if (!IS_ENABLED(utmp))
				continue;

			if (options & RDC_OPT_FORWARD) {
				/*
				 * Reverse sync needed is bad, as it means a
				 * reverse sync in progress or started and
				 * didn't complete, so this primary volume
				 * is not consistent. So we shouldn't copy
				 * it to its secondary.
				 */
				if (rdc_get_mflags(utmp) & RDC_RSYNC_NEEDED) {
					rdc_many_exit(krdc);
					return (utmp);
				}
			} else {
				/* Reverse, so see if we need to spot kmulti */
				if ((kmulti == NULL) && (IS_MULTI(ktmp))) {
					/* This set links to the first leg */
					kmulti = ktmp->multi_next;
					if (!IS_ENABLED(
					    &rdc_u_info[kmulti->index]))
						kmulti = NULL;
				}

				/*
				 * Non-logging is bad, as the bitmap will
				 * be updated with the bits for this sync.
				 */
				if (!(rdc_get_vflags(utmp) & RDC_LOGGING)) {
					rdc_many_exit(krdc);
					return (utmp);
				}
			}
		}
	}

	if (kmulti) {
		utmp = &rdc_u_info[kmulti->index];
		ktmp = kmulti;	/* In case we decide we do need to use ktmp */

		ASSERT(options & RDC_OPT_REVERSE);

		if (IS_REPLICATING(utmp)) {
			/*
			 * Replicating is bad as data is already flowing to
			 * the target of the requested sync operation.
			 */
			rdc_many_exit(krdc);
			return (utmp);
		}

		if (rdc_get_vflags(utmp) & RDC_SYNCING) {
			/*
			 * Forward sync in progress is bad, as data is
			 * already flowing to the target of the requested
			 * sync operation.
			 * Reverse sync in progress is bad, as the primary
			 * has already decided which data to copy.
			 */
			rdc_many_exit(krdc);
			return (utmp);
		}

		/*
		 * Clear the "sync needed" flags, as the multi-hop secondary
		 * will be updated via this requested sync operation, so does
		 * not need to complete its aborted forward sync.
		 */
		if (rdc_get_vflags(utmp) & RDC_SYNC_NEEDED)
			rdc_clr_flags(utmp, RDC_SYNC_NEEDED);
	}

	if (IS_MANY(krdc) && (options & RDC_OPT_REVERSE)) {
		for (ktmp = krdc->many_next; ktmp != krdc;
		    ktmp = ktmp->many_next) {
			utmp = &rdc_u_info[ktmp->index];
			if (!IS_ENABLED(utmp))
				continue;

			/*
			 * Clear any "reverse sync needed" flags, as the
			 * volume will be updated via this requested
			 * sync operation, so does not need to complete
			 * its aborted reverse sync.
			 */
			if (rdc_get_mflags(utmp) & RDC_RSYNC_NEEDED)
				rdc_clr_mflags(utmp, RDC_RSYNC_NEEDED);
		}
	}

	rdc_many_exit(krdc);

	return (NULL);
}
2792 2793
/*
 * _rdc_sync_wrthr: forward-sync worker — read one chunk of the local
 * volume and push it to the remote site with rdc_net_write().
 *
 * thrinfo is an rdc_syncthr_t describing the chunk (offset/len/krdc/
 * status).  On any failure the chunk's offset is recorded in
 * syncinfo->status->offset so the sync loop can detect the error
 * (see _rdc_sync_status_ok); success leaves status untouched.
 */
static void
_rdc_sync_wrthr(void *thrinfo)
{
	rdc_syncthr_t *syncinfo = (rdc_syncthr_t *)thrinfo;
	nsc_buf_t *handle = NULL;
	rdc_k_info_t *krdc = syncinfo->krdc;
	int rc;
	int tries = 0;

	DTRACE_PROBE2(rdc_sync_loop_netwrite_start, int, krdc->index,
	    nsc_buf_t *, handle);

retry:
	/* read the local data for this chunk, bypassing the cache */
	rc = nsc_alloc_buf(RDC_U_FD(krdc), syncinfo->offset, syncinfo->len,
	    NSC_READ | NSC_NOCACHE, &handle);

	if (!RDC_SUCCESS(rc) || krdc->remote_index < 0) {
		DTRACE_PROBE(rdc_sync_wrthr_alloc_buf_err);
		goto failed;
	}

	/* bail if the set has failed or lost its bitmap meanwhile */
	rdc_group_enter(krdc);
	if ((krdc->disk_status == 1) || (krdc->dcio_bitmap == NULL)) {
		rdc_group_exit(krdc);
		goto failed;
	}
	rdc_group_exit(krdc);

	if ((rc = rdc_net_write(krdc->index, krdc->remote_index, handle,
	    handle->sb_pos, handle->sb_len, RDC_NOSEQ, RDC_NOQUE, NULL)) > 0) {
		rdc_u_info_t *urdc = &rdc_u_info[krdc->index];

		/*
		 * The following is to handle
		 * the case where the secondary side
		 * has thrown our buffer handle token away in a
		 * attempt to preserve its health on restart
		 */
		if ((rc == EPROTO) && (tries < 3)) {
			(void) nsc_free_buf(handle);
			handle = NULL;
			tries++;
			delay(HZ >> 2);
			goto retry;
		}

		DTRACE_PROBE(rdc_sync_wrthr_remote_write_err);
		cmn_err(CE_WARN, "!rdc_sync_wrthr: remote write failed (%d) "
		    "0x%x", rc, rdc_get_vflags(urdc));

		goto failed;
	}
	(void) nsc_free_buf(handle);
	handle = NULL;

	return;
failed:
	/* nsc_free_buf(NULL) is tolerated here; record the failed offset */
	(void) nsc_free_buf(handle);
	syncinfo->status->offset = syncinfo->offset;
}
2853 2854
/*
 * see above comments on _rdc_sync_wrthr
 *
 * _rdc_sync_rdthr: reverse-sync worker — pull one chunk from the remote
 * site with rdc_net_read() and write it to the local volume.  A local
 * write failure marks the volume failed (RDC_VOL_FAILED) and persists
 * the state.  Errors are reported the same way as _rdc_sync_wrthr, via
 * syncinfo->status->offset.
 */
static void
_rdc_sync_rdthr(void *thrinfo)
{
	rdc_syncthr_t *syncinfo = (rdc_syncthr_t *)thrinfo;
	nsc_buf_t *handle = NULL;
	rdc_k_info_t *krdc = syncinfo->krdc;
	rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
	int rc;

	/* allocate a write-through buffer over the chunk we will fill */
	rc = nsc_alloc_buf(RDC_U_FD(krdc), syncinfo->offset, syncinfo->len,
	    NSC_WRITE | NSC_WRTHRU | NSC_NOCACHE, &handle);

	if (!RDC_SUCCESS(rc) || krdc->remote_index < 0) {
		goto failed;
	}
	/* bail if the set has failed or lost its bitmap meanwhile */
	rdc_group_enter(krdc);
	if ((krdc->disk_status == 1) || (krdc->dcio_bitmap == NULL)) {
		rdc_group_exit(krdc);
		goto failed;
	}
	rdc_group_exit(krdc);

	rc = rdc_net_read(krdc->index, krdc->remote_index, handle,
	    handle->sb_pos, handle->sb_len);

	if (!RDC_SUCCESS(rc)) {
		cmn_err(CE_WARN, "!rdc_sync_rdthr: remote read failed(%d)", rc);
		goto failed;
	}
	/* on an update (non-full) sync, propagate bits to 1-many siblings */
	if (!IS_STATE(urdc, RDC_FULL))
		rdc_set_bitmap_many(krdc, handle->sb_pos, handle->sb_len);

	rc = nsc_write(handle, handle->sb_pos, handle->sb_len, 0);

	if (!RDC_SUCCESS(rc)) {
		rdc_many_enter(krdc);
		rdc_set_flags_log(urdc, RDC_VOL_FAILED, "nsc_write failed");
		rdc_many_exit(krdc);
		rdc_write_state(urdc);
		goto failed;
	}

	(void) nsc_free_buf(handle);
	handle = NULL;

	return;
failed:
	/* record the failed offset for _rdc_sync_status_ok() */
	(void) nsc_free_buf(handle);
	syncinfo->status->offset = syncinfo->offset;
}
2907 2908
/*
 * _rdc_sync_thread
 * sync loop worker thread
 * if there are avail threads, we have not
 * used up the pipe, so the sync loop will, if
 * possible use these to multithread the write/read
 *
 * Dispatches one chunk to _rdc_sync_rdthr (reverse, RDC_SLAVE) or
 * _rdc_sync_wrthr (forward), clears the chunk's bitmap bits on success,
 * then frees its rdc_syncthr_t and updates both the global sync-thread
 * accounting (sync_info) and the per-set completion count (krdc->syncs).
 */
void
_rdc_sync_thread(void *thrinfo)
{
	rdc_syncthr_t *syncinfo = (rdc_syncthr_t *)thrinfo;
	rdc_k_info_t *krdc = syncinfo->krdc;
	rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
	rdc_thrsync_t *sync = &krdc->syncs;
	uint_t bitmask;
	int rc;

	rc = _rdc_rsrv_devs(krdc, RDC_RAW, RDC_INTERNAL);
	if (!RDC_SUCCESS(rc))
		goto failed;

	/* RDC_SLAVE means we are the target of a reverse sync: read */
	if (IS_STATE(urdc, RDC_SLAVE))
		_rdc_sync_rdthr(thrinfo);
	else
		_rdc_sync_wrthr(thrinfo);

	_rdc_rlse_devs(krdc, RDC_RAW);

	if (krdc->dcio_bitmap == NULL) {
#ifdef DEBUG
		cmn_err(CE_NOTE, "!_rdc_sync_wrthr: NULL bitmap");
#else
		/*EMPTY*/
#endif
	} else if (syncinfo->status->offset < 0) {
		/* chunk transferred cleanly: clear its bitmap bits */

		RDC_SET_BITMASK(syncinfo->offset, syncinfo->len, &bitmask);
		RDC_CLR_BITMAP(krdc, syncinfo->offset, syncinfo->len, \
		    bitmask, RDC_BIT_FORCE);
	}

failed:
	/*
	 * done with this, get rid of it.
	 * the status is not freed, it should still be a status chain
	 * that _rdc_sync() has the head of
	 */
	kmem_free(syncinfo, sizeof (*syncinfo));

	/*
	 * decrement the global sync thread num
	 */
	mutex_enter(&sync_info.lock);
	sync_info.active_thr--;
	/* LINTED */
	RDC_AVAIL_THR_TUNE(sync_info);
	mutex_exit(&sync_info.lock);

	/*
	 * krdc specific stuff
	 */
	mutex_enter(&sync->lock);
	sync->complete++;
	cv_broadcast(&sync->cv);
	mutex_exit(&sync->lock);
}
2974 2975
2975 2976 int
2976 2977 _rdc_setup_syncthr(rdc_syncthr_t **synthr, nsc_off_t offset,
2977 2978 nsc_size_t len, rdc_k_info_t *krdc, sync_status_t *stats)
2978 2979 {
2979 2980 rdc_syncthr_t *tmp;
2980 2981 /* alloc here, free in the sync thread */
2981 2982 tmp =
2982 2983 (rdc_syncthr_t *)kmem_zalloc(sizeof (rdc_syncthr_t), KM_NOSLEEP);
2983 2984
2984 2985 if (tmp == NULL)
2985 2986 return (-1);
2986 2987 tmp->offset = offset;
2987 2988 tmp->len = len;
2988 2989 tmp->status = stats;
2989 2990 tmp->krdc = krdc;
2990 2991
2991 2992 *synthr = tmp;
2992 2993 return (0);
2993 2994 }
2994 2995
2995 2996 sync_status_t *
2996 2997 _rdc_new_sync_status()
2997 2998 {
2998 2999 sync_status_t *s;
2999 3000
3000 3001 s = (sync_status_t *)kmem_zalloc(sizeof (*s), KM_NOSLEEP);
3001 3002 s->offset = -1;
3002 3003 return (s);
3003 3004 }
3004 3005
3005 3006 void
3006 3007 _rdc_free_sync_status(sync_status_t *status)
3007 3008 {
3008 3009 sync_status_t *s;
3009 3010
3010 3011 while (status) {
3011 3012 s = status->next;
3012 3013 kmem_free(status, sizeof (*status));
3013 3014 status = s;
3014 3015 }
3015 3016 }
3016 3017 int
3017 3018 _rdc_sync_status_ok(sync_status_t *status, int *offset)
3018 3019 {
3019 3020 #ifdef DEBUG_SYNCSTATUS
3020 3021 int i = 0;
3021 3022 #endif
3022 3023 while (status) {
3023 3024 if (status->offset >= 0) {
3024 3025 *offset = status->offset;
3025 3026 return (-1);
3026 3027 }
3027 3028 status = status->next;
3028 3029 #ifdef DEBUG_SYNCSTATUS
3029 3030 i++;
3030 3031 #endif
3031 3032 }
3032 3033 #ifdef DEBUGSYNCSTATUS
3033 3034 cmn_err(CE_NOTE, "!rdc_sync_status_ok: checked %d statuses", i);
3034 3035 #endif
3035 3036 return (0);
3036 3037 }
3037 3038
/*
 * Tunable consumed by the sync loop; presumably non-zero permits
 * multi-threaded sync transfers — NOTE(review): confirm against
 * _rdc_sync()'s use of mtsync (body not fully visible here).
 */
int mtsync = 1;
3039 3040 /*
3040 3041 * _rdc_sync() : rdc sync loop
3041 3042 *
3042 3043 */
3043 3044 static void
3044 3045 _rdc_sync(rdc_k_info_t *krdc)
3045 3046 {
3046 3047 nsc_size_t size = 0;
3047 3048 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
3048 3049 int rtype;
3049 3050 int sts;
3050 3051 int reserved = 0;
3051 3052 nsc_buf_t *alloc_h = NULL;
3052 3053 nsc_buf_t *handle = NULL;
3053 3054 nsc_off_t mask;
3054 3055 nsc_size_t maxbit;
3055 3056 nsc_size_t len;
3056 3057 nsc_off_t offset = 0;
3057 3058 int sync_completed = 0;
3058 3059 int tries = 0;
3059 3060 int rc;
3060 3061 int queuing = 0;
3061 3062 uint_t bitmask;
3062 3063 sync_status_t *ss, *sync_status = NULL;
3063 3064 rdc_thrsync_t *sync = &krdc->syncs;
3064 3065 rdc_syncthr_t *syncinfo;
3065 3066 nsthread_t *trc = NULL;
3066 3067
3067 3068 if (IS_STATE(urdc, RDC_QUEUING) && !IS_STATE(urdc, RDC_FULL)) {
3068 3069 /* flusher is handling the sync in the update case */
3069 3070 queuing = 1;
3070 3071 goto sync_done;
3071 3072 }
3072 3073
3073 3074 /*
3074 3075 * Main sync/resync loop
3075 3076 */
3076 3077 DTRACE_PROBE(rdc_sync_loop_start);
3077 3078
3078 3079 rtype = RDC_RAW;
3079 3080 sts = _rdc_rsrv_devs(krdc, rtype, RDC_INTERNAL);
3080 3081
3081 3082 DTRACE_PROBE(rdc_sync_loop_rsrv);
3082 3083
3083 3084 if (sts != 0)
3084 3085 goto failed_noincr;
3085 3086
3086 3087 reserved = 1;
3087 3088
3088 3089 /*
3089 3090 * pre-allocate a handle if we can - speeds up the sync.
3090 3091 */
3091 3092
3092 3093 if (rdc_prealloc_handle) {
3093 3094 alloc_h = nsc_alloc_handle(RDC_U_FD(krdc), NULL, NULL, NULL);
3094 3095 #ifdef DEBUG
3095 3096 if (!alloc_h) {
3096 3097 cmn_err(CE_WARN,
3097 3098 "!rdc sync: failed to pre-alloc handle");
3098 3099 }
3099 3100 #endif
3100 3101 } else {
3101 3102 alloc_h = NULL;
3102 3103 }
3103 3104
3104 3105 ASSERT(urdc->volume_size != 0);
3105 3106 size = urdc->volume_size;
3106 3107 mask = ~(LOG_TO_FBA_NUM(1) - 1);
3107 3108 maxbit = FBA_TO_LOG_NUM(size - 1);
3108 3109
3109 3110 /*
3110 3111 * as this while loop can also move data, it is counted as a
3111 3112 * sync loop thread
3112 3113 */
3113 3114 rdc_group_enter(krdc);
3114 3115 rdc_clr_flags(urdc, RDC_LOGGING);
3115 3116 rdc_set_flags(urdc, RDC_SYNCING);
3116 3117 krdc->group->synccount++;
3117 3118 rdc_group_exit(krdc);
3118 3119 mutex_enter(&sync_info.lock);
3119 3120 sync_info.active_thr++;
3120 3121 /* LINTED */
3121 3122 RDC_AVAIL_THR_TUNE(sync_info);
3122 3123 mutex_exit(&sync_info.lock);
3123 3124
3124 3125 while (offset < size) {
3125 3126 rdc_group_enter(krdc);
3126 3127 ASSERT(krdc->aux_state & RDC_AUXSYNCIP);
3127 3128 if (krdc->disk_status == 1 || krdc->dcio_bitmap == NULL) {
3128 3129 rdc_group_exit(krdc);
3129 3130 if (krdc->disk_status == 1) {
3130 3131 DTRACE_PROBE(rdc_sync_loop_disk_status_err);
3131 3132 } else {
3132 3133 DTRACE_PROBE(rdc_sync_loop_dcio_bitmap_err);
3133 3134 }
3134 3135 goto failed; /* halt sync */
3135 3136 }
3136 3137 rdc_group_exit(krdc);
3137 3138
3138 3139 if (!(rdc_get_vflags(urdc) & RDC_FULL)) {
3139 3140 mutex_enter(&krdc->syncbitmutex);
3140 3141 krdc->syncbitpos = FBA_TO_LOG_NUM(offset);
3141 3142 len = 0;
3142 3143
3143 3144 /* skip unnecessary chunks */
3144 3145
3145 3146 while (krdc->syncbitpos <= maxbit &&
3146 3147 !RDC_BIT_ISSET(krdc, krdc->syncbitpos)) {
3147 3148 offset += LOG_TO_FBA_NUM(1);
3148 3149 krdc->syncbitpos++;
3149 3150 }
3150 3151
3151 3152 /* check for boundary */
3152 3153
3153 3154 if (offset >= size) {
3154 3155 mutex_exit(&krdc->syncbitmutex);
3155 3156 goto sync_done;
3156 3157 }
3157 3158
3158 3159 /* find maximal length we can transfer */
3159 3160
3160 3161 while (krdc->syncbitpos <= maxbit &&
3161 3162 RDC_BIT_ISSET(krdc, krdc->syncbitpos)) {
3162 3163 len += LOG_TO_FBA_NUM(1);
3163 3164 krdc->syncbitpos++;
3164 3165 /* we can only read maxfbas anyways */
3165 3166 if (len >= krdc->maxfbas)
3166 3167 break;
3167 3168 }
3168 3169
3169 3170 len = min(len, (size - offset));
3170 3171
3171 3172 } else {
3172 3173 len = size - offset;
3173 3174 }
3174 3175
3175 3176 /* truncate to the io provider limit */
3176 3177 ASSERT(krdc->maxfbas != 0);
3177 3178 len = min(len, krdc->maxfbas);
3178 3179
3179 3180 if (len > LOG_TO_FBA_NUM(1)) {
3180 3181 /*
3181 3182 * If the update is larger than a bitmap chunk,
3182 3183 * then truncate to a whole number of bitmap
3183 3184 * chunks.
3184 3185 *
3185 3186 * If the update is smaller than a bitmap
3186 3187 * chunk, this must be the last write.
3187 3188 */
3188 3189 len &= mask;
3189 3190 }
3190 3191
3191 3192 if (!(rdc_get_vflags(urdc) & RDC_FULL)) {
3192 3193 krdc->syncbitpos = FBA_TO_LOG_NUM(offset + len);
3193 3194 mutex_exit(&krdc->syncbitmutex);
3194 3195 }
3195 3196
3196 3197 /*
3197 3198 * Find out if we can reserve a thread here ...
3198 3199 * note: skip the mutex for the first check, if the number
3199 3200 * is up there, why bother even grabbing the mutex to
3200 3201 * only realize that we can't have a thread anyways
3201 3202 */
3202 3203
3203 3204 if (mtsync && sync_info.active_thr < RDC_MAX_SYNC_THREADS) {
3204 3205
3205 3206 mutex_enter(&sync_info.lock);
3206 3207 if (sync_info.avail_thr >= 1) {
3207 3208 if (sync_status == NULL) {
3208 3209 ss = sync_status =
3209 3210 _rdc_new_sync_status();
3210 3211 } else {
3211 3212 ss = ss->next = _rdc_new_sync_status();
3212 3213 }
3213 3214 if (ss == NULL) {
3214 3215 mutex_exit(&sync_info.lock);
3215 3216 #ifdef DEBUG
3216 3217 cmn_err(CE_WARN, "!rdc_sync: can't "
3217 3218 "allocate status for mt sync");
3218 3219 #endif
3219 3220 goto retry;
3220 3221 }
3221 3222 /*
3222 3223 * syncinfo protected by sync_info lock but
3223 3224 * not part of the sync_info structure
3224 3225 * be careful if moving
3225 3226 */
3226 3227 if (_rdc_setup_syncthr(&syncinfo,
3227 3228 offset, len, krdc, ss) < 0) {
3228 3229 _rdc_free_sync_status(ss);
3229 3230 }
3230 3231
3231 3232 trc = nst_create(sync_info.rdc_syncset,
3232 3233 _rdc_sync_thread, syncinfo, NST_SLEEP);
3233 3234
3234 3235 if (trc == NULL) {
3235 3236 mutex_exit(&sync_info.lock);
3236 3237 #ifdef DEBUG
3237 3238 cmn_err(CE_NOTE, "!rdc_sync: unable to "
3238 3239 "mt sync");
3239 3240 #endif
3240 3241 _rdc_free_sync_status(ss);
3241 3242 kmem_free(syncinfo, sizeof (*syncinfo));
3242 3243 syncinfo = NULL;
3243 3244 goto retry;
3244 3245 } else {
3245 3246 mutex_enter(&sync->lock);
3246 3247 sync->threads++;
3247 3248 mutex_exit(&sync->lock);
3248 3249 }
3249 3250
3250 3251 sync_info.active_thr++;
3251 3252 /* LINTED */
3252 3253 RDC_AVAIL_THR_TUNE(sync_info);
3253 3254
3254 3255 mutex_exit(&sync_info.lock);
3255 3256 goto threaded;
3256 3257 }
3257 3258 mutex_exit(&sync_info.lock);
3258 3259 }
3259 3260 retry:
3260 3261 handle = alloc_h;
3261 3262 DTRACE_PROBE(rdc_sync_loop_allocbuf_start);
3262 3263 if (rdc_get_vflags(urdc) & RDC_SLAVE)
3263 3264 sts = nsc_alloc_buf(RDC_U_FD(krdc), offset, len,
3264 3265 NSC_WRITE | NSC_WRTHRU | NSC_NOCACHE, &handle);
3265 3266 else
3266 3267 sts = nsc_alloc_buf(RDC_U_FD(krdc), offset, len,
3267 3268 NSC_READ | NSC_NOCACHE, &handle);
3268 3269
3269 3270 DTRACE_PROBE(rdc_sync_loop_allocbuf_end);
3270 3271 if (sts > 0) {
3271 3272 if (handle && handle != alloc_h) {
3272 3273 (void) nsc_free_buf(handle);
3273 3274 }
3274 3275
3275 3276 handle = NULL;
3276 3277 DTRACE_PROBE(rdc_sync_loop_allocbuf_err);
3277 3278 goto failed;
3278 3279 }
3279 3280
3280 3281 if (rdc_get_vflags(urdc) & RDC_SLAVE) {
3281 3282 /* overwrite buffer with remote data */
3282 3283 sts = rdc_net_read(krdc->index, krdc->remote_index,
3283 3284 handle, handle->sb_pos, handle->sb_len);
3284 3285
3285 3286 if (!RDC_SUCCESS(sts)) {
3286 3287 #ifdef DEBUG
3287 3288 cmn_err(CE_WARN,
3288 3289 "!rdc sync: remote read failed (%d)", sts);
3289 3290 #endif
3290 3291 DTRACE_PROBE(rdc_sync_loop_remote_read_err);
3291 3292 goto failed;
3292 3293 }
3293 3294 if (!(rdc_get_vflags(urdc) & RDC_FULL))
3294 3295 rdc_set_bitmap_many(krdc, handle->sb_pos,
3295 3296 handle->sb_len);
3296 3297
3297 3298 /* commit locally */
3298 3299
3299 3300 sts = nsc_write(handle, handle->sb_pos,
3300 3301 handle->sb_len, 0);
3301 3302
3302 3303 if (!RDC_SUCCESS(sts)) {
3303 3304 /* reverse sync needed already set */
3304 3305 rdc_many_enter(krdc);
3305 3306 rdc_set_flags_log(urdc, RDC_VOL_FAILED,
3306 3307 "write failed during sync");
3307 3308 rdc_many_exit(krdc);
3308 3309 rdc_write_state(urdc);
3309 3310 DTRACE_PROBE(rdc_sync_loop_nsc_write_err);
3310 3311 goto failed;
3311 3312 }
3312 3313 } else {
3313 3314 /* send local data to remote */
3314 3315 DTRACE_PROBE2(rdc_sync_loop_netwrite_start,
3315 3316 int, krdc->index, nsc_buf_t *, handle);
3316 3317
3317 3318 if ((sts = rdc_net_write(krdc->index,
3318 3319 krdc->remote_index, handle, handle->sb_pos,
3319 3320 handle->sb_len, RDC_NOSEQ, RDC_NOQUE, NULL)) > 0) {
3320 3321
3321 3322 /*
3322 3323 * The following is to handle
3323 3324 * the case where the secondary side
3324 3325 * has thrown our buffer handle token away in a
3325 3326 * attempt to preserve its health on restart
3326 3327 */
3327 3328 if ((sts == EPROTO) && (tries < 3)) {
3328 3329 (void) nsc_free_buf(handle);
3329 3330 handle = NULL;
3330 3331 tries++;
3331 3332 delay(HZ >> 2);
3332 3333 goto retry;
3333 3334 }
3334 3335 #ifdef DEBUG
3335 3336 cmn_err(CE_WARN,
3336 3337 "!rdc sync: remote write failed (%d) 0x%x",
3337 3338 sts, rdc_get_vflags(urdc));
3338 3339 #endif
3339 3340 DTRACE_PROBE(rdc_sync_loop_netwrite_err);
3340 3341 goto failed;
3341 3342 }
3342 3343 DTRACE_PROBE(rdc_sync_loop_netwrite_end);
3343 3344 }
3344 3345
3345 3346 (void) nsc_free_buf(handle);
3346 3347 handle = NULL;
3347 3348
3348 3349 if (krdc->dcio_bitmap == NULL) {
3349 3350 #ifdef DEBUG
3350 3351 cmn_err(CE_NOTE, "!_rdc_sync: NULL bitmap");
3351 3352 #else
3352 3353 ;
3353 3354 /*EMPTY*/
3354 3355 #endif
3355 3356 } else {
3356 3357
3357 3358 RDC_SET_BITMASK(offset, len, &bitmask);
3358 3359 RDC_CLR_BITMAP(krdc, offset, len, bitmask, \
3359 3360 RDC_BIT_FORCE);
3360 3361 ASSERT(!IS_ASYNC(urdc));
3361 3362 }
3362 3363
3363 3364 /*
3364 3365 * Only release/reserve if someone is waiting
3365 3366 */
3366 3367 if (krdc->devices->id_release || nsc_waiting(RDC_U_FD(krdc))) {
3367 3368 DTRACE_PROBE(rdc_sync_loop_rlse_start);
3368 3369 if (alloc_h) {
3369 3370 (void) nsc_free_handle(alloc_h);
3370 3371 alloc_h = NULL;
3371 3372 }
3372 3373
3373 3374 _rdc_rlse_devs(krdc, rtype);
3374 3375 reserved = 0;
3375 3376 delay(2);
3376 3377
3377 3378 rtype = RDC_RAW;
3378 3379 sts = _rdc_rsrv_devs(krdc, rtype, RDC_INTERNAL);
3379 3380 if (sts != 0) {
3380 3381 handle = NULL;
3381 3382 DTRACE_PROBE(rdc_sync_loop_rdc_rsrv_err);
3382 3383 goto failed;
3383 3384 }
3384 3385
3385 3386 reserved = 1;
3386 3387
3387 3388 if (rdc_prealloc_handle) {
3388 3389 alloc_h = nsc_alloc_handle(RDC_U_FD(krdc),
3389 3390 NULL, NULL, NULL);
3390 3391 #ifdef DEBUG
3391 3392 if (!alloc_h) {
3392 3393 cmn_err(CE_WARN, "!rdc_sync: "
3393 3394 "failed to pre-alloc handle");
3394 3395 }
3395 3396 #endif
3396 3397 }
3397 3398 DTRACE_PROBE(rdc_sync_loop_rlse_end);
3398 3399 }
3399 3400 threaded:
3400 3401 offset += len;
3401 3402 urdc->sync_pos = offset;
3402 3403 }
3403 3404
3404 3405 sync_done:
3405 3406 sync_completed = 1;
3406 3407
3407 3408 failed:
3408 3409 krdc->group->synccount--;
3409 3410 failed_noincr:
3410 3411 mutex_enter(&sync->lock);
3411 3412 while (sync->complete != sync->threads) {
3412 3413 cv_wait(&sync->cv, &sync->lock);
3413 3414 }
3414 3415 sync->complete = 0;
3415 3416 sync->threads = 0;
3416 3417 mutex_exit(&sync->lock);
3417 3418
3418 3419 /*
3419 3420 * if sync_completed is 0 here,
3420 3421 * we know that the main sync thread failed anyway
3421 3422 * so just free the statuses and fail
3422 3423 */
3423 3424 if (sync_completed && (_rdc_sync_status_ok(sync_status, &rc) < 0)) {
3424 3425 urdc->sync_pos = rc;
3425 3426 sync_completed = 0; /* at least 1 thread failed */
3426 3427 }
3427 3428
3428 3429 _rdc_free_sync_status(sync_status);
3429 3430
3430 3431 /*
3431 3432 * we didn't increment, we didn't even sync,
3432 3433 * so don't dec sync_info.active_thr
3433 3434 */
3434 3435 if (!queuing) {
3435 3436 mutex_enter(&sync_info.lock);
3436 3437 sync_info.active_thr--;
3437 3438 /* LINTED */
3438 3439 RDC_AVAIL_THR_TUNE(sync_info);
3439 3440 mutex_exit(&sync_info.lock);
3440 3441 }
3441 3442
3442 3443 if (handle) {
3443 3444 (void) nsc_free_buf(handle);
3444 3445 }
3445 3446
3446 3447 if (alloc_h) {
3447 3448 (void) nsc_free_handle(alloc_h);
3448 3449 }
3449 3450
3450 3451 if (reserved) {
3451 3452 _rdc_rlse_devs(krdc, rtype);
3452 3453 }
3453 3454
3454 3455 notstarted:
3455 3456 rdc_group_enter(krdc);
3456 3457 ASSERT(krdc->aux_state & RDC_AUXSYNCIP);
3457 3458 if (IS_STATE(urdc, RDC_QUEUING))
3458 3459 rdc_clr_flags(urdc, RDC_QUEUING);
3459 3460
3460 3461 if (sync_completed) {
3461 3462 (void) rdc_net_state(krdc->index, CCIO_DONE);
3462 3463 } else {
3463 3464 (void) rdc_net_state(krdc->index, CCIO_ENABLELOG);
3464 3465 }
3465 3466
3466 3467 rdc_clr_flags(urdc, RDC_SYNCING);
3467 3468 if (rdc_get_vflags(urdc) & RDC_SLAVE) {
3468 3469 rdc_many_enter(krdc);
3469 3470 rdc_clr_mflags(urdc, RDC_SLAVE);
3470 3471 rdc_many_exit(krdc);
3471 3472 }
3472 3473 if (krdc->type_flag & RDC_ASYNCMODE)
3473 3474 rdc_set_flags(urdc, RDC_ASYNC);
3474 3475 if (sync_completed) {
3475 3476 rdc_many_enter(krdc);
3476 3477 rdc_clr_mflags(urdc, RDC_RSYNC_NEEDED);
3477 3478 rdc_many_exit(krdc);
3478 3479 } else {
3479 3480 krdc->remote_index = -1;
3480 3481 rdc_set_flags_log(urdc, RDC_LOGGING, "sync failed to complete");
3481 3482 }
3482 3483 rdc_group_exit(krdc);
3483 3484 rdc_write_state(urdc);
3484 3485
3485 3486 mutex_enter(&net_blk_lock);
3486 3487 if (sync_completed)
3487 3488 krdc->sync_done = RDC_COMPLETED;
3488 3489 else
3489 3490 krdc->sync_done = RDC_FAILED;
3490 3491 cv_broadcast(&krdc->synccv);
3491 3492 mutex_exit(&net_blk_lock);
3492 3493
3493 3494 }
3494 3495
3495 3496
/*
 * rdc_sync - ioctl entry point that validates a sync request and kicks off
 * the actual data copy (_rdc_sync) in a separate kernel process.
 *
 * uparms->options selects direction (RDC_OPT_FORWARD / RDC_OPT_REVERSE) and
 * scope (RDC_OPT_UPDATE for an update sync, otherwise a full sync).
 * kstatus collects user-visible error detail via spcs_s_add().
 * Returns 0 on success or an RDC_E* error code.
 *
 * Locking: rdc_conf_lock guards set lookup and the busy handshake; the
 * group lock is held across all state validation/mutation; net_blk_lock
 * protects the sync_done / synccv handshake with _rdc_sync.
 */
3496 3497 static int
3497 3498 rdc_sync(rdc_config_t *uparms, spcs_s_info_t kstatus)
3498 3499 {
3499 3500 rdc_set_t *rdc_set = uparms->rdc_set;
3500 3501 int options = uparms->options;
3501 3502 int rc = 0;
3502 3503 int busy = 0;
3503 3504 int index;
3504 3505 rdc_k_info_t *krdc;
3505 3506 rdc_u_info_t *urdc;
3506 3507 rdc_k_info_t *kmulti;
3507 3508 rdc_u_info_t *umulti;
3508 3509 rdc_group_t *group;
3509 3510 rdc_srv_t *svp;
3510 3511 int sm, um, md;
3511 3512 int sync_completed = 0;
3512 3513 int thrcount;
3513 3514
3514 3515 mutex_enter(&rdc_conf_lock);
3515 3516 index = rdc_lookup_byname(rdc_set);
3516 3517 if (index >= 0)
3517 3518 krdc = &rdc_k_info[index];
/* short-circuit guarantees krdc is only read when index >= 0 */
3518 3519 if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
3519 3520 mutex_exit(&rdc_conf_lock);
3520 3521 spcs_s_add(kstatus, RDC_EALREADY, rdc_set->primary.file,
3521 3522 rdc_set->secondary.file);
3522 3523 rc = RDC_EALREADY;
3523 3524 goto notstarted;
3524 3525 }
3525 3526
3526 3527 urdc = &rdc_u_info[index];
3527 3528 group = krdc->group;
/* mark the set busy so teardown waits for us; cleared at notstarted */
3528 3529 set_busy(krdc);
3529 3530 busy = 1;
3530 3531 if ((krdc->type_flag == 0) || (krdc->type_flag & RDC_DISABLEPEND)) {
3531 3532 /* A resume or enable failed or we raced with a teardown */
3532 3533 mutex_exit(&rdc_conf_lock);
3533 3534 spcs_s_add(kstatus, RDC_EALREADY, rdc_set->primary.file,
3534 3535 rdc_set->secondary.file);
3535 3536 rc = RDC_EALREADY;
3536 3537 goto notstarted;
3537 3538 }
3538 3539 mutex_exit(&rdc_conf_lock);
3539 3540 rdc_group_enter(krdc);
3540 3541
3541 3542 if (!IS_STATE(urdc, RDC_LOGGING)) {
3542 3543 spcs_s_add(kstatus, RDC_ESETNOTLOGGING, urdc->secondary.intf,
3543 3544 urdc->secondary.file);
/*
 * NOTE(review): status detail uses RDC_ESETNOTLOGGING but the
 * return code is RDC_ENOTLOGGING - confirm the mismatch is
 * intentional before "fixing" either side.
 */
3544 3545 rc = RDC_ENOTLOGGING;
3545 3546 goto notstarted_unlock;
3546 3547 }
3547 3548
3548 3549 if (rdc_check(krdc, rdc_set)) {
3549 3550 spcs_s_add(kstatus, RDC_EALREADY, rdc_set->primary.file,
3550 3551 rdc_set->secondary.file);
3551 3552 rc = RDC_EALREADY;
3552 3553 goto notstarted_unlock;
3553 3554 }
3554 3555
/* only the primary side may drive a sync */
3555 3556 if (!(rdc_get_vflags(urdc) & RDC_PRIMARY)) {
3556 3557 spcs_s_add(kstatus, RDC_ENOTPRIMARY, rdc_set->primary.intf,
3557 3558 rdc_set->primary.file, rdc_set->secondary.intf,
3558 3559 rdc_set->secondary.file);
3559 3560 rc = RDC_ENOTPRIMARY;
3560 3561 goto notstarted_unlock;
3561 3562 }
3562 3563
3563 3564 if ((options & RDC_OPT_REVERSE) && (IS_STATE(urdc, RDC_QUEUING))) {
3564 3565 /*
3565 3566 * cannot reverse sync when queuing, need to go logging first
3566 3567 */
3567 3568 spcs_s_add(kstatus, RDC_EQNORSYNC, rdc_set->primary.intf,
3568 3569 rdc_set->primary.file, rdc_set->secondary.intf,
3569 3570 rdc_set->secondary.file);
3570 3571 rc = RDC_EQNORSYNC;
3571 3572 goto notstarted_unlock;
3572 3573 }
3573 3574
3574 3575 svp = krdc->lsrv;
3575 3576 krdc->intf = rdc_add_to_if(svp, &(urdc->primary.addr),
3576 3577 &(urdc->secondary.addr), 1);
3577 3578
3578 3579 if (!krdc->intf) {
3579 3580 spcs_s_add(kstatus, RDC_EADDTOIF, urdc->primary.intf,
3580 3581 urdc->secondary.intf);
3581 3582 rc = RDC_EADDTOIF;
3582 3583 goto notstarted_unlock;
3583 3584 }
3584 3585
3585 3586 if (urdc->volume_size == 0) {
3586 3587 /* Implies reserve failed when previous resume was done */
3587 3588 rdc_get_details(krdc);
3588 3589 }
3589 3590 if (urdc->volume_size == 0) {
3590 3591 spcs_s_add(kstatus, RDC_ENOBMAP);
3591 3592 rc = RDC_ENOBMAP;
3592 3593 goto notstarted_unlock;
3593 3594 }
3594 3595
3595 3596 if (krdc->dcio_bitmap == NULL) {
3596 3597 if (rdc_resume_bitmap(krdc) < 0) {
3597 3598 spcs_s_add(kstatus, RDC_ENOBMAP);
3598 3599 rc = RDC_ENOBMAP;
3599 3600 goto notstarted_unlock;
3600 3601 }
3601 3602 }
3602 3603
3603 3604 if ((rdc_get_vflags(urdc) & RDC_BMP_FAILED) && (krdc->bitmapfd)) {
3604 3605 if (rdc_reset_bitmap(krdc)) {
3605 3606 spcs_s_add(kstatus, RDC_EBITMAP);
3606 3607 rc = RDC_EBITMAP;
3607 3608 goto notstarted_unlock;
3608 3609 }
3609 3610 }
3610 3611
3611 3612 if (IS_MANY(krdc) || IS_MULTI(krdc)) {
3612 3613 rdc_u_info_t *ubad;
3613 3614
3614 3615 if ((ubad = rdc_allow_pri_sync(urdc, options)) != NULL) {
3615 3616 spcs_s_add(kstatus, RDC_ESTATE,
3616 3617 ubad->primary.intf, ubad->primary.file,
3617 3618 ubad->secondary.intf, ubad->secondary.file);
3618 3619 rc = RDC_ESTATE;
3619 3620 goto notstarted_unlock;
3620 3621 }
3621 3622 }
3622 3623
3623 3624 /*
3624 3625 * there is a small window where _rdc_sync is still
3625 3626 * running, but has cleared the RDC_SYNCING flag.
3626 3627 * Use aux_state which is only cleared
3627 3628 * after _rdc_sync had done its 'death' broadcast.
3628 3629 */
3629 3630 if (krdc->aux_state & RDC_AUXSYNCIP) {
3630 3631 #ifdef DEBUG
/*
 * FIX: was "!rdc_get_vflags(urdc) & RDC_SYNCING" - unary '!'
 * binds tighter than '&', so the old test computed
 * (!flags) & RDC_SYNCING and this warning could never fire
 * for the intended condition.  Parenthesize the mask test.
 */
3631 3632 if (!(rdc_get_vflags(urdc) & RDC_SYNCING)) {
3632 3633 cmn_err(CE_WARN, "!rdc_sync: "
3633 3634 "RDC_AUXSYNCIP set, SYNCING off");
3634 3635 }
3635 3636 #endif
3636 3637 spcs_s_add(kstatus, RDC_ESYNCING, rdc_set->primary.file);
3637 3638 rc = RDC_ESYNCING;
3638 3639 goto notstarted_unlock;
3639 3640 }
3640 3641 if (krdc->disk_status == 1) {
3641 3642 spcs_s_add(kstatus, RDC_ESYNCING, rdc_set->primary.file);
3642 3643 rc = RDC_ESYNCING;
3643 3644 goto notstarted_unlock;
3644 3645 }
3645 3646
3646 3647 if ((options & RDC_OPT_FORWARD) &&
3647 3648 (rdc_get_mflags(urdc) & RDC_RSYNC_NEEDED)) {
3648 3649 /* cannot forward sync if a reverse sync is needed */
3649 3650 spcs_s_add(kstatus, RDC_ERSYNCNEEDED, rdc_set->primary.intf,
3650 3651 rdc_set->primary.file, rdc_set->secondary.intf,
3651 3652 rdc_set->secondary.file);
3652 3653 rc = RDC_ERSYNCNEEDED;
3653 3654 goto notstarted_unlock;
3654 3655 }
3655 3656
3656 3657 urdc->sync_pos = 0;
3657 3658
3658 3659 /* Check if the rdc set is accessible on the remote node */
3659 3660 if (rdc_net_getstate(krdc, &sm, &um, &md, FALSE) < 0) {
3660 3661 /*
3661 3662 * Remote end may be inaccessible, or the rdc set is not
3662 3663 * enabled at the remote end.
3663 3664 */
3664 3665 spcs_s_add(kstatus, RDC_ECONNOPEN, urdc->secondary.intf,
3665 3666 urdc->secondary.file);
3666 3667 rc = RDC_ECONNOPEN;
3667 3668 goto notstarted_unlock;
3668 3669 }
3669 3670 if (options & RDC_OPT_REVERSE)
3670 3671 krdc->remote_index = rdc_net_state(index, CCIO_RSYNC);
3671 3672 else
3672 3673 krdc->remote_index = rdc_net_state(index, CCIO_SLAVE);
3673 3674 if (krdc->remote_index < 0) {
3674 3675 /*
3675 3676 * Remote note probably not in a valid state to be synced,
3676 3677 * as the state was fetched OK above.
3677 3678 */
3678 3679 spcs_s_add(kstatus, RDC_ERSTATE, urdc->secondary.intf,
3679 3680 urdc->secondary.file, urdc->primary.intf,
3680 3681 urdc->primary.file);
3681 3682 rc = RDC_ERSTATE;
3682 3683 goto notstarted_unlock;
3683 3684 }
3684 3685
3685 3686 rc = check_filesize(index, kstatus);
3686 3687 if (rc != 0) {
3687 3688 (void) rdc_net_state(krdc->index, CCIO_ENABLELOG);
3688 3689 goto notstarted_unlock;
3689 3690 }
3690 3691
3691 3692 krdc->sync_done = 0;
3692 3693
/*
 * From here on RDC_AUXSYNCIP marks a sync in progress; it is
 * cleared again at the 'failed' label (halt_sync waits on it).
 */
3693 3694 mutex_enter(&krdc->bmapmutex);
3694 3695 krdc->aux_state |= RDC_AUXSYNCIP;
3695 3696 mutex_exit(&krdc->bmapmutex);
3696 3697
3697 3698 if (options & RDC_OPT_REVERSE) {
3698 3699 rdc_many_enter(krdc);
3699 3700 rdc_set_mflags(urdc, RDC_SLAVE | RDC_RSYNC_NEEDED);
3700 3701 mutex_enter(&krdc->bmapmutex);
3701 3702 rdc_clr_flags(urdc, RDC_VOL_FAILED);
3702 3703 mutex_exit(&krdc->bmapmutex);
3703 3704 rdc_write_state(urdc);
3704 3705 /* LINTED */
3705 3706 if (kmulti = krdc->multi_next) {
3706 3707 umulti = &rdc_u_info[kmulti->index];
3707 3708 if (IS_ENABLED(umulti) && (rdc_get_vflags(umulti) &
3708 3709 (RDC_VOL_FAILED | RDC_SYNC_NEEDED))) {
3709 3710 rdc_clr_flags(umulti, RDC_SYNC_NEEDED);
3710 3711 rdc_clr_flags(umulti, RDC_VOL_FAILED);
3711 3712 rdc_write_state(umulti);
3712 3713 }
3713 3714 }
3714 3715 rdc_many_exit(krdc);
3715 3716 } else {
3716 3717 rdc_clr_flags(urdc, RDC_FCAL_FAILED);
3717 3718 rdc_write_state(urdc);
3718 3719 }
3719 3720
3720 3721 if (options & RDC_OPT_UPDATE) {
3721 3722 ASSERT(urdc->volume_size != 0);
3722 3723 if (rdc_net_getbmap(index,
3723 3724 BMAP_LOG_BYTES(urdc->volume_size)) > 0) {
3724 3725 spcs_s_add(kstatus, RDC_ENOBMAP);
3725 3726 rc = RDC_ENOBMAP;
3726 3727
3727 3728 (void) rdc_net_state(index, CCIO_ENABLELOG);
3728 3729
3729 3730 rdc_clr_flags(urdc, RDC_SYNCING);
3730 3731 if (options & RDC_OPT_REVERSE) {
3731 3732 rdc_many_enter(krdc);
3732 3733 rdc_clr_mflags(urdc, RDC_SLAVE);
3733 3734 rdc_many_exit(krdc);
3734 3735 }
3735 3736 if (krdc->type_flag & RDC_ASYNCMODE)
3736 3737 rdc_set_flags(urdc, RDC_ASYNC);
3737 3738 krdc->remote_index = -1;
3738 3739 rdc_set_flags_log(urdc, RDC_LOGGING,
3739 3740 "failed to read remote bitmap");
3740 3741 rdc_write_state(urdc);
3741 3742 goto failed;
3742 3743 }
3743 3744 rdc_clr_flags(urdc, RDC_FULL);
3744 3745 } else {
3745 3746 /*
3746 3747 * This is a full sync (not an update sync), mark the
3747 3748 * entire bitmap dirty
3748 3749 */
3749 3750 (void) RDC_FILL_BITMAP(krdc, FALSE);
3750 3751
3751 3752 rdc_set_flags(urdc, RDC_FULL);
3752 3753 }
3753 3754
3754 3755 rdc_group_exit(krdc);
3755 3756
3756 3757 /*
3757 3758 * allow diskq->memq flusher to wake up
3758 3759 */
3759 3760 mutex_enter(&krdc->group->ra_queue.net_qlock);
3760 3761 krdc->group->ra_queue.qfflags &= ~RDC_QFILLSLEEP;
3761 3762 mutex_exit(&krdc->group->ra_queue.net_qlock);
3762 3763
3763 3764 /*
3764 3765 * if this is a full sync on a non-diskq set or
3765 3766 * a diskq set that has failed, clear the async flag
3766 3767 */
3767 3768 if (krdc->type_flag & RDC_ASYNCMODE) {
3768 3769 if ((!(options & RDC_OPT_UPDATE)) ||
3769 3770 (!RDC_IS_DISKQ(krdc->group)) ||
3770 3771 (!(IS_STATE(urdc, RDC_QUEUING)))) {
3771 3772 /* full syncs, or core queue are synchronous */
3772 3773 rdc_group_enter(krdc);
3773 3774 rdc_clr_flags(urdc, RDC_ASYNC);
3774 3775 rdc_group_exit(krdc);
3775 3776 }
3776 3777
3777 3778 /*
3778 3779 * if the queue failed because it was full, lets see
3779 3780 * if we can restart it. After _rdc_sync() is done
3780 3781 * the modes will switch and we will begin disk
3781 3782 * queuing again. NOTE: this should only be called
3782 3783 * once per group, as it clears state for all group
3783 3784 * members, also clears the async flag for all members
3784 3785 */
3785 3786 if (IS_STATE(urdc, RDC_DISKQ_FAILED)) {
3786 3787 rdc_unfail_diskq(krdc);
3787 3788 } else {
3788 3789 /* don't add insult to injury by flushing a dead queue */
3789 3790
3790 3791 /*
3791 3792 * if we are updating, and a diskq and
3792 3793 * the async thread isn't active, start
3793 3794 * it up.
3794 3795 */
3795 3796 if ((options & RDC_OPT_UPDATE) &&
3796 3797 (IS_STATE(urdc, RDC_QUEUING))) {
3797 3798 rdc_group_enter(krdc);
3798 3799 rdc_clr_flags(urdc, RDC_SYNCING);
3799 3800 rdc_group_exit(krdc);
3800 3801 mutex_enter(&krdc->group->ra_queue.net_qlock);
3801 3802 if (krdc->group->ra_queue.qfill_sleeping ==
3802 3803 RDC_QFILL_ASLEEP)
3803 3804 cv_broadcast(&group->ra_queue.qfcv);
3804 3805 mutex_exit(&krdc->group->ra_queue.net_qlock);
3805 3806 thrcount = urdc->asyncthr;
3806 3807 while ((thrcount-- > 0) &&
3807 3808 !krdc->group->rdc_writer) {
3808 3809 (void) rdc_writer(krdc->index);
3809 3810 }
3810 3811 }
3811 3812 }
3812 3813 }
3813 3814
3814 3815 /*
3815 3816 * For a reverse sync, merge the current bitmap with all other sets
3816 3817 * that share this volume.
3817 3818 */
3818 3819 if (options & RDC_OPT_REVERSE) {
3819 3820 retry_many:
3820 3821 rdc_many_enter(krdc);
3821 3822 if (IS_MANY(krdc)) {
3822 3823 rdc_k_info_t *kmany;
3823 3824 rdc_u_info_t *umany;
3824 3825
3825 3826 for (kmany = krdc->many_next; kmany != krdc;
3826 3827 kmany = kmany->many_next) {
3827 3828 umany = &rdc_u_info[kmany->index];
3828 3829 if (!IS_ENABLED(umany))
3829 3830 continue;
3830 3831 ASSERT(umany->flags & RDC_PRIMARY);
3831 3832
/* tryenter avoids a lock-order deadlock; drop and retry on failure */
3832 3833 if (!mutex_tryenter(&kmany->group->lock)) {
3833 3834 rdc_many_exit(krdc);
3834 3835 /* May merge more than once */
3835 3836 goto retry_many;
3836 3837 }
3837 3838 rdc_merge_bitmaps(krdc, kmany);
3838 3839 mutex_exit(&kmany->group->lock);
3839 3840 }
3840 3841 }
3841 3842 rdc_many_exit(krdc);
3842 3843
3843 3844 retry_multi:
3844 3845 rdc_many_enter(krdc);
3845 3846 if (IS_MULTI(krdc)) {
3846 3847 rdc_k_info_t *kmulti = krdc->multi_next;
3847 3848 rdc_u_info_t *umulti = &rdc_u_info[kmulti->index];
3848 3849
3849 3850 if (IS_ENABLED(umulti)) {
3850 3851 ASSERT(!(umulti->flags & RDC_PRIMARY));
3851 3852
3852 3853 if (!mutex_tryenter(&kmulti->group->lock)) {
3853 3854 rdc_many_exit(krdc);
3854 3855 goto retry_multi;
3855 3856 }
3856 3857 rdc_merge_bitmaps(krdc, kmulti);
3857 3858 mutex_exit(&kmulti->group->lock);
3858 3859 }
3859 3860 }
3860 3861 rdc_many_exit(krdc);
3861 3862 }
3862 3863
3863 3864 rdc_group_enter(krdc);
3864 3865
3865 3866 if (krdc->bitmap_write == 0) {
3866 3867 if (rdc_write_bitmap_fill(krdc) >= 0)
3867 3868 krdc->bitmap_write = -1;
3868 3869 }
3869 3870
3870 3871 if (krdc->bitmap_write > 0)
3871 3872 (void) rdc_write_bitmap(krdc);
3872 3873
3873 3874 urdc->bits_set = RDC_COUNT_BITMAP(krdc);
3874 3875
3875 3876 rdc_group_exit(krdc);
3876 3877
3877 3878 if (options & RDC_OPT_REVERSE) {
3878 3879 (void) _rdc_sync_event_notify(RDC_SYNC_START,
3879 3880 urdc->primary.file, urdc->group_name);
3880 3881 }
3881 3882
3882 3883 /* Now set off the sync itself */
3883 3884
3884 3885 mutex_enter(&net_blk_lock);
3885 3886 if (nsc_create_process(
3886 3887 (void (*)(void *))_rdc_sync, (void *)krdc, FALSE)) {
3887 3888 mutex_exit(&net_blk_lock);
3888 3889 spcs_s_add(kstatus, RDC_ENOPROC);
3889 3890 /*
3890 3891 * We used to just return here,
3891 3892 * but we need to clear the AUXSYNCIP bit
3892 3893 * and there is a very small chance that
3893 3894 * someone may be waiting on the disk_status flag.
3894 3895 */
3895 3896 rc = RDC_ENOPROC;
3896 3897 /*
3897 3898 * need the group lock held at failed.
3898 3899 */
3899 3900 rdc_group_enter(krdc);
3900 3901 goto failed;
3901 3902 }
3902 3903
3903 3904 mutex_enter(&rdc_conf_lock);
3904 3905 wakeup_busy(krdc);
3905 3906 busy = 0;
3906 3907 mutex_exit(&rdc_conf_lock);
3907 3908
/* block until _rdc_sync signals completion via synccv */
3908 3909 while (krdc->sync_done == 0)
3909 3910 cv_wait(&krdc->synccv, &net_blk_lock);
3910 3911 mutex_exit(&net_blk_lock);
3911 3912
3912 3913 rdc_group_enter(krdc);
3913 3914
3914 3915 if (krdc->sync_done == RDC_FAILED) {
3915 3916 char siztmp1[16];
3916 3917 (void) spcs_s_inttostring(
3917 3918 urdc->sync_pos, siztmp1, sizeof (siztmp1),
3918 3919 0);
3919 3920 spcs_s_add(kstatus, RDC_EFAIL, siztmp1);
3920 3921 rc = RDC_EFAIL;
3921 3922 } else
3922 3923 sync_completed = 1;
3923 3924
3924 3925 failed:
3925 3926 /*
3926 3927 * We use this flag now to make halt_sync() wait for
3927 3928 * us to terminate and let us take the group lock.
3928 3929 */
3929 3930 krdc->aux_state &= ~RDC_AUXSYNCIP;
3930 3931 if (krdc->disk_status == 1) {
3931 3932 krdc->disk_status = 0;
3932 3933 cv_broadcast(&krdc->haltcv);
3933 3934 }
3934 3935
3935 3936 notstarted_unlock:
3936 3937 rdc_group_exit(krdc);
3937 3938
3938 3939 if (sync_completed && (options & RDC_OPT_REVERSE)) {
3939 3940 (void) _rdc_sync_event_notify(RDC_SYNC_DONE,
3940 3941 urdc->primary.file, urdc->group_name);
3941 3942 }
3942 3943
3943 3944 notstarted:
3944 3945 if (busy) {
3945 3946 mutex_enter(&rdc_conf_lock);
3946 3947 wakeup_busy(krdc);
3947 3948 mutex_exit(&rdc_conf_lock);
3948 3949 }
3949 3950
3950 3951 return (rc);
3951 3952 }
3952 3953
/*
 * _rdc_suspend - tear down a set's runtime state while preserving its
 * persistent (bitmap/config) state, so a later resume can restore it.
 * Unlike _rdc_disable, the on-disk state is kept (see the "Don't
 * rdc_clear_state" note below).
 *
 * Returns 0 on success, RDC_EALREADY if the set no longer matches.
 *
 * Ordering is load-bearing: halt any sync, unintercept I/O, drain the
 * async queue, drop the interface, free/close the bitmap and devices,
 * then detach from many/multi/group lists under rdc_conf_lock.
 */
3953 3954 /* ARGSUSED */
3954 3955 static int
3955 3956 _rdc_suspend(rdc_k_info_t *krdc, rdc_set_t *rdc_set, spcs_s_info_t kstatus)
3956 3957 {
3957 3958 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
3958 3959 rdc_if_t *ip;
3959 3960 int index = krdc->index;
3960 3961
3961 3962 ASSERT(krdc->group != NULL);
3962 3963 rdc_group_enter(krdc);
/* DEBUG builds assert the set matches; non-DEBUG builds fail gracefully */
3963 3964 #ifdef DEBUG
3964 3965 ASSERT(rdc_check(krdc, rdc_set) == 0);
3965 3966 #else
3966 3967 if (rdc_check(krdc, rdc_set)) {
3967 3968 rdc_group_exit(krdc);
3968 3969 spcs_s_add(kstatus, RDC_EALREADY, rdc_set->primary.file,
3969 3970 rdc_set->secondary.file);
3970 3971 return (RDC_EALREADY);
3971 3972 }
3972 3973 #endif
3973 3974
/* stop any in-flight sync before dismantling state (primary only) */
3974 3975 if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
3975 3976 halt_sync(krdc);
3976 3977 ASSERT(IS_ENABLED(urdc));
3977 3978 }
3978 3979
3979 3980 rdc_group_exit(krdc);
3980 3981 (void) rdc_unintercept(krdc);
3981 3982
3982 3983 #ifdef DEBUG
3983 3984 cmn_err(CE_NOTE, "!SNDR: suspended %s %s", urdc->primary.file,
3984 3985 urdc->secondary.file);
3985 3986 #endif
3986 3987
3987 3988 /* Configured but not enabled */
3988 3989 ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));
3989 3990
3990 3991
/*
 * Memory-queue async sets: try to flush pending writes politely a
 * couple of times, then force the queue awake and wait for the
 * flusher threads to exit, warning about any unflushed I/O.
 */
3991 3992 if (IS_ASYNC(urdc) && !RDC_IS_DISKQ(krdc->group)) {
3992 3993 int tries = 2; /* in case of possibly stuck flusher threads */
3993 3994 #ifdef DEBUG
3994 3995 net_queue *qp = &krdc->group->ra_queue;
3995 3996 #endif
3996 3997 do {
3997 3998 if (!krdc->group->rdc_writer)
3998 3999 (void) rdc_writer(krdc->index);
3999 4000
4000 4001 (void) rdc_drain_queue(krdc->index);
4001 4002
4002 4003 } while (krdc->group->rdc_writer && tries--);
4003 4004
4004 4005 /* ok, force it to happen... */
4005 4006 if (rdc_drain_queue(krdc->index) != 0) {
4006 4007 do {
4007 4008 mutex_enter(&krdc->group->ra_queue.net_qlock);
4008 4009 krdc->group->asyncdis = 1;
4009 4010 cv_broadcast(&krdc->group->asyncqcv);
4010 4011 mutex_exit(&krdc->group->ra_queue.net_qlock);
4011 4012 cmn_err(CE_WARN,
4012 4013 "!SNDR: async I/O pending and not flushed "
4013 4014 "for %s during suspend",
4014 4015 urdc->primary.file);
4015 4016 #ifdef DEBUG
4016 4017 cmn_err(CE_WARN,
4017 4018 "!nitems: %" NSC_SZFMT " nblocks: %"
4018 4019 NSC_SZFMT " head: 0x%p tail: 0x%p",
4019 4020 qp->nitems, qp->blocks,
4020 4021 (void *)qp->net_qhead,
4021 4022 (void *)qp->net_qtail);
4022 4023 #endif
4023 4024 } while (krdc->group->rdc_thrnum > 0);
4024 4025 }
4025 4026 }
4026 4027
/* detach the set from its network interface under the config lock */
4027 4028 mutex_enter(&rdc_conf_lock);
4028 4029 ip = krdc->intf;
4029 4030 krdc->intf = 0;
4030 4031
4031 4032 if (ip) {
4032 4033 rdc_remove_from_if(ip);
4033 4034 }
4034 4035
4035 4036 mutex_exit(&rdc_conf_lock);
4036 4037
4037 4038 rdc_group_enter(krdc);
4038 4039
4039 4040 /* Configured but not enabled */
4040 4041 ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));
4041 4042
4042 4043 rdc_group_exit(krdc);
4043 4044 /* Must not hold group lock during this function */
4044 4045 while (rdc_dump_alloc_bufs_cd(krdc->index) == EAGAIN)
4045 4046 delay(2);
4046 4047 rdc_group_enter(krdc);
4047 4048
4048 4049 /* Don't rdc_clear_state, unlike _rdc_disable */
4049 4050
4050 4051 rdc_free_bitmap(krdc, RDC_CMD_SUSPEND);
4051 4052 rdc_close_bitmap(krdc);
4052 4053
4053 4054 rdc_dev_close(krdc);
4054 4055 rdc_close_direct(krdc);
4055 4056
4056 4057 /* Configured but not enabled */
4057 4058 ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));
4058 4059
4059 4060 rdc_group_exit(krdc);
4060 4061
4061 4062 /*
4062 4063 * we should now unregister the queue, with no conflicting
4063 4064 * locks held. This is the last(only) member of the group
4064 4065 */
4065 4066 if (krdc->group && RDC_IS_DISKQ(krdc->group) &&
4066 4067 krdc->group->count == 1) { /* stop protecting queue */
4067 4068 rdc_unintercept_diskq(krdc->group);
4068 4069 }
4069 4070
4070 4071 mutex_enter(&rdc_conf_lock);
4071 4072
4072 4073 /* Configured but not enabled */
4073 4074 ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));
4074 4075
/* wait for any rdc_sync/ioctl holding the busy flag to finish */
4075 4076 wait_busy(krdc);
4076 4077
4077 4078 if (IS_MANY(krdc) || IS_MULTI(krdc))
4078 4079 remove_from_many(krdc);
4079 4080
4080 4081 remove_from_group(krdc);
4081 4082
/* reset the per-set kernel state for a future resume */
4082 4083 krdc->remote_index = -1;
4083 4084 ASSERT(krdc->type_flag & RDC_CONFIGURED);
4084 4085 ASSERT(krdc->type_flag & RDC_DISABLEPEND);
4085 4086 krdc->type_flag = 0;
4086 4087 #ifdef DEBUG
4087 4088 if (krdc->dcio_bitmap)
4088 4089 cmn_err(CE_WARN, "!_rdc_suspend: possible mem leak, "
4089 4090 "dcio_bitmap");
4090 4091 #endif
4091 4092 krdc->dcio_bitmap = NULL;
4092 4093 krdc->bitmap_ref = NULL;
4093 4094 krdc->bitmap_size = 0;
4094 4095 krdc->maxfbas = 0;
4095 4096 krdc->bitmap_write = 0;
4096 4097 krdc->disk_status = 0;
4097 4098 rdc_destroy_svinfo(krdc->lsrv);
4098 4099 krdc->lsrv = NULL;
4099 4100 krdc->multi_next = NULL;
4100 4101
4101 4102 rdc_u_init(urdc);
4102 4103
4103 4104 mutex_exit(&rdc_conf_lock);
4104 4105 rdc_kstat_delete(index);
4105 4106 return (0);
4106 4107 }
4107 4108
/*
 * rdc_suspend - ioctl wrapper for _rdc_suspend().
 *
 * Looks up the set named in uparms->rdc_set, marks it RDC_DISABLEPEND
 * so no new operation can start, waits for in-flight operations to
 * drain (wait_busy), then performs the real suspend.
 *
 * Returns 0 on success or RDC_EALREADY if the set is not found, is
 * already being torn down, or vanished while we waited.
 */
4108 4109 static int
4109 4110 rdc_suspend(rdc_config_t *uparms, spcs_s_info_t kstatus)
4110 4111 {
4111 4112 rdc_k_info_t *krdc;
4112 4113 int index;
4113 4114 int rc;
4114 4115
4115 4116 mutex_enter(&rdc_conf_lock);
4116 4117
4117 4118 index = rdc_lookup_byname(uparms->rdc_set);
4118 4119 if (index >= 0)
4119 4120 krdc = &rdc_k_info[index];
/* short-circuit ensures krdc is only dereferenced when index >= 0 */
4120 4121 if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
4121 4122 mutex_exit(&rdc_conf_lock);
4122 4123 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
4123 4124 uparms->rdc_set->secondary.file);
4124 4125 return (RDC_EALREADY);
4125 4126 }
4126 4127
/* claim the set, then wait for current users before suspending */
4127 4128 krdc->type_flag |= RDC_DISABLEPEND;
4128 4129 wait_busy(krdc);
4129 4130 if (krdc->type_flag == 0) {
4130 4131 /* A resume or enable failed */
4131 4132 mutex_exit(&rdc_conf_lock);
4132 4133 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
4133 4134 uparms->rdc_set->secondary.file);
4134 4135 return (RDC_EALREADY);
4135 4136 }
4136 4137 mutex_exit(&rdc_conf_lock);
4137 4138
4138 4139 rc = _rdc_suspend(krdc, uparms->rdc_set, kstatus);
4139 4140 return (rc);
4140 4141 }
4141 4142
/*
 * Resume (reconfigure) a previously suspended rdc set described by
 * rdc_set, in the role selected by options (RDC_OPT_PRIMARY /
 * RDC_OPT_SECONDARY, RDC_OPT_SYNC / RDC_OPT_ASYNC).
 *
 * Validates the description, checks for conflicts with already
 * configured sets, opens the data device, copies the user-supplied
 * parameters into the kernel's urdc slot, joins the set to its group
 * and many-chain, resumes the bitmap, and finally enables the set in
 * logging mode and intercepts i/o via rdc_intercept().
 *
 * Returns 0 on success or an RDC_* / errno code (added to kstatus);
 * on failure all partially-configured state is unwound.
 */
static int
_rdc_resume(rdc_set_t *rdc_set, int options, spcs_s_info_t kstatus)
{
	int index;
	char *rhost;
	struct netbuf *addrp;
	rdc_k_info_t *krdc;
	rdc_u_info_t *urdc;
	rdc_srv_t *svp = NULL;
	char *local_file;
	char *local_bitmap;
	int rc, rc1;
	nsc_size_t maxfbas;
	rdc_group_t *grp;

	/* Reject descriptions with any required field left empty */
	if ((rdc_set->primary.intf[0] == 0) ||
	    (rdc_set->primary.addr.len == 0) ||
	    (rdc_set->primary.file[0] == 0) ||
	    (rdc_set->primary.bitmap[0] == 0) ||
	    (rdc_set->secondary.intf[0] == 0) ||
	    (rdc_set->secondary.addr.len == 0) ||
	    (rdc_set->secondary.file[0] == 0) ||
	    (rdc_set->secondary.bitmap[0] == 0)) {
		spcs_s_add(kstatus, RDC_EEMPTY);
		return (RDC_EEMPTY);
	}

	/* Next check there aren't any enabled rdc sets which match. */

	mutex_enter(&rdc_conf_lock);

	if (rdc_lookup_byname(rdc_set) >= 0) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EENABLED, rdc_set->primary.intf,
		    rdc_set->primary.file, rdc_set->secondary.intf,
		    rdc_set->secondary.file);
		return (RDC_EENABLED);
	}

	if (rdc_lookup_many2one(rdc_set) >= 0) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EMANY2ONE, rdc_set->primary.intf,
		    rdc_set->primary.file, rdc_set->secondary.intf,
		    rdc_set->secondary.file);
		return (RDC_EMANY2ONE);
	}

	if (rdc_set->netconfig->knc_proto == NULL) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_ENETCONFIG);
		return (RDC_ENETCONFIG);
	}

	if (rdc_set->primary.addr.len == 0) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_ENETBUF, rdc_set->primary.file);
		return (RDC_ENETBUF);
	}

	if (rdc_set->secondary.addr.len == 0) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_ENETBUF, rdc_set->secondary.file);
		return (RDC_ENETBUF);
	}

	/* Check that the local data volume isn't in use as a bitmap */
	if (options & RDC_OPT_PRIMARY)
		local_file = rdc_set->primary.file;
	else
		local_file = rdc_set->secondary.file;
	if (rdc_lookup_bitmap(local_file) >= 0) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EVOLINUSE, local_file);
		return (RDC_EVOLINUSE);
	}

	/* check that the secondary data volume isn't in use */
	if (!(options & RDC_OPT_PRIMARY)) {
		local_file = rdc_set->secondary.file;
		if (rdc_lookup_secondary(local_file) >= 0) {
			mutex_exit(&rdc_conf_lock);
			spcs_s_add(kstatus, RDC_EVOLINUSE, local_file);
			return (RDC_EVOLINUSE);
		}
	}

	/* Check that the bitmap isn't in use as a data volume */
	if (options & RDC_OPT_PRIMARY)
		local_bitmap = rdc_set->primary.bitmap;
	else
		local_bitmap = rdc_set->secondary.bitmap;
	if (rdc_lookup_configured(local_bitmap) >= 0) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EBMPINUSE, local_bitmap);
		return (RDC_EBMPINUSE);
	}

	/* Check that the bitmap isn't already in use as a bitmap */
	if (rdc_lookup_bitmap(local_bitmap) >= 0) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EBMPINUSE, local_bitmap);
		return (RDC_EBMPINUSE);
	}

	/* Set urdc->volume_size */
	index = rdc_dev_open(rdc_set, options);
	if (index < 0) {
		mutex_exit(&rdc_conf_lock);
		if (options & RDC_OPT_PRIMARY)
			spcs_s_add(kstatus, RDC_EOPEN, rdc_set->primary.intf,
			    rdc_set->primary.file);
		else
			spcs_s_add(kstatus, RDC_EOPEN, rdc_set->secondary.intf,
			    rdc_set->secondary.file);
		return (RDC_EOPEN);
	}

	urdc = &rdc_u_info[index];
	krdc = &rdc_k_info[index];

	/* copy relevant parts of rdc_set to urdc field by field */

	(void) strncpy(urdc->primary.intf, rdc_set->primary.intf,
	    MAX_RDC_HOST_SIZE);
	(void) strncpy(urdc->secondary.intf, rdc_set->secondary.intf,
	    MAX_RDC_HOST_SIZE);

	(void) strncpy(urdc->group_name, rdc_set->group_name, NSC_MAXPATH);

	dup_rdc_netbuf(&rdc_set->primary.addr, &urdc->primary.addr);
	(void) strncpy(urdc->primary.file, rdc_set->primary.file, NSC_MAXPATH);
	(void) strncpy(urdc->primary.bitmap, rdc_set->primary.bitmap,
	    NSC_MAXPATH);

	dup_rdc_netbuf(&rdc_set->secondary.addr, &urdc->secondary.addr);
	(void) strncpy(urdc->secondary.file, rdc_set->secondary.file,
	    NSC_MAXPATH);
	(void) strncpy(urdc->secondary.bitmap, rdc_set->secondary.bitmap,
	    NSC_MAXPATH);
	(void) strncpy(urdc->disk_queue, rdc_set->disk_queue, NSC_MAXPATH);
	urdc->setid = rdc_set->setid;

	/* A disk queue is only meaningful in async mode */
	if ((options & RDC_OPT_SYNC) && urdc->disk_queue[0]) {
		mutex_exit(&rdc_conf_lock);
		rdc_dev_close(krdc);
		spcs_s_add(kstatus, RDC_EQWRONGMODE);
		return (RDC_EQWRONGMODE);
	}

	/*
	 * init flags now so that state left by failures in add_to_group()
	 * are preserved.
	 */
	rdc_init_flags(urdc);

	if ((rc1 = add_to_group(krdc, options, RDC_CMD_RESUME)) != 0) {
		if (rc1 == RDC_EQNOADD) { /* something went wrong with queue */
			rdc_fail_diskq(krdc, RDC_WAIT, RDC_NOLOG);
			/* don't return a failure here, continue with resume */

		} else { /* some other group add failure */
			mutex_exit(&rdc_conf_lock);
			rdc_dev_close(krdc);
			spcs_s_add(kstatus, RDC_EGROUP,
			    rdc_set->primary.intf, rdc_set->primary.file,
			    rdc_set->secondary.intf, rdc_set->secondary.file,
			    rdc_set->group_name);
			return (RDC_EGROUP);
		}
	}

	/*
	 * maxfbas was set in rdc_dev_open as primary's maxfbas.
	 * If diskq's maxfbas is smaller, then use diskq's.
	 */
	grp = krdc->group;
	if (grp && RDC_IS_DISKQ(grp) && (grp->diskqfd != 0)) {
		rc = _rdc_rsrv_diskq(grp);
		if (RDC_SUCCESS(rc)) {
			rc = nsc_maxfbas(grp->diskqfd, 0, &maxfbas);
			if (rc == 0) {
#ifdef DEBUG
				if (krdc->maxfbas != maxfbas)
					cmn_err(CE_NOTE,
					    "!_rdc_resume: diskq maxfbas = %"
					    NSC_SZFMT ", primary maxfbas = %"
					    NSC_SZFMT, maxfbas, krdc->maxfbas);
#endif
				krdc->maxfbas = min(krdc->maxfbas,
				    maxfbas);
			} else {
				cmn_err(CE_WARN,
				    "!_rdc_resume: diskq maxfbas failed (%d)",
				    rc);
			}
			_rdc_rlse_diskq(grp);
		} else {
			cmn_err(CE_WARN,
			    "!_rdc_resume: diskq reserve failed (%d)", rc);
		}
	}

	/* FCAL direct-i/o path; failure is recorded, not fatal */
	(void) strncpy(urdc->direct_file, rdc_set->direct_file, NSC_MAXPATH);
	if ((options & RDC_OPT_PRIMARY) && rdc_set->direct_file[0]) {
		if (rdc_open_direct(krdc) == NULL)
			rdc_set_flags(urdc, RDC_FCAL_FAILED);
	}

	/* Initially a many-chain of one (points at itself) */
	krdc->many_next = krdc;

	ASSERT(krdc->type_flag == 0);
	krdc->type_flag = RDC_CONFIGURED;

	if (options & RDC_OPT_PRIMARY)
		rdc_set_flags(urdc, RDC_PRIMARY);

	if (options & RDC_OPT_ASYNC)
		krdc->type_flag |= RDC_ASYNCMODE;

	set_busy(krdc);

	urdc->syshostid = rdc_set->syshostid;

	if (add_to_many(krdc) < 0) {
		mutex_exit(&rdc_conf_lock);

		rdc_group_enter(krdc);

		spcs_s_add(kstatus, RDC_EMULTI);
		rc = RDC_EMULTI;
		goto fail;
	}

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	mutex_exit(&rdc_conf_lock);

	/* A zero-sized volume means the open failed; flag for later sync */
	if (urdc->volume_size == 0) {
		rdc_many_enter(krdc);
		if (options & RDC_OPT_PRIMARY)
			rdc_set_mflags(urdc, RDC_RSYNC_NEEDED);
		else
			rdc_set_flags(urdc, RDC_SYNC_NEEDED);
		rdc_set_flags(urdc, RDC_VOL_FAILED);
		rdc_many_exit(krdc);
	}

	rdc_group_enter(krdc);

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	/*
	 * The rdc set is configured but not yet enabled. Other operations must
	 * ignore this set until it is enabled.
	 */

	urdc->sync_pos = 0;

	/* Set tunable defaults, we'll pick up tunables from the header later */

	urdc->maxqfbas = rdc_maxthres_queue;
	urdc->maxqitems = rdc_max_qitems;
	urdc->autosync = 0;
	urdc->asyncthr = rdc_asyncthr;

	urdc->netconfig = rdc_set->netconfig;

	/* The remote end is the opposite of the role we resume in */
	if (options & RDC_OPT_PRIMARY) {
		rhost = rdc_set->secondary.intf;
		addrp = &rdc_set->secondary.addr;
	} else {
		rhost = rdc_set->primary.intf;
		addrp = &rdc_set->primary.addr;
	}

	if (options & RDC_OPT_ASYNC)
		rdc_set_flags(urdc, RDC_ASYNC);

	svp = rdc_create_svinfo(rhost, addrp, urdc->netconfig);
	if (svp == NULL) {
		spcs_s_add(kstatus, ENOMEM);
		rc = ENOMEM;
		goto fail;
	}

	urdc->netconfig = NULL;		/* This will be no good soon */

	/* Don't set krdc->intf here */
	rdc_kstat_create(index);

	/* if the bitmap resume isn't clean, it will clear queuing flag */

	(void) rdc_resume_bitmap(krdc);

	if (RDC_IS_DISKQ(krdc->group)) {
		disk_queue *q = &krdc->group->diskq;
		if ((rc1 == RDC_EQNOADD) ||
		    IS_QSTATE(q, RDC_QBADRESUME)) {
			rdc_clr_flags(urdc, RDC_QUEUING);
			RDC_ZERO_BITREF(krdc);
		}
	}

	if (krdc->lsrv == NULL)
		krdc->lsrv = svp;
	else {
#ifdef DEBUG
		cmn_err(CE_WARN, "!_rdc_resume: krdc->lsrv already set: %p",
		    (void *) krdc->lsrv);
#endif
		rdc_destroy_svinfo(svp);
	}
	/* svp is now owned by krdc->lsrv (or freed); make fail: a no-op */
	svp = NULL;

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	/* And finally */

	krdc->remote_index = -1;

	/* Should we set the whole group logging? */
	rdc_set_flags(urdc, RDC_ENABLED | RDC_LOGGING);

	rdc_group_exit(krdc);

	if (rdc_intercept(krdc) != 0) {
		rdc_group_enter(krdc);
		rdc_clr_flags(urdc, RDC_ENABLED);
		if (options & RDC_OPT_PRIMARY)
			spcs_s_add(kstatus, RDC_EREGISTER, urdc->primary.file);
		else
			spcs_s_add(kstatus, RDC_EREGISTER,
			    urdc->secondary.file);
#ifdef DEBUG
		cmn_err(CE_NOTE, "!nsc_register_path failed %s",
		    urdc->primary.file);
#endif
		rc = RDC_EREGISTER;
		goto bmpfail;
	}
#ifdef DEBUG
	cmn_err(CE_NOTE, "!SNDR: resumed %s %s", urdc->primary.file,
	    urdc->secondary.file);
#endif

	rdc_write_state(urdc);

	mutex_enter(&rdc_conf_lock);
	wakeup_busy(krdc);
	mutex_exit(&rdc_conf_lock);

	return (0);

	/* Error unwind paths: entered with the group lock held */
bmpfail:
	if (options & RDC_OPT_PRIMARY)
		spcs_s_add(kstatus, RDC_EBITMAP, urdc->primary.bitmap);
	else
		spcs_s_add(kstatus, RDC_EBITMAP, urdc->secondary.bitmap);
	rc = RDC_EBITMAP;
	if (rdc_get_vflags(urdc) & RDC_ENABLED) {
		rdc_group_exit(krdc);
		(void) rdc_unintercept(krdc);
		rdc_group_enter(krdc);
	}

fail:
	rdc_kstat_delete(index);
	/* Don't unset krdc->intf here, unlike _rdc_enable */

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	rdc_dev_close(krdc);
	rdc_close_direct(krdc);
	rdc_destroy_svinfo(svp);

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	rdc_group_exit(krdc);

	mutex_enter(&rdc_conf_lock);

	/* Configured but not enabled */
	ASSERT(IS_CONFIGURED(krdc) && !IS_ENABLED(urdc));

	remove_from_group(krdc);

	if (IS_MANY(krdc) || IS_MULTI(krdc))
		remove_from_many(krdc);

	rdc_u_init(urdc);

	ASSERT(krdc->type_flag & RDC_CONFIGURED);
	krdc->type_flag = 0;
	wakeup_busy(krdc);

	mutex_exit(&rdc_conf_lock);

	return (rc);
}
4546 4547
4547 4548 static int
4548 4549 rdc_resume(rdc_config_t *uparms, spcs_s_info_t kstatus)
4549 4550 {
4550 4551 char itmp[10];
4551 4552 int rc;
4552 4553
4553 4554 if (!(uparms->options & RDC_OPT_SYNC) &&
4554 4555 !(uparms->options & RDC_OPT_ASYNC)) {
4555 4556 (void) spcs_s_inttostring(
4556 4557 uparms->options, itmp, sizeof (itmp), 1);
4557 4558 spcs_s_add(kstatus, RDC_EEINVAL, itmp);
4558 4559 rc = RDC_EEINVAL;
4559 4560 goto done;
4560 4561 }
4561 4562
4562 4563 if (!(uparms->options & RDC_OPT_PRIMARY) &&
4563 4564 !(uparms->options & RDC_OPT_SECONDARY)) {
4564 4565 (void) spcs_s_inttostring(
4565 4566 uparms->options, itmp, sizeof (itmp), 1);
4566 4567 spcs_s_add(kstatus, RDC_EEINVAL, itmp);
4567 4568 rc = RDC_EEINVAL;
4568 4569 goto done;
4569 4570 }
4570 4571
4571 4572 rc = _rdc_resume(uparms->rdc_set, uparms->options, kstatus);
4572 4573 done:
4573 4574 return (rc);
4574 4575 }
4575 4576
4576 4577 /*
4577 4578 * if rdc_group_log is called because a volume has failed,
4578 4579 * we must disgard the queue to preserve write ordering.
4579 4580 * later perhaps, we can keep queuing, but we would have to
4580 4581 * rewrite the i/o path to acommodate that. currently, if there
4581 4582 * is a volume failure, the buffers are satisfied remotely and
4582 4583 * there is no way to satisfy them from the current diskq config
4583 4584 * phew, if we do that.. it will be difficult
4584 4585 */
4585 4586 int
4586 4587 rdc_can_queue(rdc_k_info_t *krdc)
4587 4588 {
4588 4589 rdc_k_info_t *p;
4589 4590 rdc_u_info_t *q;
4590 4591
4591 4592 for (p = krdc->group_next; ; p = p->group_next) {
4592 4593 q = &rdc_u_info[p->index];
4593 4594 if (IS_STATE(q, RDC_VOL_FAILED))
4594 4595 return (0);
4595 4596 if (p == krdc)
4596 4597 break;
4597 4598 }
4598 4599 return (1);
4599 4600 }
4600 4601
4601 4602 /*
4602 4603 * wait here, until all in flight async i/o's have either
4603 4604 * finished or failed. Avoid the race with r_net_state()
4604 4605 * which tells remote end to log.
4605 4606 */
4606 4607 void
4607 4608 rdc_inflwait(rdc_group_t *grp)
4608 4609 {
4609 4610 int bail = RDC_CLNT_TMOUT * 2; /* to include retries */
4610 4611 volatile int *inflitems;
4611 4612
4612 4613 if (RDC_IS_DISKQ(grp))
4613 4614 inflitems = (&(grp->diskq.inflitems));
4614 4615 else
4615 4616 inflitems = (&(grp->ra_queue.inflitems));
4616 4617
4617 4618 while (*inflitems && (--bail > 0))
4618 4619 delay(HZ);
4619 4620 }
4620 4621
/*
 * Put krdc into logging mode, and - when a group point-in-time is
 * possible - every enabled member of its consistency group as well.
 *
 * flag is a bit-set: RDC_QUEUING (try to keep queuing), RDC_FLUSH,
 * RDC_FORCE_GROUP, RDC_ALLREMOTE / RDC_OTHERREMOTE (tell the remote
 * node(s) to start logging too).  why is the reason string recorded
 * by rdc_set_flags_log() (may be NULL).
 *
 * Called (and returns) with krdc's group lock held; the lock is
 * dropped and re-acquired internally while queued i/o is drained, so
 * callers must not rely on state protected only by that lock across
 * this call.
 */
void
rdc_group_log(rdc_k_info_t *krdc, int flag, char *why)
{
	rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
	rdc_k_info_t *p;
	rdc_u_info_t *q;
	int do_group;
	int sm, um, md;
	disk_queue *dq;

	/* set or clear RDC_QUEUING on each member, decided below */
	void (*flag_op)(rdc_u_info_t *urdc, int flag);

	ASSERT(MUTEX_HELD(&krdc->group->lock));

	if (!IS_ENABLED(urdc))
		return;

	rdc_many_enter(krdc);

	if ((flag & RDC_QUEUING) && (!IS_STATE(urdc, RDC_SYNCING)) &&
	    (rdc_can_queue(krdc))) {
		flag_op = rdc_set_flags; /* keep queuing, link error */
		flag &= ~RDC_FLUSH;
	} else {
		flag_op = rdc_clr_flags; /* stop queuing, user request */
	}

	/*
	 * A group point-in-time is only attempted from the primary, for
	 * a named group, when neither this set nor any enabled member
	 * is already logging or syncing - unless RDC_FORCE_GROUP.
	 */
	do_group = 1;
	if (!(rdc_get_vflags(urdc) & RDC_PRIMARY))
		do_group = 0;
	else if ((urdc->group_name[0] == 0) ||
	    (rdc_get_vflags(urdc) & RDC_LOGGING) ||
	    (rdc_get_vflags(urdc) & RDC_SYNCING))
		do_group = 0;
	if (do_group) {
		for (p = krdc->group_next; p != krdc; p = p->group_next) {
			q = &rdc_u_info[p->index];
			if (!IS_ENABLED(q))
				continue;
			if ((rdc_get_vflags(q) & RDC_LOGGING) ||
			    (rdc_get_vflags(q) & RDC_SYNCING)) {
				do_group = 0;
				break;
			}
		}
	}
	if (!do_group && (flag & RDC_FORCE_GROUP))
		do_group = 1;

	rdc_many_exit(krdc);
	dq = &krdc->group->diskq;
	if (do_group) {
#ifdef DEBUG
		cmn_err(CE_NOTE, "!SNDR:Group point-in-time for grp: %s %s:%s",
		    urdc->group_name, urdc->primary.intf, urdc->secondary.intf);
#endif
		DTRACE_PROBE(rdc_diskq_group_PIT);

		/* Set group logging at the same PIT under rdc_many_lock */
		rdc_many_enter(krdc);
		rdc_set_flags_log(urdc, RDC_LOGGING, why);
		if (RDC_IS_DISKQ(krdc->group))
			flag_op(urdc, RDC_QUEUING);
		for (p = krdc->group_next; p != krdc; p = p->group_next) {
			q = &rdc_u_info[p->index];
			if (!IS_ENABLED(q))
				continue;
			rdc_set_flags_log(q, RDC_LOGGING,
			    "consistency group member following leader");
			if (RDC_IS_DISKQ(p->group))
				flag_op(q, RDC_QUEUING);
		}

		rdc_many_exit(krdc);

		/*
		 * This can cause the async threads to fail,
		 * which in turn will call rdc_group_log()
		 * again. Release the lock and re-aquire.
		 */
		rdc_group_exit(krdc);

		while (rdc_dump_alloc_bufs_cd(krdc->index) == EAGAIN)
			delay(2);
		if (!RDC_IS_DISKQ(krdc->group))
			RDC_ZERO_BITREF(krdc);

		rdc_inflwait(krdc->group);

		/*
		 * a little lazy, but neat. recall dump_alloc_bufs to
		 * ensure that the queue pointers & seq are reset properly
		 * after we have waited for inflight stuff
		 */
		while (rdc_dump_alloc_bufs_cd(krdc->index) == EAGAIN)
			delay(2);

		rdc_group_enter(krdc);
		if (RDC_IS_DISKQ(krdc->group) && (!(flag & RDC_QUEUING))) {
			/* fail or user request */
			RDC_ZERO_BITREF(krdc);
			mutex_enter(&krdc->group->diskq.disk_qlock);
			rdc_init_diskq_header(krdc->group,
			    &krdc->group->diskq.disk_hdr);
			SET_QNXTIO(dq, QHEAD(dq));
			mutex_exit(&krdc->group->diskq.disk_qlock);
		}

		if (flag & RDC_ALLREMOTE) {
			/* Tell other node to start logging */
			if (krdc->lsrv && krdc->intf && !krdc->intf->if_down)
				(void) rdc_net_state(krdc->index,
				    CCIO_ENABLELOG);
		}

		if (flag & (RDC_ALLREMOTE | RDC_OTHERREMOTE)) {
			rdc_many_enter(krdc);
			for (p = krdc->group_next; p != krdc;
			    p = p->group_next) {
				if (p->lsrv && krdc->intf &&
				    !krdc->intf->if_down) {
					(void) rdc_net_state(p->index,
					    CCIO_ENABLELOG);
				}
			}
			rdc_many_exit(krdc);
		}

		/* Persist the new state for every enabled member */
		rdc_write_state(urdc);
		for (p = krdc->group_next; p != krdc; p = p->group_next) {
			q = &rdc_u_info[p->index];
			if (!IS_ENABLED(q))
				continue;
			rdc_write_state(q);
		}
	} else {
		/* No point in time is possible, just deal with single set */

		if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
			halt_sync(krdc);
		} else {
			if (rdc_net_getstate(krdc, &sm, &um, &md, TRUE) < 0) {
				rdc_clr_flags(urdc, RDC_SYNCING);
				rdc_set_flags_log(urdc, RDC_LOGGING,
				    "failed to read remote state");

				rdc_write_state(urdc);
				while (rdc_dump_alloc_bufs_cd(krdc->index)
				    == EAGAIN)
					delay(2);
				if ((RDC_IS_DISKQ(krdc->group)) &&
				    (!(flag & RDC_QUEUING))) { /* fail! */
					mutex_enter(QLOCK(dq));
					rdc_init_diskq_header(krdc->group,
					    &krdc->group->diskq.disk_hdr);
					SET_QNXTIO(dq, QHEAD(dq));
					mutex_exit(QLOCK(dq));
				}

				return;
			}
		}

		if (rdc_get_vflags(urdc) & RDC_SYNCING)
			return;

		if (RDC_IS_DISKQ(krdc->group))
			flag_op(urdc, RDC_QUEUING);

		if ((RDC_IS_DISKQ(krdc->group)) &&
		    (!(flag & RDC_QUEUING))) { /* fail! */
			RDC_ZERO_BITREF(krdc);
			mutex_enter(QLOCK(dq));
			rdc_init_diskq_header(krdc->group,
			    &krdc->group->diskq.disk_hdr);
			SET_QNXTIO(dq, QHEAD(dq));
			mutex_exit(QLOCK(dq));
		}

		if (!(rdc_get_vflags(urdc) & RDC_LOGGING)) {
			rdc_set_flags_log(urdc, RDC_LOGGING, why);

			rdc_write_state(urdc);

			while (rdc_dump_alloc_bufs_cd(krdc->index) == EAGAIN)
				delay(2);
			if (!RDC_IS_DISKQ(krdc->group))
				RDC_ZERO_BITREF(krdc);

			rdc_inflwait(krdc->group);
			/*
			 * a little lazy, but neat. recall dump_alloc_bufs to
			 * ensure that the queue pointers & seq are reset
			 * properly after we have waited for inflight stuff
			 */
			while (rdc_dump_alloc_bufs_cd(krdc->index) == EAGAIN)
				delay(2);

			if (flag & RDC_ALLREMOTE) {
				/* Tell other node to start logging */
				if (krdc->lsrv && krdc->intf &&
				    !krdc->intf->if_down) {
					(void) rdc_net_state(krdc->index,
					    CCIO_ENABLELOG);
				}
			}
		}
	}
	/*
	 * just in case any threads were in flight during log cleanup
	 */
	if (RDC_IS_DISKQ(krdc->group)) {
		mutex_enter(QLOCK(dq));
		cv_broadcast(&dq->qfullcv);
		mutex_exit(QLOCK(dq));
	}
}
4838 4839
4839 4840 static int
4840 4841 _rdc_log(rdc_k_info_t *krdc, rdc_set_t *rdc_set, spcs_s_info_t kstatus)
4841 4842 {
4842 4843 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
4843 4844 rdc_srv_t *svp;
4844 4845
4845 4846 rdc_group_enter(krdc);
4846 4847 if (rdc_check(krdc, rdc_set)) {
4847 4848 rdc_group_exit(krdc);
4848 4849 spcs_s_add(kstatus, RDC_EALREADY, rdc_set->primary.file,
4849 4850 rdc_set->secondary.file);
4850 4851 return (RDC_EALREADY);
4851 4852 }
4852 4853
4853 4854 svp = krdc->lsrv;
4854 4855 if (rdc_get_vflags(urdc) & RDC_PRIMARY)
4855 4856 krdc->intf = rdc_add_to_if(svp, &(urdc->primary.addr),
4856 4857 &(urdc->secondary.addr), 1);
4857 4858 else
4858 4859 krdc->intf = rdc_add_to_if(svp, &(urdc->secondary.addr),
4859 4860 &(urdc->primary.addr), 0);
4860 4861
4861 4862 if (!krdc->intf) {
4862 4863 rdc_group_exit(krdc);
4863 4864 spcs_s_add(kstatus, RDC_EADDTOIF, urdc->primary.intf,
4864 4865 urdc->secondary.intf);
4865 4866 return (RDC_EADDTOIF);
4866 4867 }
4867 4868
4868 4869 rdc_group_log(krdc, RDC_FLUSH | RDC_ALLREMOTE, NULL);
4869 4870
4870 4871 if (rdc_get_vflags(urdc) & RDC_SYNCING) {
4871 4872 rdc_group_exit(krdc);
4872 4873 spcs_s_add(kstatus, RDC_ESYNCING, urdc->primary.file);
4873 4874 return (RDC_ESYNCING);
4874 4875 }
4875 4876
4876 4877 rdc_group_exit(krdc);
4877 4878
4878 4879 return (0);
4879 4880 }
4880 4881
4881 4882 static int
4882 4883 rdc_log(rdc_config_t *uparms, spcs_s_info_t kstatus)
4883 4884 {
4884 4885 rdc_k_info_t *krdc;
4885 4886 int rc = 0;
4886 4887 int index;
4887 4888
4888 4889 mutex_enter(&rdc_conf_lock);
4889 4890 index = rdc_lookup_byname(uparms->rdc_set);
4890 4891 if (index >= 0)
4891 4892 krdc = &rdc_k_info[index];
4892 4893 if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
4893 4894 mutex_exit(&rdc_conf_lock);
4894 4895 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
4895 4896 uparms->rdc_set->secondary.file);
4896 4897 return (RDC_EALREADY);
4897 4898 }
4898 4899
4899 4900 set_busy(krdc);
4900 4901 if (krdc->type_flag == 0) {
4901 4902 /* A resume or enable failed */
4902 4903 wakeup_busy(krdc);
4903 4904 mutex_exit(&rdc_conf_lock);
4904 4905 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
4905 4906 uparms->rdc_set->secondary.file);
4906 4907 return (RDC_EALREADY);
4907 4908 }
4908 4909 mutex_exit(&rdc_conf_lock);
4909 4910
4910 4911 rc = _rdc_log(krdc, uparms->rdc_set, kstatus);
4911 4912
4912 4913 mutex_enter(&rdc_conf_lock);
4913 4914 wakeup_busy(krdc);
4914 4915 mutex_exit(&rdc_conf_lock);
4915 4916
4916 4917 return (rc);
4917 4918 }
4918 4919
4919 4920
/*
 * Wait for an in-progress sync on the set named by uparms to finish.
 * Only meaningful on the primary; returns 0 immediately otherwise.
 * Returns 0 when no sync is pending or the sync completed, EIO if the
 * sync failed, or RDC_EALREADY (added to kstatus) when the set cannot
 * be found or is being disabled.  The wait itself is interruptible
 * (cv_wait_sig); a signal simply falls through to the sync_done check.
 */
static int
rdc_wait(rdc_config_t *uparms, spcs_s_info_t kstatus)
{
	rdc_k_info_t *krdc;
	rdc_u_info_t *urdc;
	int index;
	int need_check = 0;

	mutex_enter(&rdc_conf_lock);
	index = rdc_lookup_byname(uparms->rdc_set);
	if (index >= 0)
		krdc = &rdc_k_info[index];
	if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}

	/* Nothing to wait for unless this node is the primary */
	urdc = &rdc_u_info[index];
	if (!(rdc_get_vflags(urdc) & RDC_PRIMARY)) {
		mutex_exit(&rdc_conf_lock);
		return (0);
	}

	set_busy(krdc);
	if (krdc->type_flag == 0) {
		/* A resume or enable failed */
		wakeup_busy(krdc);
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}
	mutex_exit(&rdc_conf_lock);

	rdc_group_enter(krdc);
	if (rdc_check(krdc, uparms->rdc_set)) {
		rdc_group_exit(krdc);
		mutex_enter(&rdc_conf_lock);
		wakeup_busy(krdc);
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}

	/* Not an actively syncing primary: nothing to wait for */
	if ((rdc_get_vflags(urdc) & (RDC_SYNCING | RDC_PRIMARY)) !=
	    (RDC_SYNCING | RDC_PRIMARY)) {
		rdc_group_exit(krdc);
		mutex_enter(&rdc_conf_lock);
		wakeup_busy(krdc);
		mutex_exit(&rdc_conf_lock);
		return (0);
	}
	/*
	 * NOTE(review): at this point RDC_SYNCING is necessarily set
	 * (the combined check above just passed), so need_check is
	 * always 1 here - the test looks redundant; confirm intent.
	 */
	if (rdc_get_vflags(urdc) & RDC_SYNCING) {
		need_check = 1;
	}
	rdc_group_exit(krdc);

	/*
	 * Take net_blk_lock before releasing the busy hold so the sync
	 * completion broadcast on krdc->synccv cannot be missed.
	 */
	mutex_enter(&net_blk_lock);

	mutex_enter(&rdc_conf_lock);
	wakeup_busy(krdc);
	mutex_exit(&rdc_conf_lock);

	(void) cv_wait_sig(&krdc->synccv, &net_blk_lock);

	mutex_exit(&net_blk_lock);
	if (need_check) {
		if (krdc->sync_done == RDC_COMPLETED) {
			return (0);
		} else if (krdc->sync_done == RDC_FAILED) {
			return (EIO);
		}
	}
	return (0);
}
4998 4999
4999 5000
5000 5001 static int
5001 5002 rdc_health(rdc_config_t *uparms, spcs_s_info_t kstatus, int *rvp)
5002 5003 {
5003 5004 rdc_k_info_t *krdc;
5004 5005 rdc_u_info_t *urdc;
5005 5006 int rc = 0;
5006 5007 int index;
5007 5008
5008 5009 mutex_enter(&rdc_conf_lock);
5009 5010 index = rdc_lookup_byname(uparms->rdc_set);
5010 5011 if (index >= 0)
5011 5012 krdc = &rdc_k_info[index];
5012 5013 if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
5013 5014 mutex_exit(&rdc_conf_lock);
5014 5015 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
5015 5016 uparms->rdc_set->secondary.file);
5016 5017 return (RDC_EALREADY);
5017 5018 }
5018 5019
5019 5020 set_busy(krdc);
5020 5021 if (krdc->type_flag == 0) {
5021 5022 /* A resume or enable failed */
5022 5023 wakeup_busy(krdc);
5023 5024 mutex_exit(&rdc_conf_lock);
5024 5025 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
5025 5026 uparms->rdc_set->secondary.file);
5026 5027 return (RDC_EALREADY);
5027 5028 }
5028 5029
5029 5030 mutex_exit(&rdc_conf_lock);
5030 5031
5031 5032 rdc_group_enter(krdc);
5032 5033 if (rdc_check(krdc, uparms->rdc_set)) {
5033 5034 rdc_group_exit(krdc);
5034 5035 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
5035 5036 uparms->rdc_set->secondary.file);
5036 5037 rc = RDC_EALREADY;
5037 5038 goto done;
5038 5039 }
5039 5040
5040 5041 urdc = &rdc_u_info[index];
5041 5042 if (rdc_isactive_if(&(urdc->primary.addr), &(urdc->secondary.addr)))
5042 5043 *rvp = RDC_ACTIVE;
5043 5044 else
5044 5045 *rvp = RDC_INACTIVE;
5045 5046
5046 5047 rdc_group_exit(krdc);
5047 5048
5048 5049 done:
5049 5050 mutex_enter(&rdc_conf_lock);
5050 5051 wakeup_busy(krdc);
5051 5052 mutex_exit(&rdc_conf_lock);
5052 5053
5053 5054 return (rc);
5054 5055 }
5055 5056
5056 5057
5057 5058 static int
5058 5059 rdc_reconfig(rdc_config_t *uparms, spcs_s_info_t kstatus)
5059 5060 {
5060 5061 rdc_k_info_t *krdc;
5061 5062 rdc_u_info_t *urdc;
5062 5063 int rc = -2;
5063 5064 int index;
5064 5065
5065 5066 mutex_enter(&rdc_conf_lock);
5066 5067 index = rdc_lookup_byname(uparms->rdc_set);
5067 5068 if (index >= 0)
5068 5069 krdc = &rdc_k_info[index];
5069 5070 if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
5070 5071 mutex_exit(&rdc_conf_lock);
5071 5072 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
5072 5073 uparms->rdc_set->secondary.file);
5073 5074 return (RDC_EALREADY);
5074 5075 }
5075 5076
5076 5077 urdc = &rdc_u_info[index];
5077 5078 set_busy(krdc);
5078 5079 if (krdc->type_flag == 0) {
5079 5080 /* A resume or enable failed */
5080 5081 wakeup_busy(krdc);
5081 5082 mutex_exit(&rdc_conf_lock);
5082 5083 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
5083 5084 uparms->rdc_set->secondary.file);
5084 5085 return (RDC_EALREADY);
5085 5086 }
5086 5087
5087 5088 mutex_exit(&rdc_conf_lock);
5088 5089
5089 5090 rdc_group_enter(krdc);
5090 5091 if (rdc_check(krdc, uparms->rdc_set)) {
5091 5092 rdc_group_exit(krdc);
5092 5093 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
5093 5094 uparms->rdc_set->secondary.file);
5094 5095 rc = RDC_EALREADY;
5095 5096 goto done;
5096 5097 }
5097 5098 if ((rdc_get_vflags(urdc) & RDC_BMP_FAILED) && (krdc->bitmapfd))
5098 5099 (void) rdc_reset_bitmap(krdc);
5099 5100
5100 5101 /* Move to a new bitmap if necessary */
5101 5102 if (strncmp(urdc->primary.bitmap, uparms->rdc_set->primary.bitmap,
5102 5103 NSC_MAXPATH) != 0) {
5103 5104 if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
5104 5105 rc = rdc_move_bitmap(krdc,
5105 5106 uparms->rdc_set->primary.bitmap);
5106 5107 } else {
5107 5108 (void) strncpy(urdc->primary.bitmap,
5108 5109 uparms->rdc_set->primary.bitmap, NSC_MAXPATH);
5109 5110 /* simulate a succesful rdc_move_bitmap */
5110 5111 rc = 0;
5111 5112 }
5112 5113 }
5113 5114 if (strncmp(urdc->secondary.bitmap, uparms->rdc_set->secondary.bitmap,
5114 5115 NSC_MAXPATH) != 0) {
5115 5116 if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
5116 5117 (void) strncpy(urdc->secondary.bitmap,
5117 5118 uparms->rdc_set->secondary.bitmap, NSC_MAXPATH);
5118 5119 /* simulate a succesful rdc_move_bitmap */
5119 5120 rc = 0;
5120 5121 } else {
5121 5122 rc = rdc_move_bitmap(krdc,
5122 5123 uparms->rdc_set->secondary.bitmap);
5123 5124 }
5124 5125 }
5125 5126 if (rc == -1) {
5126 5127 rdc_group_exit(krdc);
5127 5128 spcs_s_add(kstatus, RDC_EBMPRECONFIG,
5128 5129 uparms->rdc_set->secondary.intf,
5129 5130 uparms->rdc_set->secondary.file);
5130 5131 rc = RDC_EBMPRECONFIG;
5131 5132 goto done;
5132 5133 }
5133 5134
5134 5135 /*
5135 5136 * At this point we fail any other type of reconfig
5136 5137 * if not in logging mode and we did not do a bitmap reconfig
5137 5138 */
5138 5139
5139 5140 if (!(rdc_get_vflags(urdc) & RDC_LOGGING) && rc == -2) {
5140 5141 /* no other changes possible unless logging */
5141 5142 rdc_group_exit(krdc);
5142 5143 spcs_s_add(kstatus, RDC_ENOTLOGGING,
5143 5144 uparms->rdc_set->primary.intf,
5144 5145 uparms->rdc_set->primary.file,
5145 5146 uparms->rdc_set->secondary.intf,
5146 5147 uparms->rdc_set->secondary.file);
5147 5148 rc = RDC_ENOTLOGGING;
5148 5149 goto done;
5149 5150 }
5150 5151 rc = 0;
5151 5152 /* Change direct file if necessary */
5152 5153 if ((rdc_get_vflags(urdc) & RDC_PRIMARY) &&
5153 5154 strncmp(urdc->direct_file, uparms->rdc_set->direct_file,
5154 5155 NSC_MAXPATH)) {
5155 5156 if (!(rdc_get_vflags(urdc) & RDC_LOGGING)) {
5156 5157 rdc_group_exit(krdc);
5157 5158 goto notlogging;
5158 5159 }
5159 5160 rdc_close_direct(krdc);
5160 5161 (void) strncpy(urdc->direct_file, uparms->rdc_set->direct_file,
5161 5162 NSC_MAXPATH);
5162 5163
5163 5164 if (urdc->direct_file[0]) {
5164 5165 if (rdc_open_direct(krdc) == NULL)
5165 5166 rdc_set_flags(urdc, RDC_FCAL_FAILED);
5166 5167 else
5167 5168 rdc_clr_flags(urdc, RDC_FCAL_FAILED);
5168 5169 }
5169 5170 }
5170 5171
5171 5172 rdc_group_exit(krdc);
5172 5173
5173 5174 /* Change group if necessary */
5174 5175 if (strncmp(urdc->group_name, uparms->rdc_set->group_name,
5175 5176 NSC_MAXPATH) != 0) {
5176 5177 char orig_group[NSC_MAXPATH];
5177 5178 if (!(rdc_get_vflags(urdc) & RDC_LOGGING))
5178 5179 goto notlogging;
5179 5180 mutex_enter(&rdc_conf_lock);
5180 5181
5181 5182 (void) strncpy(orig_group, urdc->group_name, NSC_MAXPATH);
5182 5183 (void) strncpy(urdc->group_name, uparms->rdc_set->group_name,
5183 5184 NSC_MAXPATH);
5184 5185
5185 5186 rc = change_group(krdc, uparms->options);
5186 5187 if (rc == RDC_EQNOADD) {
5187 5188 mutex_exit(&rdc_conf_lock);
5188 5189 spcs_s_add(kstatus, RDC_EQNOADD,
5189 5190 uparms->rdc_set->disk_queue);
5190 5191 goto done;
5191 5192 } else if (rc < 0) {
5192 5193 (void) strncpy(urdc->group_name, orig_group,
5193 5194 NSC_MAXPATH);
5194 5195 mutex_exit(&rdc_conf_lock);
5195 5196 spcs_s_add(kstatus, RDC_EGROUP,
5196 5197 urdc->primary.intf, urdc->primary.file,
5197 5198 urdc->secondary.intf, urdc->secondary.file,
5198 5199 uparms->rdc_set->group_name);
5199 5200 rc = RDC_EGROUP;
5200 5201 goto done;
5201 5202 }
5202 5203
5203 5204 mutex_exit(&rdc_conf_lock);
5204 5205
5205 5206 if (rc >= 0) {
5206 5207 if (!(rdc_get_vflags(urdc) & RDC_LOGGING))
5207 5208 goto notlogging;
5208 5209 if (uparms->options & RDC_OPT_ASYNC) {
5209 5210 mutex_enter(&rdc_conf_lock);
5210 5211 krdc->type_flag |= RDC_ASYNCMODE;
5211 5212 mutex_exit(&rdc_conf_lock);
5212 5213 if (uparms->options & RDC_OPT_PRIMARY)
5213 5214 krdc->bitmap_ref =
5214 5215 (uchar_t *)kmem_zalloc(
5215 5216 (krdc->bitmap_size * BITS_IN_BYTE *
5216 5217 BMAP_REF_PREF_SIZE), KM_SLEEP);
5217 5218 rdc_group_enter(krdc);
5218 5219 rdc_set_flags(urdc, RDC_ASYNC);
5219 5220 rdc_group_exit(krdc);
5220 5221 } else {
5221 5222 mutex_enter(&rdc_conf_lock);
5222 5223 krdc->type_flag &= ~RDC_ASYNCMODE;
5223 5224 mutex_exit(&rdc_conf_lock);
5224 5225 rdc_group_enter(krdc);
5225 5226 rdc_clr_flags(urdc, RDC_ASYNC);
5226 5227 rdc_group_exit(krdc);
5227 5228 if (krdc->bitmap_ref) {
5228 5229 kmem_free(krdc->bitmap_ref,
5229 5230 (krdc->bitmap_size * BITS_IN_BYTE *
5230 5231 BMAP_REF_PREF_SIZE));
5231 5232 krdc->bitmap_ref = NULL;
5232 5233 }
5233 5234 }
5234 5235 }
5235 5236 } else {
5236 5237 if ((((uparms->options & RDC_OPT_ASYNC) == 0) &&
5237 5238 ((krdc->type_flag & RDC_ASYNCMODE) != 0)) ||
5238 5239 (((uparms->options & RDC_OPT_ASYNC) != 0) &&
5239 5240 ((krdc->type_flag & RDC_ASYNCMODE) == 0))) {
5240 5241 if (!(rdc_get_vflags(urdc) & RDC_LOGGING))
5241 5242 goto notlogging;
5242 5243
5243 5244 if (krdc->group->count > 1) {
5244 5245 spcs_s_add(kstatus, RDC_EGROUPMODE);
5245 5246 rc = RDC_EGROUPMODE;
5246 5247 goto done;
5247 5248 }
5248 5249 }
5249 5250
5250 5251 /* Switch sync/async if necessary */
5251 5252 if (krdc->group->count == 1) {
5252 5253 /* Only member of group. Can change sync/async */
5253 5254 if (((uparms->options & RDC_OPT_ASYNC) == 0) &&
5254 5255 ((krdc->type_flag & RDC_ASYNCMODE) != 0)) {
5255 5256 if (!(rdc_get_vflags(urdc) & RDC_LOGGING))
5256 5257 goto notlogging;
5257 5258 /* switch to sync */
5258 5259 mutex_enter(&rdc_conf_lock);
5259 5260 krdc->type_flag &= ~RDC_ASYNCMODE;
5260 5261 if (RDC_IS_DISKQ(krdc->group)) {
5261 5262 krdc->group->flags &= ~RDC_DISKQUE;
5262 5263 krdc->group->flags |= RDC_MEMQUE;
5263 5264 rdc_unintercept_diskq(krdc->group);
5264 5265 mutex_enter(&krdc->group->diskqmutex);
5265 5266 rdc_close_diskq(krdc->group);
5266 5267 mutex_exit(&krdc->group->diskqmutex);
5267 5268 bzero(&urdc->disk_queue,
5268 5269 sizeof (urdc->disk_queue));
5269 5270 }
5270 5271 mutex_exit(&rdc_conf_lock);
5271 5272 rdc_group_enter(krdc);
5272 5273 rdc_clr_flags(urdc, RDC_ASYNC);
5273 5274 rdc_group_exit(krdc);
5274 5275 if (krdc->bitmap_ref) {
5275 5276 kmem_free(krdc->bitmap_ref,
5276 5277 (krdc->bitmap_size * BITS_IN_BYTE *
5277 5278 BMAP_REF_PREF_SIZE));
5278 5279 krdc->bitmap_ref = NULL;
5279 5280 }
5280 5281 } else if (((uparms->options & RDC_OPT_ASYNC) != 0) &&
5281 5282 ((krdc->type_flag & RDC_ASYNCMODE) == 0)) {
5282 5283 if (!(rdc_get_vflags(urdc) & RDC_LOGGING))
5283 5284 goto notlogging;
5284 5285 /* switch to async */
5285 5286 mutex_enter(&rdc_conf_lock);
5286 5287 krdc->type_flag |= RDC_ASYNCMODE;
5287 5288 mutex_exit(&rdc_conf_lock);
5288 5289 if (uparms->options & RDC_OPT_PRIMARY)
5289 5290 krdc->bitmap_ref =
5290 5291 (uchar_t *)kmem_zalloc(
5291 5292 (krdc->bitmap_size * BITS_IN_BYTE *
5292 5293 BMAP_REF_PREF_SIZE), KM_SLEEP);
5293 5294 rdc_group_enter(krdc);
5294 5295 rdc_set_flags(urdc, RDC_ASYNC);
5295 5296 rdc_group_exit(krdc);
5296 5297 }
5297 5298 }
5298 5299 }
5299 5300 /* Reverse concept of primary and secondary */
5300 5301 if ((uparms->options & RDC_OPT_REVERSE_ROLE) != 0) {
5301 5302 rdc_set_t rdc_set;
5302 5303 struct netbuf paddr, saddr;
5303 5304
5304 5305 mutex_enter(&rdc_conf_lock);
5305 5306
5306 5307 /*
5307 5308 * Disallow role reversal for advanced configurations
5308 5309 */
5309 5310
5310 5311 if (IS_MANY(krdc) || IS_MULTI(krdc)) {
5311 5312 mutex_exit(&rdc_conf_lock);
5312 5313 spcs_s_add(kstatus, RDC_EMASTER, urdc->primary.intf,
5313 5314 urdc->primary.file, urdc->secondary.intf,
5314 5315 urdc->secondary.file);
5315 5316 return (RDC_EMASTER);
5316 5317 }
5317 5318 bzero((void *) &rdc_set, sizeof (rdc_set_t));
5318 5319 dup_rdc_netbuf(&urdc->primary.addr, &saddr);
5319 5320 dup_rdc_netbuf(&urdc->secondary.addr, &paddr);
5320 5321 free_rdc_netbuf(&urdc->primary.addr);
5321 5322 free_rdc_netbuf(&urdc->secondary.addr);
5322 5323 dup_rdc_netbuf(&saddr, &urdc->secondary.addr);
5323 5324 dup_rdc_netbuf(&paddr, &urdc->primary.addr);
5324 5325 free_rdc_netbuf(&paddr);
5325 5326 free_rdc_netbuf(&saddr);
5326 5327 /* copy primary parts of urdc to rdc_set field by field */
5327 5328 (void) strncpy(rdc_set.primary.intf, urdc->primary.intf,
5328 5329 MAX_RDC_HOST_SIZE);
5329 5330 (void) strncpy(rdc_set.primary.file, urdc->primary.file,
5330 5331 NSC_MAXPATH);
5331 5332 (void) strncpy(rdc_set.primary.bitmap, urdc->primary.bitmap,
5332 5333 NSC_MAXPATH);
5333 5334
5334 5335 /* Now overwrite urdc primary */
5335 5336 (void) strncpy(urdc->primary.intf, urdc->secondary.intf,
5336 5337 MAX_RDC_HOST_SIZE);
5337 5338 (void) strncpy(urdc->primary.file, urdc->secondary.file,
5338 5339 NSC_MAXPATH);
5339 5340 (void) strncpy(urdc->primary.bitmap, urdc->secondary.bitmap,
5340 5341 NSC_MAXPATH);
5341 5342
5342 5343 /* Now ovwewrite urdc secondary */
5343 5344 (void) strncpy(urdc->secondary.intf, rdc_set.primary.intf,
5344 5345 MAX_RDC_HOST_SIZE);
5345 5346 (void) strncpy(urdc->secondary.file, rdc_set.primary.file,
5346 5347 NSC_MAXPATH);
5347 5348 (void) strncpy(urdc->secondary.bitmap, rdc_set.primary.bitmap,
5348 5349 NSC_MAXPATH);
5349 5350
5350 5351 if (rdc_get_vflags(urdc) & RDC_PRIMARY) {
5351 5352 rdc_clr_flags(urdc, RDC_PRIMARY);
5352 5353 if (krdc->intf) {
5353 5354 krdc->intf->issecondary = 1;
5354 5355 krdc->intf->isprimary = 0;
5355 5356 krdc->intf->if_down = 1;
5356 5357 }
5357 5358 } else {
5358 5359 rdc_set_flags(urdc, RDC_PRIMARY);
5359 5360 if (krdc->intf) {
5360 5361 krdc->intf->issecondary = 0;
5361 5362 krdc->intf->isprimary = 1;
5362 5363 krdc->intf->if_down = 1;
5363 5364 }
5364 5365 }
5365 5366
5366 5367 if ((rdc_get_vflags(urdc) & RDC_PRIMARY) &&
5367 5368 ((krdc->type_flag & RDC_ASYNCMODE) != 0)) {
5368 5369 if (!krdc->bitmap_ref)
5369 5370 krdc->bitmap_ref =
5370 5371 (uchar_t *)kmem_zalloc((krdc->bitmap_size *
5371 5372 BITS_IN_BYTE * BMAP_REF_PREF_SIZE),
5372 5373 KM_SLEEP);
5373 5374 if (krdc->bitmap_ref == NULL) {
5374 5375 cmn_err(CE_WARN,
5375 5376 "!rdc_reconfig: bitmap_ref alloc %"
5376 5377 NSC_SZFMT " failed",
5377 5378 krdc->bitmap_size * BITS_IN_BYTE *
5378 5379 BMAP_REF_PREF_SIZE);
5379 5380 mutex_exit(&rdc_conf_lock);
5380 5381 return (-1);
5381 5382 }
5382 5383 }
5383 5384
5384 5385 if ((rdc_get_vflags(urdc) & RDC_PRIMARY) &&
5385 5386 (rdc_get_vflags(urdc) & RDC_SYNC_NEEDED)) {
5386 5387 /* Primary, so reverse sync needed */
5387 5388 rdc_many_enter(krdc);
5388 5389 rdc_clr_flags(urdc, RDC_SYNC_NEEDED);
5389 5390 rdc_set_mflags(urdc, RDC_RSYNC_NEEDED);
5390 5391 rdc_many_exit(krdc);
5391 5392 } else if (rdc_get_vflags(urdc) & RDC_RSYNC_NEEDED) {
5392 5393 /* Secondary, so forward sync needed */
5393 5394 rdc_many_enter(krdc);
5394 5395 rdc_clr_flags(urdc, RDC_RSYNC_NEEDED);
5395 5396 rdc_set_flags(urdc, RDC_SYNC_NEEDED);
5396 5397 rdc_many_exit(krdc);
5397 5398 }
5398 5399
5399 5400 /*
5400 5401 * rewrite bitmap header
5401 5402 */
5402 5403 rdc_write_state(urdc);
5403 5404 mutex_exit(&rdc_conf_lock);
5404 5405 }
5405 5406
5406 5407 done:
5407 5408 mutex_enter(&rdc_conf_lock);
5408 5409 wakeup_busy(krdc);
5409 5410 mutex_exit(&rdc_conf_lock);
5410 5411
5411 5412 return (rc);
5412 5413
5413 5414 notlogging:
5414 5415 /* no other changes possible unless logging */
5415 5416 mutex_enter(&rdc_conf_lock);
5416 5417 wakeup_busy(krdc);
5417 5418 mutex_exit(&rdc_conf_lock);
5418 5419 spcs_s_add(kstatus, RDC_ENOTLOGGING, urdc->primary.intf,
5419 5420 urdc->primary.file, urdc->secondary.intf,
5420 5421 urdc->secondary.file);
5421 5422 return (RDC_ENOTLOGGING);
5422 5423 }
5423 5424
5424 5425 static int
5425 5426 rdc_reset(rdc_config_t *uparms, spcs_s_info_t kstatus)
5426 5427 {
5427 5428 rdc_k_info_t *krdc;
5428 5429 rdc_u_info_t *urdc;
5429 5430 int rc = 0;
5430 5431 int index;
5431 5432 int cleared_error = 0;
5432 5433
5433 5434 mutex_enter(&rdc_conf_lock);
5434 5435 index = rdc_lookup_byname(uparms->rdc_set);
5435 5436 if (index >= 0)
5436 5437 krdc = &rdc_k_info[index];
5437 5438 if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
5438 5439 mutex_exit(&rdc_conf_lock);
5439 5440 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
5440 5441 uparms->rdc_set->secondary.file);
5441 5442 return (RDC_EALREADY);
5442 5443 }
5443 5444
5444 5445 urdc = &rdc_u_info[index];
5445 5446 set_busy(krdc);
5446 5447 if (krdc->type_flag == 0) {
5447 5448 /* A resume or enable failed */
5448 5449 wakeup_busy(krdc);
5449 5450 mutex_exit(&rdc_conf_lock);
5450 5451 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
5451 5452 uparms->rdc_set->secondary.file);
5452 5453 return (RDC_EALREADY);
5453 5454 }
5454 5455
5455 5456 mutex_exit(&rdc_conf_lock);
5456 5457
5457 5458 rdc_group_enter(krdc);
5458 5459 if (rdc_check(krdc, uparms->rdc_set)) {
5459 5460 spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
5460 5461 uparms->rdc_set->secondary.file);
5461 5462 rc = RDC_EALREADY;
5462 5463 goto done;
5463 5464 }
5464 5465
5465 5466 if ((rdc_get_vflags(urdc) & RDC_BMP_FAILED) && (krdc->bitmapfd)) {
5466 5467 if (rdc_reset_bitmap(krdc) == 0)
5467 5468 cleared_error++;
5468 5469 }
5469 5470
5470 5471 /* Fix direct file if necessary */
5471 5472 if ((rdc_get_vflags(urdc) & RDC_PRIMARY) && urdc->direct_file[0]) {
5472 5473 if (rdc_open_direct(krdc) == NULL)
5473 5474 rdc_set_flags(urdc, RDC_FCAL_FAILED);
5474 5475 else {
5475 5476 rdc_clr_flags(urdc, RDC_FCAL_FAILED);
5476 5477 cleared_error++;
5477 5478 }
5478 5479 }
5479 5480
5480 5481 if ((rdc_get_vflags(urdc) & RDC_VOL_FAILED)) {
5481 5482 rdc_many_enter(krdc);
5482 5483 rdc_clr_flags(urdc, RDC_VOL_FAILED);
5483 5484 cleared_error++;
5484 5485 rdc_many_exit(krdc);
5485 5486 }
5486 5487
5487 5488 if (cleared_error) {
5488 5489 /* cleared an error so we should be in logging mode */
5489 5490 rdc_set_flags_log(urdc, RDC_LOGGING, "set reset");
5490 5491 }
5491 5492 rdc_group_exit(krdc);
5492 5493
5493 5494 if ((rdc_get_vflags(urdc) & RDC_DISKQ_FAILED))
5494 5495 rdc_unfail_diskq(krdc);
5495 5496
5496 5497 done:
5497 5498 mutex_enter(&rdc_conf_lock);
5498 5499 wakeup_busy(krdc);
5499 5500 mutex_exit(&rdc_conf_lock);
5500 5501
5501 5502 return (rc);
5502 5503 }
5503 5504
5504 5505
/*
 * rdc_tunable
 *
 * Apply tunable-parameter changes (maxqfbas, maxqitems, asyncthr,
 * autosync, and the disk queue's QNOBLOCK state) to the set named in
 * uparms, propagating each changed value to every other set in the
 * same group.  Returns 0 on success or an RDC_E* status (also added
 * to kstatus) on failure.  Negative/zero tunables in the request mean
 * "leave unchanged" (autosync uses < 0 for that).
 */
static int
rdc_tunable(rdc_config_t *uparms, spcs_s_info_t kstatus)
{
	rdc_k_info_t *krdc;
	rdc_u_info_t *urdc;
	rdc_k_info_t *p;	/* group-walk cursor (kernel info) */
	rdc_u_info_t *q;	/* group-walk cursor (user info) */
	int rc = 0;
	int index;

	mutex_enter(&rdc_conf_lock);
	index = rdc_lookup_byname(uparms->rdc_set);
	if (index >= 0)
		krdc = &rdc_k_info[index];
	if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}

	urdc = &rdc_u_info[index];
	/* hold the set against a concurrent disable while we work on it */
	set_busy(krdc);
	if (krdc->type_flag == 0) {
		/* A resume or enable failed */
		wakeup_busy(krdc);
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}

	mutex_exit(&rdc_conf_lock);

	/* group lock held from here until done: */
	rdc_group_enter(krdc);
	if (rdc_check(krdc, uparms->rdc_set)) {
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		rc = RDC_EALREADY;
		goto done;
	}

	if (uparms->rdc_set->maxqfbas > 0) {
		urdc->maxqfbas = uparms->rdc_set->maxqfbas;
		rdc_write_state(urdc);
		/* propagate the new value to the rest of the group */
		for (p = krdc->group_next; p != krdc; p = p->group_next) {
			q = &rdc_u_info[p->index];
			q->maxqfbas = urdc->maxqfbas;
			rdc_write_state(q);
		}
	}

	if (uparms->rdc_set->maxqitems > 0) {
		urdc->maxqitems = uparms->rdc_set->maxqitems;
		rdc_write_state(urdc);
		for (p = krdc->group_next; p != krdc; p = p->group_next) {
			q = &rdc_u_info[p->index];
			q->maxqitems = urdc->maxqitems;
			rdc_write_state(q);
		}
	}

	if (uparms->options & RDC_OPT_SET_QNOBLOCK) {
		disk_queue *que;

		/* QNOBLOCK only makes sense with a disk queue */
		if (!RDC_IS_DISKQ(krdc->group)) {
			spcs_s_add(kstatus, RDC_EQNOQUEUE, urdc->primary.intf,
			    urdc->primary.file, urdc->secondary.intf,
			    urdc->secondary.file);
			rc = RDC_EQNOQUEUE;
			goto done;
		}

		que = &krdc->group->diskq;
		mutex_enter(QLOCK(que));
		SET_QSTATE(que, RDC_QNOBLOCK);
		/* queue will fail if this fails */
		(void) rdc_stamp_diskq(krdc, 0, RDC_GROUP_LOCKED);
		mutex_exit(QLOCK(que));

	}

	if (uparms->options & RDC_OPT_CLR_QNOBLOCK) {
		disk_queue *que;

		if (!RDC_IS_DISKQ(krdc->group)) {
			spcs_s_add(kstatus, RDC_EQNOQUEUE, urdc->primary.intf,
			    urdc->primary.file, urdc->secondary.intf,
			    urdc->secondary.file);
			rc = RDC_EQNOQUEUE;
			goto done;
		}
		que = &krdc->group->diskq;
		mutex_enter(QLOCK(que));
		CLR_QSTATE(que, RDC_QNOBLOCK);
		/* queue will fail if this fails */
		(void) rdc_stamp_diskq(krdc, 0, RDC_GROUP_LOCKED);
		mutex_exit(QLOCK(que));

	}
	if (uparms->rdc_set->asyncthr > 0) {
		urdc->asyncthr = uparms->rdc_set->asyncthr;
		rdc_write_state(urdc);
		for (p = krdc->group_next; p != krdc; p = p->group_next) {
			q = &rdc_u_info[p->index];
			q->asyncthr = urdc->asyncthr;
			rdc_write_state(q);
		}
	}

	if (uparms->rdc_set->autosync >= 0) {
		/* normalize any nonzero request to 1 */
		if (uparms->rdc_set->autosync == 0)
			urdc->autosync = 0;
		else
			urdc->autosync = 1;

		rdc_write_state(urdc);

		/* Changed autosync, so update rest of the group */

		for (p = krdc->group_next; p != krdc; p = p->group_next) {
			q = &rdc_u_info[p->index];
			q->autosync = urdc->autosync;
			rdc_write_state(q);
		}
	}

done:
	rdc_group_exit(krdc);

	mutex_enter(&rdc_conf_lock);
	wakeup_busy(krdc);
	mutex_exit(&rdc_conf_lock);

	return (rc);
}
5641 5642
5642 -/*
5643 - * Yet another standard thing that is not standard ...
5644 - */
5645 -#ifndef offsetof
5646 -#define offsetof(s, m) ((size_t)(&((s *)0)->m))
5647 -#endif
5648 -
/*
 * rdc_status
 *
 * Copy the current state (the rdc_u_info / rdc_set) of the set named
 * in uparms out to the user's rdc_config structure at "arg",
 * honouring the caller's data model (ILP32 vs native).  The disk
 * queue's QNOBLOCK state is temporarily merged into urdc->flags for
 * the duration of the copyout only.  Returns 0 on success,
 * RDC_EALREADY if the set cannot be found or does not match, or
 * EFAULT if the copyout fails.
 */
static int
rdc_status(void *arg, int mode, rdc_config_t *uparms, spcs_s_info_t kstatus)
{
	rdc_k_info_t *krdc;
	rdc_u_info_t *urdc;
	disk_queue *dqp;
	int rc = 0;
	int index;
	char *ptr;		/* user address of the embedded rdc_set */
	extern int rdc_status_copy32(const void *, void *, size_t, int);

	mutex_enter(&rdc_conf_lock);
	index = rdc_lookup_byname(uparms->rdc_set);
	if (index >= 0)
		krdc = &rdc_k_info[index];
	if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}

	/* hold the set against a concurrent disable while copying */
	set_busy(krdc);
	if (krdc->type_flag == 0) {
		/* A resume or enable failed */
		wakeup_busy(krdc);
		mutex_exit(&rdc_conf_lock);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		return (RDC_EALREADY);
	}

	mutex_exit(&rdc_conf_lock);

	rdc_group_enter(krdc);
	if (rdc_check(krdc, uparms->rdc_set)) {
		rdc_group_exit(krdc);
		spcs_s_add(kstatus, RDC_EALREADY, uparms->rdc_set->primary.file,
		    uparms->rdc_set->secondary.file);
		rc = RDC_EALREADY;
		goto done;
	}

	urdc = &rdc_u_info[index];

	/*
	 * sneak out qstate in urdc->flags
	 * this is harmless because it's value is not used
	 * in urdc->flags. the real qstate is kept in
	 * group->diskq->disk_hdr.h.state
	 */
	if (RDC_IS_DISKQ(krdc->group)) {
		dqp = &krdc->group->diskq;
		if (IS_QSTATE(dqp, RDC_QNOBLOCK))
			urdc->flags |= RDC_QNOBLOCK;
	}

	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		/* 32-bit caller: use the rdc_config32/rdc_set32 layout */
		ptr = (char *)arg + offsetof(struct rdc_config32, rdc_set);
		rc = rdc_status_copy32(urdc, ptr, sizeof (struct rdc_set32),
		    mode);
	} else {
		ptr = (char *)arg + offsetof(struct rdc_config, rdc_set);
		rc = ddi_copyout(urdc, ptr, sizeof (struct rdc_set), mode);
	}
	/* clear out qstate from flags */
	urdc->flags &= ~RDC_QNOBLOCK;

	if (rc)
		rc = EFAULT;

	rdc_group_exit(krdc);
done:
	mutex_enter(&rdc_conf_lock);
	wakeup_busy(krdc);
	mutex_exit(&rdc_conf_lock);

	return (rc);
}
5728 5722
/*
 * Overwrite the bitmap with one supplied by the
 * user.
 * Copy into all bitmaps that are tracking this volume.
 *
 * op selects replace (RDC_BITMAPSET) versus OR-in; bmapaddr/bmapsz
 * describe the user buffer and off is the byte offset into the
 * bitmap.  Both off and bmapsz must be FBA aligned.  The set is
 * located by secondary host/device.  Returns 0 or an errno.
 */

int
rdc_bitmapset(int op, char *sechost, char *secdev, void *bmapaddr, int bmapsz,
    nsc_off_t off, int mode)
{
	int rc;
	rdc_k_info_t *krdc;
	int *indexvec;		/* indices of all sets sharing this volume */
	int index;
	int indexit;		/* number of valid entries in indexvec */
	kmutex_t **grouplocks;	/* group locks currently held */
	int i;
	int groupind;		/* number of valid entries in grouplocks */

	if (off % FBA_SIZE(1)) {
		/* Must be modulo FBA */
		cmn_err(CE_WARN, "!bitmapset: Offset is not on an FBA "
		    "boundary %llu", (unsigned long long)off);
		return (EINVAL);
	}
	if (bmapsz % FBA_SIZE(1)) {
		/* Must be modulo FBA */
		cmn_err(CE_WARN, "!bitmapset: Size is not on an FBA "
		    "boundary %d", bmapsz);
		return (EINVAL);
	}

	mutex_enter(&rdc_conf_lock);
	index = rdc_lookup_byhostdev(sechost, secdev);
	if (index >= 0) {
		krdc = &rdc_k_info[index];
	}
	if (index < 0 || (krdc->type_flag & RDC_DISABLEPEND)) {
		rc = ENODEV;
		mutex_exit(&rdc_conf_lock);
		return (rc);
	}
	indexvec = kmem_alloc(rdc_max_sets * sizeof (int), KM_SLEEP);
	grouplocks = kmem_alloc(rdc_max_sets * sizeof (kmutex_t *), KM_SLEEP);

	/*
	 * I now have this set, and I want to take the group
	 * lock on it, and all the group locks of all the
	 * sets on the many and multi-hop links.
	 * I have to take the many lock while traversing the
	 * many/multi links.
	 * I think I also need to set the busy count on this
	 * set, otherwise when I drop the conf_lock, what
	 * will stop some other process from coming in and
	 * issuing a disable?
	 */
	set_busy(krdc);
	mutex_exit(&rdc_conf_lock);

	/*
	 * Lock acquisition uses mutex_tryenter with full back-off:
	 * on any contention we drop everything and restart, which
	 * avoids deadlock against other multi-lock holders.
	 */
retrylock:
	groupind = 0;
	indexit = 0;
	rdc_many_enter(krdc);
	/*
	 * Take this initial sets group lock first.
	 */
	if (!mutex_tryenter(&krdc->group->lock)) {
		rdc_many_exit(krdc);
		goto retrylock;
	}

	grouplocks[groupind] = &krdc->group->lock;
	groupind++;

	rc = rdc_checkforbitmap(index, off + bmapsz);
	if (rc) {
		goto done;
	}
	indexvec[indexit] = index;
	indexit++;
	if (IS_MANY(krdc)) {
		rdc_k_info_t *ktmp;

		/* walk the one-to-many ring collecting sets and locks */
		for (ktmp = krdc->many_next; ktmp != krdc;
		    ktmp = ktmp->many_next) {
			/*
			 * attempt to take the group lock,
			 * if we don't already have it.
			 */
			if (ktmp->group == NULL) {
				rc = ENODEV;
				goto done;
			}
			for (i = 0; i < groupind; i++) {
				if (grouplocks[i] == &ktmp->group->lock)
					/* already have the group lock */
					break;
			}
			/*
			 * didn't find our lock in our collection,
			 * attempt to take group lock.
			 */
			if (i >= groupind) {
				if (!mutex_tryenter(&ktmp->group->lock)) {
					/* contention: back off and restart */
					for (i = 0; i < groupind; i++) {
						mutex_exit(grouplocks[i]);
					}
					rdc_many_exit(krdc);
					goto retrylock;
				}
				grouplocks[groupind] = &ktmp->group->lock;
				groupind++;
			}
			rc = rdc_checkforbitmap(ktmp->index, off + bmapsz);
			if (rc == 0) {
				indexvec[indexit] = ktmp->index;
				indexit++;
			} else {
				goto done;
			}
		}
	}
	if (IS_MULTI(krdc)) {
		rdc_k_info_t *kmulti = krdc->multi_next;

		if (kmulti->group == NULL) {
			rc = ENODEV;
			goto done;
		}
		/*
		 * This can't be in our group already.
		 */
		if (!mutex_tryenter(&kmulti->group->lock)) {
			for (i = 0; i < groupind; i++) {
				mutex_exit(grouplocks[i]);
			}
			rdc_many_exit(krdc);
			goto retrylock;
		}
		grouplocks[groupind] = &kmulti->group->lock;
		groupind++;

		rc = rdc_checkforbitmap(kmulti->index, off + bmapsz);
		if (rc == 0) {
			indexvec[indexit] = kmulti->index;
			indexit++;
		} else {
			goto done;
		}
	}
	/* every set validated and every group lock held; do the copy */
	rc = rdc_installbitmap(op, bmapaddr, bmapsz, off, mode, indexvec,
	    indexit);
done:
	for (i = 0; i < groupind; i++) {
		mutex_exit(grouplocks[i]);
	}
	rdc_many_exit(krdc);
	mutex_enter(&rdc_conf_lock);
	wakeup_busy(krdc);
	mutex_exit(&rdc_conf_lock);
	kmem_free(indexvec, rdc_max_sets * sizeof (int));
	kmem_free(grouplocks, rdc_max_sets * sizeof (kmutex_t *));
	return (rc);
}
5893 5887
5894 5888 static int
5895 5889 rdc_checkforbitmap(int index, nsc_off_t limit)
5896 5890 {
5897 5891 rdc_k_info_t *krdc;
5898 5892 rdc_u_info_t *urdc;
5899 5893
5900 5894 krdc = &rdc_k_info[index];
5901 5895 urdc = &rdc_u_info[index];
5902 5896
5903 5897 if (!IS_ENABLED(urdc)) {
5904 5898 return (EIO);
5905 5899 }
5906 5900 if (!(rdc_get_vflags(urdc) & RDC_LOGGING)) {
5907 5901 return (ENXIO);
5908 5902 }
5909 5903 if (krdc->dcio_bitmap == NULL) {
5910 5904 cmn_err(CE_WARN, "!checkforbitmap: No bitmap for set (%s:%s)",
5911 5905 urdc->secondary.intf, urdc->secondary.file);
5912 5906 return (ENOENT);
5913 5907 }
5914 5908 if (limit > krdc->bitmap_size) {
5915 5909 cmn_err(CE_WARN, "!checkbitmap: Bitmap exceeded, "
5916 5910 "incore %" NSC_SZFMT " user supplied %" NSC_SZFMT
5917 5911 " for set (%s:%s)", krdc->bitmap_size,
5918 5912 limit, urdc->secondary.intf, urdc->secondary.file);
5919 5913 return (ENOSPC);
5920 5914 }
5921 5915 return (0);
5922 5916 }
5923 5917
5924 5918
5925 5919
/*
 * Copy the user supplied bitmap to this set.
 *
 * The user buffer (bmapaddr/bmapsz, applied at bitmap byte offset
 * "off") is pulled in at most RDC_MAXDATA bytes at a time and applied
 * to every set in vec[0..veccnt-1]: replacing the in-core bitmap when
 * op == RDC_BITMAPSET, otherwise OR-ed into it.  Dirtied bitmap FBAs
 * are written through to the bitmap device when the set has
 * bitmap_write enabled.  Caller (rdc_bitmapset) holds the relevant
 * group locks.  Returns 0, EFAULT on copyin failure, or a bitmap
 * write error.
 */
static int
rdc_installbitmap(int op, void *bmapaddr, int bmapsz,
    nsc_off_t off, int mode, int *vec, int veccnt)
{
	int rc;
	nsc_off_t sfba;		/* first bitmap FBA touched by this chunk */
	nsc_off_t efba;		/* FBA one past the last touched */
	nsc_off_t fba;
	void *ormem = NULL;	/* kernel staging buffer for user data */
	int len;		/* bytes in the current chunk */
	int left;		/* bytes still to process */
	int copied;		/* bytes processed so far */
	int index;
	rdc_k_info_t *krdc;
	rdc_u_info_t *urdc;

	rc = 0;
	ormem = kmem_alloc(RDC_MAXDATA, KM_SLEEP);
	left = bmapsz;
	copied = 0;
	while (left > 0) {
		if (left > RDC_MAXDATA) {
			len = RDC_MAXDATA;
		} else {
			len = left;
		}
		if (ddi_copyin((char *)bmapaddr + copied, ormem,
		    len, mode)) {
			cmn_err(CE_WARN, "!installbitmap: Copyin failed");
			rc = EFAULT;
			goto out;
		}
		sfba = FBA_NUM(off + copied);
		efba = FBA_NUM(off + copied + len);
		for (index = 0; index < veccnt; index++) {
			krdc = &rdc_k_info[vec[index]];
			urdc = &rdc_u_info[vec[index]];

			/* bmapmutex guards the in-core bitmap and bits_set */
			mutex_enter(&krdc->bmapmutex);
			if (op == RDC_BITMAPSET) {
				bcopy(ormem, krdc->dcio_bitmap + off + copied,
				    len);
			} else {
				rdc_lor(ormem,
				    krdc->dcio_bitmap + off + copied, len);
			}
			/*
			 * Maybe this should be just done once outside of
			 * the the loop? (Less work, but leaves a window
			 * where the bits_set doesn't match the bitmap).
			 */
			urdc->bits_set = RDC_COUNT_BITMAP(krdc);
			mutex_exit(&krdc->bmapmutex);
			if (krdc->bitmap_write > 0) {
				/* flush each dirtied FBA to the bitmap dev */
				for (fba = sfba; fba < efba; fba++) {
					if (rc = rdc_write_bitmap_fba(krdc,
					    fba)) {

						cmn_err(CE_WARN,
						    "!installbitmap: "
						    "write_bitmap_fba failed "
						    "on fba number %" NSC_SZFMT
						    " set %s:%s", fba,
						    urdc->secondary.intf,
						    urdc->secondary.file);
						goto out;
					}
				}
			}
		}
		copied += len;
		left -= len;
	}
out:
	kmem_free(ormem, RDC_MAXDATA);
	return (rc);
}
6006 6000
/*
 * _rdc_config
 *
 * Main RDC configuration ioctl handler.  Copies the user's rdc_config
 * request in (handling ILP32/LP64 data models via the STRUCT_* macros),
 * rebuilds a native 'struct rdc_config' (uap) field by field, and
 * dispatches on the requested command.
 *
 * arg     - user address of the rdc_config structure.
 * mode    - ioctl mode flags (data model, copyin/copyout direction).
 * kstatus - spcs status handle for returning detailed error codes.
 * rvp     - ioctl return-value pointer (used by RDC_CMD_HEALTH).
 *
 * Returns 0 on success or an errno / RDC error code.
 */
int
_rdc_config(void *arg, int mode, spcs_s_info_t kstatus, int *rvp)
{
	int rc = 0;
	struct netbuf fsvaddr, tsvaddr;
	struct knetconfig *knconf;
	char *p = NULL, *pf = NULL;
	struct rdc_config *uap;
	STRUCT_DECL(knetconfig, knconf_tmp);
	STRUCT_DECL(rdc_config, uparms);
	int enable, disable;
	int cmd;


	/* handles into the embedded rdc_set and its two rdc_addr members */
	STRUCT_HANDLE(rdc_set, rs);
	STRUCT_HANDLE(rdc_addr, pa);
	STRUCT_HANDLE(rdc_addr, sa);

	STRUCT_INIT(uparms, mode);

	bzero(STRUCT_BUF(uparms), STRUCT_SIZE(uparms));
	bzero(&fsvaddr, sizeof (fsvaddr));
	bzero(&tsvaddr, sizeof (tsvaddr));

	knconf = NULL;

	if (ddi_copyin(arg, STRUCT_BUF(uparms), STRUCT_SIZE(uparms), mode)) {
		return (EFAULT);
	}

	STRUCT_SET_HANDLE(rs, mode, STRUCT_FGETP(uparms, rdc_set));
	STRUCT_SET_HANDLE(pa, mode, STRUCT_FADDR(rs, primary));
	STRUCT_SET_HANDLE(sa, mode, STRUCT_FADDR(rs, secondary));
	cmd = STRUCT_FGET(uparms, command);
	if (cmd == RDC_CMD_ENABLE || cmd == RDC_CMD_RESUME) {
		/*
		 * Only enable/resume carry real network addresses; copy
		 * the primary and secondary address buffers from user land.
		 */
		fsvaddr.len = STRUCT_FGET(pa, addr.len);
		fsvaddr.maxlen = STRUCT_FGET(pa, addr.maxlen);
		fsvaddr.buf = kmem_zalloc(fsvaddr.len, KM_SLEEP);

		if (ddi_copyin(STRUCT_FGETP(pa, addr.buf),
		    fsvaddr.buf, fsvaddr.len, mode)) {
			kmem_free(fsvaddr.buf, fsvaddr.len);
#ifdef DEBUG
			cmn_err(CE_WARN, "!copyin failed primary.addr 2");
#endif
			return (EFAULT);
		}


		tsvaddr.len = STRUCT_FGET(sa, addr.len);
		tsvaddr.maxlen = STRUCT_FGET(sa, addr.maxlen);
		tsvaddr.buf = kmem_zalloc(tsvaddr.len, KM_SLEEP);

		if (ddi_copyin(STRUCT_FGETP(sa, addr.buf),
		    tsvaddr.buf, tsvaddr.len, mode)) {
#ifdef DEBUG
			cmn_err(CE_WARN, "!copyin failed secondary addr");
#endif
			kmem_free(fsvaddr.buf, fsvaddr.len);
			kmem_free(tsvaddr.buf, tsvaddr.len);
			return (EFAULT);
		}
	} else {
		/*
		 * Other commands get empty netbufs.  NOTE(review):
		 * kmem_zalloc() is called with len == 0 here; on Solaris
		 * that returns NULL and the matching kmem_free(buf, 0)
		 * is a no-op, so this is safe as written.
		 */
		fsvaddr.len = 0;
		fsvaddr.maxlen = 0;
		fsvaddr.buf = kmem_zalloc(fsvaddr.len, KM_SLEEP);
		tsvaddr.len = 0;
		tsvaddr.maxlen = 0;
		tsvaddr.buf = kmem_zalloc(tsvaddr.len, KM_SLEEP);
	}

	if (STRUCT_FGETP(uparms, rdc_set->netconfig) != NULL) {
		/* copy in and convert the caller's knetconfig */
		STRUCT_INIT(knconf_tmp, mode);
		knconf = kmem_zalloc(sizeof (*knconf), KM_SLEEP);
		if (ddi_copyin(STRUCT_FGETP(uparms, rdc_set->netconfig),
		    STRUCT_BUF(knconf_tmp), STRUCT_SIZE(knconf_tmp), mode)) {
#ifdef DEBUG
			cmn_err(CE_WARN, "!copyin failed netconfig");
#endif
			kmem_free(fsvaddr.buf, fsvaddr.len);
			kmem_free(tsvaddr.buf, tsvaddr.len);
			kmem_free(knconf, sizeof (*knconf));
			return (EFAULT);
		}

		knconf->knc_semantics = STRUCT_FGET(knconf_tmp, knc_semantics);
		knconf->knc_protofmly = STRUCT_FGETP(knconf_tmp, knc_protofmly);
		knconf->knc_proto = STRUCT_FGETP(knconf_tmp, knc_proto);

#ifndef _SunOS_5_6
		/* a 32-bit caller's dev_t must be expanded to 64-bit form */
		if ((mode & DATAMODEL_LP64) == 0) {
			knconf->knc_rdev =
			    expldev(STRUCT_FGET(knconf_tmp, knc_rdev));
		} else {
#endif
			knconf->knc_rdev = STRUCT_FGET(knconf_tmp, knc_rdev);
#ifndef _SunOS_5_6
		}
#endif

		/*
		 * knc_protofmly/knc_proto currently hold user addresses;
		 * copy the strings into kernel buffers and repoint them.
		 */
		pf = kmem_alloc(KNC_STRSIZE, KM_SLEEP);
		p = kmem_alloc(KNC_STRSIZE, KM_SLEEP);
		rc = ddi_copyin(knconf->knc_protofmly, pf, KNC_STRSIZE, mode);
		if (rc) {
#ifdef DEBUG
			cmn_err(CE_WARN, "!copyin failed parms protofmly");
#endif
			rc = EFAULT;
			goto out;
		}
		rc = ddi_copyin(knconf->knc_proto, p, KNC_STRSIZE, mode);
		if (rc) {
#ifdef DEBUG
			cmn_err(CE_WARN, "!copyin failed parms proto");
#endif
			rc = EFAULT;
			goto out;
		}
		knconf->knc_protofmly = pf;
		knconf->knc_proto = p;
	} /* !NULL netconfig */

	uap = kmem_alloc(sizeof (*uap), KM_SLEEP);

	/* copy relevant parts of rdc_config to uap field by field */

	(void) strncpy(uap->rdc_set[0].primary.intf, STRUCT_FGETP(pa, intf),
	    MAX_RDC_HOST_SIZE);
	(void) strncpy(uap->rdc_set[0].primary.file, STRUCT_FGETP(pa, file),
	    NSC_MAXPATH);
	(void) strncpy(uap->rdc_set[0].primary.bitmap, STRUCT_FGETP(pa, bitmap),
	    NSC_MAXPATH);
	uap->rdc_set[0].netconfig = knconf;
	uap->rdc_set[0].flags = STRUCT_FGET(uparms, rdc_set->flags);
	uap->rdc_set[0].index = STRUCT_FGET(uparms, rdc_set->index);
	uap->rdc_set[0].setid = STRUCT_FGET(uparms, rdc_set->setid);
	uap->rdc_set[0].sync_pos = STRUCT_FGET(uparms, rdc_set->sync_pos);
	uap->rdc_set[0].volume_size = STRUCT_FGET(uparms, rdc_set->volume_size);
	uap->rdc_set[0].bits_set = STRUCT_FGET(uparms, rdc_set->bits_set);
	uap->rdc_set[0].autosync = STRUCT_FGET(uparms, rdc_set->autosync);
	uap->rdc_set[0].maxqfbas = STRUCT_FGET(uparms, rdc_set->maxqfbas);
	uap->rdc_set[0].maxqitems = STRUCT_FGET(uparms, rdc_set->maxqitems);
	uap->rdc_set[0].asyncthr = STRUCT_FGET(uparms, rdc_set->asyncthr);
	uap->rdc_set[0].syshostid = STRUCT_FGET(uparms, rdc_set->syshostid);
	uap->rdc_set[0].primary.addr = fsvaddr;		/* struct copy */
	uap->rdc_set[0].secondary.addr = tsvaddr;	/* struct copy */

	(void) strncpy(uap->rdc_set[0].secondary.intf, STRUCT_FGETP(sa, intf),
	    MAX_RDC_HOST_SIZE);
	(void) strncpy(uap->rdc_set[0].secondary.file, STRUCT_FGETP(sa, file),
	    NSC_MAXPATH);
	(void) strncpy(uap->rdc_set[0].secondary.bitmap,
	    STRUCT_FGETP(sa, bitmap), NSC_MAXPATH);

	(void) strncpy(uap->rdc_set[0].direct_file,
	    STRUCT_FGETP(rs, direct_file), NSC_MAXPATH);

	(void) strncpy(uap->rdc_set[0].group_name, STRUCT_FGETP(rs, group_name),
	    NSC_MAXPATH);

	(void) strncpy(uap->rdc_set[0].disk_queue, STRUCT_FGETP(rs, disk_queue),
	    NSC_MAXPATH);

	uap->command = STRUCT_FGET(uparms, command);
	uap->options = STRUCT_FGET(uparms, options);

	enable = (uap->command == RDC_CMD_ENABLE ||
	    uap->command == RDC_CMD_RESUME);
	disable = (uap->command == RDC_CMD_DISABLE ||
	    uap->command == RDC_CMD_SUSPEND);

	/*
	 * Initialise the threadset if it has not already been done.
	 *
	 * This has to be done now, not in rdcattach(), because
	 * rdcattach() can be called before nskernd is running (eg.
	 * boot -r) in which case the nst_init() would fail and hence
	 * the attach would fail.
	 *
	 * Threadset creation is locked by the rdc_conf_lock,
	 * destruction is inherently single threaded as it is done in
	 * _rdc_unload() which must be the last thing performed by
	 * rdcdetach().
	 */

	if (enable && _rdc_ioset == NULL) {
		mutex_enter(&rdc_conf_lock);

		/* re-check under the lock: another thread may have won */
		if (_rdc_ioset == NULL) {
			rc = rdc_thread_configure();
		}

		mutex_exit(&rdc_conf_lock);

		if (rc || _rdc_ioset == NULL) {
			spcs_s_add(kstatus, RDC_ENOTHREADS);
			rc = RDC_ENOTHREADS;
			goto outuap;
		}
	}
	/* dispatch the command to the appropriate handler */
	switch (uap->command) {
	case RDC_CMD_ENABLE:
		rc = rdc_enable(uap, kstatus);
		break;
	case RDC_CMD_DISABLE:
		rc = rdc_disable(uap, kstatus);
		break;
	case RDC_CMD_COPY:
		rc = rdc_sync(uap, kstatus);
		break;
	case RDC_CMD_LOG:
		rc = rdc_log(uap, kstatus);
		break;
	case RDC_CMD_RECONFIG:
		rc = rdc_reconfig(uap, kstatus);
		break;
	case RDC_CMD_RESUME:
		rc = rdc_resume(uap, kstatus);
		break;
	case RDC_CMD_SUSPEND:
		rc = rdc_suspend(uap, kstatus);
		break;
	case RDC_CMD_TUNABLE:
		rc = rdc_tunable(uap, kstatus);
		break;
	case RDC_CMD_WAIT:
		rc = rdc_wait(uap, kstatus);
		break;
	case RDC_CMD_HEALTH:
		rc = rdc_health(uap, kstatus, rvp);
		break;
	case RDC_CMD_STATUS:
		rc = rdc_status(arg, mode, uap, kstatus);
		break;
	case RDC_CMD_RESET:
		rc = rdc_reset(uap, kstatus);
		break;
	case RDC_CMD_ADDQ:
		rc = rdc_add_diskq(uap, kstatus);
		break;
	case RDC_CMD_REMQ:
		/* a disk queue removal is followed by a queue kill */
		if ((rc = rdc_rem_diskq(uap, kstatus)) != 0)
			break;
		/* FALLTHRU */
	case RDC_CMD_KILLQ:
		rc = rdc_kill_diskq(uap, kstatus);
		break;
	case RDC_CMD_INITQ:
		rc = rdc_init_diskq(uap, kstatus);
		break;

	default:
		rc = EINVAL;
		break;
	}

	/*
	 * Tune the threadset size after a successful rdc_set addition
	 * or removal.
	 */
	if ((enable || disable) && rc == 0) {
		mutex_enter(&rdc_conf_lock);
		rdc_thread_tune(enable ? 2 : -2);
		mutex_exit(&rdc_conf_lock);
	}
outuap:
	kmem_free(uap, sizeof (*uap));
out:
	kmem_free(fsvaddr.buf, fsvaddr.len);
	kmem_free(tsvaddr.buf, tsvaddr.len);
	if (pf)
		kmem_free(pf, KNC_STRSIZE);
	if (p)
		kmem_free(p, KNC_STRSIZE);
	if (knconf)
		kmem_free(knconf, sizeof (*knconf));
	return (rc);
}
6288 6282
6289 6283
6290 6284 /*
6291 6285 * krdc->group->lock held on entry to halt_sync()
6292 6286 */
6293 6287 static void
6294 6288 halt_sync(rdc_k_info_t *krdc)
6295 6289 {
6296 6290 rdc_u_info_t *urdc = &rdc_u_info[krdc->index];
6297 6291
6298 6292 ASSERT(MUTEX_HELD(&krdc->group->lock));
6299 6293 ASSERT(IS_ENABLED(urdc));
6300 6294
6301 6295 /*
6302 6296 * If a sync is in progress, halt it
6303 6297 */
6304 6298 if ((rdc_get_vflags(urdc) & RDC_PRIMARY) &&
6305 6299 (krdc->aux_state & RDC_AUXSYNCIP)) {
6306 6300 krdc->disk_status = 1;
6307 6301
6308 6302 while (krdc->disk_status == 1) {
6309 6303 if (cv_wait_sig(&krdc->haltcv, &krdc->group->lock) == 0)
6310 6304 break;
6311 6305 }
6312 6306 }
6313 6307 }
6314 6308
6315 6309 /*
6316 6310 * return size in blocks
6317 6311 */
6318 6312 uint64_t
6319 6313 mirror_getsize(int index)
6320 6314 {
6321 6315 rdc_k_info_t *krdc;
6322 6316 rdc_u_info_t *urdc;
6323 6317 int rc, rs;
6324 6318 nsc_size_t size;
6325 6319
6326 6320 krdc = &rdc_k_info[index];
6327 6321 urdc = &rdc_u_info[index];
6328 6322
6329 6323 rc = _rdc_rsrv_devs(krdc, RDC_RAW, RDC_INTERNAL);
6330 6324 rs = nsc_partsize(RDC_U_FD(krdc), &size);
6331 6325 urdc->volume_size = size;
6332 6326 if (rc == 0)
6333 6327 _rdc_rlse_devs(krdc, RDC_RAW);
6334 6328
6335 6329 return (rs == 0 ? urdc->volume_size : 0);
6336 6330 }
6337 6331
6338 6332
6339 6333 /*
6340 6334 * Create a new dataset for this transfer, and add it to the list
6341 6335 * of datasets via the net_dataset pointer in the krdc.
6342 6336 */
6343 6337 rdc_net_dataset_t *
6344 6338 rdc_net_add_set(int index)
6345 6339 {
6346 6340 rdc_k_info_t *krdc;
6347 6341 rdc_u_info_t *urdc;
6348 6342 rdc_net_dataset_t *dset;
6349 6343
6350 6344 if (index >= rdc_max_sets) {
6351 6345 cmn_err(CE_NOTE, "!rdc_net_add_set: bad index %d", index);
6352 6346 return (NULL);
6353 6347 }
6354 6348 krdc = &rdc_k_info[index];
6355 6349 urdc = &rdc_u_info[index];
6356 6350
6357 6351 dset = kmem_alloc(sizeof (*dset), KM_NOSLEEP);
6358 6352 if (dset == NULL) {
6359 6353 cmn_err(CE_NOTE, "!rdc_net_add_set: kmem_alloc failed");
6360 6354 return (NULL);
6361 6355 }
6362 6356 RDC_DSMEMUSE(sizeof (*dset));
6363 6357 dset->inuse = 1;
6364 6358 dset->nitems = 0;
6365 6359 dset->delpend = 0;
6366 6360 dset->head = NULL;
6367 6361 dset->tail = NULL;
6368 6362 mutex_enter(&krdc->dc_sleep);
6369 6363
6370 6364 if (!IS_ENABLED(urdc)) {
6371 6365 /* raced with a disable command */
6372 6366 kmem_free(dset, sizeof (*dset));
6373 6367 RDC_DSMEMUSE(-sizeof (*dset));
6374 6368 mutex_exit(&krdc->dc_sleep);
6375 6369 return (NULL);
6376 6370 }
6377 6371 /*
6378 6372 * Shared the id generator, (and the locks).
6379 6373 */
6380 6374 mutex_enter(&rdc_net_hnd_id_lock);
6381 6375 if (++rdc_net_hnd_id == 0)
6382 6376 rdc_net_hnd_id = 1;
6383 6377 dset->id = rdc_net_hnd_id;
6384 6378 mutex_exit(&rdc_net_hnd_id_lock);
6385 6379
6386 6380 #ifdef DEBUG
6387 6381 if (krdc->net_dataset != NULL) {
6388 6382 rdc_net_dataset_t *dset2;
6389 6383 for (dset2 = krdc->net_dataset; dset2; dset2 = dset2->next) {
6390 6384 if (dset2->id == dset->id) {
6391 6385 cmn_err(CE_PANIC,
6392 6386 "rdc_net_add_set duplicate id %p:%d %p:%d",
6393 6387 (void *)dset, dset->id,
6394 6388 (void *)dset2, dset2->id);
6395 6389 }
6396 6390 }
6397 6391 }
6398 6392 #endif
6399 6393 dset->next = krdc->net_dataset;
6400 6394 krdc->net_dataset = dset;
6401 6395 mutex_exit(&krdc->dc_sleep);
6402 6396
6403 6397 return (dset);
6404 6398 }
6405 6399
6406 6400 /*
6407 6401 * fetch the previously added dataset.
6408 6402 */
6409 6403 rdc_net_dataset_t *
6410 6404 rdc_net_get_set(int index, int id)
6411 6405 {
6412 6406 rdc_k_info_t *krdc;
6413 6407 rdc_net_dataset_t *dset;
6414 6408
6415 6409 if (index >= rdc_max_sets) {
6416 6410 cmn_err(CE_NOTE, "!rdc_net_get_set: bad index %d", index);
6417 6411 return (NULL);
6418 6412 }
6419 6413 krdc = &rdc_k_info[index];
6420 6414
6421 6415 mutex_enter(&krdc->dc_sleep);
6422 6416
6423 6417 dset = krdc->net_dataset;
6424 6418 while (dset && (dset->id != id))
6425 6419 dset = dset->next;
6426 6420
6427 6421 if (dset) {
6428 6422 dset->inuse++;
6429 6423 }
6430 6424
6431 6425 mutex_exit(&krdc->dc_sleep);
6432 6426 return (dset);
6433 6427 }
6434 6428
6435 6429 /*
6436 6430 * Decrement the inuse counter. Data may be freed.
6437 6431 */
6438 6432 void
6439 6433 rdc_net_put_set(int index, rdc_net_dataset_t *dset)
6440 6434 {
6441 6435 rdc_k_info_t *krdc;
6442 6436
6443 6437 if (index >= rdc_max_sets) {
6444 6438 cmn_err(CE_NOTE, "!rdc_net_put_set: bad index %d", index);
6445 6439 return;
6446 6440 }
6447 6441 krdc = &rdc_k_info[index];
6448 6442
6449 6443 mutex_enter(&krdc->dc_sleep);
6450 6444 dset->inuse--;
6451 6445 ASSERT(dset->inuse >= 0);
6452 6446 if ((dset->inuse == 0) && (dset->delpend)) {
6453 6447 rdc_net_free_set(krdc, dset);
6454 6448 }
6455 6449 mutex_exit(&krdc->dc_sleep);
6456 6450 }
6457 6451
6458 6452 /*
6459 6453 * Mark that we are finished with this set. Decrement inuse
6460 6454 * counter, mark as needing deletion, and
6461 6455 * remove from linked list.
6462 6456 */
6463 6457 void
6464 6458 rdc_net_del_set(int index, rdc_net_dataset_t *dset)
6465 6459 {
6466 6460 rdc_k_info_t *krdc;
6467 6461
6468 6462 if (index >= rdc_max_sets) {
6469 6463 cmn_err(CE_NOTE, "!rdc_net_del_set: bad index %d", index);
6470 6464 return;
6471 6465 }
6472 6466 krdc = &rdc_k_info[index];
6473 6467
6474 6468 mutex_enter(&krdc->dc_sleep);
6475 6469 dset->inuse--;
6476 6470 ASSERT(dset->inuse >= 0);
6477 6471 dset->delpend = 1;
6478 6472 if (dset->inuse == 0) {
6479 6473 rdc_net_free_set(krdc, dset);
6480 6474 }
6481 6475 mutex_exit(&krdc->dc_sleep);
6482 6476 }
6483 6477
6484 6478 /*
6485 6479 * free all the memory associated with this set, and remove from
6486 6480 * list.
6487 6481 * Enters and exits with dc_sleep lock held.
6488 6482 */
6489 6483
6490 6484 void
6491 6485 rdc_net_free_set(rdc_k_info_t *krdc, rdc_net_dataset_t *dset)
6492 6486 {
6493 6487 rdc_net_dataset_t **dsetp;
6494 6488 #ifdef DEBUG
6495 6489 int found = 0;
6496 6490 #endif
6497 6491
6498 6492 ASSERT(MUTEX_HELD(&krdc->dc_sleep));
6499 6493 ASSERT(dset);
6500 6494 for (dsetp = &krdc->net_dataset; *dsetp; dsetp = &((*dsetp)->next)) {
6501 6495 if (*dsetp == dset) {
6502 6496 *dsetp = dset->next;
6503 6497 #ifdef DEBUG
6504 6498 found = 1;
6505 6499 #endif
6506 6500 break;
6507 6501 }
6508 6502 }
6509 6503
6510 6504 #ifdef DEBUG
6511 6505 if (found == 0) {
6512 6506 cmn_err(CE_WARN, "!rdc_net_free_set: Unable to find "
6513 6507 "dataset 0x%p in krdc list", (void *)dset);
6514 6508 }
6515 6509 #endif
6516 6510 /*
6517 6511 * unlinked from list. Free all the data
6518 6512 */
6519 6513 rdc_ditemsfree(dset);
6520 6514 /*
6521 6515 * free my core.
6522 6516 */
6523 6517 kmem_free(dset, sizeof (*dset));
6524 6518 RDC_DSMEMUSE(-sizeof (*dset));
6525 6519 }
6526 6520
6527 6521
6528 6522 /*
6529 6523 * Free all the dataitems and the data it points to.
6530 6524 */
6531 6525 static void
6532 6526 rdc_ditemsfree(rdc_net_dataset_t *dset)
6533 6527 {
6534 6528 rdc_net_dataitem_t *ditem;
6535 6529 rdc_net_dataitem_t *nitem;
6536 6530
6537 6531 ditem = dset->head;
6538 6532
6539 6533 while (ditem) {
6540 6534 nitem = ditem->next;
6541 6535 kmem_free(ditem->dptr, ditem->mlen);
6542 6536 RDC_DSMEMUSE(-ditem->mlen);
6543 6537 dset->nitems--;
6544 6538 kmem_free(ditem, sizeof (*ditem));
6545 6539 RDC_DSMEMUSE(-sizeof (*ditem));
6546 6540 ditem = nitem;
6547 6541 }
6548 6542 ASSERT(dset->nitems == 0);
6549 6543 }
6550 6544
6551 6545 /*
6552 6546 * allocate and initialize a rdc_aio_t
6553 6547 */
6554 6548 rdc_aio_t *
6555 6549 rdc_aio_tbuf_get(void *n, void *h, int pos, int len, int flag, int index, int s)
6556 6550 {
6557 6551 rdc_aio_t *p;
6558 6552
6559 6553 p = kmem_zalloc(sizeof (rdc_aio_t), KM_NOSLEEP);
6560 6554 if (p == NULL) {
6561 6555 #ifdef DEBUG
6562 6556 cmn_err(CE_NOTE, "!_rdcaiotbufget: kmem_alloc failed bp aio");
6563 6557 #endif
6564 6558 return (NULL);
6565 6559 } else {
6566 6560 p->next = n; /* overload */
6567 6561 p->handle = h;
6568 6562 p->pos = pos;
6569 6563 p->qpos = -1;
6570 6564 p->len = len;
6571 6565 p->flag = flag;
6572 6566 p->index = index;
6573 6567 p->iostatus = s; /* overload */
6574 6568 /* set up seq later, in case thr create fails */
6575 6569 }
6576 6570 return (p);
6577 6571 }
6578 6572
6579 6573 /*
6580 6574 * rdc_aio_buf_get
6581 6575 * get an aio_buf
6582 6576 */
6583 6577 aio_buf_t *
6584 6578 rdc_aio_buf_get(rdc_buf_t *h, int index)
6585 6579 {
6586 6580 aio_buf_t *p;
6587 6581
6588 6582 if (index >= rdc_max_sets) {
6589 6583 cmn_err(CE_NOTE, "!rdc: rdc_aio_buf_get bad index %x", index);
6590 6584 return (NULL);
6591 6585 }
6592 6586
6593 6587 mutex_enter(&h->aio_lock);
6594 6588
6595 6589 p = h->rdc_anon;
6596 6590 while (p && (p->kindex != index))
6597 6591 p = p->next;
6598 6592
6599 6593 mutex_exit(&h->aio_lock);
6600 6594 return (p);
6601 6595 }
6602 6596
6603 6597 /*
6604 6598 * rdc_aio_buf_del
6605 6599 * delete a aio_buf
6606 6600 */
6607 6601 void
6608 6602 rdc_aio_buf_del(rdc_buf_t *h, rdc_k_info_t *krdc)
6609 6603 {
6610 6604 aio_buf_t *p, **pp;
6611 6605
6612 6606 mutex_enter(&h->aio_lock);
6613 6607
6614 6608 p = NULL;
6615 6609 for (pp = &h->rdc_anon; *pp; pp = &((*pp)->next)) {
6616 6610 if ((*pp)->kindex == krdc->index) {
6617 6611 p = *pp;
6618 6612 break;
6619 6613 }
6620 6614 }
6621 6615
6622 6616 if (p) {
6623 6617 *pp = p->next;
6624 6618 kmem_free(p, sizeof (*p));
6625 6619 }
6626 6620 mutex_exit(&h->aio_lock);
6627 6621 }
6628 6622
6629 6623 /*
6630 6624 * rdc_aio_buf_add
6631 6625 * Add a aio_buf.
6632 6626 */
6633 6627 aio_buf_t *
6634 6628 rdc_aio_buf_add(int index, rdc_buf_t *h)
6635 6629 {
6636 6630 aio_buf_t *p;
6637 6631
6638 6632 p = kmem_zalloc(sizeof (*p), KM_NOSLEEP);
6639 6633 if (p == NULL) {
6640 6634 cmn_err(CE_NOTE, "!rdc_aio_buf_add: kmem_alloc failed");
6641 6635 return (NULL);
6642 6636 }
6643 6637
6644 6638 p->rdc_abufp = NULL;
6645 6639 p->kindex = index;
6646 6640
6647 6641 mutex_enter(&h->aio_lock);
6648 6642 p->next = h->rdc_anon;
6649 6643 h->rdc_anon = p;
6650 6644 mutex_exit(&h->aio_lock);
6651 6645 return (p);
6652 6646 }
6653 6647
6654 6648 /*
6655 6649 * kmemalloc a new group structure and setup the common
6656 6650 * fields.
6657 6651 */
6658 6652 static rdc_group_t *
6659 6653 rdc_newgroup()
6660 6654 {
6661 6655 rdc_group_t *group;
6662 6656
6663 6657 group = kmem_zalloc(sizeof (rdc_group_t), KM_SLEEP);
6664 6658 group->diskq.lastio = kmem_zalloc(sizeof (rdc_aio_t), KM_SLEEP);
6665 6659 group->count = 1;
6666 6660 group->seq = RDC_NEWSEQ;
6667 6661 group->seqack = RDC_NEWSEQ;
6668 6662 mutex_init(&group->lock, NULL, MUTEX_DRIVER, NULL);
6669 6663 mutex_init(&group->ra_queue.net_qlock, NULL, MUTEX_DRIVER, NULL);
6670 6664 mutex_init(&group->diskqmutex, NULL, MUTEX_DRIVER, NULL);
6671 6665 mutex_init(&group->diskq.disk_qlock, NULL, MUTEX_DRIVER, NULL);
6672 6666 mutex_init(&group->diskq.head_lock, NULL, MUTEX_DRIVER, NULL);
6673 6667 mutex_init(&group->addthrnumlk, NULL, MUTEX_DRIVER, NULL);
6674 6668 cv_init(&group->unregistercv, NULL, CV_DRIVER, NULL);
6675 6669 cv_init(&group->asyncqcv, NULL, CV_DRIVER, NULL);
6676 6670 cv_init(&group->diskq.busycv, NULL, CV_DRIVER, NULL);
6677 6671 cv_init(&group->diskq.qfullcv, NULL, CV_DRIVER, NULL);
6678 6672 cv_init(&group->ra_queue.qfcv, NULL, CV_DRIVER, NULL);
6679 6673 group->ra_queue.qfill_sleeping = RDC_QFILL_DEAD;
6680 6674 group->diskq.busycnt = 0;
6681 6675 ASSERT(group->synccount == 0); /* group was kmem_zalloc'ed */
6682 6676
6683 6677 /*
6684 6678 * add default number of threads to the flusher thread set, plus
6685 6679 * one extra thread for the disk queue flusher
6686 6680 */
6687 6681 if (nst_add_thread(_rdc_flset, 3) != 3)
6688 6682 cmn_err(CE_NOTE, "!rdc_newgroup: nst_add_thread failed");
6689 6683
6690 6684 return (group);
6691 6685 }
6692 6686
/*
 * Tear down and free a group structure created by rdc_newgroup().
 *
 * Caller must hold rdc_many_lock and guarantee the group is idle:
 * no async stalls, no worker threads, and a zero reference count
 * (enforced by the ASSERTs below).
 */
void
rdc_delgroup(rdc_group_t *group)
{

	ASSERT(group->asyncstall == 0);
	ASSERT(group->rdc_thrnum == 0);
	ASSERT(group->count == 0);
	ASSERT(MUTEX_HELD(&rdc_many_lock));

	/* discard anything still parked on the sleep queue */
	mutex_enter(&group->ra_queue.net_qlock);
	rdc_sleepqdiscard(group);
	mutex_exit(&group->ra_queue.net_qlock);

	/* try to remove flusher threads that this group added to _rdc_flset */
	if (nst_del_thread(_rdc_flset, group->rdc_addthrnum + 3) !=
	    group->rdc_addthrnum + 3)
		cmn_err(CE_NOTE, "!rdc_delgroup: nst_del_thread failed");

	/* destroy every lock and cv initialized in rdc_newgroup() */
	mutex_destroy(&group->lock);
	mutex_destroy(&group->ra_queue.net_qlock);
	mutex_destroy(&group->diskqmutex);
	mutex_destroy(&group->diskq.disk_qlock);
	mutex_destroy(&group->diskq.head_lock);
	mutex_destroy(&group->addthrnumlk);
	cv_destroy(&group->unregistercv);
	cv_destroy(&group->asyncqcv);
	cv_destroy(&group->diskq.busycv);
	cv_destroy(&group->diskq.qfullcv);
	cv_destroy(&group->ra_queue.qfcv);
	kmem_free(group->diskq.lastio, sizeof (rdc_aio_t));
	kmem_free(group, sizeof (rdc_group_t));
}
↓ open down ↓ |
1066 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX