fixup .text where possible
style update i86pc/io
7127 remove -Wno-missing-braces from Makefile.uts
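The two changed hunks in this file add the inner braces that gcc's -Wmissing-braces option asks for when an aggregate initializer contains sub-aggregates: the pt_arr passthru table (old line 159) and the modlinkage initializer (old line 432). With those initializers fully braced, the blanket -Wno-missing-braces override can be dropped from Makefile.uts. As a rough standalone sketch of the warning and the fix (the names handler, do_quiesce and linkage below are invented for illustration and do not appear in dr.c):

#include <stddef.h>	/* NULL; needed only for this standalone sketch */

/* Hypothetical names for illustration -- none of these exist in dr.c. */
struct handler {
	char	*name;
	int	(*func)(void);
};

static int
do_quiesce(void)
{
	return (0);
}

/* Brace elision: legal C, but gcc -Wmissing-braces warns about it. */
struct handler tab_flat[] = {
	"quiesce", do_quiesce,
};

/* Fully braced: same object code, no warning -- the form the diff adopts. */
struct handler tab_braced[] = {
	{ "quiesce", do_quiesce }
};

/* The same fix applies to a nested array member, as in the modlinkage hunk. */
struct linkage {
	int	rev;
	void	*ptrs[3];
};

struct linkage lk_flat = { 1, (void *)tab_braced, NULL };	/* warns */
struct linkage lk_braced = { 1, { (void *)tab_braced, NULL } };	/* clean */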
--- old/usr/src/uts/i86pc/io/dr/dr.c
+++ new/usr/src/uts/i86pc/io/dr/dr.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26 /*
27 27 * Copyright (c) 2010, Intel Corporation.
28 28 * All rights reserved.
29 29 */
30 30
31 31 /*
32 32 * PIM-DR layer of DR driver. Provides interface between user
33 33 * level applications and the PSM-DR layer.
34 34 */
35 35
36 36 #include <sys/note.h>
37 37 #include <sys/debug.h>
38 38 #include <sys/types.h>
39 39 #include <sys/errno.h>
40 40 #include <sys/cred.h>
41 41 #include <sys/dditypes.h>
42 42 #include <sys/devops.h>
43 43 #include <sys/modctl.h>
44 44 #include <sys/poll.h>
45 45 #include <sys/conf.h>
46 46 #include <sys/ddi.h>
47 47 #include <sys/sunddi.h>
48 48 #include <sys/sunndi.h>
49 49 #include <sys/stat.h>
50 50 #include <sys/kmem.h>
51 51 #include <sys/processor.h>
52 52 #include <sys/cpuvar.h>
53 53 #include <sys/mem_config.h>
54 54
55 55 #include <sys/autoconf.h>
56 56 #include <sys/cmn_err.h>
57 57
58 58 #include <sys/ddi_impldefs.h>
59 59 #include <sys/promif.h>
60 60 #include <sys/machsystm.h>
61 61
62 62 #include <sys/dr.h>
63 63 #include <sys/drmach.h>
64 64 #include <sys/dr_util.h>
65 65
66 66 extern int nulldev();
67 67 extern int nodev();
68 68 extern struct memlist *phys_install;
69 69
70 70 #ifdef DEBUG
71 71 uint_t dr_debug = 0; /* dr.h for bit values */
72 72 #endif /* DEBUG */
73 73
74 74 static int dr_dev_type_to_nt(char *);
75 75
76 76 /*
77 77 * NOTE: state_str, nt_str and SBD_CMD_STR are only used in a debug
78 78 * kernel. They are, however, referenced during both debug and non-debug
79 79 * compiles.
80 80 */
81 81
82 82 static char *state_str[] = {
83 83 "EMPTY", "OCCUPIED", "CONNECTED", "UNCONFIGURED",
84 84 "PARTIAL", "CONFIGURED", "RELEASE", "UNREFERENCED",
85 85 "FATAL"
86 86 };
87 87
88 88 #define SBD_CMD_STR(c) \
89 89 (((c) == SBD_CMD_ASSIGN) ? "ASSIGN" : \
90 90 ((c) == SBD_CMD_UNASSIGN) ? "UNASSIGN" : \
91 91 ((c) == SBD_CMD_POWERON) ? "POWERON" : \
92 92 ((c) == SBD_CMD_POWEROFF) ? "POWEROFF" : \
93 93 ((c) == SBD_CMD_TEST) ? "TEST" : \
94 94 ((c) == SBD_CMD_CONNECT) ? "CONNECT" : \
95 95 ((c) == SBD_CMD_DISCONNECT) ? "DISCONNECT" : \
96 96 ((c) == SBD_CMD_CONFIGURE) ? "CONFIGURE" : \
97 97 ((c) == SBD_CMD_UNCONFIGURE) ? "UNCONFIGURE" : \
98 98 ((c) == SBD_CMD_GETNCM) ? "GETNCM" : \
99 99 ((c) == SBD_CMD_PASSTHRU) ? "PASSTHRU" : \
100 100 ((c) == SBD_CMD_STATUS) ? "STATUS" : "unknown")
101 101
102 102 #define DR_GET_BOARD_DEVUNIT(sb, ut, un) (&((sb)->b_dev[DEVSET_NIX(ut)][un]))
103 103
104 104 #define DR_MAKE_MINOR(i, b) (((i) << 16) | (b))
105 105 #define DR_MINOR2INST(m) (((m) >> 16) & 0xffff)
106 106 #define DR_MINOR2BNUM(m) ((m) & 0xffff)
107 107
108 108 /* for the DR*INTERNAL_ERROR macros. see sys/dr.h. */
109 109 static char *dr_ie_fmt = "dr.c %d";
110 110
111 111 /* struct for drmach device name to sbd_comp_type_t mapping */
112 112 typedef struct {
113 113 char *s_devtype;
114 114 sbd_comp_type_t s_nodetype;
115 115 } dr_devname_t;
116 116
117 117 /* struct to map starfire device attributes - name:sbd_comp_type_t */
118 118 static dr_devname_t dr_devattr[] = {
119 119 { DRMACH_DEVTYPE_MEM, SBD_COMP_MEM },
120 120 { DRMACH_DEVTYPE_CPU, SBD_COMP_CPU },
121 121 { DRMACH_DEVTYPE_PCI, SBD_COMP_IO },
122 122 #if defined(DRMACH_DEVTYPE_SBUS)
123 123 { DRMACH_DEVTYPE_SBUS, SBD_COMP_IO },
124 124 #endif
125 125 #if defined(DRMACH_DEVTYPE_WCI)
126 126 { DRMACH_DEVTYPE_WCI, SBD_COMP_IO },
127 127 #endif
128 128 /* last s_devtype must be NULL, s_nodetype must be SBD_COMP_UNKNOWN */
129 129 { NULL, SBD_COMP_UNKNOWN }
130 130 };
131 131
132 132 /*
133 133 * Per instance soft-state structure.
134 134 */
135 135 typedef struct dr_softstate {
136 136 dev_info_t *dip;
137 137 dr_board_t *boards;
138 138 kmutex_t i_lock;
139 139 int dr_initialized;
140 140 } dr_softstate_t;
141 141
142 142 /*
143 143 * dr Global data elements
144 144 */
145 145 struct dr_global {
146 146 dr_softstate_t *softsp; /* pointer to initialize soft state */
147 147 kmutex_t lock;
148 148 } dr_g;
149 149
150 150 dr_unsafe_devs_t dr_unsafe_devs;
151 151
152 152 /*
153 153 * Table of known passthru commands.
154 154 */
155 155 struct {
156 156 char *pt_name;
157 157 int (*pt_func)(dr_handle_t *);
158 158 } pt_arr[] = {
159 - "quiesce", dr_pt_test_suspend,
159 + { "quiesce", dr_pt_test_suspend }
160 160 };
161 161
162 162 int dr_modunload_okay = 0; /* set to non-zero to allow unload */
163 163
164 164 /*
165 165 * State transition table. States valid transitions for "board" state.
166 166 * Recall that a non-zero return value terminates the operation; however,
167 167 * the herrno value is what really indicates an error, if any.
168 168 */
169 169 static int
170 170 _cmd2index(int c)
171 171 {
172 172 /*
173 173 * Translate DR CMD to index into dr_state_transition.
174 174 */
175 175 switch (c) {
176 176 case SBD_CMD_CONNECT: return (0);
177 177 case SBD_CMD_DISCONNECT: return (1);
178 178 case SBD_CMD_CONFIGURE: return (2);
179 179 case SBD_CMD_UNCONFIGURE: return (3);
180 180 case SBD_CMD_ASSIGN: return (4);
181 181 case SBD_CMD_UNASSIGN: return (5);
182 182 case SBD_CMD_POWERON: return (6);
183 183 case SBD_CMD_POWEROFF: return (7);
184 184 case SBD_CMD_TEST: return (8);
185 185 default: return (-1);
186 186 }
187 187 }
188 188
189 189 #define CMD2INDEX(c) _cmd2index(c)
190 190
191 191 static struct dr_state_trans {
192 192 int x_cmd;
193 193 struct {
194 194 int x_rv; /* return value of pre_op */
195 195 int x_err; /* error, if any */
196 196 } x_op[DR_STATE_MAX];
197 197 } dr_state_transition[] = {
198 198 { SBD_CMD_CONNECT,
199 199 {
200 200 { 0, 0 }, /* empty */
201 201 { 0, 0 }, /* occupied */
202 202 { -1, ESBD_STATE }, /* connected */
203 203 { -1, ESBD_STATE }, /* unconfigured */
204 204 { -1, ESBD_STATE }, /* partial */
205 205 { -1, ESBD_STATE }, /* configured */
206 206 { -1, ESBD_STATE }, /* release */
207 207 { -1, ESBD_STATE }, /* unreferenced */
208 208 { -1, ESBD_FATAL_STATE }, /* fatal */
209 209 }
210 210 },
211 211 { SBD_CMD_DISCONNECT,
212 212 {
213 213 { -1, ESBD_STATE }, /* empty */
214 214 { 0, 0 }, /* occupied */
215 215 { 0, 0 }, /* connected */
216 216 { 0, 0 }, /* unconfigured */
217 217 { -1, ESBD_STATE }, /* partial */
218 218 { -1, ESBD_STATE }, /* configured */
219 219 { -1, ESBD_STATE }, /* release */
220 220 { -1, ESBD_STATE }, /* unreferenced */
221 221 { -1, ESBD_FATAL_STATE }, /* fatal */
222 222 }
223 223 },
224 224 { SBD_CMD_CONFIGURE,
225 225 {
226 226 { -1, ESBD_STATE }, /* empty */
227 227 { -1, ESBD_STATE }, /* occupied */
228 228 { 0, 0 }, /* connected */
229 229 { 0, 0 }, /* unconfigured */
230 230 { 0, 0 }, /* partial */
231 231 { 0, 0 }, /* configured */
232 232 { -1, ESBD_STATE }, /* release */
233 233 { -1, ESBD_STATE }, /* unreferenced */
234 234 { -1, ESBD_FATAL_STATE }, /* fatal */
235 235 }
236 236 },
237 237 { SBD_CMD_UNCONFIGURE,
238 238 {
239 239 { -1, ESBD_STATE }, /* empty */
240 240 { -1, ESBD_STATE }, /* occupied */
241 241 { -1, ESBD_STATE }, /* connected */
242 242 { -1, ESBD_STATE }, /* unconfigured */
243 243 { 0, 0 }, /* partial */
244 244 { 0, 0 }, /* configured */
245 245 { 0, 0 }, /* release */
246 246 { 0, 0 }, /* unreferenced */
247 247 { -1, ESBD_FATAL_STATE }, /* fatal */
248 248 }
249 249 },
250 250 { SBD_CMD_ASSIGN,
251 251 {
252 252 { 0, 0 }, /* empty */
253 253 { 0, 0 }, /* occupied */
254 254 { -1, ESBD_STATE }, /* connected */
255 255 { -1, ESBD_STATE }, /* unconfigured */
256 256 { -1, ESBD_STATE }, /* partial */
257 257 { -1, ESBD_STATE }, /* configured */
258 258 { -1, ESBD_STATE }, /* release */
259 259 { -1, ESBD_STATE }, /* unreferenced */
260 260 { -1, ESBD_FATAL_STATE }, /* fatal */
261 261 }
262 262 },
263 263 { SBD_CMD_UNASSIGN,
264 264 {
265 265 { 0, 0 }, /* empty */
266 266 { 0, 0 }, /* occupied */
267 267 { -1, ESBD_STATE }, /* connected */
268 268 { -1, ESBD_STATE }, /* unconfigured */
269 269 { -1, ESBD_STATE }, /* partial */
270 270 { -1, ESBD_STATE }, /* configured */
271 271 { -1, ESBD_STATE }, /* release */
272 272 { -1, ESBD_STATE }, /* unreferenced */
273 273 { -1, ESBD_FATAL_STATE }, /* fatal */
274 274 }
275 275 },
276 276 { SBD_CMD_POWERON,
277 277 {
278 278 { 0, 0 }, /* empty */
279 279 { 0, 0 }, /* occupied */
280 280 { -1, ESBD_STATE }, /* connected */
281 281 { -1, ESBD_STATE }, /* unconfigured */
282 282 { -1, ESBD_STATE }, /* partial */
283 283 { -1, ESBD_STATE }, /* configured */
284 284 { -1, ESBD_STATE }, /* release */
285 285 { -1, ESBD_STATE }, /* unreferenced */
286 286 { -1, ESBD_FATAL_STATE }, /* fatal */
287 287 }
288 288 },
289 289 { SBD_CMD_POWEROFF,
290 290 {
291 291 { 0, 0 }, /* empty */
292 292 { 0, 0 }, /* occupied */
293 293 { -1, ESBD_STATE }, /* connected */
294 294 { -1, ESBD_STATE }, /* unconfigured */
295 295 { -1, ESBD_STATE }, /* partial */
296 296 { -1, ESBD_STATE }, /* configured */
297 297 { -1, ESBD_STATE }, /* release */
298 298 { -1, ESBD_STATE }, /* unreferenced */
299 299 { -1, ESBD_FATAL_STATE }, /* fatal */
300 300 }
301 301 },
302 302 { SBD_CMD_TEST,
303 303 {
304 304 { 0, 0 }, /* empty */
305 305 { 0, 0 }, /* occupied */
306 306 { -1, ESBD_STATE }, /* connected */
307 307 { -1, ESBD_STATE }, /* unconfigured */
308 308 { -1, ESBD_STATE }, /* partial */
309 309 { -1, ESBD_STATE }, /* configured */
310 310 { -1, ESBD_STATE }, /* release */
311 311 { -1, ESBD_STATE }, /* unreferenced */
312 312 { -1, ESBD_FATAL_STATE }, /* fatal */
313 313 }
314 314 },
315 315 };
316 316
317 317 /*
318 318 * Global R/W lock to synchronize access across
319 319 * multiple boards. Users wanting multi-board access
320 320 * must grab WRITE lock, others must grab READ lock.
321 321 */
322 322 krwlock_t dr_grwlock;
323 323
324 324 /*
325 325 * Head of the boardlist used as a reference point for
326 326 * locating board structs.
327 327 * TODO: eliminate dr_boardlist
328 328 */
329 329 dr_board_t *dr_boardlist;
330 330
331 331 /*
332 332 * DR support functions.
333 333 */
334 334 static dr_devset_t dr_dev2devset(sbd_comp_id_t *cid);
335 335 static int dr_check_transition(dr_board_t *bp,
336 336 dr_devset_t *devsetp,
337 337 struct dr_state_trans *transp,
338 338 int cmd);
339 339 static int dr_check_unit_attached(dr_common_unit_t *dp);
340 340 static sbd_error_t *dr_init_devlists(dr_board_t *bp);
341 341 static void dr_board_discovery(dr_board_t *bp);
342 342 static int dr_board_init(dr_board_t *bp, dev_info_t *dip, int bd);
343 343 static void dr_board_destroy(dr_board_t *bp);
344 344 static void dr_board_transition(dr_board_t *bp, dr_state_t st);
345 345
346 346 /*
347 347 * DR driver (DDI) entry points.
348 348 */
349 349 static int dr_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,
350 350 void *arg, void **result);
351 351 static int dr_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
352 352 static int dr_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
353 353 static int dr_probe(dev_info_t *dip);
354 354 static int dr_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
355 355 cred_t *cred_p, int *rval_p);
356 356 static int dr_close(dev_t dev, int flag, int otyp, cred_t *cred_p);
357 357 static int dr_open(dev_t *dev, int flag, int otyp, cred_t *cred_p);
358 358
359 359 /*
360 360 * DR command processing operations.
361 361 */
362 362 static int dr_copyin_iocmd(dr_handle_t *hp);
363 363 static int dr_copyout_iocmd(dr_handle_t *hp);
364 364 static int dr_copyout_errs(dr_handle_t *hp);
365 365 static int dr_pre_op(dr_handle_t *hp);
366 366 static int dr_post_op(dr_handle_t *hp, int rv);
367 367 static int dr_exec_op(dr_handle_t *hp);
368 368 static void dr_assign_board(dr_handle_t *hp);
369 369 static void dr_unassign_board(dr_handle_t *hp);
370 370 static void dr_connect(dr_handle_t *hp);
371 371 static int dr_disconnect(dr_handle_t *hp);
372 372 static void dr_dev_configure(dr_handle_t *hp);
373 373 static void dr_dev_release(dr_handle_t *hp);
374 374 static int dr_dev_unconfigure(dr_handle_t *hp);
375 375 static void dr_dev_cancel(dr_handle_t *hp);
376 376 static int dr_dev_status(dr_handle_t *hp);
377 377 static int dr_get_ncm(dr_handle_t *hp);
378 378 static int dr_pt_ioctl(dr_handle_t *hp);
379 379 static void dr_poweron_board(dr_handle_t *hp);
380 380 static void dr_poweroff_board(dr_handle_t *hp);
381 381 static void dr_test_board(dr_handle_t *hp);
382 382
383 383 /*
384 384 * Autoconfiguration data structures
385 385 */
386 386 struct cb_ops dr_cb_ops = {
387 387 dr_open, /* open */
388 388 dr_close, /* close */
389 389 nodev, /* strategy */
390 390 nodev, /* print */
391 391 nodev, /* dump */
392 392 nodev, /* read */
393 393 nodev, /* write */
394 394 dr_ioctl, /* ioctl */
395 395 nodev, /* devmap */
396 396 nodev, /* mmap */
397 397 nodev, /* segmap */
398 398 nochpoll, /* chpoll */
399 399 ddi_prop_op, /* cb_prop_op */
400 400 NULL, /* struct streamtab */
401 401 D_NEW | D_MP | D_MTSAFE, /* compatibility flags */
402 402 CB_REV, /* Rev */
403 403 nodev, /* cb_aread */
404 404 nodev /* cb_awrite */
405 405 };
406 406
407 407 struct dev_ops dr_dev_ops = {
408 408 DEVO_REV, /* build version */
409 409 0, /* dev ref count */
410 410 dr_getinfo, /* getinfo */
411 411 nulldev, /* identify */
412 412 dr_probe, /* probe */
413 413 dr_attach, /* attach */
414 414 dr_detach, /* detach */
415 415 nodev, /* reset */
416 416 &dr_cb_ops, /* cb_ops */
417 417 (struct bus_ops *)NULL, /* bus ops */
418 418 NULL, /* power */
419 419 ddi_quiesce_not_needed, /* quiesce */
420 420 };
421 421
422 422 extern struct mod_ops mod_driverops;
423 423
424 424 static struct modldrv modldrv = {
425 425 &mod_driverops,
426 426 "Dynamic Reconfiguration",
427 427 &dr_dev_ops
428 428 };
429 429
430 430 static struct modlinkage modlinkage = {
431 431 MODREV_1,
432 - (void *)&modldrv,
433 - NULL
432 + { (void *)&modldrv,
433 + NULL }
434 434 };
435 435
436 436 /*
437 437 * Driver entry points.
438 438 */
439 439 int
440 440 _init(void)
441 441 {
442 442 int err;
443 443
444 444 /*
445 445 * If you need to support multiple nodes (instances), then
446 446 * whatever the maximum number of supported nodes is would
447 447 * need to be passed as the third parameter to ddi_soft_state_init().
448 448 * An alternative would be to dynamically fini and re-init the
449 449 * soft state structure each time a node is attached.
450 450 */
451 451 err = ddi_soft_state_init((void **)&dr_g.softsp,
452 452 sizeof (dr_softstate_t), 1);
453 453 if (err)
454 454 return (err);
455 455
456 456 mutex_init(&dr_g.lock, NULL, MUTEX_DRIVER, NULL);
457 457 rw_init(&dr_grwlock, NULL, RW_DEFAULT, NULL);
458 458
459 459 return (mod_install(&modlinkage));
460 460 }
461 461
462 462 int
463 463 _fini(void)
464 464 {
465 465 int err;
466 466
467 467 if ((err = mod_remove(&modlinkage)) != 0)
468 468 return (err);
469 469
470 470 mutex_destroy(&dr_g.lock);
471 471 rw_destroy(&dr_grwlock);
472 472
473 473 ddi_soft_state_fini((void **)&dr_g.softsp);
474 474
475 475 return (0);
476 476 }
477 477
478 478 int
479 479 _info(struct modinfo *modinfop)
480 480 {
481 481 return (mod_info(&modlinkage, modinfop));
482 482 }
483 483
484 484 /*ARGSUSED1*/
485 485 static int
486 486 dr_open(dev_t *dev, int flag, int otyp, cred_t *cred_p)
487 487 {
488 488 int instance;
489 489 dr_softstate_t *softsp;
490 490 dr_board_t *bp;
491 491
492 492 /*
493 493 * Don't open unless we've attached.
494 494 */
495 495 instance = DR_MINOR2INST(getminor(*dev));
496 496 softsp = ddi_get_soft_state(dr_g.softsp, instance);
497 497 if (softsp == NULL)
498 498 return (ENXIO);
499 499
500 500 mutex_enter(&softsp->i_lock);
501 501 if (!softsp->dr_initialized) {
502 502 int bd;
503 503 int rv = 0;
504 504
505 505 bp = softsp->boards;
506 506
507 507 /* initialize each array element */
508 508 for (bd = 0; bd < MAX_BOARDS; bd++, bp++) {
509 509 rv = dr_board_init(bp, softsp->dip, bd);
510 510 if (rv)
511 511 break;
512 512 }
513 513
514 514 if (rv == 0) {
515 515 softsp->dr_initialized = 1;
516 516 } else {
517 517 /* destroy elements initialized thus far */
518 518 while (--bp >= softsp->boards)
519 519 dr_board_destroy(bp);
520 520
521 521 /* TODO: should this be another errno val? */
522 522 mutex_exit(&softsp->i_lock);
523 523 return (ENXIO);
524 524 }
525 525 }
526 526 mutex_exit(&softsp->i_lock);
527 527
528 528 bp = &softsp->boards[DR_MINOR2BNUM(getminor(*dev))];
529 529
530 530 /*
531 531 * prevent opening of a dyn-ap for a board
532 532 * that does not exist
533 533 */
534 534 if (!bp->b_assigned) {
535 535 if (drmach_board_lookup(bp->b_num, &bp->b_id) != 0)
536 536 return (ENODEV);
537 537 }
538 538
539 539 return (0);
540 540 }
541 541
542 542 /*ARGSUSED*/
543 543 static int
544 544 dr_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
545 545 {
546 546 return (0);
547 547 }
548 548
549 549 /*
550 550 * Enable/disable DR features.
551 551 */
552 552 int dr_enable = 1;
553 553
554 554 /*ARGSUSED3*/
555 555 static int
556 556 dr_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
557 557 cred_t *cred_p, int *rval_p)
558 558 {
559 559 int rv = 0;
560 560 int instance;
561 561 int bd;
562 562 dr_handle_t *hp;
563 563 dr_softstate_t *softsp;
564 564 static fn_t f = "dr_ioctl";
565 565
566 566 PR_ALL("%s...\n", f);
567 567
568 568 instance = DR_MINOR2INST(getminor(dev));
569 569 softsp = ddi_get_soft_state(dr_g.softsp, instance);
570 570 if (softsp == NULL) {
571 571 cmn_err(CE_WARN, "dr%d: module not yet attached", instance);
572 572 return (ENXIO);
573 573 }
574 574
575 575 if (!dr_enable) {
576 576 switch (cmd) {
577 577 case SBD_CMD_STATUS:
578 578 case SBD_CMD_GETNCM:
579 579 case SBD_CMD_PASSTHRU:
580 580 break;
581 581 default:
582 582 return (ENOTSUP);
583 583 }
584 584 }
585 585
586 586 bd = DR_MINOR2BNUM(getminor(dev));
587 587 if (bd >= MAX_BOARDS)
588 588 return (ENXIO);
589 589
590 590 /* get and initialize storage for new handle */
591 591 hp = GETSTRUCT(dr_handle_t, 1);
592 592 hp->h_bd = &softsp->boards[bd];
593 593 hp->h_err = NULL;
594 594 hp->h_dev = getminor(dev);
595 595 hp->h_cmd = cmd;
596 596 hp->h_mode = mode;
597 597 hp->h_iap = (sbd_ioctl_arg_t *)arg;
598 598
599 599 /* copy sbd command into handle */
600 600 rv = dr_copyin_iocmd(hp);
601 601 if (rv) {
602 602 FREESTRUCT(hp, dr_handle_t, 1);
603 603 return (EINVAL);
604 604 }
605 605
606 606 /* translate canonical name to component type */
607 607 if (hp->h_sbdcmd.cmd_cm.c_id.c_name[0] != '\0') {
608 608 hp->h_sbdcmd.cmd_cm.c_id.c_type =
609 609 dr_dev_type_to_nt(hp->h_sbdcmd.cmd_cm.c_id.c_name);
610 610
611 611 PR_ALL("%s: c_name = %s, c_type = %d\n",
612 612 f,
613 613 hp->h_sbdcmd.cmd_cm.c_id.c_name,
614 614 hp->h_sbdcmd.cmd_cm.c_id.c_type);
615 615 } else {
616 616 /*EMPTY*/
617 617 PR_ALL("%s: c_name is NULL\n", f);
618 618 }
619 619
620 620 /* determine scope of operation */
621 621 hp->h_devset = dr_dev2devset(&hp->h_sbdcmd.cmd_cm.c_id);
622 622
623 623 switch (hp->h_cmd) {
624 624 case SBD_CMD_STATUS:
625 625 case SBD_CMD_GETNCM:
626 626 /* no locks needed for these commands */
627 627 break;
628 628
629 629 default:
630 630 rw_enter(&dr_grwlock, RW_WRITER);
631 631 mutex_enter(&hp->h_bd->b_lock);
632 632
633 633 /*
634 634 * If we're dealing with memory at all, then we have
635 635 * to keep the "exclusive" global lock held. This is
636 636 * necessary since we will probably need to look at
637 637 * multiple board structs. Otherwise, we only have
638 638 * to deal with the board in question and so can drop
639 639 * the global lock to "shared".
640 640 */
641 641 rv = DEVSET_IN_SET(hp->h_devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
642 642 if (rv == 0)
643 643 rw_downgrade(&dr_grwlock);
644 644 break;
645 645 }
646 646 rv = 0;
647 647
648 648 if (rv == 0)
649 649 rv = dr_pre_op(hp);
650 650 if (rv == 0) {
651 651 rv = dr_exec_op(hp);
652 652 rv = dr_post_op(hp, rv);
653 653 }
654 654
655 655 if (rv == -1)
656 656 rv = EIO;
657 657
658 658 if (hp->h_err != NULL)
659 659 if (!(rv = dr_copyout_errs(hp)))
660 660 rv = EIO;
661 661
662 662 /* undo locking, if any, done before dr_pre_op */
663 663 switch (hp->h_cmd) {
664 664 case SBD_CMD_STATUS:
665 665 case SBD_CMD_GETNCM:
666 666 break;
667 667
668 668 case SBD_CMD_ASSIGN:
669 669 case SBD_CMD_UNASSIGN:
670 670 case SBD_CMD_POWERON:
671 671 case SBD_CMD_POWEROFF:
672 672 case SBD_CMD_CONNECT:
673 673 case SBD_CMD_CONFIGURE:
674 674 case SBD_CMD_UNCONFIGURE:
675 675 case SBD_CMD_DISCONNECT:
676 676 /* Board changed state. Log a sysevent. */
677 677 if (rv == 0)
678 678 (void) drmach_log_sysevent(hp->h_bd->b_num, "",
679 679 SE_SLEEP, 0);
680 680 /* Fall through */
681 681
682 682 default:
683 683 mutex_exit(&hp->h_bd->b_lock);
684 684 rw_exit(&dr_grwlock);
685 685 }
686 686
687 687 if (hp->h_opts.size != 0)
688 688 FREESTRUCT(hp->h_opts.copts, char, hp->h_opts.size);
689 689
690 690 FREESTRUCT(hp, dr_handle_t, 1);
691 691
692 692 return (rv);
693 693 }
694 694
695 695 /*ARGSUSED*/
696 696 static int
697 697 dr_probe(dev_info_t *dip)
698 698 {
699 699 return (DDI_PROBE_SUCCESS);
700 700 }
701 701
702 702 static int
703 703 dr_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
704 704 {
705 705 int rv, rv2;
706 706 int bd;
707 707 int instance;
708 708 sbd_error_t *err;
709 709 dr_softstate_t *softsp;
710 710
711 711 instance = ddi_get_instance(dip);
712 712
713 713 switch (cmd) {
714 714 case DDI_ATTACH:
715 715 rw_enter(&dr_grwlock, RW_WRITER);
716 716
717 717 rv = ddi_soft_state_zalloc(dr_g.softsp, instance);
718 718 if (rv != DDI_SUCCESS) {
719 719 cmn_err(CE_WARN, "dr%d: failed to alloc soft-state",
720 720 instance);
721 721 return (DDI_FAILURE);
722 722 }
723 723
724 724 /* initialize softstate structure */
725 725 softsp = ddi_get_soft_state(dr_g.softsp, instance);
726 726 softsp->dip = dip;
727 727
728 728 mutex_init(&softsp->i_lock, NULL, MUTEX_DRIVER, NULL);
729 729
730 730 /* allocate board array (aka boardlist) */
731 731 softsp->boards = GETSTRUCT(dr_board_t, MAX_BOARDS);
732 732
733 733 /* TODO: eliminate dr_boardlist */
734 734 dr_boardlist = softsp->boards;
735 735
736 736 /* initialize each array element */
737 737 rv = DDI_SUCCESS;
738 738 for (bd = 0; bd < MAX_BOARDS; bd++) {
739 739 dr_board_t *bp = &softsp->boards[bd];
740 740 char *p, *name;
741 741 int l, minor_num;
742 742
743 743 /*
744 744 * initialize the board attachment point path
745 745 * (relative to pseudo) in a form immediately
746 746 * reusable as a cfgadm command argument.
747 747 * TODO: clean this up
748 748 */
749 749 p = bp->b_path;
750 750 l = sizeof (bp->b_path);
751 751 (void) snprintf(p, l, "dr@%d:", instance);
752 752 while (*p != '\0') {
753 753 l--;
754 754 p++;
755 755 }
756 756
757 757 name = p;
758 758 err = drmach_board_name(bd, p, l);
759 759 if (err) {
760 760 sbd_err_clear(&err);
761 761 rv = DDI_FAILURE;
762 762 break;
763 763 }
764 764
765 765 minor_num = DR_MAKE_MINOR(instance, bd);
766 766 rv = ddi_create_minor_node(dip, name, S_IFCHR,
767 767 minor_num, DDI_NT_SBD_ATTACHMENT_POINT, NULL);
768 768 if (rv != DDI_SUCCESS)
769 769 rv = DDI_FAILURE;
770 770 }
771 771
772 772 if (rv == DDI_SUCCESS) {
773 773 /*
774 774 * Announce the node's presence.
775 775 */
776 776 ddi_report_dev(dip);
777 777 } else {
778 778 ddi_remove_minor_node(dip, NULL);
779 779 }
780 780 /*
781 781 * Init registered unsafe devs.
782 782 */
783 783 dr_unsafe_devs.devnames = NULL;
784 784 rv2 = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
785 785 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
786 786 "unsupported-io-drivers", &dr_unsafe_devs.devnames,
787 787 &dr_unsafe_devs.ndevs);
788 788
789 789 if (rv2 != DDI_PROP_SUCCESS)
790 790 dr_unsafe_devs.ndevs = 0;
791 791
792 792 rw_exit(&dr_grwlock);
793 793 return (rv);
794 794
795 795 default:
796 796 return (DDI_FAILURE);
797 797 }
798 798
799 799 /*NOTREACHED*/
800 800 }
801 801
802 802 static int
803 803 dr_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
804 804 {
805 805 int instance;
806 806 dr_softstate_t *softsp;
807 807
808 808 switch (cmd) {
809 809 case DDI_DETACH:
810 810 if (!dr_modunload_okay)
811 811 return (DDI_FAILURE);
812 812
813 813 rw_enter(&dr_grwlock, RW_WRITER);
814 814
815 815 instance = ddi_get_instance(dip);
816 816 softsp = ddi_get_soft_state(dr_g.softsp, instance);
817 817
818 818 /* TODO: eliminate dr_boardlist */
819 819 ASSERT(softsp->boards == dr_boardlist);
820 820
821 821 /* remove all minor nodes */
822 822 ddi_remove_minor_node(dip, NULL);
823 823
824 824 if (softsp->dr_initialized) {
825 825 int bd;
826 826
827 827 for (bd = 0; bd < MAX_BOARDS; bd++)
828 828 dr_board_destroy(&softsp->boards[bd]);
829 829 }
830 830
831 831 FREESTRUCT(softsp->boards, dr_board_t, MAX_BOARDS);
832 832 mutex_destroy(&softsp->i_lock);
833 833 ddi_soft_state_free(dr_g.softsp, instance);
834 834
835 835 rw_exit(&dr_grwlock);
836 836 return (DDI_SUCCESS);
837 837
838 838 default:
839 839 return (DDI_FAILURE);
840 840 }
841 841 /*NOTREACHED*/
842 842 }
843 843
844 844 static int
845 845 dr_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
846 846 {
847 847 _NOTE(ARGUNUSED(dip))
848 848
849 849 dev_t dev = (dev_t)arg;
850 850 int instance, error;
851 851 dr_softstate_t *softsp;
852 852
853 853 *result = NULL;
854 854 error = DDI_SUCCESS;
855 855 instance = DR_MINOR2INST(getminor(dev));
856 856
857 857 switch (cmd) {
858 858 case DDI_INFO_DEVT2DEVINFO:
859 859 softsp = ddi_get_soft_state(dr_g.softsp, instance);
860 860 if (softsp == NULL)
861 861 return (DDI_FAILURE);
862 862 *result = (void *)softsp->dip;
863 863 break;
864 864
865 865 case DDI_INFO_DEVT2INSTANCE:
866 866 *result = (void *)(uintptr_t)instance;
867 867 break;
868 868
869 869 default:
870 870 error = DDI_FAILURE;
871 871 break;
872 872 }
873 873
874 874 return (error);
875 875 }
876 876
877 877 /*
878 878 * DR operations.
879 879 */
880 880
881 881 static int
882 882 dr_copyin_iocmd(dr_handle_t *hp)
883 883 {
884 884 static fn_t f = "dr_copyin_iocmd";
885 885 sbd_cmd_t *scp = &hp->h_sbdcmd;
886 886
887 887 if (hp->h_iap == NULL)
888 888 return (EINVAL);
889 889
890 890 bzero((caddr_t)scp, sizeof (sbd_cmd_t));
891 891
892 892 #ifdef _MULTI_DATAMODEL
893 893 if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
894 894 sbd_cmd32_t scmd32;
895 895
896 896 bzero((caddr_t)&scmd32, sizeof (sbd_cmd32_t));
897 897
898 898 if (ddi_copyin((void *)hp->h_iap, (void *)&scmd32,
899 899 sizeof (sbd_cmd32_t), hp->h_mode)) {
900 900 cmn_err(CE_WARN,
901 901 "%s: (32bit) failed to copyin "
902 902 "sbdcmd-struct", f);
903 903 return (EFAULT);
904 904 }
905 905 scp->cmd_cm.c_id.c_type = scmd32.cmd_cm.c_id.c_type;
906 906 scp->cmd_cm.c_id.c_unit = scmd32.cmd_cm.c_id.c_unit;
907 907 bcopy(&scmd32.cmd_cm.c_id.c_name[0],
908 908 &scp->cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
909 909 scp->cmd_cm.c_flags = scmd32.cmd_cm.c_flags;
910 910 scp->cmd_cm.c_len = scmd32.cmd_cm.c_len;
911 911 scp->cmd_cm.c_opts = (caddr_t)(uintptr_t)scmd32.cmd_cm.c_opts;
912 912
913 913 switch (hp->h_cmd) {
914 914 case SBD_CMD_STATUS:
915 915 scp->cmd_stat.s_nbytes = scmd32.cmd_stat.s_nbytes;
916 916 scp->cmd_stat.s_statp =
917 917 (caddr_t)(uintptr_t)scmd32.cmd_stat.s_statp;
918 918 break;
919 919 default:
920 920 break;
921 921
922 922 }
923 923 } else
924 924 #endif /* _MULTI_DATAMODEL */
925 925 if (ddi_copyin((void *)hp->h_iap, (void *)scp,
926 926 sizeof (sbd_cmd_t), hp->h_mode) != 0) {
927 927 cmn_err(CE_WARN,
928 928 "%s: failed to copyin sbdcmd-struct", f);
929 929 return (EFAULT);
930 930 }
931 931
932 932 if ((hp->h_opts.size = scp->cmd_cm.c_len) != 0) {
933 933 hp->h_opts.copts = GETSTRUCT(char, scp->cmd_cm.c_len + 1);
934 934 ++hp->h_opts.size;
935 935 if (ddi_copyin((void *)scp->cmd_cm.c_opts,
936 936 (void *)hp->h_opts.copts,
937 937 scp->cmd_cm.c_len, hp->h_mode) != 0) {
938 938 cmn_err(CE_WARN, "%s: failed to copyin options", f);
939 939 return (EFAULT);
940 940 }
941 941 }
942 942
943 943 return (0);
944 944 }
945 945
946 946 static int
947 947 dr_copyout_iocmd(dr_handle_t *hp)
948 948 {
949 949 static fn_t f = "dr_copyout_iocmd";
950 950 sbd_cmd_t *scp = &hp->h_sbdcmd;
951 951
952 952 if (hp->h_iap == NULL)
953 953 return (EINVAL);
954 954
955 955 #ifdef _MULTI_DATAMODEL
956 956 if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
957 957 sbd_cmd32_t scmd32;
958 958
959 959 scmd32.cmd_cm.c_id.c_type = scp->cmd_cm.c_id.c_type;
960 960 scmd32.cmd_cm.c_id.c_unit = scp->cmd_cm.c_id.c_unit;
961 961 bcopy(&scp->cmd_cm.c_id.c_name[0],
962 962 &scmd32.cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
963 963
964 964 scmd32.cmd_cm.c_flags = scp->cmd_cm.c_flags;
965 965 scmd32.cmd_cm.c_len = scp->cmd_cm.c_len;
966 966 scmd32.cmd_cm.c_opts = (caddr32_t)(uintptr_t)scp->cmd_cm.c_opts;
967 967
968 968 switch (hp->h_cmd) {
969 969 case SBD_CMD_GETNCM:
970 970 scmd32.cmd_getncm.g_ncm = scp->cmd_getncm.g_ncm;
971 971 break;
972 972 default:
973 973 break;
974 974 }
975 975
976 976 if (ddi_copyout((void *)&scmd32, (void *)hp->h_iap,
977 977 sizeof (sbd_cmd32_t), hp->h_mode)) {
978 978 cmn_err(CE_WARN,
979 979 "%s: (32bit) failed to copyout "
980 980 "sbdcmd-struct", f);
981 981 return (EFAULT);
982 982 }
983 983 } else
984 984 #endif /* _MULTI_DATAMODEL */
985 985 if (ddi_copyout((void *)scp, (void *)hp->h_iap,
986 986 sizeof (sbd_cmd_t), hp->h_mode) != 0) {
987 987 cmn_err(CE_WARN,
988 988 "%s: failed to copyout sbdcmd-struct", f);
989 989 return (EFAULT);
990 990 }
991 991
992 992 return (0);
993 993 }
994 994
995 995 static int
996 996 dr_copyout_errs(dr_handle_t *hp)
997 997 {
998 998 static fn_t f = "dr_copyout_errs";
999 999
1000 1000 if (hp->h_err == NULL)
1001 1001 return (0);
1002 1002
1003 1003 if (hp->h_err->e_code) {
1004 1004 PR_ALL("%s: error %d %s",
1005 1005 f, hp->h_err->e_code, hp->h_err->e_rsc);
1006 1006 }
1007 1007
1008 1008 #ifdef _MULTI_DATAMODEL
1009 1009 if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
1010 1010 sbd_error32_t *serr32p;
1011 1011
1012 1012 serr32p = GETSTRUCT(sbd_error32_t, 1);
1013 1013
1014 1014 serr32p->e_code = hp->h_err->e_code;
1015 1015 bcopy(&hp->h_err->e_rsc[0], &serr32p->e_rsc[0],
1016 1016 MAXPATHLEN);
1017 1017 if (ddi_copyout((void *)serr32p,
1018 1018 (void *)&((sbd_ioctl_arg32_t *)hp->h_iap)->i_err,
1019 1019 sizeof (sbd_error32_t), hp->h_mode)) {
1020 1020 cmn_err(CE_WARN,
1021 1021 "%s: (32bit) failed to copyout", f);
1022 1022 return (EFAULT);
1023 1023 }
1024 1024 FREESTRUCT(serr32p, sbd_error32_t, 1);
1025 1025 } else
1026 1026 #endif /* _MULTI_DATAMODEL */
1027 1027 if (ddi_copyout((void *)hp->h_err,
1028 1028 (void *)&hp->h_iap->i_err,
1029 1029 sizeof (sbd_error_t), hp->h_mode)) {
1030 1030 cmn_err(CE_WARN,
1031 1031 "%s: failed to copyout", f);
1032 1032 return (EFAULT);
1033 1033 }
1034 1034
1035 1035 sbd_err_clear(&hp->h_err);
1036 1036
1037 1037 return (0);
1038 1038
1039 1039 }
1040 1040
1041 1041 /*
1042 1042 * pre-op entry point must call sbd_err_set_c(), if needed.
1043 1043 * Return value of non-zero indicates failure.
1044 1044 */
1045 1045 static int
1046 1046 dr_pre_op(dr_handle_t *hp)
1047 1047 {
1048 1048 int rv = 0, t;
1049 1049 int cmd, serr = 0;
1050 1050 dr_devset_t devset;
1051 1051 dr_board_t *bp = hp->h_bd;
1052 1052 dr_handle_t *shp = hp;
1053 1053 static fn_t f = "dr_pre_op";
1054 1054
1055 1055 cmd = hp->h_cmd;
1056 1056 devset = shp->h_devset;
1057 1057
1058 1058 PR_ALL("%s (cmd = %s)...\n", f, SBD_CMD_STR(cmd));
1059 1059
1060 1060 devset = DEVSET_AND(devset, DR_DEVS_PRESENT(bp));
1061 1061 hp->h_err = drmach_pre_op(cmd, bp->b_id, &hp->h_opts, &devset);
1062 1062 if (hp->h_err != NULL) {
1063 1063 PR_ALL("drmach_pre_op failed for cmd %s(%d)\n",
1064 1064 SBD_CMD_STR(cmd), cmd);
1065 1065 return (-1);
1066 1066 }
1067 1067
1068 1068 /*
1069 1069 * Check for valid state transitions.
1070 1070 */
1071 1071 if ((t = CMD2INDEX(cmd)) != -1) {
1072 1072 struct dr_state_trans *transp;
1073 1073 int state_err;
1074 1074
1075 1075 transp = &dr_state_transition[t];
1076 1076 ASSERT(transp->x_cmd == cmd);
1077 1077
1078 1078 state_err = dr_check_transition(bp, &devset, transp, cmd);
1079 1079
1080 1080 if (state_err < 0) {
1081 1081 /*
1082 1082 * Invalidate device.
1083 1083 */
1084 1084 dr_op_err(CE_IGNORE, hp, ESBD_INVAL, NULL);
1085 1085 serr = -1;
1086 1086 PR_ALL("%s: invalid devset (0x%x)\n",
1087 1087 f, (uint_t)devset);
1088 1088 } else if (state_err != 0) {
1089 1089 /*
1090 1090 * State transition is not a valid one.
1091 1091 */
1092 1092 dr_op_err(CE_IGNORE, hp,
1093 1093 transp->x_op[state_err].x_err, NULL);
1094 1094
1095 1095 serr = transp->x_op[state_err].x_rv;
1096 1096
1097 1097 PR_ALL("%s: invalid state %s(%d) for cmd %s(%d)\n",
1098 1098 f, state_str[state_err], state_err,
1099 1099 SBD_CMD_STR(cmd), cmd);
1100 1100 } else {
1101 1101 shp->h_devset = devset;
1102 1102 }
1103 1103 }
1104 1104
1105 1105 if (serr) {
1106 1106 rv = -1;
1107 1107 }
1108 1108
1109 1109 return (rv);
1110 1110 }
1111 1111
1112 1112 static int
1113 1113 dr_post_op(dr_handle_t *hp, int rv)
1114 1114 {
1115 1115 int cmd;
1116 1116 sbd_error_t *err;
1117 1117 dr_board_t *bp = hp->h_bd;
1118 1118 static fn_t f = "dr_post_op";
1119 1119
1120 1120 cmd = hp->h_cmd;
1121 1121
1122 1122 PR_ALL("%s (cmd = %s)...\n", f, SBD_CMD_STR(cmd));
1123 1123
1124 1124 err = drmach_post_op(cmd, bp->b_id, &hp->h_opts, rv);
1125 1125 if (err != NULL) {
1126 1126 PR_ALL("drmach_post_op failed for cmd %s(%d)\n",
1127 1127 SBD_CMD_STR(cmd), cmd);
1128 1128 if (rv == 0) {
1129 1129 ASSERT(hp->h_err == NULL);
1130 1130 hp->h_err = err;
1131 1131 rv = -1;
1132 1132 } else if (hp->h_err == NULL) {
1133 1133 hp->h_err = err;
1134 1134 } else {
1135 1135 sbd_err_clear(&err);
1136 1136 }
1137 1137 }
1138 1138
1139 1139 return (rv);
1140 1140 }
1141 1141
1142 1142 static int
1143 1143 dr_exec_op(dr_handle_t *hp)
1144 1144 {
1145 1145 int rv = 0;
1146 1146 static fn_t f = "dr_exec_op";
1147 1147
1148 1148 /* errors should have been caught by now */
1149 1149 ASSERT(hp->h_err == NULL);
1150 1150
1151 1151 switch (hp->h_cmd) {
1152 1152 case SBD_CMD_ASSIGN:
1153 1153 dr_assign_board(hp);
1154 1154 break;
1155 1155
1156 1156 case SBD_CMD_UNASSIGN:
1157 1157 dr_unassign_board(hp);
1158 1158 break;
1159 1159
1160 1160 case SBD_CMD_POWEROFF:
1161 1161 dr_poweroff_board(hp);
1162 1162 break;
1163 1163
1164 1164 case SBD_CMD_POWERON:
1165 1165 dr_poweron_board(hp);
1166 1166 break;
1167 1167
1168 1168 case SBD_CMD_TEST:
1169 1169 dr_test_board(hp);
1170 1170 break;
1171 1171
1172 1172 case SBD_CMD_CONNECT:
1173 1173 dr_connect(hp);
1174 1174 break;
1175 1175
1176 1176 case SBD_CMD_CONFIGURE:
1177 1177 dr_dev_configure(hp);
1178 1178 break;
1179 1179
1180 1180 case SBD_CMD_UNCONFIGURE:
1181 1181 dr_dev_release(hp);
1182 1182 if (hp->h_err == NULL)
1183 1183 rv = dr_dev_unconfigure(hp);
1184 1184 else
1185 1185 dr_dev_cancel(hp);
1186 1186 break;
1187 1187
1188 1188 case SBD_CMD_DISCONNECT:
1189 1189 rv = dr_disconnect(hp);
1190 1190 break;
1191 1191
1192 1192 case SBD_CMD_STATUS:
1193 1193 rv = dr_dev_status(hp);
1194 1194 break;
1195 1195
1196 1196 case SBD_CMD_GETNCM:
1197 1197 hp->h_sbdcmd.cmd_getncm.g_ncm = dr_get_ncm(hp);
1198 1198 rv = dr_copyout_iocmd(hp);
1199 1199 break;
1200 1200
1201 1201 case SBD_CMD_PASSTHRU:
1202 1202 rv = dr_pt_ioctl(hp);
1203 1203 break;
1204 1204
1205 1205 default:
1206 1206 cmn_err(CE_WARN,
1207 1207 "%s: unknown command (%d)",
1208 1208 f, hp->h_cmd);
1209 1209 break;
1210 1210 }
1211 1211
1212 1212 if (hp->h_err != NULL) {
1213 1213 rv = -1;
1214 1214 }
1215 1215
1216 1216 return (rv);
1217 1217 }
1218 1218
1219 1219 static void
1220 1220 dr_assign_board(dr_handle_t *hp)
1221 1221 {
1222 1222 dr_board_t *bp = hp->h_bd;
1223 1223
1224 1224 hp->h_err = drmach_board_assign(bp->b_num, &bp->b_id);
1225 1225 if (hp->h_err == NULL) {
1226 1226 bp->b_assigned = 1;
1227 1227 }
1228 1228 }
1229 1229
1230 1230 static void
1231 1231 dr_unassign_board(dr_handle_t *hp)
1232 1232 {
1233 1233 dr_board_t *bp = hp->h_bd;
1234 1234
1235 1235 /*
1236 1236 * Block out status during unassign.
1237 1237 * Not doing cv_wait_sig here as starfire SSP software
1238 1238 * ignores unassign failure and removes board from
1239 1239 * domain mask causing system panic.
1240 1240 * TODO: Change cv_wait to cv_wait_sig when SSP software
1241 1241 * handles unassign failure.
1242 1242 */
1243 1243 dr_lock_status(bp);
1244 1244
1245 1245 hp->h_err = drmach_board_unassign(bp->b_id);
1246 1246 if (hp->h_err == NULL) {
1247 1247 /*
1248 1248 * clear drmachid_t handle; not valid after board unassign
1249 1249 */
1250 1250 bp->b_id = 0;
1251 1251 bp->b_assigned = 0;
1252 1252 }
1253 1253
1254 1254 dr_unlock_status(bp);
1255 1255 }
1256 1256
1257 1257 static void
1258 1258 dr_poweron_board(dr_handle_t *hp)
1259 1259 {
1260 1260 dr_board_t *bp = hp->h_bd;
1261 1261
1262 1262 hp->h_err = drmach_board_poweron(bp->b_id);
1263 1263 }
1264 1264
1265 1265 static void
1266 1266 dr_poweroff_board(dr_handle_t *hp)
1267 1267 {
1268 1268 dr_board_t *bp = hp->h_bd;
1269 1269
1270 1270 hp->h_err = drmach_board_poweroff(bp->b_id);
1271 1271 }
1272 1272
1273 1273 static void
1274 1274 dr_test_board(dr_handle_t *hp)
1275 1275 {
1276 1276 dr_board_t *bp = hp->h_bd;
1277 1277 hp->h_err = drmach_board_test(bp->b_id, &hp->h_opts,
1278 1278 dr_cmd_flags(hp) & SBD_FLAG_FORCE);
1279 1279 }
1280 1280
1281 1281 /*
1282 1282 * Create and populate the component nodes for a board. Assumes that the
1283 1283 * devlists for the board have been initialized.
1284 1284 */
1285 1285 static void
1286 1286 dr_make_comp_nodes(dr_board_t *bp)
1287 1287 {
1288 1288 int i;
1289 1289
1290 1290 /*
1291 1291 * Make nodes for the individual components on the board.
1292 1292 * First we need to initialize the memory unit data structures of
1293 1293 * the board structure.
1294 1294 */
1295 1295 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
1296 1296 dr_mem_unit_t *mp;
1297 1297
1298 1298 mp = dr_get_mem_unit(bp, i);
1299 1299 dr_init_mem_unit(mp);
1300 1300 }
1301 1301
1302 1302 /*
1303 1303 * Initialize cpu unit data structures.
1304 1304 */
1305 1305 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
1306 1306 dr_cpu_unit_t *cp;
1307 1307
1308 1308 cp = dr_get_cpu_unit(bp, i);
1309 1309 dr_init_cpu_unit(cp);
1310 1310 }
1311 1311
1312 1312 /*
1313 1313 * Initialize io unit data structures.
1314 1314 */
1315 1315 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
1316 1316 dr_io_unit_t *ip;
1317 1317
1318 1318 ip = dr_get_io_unit(bp, i);
1319 1319 dr_init_io_unit(ip);
1320 1320 }
1321 1321
1322 1322 dr_board_transition(bp, DR_STATE_CONNECTED);
1323 1323
1324 1324 bp->b_rstate = SBD_STAT_CONNECTED;
1325 1325 bp->b_ostate = SBD_STAT_UNCONFIGURED;
1326 1326 bp->b_cond = SBD_COND_OK;
1327 1327 (void) drv_getparm(TIME, (void *)&bp->b_time);
1328 1328
1329 1329 }
1330 1330
1331 1331 /*
1332 1332 * Only do work if called to operate on an entire board
1333 1333 * which doesn't already have components present.
1334 1334 */
1335 1335 static void
1336 1336 dr_connect(dr_handle_t *hp)
1337 1337 {
1338 1338 dr_board_t *bp = hp->h_bd;
1339 1339 static fn_t f = "dr_connect";
1340 1340
1341 1341 PR_ALL("%s...\n", f);
1342 1342
1343 1343 if (DR_DEVS_PRESENT(bp)) {
1344 1344 /*
1345 1345 * Board already has devices present.
1346 1346 */
1347 1347 PR_ALL("%s: devices already present (" DEVSET_FMT_STR ")\n",
1348 1348 f, DEVSET_FMT_ARG(DR_DEVS_PRESENT(bp)));
1349 1349 return;
1350 1350 }
1351 1351
1352 1352 hp->h_err = drmach_board_connect(bp->b_id, &hp->h_opts);
1353 1353 if (hp->h_err)
1354 1354 return;
1355 1355
1356 1356 hp->h_err = dr_init_devlists(bp);
1357 1357 if (hp->h_err)
1358 1358 return;
1359 1359 else if (bp->b_ndev == 0) {
1360 1360 dr_op_err(CE_WARN, hp, ESBD_EMPTY_BD, bp->b_path);
1361 1361 return;
1362 1362 } else {
1363 1363 dr_make_comp_nodes(bp);
1364 1364 return;
1365 1365 }
1366 1366 /*NOTREACHED*/
1367 1367 }
1368 1368
1369 1369 static int
1370 1370 dr_disconnect(dr_handle_t *hp)
1371 1371 {
1372 1372 int i;
1373 1373 dr_devset_t devset;
1374 1374 dr_board_t *bp = hp->h_bd;
1375 1375 static fn_t f = "dr_disconnect";
1376 1376
1377 1377 PR_ALL("%s...\n", f);
1378 1378
1379 1379 /*
1380 1380 * Only devices which are present but
1381 1381 * unattached can be disconnected.
1382 1382 */
1383 1383 devset = hp->h_devset & DR_DEVS_PRESENT(bp) &
1384 1384 DR_DEVS_UNATTACHED(bp);
1385 1385
1386 1386 if ((devset == 0) && DR_DEVS_PRESENT(bp)) {
1387 1387 dr_op_err(CE_IGNORE, hp, ESBD_EMPTY_BD, bp->b_path);
1388 1388 return (0);
1389 1389 }
1390 1390
1391 1391 /*
1392 1392 * Block out status during disconnect.
1393 1393 */
1394 1394 mutex_enter(&bp->b_slock);
1395 1395 while (bp->b_sflags & DR_BSLOCK) {
1396 1396 if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
1397 1397 mutex_exit(&bp->b_slock);
1398 1398 return (EINTR);
1399 1399 }
1400 1400 }
1401 1401 bp->b_sflags |= DR_BSLOCK;
1402 1402 mutex_exit(&bp->b_slock);
1403 1403
1404 1404 hp->h_err = drmach_board_disconnect(bp->b_id, &hp->h_opts);
1405 1405 if (hp->h_err && hp->h_err->e_code == EX86_WALK_DEPENDENCY) {
1406 1406 /*
1407 1407 * Other boards have dependency on this board. No device nodes
1408 1408 * have been destroyed so keep current board status.
1409 1409 */
1410 1410 goto disconnect_done;
1411 1411 }
1412 1412
1413 1413 DR_DEVS_DISCONNECT(bp, devset);
1414 1414
1415 1415 ASSERT((DR_DEVS_ATTACHED(bp) & devset) == 0);
1416 1416
1417 1417 /*
1418 1418 * Update per-device state transitions.
1419 1419 */
1420 1420 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
1421 1421 dr_cpu_unit_t *cp;
1422 1422
1423 1423 if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
1424 1424 continue;
1425 1425
1426 1426 cp = dr_get_cpu_unit(bp, i);
1427 1427 if (dr_disconnect_cpu(cp) == 0)
1428 1428 dr_device_transition(&cp->sbc_cm, DR_STATE_EMPTY);
1429 1429 else if (cp->sbc_cm.sbdev_error != NULL)
1430 1430 DRERR_SET_C(&hp->h_err, &cp->sbc_cm.sbdev_error);
1431 1431
1432 1432 ASSERT(cp->sbc_cm.sbdev_error == NULL);
1433 1433 }
1434 1434
1435 1435 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
1436 1436 dr_mem_unit_t *mp;
1437 1437
1438 1438 if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
1439 1439 continue;
1440 1440
1441 1441 mp = dr_get_mem_unit(bp, i);
1442 1442 if (dr_disconnect_mem(mp) == 0)
1443 1443 dr_device_transition(&mp->sbm_cm, DR_STATE_EMPTY);
1444 1444 else if (mp->sbm_cm.sbdev_error != NULL)
1445 1445 DRERR_SET_C(&hp->h_err, &mp->sbm_cm.sbdev_error);
1446 1446
1447 1447 ASSERT(mp->sbm_cm.sbdev_error == NULL);
1448 1448 }
1449 1449
1450 1450 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
1451 1451 dr_io_unit_t *ip;
1452 1452
1453 1453 if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
1454 1454 continue;
1455 1455
1456 1456 ip = dr_get_io_unit(bp, i);
1457 1457 if (dr_disconnect_io(ip) == 0)
1458 1458 dr_device_transition(&ip->sbi_cm, DR_STATE_EMPTY);
1459 1459 else if (ip->sbi_cm.sbdev_error != NULL)
1460 1460 DRERR_SET_C(&hp->h_err, &ip->sbi_cm.sbdev_error);
1461 1461
1462 1462 ASSERT(ip->sbi_cm.sbdev_error == NULL);
1463 1463 }
1464 1464
1465 1465 if (hp->h_err) {
1466 1466 /*
1467 1467 * For certain errors, drmach_board_disconnect will mark
1468 1468 * the board as unusable; in these cases the devtree must
1469 1469 * be purged so that status calls will succeed.
1470 1470 * XXX
1471 1471 * This implementation checks for discrete error codes -
1472 1472 * someday, the i/f to drmach_board_disconnect should be
1473 1473 * changed to avoid the e_code testing.
1474 1474 */
1475 1475 if (hp->h_err->e_code == EX86_DEPROBE) {
1476 1476 bp->b_ostate = SBD_STAT_UNCONFIGURED;
1477 1477 bp->b_busy = 0;
1478 1478 (void) drv_getparm(TIME, (void *)&bp->b_time);
1479 1479
1480 1480 if (drmach_board_deprobe(bp->b_id))
1481 1481 goto disconnect_done;
1482 1482 else
1483 1483 bp->b_ndev = 0;
1484 1484 }
1485 1485 }
1486 1486
1487 1487 /*
1488 1488 * Once all the components on a board have been disconnected,
1489 1489 * the board's state can transition to disconnected and
1490 1490 * we can allow the deprobe to take place.
1491 1491 */
1492 1492 if (hp->h_err == NULL && DR_DEVS_PRESENT(bp) == 0) {
1493 1493 dr_board_transition(bp, DR_STATE_OCCUPIED);
1494 1494 bp->b_rstate = SBD_STAT_DISCONNECTED;
1495 1495 bp->b_ostate = SBD_STAT_UNCONFIGURED;
1496 1496 bp->b_busy = 0;
1497 1497 (void) drv_getparm(TIME, (void *)&bp->b_time);
1498 1498
1499 1499 hp->h_err = drmach_board_deprobe(bp->b_id);
1500 1500
1501 1501 if (hp->h_err == NULL) {
1502 1502 bp->b_ndev = 0;
1503 1503 dr_board_transition(bp, DR_STATE_EMPTY);
1504 1504 bp->b_rstate = SBD_STAT_EMPTY;
1505 1505 (void) drv_getparm(TIME, (void *)&bp->b_time);
1506 1506 }
1507 1507 }
1508 1508
1509 1509 disconnect_done:
1510 1510 dr_unlock_status(bp);
1511 1511
1512 1512 return (0);
1513 1513 }
1514 1514
1515 1515 /*
1516 1516 * Check if a particular device is a valid target of the current
1517 1517 * operation. Return 1 if it is a valid target, and 0 otherwise.
1518 1518 */
1519 1519 static int
1520 1520 dr_dev_is_target(dr_dev_unit_t *dp, int present_only, uint_t uset)
1521 1521 {
1522 1522 dr_common_unit_t *cp;
1523 1523 int is_present;
1524 1524 int is_attached;
1525 1525
1526 1526 cp = &dp->du_common;
1527 1527
1528 1528 /* check if the user requested this device */
1529 1529 if ((uset & (1 << cp->sbdev_unum)) == 0) {
1530 1530 return (0);
1531 1531 }
1532 1532
1533 1533 is_present = DR_DEV_IS_PRESENT(cp) ? 1 : 0;
1534 1534 is_attached = DR_DEV_IS_ATTACHED(cp) ? 1 : 0;
1535 1535
1536 1536 /*
1537 1537 * If the present_only flag is set, a valid target
1538 1538 * must be present but not attached. Otherwise, it
1539 1539 * must be both present and attached.
1540 1540 */
1541 1541 if (is_present && (present_only ^ is_attached)) {
1542 1542 /* sanity check */
1543 1543 ASSERT(cp->sbdev_id != (drmachid_t)0);
1544 1544
1545 1545 return (1);
1546 1546 }
1547 1547
1548 1548 return (0);
1549 1549 }
1550 1550
1551 1551 static void
1552 1552 dr_dev_make_list(dr_handle_t *hp, sbd_comp_type_t type, int present_only,
1553 1553 dr_common_unit_t ***devlist, int *devnum)
1554 1554 {
1555 1555 dr_board_t *bp = hp->h_bd;
1556 1556 int unum;
1557 1557 int nunits;
1558 1558 uint_t uset;
1559 1559 int len;
1560 1560 dr_common_unit_t **list, **wp;
1561 1561
1562 1562 switch (type) {
1563 1563 case SBD_COMP_CPU:
1564 1564 nunits = MAX_CPU_UNITS_PER_BOARD;
1565 1565 break;
1566 1566 case SBD_COMP_MEM:
1567 1567 nunits = MAX_MEM_UNITS_PER_BOARD;
1568 1568 break;
1569 1569 case SBD_COMP_IO:
1570 1570 nunits = MAX_IO_UNITS_PER_BOARD;
1571 1571 break;
1572 1572 default:
1573 1573 /* catch this in debug kernels */
1574 1574 ASSERT(0);
1575 1575 break;
1576 1576 }
1577 1577
1578 1578 /* allocate list storage. */
1579 1579 len = sizeof (dr_common_unit_t *) * (nunits + 1);
1580 1580 list = kmem_zalloc(len, KM_SLEEP);
1581 1581
1582 1582 /* record length of storage in first element */
1583 1583 *list++ = (dr_common_unit_t *)(uintptr_t)len;
1584 1584
1585 1585 /* get bit array signifying which units are to be involved */
1586 1586 uset = DEVSET_GET_UNITSET(hp->h_devset, type);
1587 1587
1588 1588 /*
1589 1589 * Adjust the loop count for CPU devices since all cores
1590 1590 * in a CMP will be examined in a single iteration.
1591 1591 */
1592 1592 if (type == SBD_COMP_CPU) {
1593 1593 nunits = MAX_CMP_UNITS_PER_BOARD;
1594 1594 }
1595 1595
1596 1596 /* populate list */
1597 1597 for (wp = list, unum = 0; unum < nunits; unum++) {
1598 1598 dr_dev_unit_t *dp;
1599 1599 int core;
1600 1600 int cunum;
1601 1601
1602 1602 dp = DR_GET_BOARD_DEVUNIT(bp, type, unum);
1603 1603 if (dr_dev_is_target(dp, present_only, uset)) {
1604 1604 *wp++ = &dp->du_common;
1605 1605 }
1606 1606
1607 1607 /* further processing is only required for CPUs */
1608 1608 if (type != SBD_COMP_CPU) {
1609 1609 continue;
1610 1610 }
1611 1611
1612 1612 /*
1613 1613 * Add any additional cores from the current CPU
1614 1614 * device. This is to ensure that all the cores
1615 1615 * are grouped together in the device list, and
1616 1616 * consequently sequenced together during the actual
1617 1617 * operation.
1618 1618 */
1619 1619 for (core = 1; core < MAX_CORES_PER_CMP; core++) {
1620 1620 cunum = DR_CMP_CORE_UNUM(unum, core);
1621 1621 dp = DR_GET_BOARD_DEVUNIT(bp, type, cunum);
1622 1622
1623 1623 if (dr_dev_is_target(dp, present_only, uset)) {
1624 1624 *wp++ = &dp->du_common;
1625 1625 }
1626 1626 }
1627 1627 }
1628 1628
1629 1629 /* calculate number of units in list, return result and list pointer */
1630 1630 *devnum = wp - list;
1631 1631 *devlist = list;
1632 1632 }
1633 1633
1634 1634 static void
1635 1635 dr_dev_clean_up(dr_handle_t *hp, dr_common_unit_t **list, int devnum)
1636 1636 {
1637 1637 int len;
1638 1638 int n = 0;
1639 1639 dr_common_unit_t *cp, **rp = list;
1640 1640
1641 1641 /*
1642 1642 * move first encountered unit error to handle if handle
1643 1643 * does not yet have a recorded error.
1644 1644 */
1645 1645 if (hp->h_err == NULL) {
1646 1646 while (n++ < devnum) {
1647 1647 cp = *rp++;
1648 1648 if (cp->sbdev_error != NULL) {
1649 1649 hp->h_err = cp->sbdev_error;
1650 1650 cp->sbdev_error = NULL;
1651 1651 break;
1652 1652 }
1653 1653 }
1654 1654 }
1655 1655
1656 1656 /* free remaining unit errors */
1657 1657 while (n++ < devnum) {
1658 1658 cp = *rp++;
1659 1659 if (cp->sbdev_error != NULL) {
1660 1660 sbd_err_clear(&cp->sbdev_error);
1661 1661 cp->sbdev_error = NULL;
1662 1662 }
1663 1663 }
1664 1664
1665 1665 /* free list */
1666 1666 list -= 1;
1667 1667 len = (int)(uintptr_t)list[0];
1668 1668 kmem_free(list, len);
1669 1669 }
1670 1670
1671 1671 static int
1672 1672 dr_dev_walk(dr_handle_t *hp, sbd_comp_type_t type, int present_only,
1673 1673 int (*pre_op)(dr_handle_t *, dr_common_unit_t **, int),
1674 1674 void (*op)(dr_handle_t *, dr_common_unit_t *),
1675 1675 int (*post_op)(dr_handle_t *, dr_common_unit_t **, int),
1676 1676 void (*board_op)(dr_handle_t *, dr_common_unit_t **, int))
1677 1677 {
1678 1678 int devnum, rv;
1679 1679 dr_common_unit_t **devlist;
1680 1680
1681 1681 dr_dev_make_list(hp, type, present_only, &devlist, &devnum);
1682 1682
1683 1683 rv = 0;
1684 1684 if (devnum > 0) {
1685 1685 rv = (*pre_op)(hp, devlist, devnum);
1686 1686 if (rv == 0) {
1687 1687 int n;
1688 1688
1689 1689 for (n = 0; n < devnum; n++)
1690 1690 (*op)(hp, devlist[n]);
1691 1691
1692 1692 rv = (*post_op)(hp, devlist, devnum);
1693 1693
1694 1694 (*board_op)(hp, devlist, devnum);
1695 1695 }
1696 1696 }
1697 1697
1698 1698 dr_dev_clean_up(hp, devlist, devnum);
1699 1699 return (rv);
1700 1700 }
1701 1701
1702 1702 /*ARGSUSED*/
1703 1703 static int
1704 1704 dr_dev_noop(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
1705 1705 {
1706 1706 return (0);
1707 1707 }
1708 1708
1709 1709 static void
1710 1710 dr_attach_update_state(dr_handle_t *hp,
1711 1711 dr_common_unit_t **devlist, int devnum)
1712 1712 {
1713 1713 dr_board_t *bp = hp->h_bd;
1714 1714 int i;
1715 1715 dr_devset_t devs_unattached, devs_present;
1716 1716 static fn_t f = "dr_attach_update_state";
1717 1717
1718 1718 for (i = 0; i < devnum; i++) {
1719 1719 dr_common_unit_t *cp = devlist[i];
1720 1720
1721 1721 if (dr_check_unit_attached(cp) == -1) {
1722 1722 PR_ALL("%s: ERROR %s not attached\n",
1723 1723 f, cp->sbdev_path);
1724 1724 continue;
1725 1725 }
1726 1726
1727 1727 DR_DEV_SET_ATTACHED(cp);
1728 1728
1729 1729 dr_device_transition(cp, DR_STATE_CONFIGURED);
1730 1730 cp->sbdev_cond = SBD_COND_OK;
1731 1731 }
1732 1732
1733 1733 devs_present = DR_DEVS_PRESENT(bp);
1734 1734 devs_unattached = DR_DEVS_UNATTACHED(bp);
1735 1735
1736 1736 switch (bp->b_state) {
1737 1737 case DR_STATE_CONNECTED:
1738 1738 case DR_STATE_UNCONFIGURED:
1739 1739 ASSERT(devs_present);
1740 1740
1741 1741 if (devs_unattached == 0) {
1742 1742 /*
1743 1743 * All devices finally attached.
1744 1744 */
1745 1745 dr_board_transition(bp, DR_STATE_CONFIGURED);
1746 1746 hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1747 1747 hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1748 1748 hp->h_bd->b_cond = SBD_COND_OK;
1749 1749 hp->h_bd->b_busy = 0;
1750 1750 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1751 1751 } else if (devs_present != devs_unattached) {
1752 1752 /*
1753 1753 * Only some devices are fully attached.
1754 1754 */
1755 1755 dr_board_transition(bp, DR_STATE_PARTIAL);
1756 1756 hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1757 1757 hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1758 1758 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1759 1759 }
1760 1760 break;
1761 1761
1762 1762 case DR_STATE_PARTIAL:
1763 1763 ASSERT(devs_present);
1764 1764 /*
1765 1765 * All devices finally attached.
1766 1766 */
1767 1767 if (devs_unattached == 0) {
1768 1768 dr_board_transition(bp, DR_STATE_CONFIGURED);
1769 1769 hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1770 1770 hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1771 1771 hp->h_bd->b_cond = SBD_COND_OK;
1772 1772 hp->h_bd->b_busy = 0;
1773 1773 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1774 1774 }
1775 1775 break;
1776 1776
1777 1777 default:
1778 1778 break;
1779 1779 }
1780 1780 }
1781 1781
1782 1782 static void
1783 1783 dr_dev_configure(dr_handle_t *hp)
1784 1784 {
1785 1785 int rv;
1786 1786
1787 1787 rv = dr_dev_walk(hp, SBD_COMP_CPU, 1,
1788 1788 dr_pre_attach_cpu,
1789 1789 dr_attach_cpu,
1790 1790 dr_post_attach_cpu,
1791 1791 dr_attach_update_state);
1792 1792
1793 1793 if (rv >= 0) {
1794 1794 rv = dr_dev_walk(hp, SBD_COMP_MEM, 1,
1795 1795 dr_pre_attach_mem,
1796 1796 dr_attach_mem,
1797 1797 dr_post_attach_mem,
1798 1798 dr_attach_update_state);
1799 1799 }
1800 1800
1801 1801 if (rv >= 0) {
1802 1802 (void) dr_dev_walk(hp, SBD_COMP_IO, 1,
1803 1803 dr_pre_attach_io,
1804 1804 dr_attach_io,
1805 1805 dr_post_attach_io,
1806 1806 dr_attach_update_state);
1807 1807 }
1808 1808 }
1809 1809
1810 1810 static void
1811 1811 dr_release_update_state(dr_handle_t *hp,
1812 1812 dr_common_unit_t **devlist, int devnum)
1813 1813 {
1814 1814 _NOTE(ARGUNUSED(devlist))
1815 1815 _NOTE(ARGUNUSED(devnum))
1816 1816
1817 1817 dr_board_t *bp = hp->h_bd;
1818 1818
1819 1819 /*
1820 1820 * If the board is not yet in the RELEASE state and all attached
1821 1821 * components have been released, transition it to the RELEASE state.
1822 1822 */
1823 1823 if ((bp->b_state != DR_STATE_RELEASE) &&
1824 1824 (DR_DEVS_RELEASED(bp) == DR_DEVS_ATTACHED(bp))) {
1825 1825 dr_board_transition(bp, DR_STATE_RELEASE);
1826 1826 hp->h_bd->b_busy = 1;
1827 1827 }
1828 1828 }
1829 1829
1830 1830 /* called by dr_release_done [below] and dr_release_mem_done [dr_mem.c] */
1831 1831 int
1832 1832 dr_release_dev_done(dr_common_unit_t *cp)
1833 1833 {
1834 1834 if (cp->sbdev_state == DR_STATE_RELEASE) {
1835 1835 ASSERT(DR_DEV_IS_RELEASED(cp));
1836 1836
1837 1837 DR_DEV_SET_UNREFERENCED(cp);
1838 1838
1839 1839 dr_device_transition(cp, DR_STATE_UNREFERENCED);
1840 1840
1841 1841 return (0);
1842 1842 } else {
1843 1843 return (-1);
1844 1844 }
1845 1845 }
1846 1846
1847 1847 static void
1848 1848 dr_release_done(dr_handle_t *hp, dr_common_unit_t *cp)
1849 1849 {
1850 1850 _NOTE(ARGUNUSED(hp))
1851 1851
1852 1852 dr_board_t *bp;
1853 1853 static fn_t f = "dr_release_done";
1854 1854
1855 1855 PR_ALL("%s...\n", f);
1856 1856
1857 1857 /* get board pointer & sanity check */
1858 1858 bp = cp->sbdev_bp;
1859 1859 ASSERT(bp == hp->h_bd);
1860 1860
1861 1861 /*
1862 1862 * Transfer the device which just completed its release
1863 1863 * to the UNREFERENCED state.
1864 1864 */
1865 1865 switch (cp->sbdev_type) {
1866 1866 case SBD_COMP_MEM:
1867 1867 dr_release_mem_done(cp);
1868 1868 break;
1869 1869
1870 1870 default:
1871 1871 DR_DEV_SET_RELEASED(cp);
1872 1872
1873 1873 dr_device_transition(cp, DR_STATE_RELEASE);
1874 1874
1875 1875 (void) dr_release_dev_done(cp);
1876 1876 break;
1877 1877 }
1878 1878
1879 1879 /*
1881 1881 * If the board is in the RELEASE state and everything that was
1882 1882 * released has now been unreferenced, transfer the board to the
1883 1883 * UNREFERENCED state.
1883 1883 */
1884 1884 if ((bp->b_state == DR_STATE_RELEASE) &&
1885 1885 (DR_DEVS_RELEASED(bp) == DR_DEVS_UNREFERENCED(bp))) {
1886 1886 dr_board_transition(bp, DR_STATE_UNREFERENCED);
1887 1887 bp->b_busy = 1;
1888 1888 (void) drv_getparm(TIME, (void *)&bp->b_time);
1889 1889 }
1890 1890 }
1891 1891
1892 1892 static void
1893 1893 dr_dev_release_mem(dr_handle_t *hp, dr_common_unit_t *dv)
1894 1894 {
1895 1895 dr_release_mem(dv);
1896 1896 dr_release_done(hp, dv);
1897 1897 }
1898 1898
1899 1899 static void
1900 1900 dr_dev_release(dr_handle_t *hp)
1901 1901 {
1902 1902 int rv;
1903 1903
1904 1904 hp->h_bd->b_busy = 1;
1905 1905
1906 1906 rv = dr_dev_walk(hp, SBD_COMP_CPU, 0,
1907 1907 dr_pre_release_cpu,
1908 1908 dr_release_done,
1909 1909 dr_dev_noop,
1910 1910 dr_release_update_state);
1911 1911
1912 1912 if (rv >= 0) {
1913 1913 rv = dr_dev_walk(hp, SBD_COMP_MEM, 0,
1914 1914 dr_pre_release_mem,
1915 1915 dr_dev_release_mem,
1916 1916 dr_dev_noop,
1917 1917 dr_release_update_state);
1918 1918 }
1919 1919
1920 1920 if (rv >= 0) {
1921 1921 rv = dr_dev_walk(hp, SBD_COMP_IO, 0,
1922 1922 dr_pre_release_io,
1923 1923 dr_release_done,
1924 1924 dr_dev_noop,
1925 1925 dr_release_update_state);
1926 1926
1927 1927 }
1928 1928
1929 1929 if (rv < 0)
1930 1930 hp->h_bd->b_busy = 0;
1931 1931 /* else, b_busy will be cleared in dr_detach_update_state() */
1932 1932 }
1933 1933
1934 1934 static void
1935 1935 dr_detach_update_state(dr_handle_t *hp,
1936 1936 dr_common_unit_t **devlist, int devnum)
1937 1937 {
1938 1938 dr_board_t *bp = hp->h_bd;
1939 1939 int i;
1940 1940 dr_state_t bstate;
1941 1941 static fn_t f = "dr_detach_update_state";
1942 1942
1943 1943 for (i = 0; i < devnum; i++) {
1944 1944 dr_common_unit_t *cp = devlist[i];
1945 1945
1946 1946 if (dr_check_unit_attached(cp) >= 0) {
1947 1947 /*
1948 1948 			 * Device is still attached, probably due
1949 1949 * to an error. Need to keep track of it.
1950 1950 */
1951 1951 PR_ALL("%s: ERROR %s not detached\n",
1952 1952 f, cp->sbdev_path);
1953 1953
1954 1954 continue;
1955 1955 }
1956 1956
1957 1957 DR_DEV_CLR_ATTACHED(cp);
1958 1958 DR_DEV_CLR_RELEASED(cp);
1959 1959 DR_DEV_CLR_UNREFERENCED(cp);
1960 1960 dr_device_transition(cp, DR_STATE_UNCONFIGURED);
1961 1961 }
1962 1962
1963 1963 bstate = bp->b_state;
1964 1964 if (bstate != DR_STATE_UNCONFIGURED) {
1965 1965 if (DR_DEVS_PRESENT(bp) == DR_DEVS_UNATTACHED(bp)) {
1966 1966 /*
1967 1967 * All devices are finally detached.
1968 1968 */
1969 1969 dr_board_transition(bp, DR_STATE_UNCONFIGURED);
1970 1970 hp->h_bd->b_ostate = SBD_STAT_UNCONFIGURED;
1971 1971 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1972 1972 } else if ((bp->b_state != DR_STATE_PARTIAL) &&
1973 1973 (DR_DEVS_ATTACHED(bp) !=
1974 1974 DR_DEVS_PRESENT(bp))) {
1975 1975 /*
1976 1976 * Some devices remain attached.
1977 1977 */
1978 1978 dr_board_transition(bp, DR_STATE_PARTIAL);
1979 1979 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1980 1980 }
1981 1981
1982 1982 if ((hp->h_devset & DR_DEVS_UNATTACHED(bp)) == hp->h_devset)
1983 1983 hp->h_bd->b_busy = 0;
1984 1984 }
1985 1985 }
1986 1986
1987 1987 static int
1988 1988 dr_dev_unconfigure(dr_handle_t *hp)
1989 1989 {
1990 1990 dr_board_t *bp = hp->h_bd;
1991 1991
1992 1992 /*
1993 1993 * Block out status during IO unconfig.
1994 1994 */
1995 1995 mutex_enter(&bp->b_slock);
1996 1996 while (bp->b_sflags & DR_BSLOCK) {
1997 1997 if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
1998 1998 mutex_exit(&bp->b_slock);
1999 1999 return (EINTR);
2000 2000 }
2001 2001 }
2002 2002 bp->b_sflags |= DR_BSLOCK;
2003 2003 mutex_exit(&bp->b_slock);
2004 2004
2005 2005 (void) dr_dev_walk(hp, SBD_COMP_IO, 0,
2006 2006 dr_pre_detach_io,
2007 2007 dr_detach_io,
2008 2008 dr_post_detach_io,
2009 2009 dr_detach_update_state);
2010 2010
2011 2011 dr_unlock_status(bp);
2012 2012
2013 2013 (void) dr_dev_walk(hp, SBD_COMP_CPU, 0,
2014 2014 dr_pre_detach_cpu,
2015 2015 dr_detach_cpu,
2016 2016 dr_post_detach_cpu,
2017 2017 dr_detach_update_state);
2018 2018
2019 2019 (void) dr_dev_walk(hp, SBD_COMP_MEM, 0,
2020 2020 dr_pre_detach_mem,
2021 2021 dr_detach_mem,
2022 2022 dr_post_detach_mem,
2023 2023 dr_detach_update_state);
2024 2024
2025 2025 return (0);
2026 2026 }
2027 2027
2028 2028 static void
2029 2029 dr_dev_cancel(dr_handle_t *hp)
2030 2030 {
2031 2031 int i;
2032 2032 dr_devset_t devset;
2033 2033 dr_board_t *bp = hp->h_bd;
2034 2034 static fn_t f = "dr_dev_cancel";
2035 2035
2036 2036 PR_ALL("%s...\n", f);
2037 2037
2038 2038 /*
2039 2039 * Only devices which have been "released" are
2040 2040 * subject to cancellation.
2041 2041 */
2042 2042 devset = hp->h_devset & DR_DEVS_RELEASED(bp);
2043 2043
2044 2044 /*
2045 2045 * Nothing to do for CPUs or IO other than change back
2046 2046 * their state.
2047 2047 */
2048 2048 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
2049 2049 dr_cpu_unit_t *cp;
2050 2050 dr_state_t nstate;
2051 2051
2052 2052 if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
2053 2053 continue;
2054 2054
2055 2055 cp = dr_get_cpu_unit(bp, i);
2056 2056 if (dr_cancel_cpu(cp) == 0)
2057 2057 nstate = DR_STATE_CONFIGURED;
2058 2058 else
2059 2059 nstate = DR_STATE_FATAL;
2060 2060
2061 2061 dr_device_transition(&cp->sbc_cm, nstate);
2062 2062 }
2063 2063
2064 2064 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
2065 2065 dr_io_unit_t *ip;
2066 2066
2067 2067 if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
2068 2068 continue;
2069 2069 ip = dr_get_io_unit(bp, i);
2070 2070 dr_device_transition(&ip->sbi_cm, DR_STATE_CONFIGURED);
2071 2071 }
2072 2072 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
2073 2073 dr_mem_unit_t *mp;
2074 2074 dr_state_t nstate;
2075 2075
2076 2076 if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
2077 2077 continue;
2078 2078
2079 2079 mp = dr_get_mem_unit(bp, i);
2080 2080 if (dr_cancel_mem(mp) == 0)
2081 2081 nstate = DR_STATE_CONFIGURED;
2082 2082 else
2083 2083 nstate = DR_STATE_FATAL;
2084 2084
2085 2085 dr_device_transition(&mp->sbm_cm, nstate);
2086 2086 }
2087 2087
2088 2088 PR_ALL("%s: unreleasing devset (0x%x)\n", f, (uint_t)devset);
2089 2089
2090 2090 DR_DEVS_CANCEL(bp, devset);
2091 2091
2092 2092 if (DR_DEVS_RELEASED(bp) == 0) {
2093 2093 dr_state_t new_state;
2094 2094 /*
2095 2095 * If the board no longer has any released devices
2096 2096 		 * then transfer it back to the CONFIG/PARTIAL state.
2097 2097 */
2098 2098 if (DR_DEVS_ATTACHED(bp) == DR_DEVS_PRESENT(bp))
2099 2099 new_state = DR_STATE_CONFIGURED;
2100 2100 else
2101 2101 new_state = DR_STATE_PARTIAL;
2102 2102 if (bp->b_state != new_state) {
2103 2103 dr_board_transition(bp, new_state);
2104 2104 }
2105 2105 hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
2106 2106 hp->h_bd->b_busy = 0;
2107 2107 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
2108 2108 }
2109 2109 }
2110 2110
2111 2111 static int
2112 2112 dr_dev_status(dr_handle_t *hp)
2113 2113 {
2114 2114 int nstat, mode, ncm, sz, pbsz, pnstat;
2115 2115 dr_handle_t *shp;
2116 2116 dr_devset_t devset = 0;
2117 2117 sbd_stat_t *dstatp = NULL;
2118 2118 sbd_dev_stat_t *devstatp;
2119 2119 dr_board_t *bp;
2120 2120 drmach_status_t pstat;
2121 2121 int rv = 0;
2122 2122
2123 2123 #ifdef _MULTI_DATAMODEL
2124 2124 int sz32 = 0;
2125 2125 #endif /* _MULTI_DATAMODEL */
2126 2126
2127 2127 static fn_t f = "dr_dev_status";
2128 2128
2129 2129 PR_ALL("%s...\n", f);
2130 2130
2131 2131 mode = hp->h_mode;
2132 2132 shp = hp;
2133 2133 devset = shp->h_devset;
2134 2134 bp = hp->h_bd;
2135 2135
2136 2136 /*
2137 2137 * Block out disconnect, unassign, IO unconfigure and
2138 2138 * devinfo branch creation during status.
2139 2139 */
2140 2140 mutex_enter(&bp->b_slock);
2141 2141 while (bp->b_sflags & DR_BSLOCK) {
2142 2142 if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
2143 2143 mutex_exit(&bp->b_slock);
2144 2144 return (EINTR);
2145 2145 }
2146 2146 }
2147 2147 bp->b_sflags |= DR_BSLOCK;
2148 2148 mutex_exit(&bp->b_slock);
2149 2149
2150 2150 ncm = 1;
2151 2151 if (hp->h_sbdcmd.cmd_cm.c_id.c_type == SBD_COMP_NONE) {
2152 2152 if (dr_cmd_flags(hp) & SBD_FLAG_ALLCMP) {
2153 2153 /*
2154 2154 * Calculate the maximum number of components possible
2155 2155 * for a board. This number will be used to size the
2156 2156 * status scratch buffer used by board and component
2157 2157 * status functions.
2158 2158 * This buffer may differ in size from what is provided
2159 2159 * by the plugin, since the known component set on the
2160 2160 			 * board may change between the plugin's GETNCM call and
2161 2161 * the status call. Sizing will be adjusted to the plugin's
2162 2162 * receptacle buffer at copyout time.
2163 2163 */
2164 2164 ncm = MAX_CPU_UNITS_PER_BOARD +
2165 2165 MAX_MEM_UNITS_PER_BOARD +
2166 2166 MAX_IO_UNITS_PER_BOARD;
2167 2167
2168 2168 } else {
2169 2169 /*
2170 2170 * In the case of c_type == SBD_COMP_NONE, and
2171 2171 * SBD_FLAG_ALLCMP not specified, only the board
2172 2172 * info is to be returned, no components.
2173 2173 */
2174 2174 ncm = 0;
2175 2175 devset = 0;
2176 2176 }
2177 2177 }
2178 2178
2179 2179 sz = sizeof (sbd_stat_t);
2180 2180 if (ncm > 1)
2181 2181 sz += sizeof (sbd_dev_stat_t) * (ncm - 1);
2182 2182
2183 2183
2184 2184 pbsz = (int)hp->h_sbdcmd.cmd_stat.s_nbytes;
2185 2185 pnstat = (pbsz - sizeof (sbd_stat_t)) / sizeof (sbd_dev_stat_t);
2186 2186
2187 2187 /*
2188 2188 * s_nbytes describes the size of the preallocated user
2189 2189 	 * buffer into which the application is expecting to
2190 2190 * receive the sbd_stat_t and sbd_dev_stat_t structures.
2191 2191 */
2192 2192
2193 2193 #ifdef _MULTI_DATAMODEL
2194 2194
2195 2195 /*
2196 2196 * More buffer space is required for the 64bit to 32bit
2197 2197 * conversion of data structures.
2198 2198 */
2199 2199 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
2200 2200 sz32 = sizeof (sbd_stat32_t);
2201 2201 if (ncm > 1)
2202 2202 sz32 += sizeof (sbd_dev_stat32_t) * (ncm - 1);
2203 2203 pnstat = (pbsz - sizeof (sbd_stat32_t))/
2204 2204 sizeof (sbd_dev_stat32_t);
2205 2205 }
2206 2206
2207 2207 sz += sz32;
2208 2208 #endif
2209 2209 /*
2210 2210 * Since one sbd_dev_stat_t is included in the sbd_stat_t,
2211 2211 * increment the plugin's nstat count.
2212 2212 */
2213 2213 ++pnstat;
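	/*
	 * Illustrative sketch of the arithmetic above, assuming a
	 * hypothetical LP64 plugin buffer sized for one sbd_stat_t plus
	 * three extra sbd_dev_stat_t entries:
	 *
	 *	pbsz   = sizeof (sbd_stat_t) + 3 * sizeof (sbd_dev_stat_t);
	 *	pnstat = (pbsz - sizeof (sbd_stat_t)) /
	 *	    sizeof (sbd_dev_stat_t);		(yields 3)
	 *	++pnstat;				(yields 4)
	 *
	 * The increment accounts for the sbd_dev_stat_t embedded in
	 * sbd_stat_t as s_stat[0], so four component entries fit.
	 */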
2214 2214
2215 2215 if (bp->b_id == 0) {
2216 2216 bzero(&pstat, sizeof (pstat));
2217 2217 } else {
2218 2218 sbd_error_t *err;
2219 2219
2220 2220 err = drmach_status(bp->b_id, &pstat);
2221 2221 if (err) {
2222 2222 DRERR_SET_C(&hp->h_err, &err);
2223 2223 rv = EIO;
2224 2224 goto status_done;
2225 2225 }
2226 2226 }
2227 2227
2228 2228 dstatp = (sbd_stat_t *)(void *)GETSTRUCT(char, sz);
2229 2229
2230 2230 devstatp = &dstatp->s_stat[0];
2231 2231
2232 2232 dstatp->s_board = bp->b_num;
2233 2233
2234 2234 /*
2235 2235 * Detect transitions between empty and disconnected.
2236 2236 */
2237 2237 if (!pstat.empty && (bp->b_rstate == SBD_STAT_EMPTY))
2238 2238 bp->b_rstate = SBD_STAT_DISCONNECTED;
2239 2239 else if (pstat.empty && (bp->b_rstate == SBD_STAT_DISCONNECTED))
2240 2240 bp->b_rstate = SBD_STAT_EMPTY;
2241 2241
2242 2242 dstatp->s_rstate = bp->b_rstate;
2243 2243 dstatp->s_ostate = bp->b_ostate;
2244 2244 dstatp->s_cond = bp->b_cond = pstat.cond;
2245 2245 dstatp->s_busy = bp->b_busy | pstat.busy;
2246 2246 dstatp->s_time = bp->b_time;
2247 2247 dstatp->s_power = pstat.powered;
2248 2248 dstatp->s_assigned = bp->b_assigned = pstat.assigned;
2249 2249 dstatp->s_nstat = nstat = 0;
2250 2250 bcopy(&pstat.type[0], &dstatp->s_type[0], SBD_TYPE_LEN);
2251 2251 bcopy(&pstat.info[0], &dstatp->s_info[0], SBD_MAX_INFO);
2252 2252
2253 2253 devset &= DR_DEVS_PRESENT(bp);
2254 2254 if (devset == 0) {
2255 2255 /*
2256 2256 * No device chosen.
2257 2257 */
2258 2258 PR_ALL("%s: no device present\n", f);
2259 2259 }
2260 2260
2261 2261 if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT))
2262 2262 if ((nstat = dr_cpu_status(hp, devset, devstatp)) > 0) {
2263 2263 dstatp->s_nstat += nstat;
2264 2264 devstatp += nstat;
2265 2265 }
2266 2266
2267 2267 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT))
2268 2268 if ((nstat = dr_mem_status(hp, devset, devstatp)) > 0) {
2269 2269 dstatp->s_nstat += nstat;
2270 2270 devstatp += nstat;
2271 2271 }
2272 2272
2273 2273 if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT))
2274 2274 if ((nstat = dr_io_status(hp, devset, devstatp)) > 0) {
2275 2275 dstatp->s_nstat += nstat;
2276 2276 devstatp += nstat;
2277 2277 }
2278 2278
2279 2279 /*
2280 2280 * Due to a possible change in number of components between
2281 2281 	 * the time of the plugin's GETNCM call and now, there may be
2282 2282 	 * more or fewer components than the plugin's buffer can
2283 2283 * hold. Adjust s_nstat accordingly.
2284 2284 */
2285 2285
2286 2286 dstatp->s_nstat = dstatp->s_nstat > pnstat ? pnstat : dstatp->s_nstat;
2287 2287
2288 2288 #ifdef _MULTI_DATAMODEL
2289 2289 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
2290 2290 int i, j;
2291 2291 sbd_stat32_t *dstat32p;
2292 2292
2293 2293 dstat32p = (sbd_stat32_t *)devstatp;
2294 2294
2295 2295 /* Alignment Paranoia */
2296 2296 if ((ulong_t)dstat32p & 0x1) {
2297 2297 PR_ALL("%s: alignment: sz=0x%lx dstat32p=0x%p\n",
2298 2298 f, sizeof (sbd_stat32_t), (void *)dstat32p);
2299 2299 DR_OP_INTERNAL_ERROR(hp);
2300 2300 rv = EINVAL;
2301 2301 goto status_done;
2302 2302 }
2303 2303
2304 2304 /* paranoia: detect buffer overrun */
2305 2305 if ((caddr_t)&dstat32p->s_stat[dstatp->s_nstat] >
2306 2306 ((caddr_t)dstatp) + sz) {
2307 2307 DR_OP_INTERNAL_ERROR(hp);
2308 2308 rv = EINVAL;
2309 2309 goto status_done;
2310 2310 }
2311 2311
2312 2312 /* copy sbd_stat_t structure members */
2313 2313 #define _SBD_STAT(t, m) dstat32p->m = (t)dstatp->m
2314 2314 _SBD_STAT(int32_t, s_board);
2315 2315 _SBD_STAT(int32_t, s_rstate);
2316 2316 _SBD_STAT(int32_t, s_ostate);
2317 2317 _SBD_STAT(int32_t, s_cond);
2318 2318 _SBD_STAT(int32_t, s_busy);
2319 2319 _SBD_STAT(time32_t, s_time);
2320 2320 _SBD_STAT(uint32_t, s_power);
2321 2321 _SBD_STAT(uint32_t, s_assigned);
2322 2322 _SBD_STAT(int32_t, s_nstat);
2323 2323 bcopy(&dstatp->s_type[0], &dstat32p->s_type[0],
2324 2324 SBD_TYPE_LEN);
2325 2325 bcopy(&dstatp->s_info[0], &dstat32p->s_info[0],
2326 2326 SBD_MAX_INFO);
2327 2327 #undef _SBD_STAT
2328 2328
2329 2329 for (i = 0; i < dstatp->s_nstat; i++) {
2330 2330 sbd_dev_stat_t *dsp = &dstatp->s_stat[i];
2331 2331 sbd_dev_stat32_t *ds32p = &dstat32p->s_stat[i];
2332 2332 #define _SBD_DEV_STAT(t, m) ds32p->m = (t)dsp->m
2333 2333
2334 2334 /* copy sbd_cm_stat_t structure members */
2335 2335 _SBD_DEV_STAT(int32_t, ds_type);
2336 2336 _SBD_DEV_STAT(int32_t, ds_unit);
2337 2337 _SBD_DEV_STAT(int32_t, ds_ostate);
2338 2338 _SBD_DEV_STAT(int32_t, ds_cond);
2339 2339 _SBD_DEV_STAT(int32_t, ds_busy);
2340 2340 _SBD_DEV_STAT(int32_t, ds_suspend);
2341 2341 _SBD_DEV_STAT(time32_t, ds_time);
2342 2342 bcopy(&dsp->ds_name[0], &ds32p->ds_name[0],
2343 2343 OBP_MAXPROPNAME);
2344 2344
2345 2345 switch (dsp->ds_type) {
2346 2346 case SBD_COMP_CPU:
2347 2347 /* copy sbd_cpu_stat_t structure members */
2348 2348 _SBD_DEV_STAT(int32_t, d_cpu.cs_isbootproc);
2349 2349 _SBD_DEV_STAT(int32_t, d_cpu.cs_cpuid);
2350 2350 _SBD_DEV_STAT(int32_t, d_cpu.cs_speed);
2351 2351 _SBD_DEV_STAT(int32_t, d_cpu.cs_ecache);
2352 2352 break;
2353 2353
2354 2354 case SBD_COMP_MEM:
2355 2355 /* copy sbd_mem_stat_t structure members */
2356 2356 _SBD_DEV_STAT(int32_t, d_mem.ms_interleave);
2357 2357 _SBD_DEV_STAT(uint32_t, d_mem.ms_basepfn);
2358 2358 _SBD_DEV_STAT(uint32_t, d_mem.ms_totpages);
2359 2359 _SBD_DEV_STAT(uint32_t, d_mem.ms_detpages);
2360 2360 _SBD_DEV_STAT(int32_t, d_mem.ms_pageslost);
2361 2361 _SBD_DEV_STAT(uint32_t, d_mem.ms_managed_pages);
2362 2362 _SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_pages);
2363 2363 _SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_first);
2364 2364 _SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_last);
2365 2365 _SBD_DEV_STAT(int32_t, d_mem.ms_cage_enabled);
2366 2366 _SBD_DEV_STAT(int32_t, d_mem.ms_peer_is_target);
2367 2367 bcopy(&dsp->d_mem.ms_peer_ap_id[0],
2368 2368 &ds32p->d_mem.ms_peer_ap_id[0],
2369 2369 sizeof (ds32p->d_mem.ms_peer_ap_id));
2370 2370 break;
2371 2371
2372 2372 case SBD_COMP_IO:
2373 2373 /* copy sbd_io_stat_t structure members */
2374 2374 _SBD_DEV_STAT(int32_t, d_io.is_referenced);
2375 2375 _SBD_DEV_STAT(int32_t, d_io.is_unsafe_count);
2376 2376
2377 2377 for (j = 0; j < SBD_MAX_UNSAFE; j++)
2378 2378 _SBD_DEV_STAT(int32_t,
2379 2379 d_io.is_unsafe_list[j]);
2380 2380
2381 2381 bcopy(&dsp->d_io.is_pathname[0],
2382 2382 &ds32p->d_io.is_pathname[0], MAXPATHLEN);
2383 2383 break;
2384 2384
2385 2385 case SBD_COMP_CMP:
2386 2386 /* copy sbd_cmp_stat_t structure members */
2387 2387 bcopy(&dsp->d_cmp.ps_cpuid[0],
2388 2388 &ds32p->d_cmp.ps_cpuid[0],
2389 2389 sizeof (ds32p->d_cmp.ps_cpuid));
2390 2390 _SBD_DEV_STAT(int32_t, d_cmp.ps_ncores);
2391 2391 _SBD_DEV_STAT(int32_t, d_cmp.ps_speed);
2392 2392 _SBD_DEV_STAT(int32_t, d_cmp.ps_ecache);
2393 2393 break;
2394 2394
2395 2395 default:
2396 2396 cmn_err(CE_WARN, "%s: unknown dev type (%d)",
2397 2397 f, (int)dsp->ds_type);
2398 2398 rv = EFAULT;
2399 2399 goto status_done;
2400 2400 }
2401 2401 #undef _SBD_DEV_STAT
2402 2402 }
2403 2403
2404 2404
2405 2405 if (ddi_copyout((void *)dstat32p,
2406 2406 hp->h_sbdcmd.cmd_stat.s_statp, pbsz, mode) != 0) {
2407 2407 cmn_err(CE_WARN,
2408 2408 "%s: failed to copyout status "
2409 2409 "for board %d", f, bp->b_num);
2410 2410 rv = EFAULT;
2411 2411 goto status_done;
2412 2412 }
2413 2413 } else
2414 2414 #endif /* _MULTI_DATAMODEL */
2415 2415
2416 2416 if (ddi_copyout((void *)dstatp, hp->h_sbdcmd.cmd_stat.s_statp,
2417 2417 pbsz, mode) != 0) {
2418 2418 cmn_err(CE_WARN,
2419 2419 "%s: failed to copyout status for board %d",
2420 2420 f, bp->b_num);
2421 2421 rv = EFAULT;
2422 2422 goto status_done;
2423 2423 }
2424 2424
2425 2425 status_done:
2426 2426 if (dstatp != NULL)
2427 2427 FREESTRUCT(dstatp, char, sz);
2428 2428
2429 2429 dr_unlock_status(bp);
2430 2430
2431 2431 return (rv);
2432 2432 }
2433 2433
2434 2434 static int
2435 2435 dr_get_ncm(dr_handle_t *hp)
2436 2436 {
2437 2437 int i;
2438 2438 int ncm = 0;
2439 2439 dr_devset_t devset;
2440 2440
2441 2441 devset = DR_DEVS_PRESENT(hp->h_bd);
2442 2442 if (hp->h_sbdcmd.cmd_cm.c_id.c_type != SBD_COMP_NONE)
2443 2443 devset &= DEVSET(hp->h_sbdcmd.cmd_cm.c_id.c_type,
2444 2444 DEVSET_ANYUNIT);
2445 2445
2446 2446 /*
2447 2447 * Handle CPUs first to deal with possible CMP
2448 2448 * devices. If the CPU is a CMP, we need to only
2449 2449 * increment ncm once even if there are multiple
2450 2450 * cores for that CMP present in the devset.
2451 2451 */
2452 2452 for (i = 0; i < MAX_CMP_UNITS_PER_BOARD; i++) {
2453 2453 if (devset & DEVSET(SBD_COMP_CMP, i)) {
2454 2454 ncm++;
2455 2455 }
2456 2456 }
2457 2457
2458 2458 /* eliminate the CPU information from the devset */
2459 2459 devset &= ~(DEVSET(SBD_COMP_CMP, DEVSET_ANYUNIT));
2460 2460
2461 2461 for (i = 0; i < (sizeof (dr_devset_t) * 8); i++) {
2462 2462 ncm += devset & 0x1;
2463 2463 devset >>= 1;
2464 2464 }
2465 2465
2466 2466 return (ncm);
2467 2467 }
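/*
 * Illustrative sketch of the counting in dr_get_ncm(), using a
 * hypothetical devset in which CMP 0, one memory unit and two I/O
 * units are present:
 *
 *	devset = DEVSET(SBD_COMP_CMP, 0) | DEVSET(SBD_COMP_MEM, 0) |
 *	    DEVSET(SBD_COMP_IO, 0) | DEVSET(SBD_COMP_IO, 1);
 *
 * The CMP loop adds 1 (one CMP, no matter how many of its cores are
 * in the devset), the CMP bits are then masked off, and the final
 * bit-count loop adds 3 for the remaining MEM and IO bits, so
 * dr_get_ncm() would return 4.
 */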
2468 2468
2469 2469 /* used by dr_mem.c */
2470 2470 /* TODO: eliminate dr_boardlist */
2471 2471 dr_board_t *
2472 2472 dr_lookup_board(int board_num)
2473 2473 {
2474 2474 dr_board_t *bp;
2475 2475
2476 2476 ASSERT(board_num >= 0 && board_num < MAX_BOARDS);
2477 2477
2478 2478 bp = &dr_boardlist[board_num];
2479 2479 ASSERT(bp->b_num == board_num);
2480 2480
2481 2481 return (bp);
2482 2482 }
2483 2483
2484 2484 static dr_dev_unit_t *
2485 2485 dr_get_dev_unit(dr_board_t *bp, sbd_comp_type_t nt, int unit_num)
2486 2486 {
2487 2487 dr_dev_unit_t *dp;
2488 2488
2489 2489 dp = DR_GET_BOARD_DEVUNIT(bp, nt, unit_num);
2490 2490 ASSERT(dp->du_common.sbdev_bp == bp);
2491 2491 ASSERT(dp->du_common.sbdev_unum == unit_num);
2492 2492 ASSERT(dp->du_common.sbdev_type == nt);
2493 2493
2494 2494 return (dp);
2495 2495 }
2496 2496
2497 2497 dr_cpu_unit_t *
2498 2498 dr_get_cpu_unit(dr_board_t *bp, int unit_num)
2499 2499 {
2500 2500 dr_dev_unit_t *dp;
2501 2501
2502 2502 ASSERT(unit_num >= 0 && unit_num < MAX_CPU_UNITS_PER_BOARD);
2503 2503
2504 2504 dp = dr_get_dev_unit(bp, SBD_COMP_CPU, unit_num);
2505 2505 return (&dp->du_cpu);
2506 2506 }
2507 2507
2508 2508 dr_mem_unit_t *
2509 2509 dr_get_mem_unit(dr_board_t *bp, int unit_num)
2510 2510 {
2511 2511 dr_dev_unit_t *dp;
2512 2512
2513 2513 ASSERT(unit_num >= 0 && unit_num < MAX_MEM_UNITS_PER_BOARD);
2514 2514
2515 2515 dp = dr_get_dev_unit(bp, SBD_COMP_MEM, unit_num);
2516 2516 return (&dp->du_mem);
2517 2517 }
2518 2518
2519 2519 dr_io_unit_t *
2520 2520 dr_get_io_unit(dr_board_t *bp, int unit_num)
2521 2521 {
2522 2522 dr_dev_unit_t *dp;
2523 2523
2524 2524 ASSERT(unit_num >= 0 && unit_num < MAX_IO_UNITS_PER_BOARD);
2525 2525
2526 2526 dp = dr_get_dev_unit(bp, SBD_COMP_IO, unit_num);
2527 2527 return (&dp->du_io);
2528 2528 }
2529 2529
2530 2530 dr_common_unit_t *
2531 2531 dr_get_common_unit(dr_board_t *bp, sbd_comp_type_t nt, int unum)
2532 2532 {
2533 2533 dr_dev_unit_t *dp;
2534 2534
2535 2535 dp = dr_get_dev_unit(bp, nt, unum);
2536 2536 return (&dp->du_common);
2537 2537 }
2538 2538
2539 2539 static dr_devset_t
2540 2540 dr_dev2devset(sbd_comp_id_t *cid)
2541 2541 {
2542 2542 static fn_t f = "dr_dev2devset";
2543 2543
2544 2544 dr_devset_t devset;
2545 2545 int unit = cid->c_unit;
2546 2546
2547 2547 switch (cid->c_type) {
2548 2548 case SBD_COMP_NONE:
2549 2549 devset = DEVSET(SBD_COMP_CPU, DEVSET_ANYUNIT);
2550 2550 devset |= DEVSET(SBD_COMP_MEM, DEVSET_ANYUNIT);
2551 2551 devset |= DEVSET(SBD_COMP_IO, DEVSET_ANYUNIT);
2552 2552 PR_ALL("%s: COMP_NONE devset = " DEVSET_FMT_STR "\n",
2553 2553 f, DEVSET_FMT_ARG(devset));
2554 2554 break;
2555 2555
2556 2556 case SBD_COMP_CPU:
2557 2557 if ((unit > MAX_CPU_UNITS_PER_BOARD) || (unit < 0)) {
2558 2558 cmn_err(CE_WARN,
2559 2559 "%s: invalid cpu unit# = %d",
2560 2560 f, unit);
2561 2561 devset = 0;
2562 2562 } else {
2563 2563 /*
2564 2564 * Generate a devset that includes all the
2565 2565 * cores of a CMP device. If this is not a
2566 2566 * CMP, the extra cores will be eliminated
2567 2567 * later since they are not present. This is
2568 2568 * also true for CMP devices that do not have
2569 2569 * all cores active.
2570 2570 */
2571 2571 devset = DEVSET(SBD_COMP_CMP, unit);
2572 2572 }
2573 2573
2574 2574 PR_ALL("%s: CPU devset = " DEVSET_FMT_STR "\n",
2575 2575 f, DEVSET_FMT_ARG(devset));
2576 2576 break;
2577 2577
2578 2578 case SBD_COMP_MEM:
2579 2579 if (unit == SBD_NULL_UNIT) {
2580 2580 unit = 0;
2581 2581 cid->c_unit = 0;
2582 2582 }
2583 2583
2584 2584 if ((unit > MAX_MEM_UNITS_PER_BOARD) || (unit < 0)) {
2585 2585 cmn_err(CE_WARN,
2586 2586 "%s: invalid mem unit# = %d",
2587 2587 f, unit);
2588 2588 devset = 0;
2589 2589 } else
2590 2590 devset = DEVSET(cid->c_type, unit);
2591 2591
2592 2592 PR_ALL("%s: MEM devset = " DEVSET_FMT_STR "\n",
2593 2593 f, DEVSET_FMT_ARG(devset));
2594 2594 break;
2595 2595
2596 2596 case SBD_COMP_IO:
2597 2597 if ((unit > MAX_IO_UNITS_PER_BOARD) || (unit < 0)) {
2598 2598 cmn_err(CE_WARN,
2599 2599 "%s: invalid io unit# = %d",
2600 2600 f, unit);
2601 2601 devset = 0;
2602 2602 } else
2603 2603 devset = DEVSET(cid->c_type, unit);
2604 2604
2605 2605 PR_ALL("%s: IO devset = " DEVSET_FMT_STR "\n",
2606 2606 f, DEVSET_FMT_ARG(devset));
2607 2607 break;
2608 2608
2609 2609 default:
2610 2610 case SBD_COMP_UNKNOWN:
2611 2611 devset = 0;
2612 2612 break;
2613 2613 }
2614 2614
2615 2615 return (devset);
2616 2616 }
2617 2617
2618 2618 /*
2619 2619 * Converts a dynamic attachment point name to a SBD_COMP_* type.
2620 2620  * Returns SBD_COMP_UNKNOWN if name is not recognized.
2621 2621 */
2622 2622 static int
2623 2623 dr_dev_type_to_nt(char *type)
2624 2624 {
2625 2625 int i;
2626 2626
2627 2627 for (i = 0; dr_devattr[i].s_nodetype != SBD_COMP_UNKNOWN; i++)
2628 2628 if (strcmp(dr_devattr[i].s_devtype, type) == 0)
2629 2629 break;
2630 2630
2631 2631 return (dr_devattr[i].s_nodetype);
2632 2632 }
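/*
 * A sketch of the lookup above, assuming (as the loop bound implies)
 * that dr_devattr[] is terminated by an SBD_COMP_UNKNOWN sentinel: an
 * unrecognized name simply runs the loop onto that sentinel, e.g.
 *
 *	nt = dr_dev_type_to_nt("no-such-type");	 (nt == SBD_COMP_UNKNOWN)
 *
 * so callers such as dr_dev_found() and dr_pt_try_drmach() need only
 * test for SBD_COMP_UNKNOWN.
 */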
2633 2633
2634 2634 /*
2635 2635 * Converts a SBD_COMP_* type to a dynamic attachment point name.
2636 2636 * Return NULL if SBD_COMP_ type is not recognized.
2637 2637 */
2638 2638 char *
2639 2639 dr_nt_to_dev_type(int nt)
2640 2640 {
2641 2641 int i;
2642 2642
2643 2643 for (i = 0; dr_devattr[i].s_nodetype != SBD_COMP_UNKNOWN; i++)
2644 2644 if (dr_devattr[i].s_nodetype == nt)
2645 2645 break;
2646 2646
2647 2647 return (dr_devattr[i].s_devtype);
2648 2648 }
2649 2649
2650 2650 /*
2651 2651 * State transition policy is that if there is some component for which
2652 2652 * the state transition is valid, then let it through. The exception is
2653 2653 * SBD_CMD_DISCONNECT. On disconnect, the state transition must be valid
2654 2654 * for ALL components.
2655 2655 * Returns the state that is in error, if any.
2656 2656 */
2657 2657 static int
2658 2658 dr_check_transition(dr_board_t *bp, dr_devset_t *devsetp,
2659 2659 struct dr_state_trans *transp, int cmd)
2660 2660 {
2661 2661 int s, ut;
2662 2662 int state_err = 0;
2663 2663 dr_devset_t devset;
2664 2664 dr_common_unit_t *cp;
2665 2665 static fn_t f = "dr_check_transition";
2666 2666
2667 2667 devset = *devsetp;
2668 2668
2669 2669 if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
2670 2670 for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
2671 2671 if (DEVSET_IN_SET(devset, SBD_COMP_CPU, ut) == 0)
2672 2672 continue;
2673 2673
2674 2674 cp = dr_get_common_unit(bp, SBD_COMP_CPU, ut);
2675 2675 s = (int)cp->sbdev_state;
2676 2676 if (!DR_DEV_IS_PRESENT(cp)) {
2677 2677 DEVSET_DEL(devset, SBD_COMP_CPU, ut);
2678 2678 } else {
2679 2679 if (transp->x_op[s].x_rv) {
2680 2680 if (!state_err)
2681 2681 state_err = s;
2682 2682 DEVSET_DEL(devset, SBD_COMP_CPU, ut);
2683 2683 }
2684 2684 }
2685 2685 }
2686 2686 }
2687 2687 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
2688 2688 for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
2689 2689 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, ut) == 0)
2690 2690 continue;
2691 2691
2692 2692 cp = dr_get_common_unit(bp, SBD_COMP_MEM, ut);
2693 2693 s = (int)cp->sbdev_state;
2694 2694 if (!DR_DEV_IS_PRESENT(cp)) {
2695 2695 DEVSET_DEL(devset, SBD_COMP_MEM, ut);
2696 2696 } else {
2697 2697 if (transp->x_op[s].x_rv) {
2698 2698 if (!state_err)
2699 2699 state_err = s;
2700 2700 DEVSET_DEL(devset, SBD_COMP_MEM, ut);
2701 2701 }
2702 2702 }
2703 2703 }
2704 2704 }
2705 2705 if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
2706 2706 for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
2707 2707 if (DEVSET_IN_SET(devset, SBD_COMP_IO, ut) == 0)
2708 2708 continue;
2709 2709
2710 2710 cp = dr_get_common_unit(bp, SBD_COMP_IO, ut);
2711 2711 s = (int)cp->sbdev_state;
2712 2712 if (!DR_DEV_IS_PRESENT(cp)) {
2713 2713 DEVSET_DEL(devset, SBD_COMP_IO, ut);
2714 2714 } else {
2715 2715 if (transp->x_op[s].x_rv) {
2716 2716 if (!state_err)
2717 2717 state_err = s;
2718 2718 DEVSET_DEL(devset, SBD_COMP_IO, ut);
2719 2719 }
2720 2720 }
2721 2721 }
2722 2722 }
2723 2723
2724 2724 PR_ALL("%s: requested devset = 0x%x, final devset = 0x%x\n",
2725 2725 f, (uint_t)*devsetp, (uint_t)devset);
2726 2726
2727 2727 *devsetp = devset;
2728 2728 /*
2729 2729 * If there are some remaining components for which
2730 2730 * this state transition is valid, then allow them
2731 2731 * through, otherwise if none are left then return
2732 2732 * the state error. The exception is SBD_CMD_DISCONNECT.
2733 2733 * On disconnect, the state transition must be valid for ALL
2734 2734 * components.
2735 2735 */
2736 2736 if (cmd == SBD_CMD_DISCONNECT)
2737 2737 return (state_err);
2738 2738 return (devset ? 0 : state_err);
2739 2739 }
2740 2740
2741 2741 void
2742 2742 dr_device_transition(dr_common_unit_t *cp, dr_state_t st)
2743 2743 {
2744 2744 PR_STATE("%s STATE %s(%d) -> %s(%d)\n",
2745 2745 cp->sbdev_path,
2746 2746 state_str[cp->sbdev_state], cp->sbdev_state,
2747 2747 state_str[st], st);
2748 2748
2749 2749 cp->sbdev_state = st;
2750 2750 if (st == DR_STATE_CONFIGURED) {
2751 2751 cp->sbdev_ostate = SBD_STAT_CONFIGURED;
2752 2752 if (cp->sbdev_bp->b_ostate != SBD_STAT_CONFIGURED) {
2753 2753 cp->sbdev_bp->b_ostate = SBD_STAT_CONFIGURED;
2754 2754 (void) drv_getparm(TIME,
2755 2755 (void *) &cp->sbdev_bp->b_time);
2756 2756 }
2757 2757 } else
2758 2758 cp->sbdev_ostate = SBD_STAT_UNCONFIGURED;
2759 2759
2760 2760 (void) drv_getparm(TIME, (void *) &cp->sbdev_time);
2761 2761 }
2762 2762
2763 2763 static void
2764 2764 dr_board_transition(dr_board_t *bp, dr_state_t st)
2765 2765 {
2766 2766 PR_STATE("BOARD %d STATE: %s(%d) -> %s(%d)\n",
2767 2767 bp->b_num,
2768 2768 state_str[bp->b_state], bp->b_state,
2769 2769 state_str[st], st);
2770 2770
2771 2771 bp->b_state = st;
2772 2772 }
2773 2773
2774 2774 void
2775 2775 dr_op_err(int ce, dr_handle_t *hp, int code, char *fmt, ...)
2776 2776 {
2777 2777 sbd_error_t *err;
2778 2778 va_list args;
2779 2779
2780 2780 va_start(args, fmt);
2781 2781 err = drerr_new_v(code, fmt, args);
2782 2782 va_end(args);
2783 2783
2784 2784 if (ce != CE_IGNORE)
2785 2785 sbd_err_log(err, ce);
2786 2786
2787 2787 DRERR_SET_C(&hp->h_err, &err);
2788 2788 }
2789 2789
2790 2790 void
2791 2791 dr_dev_err(int ce, dr_common_unit_t *cp, int code)
2792 2792 {
2793 2793 sbd_error_t *err;
2794 2794
2795 2795 err = drerr_new(0, code, cp->sbdev_path, NULL);
2796 2796
2797 2797 if (ce != CE_IGNORE)
2798 2798 sbd_err_log(err, ce);
2799 2799
2800 2800 DRERR_SET_C(&cp->sbdev_error, &err);
2801 2801 }
2802 2802
2803 2803 /*
2804 2804 * A callback routine. Called from the drmach layer as a result of
2805 2805 * call to drmach_board_find_devices from dr_init_devlists.
2806 2806 */
2807 2807 static sbd_error_t *
2808 2808 dr_dev_found(void *data, const char *name, int unum, drmachid_t id)
2809 2809 {
2810 2810 dr_board_t *bp = data;
2811 2811 dr_dev_unit_t *dp;
2812 2812 int nt;
2813 2813 static fn_t f = "dr_dev_found";
2814 2814
2815 2815 PR_ALL("%s (board = %d, name = %s, unum = %d, id = %p)...\n",
2816 2816 f, bp->b_num, name, unum, id);
2817 2817
2818 2818 nt = dr_dev_type_to_nt((char *)name);
2819 2819 if (nt == SBD_COMP_UNKNOWN) {
2820 2820 /*
2821 2821 		 * This should not happen. When it does, it indicates
2822 2822 		 * a mismatch in devices supported by the drmach layer
2823 2823 * vs devices supported by this layer.
2824 2824 */
2825 2825 return (DR_INTERNAL_ERROR());
2826 2826 }
2827 2827
2828 2828 dp = DR_GET_BOARD_DEVUNIT(bp, nt, unum);
2829 2829
2830 2830 /* sanity check */
2831 2831 ASSERT(dp->du_common.sbdev_bp == bp);
2832 2832 ASSERT(dp->du_common.sbdev_unum == unum);
2833 2833 ASSERT(dp->du_common.sbdev_type == nt);
2834 2834
2835 2835 /* render dynamic attachment point path of this unit */
2836 2836 (void) snprintf(dp->du_common.sbdev_path,
2837 2837 sizeof (dp->du_common.sbdev_path), "%s::%s%d",
2838 2838 bp->b_path, name, DR_UNUM2SBD_UNUM(unum, nt));
2839 2839
2840 2840 dp->du_common.sbdev_id = id;
2841 2841 DR_DEV_SET_PRESENT(&dp->du_common);
2842 2842
2843 2843 bp->b_ndev++;
2844 2844
2845 2845 return (NULL);
2846 2846 }
2847 2847
2848 2848 static sbd_error_t *
2849 2849 dr_init_devlists(dr_board_t *bp)
2850 2850 {
2851 2851 int i;
2852 2852 sbd_error_t *err;
2853 2853 dr_dev_unit_t *dp;
2854 2854 static fn_t f = "dr_init_devlists";
2855 2855
2856 2856 PR_ALL("%s (%s)...\n", f, bp->b_path);
2857 2857
2858 2858 /* sanity check */
2859 2859 ASSERT(bp->b_ndev == 0);
2860 2860
2861 2861 DR_DEVS_DISCONNECT(bp, (uint_t)-1);
2862 2862
2863 2863 /*
2864 2864 * This routine builds the board's devlist and initializes
2865 2865 * the common portion of the unit data structures.
2866 2866 * Note: because the common portion is considered
2867 2867 * uninitialized, the dr_get_*_unit() routines can not
2868 2868 * be used.
2869 2869 */
2870 2870
2871 2871 /*
2872 2872 * Clear out old entries, if any.
2873 2873 */
2874 2874 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
2875 2875 dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_CPU, i);
2876 2876
2877 2877 bzero(dp, sizeof (*dp));
2878 2878 dp->du_common.sbdev_bp = bp;
2879 2879 dp->du_common.sbdev_unum = i;
2880 2880 dp->du_common.sbdev_type = SBD_COMP_CPU;
2881 2881 }
2882 2882
2883 2883 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
2884 2884 dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_MEM, i);
2885 2885
2886 2886 bzero(dp, sizeof (*dp));
2887 2887 dp->du_common.sbdev_bp = bp;
2888 2888 dp->du_common.sbdev_unum = i;
2889 2889 dp->du_common.sbdev_type = SBD_COMP_MEM;
2890 2890 }
2891 2891
2892 2892 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
2893 2893 dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_IO, i);
2894 2894
2895 2895 bzero(dp, sizeof (*dp));
2896 2896 dp->du_common.sbdev_bp = bp;
2897 2897 dp->du_common.sbdev_unum = i;
2898 2898 dp->du_common.sbdev_type = SBD_COMP_IO;
2899 2899 }
2900 2900
2901 2901 err = NULL;
2902 2902 if (bp->b_id) {
2903 2903 /* find devices on this board */
2904 2904 err = drmach_board_find_devices(
2905 2905 bp->b_id, bp, dr_dev_found);
2906 2906 }
2907 2907
2908 2908 return (err);
2909 2909 }
2910 2910
2911 2911 /*
2912 2912  * Return 0 if the respective drmachid is found to be attached,
2913 2913  * -1 if it is not.
2914 2914 */
2915 2915 static int
2916 2916 dr_check_unit_attached(dr_common_unit_t *cp)
2917 2917 {
2918 2918 int rv = 0;
2919 2919 processorid_t cpuid;
2920 2920 uint64_t basepa, endpa;
2921 2921 struct memlist *ml;
2922 2922 extern struct memlist *phys_install;
2923 2923 sbd_error_t *err;
2924 2924 int yes;
2925 2925 static fn_t f = "dr_check_unit_attached";
2926 2926
2927 2927 switch (cp->sbdev_type) {
2928 2928 case SBD_COMP_CPU:
2929 2929 err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
2930 2930 if (err) {
2931 2931 DRERR_SET_C(&cp->sbdev_error, &err);
2932 2932 rv = -1;
2933 2933 break;
2934 2934 }
2935 2935 mutex_enter(&cpu_lock);
2936 2936 if (cpu_get(cpuid) == NULL)
2937 2937 rv = -1;
2938 2938 mutex_exit(&cpu_lock);
2939 2939 break;
2940 2940
2941 2941 case SBD_COMP_MEM:
2942 2942 err = drmach_mem_get_slice_info(cp->sbdev_id,
2943 2943 &basepa, &endpa, NULL);
2944 2944 if (err) {
2945 2945 DRERR_SET_C(&cp->sbdev_error, &err);
2946 2946 rv = -1;
2947 2947 break;
2948 2948 }
2949 2949
2950 2950 /*
2951 2951 		 * Check if the slice's address range overlaps phys_install.
2952 2952 */
2953 2953 memlist_read_lock();
2954 2954 for (ml = phys_install; ml; ml = ml->ml_next)
2955 2955 if ((endpa <= ml->ml_address) ||
2956 2956 (basepa >= (ml->ml_address + ml->ml_size)))
2957 2957 continue;
2958 2958 else
2959 2959 break;
2960 2960 memlist_read_unlock();
2961 2961 if (ml == NULL)
2962 2962 rv = -1;
2963 2963 break;
2964 2964
2965 2965 case SBD_COMP_IO:
2966 2966 err = drmach_io_is_attached(cp->sbdev_id, &yes);
2967 2967 if (err) {
2968 2968 DRERR_SET_C(&cp->sbdev_error, &err);
2969 2969 rv = -1;
2970 2970 break;
2971 2971 } else if (!yes)
2972 2972 rv = -1;
2973 2973 break;
2974 2974
2975 2975 default:
2976 2976 PR_ALL("%s: unexpected nodetype(%d) for id 0x%p\n",
2977 2977 f, cp->sbdev_type, cp->sbdev_id);
2978 2978 rv = -1;
2979 2979 break;
2980 2980 }
2981 2981
2982 2982 return (rv);
2983 2983 }
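/*
 * The memlist scan in the SBD_COMP_MEM case above is an interval
 * overlap test: the slice is considered attached when [basepa, endpa)
 * intersects at least one phys_install segment.  Restated as a single
 * predicate (sketch only, equivalent to negating the loop's continue
 * condition):
 *
 *	overlap = (endpa > ml->ml_address) &&
 *	    (basepa < ml->ml_address + ml->ml_size);
 */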
2984 2984
2985 2985 /*
2986 2986 * See if drmach recognizes the passthru command. DRMACH expects the
2987 2987 * id to identify the thing to which the command is being applied. Using
2988 2988 * nonsense SBD terms, that information has been perversely encoded in the
2989 2989 * c_id member of the sbd_cmd_t structure. This logic reads those tea
2990 2990 * leaves, finds the associated drmach id, then calls drmach to process
2991 2991 * the passthru command.
2992 2992 */
2993 2993 static int
2994 2994 dr_pt_try_drmach(dr_handle_t *hp)
2995 2995 {
2996 2996 dr_board_t *bp = hp->h_bd;
2997 2997 sbd_comp_id_t *comp_id = &hp->h_sbdcmd.cmd_cm.c_id;
2998 2998 drmachid_t id;
2999 2999
3000 3000 if (comp_id->c_type == SBD_COMP_NONE) {
3001 3001 id = bp->b_id;
3002 3002 } else {
3003 3003 sbd_comp_type_t nt;
3004 3004
3005 3005 nt = dr_dev_type_to_nt(comp_id->c_name);
3006 3006 if (nt == SBD_COMP_UNKNOWN) {
3007 3007 dr_op_err(CE_IGNORE, hp, ESBD_INVAL, comp_id->c_name);
3008 3008 id = 0;
3009 3009 } else {
3010 3010 /* pt command applied to dynamic attachment point */
3011 3011 dr_common_unit_t *cp;
3012 3012 cp = dr_get_common_unit(bp, nt, comp_id->c_unit);
3013 3013 id = cp->sbdev_id;
3014 3014 }
3015 3015 }
3016 3016
3017 3017 if (hp->h_err == NULL)
3018 3018 hp->h_err = drmach_passthru(id, &hp->h_opts);
3019 3019
3020 3020 return (hp->h_err == NULL ? 0 : -1);
3021 3021 }
3022 3022
3023 3023 static int
3024 3024 dr_pt_ioctl(dr_handle_t *hp)
3025 3025 {
3026 3026 int cmd, rv, len;
3027 3027 int32_t sz;
3028 3028 int found;
3029 3029 char *copts;
3030 3030 static fn_t f = "dr_pt_ioctl";
3031 3031
3032 3032 PR_ALL("%s...\n", f);
3033 3033
3034 3034 sz = hp->h_opts.size;
3035 3035 copts = hp->h_opts.copts;
3036 3036
3037 3037 if (sz == 0 || copts == (char *)NULL) {
3038 3038 cmn_err(CE_WARN, "%s: invalid passthru args", f);
3039 3039 return (EINVAL);
3040 3040 }
3041 3041
3042 3042 found = 0;
3043 3043 for (cmd = 0; cmd < (sizeof (pt_arr) / sizeof (pt_arr[0])); cmd++) {
3044 3044 len = strlen(pt_arr[cmd].pt_name);
3045 3045 found = (strncmp(pt_arr[cmd].pt_name, copts, len) == 0);
3046 3046 if (found)
3047 3047 break;
3048 3048 }
3049 3049
3050 3050 if (found)
3051 3051 rv = (*pt_arr[cmd].pt_func)(hp);
3052 3052 else
3053 3053 rv = dr_pt_try_drmach(hp);
3054 3054
3055 3055 return (rv);
3056 3056 }
3057 3057
3058 3058 /*
3059 3059 * Called at driver load time to determine the state and condition
3060 3060 * of an existing board in the system.
3061 3061 */
3062 3062 static void
3063 3063 dr_board_discovery(dr_board_t *bp)
3064 3064 {
3065 3065 int i;
3066 3066 dr_devset_t devs_lost, devs_attached = 0;
3067 3067 dr_cpu_unit_t *cp;
3068 3068 dr_mem_unit_t *mp;
3069 3069 dr_io_unit_t *ip;
3070 3070 static fn_t f = "dr_board_discovery";
3071 3071
3072 3072 if (DR_DEVS_PRESENT(bp) == 0) {
3073 3073 PR_ALL("%s: board %d has no devices present\n",
3074 3074 f, bp->b_num);
3075 3075 return;
3076 3076 }
3077 3077
3078 3078 /*
3079 3079 * Check for existence of cpus.
3080 3080 */
3081 3081 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
3082 3082 cp = dr_get_cpu_unit(bp, i);
3083 3083
3084 3084 if (!DR_DEV_IS_PRESENT(&cp->sbc_cm))
3085 3085 continue;
3086 3086
3087 3087 if (dr_check_unit_attached(&cp->sbc_cm) >= 0) {
3088 3088 DR_DEV_SET_ATTACHED(&cp->sbc_cm);
3089 3089 DEVSET_ADD(devs_attached, SBD_COMP_CPU, i);
3090 3090 PR_ALL("%s: board %d, cpu-unit %d - attached\n",
3091 3091 f, bp->b_num, i);
3092 3092 }
3093 3093 dr_init_cpu_unit(cp);
3094 3094 }
3095 3095
3096 3096 /*
3097 3097 * Check for existence of memory.
3098 3098 */
3099 3099 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
3100 3100 mp = dr_get_mem_unit(bp, i);
3101 3101
3102 3102 if (!DR_DEV_IS_PRESENT(&mp->sbm_cm))
3103 3103 continue;
3104 3104
3105 3105 if (dr_check_unit_attached(&mp->sbm_cm) >= 0) {
3106 3106 DR_DEV_SET_ATTACHED(&mp->sbm_cm);
3107 3107 DEVSET_ADD(devs_attached, SBD_COMP_MEM, i);
3108 3108 PR_ALL("%s: board %d, mem-unit %d - attached\n",
3109 3109 f, bp->b_num, i);
3110 3110 }
3111 3111 dr_init_mem_unit(mp);
3112 3112 }
3113 3113
3114 3114 /*
3115 3115 * Check for i/o state.
3116 3116 */
3117 3117 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
3118 3118 ip = dr_get_io_unit(bp, i);
3119 3119
3120 3120 if (!DR_DEV_IS_PRESENT(&ip->sbi_cm))
3121 3121 continue;
3122 3122
3123 3123 if (dr_check_unit_attached(&ip->sbi_cm) >= 0) {
3124 3124 /*
3125 3125 * Found it!
3126 3126 */
3127 3127 DR_DEV_SET_ATTACHED(&ip->sbi_cm);
3128 3128 DEVSET_ADD(devs_attached, SBD_COMP_IO, i);
3129 3129 PR_ALL("%s: board %d, io-unit %d - attached\n",
3130 3130 f, bp->b_num, i);
3131 3131 }
3132 3132 dr_init_io_unit(ip);
3133 3133 }
3134 3134
3135 3135 DR_DEVS_CONFIGURE(bp, devs_attached);
3136 3136 if (devs_attached && ((devs_lost = DR_DEVS_UNATTACHED(bp)) != 0)) {
3137 3137 int ut;
3138 3138
3139 3139 /*
3140 3140 * It is not legal on board discovery to have a
3141 3141 * board that is only partially attached. A board
3142 3142 * is either all attached or all connected. If a
3143 3143 * board has at least one attached device, then
3144 3144 		 * the remaining devices, if any, must have
3145 3145 * been lost or disconnected. These devices can
3146 3146 * only be recovered by a full attach from scratch.
3147 3147 * Note that devices previously in the unreferenced
3148 3148 * state are subsequently lost until the next full
3149 3149 * attach. This is necessary since the driver unload
3150 3150 * that must have occurred would have wiped out the
3151 3151 * information necessary to re-configure the device
3152 3152 * back online, e.g. memlist.
3153 3153 */
3154 3154 PR_ALL("%s: some devices LOST (" DEVSET_FMT_STR ")...\n",
3155 3155 f, DEVSET_FMT_ARG(devs_lost));
3156 3156
3157 3157 for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
3158 3158 if (!DEVSET_IN_SET(devs_lost, SBD_COMP_CPU, ut))
3159 3159 continue;
3160 3160
3161 3161 cp = dr_get_cpu_unit(bp, ut);
3162 3162 dr_device_transition(&cp->sbc_cm, DR_STATE_EMPTY);
3163 3163 }
3164 3164
3165 3165 for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
3166 3166 if (!DEVSET_IN_SET(devs_lost, SBD_COMP_MEM, ut))
3167 3167 continue;
3168 3168
3169 3169 mp = dr_get_mem_unit(bp, ut);
3170 3170 dr_device_transition(&mp->sbm_cm, DR_STATE_EMPTY);
3171 3171 }
3172 3172
3173 3173 for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
3174 3174 if (!DEVSET_IN_SET(devs_lost, SBD_COMP_IO, ut))
3175 3175 continue;
3176 3176
3177 3177 ip = dr_get_io_unit(bp, ut);
3178 3178 dr_device_transition(&ip->sbi_cm, DR_STATE_EMPTY);
3179 3179 }
3180 3180
3181 3181 DR_DEVS_DISCONNECT(bp, devs_lost);
3182 3182 }
3183 3183 }
3184 3184
3185 3185 static int
3186 3186 dr_board_init(dr_board_t *bp, dev_info_t *dip, int bd)
3187 3187 {
3188 3188 sbd_error_t *err;
3189 3189
3190 3190 mutex_init(&bp->b_lock, NULL, MUTEX_DRIVER, NULL);
3191 3191 mutex_init(&bp->b_slock, NULL, MUTEX_DRIVER, NULL);
3192 3192 cv_init(&bp->b_scv, NULL, CV_DRIVER, NULL);
3193 3193 bp->b_rstate = SBD_STAT_EMPTY;
3194 3194 bp->b_ostate = SBD_STAT_UNCONFIGURED;
3195 3195 bp->b_cond = SBD_COND_UNKNOWN;
3196 3196 (void) drv_getparm(TIME, (void *)&bp->b_time);
3197 3197
3198 3198 (void) drmach_board_lookup(bd, &bp->b_id);
3199 3199 bp->b_num = bd;
3200 3200 bp->b_dip = dip;
3201 3201
3202 3202 bp->b_dev[DEVSET_NIX(SBD_COMP_CPU)] = GETSTRUCT(dr_dev_unit_t,
3203 3203 MAX_CPU_UNITS_PER_BOARD);
3204 3204
3205 3205 bp->b_dev[DEVSET_NIX(SBD_COMP_MEM)] = GETSTRUCT(dr_dev_unit_t,
3206 3206 MAX_MEM_UNITS_PER_BOARD);
3207 3207
3208 3208 bp->b_dev[DEVSET_NIX(SBD_COMP_IO)] = GETSTRUCT(dr_dev_unit_t,
3209 3209 MAX_IO_UNITS_PER_BOARD);
3210 3210
3211 3211 /*
3212 3212 * Initialize the devlists
3213 3213 */
3214 3214 err = dr_init_devlists(bp);
3215 3215 if (err) {
3216 3216 sbd_err_clear(&err);
3217 3217 dr_board_destroy(bp);
3218 3218 return (-1);
3219 3219 } else if (bp->b_ndev == 0) {
3220 3220 dr_board_transition(bp, DR_STATE_EMPTY);
3221 3221 } else {
3222 3222 /*
3223 3223 * Couldn't have made it down here without
3224 3224 * having found at least one device.
3225 3225 */
3226 3226 ASSERT(DR_DEVS_PRESENT(bp) != 0);
3227 3227 /*
3228 3228 * Check the state of any possible devices on the
3229 3229 * board.
3230 3230 */
3231 3231 dr_board_discovery(bp);
3232 3232
3233 3233 bp->b_assigned = 1;
3234 3234
3235 3235 if (DR_DEVS_UNATTACHED(bp) == 0) {
3236 3236 /*
3237 3237 * The board has no unattached devices, therefore
3238 3238 * by reason of insanity it must be configured!
3239 3239 */
3240 3240 dr_board_transition(bp, DR_STATE_CONFIGURED);
3241 3241 bp->b_ostate = SBD_STAT_CONFIGURED;
3242 3242 bp->b_rstate = SBD_STAT_CONNECTED;
3243 3243 bp->b_cond = SBD_COND_OK;
3244 3244 (void) drv_getparm(TIME, (void *)&bp->b_time);
3245 3245 } else if (DR_DEVS_ATTACHED(bp)) {
3246 3246 dr_board_transition(bp, DR_STATE_PARTIAL);
3247 3247 bp->b_ostate = SBD_STAT_CONFIGURED;
3248 3248 bp->b_rstate = SBD_STAT_CONNECTED;
3249 3249 bp->b_cond = SBD_COND_OK;
3250 3250 (void) drv_getparm(TIME, (void *)&bp->b_time);
3251 3251 } else {
3252 3252 dr_board_transition(bp, DR_STATE_CONNECTED);
3253 3253 bp->b_rstate = SBD_STAT_CONNECTED;
3254 3254 (void) drv_getparm(TIME, (void *)&bp->b_time);
3255 3255 }
3256 3256 }
3257 3257
3258 3258 return (0);
3259 3259 }
3260 3260
3261 3261 static void
3262 3262 dr_board_destroy(dr_board_t *bp)
3263 3263 {
3264 3264 PR_ALL("dr_board_destroy: num %d, path %s\n",
3265 3265 bp->b_num, bp->b_path);
3266 3266
3267 3267 dr_board_transition(bp, DR_STATE_EMPTY);
3268 3268 bp->b_rstate = SBD_STAT_EMPTY;
3269 3269 (void) drv_getparm(TIME, (void *)&bp->b_time);
3270 3270
3271 3271 /*
3272 3272 * Free up MEM unit structs.
3273 3273 */
3274 3274 FREESTRUCT(bp->b_dev[DEVSET_NIX(SBD_COMP_MEM)],
3275 3275 dr_dev_unit_t, MAX_MEM_UNITS_PER_BOARD);
3276 3276 bp->b_dev[DEVSET_NIX(SBD_COMP_MEM)] = NULL;
3277 3277 /*
3278 3278 * Free up CPU unit structs.
3279 3279 */
3280 3280 FREESTRUCT(bp->b_dev[DEVSET_NIX(SBD_COMP_CPU)],
3281 3281 dr_dev_unit_t, MAX_CPU_UNITS_PER_BOARD);
3282 3282 bp->b_dev[DEVSET_NIX(SBD_COMP_CPU)] = NULL;
3283 3283 /*
3284 3284 * Free up IO unit structs.
3285 3285 */
3286 3286 FREESTRUCT(bp->b_dev[DEVSET_NIX(SBD_COMP_IO)],
3287 3287 dr_dev_unit_t, MAX_IO_UNITS_PER_BOARD);
3288 3288 bp->b_dev[DEVSET_NIX(SBD_COMP_IO)] = NULL;
3289 3289
3290 3290 mutex_destroy(&bp->b_lock);
3291 3291 mutex_destroy(&bp->b_slock);
3292 3292 cv_destroy(&bp->b_scv);
3293 3293
3294 3294 /*
3295 3295 * Reset the board structure to its initial state, otherwise it will
3296 3296 * cause trouble on the next call to dr_board_init() for the same board.
3297 3297 * dr_board_init() may be called multiple times for the same board
3298 3298 	 * if the DR driver fails to initialize some boards.
3299 3299 */
3300 3300 bzero(bp, sizeof (*bp));
3301 3301 }
3302 3302
3303 3303 void
3304 3304 dr_lock_status(dr_board_t *bp)
3305 3305 {
3306 3306 mutex_enter(&bp->b_slock);
3307 3307 while (bp->b_sflags & DR_BSLOCK)
3308 3308 cv_wait(&bp->b_scv, &bp->b_slock);
3309 3309 bp->b_sflags |= DR_BSLOCK;
3310 3310 mutex_exit(&bp->b_slock);
3311 3311 }
3312 3312
3313 3313 void
3314 3314 dr_unlock_status(dr_board_t *bp)
3315 3315 {
3316 3316 mutex_enter(&bp->b_slock);
3317 3317 bp->b_sflags &= ~DR_BSLOCK;
3318 3318 cv_signal(&bp->b_scv);
3319 3319 mutex_exit(&bp->b_slock);
3320 3320 }
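/*
 * Sketch of the intended use of the status lock pair above (the
 * caller shown is illustrative only):
 *
 *	dr_lock_status(bp);
 *	... examine or copy out board status ...
 *	dr_unlock_status(bp);
 *
 * dr_dev_status() and dr_dev_unconfigure() take the same DR_BSLOCK
 * gate by hand instead, so they can bail out on a signal via
 * cv_wait_sig() rather than blocking uninterruptibly in cv_wait().
 */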
3321 3321
3322 3322 /*
3323 3323 * Extract flags passed via ioctl.
3324 3324 */
3325 3325 int
3326 3326 dr_cmd_flags(dr_handle_t *hp)
3327 3327 {
3328 3328 return (hp->h_sbdcmd.cmd_cm.c_flags);
3329 3329 }
2886 lines elided