fixup .text where possible
7127 remove -Wno-missing-braces from Makefile.uts
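The diff below contains a single change: the modlinkage initializer gains inner braces so this file compiles cleanly once the -Wno-missing-braces override is dropped from Makefile.uts. As a minimal sketch of why gcc warns on the old form — assuming the usual illumos shape of struct modlinkage from <sys/modctl.h>, where ml_linkage is an array of pointers whose exact length differs between the 32-bit and 64-bit kernels (the length shown here is illustrative):

    /* Sketch only; the real definition lives in <sys/modctl.h>. */
    struct modlinkage {
            int     ml_rev;                 /* MODREV_1 */
            void    *ml_linkage[7];         /* array member; length illustrative */
    };

    /* Old form: the array member is initialized without its own braces. */
    static struct modlinkage old_style = {
            MODREV_1,
            (void *)&modlmisc,
            NULL
    };

    /* New form: inner braces make the array initialization explicit. */
    static struct modlinkage new_style = {
            MODREV_1,
            { (void *)&modlmisc, NULL }
    };

Both forms initialize the same members; the braced form simply satisfies -Wmissing-braces without changing behavior.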
--- old/usr/src/uts/i86pc/io/acpi/drmach_acpi/drmach_acpi.c
+++ new/usr/src/uts/i86pc/io/acpi/drmach_acpi/drmach_acpi.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25 /*
26 26 * Copyright (c) 2010, Intel Corporation.
27 27 * All rights reserved.
28 28 */
29 29
30 30 #include <sys/types.h>
31 31 #include <sys/cmn_err.h>
32 32 #include <sys/conf.h>
33 33 #include <sys/debug.h>
34 34 #include <sys/errno.h>
35 35 #include <sys/note.h>
36 36 #include <sys/dditypes.h>
37 37 #include <sys/ddi.h>
38 38 #include <sys/sunddi.h>
39 39 #include <sys/sunndi.h>
40 40 #include <sys/ddi_impldefs.h>
41 41 #include <sys/ndi_impldefs.h>
42 42 #include <sys/varargs.h>
43 43 #include <sys/modctl.h>
44 44 #include <sys/kmem.h>
45 45 #include <sys/cpuvar.h>
46 46 #include <sys/cpupart.h>
47 47 #include <sys/mem_config.h>
48 48 #include <sys/mem_cage.h>
49 49 #include <sys/memnode.h>
50 50 #include <sys/callb.h>
51 51 #include <sys/ontrap.h>
52 52 #include <sys/obpdefs.h>
53 53 #include <sys/promif.h>
54 54 #include <sys/synch.h>
55 55 #include <sys/systm.h>
56 56 #include <sys/sysmacros.h>
57 57 #include <sys/archsystm.h>
58 58 #include <sys/machsystm.h>
59 59 #include <sys/x_call.h>
60 60 #include <sys/x86_archext.h>
61 61 #include <sys/fastboot_impl.h>
62 62 #include <sys/sysevent.h>
63 63 #include <sys/sysevent/dr.h>
64 64 #include <sys/sysevent/eventdefs.h>
65 65 #include <sys/acpi/acpi.h>
66 66 #include <sys/acpica.h>
67 67 #include <sys/acpidev.h>
68 68 #include <sys/acpidev_rsc.h>
69 69 #include <sys/acpidev_dr.h>
70 70 #include <sys/dr.h>
71 71 #include <sys/dr_util.h>
72 72 #include <sys/drmach.h>
73 73 #include "drmach_acpi.h"
74 74
75 75 /* utility */
76 76 #define MBYTE (1048576ull)
77 77 #define _ptob64(p) ((uint64_t)(p) << PAGESHIFT)
78 78 #define _b64top(b) ((pgcnt_t)((b) >> PAGESHIFT))
79 79
80 80 static int drmach_init(void);
81 81 static void drmach_fini(void);
82 82 static int drmach_name2type_idx(char *);
83 83 static sbd_error_t *drmach_mem_update_lgrp(drmachid_t);
84 84
85 85 static void drmach_board_dispose(drmachid_t id);
86 86 static sbd_error_t *drmach_board_release(drmachid_t);
87 87 static sbd_error_t *drmach_board_status(drmachid_t, drmach_status_t *);
88 88
89 89 static void drmach_io_dispose(drmachid_t);
90 90 static sbd_error_t *drmach_io_release(drmachid_t);
91 91 static sbd_error_t *drmach_io_status(drmachid_t, drmach_status_t *);
92 92
93 93 static void drmach_cpu_dispose(drmachid_t);
94 94 static sbd_error_t *drmach_cpu_release(drmachid_t);
95 95 static sbd_error_t *drmach_cpu_status(drmachid_t, drmach_status_t *);
96 96
97 97 static void drmach_mem_dispose(drmachid_t);
98 98 static sbd_error_t *drmach_mem_release(drmachid_t);
99 99 static sbd_error_t *drmach_mem_status(drmachid_t, drmach_status_t *);
100 100
101 101 #ifdef DEBUG
102 102 int drmach_debug = 1; /* set to non-zero to enable debug messages */
103 103 #endif /* DEBUG */
104 104
105 105 drmach_domain_info_t drmach_domain;
106 106
107 107 static char *drmach_ie_fmt = "drmach_acpi.c %d";
108 108 static drmach_array_t *drmach_boards;
109 109
110 110 /* rwlock to protect drmach_boards. */
111 111 static krwlock_t drmach_boards_rwlock;
112 112
113 113 /* rwlock to block out CPR thread. */
114 114 static krwlock_t drmach_cpr_rwlock;
115 115
116 116 /* CPR callb id. */
117 117 static callb_id_t drmach_cpr_cid;
118 118
119 119 static struct {
120 120 const char *name;
121 121 const char *type;
122 122 sbd_error_t *(*new)(drmach_device_t *, drmachid_t *);
123 123 } drmach_name2type[] = {
124 124 { ACPIDEV_NODE_NAME_CPU, DRMACH_DEVTYPE_CPU, drmach_cpu_new },
125 125 { ACPIDEV_NODE_NAME_MEMORY, DRMACH_DEVTYPE_MEM, drmach_mem_new },
126 126 { ACPIDEV_NODE_NAME_PCI, DRMACH_DEVTYPE_PCI, drmach_io_new },
127 127 };
128 128
129 129 /*
130 130 * drmach autoconfiguration data structures and interfaces
131 131 */
132 132 static struct modlmisc modlmisc = {
133 133 &mod_miscops,
134 134 "ACPI based DR v1.0"
135 135 };
136 136
137 137 static struct modlinkage modlinkage = {
138 138 MODREV_1,
139 - (void *)&modlmisc,
140 - NULL
139 + { (void *)&modlmisc,
140 + NULL }
141 141 };
142 142
143 143 int
144 144 _init(void)
145 145 {
146 146 int err;
147 147
148 148 if ((err = drmach_init()) != 0) {
149 149 return (err);
150 150 }
151 151
152 152 if ((err = mod_install(&modlinkage)) != 0) {
153 153 drmach_fini();
154 154 }
155 155
156 156 return (err);
157 157 }
158 158
159 159 int
160 160 _fini(void)
161 161 {
162 162 int err;
163 163
164 164 if ((err = mod_remove(&modlinkage)) == 0) {
165 165 drmach_fini();
166 166 }
167 167
168 168 return (err);
169 169 }
170 170
171 171 int
172 172 _info(struct modinfo *modinfop)
173 173 {
174 174 return (mod_info(&modlinkage, modinfop));
175 175 }
176 176
177 177 /*
178 178 * Internal support functions.
179 179 */
180 180 static DRMACH_HANDLE
181 181 drmach_node_acpi_get_dnode(drmach_node_t *np)
182 182 {
183 183 return ((DRMACH_HANDLE)(uintptr_t)np->here);
184 184 }
185 185
186 186 static dev_info_t *
187 187 drmach_node_acpi_get_dip(drmach_node_t *np)
188 188 {
189 189 dev_info_t *dip = NULL;
190 190
191 191 if (ACPI_FAILURE(acpica_get_devinfo((DRMACH_HANDLE)(np->here), &dip))) {
192 192 return (NULL);
193 193 }
194 194
195 195 return (dip);
196 196 }
197 197
198 198 static int
199 199 drmach_node_acpi_get_prop(drmach_node_t *np, char *name, void *buf, int len)
200 200 {
201 201 int rv = 0;
202 202 DRMACH_HANDLE hdl;
203 203
204 204 hdl = np->get_dnode(np);
205 205 if (hdl == NULL) {
206 206 DRMACH_PR("!drmach_node_acpi_get_prop: NULL handle");
207 207 rv = -1;
208 208 } else {
209 209 rv = acpidev_dr_device_getprop(hdl, name, buf, len);
210 210 if (rv >= 0) {
211 211 ASSERT(rv <= len);
212 212 rv = 0;
213 213 }
214 214 }
215 215
216 216 return (rv);
217 217 }
218 218
219 219 static int
220 220 drmach_node_acpi_get_proplen(drmach_node_t *np, char *name, int *len)
221 221 {
222 222 int rv = 0;
223 223 DRMACH_HANDLE hdl;
224 224
225 225 hdl = np->get_dnode(np);
226 226 if (hdl == NULL) {
227 227 DRMACH_PR("!drmach_node_acpi_get_proplen: NULL handle");
228 228 rv = -1;
229 229 } else {
230 230 rv = acpidev_dr_device_getprop(hdl, name, NULL, 0);
231 231 if (rv >= 0) {
232 232 *len = rv;
233 233 return (0);
234 234 }
235 235 }
236 236
237 237 return (-1);
238 238 }
239 239
240 240 static ACPI_STATUS
241 241 drmach_node_acpi_callback(ACPI_HANDLE hdl, uint_t lvl, void *ctx, void **retval)
242 242 {
243 243 _NOTE(ARGUNUSED(lvl));
244 244
245 245 int rv;
246 246 dev_info_t *dip;
247 247 drmach_node_walk_args_t *argp = ctx;
248 248 int (*cb)(drmach_node_walk_args_t *args);
249 249 acpidev_class_id_t clsid;
250 250
251 251 ASSERT(hdl != NULL);
252 252 ASSERT(ctx != NULL);
253 253 ASSERT(retval != NULL);
254 254
255 255 /* Skip subtree if the device is not powered. */
256 256 if (!acpidev_dr_device_is_powered(hdl)) {
257 257 return (AE_CTRL_DEPTH);
258 258 }
259 259
260 260 /*
 261  261  	 * Keep scanning the subtree if the device node lookup fails.
262 262 * There may be some ACPI objects without device nodes created.
263 263 */
264 264 if (ACPI_FAILURE(acpica_get_devinfo(hdl, &dip))) {
265 265 return (AE_OK);
266 266 }
267 267
268 268 argp->node->here = hdl;
269 269 cb = (int (*)(drmach_node_walk_args_t *args))argp->func;
270 270 rv = (*cb)(argp);
271 271 argp->node->here = NULL;
272 272 if (rv) {
273 273 *(int *)retval = rv;
274 274 return (AE_CTRL_TERMINATE);
275 275 }
276 276
277 277 /*
278 278 * Skip descendants of PCI/PCIex host bridges.
279 279 * PCI/PCIex devices will be handled by pcihp.
280 280 */
281 281 clsid = acpidev_dr_device_get_class(hdl);
282 282 if (clsid == ACPIDEV_CLASS_ID_PCI || clsid == ACPIDEV_CLASS_ID_PCIEX) {
283 283 return (AE_CTRL_DEPTH);
284 284 }
285 285
286 286 return (AE_OK);
287 287 }
288 288
289 289 static int
290 290 drmach_node_acpi_walk(drmach_node_t *np, void *data,
291 291 int (*cb)(drmach_node_walk_args_t *args))
292 292 {
293 293 DRMACH_HANDLE hdl;
294 294 int rv = 0;
295 295 drmach_node_walk_args_t args;
296 296
297 297 /* initialize the args structure for callback */
298 298 args.node = np;
299 299 args.data = data;
300 300 args.func = (void *)cb;
301 301
 302  302  	/* save the handle; it will be modified while walking the tree. */
303 303 hdl = np->get_dnode(np);
304 304 if (hdl == NULL) {
305 305 DRMACH_PR("!drmach_node_acpi_walk: failed to get device node.");
306 306 return (EX86_INAPPROP);
307 307 }
308 308
309 309 if (ACPI_FAILURE(acpidev_dr_device_walk_device(hdl,
310 310 ACPIDEV_MAX_ENUM_LEVELS, drmach_node_acpi_callback,
311 311 &args, (void *)&rv))) {
312 312 /*
313 313 * If acpidev_dr_device_walk_device() itself fails, rv won't
 314  314  		 * be set to a suitable error code. Set it here.
315 315 */
316 316 if (rv == 0) {
317 317 cmn_err(CE_WARN, "!drmach_node_acpi_walk: failed to "
318 318 "walk ACPI namespace.");
319 319 rv = EX86_ACPIWALK;
320 320 }
321 321 }
322 322
 323  323  	/* restore the handle to its original value after walking the tree. */
324 324 np->here = (void *)hdl;
325 325
326 326 return ((int)rv);
327 327 }
328 328
329 329 static drmach_node_t *
330 330 drmach_node_new(void)
331 331 {
332 332 drmach_node_t *np;
333 333
334 334 np = kmem_zalloc(sizeof (drmach_node_t), KM_SLEEP);
335 335
336 336 np->get_dnode = drmach_node_acpi_get_dnode;
337 337 np->getdip = drmach_node_acpi_get_dip;
338 338 np->getproplen = drmach_node_acpi_get_proplen;
339 339 np->getprop = drmach_node_acpi_get_prop;
340 340 np->walk = drmach_node_acpi_walk;
341 341
342 342 return (np);
343 343 }
344 344
345 345 static drmachid_t
346 346 drmach_node_dup(drmach_node_t *np)
347 347 {
348 348 drmach_node_t *dup;
349 349
350 350 dup = drmach_node_new();
351 351 dup->here = np->here;
352 352 dup->get_dnode = np->get_dnode;
353 353 dup->getdip = np->getdip;
354 354 dup->getproplen = np->getproplen;
355 355 dup->getprop = np->getprop;
356 356 dup->walk = np->walk;
357 357
358 358 return (dup);
359 359 }
360 360
361 361 static void
362 362 drmach_node_dispose(drmach_node_t *np)
363 363 {
364 364 kmem_free(np, sizeof (*np));
365 365 }
366 366
367 367 static int
368 368 drmach_node_walk(drmach_node_t *np, void *param,
369 369 int (*cb)(drmach_node_walk_args_t *args))
370 370 {
371 371 return (np->walk(np, param, cb));
372 372 }
373 373
374 374 static DRMACH_HANDLE
375 375 drmach_node_get_dnode(drmach_node_t *np)
376 376 {
377 377 return (np->get_dnode(np));
378 378 }
379 379
380 380 /*
381 381 * drmach_array provides convenient array construction, access,
382 382 * bounds checking and array destruction logic.
383 383 */
384 384 static drmach_array_t *
385 385 drmach_array_new(uint_t min_index, uint_t max_index)
386 386 {
387 387 drmach_array_t *arr;
388 388
389 389 arr = kmem_zalloc(sizeof (drmach_array_t), KM_SLEEP);
390 390
391 391 arr->arr_sz = (max_index - min_index + 1) * sizeof (void *);
392 392 if (arr->arr_sz > 0) {
393 393 arr->min_index = min_index;
394 394 arr->max_index = max_index;
395 395
396 396 arr->arr = kmem_zalloc(arr->arr_sz, KM_SLEEP);
397 397 return (arr);
398 398 } else {
399 399 kmem_free(arr, sizeof (*arr));
400 400 return (0);
401 401 }
402 402 }
403 403
404 404 static int
405 405 drmach_array_set(drmach_array_t *arr, uint_t idx, drmachid_t val)
406 406 {
407 407 if (idx < arr->min_index || idx > arr->max_index)
408 408 return (-1);
409 409 arr->arr[idx - arr->min_index] = val;
410 410 return (0);
411 411 }
412 412
413 413 /*
414 414 * Get the item with index idx.
 415  415   * Return 0 with the value stored in val on success; otherwise return -1.
416 416 */
417 417 static int
418 418 drmach_array_get(drmach_array_t *arr, uint_t idx, drmachid_t *val)
419 419 {
420 420 if (idx < arr->min_index || idx > arr->max_index)
421 421 return (-1);
422 422 *val = arr->arr[idx - arr->min_index];
423 423 return (0);
424 424 }
425 425
426 426 static int
427 427 drmach_array_first(drmach_array_t *arr, uint_t *idx, drmachid_t *val)
428 428 {
429 429 int rv;
430 430
431 431 *idx = arr->min_index;
432 432 while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL)
433 433 *idx += 1;
434 434
435 435 return (rv);
436 436 }
437 437
438 438 static int
439 439 drmach_array_next(drmach_array_t *arr, uint_t *idx, drmachid_t *val)
440 440 {
441 441 int rv;
442 442
443 443 *idx += 1;
444 444 while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL)
445 445 *idx += 1;
446 446
447 447 return (rv);
448 448 }
449 449
450 450 static void
451 451 drmach_array_dispose(drmach_array_t *arr, void (*disposer)(drmachid_t))
452 452 {
453 453 drmachid_t val;
454 454 uint_t idx;
455 455 int rv;
456 456
457 457 rv = drmach_array_first(arr, &idx, &val);
458 458 while (rv == 0) {
459 459 (*disposer)(val);
460 460 rv = drmach_array_next(arr, &idx, &val);
461 461 }
462 462
463 463 kmem_free(arr->arr, arr->arr_sz);
464 464 kmem_free(arr, sizeof (*arr));
465 465 }
466 466
467 467 static drmach_board_t *
468 468 drmach_get_board_by_bnum(uint_t bnum)
469 469 {
470 470 drmachid_t id;
471 471
472 472 if (drmach_array_get(drmach_boards, bnum, &id) == 0)
473 473 return ((drmach_board_t *)id);
474 474 else
475 475 return (NULL);
476 476 }
477 477
478 478 sbd_error_t *
479 479 drmach_device_new(drmach_node_t *node,
480 480 drmach_board_t *bp, int portid, drmachid_t *idp)
481 481 {
482 482 int i;
483 483 int rv;
484 484 drmach_device_t proto;
485 485 sbd_error_t *err;
486 486 char name[OBP_MAXDRVNAME];
487 487
488 488 rv = node->getprop(node, ACPIDEV_DR_PROP_DEVNAME, name, OBP_MAXDRVNAME);
489 489 if (rv) {
490 490 /* every node is expected to have a name */
491 491 err = drerr_new(1, EX86_GETPROP, "device node %s: property %s",
492 492 ddi_node_name(node->getdip(node)),
493 493 ACPIDEV_DR_PROP_DEVNAME);
494 494 return (err);
495 495 }
496 496
497 497 /*
498 498 * The node currently being examined is not listed in the name2type[]
 499  499  	 * array. In this case, the node is of no interest to drmach. Both
500 500 * dp and err are initialized here to yield nothing (no device or
501 501 * error structure) for this case.
502 502 */
503 503 i = drmach_name2type_idx(name);
504 504 if (i < 0) {
505 505 *idp = (drmachid_t)0;
506 506 return (NULL);
507 507 }
508 508
509 509 /* device specific new function will set unum */
510 510 bzero(&proto, sizeof (proto));
511 511 proto.type = drmach_name2type[i].type;
512 512 proto.bp = bp;
513 513 proto.node = node;
514 514 proto.portid = portid;
515 515
516 516 return (drmach_name2type[i].new(&proto, idp));
517 517 }
518 518
519 519 static void
520 520 drmach_device_dispose(drmachid_t id)
521 521 {
522 522 drmach_device_t *self = id;
523 523
524 524 self->cm.dispose(id);
525 525 }
526 526
527 527 static sbd_error_t *
528 528 drmach_device_status(drmachid_t id, drmach_status_t *stat)
529 529 {
530 530 drmach_common_t *cp;
531 531
532 532 if (!DRMACH_IS_ID(id))
533 533 return (drerr_new(0, EX86_NOTID, NULL));
534 534 cp = id;
535 535
536 536 return (cp->status(id, stat));
537 537 }
538 538
539 539 drmach_board_t *
540 540 drmach_board_new(uint_t bnum, int boot_board)
541 541 {
542 542 sbd_error_t *err;
543 543 drmach_board_t *bp;
544 544 dev_info_t *dip = NULL;
545 545
546 546 bp = kmem_zalloc(sizeof (drmach_board_t), KM_SLEEP);
547 547 bp->cm.isa = (void *)drmach_board_new;
548 548 bp->cm.release = drmach_board_release;
549 549 bp->cm.status = drmach_board_status;
550 550
551 551 bp->bnum = bnum;
552 552 bp->devices = NULL;
553 553 bp->tree = drmach_node_new();
554 554
555 555 acpidev_dr_lock_all();
556 556 if (ACPI_FAILURE(acpidev_dr_get_board_handle(bnum, &bp->tree->here))) {
557 557 acpidev_dr_unlock_all();
558 558 drmach_board_dispose(bp);
559 559 return (NULL);
560 560 }
561 561 acpidev_dr_unlock_all();
562 562 ASSERT(bp->tree->here != NULL);
563 563
564 564 err = drmach_board_name(bnum, bp->cm.name, sizeof (bp->cm.name));
565 565 if (err != NULL) {
566 566 sbd_err_clear(&err);
567 567 drmach_board_dispose(bp);
568 568 return (NULL);
569 569 }
570 570
571 571 if (acpidev_dr_device_is_powered(bp->tree->here)) {
572 572 bp->boot_board = boot_board;
573 573 bp->powered = 1;
574 574 } else {
575 575 bp->boot_board = 0;
576 576 bp->powered = 0;
577 577 }
578 578 bp->assigned = boot_board;
579 579 if (ACPI_SUCCESS(acpica_get_devinfo(bp->tree->here, &dip))) {
580 580 bp->connected = 1;
581 581 } else {
582 582 bp->connected = 0;
583 583 }
584 584
585 585 (void) drmach_array_set(drmach_boards, bnum, bp);
586 586
587 587 return (bp);
588 588 }
589 589
590 590 static void
591 591 drmach_board_dispose(drmachid_t id)
592 592 {
593 593 drmach_board_t *bp;
594 594
595 595 ASSERT(DRMACH_IS_BOARD_ID(id));
596 596 bp = id;
597 597
598 598 if (bp->tree)
599 599 drmach_node_dispose(bp->tree);
600 600
601 601 if (bp->devices)
602 602 drmach_array_dispose(bp->devices, drmach_device_dispose);
603 603
604 604 kmem_free(bp, sizeof (drmach_board_t));
605 605 }
606 606
607 607 static sbd_error_t *
608 608 drmach_board_release(drmachid_t id)
609 609 {
610 610 if (!DRMACH_IS_BOARD_ID(id))
611 611 return (drerr_new(0, EX86_INAPPROP, NULL));
612 612
613 613 return (NULL);
614 614 }
615 615
616 616 static int
617 617 drmach_board_check_power(drmach_board_t *bp)
618 618 {
619 619 DRMACH_HANDLE hdl;
620 620
621 621 hdl = drmach_node_get_dnode(bp->tree);
622 622
623 623 return (acpidev_dr_device_is_powered(hdl));
624 624 }
625 625
626 626 struct drmach_board_list_dep_arg {
627 627 int count;
628 628 size_t len;
629 629 ssize_t off;
630 630 char *buf;
631 631 char temp[MAXPATHLEN];
632 632 };
633 633
634 634 static ACPI_STATUS
635 635 drmach_board_generate_name(ACPI_HANDLE hdl, UINT32 lvl, void *ctx,
636 636 void **retval)
637 637 {
638 638 _NOTE(ARGUNUSED(retval));
639 639
640 640 struct drmach_board_list_dep_arg *argp = ctx;
641 641
642 642 ASSERT(hdl != NULL);
643 643 ASSERT(lvl == UINT32_MAX);
644 644 ASSERT(ctx != NULL);
645 645
646 646 /* Skip non-board devices. */
647 647 if (!acpidev_dr_device_is_board(hdl)) {
648 648 return (AE_OK);
649 649 }
650 650
651 651 if (ACPI_FAILURE(acpidev_dr_get_board_name(hdl, argp->temp,
652 652 sizeof (argp->temp)))) {
653 653 DRMACH_PR("!drmach_board_generate_name: failed to "
654 654 "generate board name for handle %p.", hdl);
655 655 /* Keep on walking. */
656 656 return (AE_OK);
657 657 }
658 658 argp->count++;
659 659 argp->off += snprintf(argp->buf + argp->off, argp->len - argp->off,
660 660 " %s", argp->temp);
661 661 if (argp->off >= argp->len) {
662 662 return (AE_CTRL_TERMINATE);
663 663 }
664 664
665 665 return (AE_OK);
666 666 }
667 667
668 668 static ssize_t
669 669 drmach_board_list_dependency(ACPI_HANDLE hdl, boolean_t edl, char *prefix,
670 670 char *buf, size_t len)
671 671 {
672 672 ACPI_STATUS rc;
673 673 ssize_t off;
674 674 struct drmach_board_list_dep_arg *ap;
675 675
676 676 ASSERT(buf != NULL && len != 0);
677 677 if (buf == NULL || len == 0) {
678 678 return (-1);
679 679 }
680 680
681 681 ap = kmem_zalloc(sizeof (*ap), KM_SLEEP);
682 682 ap->buf = buf;
683 683 ap->len = len;
684 684 ap->off = snprintf(buf, len, "%s", prefix);
685 685 if (ap->off >= len) {
686 686 *buf = '\0';
687 687 kmem_free(ap, sizeof (*ap));
688 688 return (-1);
689 689 }
690 690
691 691 /* Generate the device dependency list. */
692 692 if (edl) {
693 693 rc = acpidev_dr_device_walk_edl(hdl,
694 694 drmach_board_generate_name, ap, NULL);
695 695 } else {
696 696 rc = acpidev_dr_device_walk_ejd(hdl,
697 697 drmach_board_generate_name, ap, NULL);
698 698 }
699 699 if (ACPI_FAILURE(rc)) {
700 700 *buf = '\0';
701 701 ap->off = -1;
702 702 /* No device has dependency on this board. */
703 703 } else if (ap->count == 0) {
704 704 *buf = '\0';
705 705 ap->off = 0;
706 706 }
707 707
708 708 off = ap->off;
709 709 kmem_free(ap, sizeof (*ap));
710 710
711 711 return (off);
712 712 }
713 713
714 714 static sbd_error_t *
715 715 drmach_board_status(drmachid_t id, drmach_status_t *stat)
716 716 {
717 717 sbd_error_t *err = NULL;
718 718 drmach_board_t *bp;
719 719 DRMACH_HANDLE hdl;
720 720 size_t off;
721 721
722 722 if (!DRMACH_IS_BOARD_ID(id))
723 723 return (drerr_new(0, EX86_INAPPROP, NULL));
724 724 bp = id;
725 725
726 726 if (bp->tree == NULL)
727 727 return (drerr_new(0, EX86_INAPPROP, NULL));
728 728 hdl = drmach_node_get_dnode(bp->tree);
729 729 if (hdl == NULL)
730 730 return (drerr_new(0, EX86_INAPPROP, NULL));
731 731
732 732 stat->busy = 0; /* assume not busy */
733 733 stat->configured = 0; /* assume not configured */
734 734 stat->assigned = bp->assigned;
735 735 stat->powered = bp->powered = acpidev_dr_device_is_powered(hdl);
736 736 stat->empty = !acpidev_dr_device_is_present(hdl);
737 737 if (ACPI_SUCCESS(acpidev_dr_device_check_status(hdl))) {
738 738 stat->cond = bp->cond = SBD_COND_OK;
739 739 } else {
740 740 stat->cond = bp->cond = SBD_COND_FAILED;
741 741 }
742 742 stat->info[0] = '\0';
743 743
744 744 /* Generate the eject device list. */
745 745 if (drmach_board_list_dependency(hdl, B_TRUE, "EDL:",
746 746 stat->info, sizeof (stat->info)) < 0) {
747 747 DRMACH_PR("!drmach_board_status: failed to generate "
748 748 "eject device list for board %d.", bp->bnum);
749 749 stat->info[0] = '\0';
750 750 }
751 751 off = strlen(stat->info);
752 752 if (off < sizeof (stat->info)) {
753 753 if (drmach_board_list_dependency(hdl, B_FALSE,
754 754 off ? ", EJD:" : "EJD:",
755 755 stat->info + off, sizeof (stat->info) - off) < 0) {
756 756 DRMACH_PR("!drmach_board_status: failed to generate "
757 757 "eject dependent device for board %d.", bp->bnum);
758 758 stat->info[off] = '\0';
759 759 }
760 760 }
761 761
762 762 switch (acpidev_dr_get_board_type(bp->tree->get_dnode(bp->tree))) {
763 763 case ACPIDEV_CPU_BOARD:
764 764 (void) strlcpy(stat->type, "CPU Board", sizeof (stat->type));
765 765 break;
766 766 case ACPIDEV_MEMORY_BOARD:
767 767 (void) strlcpy(stat->type, "MemoryBoard", sizeof (stat->type));
768 768 break;
769 769 case ACPIDEV_IO_BOARD:
770 770 (void) strlcpy(stat->type, "IO Board", sizeof (stat->type));
771 771 break;
772 772 case ACPIDEV_SYSTEM_BOARD:
773 773 /*FALLTHROUGH*/
774 774 default:
775 775 (void) strlcpy(stat->type, "SystemBoard", sizeof (stat->type));
776 776 break;
777 777 }
778 778
779 779 if (bp->devices) {
780 780 int rv;
781 781 uint_t d_idx;
782 782 drmachid_t d_id;
783 783
784 784 rv = drmach_array_first(bp->devices, &d_idx, &d_id);
785 785 while (rv == 0) {
786 786 drmach_status_t d_stat;
787 787
788 788 err = drmach_device_status(d_id, &d_stat);
789 789 if (err)
790 790 break;
791 791
792 792 stat->busy |= d_stat.busy;
793 793 stat->configured |= d_stat.configured;
794 794
795 795 rv = drmach_array_next(bp->devices, &d_idx, &d_id);
796 796 }
797 797 }
798 798
799 799 return (err);
800 800 }
801 801
802 802 /*
803 803 * When DR is initialized, we walk the device tree and acquire a hold on
804 804 * all the nodes that are interesting to DR. This is so that the corresponding
805 805 * branches cannot be deleted.
806 806 */
807 807 static int
808 808 drmach_hold_rele_devtree(dev_info_t *rdip, void *arg)
809 809 {
810 810 int *holdp = (int *)arg;
811 811 ACPI_HANDLE hdl = NULL;
812 812 acpidev_data_handle_t dhdl;
813 813
814 814 /* Skip nodes and subtrees which are not created by acpidev. */
815 815 if (ACPI_FAILURE(acpica_get_handle(rdip, &hdl))) {
816 816 return (DDI_WALK_PRUNECHILD);
817 817 }
818 818 ASSERT(hdl != NULL);
819 819 dhdl = acpidev_data_get_handle(hdl);
820 820 if (dhdl == NULL) {
821 821 return (DDI_WALK_PRUNECHILD);
822 822 }
823 823
824 824 /* Hold/release devices which are interesting to DR operations. */
825 825 if (acpidev_data_dr_ready(dhdl)) {
826 826 if (*holdp) {
827 827 ASSERT(!e_ddi_branch_held(rdip));
828 828 e_ddi_branch_hold(rdip);
829 829 } else {
830 830 ASSERT(e_ddi_branch_held(rdip));
831 831 e_ddi_branch_rele(rdip);
832 832 }
833 833 }
834 834
835 835 return (DDI_WALK_CONTINUE);
836 836 }
837 837
838 838 static void
839 839 drmach_hold_devtree(void)
840 840 {
841 841 dev_info_t *dip;
842 842 int circ;
843 843 int hold = 1;
844 844
845 845 dip = ddi_root_node();
846 846 ndi_devi_enter(dip, &circ);
847 847 ddi_walk_devs(ddi_get_child(dip), drmach_hold_rele_devtree, &hold);
848 848 ndi_devi_exit(dip, circ);
849 849 }
850 850
851 851 static void
852 852 drmach_release_devtree(void)
853 853 {
854 854 dev_info_t *dip;
855 855 int circ;
856 856 int hold = 0;
857 857
858 858 dip = ddi_root_node();
859 859 ndi_devi_enter(dip, &circ);
860 860 ddi_walk_devs(ddi_get_child(dip), drmach_hold_rele_devtree, &hold);
861 861 ndi_devi_exit(dip, circ);
862 862 }
863 863
864 864 static boolean_t
865 865 drmach_cpr_callb(void *arg, int code)
866 866 {
867 867 _NOTE(ARGUNUSED(arg));
868 868
869 869 if (code == CB_CODE_CPR_CHKPT) {
870 870 /*
871 871 * Temporarily block CPR operations if there are DR operations
872 872 * ongoing.
873 873 */
874 874 rw_enter(&drmach_cpr_rwlock, RW_WRITER);
875 875 } else {
876 876 rw_exit(&drmach_cpr_rwlock);
877 877 }
878 878
879 879 return (B_TRUE);
880 880 }
881 881
882 882 static int
883 883 drmach_init(void)
884 884 {
885 885 DRMACH_HANDLE hdl;
886 886 drmachid_t id;
887 887 uint_t bnum;
888 888
889 889 if (MAX_BOARDS > SHRT_MAX) {
890 890 cmn_err(CE_WARN, "!drmach_init: system has too many (%d) "
891 891 "hotplug capable boards.", MAX_BOARDS);
892 892 return (ENXIO);
893 893 } else if (MAX_CMP_UNITS_PER_BOARD > 1) {
894 894 cmn_err(CE_WARN, "!drmach_init: DR doesn't support multiple "
895 895 "(%d) physical processors on one board.",
896 896 MAX_CMP_UNITS_PER_BOARD);
897 897 return (ENXIO);
898 898 } else if (!ISP2(MAX_CORES_PER_CMP)) {
899 899 cmn_err(CE_WARN, "!drmach_init: number of logical CPUs (%d) in "
900 900 "physical processor is not power of 2.",
901 901 MAX_CORES_PER_CMP);
902 902 return (ENXIO);
903 903 } else if (MAX_CPU_UNITS_PER_BOARD > DEVSET_CPU_NUMBER ||
904 904 MAX_MEM_UNITS_PER_BOARD > DEVSET_MEM_NUMBER ||
905 905 MAX_IO_UNITS_PER_BOARD > DEVSET_IO_NUMBER) {
906 906 cmn_err(CE_WARN, "!drmach_init: system has more CPU/memory/IO "
907 907 "units than the DR driver can handle.");
908 908 return (ENXIO);
909 909 }
910 910
911 911 rw_init(&drmach_cpr_rwlock, NULL, RW_DEFAULT, NULL);
912 912 drmach_cpr_cid = callb_add(drmach_cpr_callb, NULL,
913 913 CB_CL_CPR_PM, "drmach");
914 914
915 915 rw_init(&drmach_boards_rwlock, NULL, RW_DEFAULT, NULL);
916 916 drmach_boards = drmach_array_new(0, MAX_BOARDS - 1);
917 917 drmach_domain.allow_dr = acpidev_dr_capable();
918 918
919 919 for (bnum = 0; bnum < MAX_BOARDS; bnum++) {
920 920 hdl = NULL;
921 921 if (ACPI_FAILURE(acpidev_dr_get_board_handle(bnum, &hdl)) ||
922 922 hdl == NULL) {
923 923 cmn_err(CE_WARN, "!drmach_init: failed to lookup ACPI "
924 924 "handle for board %d.", bnum);
925 925 continue;
926 926 }
927 927 if (drmach_array_get(drmach_boards, bnum, &id) == -1) {
928 928 DRMACH_PR("!drmach_init: failed to get handle "
929 929 "for board %d.", bnum);
930 930 ASSERT(0);
931 931 goto error;
932 932 } else if (id == NULL) {
933 933 (void) drmach_board_new(bnum, 1);
934 934 }
935 935 }
936 936
937 937 /*
938 938 * Walk descendants of the devinfo root node and hold
939 939 * all devinfo branches of interest.
940 940 */
941 941 drmach_hold_devtree();
942 942
943 943 return (0);
944 944
945 945 error:
946 946 drmach_array_dispose(drmach_boards, drmach_board_dispose);
947 947 rw_destroy(&drmach_boards_rwlock);
948 948 rw_destroy(&drmach_cpr_rwlock);
949 949 return (ENXIO);
950 950 }
951 951
952 952 static void
953 953 drmach_fini(void)
954 954 {
955 955 rw_enter(&drmach_boards_rwlock, RW_WRITER);
956 956 if (drmach_boards != NULL) {
957 957 drmach_array_dispose(drmach_boards, drmach_board_dispose);
958 958 drmach_boards = NULL;
959 959 }
960 960 rw_exit(&drmach_boards_rwlock);
961 961
962 962 /*
963 963 * Walk descendants of the root devinfo node
964 964 * release holds acquired on branches in drmach_init()
965 965 */
966 966 drmach_release_devtree();
967 967
968 968 (void) callb_delete(drmach_cpr_cid);
969 969 rw_destroy(&drmach_cpr_rwlock);
970 970 rw_destroy(&drmach_boards_rwlock);
971 971 }
972 972
973 973 sbd_error_t *
974 974 drmach_io_new(drmach_device_t *proto, drmachid_t *idp)
975 975 {
976 976 drmach_io_t *ip;
977 977 int portid;
978 978
979 979 portid = proto->portid;
980 980 ASSERT(portid != -1);
981 981 proto->unum = portid;
982 982
983 983 ip = kmem_zalloc(sizeof (drmach_io_t), KM_SLEEP);
984 984 bcopy(proto, &ip->dev, sizeof (ip->dev));
985 985 ip->dev.node = drmach_node_dup(proto->node);
986 986 ip->dev.cm.isa = (void *)drmach_io_new;
987 987 ip->dev.cm.dispose = drmach_io_dispose;
988 988 ip->dev.cm.release = drmach_io_release;
989 989 ip->dev.cm.status = drmach_io_status;
990 990 (void) snprintf(ip->dev.cm.name, sizeof (ip->dev.cm.name), "%s%d",
991 991 ip->dev.type, ip->dev.unum);
992 992
993 993 *idp = (drmachid_t)ip;
994 994
995 995 return (NULL);
996 996 }
997 997
998 998 static void
999 999 drmach_io_dispose(drmachid_t id)
1000 1000 {
1001 1001 drmach_io_t *self;
1002 1002
1003 1003 ASSERT(DRMACH_IS_IO_ID(id));
1004 1004
1005 1005 self = id;
1006 1006 if (self->dev.node)
1007 1007 drmach_node_dispose(self->dev.node);
1008 1008
1009 1009 kmem_free(self, sizeof (*self));
1010 1010 }
1011 1011
1012 1012 static sbd_error_t *
1013 1013 drmach_io_release(drmachid_t id)
1014 1014 {
1015 1015 if (!DRMACH_IS_IO_ID(id))
1016 1016 return (drerr_new(0, EX86_INAPPROP, NULL));
1017 1017
1018 1018 return (NULL);
1019 1019 }
1020 1020
1021 1021 static sbd_error_t *
1022 1022 drmach_io_status(drmachid_t id, drmach_status_t *stat)
1023 1023 {
1024 1024 drmach_device_t *dp;
1025 1025 sbd_error_t *err;
1026 1026 int configured;
1027 1027
1028 1028 ASSERT(DRMACH_IS_IO_ID(id));
1029 1029 dp = id;
1030 1030
1031 1031 err = drmach_io_is_attached(id, &configured);
1032 1032 if (err)
1033 1033 return (err);
1034 1034
1035 1035 stat->assigned = dp->bp->assigned;
1036 1036 stat->powered = dp->bp->powered;
1037 1037 stat->configured = (configured != 0);
1038 1038 stat->busy = dp->busy;
1039 1039 (void) strlcpy(stat->type, dp->type, sizeof (stat->type));
1040 1040 stat->info[0] = '\0';
1041 1041
1042 1042 return (NULL);
1043 1043 }
1044 1044
1045 1045 sbd_error_t *
1046 1046 drmach_cpu_new(drmach_device_t *proto, drmachid_t *idp)
1047 1047 {
1048 1048 int portid;
1049 1049 processorid_t cpuid;
1050 1050 drmach_cpu_t *cp = NULL;
1051 1051
1052 1052 /* the portid is APIC ID of the node */
1053 1053 portid = proto->portid;
1054 1054 ASSERT(portid != -1);
1055 1055
1056 1056 /*
1057 1057 * Assume all CPUs are homogeneous and have the same number of
1058 1058 * cores/threads.
1059 1059 */
1060 1060 proto->unum = portid % MAX_CPU_UNITS_PER_BOARD;
1061 1061
1062 1062 cp = kmem_zalloc(sizeof (drmach_cpu_t), KM_SLEEP);
1063 1063 bcopy(proto, &cp->dev, sizeof (cp->dev));
1064 1064 cp->dev.node = drmach_node_dup(proto->node);
1065 1065 cp->dev.cm.isa = (void *)drmach_cpu_new;
1066 1066 cp->dev.cm.dispose = drmach_cpu_dispose;
1067 1067 cp->dev.cm.release = drmach_cpu_release;
1068 1068 cp->dev.cm.status = drmach_cpu_status;
1069 1069 (void) snprintf(cp->dev.cm.name, sizeof (cp->dev.cm.name), "%s%d",
1070 1070 cp->dev.type, cp->dev.unum);
1071 1071
1072 1072 cp->apicid = portid;
1073 1073 if (ACPI_SUCCESS(acpica_get_cpu_id_by_object(
1074 1074 drmach_node_get_dnode(proto->node), &cpuid))) {
1075 1075 cp->cpuid = cpuid;
1076 1076 } else {
1077 1077 cp->cpuid = -1;
1078 1078 }
1079 1079
1080 1080  	/* Mark CPU0 as busy; many other components depend on it. */
1081 1081 if (cp->cpuid == 0) {
1082 1082 cp->dev.busy = 1;
1083 1083 }
1084 1084
1085 1085 *idp = (drmachid_t)cp;
1086 1086
1087 1087 return (NULL);
1088 1088 }
1089 1089
1090 1090 static void
1091 1091 drmach_cpu_dispose(drmachid_t id)
1092 1092 {
1093 1093 drmach_cpu_t *self;
1094 1094
1095 1095 ASSERT(DRMACH_IS_CPU_ID(id));
1096 1096
1097 1097 self = id;
1098 1098 if (self->dev.node)
1099 1099 drmach_node_dispose(self->dev.node);
1100 1100
1101 1101 kmem_free(self, sizeof (*self));
1102 1102 }
1103 1103
1104 1104 static sbd_error_t *
1105 1105 drmach_cpu_release(drmachid_t id)
1106 1106 {
1107 1107 if (!DRMACH_IS_CPU_ID(id))
1108 1108 return (drerr_new(0, EX86_INAPPROP, NULL));
1109 1109
1110 1110 return (NULL);
1111 1111 }
1112 1112
1113 1113 static sbd_error_t *
1114 1114 drmach_cpu_status(drmachid_t id, drmach_status_t *stat)
1115 1115 {
1116 1116 drmach_cpu_t *cp;
1117 1117 drmach_device_t *dp;
1118 1118
1119 1119 ASSERT(DRMACH_IS_CPU_ID(id));
1120 1120 cp = (drmach_cpu_t *)id;
1121 1121 dp = &cp->dev;
1122 1122
1123 1123 stat->assigned = dp->bp->assigned;
1124 1124 stat->powered = dp->bp->powered;
1125 1125 mutex_enter(&cpu_lock);
1126 1126 stat->configured = (cpu_get(cp->cpuid) != NULL);
1127 1127 mutex_exit(&cpu_lock);
1128 1128 stat->busy = dp->busy;
1129 1129 (void) strlcpy(stat->type, dp->type, sizeof (stat->type));
1130 1130 stat->info[0] = '\0';
1131 1131
1132 1132 return (NULL);
1133 1133 }
1134 1134
1135 1135 static int
1136 1136 drmach_setup_mc_info(DRMACH_HANDLE hdl, drmach_mem_t *mp)
1137 1137 {
1138 1138 uint_t i, j, count;
1139 1139 struct memlist *ml = NULL, *ml2 = NULL;
1140 1140 acpidev_regspec_t *regp;
1141 1141 uint64_t align, addr_min, addr_max, total_size, skipped_size;
1142 1142
1143 1143 if (hdl == NULL) {
1144 1144 return (-1);
1145 1145 } else if (ACPI_FAILURE(acpidev_dr_get_mem_alignment(hdl, &align))) {
1146 1146 return (-1);
1147 1147 } else {
1148 1148 ASSERT((align & (align - 1)) == 0);
1149 1149 mp->mem_alignment = align;
1150 1150 }
1151 1151
1152 1152 addr_min = UINT64_MAX;
1153 1153 addr_max = 0;
1154 1154 total_size = 0;
1155 1155 skipped_size = 0;
1156 1156 /*
1157 1157 * There's a memory hole just below 4G on x86, which needs special
1158 1158 * handling. All other addresses assigned to a specific memory device
1159 1159 * should be contiguous.
1160 1160 */
1161 1161  	if (ACPI_FAILURE(acpidev_dr_device_get_regspec(hdl, TRUE, &regp,
1162 1162 &count))) {
1163 1163 return (-1);
1164 1164 }
1165 1165 for (i = 0, j = 0; i < count; i++) {
1166 1166 uint64_t addr, size;
1167 1167
1168 1168 addr = (uint64_t)regp[i].phys_mid << 32;
1169 1169 addr |= (uint64_t)regp[i].phys_low;
1170 1170 size = (uint64_t)regp[i].size_hi << 32;
1171 1171 size |= (uint64_t)regp[i].size_low;
1172 1172 if (size == 0)
1173 1173 continue;
1174 1174 else
1175 1175 j++;
1176 1176
1177 1177 total_size += size;
1178 1178 if (addr < addr_min)
1179 1179 addr_min = addr;
1180 1180 if (addr + size > addr_max)
1181 1181 addr_max = addr + size;
1182 1182 if (mp->dev.bp->boot_board ||
1183 1183 j <= acpidev_dr_max_segments_per_mem_device()) {
1184 1184 ml = memlist_add_span(ml, addr, size);
1185 1185 } else {
1186 1186 skipped_size += size;
1187 1187 }
1188 1188 }
1189 1189 acpidev_dr_device_free_regspec(regp, count);
1190 1190
1191 1191 if (skipped_size != 0) {
1192 1192 cmn_err(CE_WARN, "!drmach: too many (%d) segments on memory "
1193 1193 "device, max (%d) segments supported, 0x%" PRIx64 " bytes "
1194 1194 "of memory skipped.",
1195 1195 j, acpidev_dr_max_segments_per_mem_device(), skipped_size);
1196 1196 }
1197 1197
1198 1198 mp->slice_base = addr_min;
1199 1199 mp->slice_top = addr_max;
1200 1200 mp->slice_size = total_size;
1201 1201
1202 1202 if (mp->dev.bp->boot_board) {
1203 1203 uint64_t endpa = _ptob64(physmax + 1);
1204 1204
1205 1205 /*
1206 1206 * we intersect phys_install to get base_pa.
1207 1207 * This only works at boot-up time.
1208 1208 */
1209 1209 memlist_read_lock();
1210 1210 ml2 = memlist_dup(phys_install);
1211 1211 memlist_read_unlock();
1212 1212
1213 1213 ml2 = memlist_del_span(ml2, 0ull, mp->slice_base);
1214 1214 if (ml2 && endpa > addr_max) {
1215 1215 ml2 = memlist_del_span(ml2, addr_max, endpa - addr_max);
1216 1216 }
1217 1217 }
1218 1218
1219 1219 /*
1220 1220 * Create a memlist for the memory board.
1221 1221 * The created memlist only contains configured memory if there's
1222 1222 * configured memory on the board, otherwise it contains all memory
1223 1223 * on the board.
1224 1224 */
1225 1225 if (ml2) {
1226 1226 uint64_t nbytes = 0;
1227 1227 struct memlist *p;
1228 1228
1229 1229 for (p = ml2; p; p = p->ml_next) {
1230 1230 nbytes += p->ml_size;
1231 1231 }
1232 1232 if (nbytes == 0) {
1233 1233 memlist_delete(ml2);
1234 1234 ml2 = NULL;
1235 1235 } else {
1236 1236 /* Node has configured memory at boot time. */
1237 1237 mp->base_pa = ml2->ml_address;
1238 1238 mp->nbytes = nbytes;
1239 1239 mp->memlist = ml2;
1240 1240 if (ml)
1241 1241 memlist_delete(ml);
1242 1242 }
1243 1243 }
1244 1244 if (ml2 == NULL) {
1245 1245 /* Not configured at boot time. */
1246 1246 mp->base_pa = UINT64_MAX;
1247 1247 mp->nbytes = 0;
1248 1248 mp->memlist = ml;
1249 1249 }
1250 1250
1251 1251 return (0);
1252 1252 }
1253 1253
1254 1254 sbd_error_t *
1255 1255 drmach_mem_new(drmach_device_t *proto, drmachid_t *idp)
1256 1256 {
1257 1257 DRMACH_HANDLE hdl;
1258 1258 drmach_mem_t *mp;
1259 1259 int portid;
1260 1260
1261 1261 mp = kmem_zalloc(sizeof (drmach_mem_t), KM_SLEEP);
1262 1262 portid = proto->portid;
1263 1263 ASSERT(portid != -1);
1264 1264 proto->unum = portid;
1265 1265
1266 1266 bcopy(proto, &mp->dev, sizeof (mp->dev));
1267 1267 mp->dev.node = drmach_node_dup(proto->node);
1268 1268 mp->dev.cm.isa = (void *)drmach_mem_new;
1269 1269 mp->dev.cm.dispose = drmach_mem_dispose;
1270 1270 mp->dev.cm.release = drmach_mem_release;
1271 1271 mp->dev.cm.status = drmach_mem_status;
1272 1272
1273 1273 (void) snprintf(mp->dev.cm.name, sizeof (mp->dev.cm.name), "%s%d",
1274 1274 mp->dev.type, proto->unum);
1275 1275 hdl = mp->dev.node->get_dnode(mp->dev.node);
1276 1276 ASSERT(hdl != NULL);
1277 1277 if (drmach_setup_mc_info(hdl, mp) != 0) {
1278 1278 kmem_free(mp, sizeof (drmach_mem_t));
1279 1279 *idp = (drmachid_t)NULL;
1280 1280 return (drerr_new(1, EX86_MC_SETUP, NULL));
1281 1281 }
1282 1282
1283 1283 /* make sure we do not create memoryless nodes */
1284 1284 if (mp->nbytes == 0 && mp->slice_size == 0) {
1285 1285 kmem_free(mp, sizeof (drmach_mem_t));
1286 1286 *idp = (drmachid_t)NULL;
1287 1287 } else
1288 1288 *idp = (drmachid_t)mp;
1289 1289
1290 1290 return (NULL);
1291 1291 }
1292 1292
1293 1293 static void
1294 1294 drmach_mem_dispose(drmachid_t id)
1295 1295 {
1296 1296 drmach_mem_t *mp;
1297 1297
1298 1298 ASSERT(DRMACH_IS_MEM_ID(id));
1299 1299
1300 1300 mp = id;
1301 1301
1302 1302 if (mp->dev.node)
1303 1303 drmach_node_dispose(mp->dev.node);
1304 1304
1305 1305 if (mp->memlist) {
1306 1306 memlist_delete(mp->memlist);
1307 1307 mp->memlist = NULL;
1308 1308 }
1309 1309
1310 1310 kmem_free(mp, sizeof (*mp));
1311 1311 }
1312 1312
1313 1313 static sbd_error_t *
1314 1314 drmach_mem_release(drmachid_t id)
1315 1315 {
1316 1316 if (!DRMACH_IS_MEM_ID(id))
1317 1317 return (drerr_new(0, EX86_INAPPROP, NULL));
1318 1318
1319 1319 return (NULL);
1320 1320 }
1321 1321
1322 1322 static sbd_error_t *
1323 1323 drmach_mem_status(drmachid_t id, drmach_status_t *stat)
1324 1324 {
1325 1325 uint64_t pa;
1326 1326 drmach_mem_t *dp;
1327 1327 struct memlist *ml = NULL;
1328 1328
1329 1329 ASSERT(DRMACH_IS_MEM_ID(id));
1330 1330 dp = id;
1331 1331
1332 1332 /* get starting physical address of target memory */
1333 1333 pa = dp->base_pa;
1334 1334 /* round down to slice boundary */
1335 1335 pa &= ~(dp->mem_alignment - 1);
1336 1336
1337 1337 /* stop at first span that is in slice */
1338 1338 memlist_read_lock();
1339 1339 for (ml = phys_install; ml; ml = ml->ml_next)
1340 1340 if (ml->ml_address >= pa && ml->ml_address < dp->slice_top)
1341 1341 break;
1342 1342 memlist_read_unlock();
1343 1343
1344 1344 stat->assigned = dp->dev.bp->assigned;
1345 1345 stat->powered = dp->dev.bp->powered;
1346 1346 stat->configured = (ml != NULL);
1347 1347 stat->busy = dp->dev.busy;
1348 1348 (void) strlcpy(stat->type, dp->dev.type, sizeof (stat->type));
1349 1349 stat->info[0] = '\0';
1350 1350
1351 1351 return (NULL);
1352 1352 }
1353 1353
1354 1354 /*
1355 1355 * Public interfaces exported to support platform independent dr driver.
1356 1356 */
1357 1357 uint_t
1358 1358 drmach_max_boards(void)
1359 1359 {
1360 1360 return (acpidev_dr_max_boards());
1361 1361 }
1362 1362
1363 1363 uint_t
1364 1364 drmach_max_io_units_per_board(void)
1365 1365 {
1366 1366 return (acpidev_dr_max_io_units_per_board());
1367 1367 }
1368 1368
1369 1369 uint_t
1370 1370 drmach_max_cmp_units_per_board(void)
1371 1371 {
1372 1372 return (acpidev_dr_max_cmp_units_per_board());
1373 1373 }
1374 1374
1375 1375 uint_t
1376 1376 drmach_max_mem_units_per_board(void)
1377 1377 {
1378 1378 return (acpidev_dr_max_mem_units_per_board());
1379 1379 }
1380 1380
1381 1381 uint_t
1382 1382 drmach_max_core_per_cmp(void)
1383 1383 {
1384 1384 return (acpidev_dr_max_cpu_units_per_cmp());
1385 1385 }
1386 1386
1387 1387 sbd_error_t *
1388 1388 drmach_pre_op(int cmd, drmachid_t id, drmach_opts_t *opts, void *argp)
1389 1389 {
1390 1390 drmach_board_t *bp = (drmach_board_t *)id;
1391 1391 sbd_error_t *err = NULL;
1392 1392
1393 1393 /* allow status and ncm operations to always succeed */
1394 1394 if ((cmd == SBD_CMD_STATUS) || (cmd == SBD_CMD_GETNCM)) {
1395 1395 return (NULL);
1396 1396 }
1397 1397
1398 1398 switch (cmd) {
1399 1399 case SBD_CMD_POWERON:
1400 1400 case SBD_CMD_POWEROFF:
1401 1401 /*
1402 1402 * Disable fast reboot if CPU/MEM/IOH hotplug event happens.
1403 1403 * Note: this is a temporary solution and will be revised when
1404 1404 * fast reboot can support CPU/MEM/IOH DR operations in future.
1405 1405 *
1406 1406 * ACPI BIOS generates some static ACPI tables, such as MADT,
1407 1407 * SRAT and SLIT, to describe system hardware configuration on
1408 1408 * power-on. When CPU/MEM/IOH hotplug event happens, those
1409 1409 * static tables won't be updated and will become stale.
1410 1410 *
1411 1411 * If we reset system by fast reboot, BIOS will have no chance
1412 1412 * to regenerate those staled static tables. Fast reboot can't
1413 1413  		 * tolerate such inconsistency between stale ACPI tables and
1414 1414 * real hardware configuration yet.
1415 1415 *
1416 1416 * A temporary solution is introduced to disable fast reboot if
1417 1417 * CPU/MEM/IOH hotplug event happens. This solution should be
1418 1418 * revised when fast reboot is enhanced to support CPU/MEM/IOH
1419 1419 * DR operations.
1420 1420 */
1421 1421 fastreboot_disable(FBNS_HOTPLUG);
1422 1422 /*FALLTHROUGH*/
1423 1423
1424 1424 default:
1425 1425 /* Block out the CPR thread. */
1426 1426 rw_enter(&drmach_cpr_rwlock, RW_READER);
1427 1427 break;
1428 1428 }
1429 1429
1430 1430 /* check all other commands for the required option string */
1431 1431 if ((opts->size > 0) && (opts->copts != NULL)) {
1432 1432 if (strstr(opts->copts, ACPIDEV_CMD_OST_PREFIX) == NULL) {
1433 1433 err = drerr_new(1, EX86_SUPPORT, NULL);
1434 1434 }
1435 1435 } else {
1436 1436 err = drerr_new(1, EX86_SUPPORT, NULL);
1437 1437 }
1438 1438
1439 1439 if (!err && id && DRMACH_IS_BOARD_ID(id)) {
1440 1440 switch (cmd) {
1441 1441 case SBD_CMD_TEST:
1442 1442 break;
1443 1443 case SBD_CMD_CONNECT:
1444 1444 if (bp->connected)
1445 1445 err = drerr_new(0, ESBD_STATE, NULL);
1446 1446 else if (!drmach_domain.allow_dr)
1447 1447 err = drerr_new(1, EX86_SUPPORT, NULL);
1448 1448 break;
1449 1449 case SBD_CMD_DISCONNECT:
1450 1450 if (!bp->connected)
1451 1451 err = drerr_new(0, ESBD_STATE, NULL);
1452 1452 else if (!drmach_domain.allow_dr)
1453 1453 err = drerr_new(1, EX86_SUPPORT, NULL);
1454 1454 break;
1455 1455 default:
1456 1456 if (!drmach_domain.allow_dr)
1457 1457 err = drerr_new(1, EX86_SUPPORT, NULL);
1458 1458 break;
1459 1459
1460 1460 }
1461 1461 }
1462 1462
1463 1463 /*
1464 1464 * CPU/memory/IO DR operations will be supported in stages on x86.
1465 1465 * With early versions, some operations should be blocked here.
1466 1466 * This temporary hook will be removed when all CPU/memory/IO DR
1467 1467 * operations are supported on x86 systems.
1468 1468 *
1469 1469 * We only need to filter unsupported device types for
1470 1470 * SBD_CMD_CONFIGURE/SBD_CMD_UNCONFIGURE commands, all other
1471 1471 * commands are supported by all device types.
1472 1472 */
1473 1473 if (!err && (cmd == SBD_CMD_CONFIGURE || cmd == SBD_CMD_UNCONFIGURE)) {
1474 1474 int i;
1475 1475 dr_devset_t *devsetp = (dr_devset_t *)argp;
1476 1476 dr_devset_t devset = *devsetp;
1477 1477
1478 1478 switch (cmd) {
1479 1479 case SBD_CMD_CONFIGURE:
1480 1480 if (!plat_dr_support_cpu()) {
1481 1481 DEVSET_DEL(devset, SBD_COMP_CPU,
1482 1482 DEVSET_ANYUNIT);
1483 1483 } else {
1484 1484 for (i = MAX_CPU_UNITS_PER_BOARD;
1485 1485 i < DEVSET_CPU_NUMBER; i++) {
1486 1486 DEVSET_DEL(devset, SBD_COMP_CPU, i);
1487 1487 }
1488 1488 }
1489 1489
1490 1490 if (!plat_dr_support_memory()) {
1491 1491 DEVSET_DEL(devset, SBD_COMP_MEM,
1492 1492 DEVSET_ANYUNIT);
1493 1493 } else {
1494 1494 for (i = MAX_MEM_UNITS_PER_BOARD;
1495 1495 i < DEVSET_MEM_NUMBER; i++) {
1496 1496 DEVSET_DEL(devset, SBD_COMP_MEM, i);
1497 1497 }
1498 1498 }
1499 1499
1500 1500 /* No support of configuring IOH devices yet. */
1501 1501 DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
1502 1502 break;
1503 1503
1504 1504 case SBD_CMD_UNCONFIGURE:
1505 1505 if (!plat_dr_support_cpu()) {
1506 1506 DEVSET_DEL(devset, SBD_COMP_CPU,
1507 1507 DEVSET_ANYUNIT);
1508 1508 } else {
1509 1509 for (i = MAX_CPU_UNITS_PER_BOARD;
1510 1510 i < DEVSET_CPU_NUMBER; i++) {
1511 1511 DEVSET_DEL(devset, SBD_COMP_CPU, i);
1512 1512 }
1513 1513 }
1514 1514
1515 1515 /* No support of unconfiguring MEM/IOH devices yet. */
1516 1516 DEVSET_DEL(devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
1517 1517 DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
1518 1518 break;
1519 1519 }
1520 1520
1521 1521 *devsetp = devset;
1522 1522 if (DEVSET_IS_NULL(devset)) {
1523 1523 err = drerr_new(1, EX86_SUPPORT, NULL);
1524 1524 }
1525 1525 }
1526 1526
1527 1527 return (err);
1528 1528 }
1529 1529
1530 1530 sbd_error_t *
1531 1531 drmach_post_op(int cmd, drmachid_t id, drmach_opts_t *opts, int rv)
1532 1532 {
1533 1533 _NOTE(ARGUNUSED(id, opts, rv));
1534 1534
1535 1535 switch (cmd) {
1536 1536 case SBD_CMD_STATUS:
1537 1537 case SBD_CMD_GETNCM:
1538 1538 break;
1539 1539
1540 1540 default:
1541 1541 rw_exit(&drmach_cpr_rwlock);
1542 1542 break;
1543 1543 }
1544 1544
1545 1545 return (NULL);
1546 1546 }
1547 1547
1548 1548 sbd_error_t *
1549 1549 drmach_configure(drmachid_t id, int flags)
1550 1550 {
1551 1551 _NOTE(ARGUNUSED(flags));
1552 1552
1553 1553 drmach_device_t *dp;
1554 1554 sbd_error_t *err = NULL;
1555 1555 dev_info_t *rdip;
1556 1556 dev_info_t *fdip = NULL;
1557 1557
1558 1558 if (!DRMACH_IS_DEVICE_ID(id))
1559 1559 return (drerr_new(0, EX86_INAPPROP, NULL));
1560 1560 dp = id;
1561 1561
1562 1562 rdip = dp->node->getdip(dp->node);
1563 1563 ASSERT(rdip);
1564 1564 ASSERT(e_ddi_branch_held(rdip));
1565 1565
1566 1566 /* allocate cpu id for the CPU device. */
1567 1567 if (DRMACH_IS_CPU_ID(id)) {
1568 1568 DRMACH_HANDLE hdl = drmach_node_get_dnode(dp->node);
1569 1569 ASSERT(hdl != NULL);
1570 1570 if (ACPI_FAILURE(acpidev_dr_allocate_cpuid(hdl, NULL))) {
1571 1571 err = drerr_new(1, EX86_ALLOC_CPUID, NULL);
1572 1572 }
1573 1573 return (err);
1574 1574 }
1575 1575
1576 1576 if (DRMACH_IS_MEM_ID(id)) {
1577 1577 err = drmach_mem_update_lgrp(id);
1578 1578 if (err)
1579 1579 return (err);
1580 1580 }
1581 1581
1582 1582 if (e_ddi_branch_configure(rdip, &fdip, 0) != 0) {
1583 1583 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1584 1584 dev_info_t *dip = (fdip != NULL) ? fdip : rdip;
1585 1585
1586 1586 (void) ddi_pathname(dip, path);
1587 1587 err = drerr_new(1, EX86_DRVFAIL, path);
1588 1588 kmem_free(path, MAXPATHLEN);
1589 1589
1590 1590 /* If non-NULL, fdip is returned held and must be released */
1591 1591 if (fdip != NULL)
1592 1592 ddi_release_devi(fdip);
1593 1593 }
1594 1594
1595 1595 return (err);
1596 1596 }
1597 1597
1598 1598 sbd_error_t *
1599 1599 drmach_unconfigure(drmachid_t id, int flags)
1600 1600 {
1601 1601 _NOTE(ARGUNUSED(flags));
1602 1602
1603 1603 drmach_device_t *dp;
1604 1604 sbd_error_t *err = NULL;
1605 1605 dev_info_t *rdip, *fdip = NULL;
1606 1606
1607 1607 if (!DRMACH_IS_DEVICE_ID(id))
1608 1608 return (drerr_new(0, EX86_INAPPROP, NULL));
1609 1609 dp = id;
1610 1610
1611 1611 rdip = dp->node->getdip(dp->node);
1612 1612 ASSERT(rdip);
1613 1613 ASSERT(e_ddi_branch_held(rdip));
1614 1614
1615 1615 if (DRMACH_IS_CPU_ID(id)) {
1616 1616 DRMACH_HANDLE hdl = drmach_node_get_dnode(dp->node);
1617 1617 ASSERT(hdl != NULL);
1618 1618 if (ACPI_FAILURE(acpidev_dr_free_cpuid(hdl))) {
1619 1619 err = drerr_new(1, EX86_FREE_CPUID, NULL);
1620 1620 }
1621 1621 return (err);
1622 1622 }
1623 1623
1624 1624 /*
1625 1625 * Note: FORCE flag is no longer necessary under devfs
1626 1626 */
1627 1627 if (e_ddi_branch_unconfigure(rdip, &fdip, 0)) {
1628 1628 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1629 1629
1630 1630 /*
1631 1631 * If non-NULL, fdip is returned held and must be released.
1632 1632 */
1633 1633 if (fdip != NULL) {
1634 1634 (void) ddi_pathname(fdip, path);
1635 1635 ndi_rele_devi(fdip);
1636 1636 } else {
1637 1637 (void) ddi_pathname(rdip, path);
1638 1638 }
1639 1639
1640 1640 err = drerr_new(1, EX86_DRVFAIL, path);
1641 1641
1642 1642 kmem_free(path, MAXPATHLEN);
1643 1643 }
1644 1644
1645 1645 return (err);
1646 1646 }
1647 1647
1648 1648 sbd_error_t *
1649 1649 drmach_get_dip(drmachid_t id, dev_info_t **dip)
1650 1650 {
1651 1651 drmach_device_t *dp;
1652 1652
1653 1653 if (!DRMACH_IS_DEVICE_ID(id))
1654 1654 return (drerr_new(0, EX86_INAPPROP, NULL));
1655 1655 dp = id;
1656 1656
1657 1657 *dip = dp->node->getdip(dp->node);
1658 1658
1659 1659 return (NULL);
1660 1660 }
1661 1661
1662 1662 sbd_error_t *
1663 1663 drmach_release(drmachid_t id)
1664 1664 {
1665 1665 drmach_common_t *cp;
1666 1666
1667 1667 if (!DRMACH_IS_DEVICE_ID(id))
1668 1668 return (drerr_new(0, EX86_INAPPROP, NULL));
1669 1669 cp = id;
1670 1670
1671 1671 return (cp->release(id));
1672 1672 }
1673 1673
1674 1674 sbd_error_t *
1675 1675 drmach_status(drmachid_t id, drmach_status_t *stat)
1676 1676 {
1677 1677 drmach_common_t *cp;
1678 1678 sbd_error_t *err;
1679 1679
1680 1680 rw_enter(&drmach_boards_rwlock, RW_READER);
1681 1681 if (!DRMACH_IS_ID(id)) {
1682 1682 rw_exit(&drmach_boards_rwlock);
1683 1683 return (drerr_new(0, EX86_NOTID, NULL));
1684 1684 }
1685 1685 cp = (drmach_common_t *)id;
1686 1686 err = cp->status(id, stat);
1687 1687 rw_exit(&drmach_boards_rwlock);
1688 1688
1689 1689 return (err);
1690 1690 }
1691 1691
1692 1692 static sbd_error_t *
1693 1693 drmach_update_acpi_status(drmachid_t id, drmach_opts_t *opts)
1694 1694 {
1695 1695 char *copts;
1696 1696 drmach_board_t *bp;
1697 1697 DRMACH_HANDLE hdl;
1698 1698 int event, code;
1699 1699 boolean_t inprogress = B_FALSE;
1700 1700
1701 1701 if (DRMACH_NULL_ID(id) || !DRMACH_IS_BOARD_ID(id))
1702 1702 return (drerr_new(0, EX86_INAPPROP, NULL));
1703 1703 bp = (drmach_board_t *)id;
1704 1704 hdl = drmach_node_get_dnode(bp->tree);
1705 1705 ASSERT(hdl != NULL);
1706 1706 if (hdl == NULL)
1707 1707 return (drerr_new(0, EX86_INAPPROP, NULL));
1708 1708
1709 1709 /* Get the status code. */
1710 1710 copts = opts->copts;
1711 1711 if (strncmp(copts, ACPIDEV_CMD_OST_INPROGRESS,
1712 1712 strlen(ACPIDEV_CMD_OST_INPROGRESS)) == 0) {
1713 1713 inprogress = B_TRUE;
1714 1714 code = ACPI_OST_STA_INSERT_IN_PROGRESS;
1715 1715 copts += strlen(ACPIDEV_CMD_OST_INPROGRESS);
1716 1716 } else if (strncmp(copts, ACPIDEV_CMD_OST_SUCCESS,
1717 1717 strlen(ACPIDEV_CMD_OST_SUCCESS)) == 0) {
1718 1718 code = ACPI_OST_STA_SUCCESS;
1719 1719 copts += strlen(ACPIDEV_CMD_OST_SUCCESS);
1720 1720 } else if (strncmp(copts, ACPIDEV_CMD_OST_FAILURE,
1721 1721 strlen(ACPIDEV_CMD_OST_FAILURE)) == 0) {
1722 1722 code = ACPI_OST_STA_FAILURE;
1723 1723 copts += strlen(ACPIDEV_CMD_OST_FAILURE);
1724 1724 } else if (strncmp(copts, ACPIDEV_CMD_OST_NOOP,
1725 1725 strlen(ACPIDEV_CMD_OST_NOOP)) == 0) {
1726 1726 return (NULL);
1727 1727 } else {
1728 1728 return (drerr_new(0, EX86_UNKPTCMD, opts->copts));
1729 1729 }
1730 1730
1731 1731 /* Get the event type. */
1732 1732 copts = strstr(copts, ACPIDEV_EVENT_TYPE_ATTR_NAME);
1733 1733 if (copts == NULL) {
1734 1734 return (drerr_new(0, EX86_UNKPTCMD, opts->copts));
1735 1735 }
1736 1736 copts += strlen(ACPIDEV_EVENT_TYPE_ATTR_NAME);
1737 1737 if (copts[0] != '=') {
1738 1738 return (drerr_new(0, EX86_UNKPTCMD, opts->copts));
1739 1739 }
1740 1740 copts += strlen("=");
1741 1741 if (strncmp(copts, ACPIDEV_EVENT_TYPE_BUS_CHECK,
1742 1742 strlen(ACPIDEV_EVENT_TYPE_BUS_CHECK)) == 0) {
1743 1743 event = ACPI_NOTIFY_BUS_CHECK;
1744 1744 } else if (strncmp(copts, ACPIDEV_EVENT_TYPE_DEVICE_CHECK,
1745 1745 strlen(ACPIDEV_EVENT_TYPE_DEVICE_CHECK)) == 0) {
1746 1746 event = ACPI_NOTIFY_DEVICE_CHECK;
1747 1747 } else if (strncmp(copts, ACPIDEV_EVENT_TYPE_DEVICE_CHECK_LIGHT,
1748 1748 strlen(ACPIDEV_EVENT_TYPE_DEVICE_CHECK_LIGHT)) == 0) {
1749 1749 event = ACPI_NOTIFY_DEVICE_CHECK_LIGHT;
1750 1750 } else if (strncmp(copts, ACPIDEV_EVENT_TYPE_EJECT_REQUEST,
1751 1751 strlen(ACPIDEV_EVENT_TYPE_EJECT_REQUEST)) == 0) {
1752 1752 event = ACPI_NOTIFY_EJECT_REQUEST;
1753 1753 if (inprogress) {
1754 1754 code = ACPI_OST_STA_EJECT_IN_PROGRESS;
1755 1755 }
1756 1756 } else {
1757 1757 return (drerr_new(0, EX86_UNKPTCMD, opts->copts));
1758 1758 }
1759 1759
1760 1760 (void) acpidev_eval_ost(hdl, event, code, NULL, 0);
1761 1761
1762 1762 return (NULL);
1763 1763 }
1764 1764
1765 1765 static struct {
1766 1766 const char *name;
1767 1767 sbd_error_t *(*handler)(drmachid_t id, drmach_opts_t *opts);
1768 1768 } drmach_pt_arr[] = {
1769 1769 { ACPIDEV_CMD_OST_PREFIX, &drmach_update_acpi_status },
1770 1770 /* the following line must always be last */
1771 1771 { NULL, NULL }
1772 1772 };
1773 1773
1774 1774 sbd_error_t *
1775 1775 drmach_passthru(drmachid_t id, drmach_opts_t *opts)
1776 1776 {
1777 1777 int i;
1778 1778 sbd_error_t *err;
1779 1779
1780 1780 i = 0;
1781 1781 while (drmach_pt_arr[i].name != NULL) {
1782 1782 int len = strlen(drmach_pt_arr[i].name);
1783 1783
1784 1784 if (strncmp(drmach_pt_arr[i].name, opts->copts, len) == 0)
1785 1785 break;
1786 1786
1787 1787 i += 1;
1788 1788 }
1789 1789
1790 1790 if (drmach_pt_arr[i].name == NULL)
1791 1791 err = drerr_new(0, EX86_UNKPTCMD, opts->copts);
1792 1792 else
1793 1793 err = (*drmach_pt_arr[i].handler)(id, opts);
1794 1794
1795 1795 return (err);
1796 1796 }
1797 1797
1798 1798 /*
1799 1799 * Board specific interfaces to support dr driver
1800 1800 */
1801 1801 static int
1802 1802 drmach_get_portid(drmach_node_t *np)
1803 1803 {
1804 1804 uint32_t portid;
1805 1805
1806 1806 if (np->getprop(np, ACPIDEV_DR_PROP_PORTID,
1807 1807 &portid, sizeof (portid)) == 0) {
1808 1808 /*
1809 1809  		 * acpidev returns portid as uint32_t; validate it.
1810 1810 */
1811 1811 if (portid > INT_MAX) {
1812 1812 return (-1);
1813 1813 } else {
1814 1814 return (portid);
1815 1815 }
1816 1816 }
1817 1817
1818 1818 return (-1);
1819 1819 }
1820 1820
1821 1821 /*
1822 1822 * This is a helper function to determine if a given
1823 1823 * node should be considered for a dr operation according
1824 1824 * to predefined dr type nodes and the node's name.
1825 1825 * Formal Parameter : The name of a device node.
1826 1826 * Return Value: -1, name does not map to a valid dr type.
1827 1827   *                 A value greater than or equal to 0, name is a valid dr type.
1828 1828 */
1829 1829 static int
1830 1830 drmach_name2type_idx(char *name)
1831 1831 {
1832 1832 int index, ntypes;
1833 1833
1834 1834 if (name == NULL)
1835 1835 return (-1);
1836 1836
1837 1837 /*
1838 1838 * Determine how many possible types are currently supported
1839 1839 * for dr.
1840 1840 */
1841 1841 ntypes = sizeof (drmach_name2type) / sizeof (drmach_name2type[0]);
1842 1842
1843 1843  	/* Determine if the node's name corresponds to a predefined type. */
1844 1844 for (index = 0; index < ntypes; index++) {
1845 1845 if (strcmp(drmach_name2type[index].name, name) == 0)
1846 1846 /* The node is an allowed type for dr. */
1847 1847 return (index);
1848 1848 }
1849 1849
1850 1850 /*
1851 1851 * If the name of the node does not map to any of the
1852 1852 * types in the array drmach_name2type then the node is not of
1853 1853 * interest to dr.
1854 1854 */
1855 1855 return (-1);
1856 1856 }
1857 1857
1858 1858 static int
1859 1859 drmach_board_find_devices_cb(drmach_node_walk_args_t *args)
1860 1860 {
1861 1861 drmach_node_t *node = args->node;
1862 1862 drmach_board_cb_data_t *data = args->data;
1863 1863 drmach_board_t *obj = data->obj;
1864 1864
1865 1865 int rv, portid;
1866 1866 uint32_t bnum;
1867 1867 drmachid_t id;
1868 1868 drmach_device_t *device;
1869 1869 char name[OBP_MAXDRVNAME];
1870 1870
1871 1871 portid = drmach_get_portid(node);
1872 1872 rv = node->getprop(node, ACPIDEV_DR_PROP_DEVNAME,
1873 1873 name, OBP_MAXDRVNAME);
1874 1874 if (rv)
1875 1875 return (0);
1876 1876
1877 1877 rv = node->getprop(node, ACPIDEV_DR_PROP_BOARDNUM,
1878 1878 &bnum, sizeof (bnum));
1879 1879 if (rv) {
1880 1880 return (0);
1881 1881 }
1882 1882 if (bnum > INT_MAX) {
1883 1883 return (0);
1884 1884 }
1885 1885
1886 1886 if (bnum != obj->bnum)
1887 1887 return (0);
1888 1888
1889 1889 if (drmach_name2type_idx(name) < 0) {
1890 1890 return (0);
1891 1891 }
1892 1892
1893 1893 /*
1894 1894 * Create a device data structure from this node data.
1895 1895 * The call may yield nothing if the node is not of interest
1896 1896 * to drmach.
1897 1897 */
1898 1898 data->err = drmach_device_new(node, obj, portid, &id);
1899 1899 if (data->err)
1900 1900 return (-1);
1901 1901 else if (!id) {
1902 1902 /*
1903 1903 * drmach_device_new examined the node we passed in
1904 1904 * and determined that it was one not of interest to
1905 1905 * drmach. So, it is skipped.
1906 1906 */
1907 1907 return (0);
1908 1908 }
1909 1909
1910 1910 rv = drmach_array_set(obj->devices, data->ndevs++, id);
1911 1911 if (rv) {
1912 1912 data->err = DRMACH_INTERNAL_ERROR();
1913 1913 return (-1);
1914 1914 }
1915 1915 device = id;
1916 1916
1917 1917 data->err = (*data->found)(data->a, device->type, device->unum, id);
1918 1918
1919 1919 return (data->err == NULL ? 0 : -1);
1920 1920 }
1921 1921
1922 1922 sbd_error_t *
1923 1923 drmach_board_find_devices(drmachid_t id, void *a,
1924 1924 sbd_error_t *(*found)(void *a, const char *, int, drmachid_t))
1925 1925 {
1926 1926 drmach_board_t *bp = (drmach_board_t *)id;
1927 1927 sbd_error_t *err;
1928 1928 int max_devices;
1929 1929 int rv;
1930 1930 drmach_board_cb_data_t data;
1931 1931
1932 1932 if (!DRMACH_IS_BOARD_ID(id))
1933 1933 return (drerr_new(0, EX86_INAPPROP, NULL));
1934 1934
1935 1935 max_devices = MAX_CPU_UNITS_PER_BOARD;
1936 1936 max_devices += MAX_MEM_UNITS_PER_BOARD;
1937 1937 max_devices += MAX_IO_UNITS_PER_BOARD;
1938 1938
1939 1939 if (bp->devices == NULL)
1940 1940 bp->devices = drmach_array_new(0, max_devices);
1941 1941 ASSERT(bp->tree != NULL);
1942 1942
1943 1943 data.obj = bp;
1944 1944 data.ndevs = 0;
1945 1945 data.found = found;
1946 1946 data.a = a;
1947 1947 data.err = NULL;
1948 1948
1949 1949 acpidev_dr_lock_all();
1950 1950 rv = drmach_node_walk(bp->tree, &data, drmach_board_find_devices_cb);
1951 1951 acpidev_dr_unlock_all();
1952 1952 if (rv == 0) {
1953 1953 err = NULL;
1954 1954 } else {
1955 1955 drmach_array_dispose(bp->devices, drmach_device_dispose);
1956 1956 bp->devices = NULL;
1957 1957
1958 1958 if (data.err)
1959 1959 err = data.err;
1960 1960 else
1961 1961 err = DRMACH_INTERNAL_ERROR();
1962 1962 }
1963 1963
1964 1964 return (err);
1965 1965 }
1966 1966
1967 1967 int
1968 1968 drmach_board_lookup(int bnum, drmachid_t *id)
1969 1969 {
1970 1970 int rv = 0;
1971 1971
1972 1972 if (bnum < 0) {
1973 1973 *id = 0;
1974 1974 return (-1);
1975 1975 }
1976 1976
1977 1977 rw_enter(&drmach_boards_rwlock, RW_READER);
1978 1978 if (drmach_array_get(drmach_boards, (uint_t)bnum, id)) {
1979 1979 *id = 0;
1980 1980 rv = -1;
1981 1981 }
1982 1982 rw_exit(&drmach_boards_rwlock);
1983 1983
1984 1984 return (rv);
1985 1985 }
1986 1986
1987 1987 sbd_error_t *
1988 1988 drmach_board_name(int bnum, char *buf, int buflen)
1989 1989 {
1990 1990 ACPI_HANDLE hdl;
1991 1991 sbd_error_t *err = NULL;
1992 1992
1993 1993 if (bnum < 0) {
1994 1994 return (drerr_new(1, EX86_BNUM, "%d", bnum));
1995 1995 }
1996 1996
1997 1997 acpidev_dr_lock_all();
1998 1998 if (ACPI_FAILURE(acpidev_dr_get_board_handle(bnum, &hdl))) {
1999 1999 DRMACH_PR("!drmach_board_name: failed to lookup ACPI handle "
2000 2000 "for board %d.", bnum);
2001 2001 err = drerr_new(1, EX86_BNUM, "%d", bnum);
2002 2002 } else if (ACPI_FAILURE(acpidev_dr_get_board_name(hdl, buf, buflen))) {
2003 2003 DRMACH_PR("!drmach_board_name: failed to generate board name "
2004 2004 "for board %d.", bnum);
2005 2005 err = drerr_new(0, EX86_INVALID_ARG,
2006 2006 ": buffer is too small for board name.");
2007 2007 }
2008 2008 acpidev_dr_unlock_all();
2009 2009
2010 2010 return (err);
2011 2011 }
2012 2012
2013 2013 int
2014 2014 drmach_board_is_floating(drmachid_t id)
2015 2015 {
2016 2016 drmach_board_t *bp;
2017 2017
2018 2018 if (!DRMACH_IS_BOARD_ID(id))
2019 2019 return (0);
2020 2020
2021 2021 bp = (drmach_board_t *)id;
2022 2022
2023 2023 return ((drmach_domain.floating & (1ULL << bp->bnum)) ? 1 : 0);
2024 2024 }
2025 2025
2026 2026 static ACPI_STATUS
2027 2027 drmach_board_check_dependent_cb(ACPI_HANDLE hdl, UINT32 lvl, void *ctx,
2028 2028 void **retval)
2029 2029 {
2030 2030 uint32_t bdnum;
2031 2031 drmach_board_t *bp;
2032 2032 ACPI_STATUS rc = AE_OK;
2033 2033 int cmd = (int)(intptr_t)ctx;
2034 2034
2035 2035 ASSERT(hdl != NULL);
2036 2036 ASSERT(lvl == UINT32_MAX);
2037 2037 ASSERT(retval != NULL);
2038 2038
2039 2039 /* Skip non-board devices. */
2040 2040 if (!acpidev_dr_device_is_board(hdl)) {
2041 2041 return (AE_OK);
2042 2042 } else if (ACPI_FAILURE(acpidev_dr_get_board_number(hdl, &bdnum))) {
2043 2043 DRMACH_PR("!drmach_board_check_dependent_cb: failed to get "
2044 2044 "board number for object %p.\n", hdl);
2045 2045 return (AE_ERROR);
2046 2046 } else if (bdnum > MAX_BOARDS) {
2047 2047 DRMACH_PR("!drmach_board_check_dependent_cb: board number %u "
2048 2048 "is too big, max %u.", bdnum, MAX_BOARDS);
2049 2049 return (AE_ERROR);
2050 2050 }
2051 2051
2052 2052 bp = drmach_get_board_by_bnum(bdnum);
2053 2053 switch (cmd) {
2054 2054 case SBD_CMD_CONNECT:
2055 2055 /*
2056 2056 * Its parent board should be present, assigned, powered and
2057 2057 * connected when connecting the child board.
2058 2058 */
2059 2059 if (bp == NULL) {
2060 2060 *retval = hdl;
2061 2061 rc = AE_ERROR;
2062 2062 } else {
2063 2063 bp->powered = acpidev_dr_device_is_powered(hdl);
2064 2064 if (!bp->connected || !bp->powered || !bp->assigned) {
2065 2065 *retval = hdl;
2066 2066 rc = AE_ERROR;
2067 2067 }
2068 2068 }
2069 2069 break;
2070 2070
2071 2071 case SBD_CMD_POWERON:
2072 2072 /*
2073 2073 * Its parent board should be present, assigned and powered when
2074 2074 * powering on the child board.
2075 2075 */
2076 2076 if (bp == NULL) {
2077 2077 *retval = hdl;
2078 2078 rc = AE_ERROR;
2079 2079 } else {
2080 2080 bp->powered = acpidev_dr_device_is_powered(hdl);
2081 2081 if (!bp->powered || !bp->assigned) {
2082 2082 *retval = hdl;
2083 2083 rc = AE_ERROR;
2084 2084 }
2085 2085 }
2086 2086 break;
2087 2087
2088 2088 case SBD_CMD_ASSIGN:
2089 2089 /*
2090 2090 * Its parent board should be present and assigned when
2091 2091 * assigning the child board.
2092 2092 */
2093 2093 if (bp == NULL) {
2094 2094 *retval = hdl;
2095 2095 rc = AE_ERROR;
2096 2096 } else if (!bp->assigned) {
2097 2097 *retval = hdl;
2098 2098 rc = AE_ERROR;
2099 2099 }
2100 2100 break;
2101 2101
2102 2102 case SBD_CMD_DISCONNECT:
2103 2103 /*
2104 2104 * The child board should be disconnected if present when
2105 2105 * disconnecting its parent board.
2106 2106 */
2107 2107 if (bp != NULL && bp->connected) {
2108 2108 *retval = hdl;
2109 2109 rc = AE_ERROR;
2110 2110 }
2111 2111 break;
2112 2112
2113 2113 case SBD_CMD_POWEROFF:
2114 2114 /*
2115 2115 * The child board should be disconnected and powered off if
2116 2116 * present when powering off its parent board.
2117 2117 */
2118 2118 if (bp != NULL) {
2119 2119 bp->powered = acpidev_dr_device_is_powered(hdl);
2120 2120 if (bp->connected || bp->powered) {
2121 2121 *retval = hdl;
2122 2122 rc = AE_ERROR;
2123 2123 }
2124 2124 }
2125 2125 break;
2126 2126
2127 2127 case SBD_CMD_UNASSIGN:
2128 2128 /*
2129 2129 * The child board should be disconnected, powered off and
2130 2130 * unassigned if present when unassigning its parent board.
2131 2131 */
2132 2132 if (bp != NULL) {
2133 2133 bp->powered = acpidev_dr_device_is_powered(hdl);
2134 2134 if (bp->connected || bp->powered || bp->assigned) {
2135 2135 *retval = hdl;
2136 2136 rc = AE_ERROR;
2137 2137 }
2138 2138 }
2139 2139 break;
2140 2140
2141 2141 default:
2142 2142 /* Return success for all other commands. */
2143 2143 break;
2144 2144 }
2145 2145
2146 2146 return (rc);
2147 2147 }
2148 2148
2149 2149 sbd_error_t *
2150 2150 drmach_board_check_dependent(int cmd, drmach_board_t *bp)
2151 2151 {
2152 2152 int reverse;
2153 2153 char *name;
2154 2154 sbd_error_t *err = NULL;
2155 2155 DRMACH_HANDLE hdl;
2156 2156 DRMACH_HANDLE dp = NULL;
2157 2157
2158 2158 ASSERT(bp != NULL);
2159 2159 ASSERT(DRMACH_IS_BOARD_ID(bp));
2160 2160 ASSERT(RW_LOCK_HELD(&drmach_boards_rwlock));
2161 2161
2162 2162 hdl = drmach_node_get_dnode(bp->tree);
2163 2163 if (hdl == NULL)
2164 2164 return (drerr_new(0, EX86_INAPPROP, NULL));
2165 2165
2166 2166 switch (cmd) {
2167 2167 case SBD_CMD_ASSIGN:
2168 2168 case SBD_CMD_POWERON:
2169 2169 case SBD_CMD_CONNECT:
2170 2170 if (ACPI_SUCCESS(acpidev_dr_device_walk_ejd(hdl,
2171 2171 &drmach_board_check_dependent_cb,
2172 2172 (void *)(intptr_t)cmd, &dp))) {
2173 2173 return (NULL);
2174 2174 }
2175 2175 reverse = 0;
2176 2176 break;
2177 2177
2178 2178 case SBD_CMD_UNASSIGN:
2179 2179 case SBD_CMD_POWEROFF:
2180 2180 case SBD_CMD_DISCONNECT:
2181 2181 if (ACPI_SUCCESS(acpidev_dr_device_walk_edl(hdl,
2182 2182 &drmach_board_check_dependent_cb,
2183 2183 (void *)(intptr_t)cmd, &dp))) {
2184 2184 return (NULL);
2185 2185 }
2186 2186 reverse = 1;
2187 2187 break;
2188 2188
2189 2189 default:
2190 2190 return (drerr_new(0, EX86_INAPPROP, NULL));
2191 2191 }
2192 2192
2193 2193 if (dp == NULL) {
2194 2194 return (drerr_new(1, EX86_WALK_DEPENDENCY, "%s", bp->cm.name));
2195 2195 }
2196 2196 name = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
2197 2197 if (ACPI_FAILURE(acpidev_dr_get_board_name(dp, name, MAXPATHLEN))) {
2198 2198 err = drerr_new(1, EX86_WALK_DEPENDENCY, "%s", bp->cm.name);
2199 2199 } else if (reverse == 0) {
2200 2200 err = drerr_new(1, EX86_WALK_DEPENDENCY,
2201 2201 "%s, depends on board %s", bp->cm.name, name);
2202 2202 } else {
2203 2203 err = drerr_new(1, EX86_WALK_DEPENDENCY,
2204 2204 "board %s depends on %s", name, bp->cm.name);
2205 2205 }
2206 2206 kmem_free(name, MAXPATHLEN);
2207 2207
2208 2208 return (err);
2209 2209 }
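
/*
 * A concrete illustration of the dependency checks above (hypothetical
 * board numbers): if board 2's _EJD object points at board 1, the forward
 * walk used for ASSIGN/POWERON/CONNECT requires board 1 to already be
 * assigned/powered/connected before the same operation is allowed on
 * board 2, while the reverse walk over the eject device list used for
 * UNASSIGN/POWEROFF/DISCONNECT refuses to release board 1 as long as
 * board 2 is still connected, powered or assigned.
 */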
2210 2210
2211 2211 sbd_error_t *
2212 2212 drmach_board_assign(int bnum, drmachid_t *id)
2213 2213 {
2214 2214 sbd_error_t *err = NULL;
2215 2215
2216 2216 if (bnum < 0) {
2217 2217 return (drerr_new(1, EX86_BNUM, "%d", bnum));
2218 2218 }
2219 2219
2220 2220 rw_enter(&drmach_boards_rwlock, RW_WRITER);
2221 2221
2222 2222 if (drmach_array_get(drmach_boards, bnum, id) == -1) {
2223 2223 err = drerr_new(1, EX86_BNUM, "%d", bnum);
2224 2224 } else {
2225 2225 drmach_board_t *bp;
2226 2226
2227 2227 /*
2228 2228 * Board has already been created, downgrade to reader.
2229 2229 */
2230 2230 if (*id)
2231 2231 rw_downgrade(&drmach_boards_rwlock);
2232 2232
2233 2233 bp = *id;
2234 2234 if (!(*id))
2235 2235 bp = *id =
2236 2236 (drmachid_t)drmach_board_new(bnum, 0);
2237 2237
2238 2238 if (bp == NULL) {
2239 2239 DRMACH_PR("!drmach_board_assign: failed to create "
2240 2240 "object for board %d.", bnum);
2241 2241 err = drerr_new(1, EX86_BNUM, "%d", bnum);
2242 2242 } else {
2243 2243 err = drmach_board_check_dependent(SBD_CMD_ASSIGN, bp);
2244 2244 if (err == NULL)
2245 2245 bp->assigned = 1;
2246 2246 }
2247 2247 }
2248 2248
2249 2249 rw_exit(&drmach_boards_rwlock);
2250 2250
2251 2251 return (err);
2252 2252 }
2253 2253
2254 2254 sbd_error_t *
2255 2255 drmach_board_unassign(drmachid_t id)
2256 2256 {
2257 2257 drmach_board_t *bp;
2258 2258 sbd_error_t *err;
2259 2259 drmach_status_t stat;
2260 2260
2261 2261 if (DRMACH_NULL_ID(id))
2262 2262 return (NULL);
2263 2263
2264 2264 if (!DRMACH_IS_BOARD_ID(id)) {
2265 2265 return (drerr_new(0, EX86_INAPPROP, NULL));
2266 2266 }
2267 2267 bp = id;
2268 2268
2269 2269 rw_enter(&drmach_boards_rwlock, RW_WRITER);
2270 2270
2271 2271 err = drmach_board_status(id, &stat);
2272 2272 if (err) {
2273 2273 rw_exit(&drmach_boards_rwlock);
2274 2274 return (err);
2275 2275 }
2276 2276
2277 2277 if (stat.configured || stat.busy) {
2278 2278 err = drerr_new(0, EX86_CONFIGBUSY, bp->cm.name);
2279 2279 } else if (bp->connected) {
2280 2280 err = drerr_new(0, EX86_CONNECTBUSY, bp->cm.name);
2281 2281 } else if (stat.powered) {
2282 2282 err = drerr_new(0, EX86_POWERBUSY, bp->cm.name);
2283 2283 } else {
2284 2284 err = drmach_board_check_dependent(SBD_CMD_UNASSIGN, bp);
2285 2285 if (err == NULL) {
2286 2286 if (drmach_array_set(drmach_boards, bp->bnum, 0) != 0)
2287 2287 err = DRMACH_INTERNAL_ERROR();
2288 2288 else
2289 2289 drmach_board_dispose(bp);
2290 2290 }
2291 2291 }
2292 2292
2293 2293 rw_exit(&drmach_boards_rwlock);
2294 2294
2295 2295 return (err);
2296 2296 }
2297 2297
2298 2298 sbd_error_t *
2299 2299 drmach_board_poweron(drmachid_t id)
2300 2300 {
2301 2301 drmach_board_t *bp;
2302 2302 sbd_error_t *err = NULL;
2303 2303 DRMACH_HANDLE hdl;
2304 2304
2305 2305 if (!DRMACH_IS_BOARD_ID(id))
2306 2306 return (drerr_new(0, EX86_INAPPROP, NULL));
2307 2307 bp = id;
2308 2308
2309 2309 hdl = drmach_node_get_dnode(bp->tree);
2310 2310 if (hdl == NULL)
2311 2311 return (drerr_new(0, EX86_INAPPROP, NULL));
2312 2312
2313 2313 bp->powered = drmach_board_check_power(bp);
2314 2314 if (bp->powered) {
2315 2315 return (NULL);
2316 2316 }
2317 2317
2318 2318 rw_enter(&drmach_boards_rwlock, RW_WRITER);
2319 2319 err = drmach_board_check_dependent(SBD_CMD_POWERON, bp);
2320 2320 if (err == NULL) {
2321 2321 acpidev_dr_lock_all();
2322 2322 if (ACPI_FAILURE(acpidev_dr_device_poweron(hdl)))
2323 2323 err = drerr_new(0, EX86_POWERON, NULL);
2324 2324 acpidev_dr_unlock_all();
2325 2325
2326 2326 /* Check whether the board is powered on. */
2327 2327 bp->powered = drmach_board_check_power(bp);
2328 2328 if (err == NULL && bp->powered == 0)
2329 2329 err = drerr_new(0, EX86_POWERON, NULL);
2330 2330 }
2331 2331 rw_exit(&drmach_boards_rwlock);
2332 2332
2333 2333 return (err);
2334 2334 }
2335 2335
2336 2336 sbd_error_t *
2337 2337 drmach_board_poweroff(drmachid_t id)
2338 2338 {
2339 2339 sbd_error_t *err = NULL;
2340 2340 drmach_board_t *bp;
2341 2341 drmach_status_t stat;
2342 2342 DRMACH_HANDLE hdl;
2343 2343
2344 2344 if (DRMACH_NULL_ID(id))
2345 2345 return (NULL);
2346 2346
2347 2347 if (!DRMACH_IS_BOARD_ID(id))
2348 2348 return (drerr_new(0, EX86_INAPPROP, NULL));
2349 2349 bp = id;
2350 2350
2351 2351 hdl = drmach_node_get_dnode(bp->tree);
2352 2352 if (hdl == NULL)
2353 2353 return (drerr_new(0, EX86_INAPPROP, NULL));
2354 2354
2355 2355 /* Check whether the board is busy, configured or connected. */
2356 2356 err = drmach_board_status(id, &stat);
2357 2357 if (err != NULL)
2358 2358 return (err);
2359 2359 if (stat.configured || stat.busy) {
2360 2360 return (drerr_new(0, EX86_CONFIGBUSY, bp->cm.name));
2361 2361 } else if (bp->connected) {
2362 2362 return (drerr_new(0, EX86_CONNECTBUSY, bp->cm.name));
2363 2363 }
2364 2364
2365 2365 bp->powered = drmach_board_check_power(bp);
2366 2366 if (bp->powered == 0) {
2367 2367 return (NULL);
2368 2368 }
2369 2369
2370 2370 rw_enter(&drmach_boards_rwlock, RW_WRITER);
2371 2371 err = drmach_board_check_dependent(SBD_CMD_POWEROFF, bp);
2372 2372 if (err == NULL) {
2373 2373 acpidev_dr_lock_all();
2374 2374 if (ACPI_FAILURE(acpidev_dr_device_poweroff(hdl)))
2375 2375 err = drerr_new(0, EX86_POWEROFF, NULL);
2376 2376 acpidev_dr_unlock_all();
2377 2377
2378 2378 bp->powered = drmach_board_check_power(bp);
2379 2379 if (err == NULL && bp->powered != 0)
2380 2380 err = drerr_new(0, EX86_POWEROFF, NULL);
2381 2381 }
2382 2382 rw_exit(&drmach_boards_rwlock);
2383 2383
2384 2384 return (err);
2385 2385 }
2386 2386
2387 2387 sbd_error_t *
2388 2388 drmach_board_test(drmachid_t id, drmach_opts_t *opts, int force)
2389 2389 {
2390 2390 _NOTE(ARGUNUSED(opts, force));
2391 2391
2392 2392 drmach_board_t *bp;
2393 2393 DRMACH_HANDLE hdl;
2394 2394
2395 2395 if (DRMACH_NULL_ID(id))
2396 2396 return (NULL);
2397 2397
2398 2398 if (!DRMACH_IS_BOARD_ID(id))
2399 2399 return (drerr_new(0, EX86_INAPPROP, NULL));
2400 2400 bp = id;
2401 2401
2402 2402 hdl = drmach_node_get_dnode(bp->tree);
2403 2403 if (hdl == NULL)
2404 2404 return (drerr_new(0, EX86_INAPPROP, NULL));
2405 2405
2406 2406 if (ACPI_FAILURE(acpidev_dr_device_check_status(hdl)))
2407 2407 return (drerr_new(0, EX86_IN_FAILURE, NULL));
2408 2408
2409 2409 return (NULL);
2410 2410 }
2411 2411
2412 2412 sbd_error_t *
2413 2413 drmach_board_connect(drmachid_t id, drmach_opts_t *opts)
2414 2414 {
2415 2415 _NOTE(ARGUNUSED(opts));
2416 2416
2417 2417 sbd_error_t *err = NULL;
2418 2418 drmach_board_t *bp = (drmach_board_t *)id;
2419 2419 DRMACH_HANDLE hdl;
2420 2420
2421 2421 if (!DRMACH_IS_BOARD_ID(id))
2422 2422 return (drerr_new(0, EX86_INAPPROP, NULL));
2423 2423 bp = (drmach_board_t *)id;
2424 2424
2425 2425 hdl = drmach_node_get_dnode(bp->tree);
2426 2426 if (hdl == NULL)
2427 2427 return (drerr_new(0, EX86_INAPPROP, NULL));
2428 2428
2429 2429 rw_enter(&drmach_boards_rwlock, RW_WRITER);
2430 2430 err = drmach_board_check_dependent(SBD_CMD_CONNECT, bp);
2431 2431 if (err == NULL) {
2432 2432 acpidev_dr_lock_all();
2433 2433 if (ACPI_FAILURE(acpidev_dr_device_insert(hdl))) {
2434 2434 (void) acpidev_dr_device_remove(hdl);
2435 2435 err = drerr_new(1, EX86_PROBE, NULL);
2436 2436 } else {
2437 2437 bp->connected = 1;
2438 2438 }
2439 2439 acpidev_dr_unlock_all();
2440 2440 }
2441 2441 rw_exit(&drmach_boards_rwlock);
2442 2442
2443 2443 return (err);
2444 2444 }
2445 2445
2446 2446 sbd_error_t *
2447 2447 drmach_board_disconnect(drmachid_t id, drmach_opts_t *opts)
2448 2448 {
2449 2449 _NOTE(ARGUNUSED(opts));
2450 2450
2451 2451 DRMACH_HANDLE hdl;
2452 2452 drmach_board_t *bp;
2453 2453 drmach_status_t stat;
2454 2454 sbd_error_t *err = NULL;
2455 2455
2456 2456 if (DRMACH_NULL_ID(id))
2457 2457 return (NULL);
2458 2458 if (!DRMACH_IS_BOARD_ID(id))
2459 2459 return (drerr_new(0, EX86_INAPPROP, NULL));
2460 2460 bp = (drmach_board_t *)id;
2461 2461
2462 2462 hdl = drmach_node_get_dnode(bp->tree);
2463 2463 if (hdl == NULL)
2464 2464 return (drerr_new(0, EX86_INAPPROP, NULL));
2465 2465
2466 2466 /* Check whether the board is busy or configured. */
2467 2467 err = drmach_board_status(id, &stat);
2468 2468 if (err != NULL)
2469 2469 return (err);
2470 2470 if (stat.configured || stat.busy)
2471 2471 return (drerr_new(0, EX86_CONFIGBUSY, bp->cm.name));
2472 2472
2473 2473 rw_enter(&drmach_boards_rwlock, RW_WRITER);
2474 2474 err = drmach_board_check_dependent(SBD_CMD_DISCONNECT, bp);
2475 2475 if (err == NULL) {
2476 2476 acpidev_dr_lock_all();
2477 2477 if (ACPI_SUCCESS(acpidev_dr_device_remove(hdl))) {
2478 2478 bp->connected = 0;
2479 2479 } else {
2480 2480 err = drerr_new(1, EX86_DEPROBE, bp->cm.name);
2481 2481 }
2482 2482 acpidev_dr_unlock_all();
2483 2483 }
2484 2484 rw_exit(&drmach_boards_rwlock);
2485 2485
2486 2486 return (err);
2487 2487 }
2488 2488
2489 2489 sbd_error_t *
2490 2490 drmach_board_deprobe(drmachid_t id)
2491 2491 {
2492 2492 drmach_board_t *bp;
2493 2493
2494 2494 if (!DRMACH_IS_BOARD_ID(id))
2495 2495 return (drerr_new(0, EX86_INAPPROP, NULL));
2496 2496 bp = id;
2497 2497
2498 2498 cmn_err(CE_CONT, "DR: detach board %d\n", bp->bnum);
2499 2499
2500 2500 if (bp->devices) {
2501 2501 drmach_array_dispose(bp->devices, drmach_device_dispose);
2502 2502 bp->devices = NULL;
2503 2503 }
2504 2504
2505 2505 bp->boot_board = 0;
2506 2506
2507 2507 return (NULL);
2508 2508 }
2509 2509
2510 2510 /*
2511 2511 * CPU specific interfaces to support dr driver
2512 2512 */
2513 2513 sbd_error_t *
2514 2514 drmach_cpu_disconnect(drmachid_t id)
2515 2515 {
2516 2516 if (!DRMACH_IS_CPU_ID(id))
2517 2517 return (drerr_new(0, EX86_INAPPROP, NULL));
2518 2518
2519 2519 return (NULL);
2520 2520 }
2521 2521
2522 2522 sbd_error_t *
2523 2523 drmach_cpu_get_id(drmachid_t id, processorid_t *cpuid)
2524 2524 {
2525 2525 drmach_cpu_t *cpu;
2526 2526
2527 2527 if (!DRMACH_IS_CPU_ID(id))
2528 2528 return (drerr_new(0, EX86_INAPPROP, NULL));
2529 2529 cpu = (drmach_cpu_t *)id;
2530 2530
2531 2531 if (cpu->cpuid == -1) {
2532 2532 if (ACPI_SUCCESS(acpica_get_cpu_id_by_object(
2533 2533 drmach_node_get_dnode(cpu->dev.node), cpuid))) {
2534 2534 cpu->cpuid = *cpuid;
2535 2535 } else {
2536 2536 *cpuid = -1;
2537 2537 }
2538 2538 } else {
2539 2539 *cpuid = cpu->cpuid;
2540 2540 }
2541 2541
2542 2542 return (NULL);
2543 2543 }
2544 2544
2545 2545 sbd_error_t *
2546 2546 drmach_cpu_get_impl(drmachid_t id, int *ip)
2547 2547 {
2548 2548 if (!DRMACH_IS_CPU_ID(id))
2549 2549 return (drerr_new(0, EX86_INAPPROP, NULL));
2550 2550
2551 2551 	/* Assume all CPUs in the system are homogeneous. */
2552 2552 *ip = X86_CPU_IMPL_UNKNOWN;
2553 2553
2554 2554 kpreempt_disable();
2555 2555 if (cpuid_getvendor(CPU) == X86_VENDOR_Intel) {
2556 2556 /* NHM-EX CPU */
2557 2557 if (cpuid_getfamily(CPU) == 0x6 &&
2558 2558 cpuid_getmodel(CPU) == 0x2e) {
2559 2559 *ip = X86_CPU_IMPL_NEHALEM_EX;
2560 2560 }
2561 2561 }
2562 2562 kpreempt_enable();
2563 2563
2564 2564 return (NULL);
2565 2565 }
2566 2566
2567 2567 /*
2568 2568 * Memory specific interfaces to support dr driver
2569 2569 */
2570 2570
2571 2571 /*
2572 2572 * When drmach_mem_new() is called, the mp->base_pa field is set to the base
2573 2573 * address of configured memory if there's configured memory on the board,
2574 2574  * otherwise set to UINT64_MAX. For a hot-added memory board, there is no
2575 2575  * configured memory when drmach_mem_new() is called, so mp->base_pa is set
2576 2576  * to UINT64_MAX and we need to set a correct value for it after the memory
2577 2577  * hot-add operation completes.
2578 2578  * A hot-added memory board may contain multiple memory segments, and
2579 2579  * drmach_mem_add_span() will be called once for each segment, so we can't
2580 2580  * rely on the basepa argument. It is also possible that only part of a memory
2581 2581  * segment is added into the OS, so we need to intersect with the phys_install
2582 2582  * list to get the real base address of configured memory on the board.
2583 2583 */
2584 2584 sbd_error_t *
2585 2585 drmach_mem_add_span(drmachid_t id, uint64_t basepa, uint64_t size)
2586 2586 {
2587 2587 _NOTE(ARGUNUSED(basepa));
2588 2588
2589 2589 uint64_t nbytes = 0;
2590 2590 uint64_t endpa;
2591 2591 drmach_mem_t *mp;
2592 2592 struct memlist *ml2;
2593 2593 struct memlist *p;
2594 2594
2595 2595 ASSERT(size != 0);
2596 2596
2597 2597 if (!DRMACH_IS_MEM_ID(id))
2598 2598 return (drerr_new(0, EX86_INAPPROP, NULL));
2599 2599 mp = (drmach_mem_t *)id;
2600 2600
2601 2601 /* Compute basepa and size of installed memory. */
2602 2602 endpa = _ptob64(physmax + 1);
2603 2603 memlist_read_lock();
2604 2604 ml2 = memlist_dup(phys_install);
2605 2605 memlist_read_unlock();
2606 2606 ml2 = memlist_del_span(ml2, 0ull, mp->slice_base);
2607 2607 if (ml2 && endpa > mp->slice_top) {
2608 2608 ml2 = memlist_del_span(ml2, mp->slice_top,
2609 2609 endpa - mp->slice_top);
2610 2610 }
2611 2611
2612 2612 ASSERT(ml2);
2613 2613 if (ml2) {
2614 2614 for (p = ml2; p; p = p->ml_next) {
2615 2615 nbytes += p->ml_size;
2616 2616 if (mp->base_pa > p->ml_address)
2617 2617 mp->base_pa = p->ml_address;
2618 2618 }
2619 2619 ASSERT(nbytes > 0);
2620 2620 mp->nbytes += nbytes;
2621 2621 memlist_delete(ml2);
2622 2622 }
2623 2623
2624 2624 return (NULL);
2625 2625 }
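
/*
 * A worked example of the computation above, with illustrative figures:
 * suppose the board's slice spans [0x4_0000_0000, 0x8_0000_0000) and
 * phys_install currently contains [0, 0x1_0000_0000) and
 * [0x4_1000_0000, 0x6_0000_0000).  Deleting everything below slice_base
 * and at or above slice_top leaves only [0x4_1000_0000, 0x6_0000_0000),
 * so mp->base_pa becomes 0x4_1000_0000 and mp->nbytes grows by
 * 0x1_F000_0000 bytes, independent of the basepa argument.
 */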
2626 2626
2627 2627 static sbd_error_t *
2628 2628 drmach_mem_update_lgrp(drmachid_t id)
2629 2629 {
2630 2630 ACPI_STATUS rc;
2631 2631 DRMACH_HANDLE hdl;
2632 2632 void *hdlp;
2633 2633 drmach_mem_t *mp;
2634 2634 update_membounds_t umb;
2635 2635
2636 2636 if (!DRMACH_IS_MEM_ID(id))
2637 2637 return (drerr_new(0, EX86_INAPPROP, NULL));
2638 2638 mp = (drmach_mem_t *)id;
2639 2639 /* No need to update lgrp if memory is already installed. */
2640 2640 if (mp->nbytes != 0)
2641 2641 return (NULL);
2642 2642 /* No need to update lgrp if lgrp is disabled. */
2643 2643 if (max_mem_nodes == 1)
2644 2644 return (NULL);
2645 2645
2646 2646 /* Add memory to lgroup */
2647 2647 hdl = mp->dev.node->get_dnode(mp->dev.node);
2648 2648 rc = acpidev_dr_device_get_memory_index(hdl, &umb.u_device_id);
2649 2649 ASSERT(ACPI_SUCCESS(rc));
2650 2650 if (ACPI_FAILURE(rc)) {
2651 2651 cmn_err(CE_WARN, "drmach: failed to get device id of memory, "
2652 2652 "can't update lgrp information.");
2653 2653 return (drerr_new(0, EX86_INTERNAL, NULL));
2654 2654 }
2655 2655 rc = acpidev_dr_get_mem_numa_info(hdl, mp->memlist, &hdlp,
2656 2656 &umb.u_domain, &umb.u_sli_cnt, &umb.u_sli_ptr);
2657 2657 ASSERT(ACPI_SUCCESS(rc));
2658 2658 if (ACPI_FAILURE(rc)) {
2659 2659 cmn_err(CE_WARN, "drmach: failed to get lgrp info of memory, "
2660 2660 "can't update lgrp information.");
2661 2661 return (drerr_new(0, EX86_INTERNAL, NULL));
2662 2662 }
2663 2663 umb.u_base = (uint64_t)mp->slice_base;
2664 2664 umb.u_length = (uint64_t)(mp->slice_top - mp->slice_base);
2665 2665 lgrp_plat_config(LGRP_CONFIG_MEM_ADD, (uintptr_t)&umb);
2666 2666 acpidev_dr_free_mem_numa_info(hdlp);
2667 2667
2668 2668 return (NULL);
2669 2669 }
2670 2670
2671 2671 sbd_error_t *
2672 2672 drmach_mem_enable(drmachid_t id)
2673 2673 {
2674 2674 if (!DRMACH_IS_MEM_ID(id))
2675 2675 return (drerr_new(0, EX86_INAPPROP, NULL));
2676 2676 else
2677 2677 return (NULL);
2678 2678 }
2679 2679
2680 2680 sbd_error_t *
2681 2681 drmach_mem_get_info(drmachid_t id, drmach_mem_info_t *mem)
2682 2682 {
2683 2683 drmach_mem_t *mp;
2684 2684
2685 2685 if (!DRMACH_IS_MEM_ID(id))
2686 2686 return (drerr_new(0, EX86_INAPPROP, NULL));
2687 2687 mp = (drmach_mem_t *)id;
2688 2688
2689 2689 /*
2690 2690 * This is only used by dr to round up/down the memory
2691 2691 * for copying.
2692 2692 */
2693 2693 mem->mi_alignment_mask = mp->mem_alignment - 1;
2694 2694 mem->mi_basepa = mp->base_pa;
2695 2695 mem->mi_size = mp->nbytes;
2696 2696 mem->mi_slice_base = mp->slice_base;
2697 2697 mem->mi_slice_top = mp->slice_top;
2698 2698 mem->mi_slice_size = mp->slice_size;
2699 2699
2700 2700 return (NULL);
2701 2701 }
2702 2702
2703 2703 sbd_error_t *
2704 2704 drmach_mem_get_slice_info(drmachid_t id,
2705 2705 uint64_t *bp, uint64_t *ep, uint64_t *sp)
2706 2706 {
2707 2707 drmach_mem_t *mp;
2708 2708
2709 2709 if (!DRMACH_IS_MEM_ID(id))
2710 2710 return (drerr_new(0, EX86_INAPPROP, NULL));
2711 2711 mp = (drmach_mem_t *)id;
2712 2712
2713 2713 if (bp)
2714 2714 *bp = mp->slice_base;
2715 2715 if (ep)
2716 2716 *ep = mp->slice_top;
2717 2717 if (sp)
2718 2718 *sp = mp->slice_size;
2719 2719
2720 2720 return (NULL);
2721 2721 }
2722 2722
2723 2723 sbd_error_t *
2724 2724 drmach_mem_get_memlist(drmachid_t id, struct memlist **ml)
2725 2725 {
2726 2726 #ifdef DEBUG
2727 2727 int rv;
2728 2728 #endif
2729 2729 drmach_mem_t *mem;
2730 2730 struct memlist *mlist;
2731 2731
2732 2732 if (!DRMACH_IS_MEM_ID(id))
2733 2733 return (drerr_new(0, EX86_INAPPROP, NULL));
2734 2734 mem = (drmach_mem_t *)id;
2735 2735
2736 2736 mlist = memlist_dup(mem->memlist);
2737 2737 *ml = mlist;
2738 2738
2739 2739 #ifdef DEBUG
2740 2740 /*
2741 2741 * Make sure the incoming memlist doesn't already
2742 2742 * intersect with what's present in the system (phys_install).
2743 2743 */
2744 2744 memlist_read_lock();
2745 2745 rv = memlist_intersect(phys_install, mlist);
2746 2746 memlist_read_unlock();
2747 2747 if (rv) {
2748 2748 DRMACH_PR("Derived memlist intersects with phys_install\n");
2749 2749 memlist_dump(mlist);
2750 2750
2751 2751 DRMACH_PR("phys_install memlist:\n");
2752 2752 memlist_dump(phys_install);
2753 2753
2754 2754 memlist_delete(mlist);
2755 2755 return (DRMACH_INTERNAL_ERROR());
2756 2756 }
2757 2757
2758 2758 DRMACH_PR("Derived memlist:");
2759 2759 memlist_dump(mlist);
2760 2760 #endif
2761 2761
2762 2762 return (NULL);
2763 2763 }
2764 2764
2765 2765 processorid_t
2766 2766 drmach_mem_cpu_affinity(drmachid_t id)
2767 2767 {
2768 2768 _NOTE(ARGUNUSED(id));
2769 2769
2770 2770 return (CPU_CURRENT);
2771 2771 }
2772 2772
2773 2773 int
2774 2774 drmach_copy_rename_need_suspend(drmachid_t id)
2775 2775 {
2776 2776 _NOTE(ARGUNUSED(id));
2777 2777
2778 2778 return (0);
2779 2779 }
2780 2780
2781 2781 /*
2782 2782 * IO specific interfaces to support dr driver
2783 2783 */
2784 2784 sbd_error_t *
2785 2785 drmach_io_pre_release(drmachid_t id)
2786 2786 {
2787 2787 if (!DRMACH_IS_IO_ID(id))
2788 2788 return (drerr_new(0, EX86_INAPPROP, NULL));
2789 2789
2790 2790 return (NULL);
2791 2791 }
2792 2792
2793 2793 sbd_error_t *
2794 2794 drmach_io_unrelease(drmachid_t id)
2795 2795 {
2796 2796 if (!DRMACH_IS_IO_ID(id))
2797 2797 return (drerr_new(0, EX86_INAPPROP, NULL));
2798 2798
2799 2799 return (NULL);
2800 2800 }
2801 2801
2802 2802 sbd_error_t *
2803 2803 drmach_io_post_release(drmachid_t id)
2804 2804 {
2805 2805 _NOTE(ARGUNUSED(id));
2806 2806
2807 2807 return (NULL);
2808 2808 }
2809 2809
2810 2810 sbd_error_t *
2811 2811 drmach_io_post_attach(drmachid_t id)
2812 2812 {
2813 2813 if (!DRMACH_IS_IO_ID(id))
2814 2814 return (drerr_new(0, EX86_INAPPROP, NULL));
2815 2815
2816 2816 return (NULL);
2817 2817 }
2818 2818
2819 2819 sbd_error_t *
2820 2820 drmach_io_is_attached(drmachid_t id, int *yes)
2821 2821 {
2822 2822 drmach_device_t *dp;
2823 2823 dev_info_t *dip;
2824 2824 int state;
2825 2825
2826 2826 if (!DRMACH_IS_IO_ID(id))
2827 2827 return (drerr_new(0, EX86_INAPPROP, NULL));
2828 2828 dp = id;
2829 2829
2830 2830 dip = dp->node->getdip(dp->node);
2831 2831 if (dip == NULL) {
2832 2832 *yes = 0;
2833 2833 return (NULL);
2834 2834 }
2835 2835
2836 2836 state = ddi_get_devstate(dip);
2837 2837 *yes = ((i_ddi_node_state(dip) >= DS_ATTACHED) ||
2838 2838 (state == DDI_DEVSTATE_UP));
2839 2839
2840 2840 return (NULL);
2841 2841 }
2842 2842
2843 2843 /*
2844 2844 * Miscellaneous interfaces to support dr driver
2845 2845 */
2846 2846 int
2847 2847 drmach_verify_sr(dev_info_t *dip, int sflag)
2848 2848 {
2849 2849 _NOTE(ARGUNUSED(dip, sflag));
2850 2850
2851 2851 return (0);
2852 2852 }
2853 2853
2854 2854 void
2855 2855 drmach_suspend_last(void)
2856 2856 {
2857 2857 }
2858 2858
2859 2859 void
2860 2860 drmach_resume_first(void)
2861 2861 {
2862 2862 }
2863 2863
2864 2864 /*
2865 2865 * Log a DR sysevent.
2866 2866 * Return value: 0 success, non-zero failure.
2867 2867 */
2868 2868 int
2869 2869 drmach_log_sysevent(int board, char *hint, int flag, int verbose)
2870 2870 {
2871 2871 sysevent_t *ev = NULL;
2872 2872 sysevent_id_t eid;
2873 2873 int rv, km_flag;
2874 2874 sysevent_value_t evnt_val;
2875 2875 sysevent_attr_list_t *evnt_attr_list = NULL;
2876 2876 sbd_error_t *err;
2877 2877 char attach_pnt[MAXNAMELEN];
2878 2878
2879 2879 km_flag = (flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP;
2880 2880 attach_pnt[0] = '\0';
2881 2881 err = drmach_board_name(board, attach_pnt, MAXNAMELEN);
2882 2882 if (err != NULL) {
2883 2883 sbd_err_clear(&err);
2884 2884 rv = -1;
2885 2885 goto logexit;
2886 2886 }
2887 2887 if (verbose) {
2888 2888 DRMACH_PR("drmach_log_sysevent: %s %s, flag: %d, verbose: %d\n",
2889 2889 attach_pnt, hint, flag, verbose);
2890 2890 }
2891 2891
2892 2892 if ((ev = sysevent_alloc(EC_DR, ESC_DR_AP_STATE_CHANGE,
2893 2893 SUNW_KERN_PUB"dr", km_flag)) == NULL) {
2894 2894 rv = -2;
2895 2895 goto logexit;
2896 2896 }
2897 2897 evnt_val.value_type = SE_DATA_TYPE_STRING;
2898 2898 evnt_val.value.sv_string = attach_pnt;
2899 2899 if ((rv = sysevent_add_attr(&evnt_attr_list, DR_AP_ID, &evnt_val,
2900 2900 km_flag)) != 0)
2901 2901 goto logexit;
2902 2902
2903 2903 evnt_val.value_type = SE_DATA_TYPE_STRING;
2904 2904 evnt_val.value.sv_string = hint;
2905 2905 if ((rv = sysevent_add_attr(&evnt_attr_list, DR_HINT, &evnt_val,
2906 2906 km_flag)) != 0) {
2907 2907 sysevent_free_attr(evnt_attr_list);
2908 2908 goto logexit;
2909 2909 }
2910 2910
2911 2911 (void) sysevent_attach_attributes(ev, evnt_attr_list);
2912 2912
2913 2913 /*
2914 2914 * Log the event but do not sleep waiting for its
2915 2915 * delivery. This provides insulation from syseventd.
2916 2916 */
2917 2917 rv = log_sysevent(ev, SE_NOSLEEP, &eid);
2918 2918
2919 2919 logexit:
2920 2920 if (ev)
2921 2921 sysevent_free(ev);
2922 2922 if ((rv != 0) && verbose)
2923 2923 cmn_err(CE_WARN, "!drmach_log_sysevent failed (rv %d) for %s "
2924 2924 " %s\n", rv, attach_pnt, hint);
2925 2925
2926 2926 return (rv);
2927 2927 }