acpica-unix2-20130823
PANKOVs restructure
--- old/usr/src/uts/intel/io/acpica/osl.c
+++ new/usr/src/uts/intel/io/acpica/osl.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 - * Copyright 2011 Joyent, Inc. All rights reserved.
25 + * Copyright 2012 Joyent, Inc. All rights reserved.
26 + * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
27 + * Copyright 2013 PALO, Richard. All rights reserved.
26 28 */
29 +
27 30 /*
28 31 * Copyright (c) 2009-2010, Intel Corporation.
29 32 * All rights reserved.
30 33 */
34 +
31 35 /*
32 - * ACPI CA OSL for Solaris x86
36 + * x86 ACPI CA OSL
33 37 */
34 38
35 39 #include <sys/types.h>
36 40 #include <sys/kmem.h>
37 41 #include <sys/psm.h>
38 42 #include <sys/pci_cfgspace.h>
39 43 #include <sys/apic.h>
40 44 #include <sys/ddi.h>
41 45 #include <sys/sunddi.h>
42 46 #include <sys/sunndi.h>
43 47 #include <sys/pci.h>
44 48 #include <sys/kobj.h>
45 49 #include <sys/taskq.h>
46 50 #include <sys/strlog.h>
47 51 #include <sys/x86_archext.h>
48 52 #include <sys/note.h>
49 53 #include <sys/promif.h>
50 54
51 -#include <sys/acpi/accommon.h>
55 +#include <acpica/include/accommon.h>
52 56 #include <sys/acpica.h>
53 57
54 58 #define MAX_DAT_FILE_SIZE (64*1024)
55 59
56 60 /* local functions */
57 61 static int CompressEisaID(char *np);
58 62
59 63 static void scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus);
60 64 static int acpica_query_bbn_problem(void);
61 65 static int acpica_find_pcibus(int busno, ACPI_HANDLE *rh);
62 66 static int acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint);
63 67 static ACPI_STATUS acpica_set_devinfo(ACPI_HANDLE, dev_info_t *);
64 68 static ACPI_STATUS acpica_unset_devinfo(ACPI_HANDLE);
65 69 static void acpica_devinfo_handler(ACPI_HANDLE, void *);
66 70
67 71 /*
68 72 * Event queue vars
69 73 */
70 74 int acpica_eventq_init = 0;
71 75 ddi_taskq_t *osl_eventq[OSL_EC_BURST_HANDLER+1];
72 76
73 77 /*
74 78 * Priorities relative to minclsyspri that each taskq
75 79  * runs at; OSL_NOTIFY_HANDLER needs to run at a higher
76 80 * priority than OSL_GPE_HANDLER. There's an implicit
77 81 * assumption that no priority here results in exceeding
78 82 * maxclsyspri.
79 83 * Note: these initializations need to match the order of
80 84 * ACPI_EXECUTE_TYPE.
81 85 */
82 86 int osl_eventq_pri_delta[OSL_EC_BURST_HANDLER+1] = {
83 87 0, /* OSL_GLOBAL_LOCK_HANDLER */
84 88 2, /* OSL_NOTIFY_HANDLER */
85 89 0, /* OSL_GPE_HANDLER */
86 90 0, /* OSL_DEBUGGER_THREAD */
87 91 0, /* OSL_EC_POLL_HANDLER */
88 92 0 /* OSL_EC_BURST_HANDLER */
89 93 };
90 94
91 95 /*
92 96 * Note, if you change this path, you need to update
93 97 * /boot/grub/filelist.ramdisk and pkg SUNWckr/prototype_i386
94 98 */
95 99 static char *acpi_table_path = "/boot/acpi/tables/";
96 100
97 101 /* non-zero while scan_d2a_map() is working */
98 102 static int scanning_d2a_map = 0;
99 103 static int d2a_done = 0;
100 104
101 105 /* features supported by ACPICA and ACPI device configuration. */
102 106 uint64_t acpica_core_features = ACPI_FEATURE_OSI_MODULE;
103 107 static uint64_t acpica_devcfg_features = 0;
104 108
105 109 /* set by acpi_poweroff() in PSMs and appm_ioctl() in acpippm for S3 */
106 110 int acpica_use_safe_delay = 0;
107 111
108 112 /* CPU mapping data */
109 113 struct cpu_map_item {
110 114 processorid_t cpu_id;
111 115 UINT32 proc_id;
112 116 UINT32 apic_id;
113 117 ACPI_HANDLE obj;
114 118 };
115 119
116 120 kmutex_t cpu_map_lock;
117 121 static struct cpu_map_item **cpu_map = NULL;
118 122 static int cpu_map_count_max = 0;
119 123 static int cpu_map_count = 0;
120 124 static int cpu_map_built = 0;
121 125
122 126 /*
123 127 * On systems with the uppc PSM only, acpica_map_cpu() won't be called at all.
124 128 * This flag is used to check for uppc-only systems by detecting whether
125 129 * acpica_map_cpu() has been called or not.
126 130 */
127 131 static int cpu_map_called = 0;
128 132
129 133 static int acpi_has_broken_bbn = -1;
130 134
131 135 /* buffer for AcpiOsVprintf() */
132 136 #define ACPI_OSL_PR_BUFLEN 1024
133 137 static char *acpi_osl_pr_buffer = NULL;
134 138 static int acpi_osl_pr_buflen;
135 139
136 140 #define D2A_DEBUG
137 141
138 142 /*
139 143 *
140 144 */
141 145 static void
142 146 discard_event_queues()
143 147 {
144 148 int i;
145 149
146 150 /*
147 151 * destroy event queues
148 152 */
149 153 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
150 154 if (osl_eventq[i])
151 155 ddi_taskq_destroy(osl_eventq[i]);
152 156 }
153 157 }
154 158
155 159
156 160 /*
157 161 *
158 162 */
159 163 static ACPI_STATUS
160 164 init_event_queues()
161 165 {
162 166 char namebuf[32];
163 167 int i, error = 0;
164 168
165 169 /*
166 170 * Initialize event queues
167 171 */
168 172
169 173 /* Always allocate only 1 thread per queue to force FIFO execution */
170 174 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
171 175 snprintf(namebuf, 32, "ACPI%d", i);
172 176 osl_eventq[i] = ddi_taskq_create(NULL, namebuf, 1,
173 177 osl_eventq_pri_delta[i] + minclsyspri, 0);
174 178 if (osl_eventq[i] == NULL)
175 179 error++;
176 180 }
177 181
178 182 if (error != 0) {
179 183 discard_event_queues();
180 184 #ifdef DEBUG
181 185 cmn_err(CE_WARN, "!acpica: could not initialize event queues");
182 186 #endif
183 187 return (AE_ERROR);
184 188 }
185 189
186 190 acpica_eventq_init = 1;
187 191 return (AE_OK);
188 192 }
189 193
190 194 /*
191 195 * One-time initialization of OSL layer
192 196 */
193 197 ACPI_STATUS
194 198 AcpiOsInitialize(void)
195 199 {
196 200 /*
197 201 * Allocate buffer for AcpiOsVprintf() here to avoid
198 202 * kmem_alloc()/kmem_free() at high PIL
199 203 */
200 204 acpi_osl_pr_buffer = kmem_alloc(ACPI_OSL_PR_BUFLEN, KM_SLEEP);
201 205 if (acpi_osl_pr_buffer != NULL)
202 206 acpi_osl_pr_buflen = ACPI_OSL_PR_BUFLEN;
203 207
204 208 return (AE_OK);
205 209 }
206 210
207 211 /*
208 212 * One-time shut-down of OSL layer
209 213 */
210 214 ACPI_STATUS
211 215 AcpiOsTerminate(void)
212 216 {
213 217
214 218 if (acpi_osl_pr_buffer != NULL)
215 219 kmem_free(acpi_osl_pr_buffer, acpi_osl_pr_buflen);
216 220
217 221 discard_event_queues();
218 222 return (AE_OK);
219 223 }
220 224
221 225
222 226 ACPI_PHYSICAL_ADDRESS
223 227 AcpiOsGetRootPointer()
224 228 {
225 229 ACPI_PHYSICAL_ADDRESS Address;
226 230
227 231 /*
228 232 * For EFI firmware, the root pointer is defined in EFI systab.
229 233  * The boot code processes the table and puts the physical address
230 234 * in the acpi-root-tab property.
231 235 */
232 236 Address = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_root_node(),
233 237 DDI_PROP_DONTPASS, "acpi-root-tab", NULL);
234 238
235 239 if ((Address == NULL) && ACPI_FAILURE(AcpiFindRootPointer(&Address)))
236 240 Address = NULL;
237 241
238 242 return (Address);
239 243 }
240 244
241 245 /*ARGSUSED*/
242 246 ACPI_STATUS
243 247 AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES *InitVal,
244 248 ACPI_STRING *NewVal)
245 249 {
246 250
247 251 *NewVal = 0;
248 252 return (AE_OK);
249 253 }
250 254
251 255 static void
252 256 acpica_strncpy(char *dest, const char *src, int len)
253 257 {
254 258
255 259 /*LINTED*/
256 260 while ((*dest++ = *src++) && (--len > 0))
257 261 /* copy the string */;
258 262 *dest = '\0';
259 263 }
260 264
261 265 ACPI_STATUS
262 266 AcpiOsTableOverride(ACPI_TABLE_HEADER *ExistingTable,
263 267 ACPI_TABLE_HEADER **NewTable)
264 268 {
265 269 char signature[5];
266 270 char oemid[7];
267 271 char oemtableid[9];
268 272 struct _buf *file;
269 273 char *buf1, *buf2;
270 274 int count;
271 275 char acpi_table_loc[128];
272 276
273 277 acpica_strncpy(signature, ExistingTable->Signature, 4);
274 278 acpica_strncpy(oemid, ExistingTable->OemId, 6);
275 279 acpica_strncpy(oemtableid, ExistingTable->OemTableId, 8);
276 280
277 281 #ifdef DEBUG
278 282 cmn_err(CE_NOTE, "!acpica: table [%s] v%d OEM ID [%s]"
279 283 " OEM TABLE ID [%s] OEM rev %x",
280 284 signature, ExistingTable->Revision, oemid, oemtableid,
281 285 ExistingTable->OemRevision);
282 286 #endif
283 287
284 288 /* File name format is "signature_oemid_oemtableid.dat" */
285 289 (void) strcpy(acpi_table_loc, acpi_table_path);
286 290 (void) strcat(acpi_table_loc, signature); /* for example, DSDT */
287 291 (void) strcat(acpi_table_loc, "_");
288 292 (void) strcat(acpi_table_loc, oemid); /* for example, IntelR */
289 293 (void) strcat(acpi_table_loc, "_");
290 294 (void) strcat(acpi_table_loc, oemtableid); /* for example, AWRDACPI */
291 295 (void) strcat(acpi_table_loc, ".dat");
292 296
293 297 file = kobj_open_file(acpi_table_loc);
294 298 if (file == (struct _buf *)-1) {
295 299 *NewTable = 0;
296 300 return (AE_OK);
297 301 } else {
298 302 buf1 = (char *)kmem_alloc(MAX_DAT_FILE_SIZE, KM_SLEEP);
299 303 count = kobj_read_file(file, buf1, MAX_DAT_FILE_SIZE-1, 0);
300 304 if (count >= MAX_DAT_FILE_SIZE) {
301 305 cmn_err(CE_WARN, "!acpica: table %s file size too big",
302 306 acpi_table_loc);
303 307 *NewTable = 0;
304 308 } else {
305 309 buf2 = (char *)kmem_alloc(count, KM_SLEEP);
306 310 (void) memcpy(buf2, buf1, count);
307 311 *NewTable = (ACPI_TABLE_HEADER *)buf2;
308 312 cmn_err(CE_NOTE, "!acpica: replacing table: %s",
309 313 acpi_table_loc);
310 314 }
311 315 }
312 316 kobj_close_file(file);
313 317 kmem_free(buf1, MAX_DAT_FILE_SIZE);
314 318
315 319 return (AE_OK);
316 320 }
317 321
322 +ACPI_STATUS
323 +AcpiOsPhysicalTableOverride(ACPI_TABLE_HEADER *ExistingTable,
324 + ACPI_PHYSICAL_ADDRESS *NewAddress, UINT32 *NewTableLength)
325 +{
326 + return (AE_SUPPORT);
327 +}
318 328
319 329 /*
320 330 * ACPI semaphore implementation
321 331 */
322 332 typedef struct {
323 333 kmutex_t mutex;
324 334 kcondvar_t cv;
325 335 uint32_t available;
326 336 uint32_t initial;
327 337 uint32_t maximum;
328 338 } acpi_sema_t;
329 339
330 340 /*
331 341 *
332 342 */
333 343 void
334 344 acpi_sema_init(acpi_sema_t *sp, unsigned max, unsigned count)
335 345 {
336 346 mutex_init(&sp->mutex, NULL, MUTEX_DRIVER, NULL);
337 347 cv_init(&sp->cv, NULL, CV_DRIVER, NULL);
338 348 /* no need to enter mutex here at creation */
339 349 sp->available = count;
340 350 sp->initial = count;
341 351 sp->maximum = max;
342 352 }
343 353
344 354 /*
345 355 *
346 356 */
347 357 void
348 358 acpi_sema_destroy(acpi_sema_t *sp)
349 359 {
350 360
351 361 cv_destroy(&sp->cv);
352 362 mutex_destroy(&sp->mutex);
353 363 }
354 364
355 365 /*
356 366  * Take 'count' units, blocking up to wait_time milliseconds.
357 367  */
358 368 ACPI_STATUS
359 369 acpi_sema_p(acpi_sema_t *sp, unsigned count, uint16_t wait_time)
360 370 {
361 371 ACPI_STATUS rv = AE_OK;
362 372 clock_t deadline;
363 373
364 374 mutex_enter(&sp->mutex);
365 375
366 376 if (sp->available >= count) {
367 377 /*
368 378 * Enough units available, no blocking
369 379 */
370 380 sp->available -= count;
371 381 mutex_exit(&sp->mutex);
372 382 return (rv);
373 383 } else if (wait_time == 0) {
374 384 /*
375 385 * Not enough units available and timeout
376 386 * specifies no blocking
377 387 */
378 388 rv = AE_TIME;
379 389 mutex_exit(&sp->mutex);
380 390 return (rv);
381 391 }
382 392
383 393 /*
384 394 * Not enough units available and timeout specifies waiting
385 395 */
386 396 if (wait_time != ACPI_WAIT_FOREVER)
387 397 deadline = ddi_get_lbolt() +
388 398 (clock_t)drv_usectohz(wait_time * 1000);
389 399
390 400 do {
391 401 if (wait_time == ACPI_WAIT_FOREVER)
392 402 cv_wait(&sp->cv, &sp->mutex);
393 403 else if (cv_timedwait(&sp->cv, &sp->mutex, deadline) < 0) {
394 404 rv = AE_TIME;
395 405 break;
396 406 }
397 407 } while (sp->available < count);
398 408
399 409 /* if we dropped out of the wait with AE_OK, we got the units */
400 410 if (rv == AE_OK)
401 411 sp->available -= count;
402 412
403 413 mutex_exit(&sp->mutex);
404 414 return (rv);
405 415 }
406 416
407 417 /*
408 418 *
409 419 */
410 420 void
411 421 acpi_sema_v(acpi_sema_t *sp, unsigned count)
412 422 {
413 423 mutex_enter(&sp->mutex);
414 424 sp->available += count;
415 425 cv_broadcast(&sp->cv);
416 426 mutex_exit(&sp->mutex);
417 427 }
418 428
419 429
420 430 ACPI_STATUS
421 431 AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
422 432 ACPI_HANDLE *OutHandle)
423 433 {
424 434 acpi_sema_t *sp;
425 435
426 436 if ((OutHandle == NULL) || (InitialUnits > MaxUnits))
427 437 return (AE_BAD_PARAMETER);
428 438
429 439 sp = (acpi_sema_t *)kmem_alloc(sizeof (acpi_sema_t), KM_SLEEP);
430 440 acpi_sema_init(sp, MaxUnits, InitialUnits);
431 441 *OutHandle = (ACPI_HANDLE)sp;
432 442 return (AE_OK);
433 443 }
434 444
435 445
436 446 ACPI_STATUS
437 447 AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
438 448 {
439 449
440 450 if (Handle == NULL)
441 451 return (AE_BAD_PARAMETER);
442 452
443 453 acpi_sema_destroy((acpi_sema_t *)Handle);
444 454 kmem_free((void *)Handle, sizeof (acpi_sema_t));
445 455 return (AE_OK);
446 456 }
447 457
448 458 ACPI_STATUS
449 459 AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
450 460 {
451 461
452 462 if ((Handle == NULL) || (Units < 1))
453 463 return (AE_BAD_PARAMETER);
454 464
455 465 return (acpi_sema_p((acpi_sema_t *)Handle, Units, Timeout));
456 466 }
457 467
458 468 ACPI_STATUS
459 469 AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
460 470 {
461 471
462 472 if ((Handle == NULL) || (Units < 1))
463 473 return (AE_BAD_PARAMETER);
464 474
465 475 acpi_sema_v((acpi_sema_t *)Handle, Units);
466 476 return (AE_OK);
467 477 }
468 478
469 479 ACPI_STATUS
470 480 AcpiOsCreateLock(ACPI_HANDLE *OutHandle)
471 481 {
472 482 kmutex_t *mp;
473 483
474 484 if (OutHandle == NULL)
475 485 return (AE_BAD_PARAMETER);
476 486
477 487 mp = (kmutex_t *)kmem_alloc(sizeof (kmutex_t), KM_SLEEP);
478 488 mutex_init(mp, NULL, MUTEX_DRIVER, NULL);
479 489 *OutHandle = (ACPI_HANDLE)mp;
480 490 return (AE_OK);
481 491 }
482 492
483 493 void
484 494 AcpiOsDeleteLock(ACPI_HANDLE Handle)
485 495 {
486 496
487 497 if (Handle == NULL)
488 498 return;
489 499
490 500 mutex_destroy((kmutex_t *)Handle);
491 501 kmem_free((void *)Handle, sizeof (kmutex_t));
492 502 }
493 503
494 504 ACPI_CPU_FLAGS
495 505 AcpiOsAcquireLock(ACPI_HANDLE Handle)
496 506 {
497 507
498 508
499 509 if (Handle == NULL)
500 510 return (AE_BAD_PARAMETER);
501 511
502 512 if (curthread == CPU->cpu_idle_thread) {
503 513 while (!mutex_tryenter((kmutex_t *)Handle))
504 514 /* spin */;
505 515 } else
506 516 mutex_enter((kmutex_t *)Handle);
507 517 return (AE_OK);
508 518 }
509 519
510 520 void
511 521 AcpiOsReleaseLock(ACPI_HANDLE Handle, ACPI_CPU_FLAGS Flags)
512 522 {
513 523 _NOTE(ARGUNUSED(Flags))
514 524
515 525 mutex_exit((kmutex_t *)Handle);
516 526 }
517 527
518 528
519 529 void *
520 530 AcpiOsAllocate(ACPI_SIZE Size)
521 531 {
522 532 ACPI_SIZE *tmp_ptr;
523 533
524 534 Size += sizeof (Size);
525 535 tmp_ptr = (ACPI_SIZE *)kmem_zalloc(Size, KM_SLEEP);
526 536 *tmp_ptr++ = Size;
527 537 return (tmp_ptr);
528 538 }
529 539
530 540 void
531 541 AcpiOsFree(void *Memory)
532 542 {
533 543 ACPI_SIZE size, *tmp_ptr;
534 544
535 545 tmp_ptr = (ACPI_SIZE *)Memory;
536 546 tmp_ptr -= 1;
537 547 size = *tmp_ptr;
538 548 kmem_free(tmp_ptr, size);
539 549 }
540 550
541 551 static int napics_found; /* number of ioapic addresses in array */
542 552 static ACPI_PHYSICAL_ADDRESS ioapic_paddr[MAX_IO_APIC];
543 553 static ACPI_TABLE_MADT *acpi_mapic_dtp = NULL;
544 554 static void *dummy_ioapicadr;
545 555
546 556 void
547 557 acpica_find_ioapics(void)
548 558 {
549 559 int madt_seen, madt_size;
550 560 ACPI_SUBTABLE_HEADER *ap;
551 561 ACPI_MADT_IO_APIC *mia;
552 562
553 563 if (acpi_mapic_dtp != NULL)
554 564 return; /* already parsed table */
555 565 if (AcpiGetTable(ACPI_SIG_MADT, 1,
556 566 (ACPI_TABLE_HEADER **) &acpi_mapic_dtp) != AE_OK)
557 567 return;
558 568
559 569 napics_found = 0;
560 570
561 571 /*
562 572 * Search the MADT for ioapics
563 573 */
564 574 ap = (ACPI_SUBTABLE_HEADER *) (acpi_mapic_dtp + 1);
565 575 madt_size = acpi_mapic_dtp->Header.Length;
566 576 madt_seen = sizeof (*acpi_mapic_dtp);
567 577
568 578 while (madt_seen < madt_size) {
569 579
570 580 switch (ap->Type) {
571 581 case ACPI_MADT_TYPE_IO_APIC:
572 582 mia = (ACPI_MADT_IO_APIC *) ap;
573 583 if (napics_found < MAX_IO_APIC) {
574 584 ioapic_paddr[napics_found++] =
575 585 (ACPI_PHYSICAL_ADDRESS)
576 586 (mia->Address & PAGEMASK);
577 587 }
578 588 break;
579 589
580 590 default:
581 591 break;
582 592 }
583 593
584 594 /* advance to next entry */
585 595 madt_seen += ap->Length;
586 596 ap = (ACPI_SUBTABLE_HEADER *)(((char *)ap) + ap->Length);
587 597 }
588 598 if (dummy_ioapicadr == NULL)
589 599 dummy_ioapicadr = kmem_zalloc(PAGESIZE, KM_SLEEP);
590 600 }
591 601
592 602
593 603 void *
594 604 AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Size)
595 605 {
596 606 int i;
597 607
598 608 /*
599 609  * If the ioapic address table is populated, check whether we are trying
600 610  * to access an ioapic; if so, return a pointer to a dummy ioapic instead.
601 611 */
602 612 for (i = 0; i < napics_found; i++) {
603 613 if ((PhysicalAddress & PAGEMASK) == ioapic_paddr[i])
604 614 return (dummy_ioapicadr);
605 615 }
606 616 /* FUTUREWORK: test PhysicalAddress for > 32 bits */
607 617 return (psm_map_new((paddr_t)PhysicalAddress,
608 618 (size_t)Size, PSM_PROT_WRITE | PSM_PROT_READ));
609 619 }
610 620
611 621 void
612 622 AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Size)
613 623 {
614 624 /*
615 625 * Check if trying to unmap dummy ioapic address.
616 626 */
617 627 if (LogicalAddress == dummy_ioapicadr)
618 628 return;
619 629
620 630 psm_unmap((caddr_t)LogicalAddress, (size_t)Size);
621 631 }
622 632
623 633 /*ARGSUSED*/
624 634 ACPI_STATUS
625 635 AcpiOsGetPhysicalAddress(void *LogicalAddress,
626 636 ACPI_PHYSICAL_ADDRESS *PhysicalAddress)
627 637 {
628 638
629 639 /* UNIMPLEMENTED: not invoked by ACPI CA code */
630 640 return (AE_NOT_IMPLEMENTED);
631 641 }
632 642
633 643
634 644 ACPI_OSD_HANDLER acpi_isr;
635 645 void *acpi_isr_context;
636 646
637 647 uint_t
638 648 acpi_wrapper_isr(char *arg)
639 649 {
640 650 _NOTE(ARGUNUSED(arg))
641 651
642 652 int status;
643 653
644 654 status = (*acpi_isr)(acpi_isr_context);
645 655
646 656 if (status == ACPI_INTERRUPT_HANDLED) {
647 657 return (DDI_INTR_CLAIMED);
648 658 } else {
649 659 return (DDI_INTR_UNCLAIMED);
650 660 }
651 661 }
652 662
653 663 static int acpi_intr_hooked = 0;
654 664
655 665 ACPI_STATUS
656 666 AcpiOsInstallInterruptHandler(UINT32 InterruptNumber,
657 667 ACPI_OSD_HANDLER ServiceRoutine,
658 668 void *Context)
659 669 {
660 670 _NOTE(ARGUNUSED(InterruptNumber))
661 671
662 672 int retval;
663 673 int sci_vect;
664 674 iflag_t sci_flags;
665 675
666 676 acpi_isr = ServiceRoutine;
667 677 acpi_isr_context = Context;
668 678
669 679 /*
670 680 * Get SCI (adjusted for PIC/APIC mode if necessary)
671 681 */
672 682 if (acpica_get_sci(&sci_vect, &sci_flags) != AE_OK) {
673 683 return (AE_ERROR);
674 684 }
675 685
676 686 #ifdef DEBUG
677 687 cmn_err(CE_NOTE, "!acpica: attaching SCI %d", sci_vect);
678 688 #endif
679 689
680 690 retval = add_avintr(NULL, SCI_IPL, (avfunc)acpi_wrapper_isr,
681 691 "ACPI SCI", sci_vect, NULL, NULL, NULL, NULL);
682 692 if (retval) {
683 693 acpi_intr_hooked = 1;
684 694 return (AE_OK);
685 695 } else
686 696 return (AE_BAD_PARAMETER);
687 697 }
688 698
689 699 ACPI_STATUS
690 700 AcpiOsRemoveInterruptHandler(UINT32 InterruptNumber,
691 701 ACPI_OSD_HANDLER ServiceRoutine)
692 702 {
693 703 _NOTE(ARGUNUSED(ServiceRoutine))
694 704
695 705 #ifdef DEBUG
696 706 cmn_err(CE_NOTE, "!acpica: detaching SCI %d", InterruptNumber);
697 707 #endif
698 708 if (acpi_intr_hooked) {
699 709 rem_avintr(NULL, LOCK_LEVEL - 1, (avfunc)acpi_wrapper_isr,
700 710 InterruptNumber);
701 711 acpi_intr_hooked = 0;
702 712 }
703 713 return (AE_OK);
704 714 }
705 715
706 716
707 717 ACPI_THREAD_ID
708 718 AcpiOsGetThreadId(void)
709 719 {
710 720 /*
711 721 * ACPI CA doesn't care what actual value is returned as long
712 722 * as it is non-zero and unique to each existing thread.
713 723 * ACPI CA assumes that thread ID is castable to a pointer,
714 724 * so we use the current thread pointer.
715 725 */
716 726 return (ACPI_CAST_PTHREAD_T((uintptr_t)curthread));
717 727 }
718 728
719 729 /*
720 730  * Queue Function for asynchronous execution on the taskq for Type.
721 731  */
722 732 ACPI_STATUS
723 733 AcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK Function,
724 734 void *Context)
725 735 {
726 736
727 737 if (!acpica_eventq_init) {
728 738 /*
729 739 * Create taskqs for event handling
730 740 */
731 741 if (init_event_queues() != AE_OK)
732 742 return (AE_ERROR);
733 743 }
734 744
735 745 if (ddi_taskq_dispatch(osl_eventq[Type], Function, Context,
736 746 DDI_NOSLEEP) == DDI_FAILURE) {
737 747 #ifdef DEBUG
738 748 cmn_err(CE_WARN, "!acpica: unable to dispatch event");
739 749 #endif
740 750 return (AE_ERROR);
741 751 }
742 752 return (AE_OK);
743 753
744 754 }
745 755
746 756 void
757 +AcpiOsWaitEventsComplete(void)
758 +{
759 + if (acpica_eventq_init) {
760 + int i;
761 + /*
762 +		 * Block until all events initiated by AcpiOsExecute have completed.
763 + */
764 + for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
765 + if (osl_eventq[i])
766 + ddi_taskq_wait(osl_eventq[i]);
767 + }
768 + }
769 + return;
770 +}
771 +
772 +void
747 773 AcpiOsSleep(ACPI_INTEGER Milliseconds)
748 774 {
749 775 /*
750 776 * During kernel startup, before the first tick interrupt
751 777 * has taken place, we can't call delay; very late in
752 778 * kernel shutdown or suspend/resume, clock interrupts
753 779 * are blocked, so delay doesn't work then either.
754 780 * So we busy wait if lbolt == 0 (kernel startup)
755 781 * or if acpica_use_safe_delay has been set to a
756 782 * non-zero value.
757 783 */
758 784 if ((ddi_get_lbolt() == 0) || acpica_use_safe_delay)
759 785 drv_usecwait(Milliseconds * 1000);
760 786 else
761 787 delay(drv_usectohz(Milliseconds * 1000));
762 788 }
763 789
764 790 void
765 791 AcpiOsStall(UINT32 Microseconds)
766 792 {
767 793 drv_usecwait(Microseconds);
768 794 }
769 795
770 796
771 797 /*
772 798 * Implementation of "Windows 2001" compatible I/O permission map
773 799 *
774 800 */
775 801 #define OSL_IO_NONE (0)
776 802 #define OSL_IO_READ (1<<0)
777 803 #define OSL_IO_WRITE (1<<1)
778 804 #define OSL_IO_RW (OSL_IO_READ | OSL_IO_WRITE)
779 805 #define OSL_IO_TERM (1<<2)
780 806 #define OSL_IO_DEFAULT OSL_IO_RW
781 807
782 808 static struct io_perm {
783 809 ACPI_IO_ADDRESS low;
784 810 ACPI_IO_ADDRESS high;
785 811 uint8_t perm;
786 812 } osl_io_perm[] = {
787 813 { 0xcf8, 0xd00, OSL_IO_TERM | OSL_IO_RW}
788 814 };
789 815
790 816
791 817 /*
792 818  * Find the I/O permission entry covering addr; returns NULL if none.
793 819  */
794 820 static struct io_perm *
795 821 osl_io_find_perm(ACPI_IO_ADDRESS addr)
796 822 {
797 823 struct io_perm *p;
798 824
799 825 p = osl_io_perm;
800 826 while (p != NULL) {
801 827 if ((p->low <= addr) && (addr <= p->high))
802 828 break;
803 829 p = (p->perm & OSL_IO_TERM) ? NULL : p+1;
804 830 }
805 831
806 832 return (p);
807 833 }
808 834
809 835 /*
810 836 *
811 837 */
812 838 ACPI_STATUS
813 839 AcpiOsReadPort(ACPI_IO_ADDRESS Address, UINT32 *Value, UINT32 Width)
814 840 {
815 841 struct io_perm *p;
816 842
817 843 /* verify permission */
818 844 p = osl_io_find_perm(Address);
819 845 if (p && (p->perm & OSL_IO_READ) == 0) {
820 846 cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u not permitted",
821 847 (long)Address, Width);
822 848 *Value = 0xffffffff;
823 849 return (AE_ERROR);
824 850 }
825 851
826 852 switch (Width) {
827 853 case 8:
828 854 *Value = inb(Address);
829 855 break;
830 856 case 16:
831 857 *Value = inw(Address);
832 858 break;
833 859 case 32:
834 860 *Value = inl(Address);
835 861 break;
836 862 default:
837 863 cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u failed",
838 864 (long)Address, Width);
839 865 return (AE_BAD_PARAMETER);
840 866 }
841 867 return (AE_OK);
842 868 }
843 869
844 870 ACPI_STATUS
845 871 AcpiOsWritePort(ACPI_IO_ADDRESS Address, UINT32 Value, UINT32 Width)
846 872 {
847 873 struct io_perm *p;
848 874
849 875 /* verify permission */
850 876 p = osl_io_find_perm(Address);
851 877 if (p && (p->perm & OSL_IO_WRITE) == 0) {
852 878 cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u not permitted",
853 879 (long)Address, Width);
854 880 return (AE_ERROR);
855 881 }
856 882
857 883 switch (Width) {
858 884 case 8:
859 885 outb(Address, Value);
860 886 break;
861 887 case 16:
862 888 outw(Address, Value);
863 889 break;
864 890 case 32:
865 891 outl(Address, Value);
866 892 break;
867 893 default:
868 894 cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u failed",
869 895 (long)Address, Width);
870 896 return (AE_BAD_PARAMETER);
871 897 }
872 898 return (AE_OK);
873 899 }
874 900
875 901
876 902 /*
877 903 *
878 904 */
879 905
880 906 #define OSL_RW(ptr, val, type, rw) \
881 907 { if (rw) *((type *)(ptr)) = *((type *) val); \
882 908 else *((type *) val) = *((type *)(ptr)); }
883 909
884 910
885 911 static void
886 -osl_rw_memory(ACPI_PHYSICAL_ADDRESS Address, UINT32 *Value,
912 +osl_rw_memory(ACPI_PHYSICAL_ADDRESS Address, UINT64 *Value,
887 913 UINT32 Width, int write)
888 914 {
889 915 size_t maplen = Width / 8;
890 916 caddr_t ptr;
891 917
892 918 ptr = psm_map_new((paddr_t)Address, maplen,
893 919 PSM_PROT_WRITE | PSM_PROT_READ);
894 920
895 921 switch (maplen) {
896 922 case 1:
897 923 OSL_RW(ptr, Value, uint8_t, write);
898 924 break;
899 925 case 2:
900 926 OSL_RW(ptr, Value, uint16_t, write);
901 927 break;
902 928 case 4:
903 929 OSL_RW(ptr, Value, uint32_t, write);
904 930 break;
931 + case 8:
932 + OSL_RW(ptr, Value, uint64_t, write);
933 + break;
905 934 default:
906 935 cmn_err(CE_WARN, "!osl_rw_memory: invalid size %d",
907 936 Width);
908 937 break;
909 938 }
910 939
911 940 psm_unmap(ptr, maplen);
912 941 }
913 942
914 943 ACPI_STATUS
915 944 AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS Address,
916 - UINT32 *Value, UINT32 Width)
945 + UINT64 *Value, UINT32 Width)
917 946 {
918 947 osl_rw_memory(Address, Value, Width, 0);
919 948 return (AE_OK);
920 949 }
921 950
922 951 ACPI_STATUS
923 952 AcpiOsWriteMemory(ACPI_PHYSICAL_ADDRESS Address,
924 - UINT32 Value, UINT32 Width)
953 + UINT64 Value, UINT32 Width)
925 954 {
926 955 osl_rw_memory(Address, &Value, Width, 1);
927 956 return (AE_OK);
928 957 }
929 958
930 959
931 960 ACPI_STATUS
932 961 AcpiOsReadPciConfiguration(ACPI_PCI_ID *PciId, UINT32 Reg,
933 962 UINT64 *Value, UINT32 Width)
934 963 {
935 964
936 965 switch (Width) {
937 966 case 8:
938 967 *Value = (UINT64)(*pci_getb_func)
939 968 (PciId->Bus, PciId->Device, PciId->Function, Reg);
940 969 break;
941 970 case 16:
942 971 *Value = (UINT64)(*pci_getw_func)
943 972 (PciId->Bus, PciId->Device, PciId->Function, Reg);
944 973 break;
945 974 case 32:
946 975 *Value = (UINT64)(*pci_getl_func)
947 976 (PciId->Bus, PciId->Device, PciId->Function, Reg);
948 977 break;
949 978 case 64:
950 979 default:
951 980 cmn_err(CE_WARN, "!AcpiOsReadPciConfiguration: %x %u failed",
952 981 Reg, Width);
953 982 return (AE_BAD_PARAMETER);
954 983 }
955 984 return (AE_OK);
956 985 }
957 986
958 987 /*
959 988  * When zero, writes to PCI config space are logged and dropped.
960 989  */
961 990 int acpica_write_pci_config_ok = 1;
962 991
963 992 ACPI_STATUS
964 993 AcpiOsWritePciConfiguration(ACPI_PCI_ID *PciId, UINT32 Reg,
965 994 UINT64 Value, UINT32 Width)
966 995 {
967 996
968 997 if (!acpica_write_pci_config_ok) {
969 998 cmn_err(CE_NOTE, "!write to PCI cfg %x/%x/%x %x"
970 999 " %lx %d not permitted", PciId->Bus, PciId->Device,
971 1000 PciId->Function, Reg, (long)Value, Width);
972 1001 return (AE_OK);
973 1002 }
974 1003
975 1004 switch (Width) {
976 1005 case 8:
977 1006 (*pci_putb_func)(PciId->Bus, PciId->Device, PciId->Function,
978 1007 Reg, (uint8_t)Value);
979 1008 break;
980 1009 case 16:
981 1010 (*pci_putw_func)(PciId->Bus, PciId->Device, PciId->Function,
982 1011 Reg, (uint16_t)Value);
983 1012 break;
984 1013 case 32:
985 1014 (*pci_putl_func)(PciId->Bus, PciId->Device, PciId->Function,
986 1015 Reg, (uint32_t)Value);
987 1016 break;
988 1017 case 64:
989 1018 default:
990 1019 cmn_err(CE_WARN, "!AcpiOsWritePciConfiguration: %x %u failed",
991 1020 Reg, Width);
992 1021 return (AE_BAD_PARAMETER);
993 1022 }
994 1023 return (AE_OK);
995 1024 }
996 1025
997 1026 /*
998 1027 * Called with ACPI_HANDLEs for both a PCI Config Space
999 1028 * OpRegion and (what ACPI CA thinks is) the PCI device
1000 1029 * to which this ConfigSpace OpRegion belongs.
1001 1030 *
1002 1031 * ACPI CA uses _BBN and _ADR objects to determine the default
1003 1032 * values for bus, segment, device and function; anything ACPI CA
1004 1033 * can't figure out from the ACPI tables will be 0. One very
1005 1034 * old 32-bit x86 system is known to have broken _BBN; this is
1006 1035 * not addressed here.
1007 1036 *
1008 1037 * Some BIOSes implement _BBN() by reading PCI config space
1009 1038 * on bus #0 - which means that we'll recurse when we attempt
1010 1039 * to create the devinfo-to-ACPI map. If Derive is called during
1011 1040 * scan_d2a_map, we don't translate the bus # and return.
1012 1041 *
1013 1042 * We get the parent of the OpRegion, which must be a PCI
1014 1043 * node, fetch the associated devinfo node and snag the
1015 1044 * b/d/f from it.
1016 1045 */
1017 1046 void
1018 1047 AcpiOsDerivePciId(ACPI_HANDLE rhandle, ACPI_HANDLE chandle,
1019 1048 ACPI_PCI_ID **PciId)
1020 1049 {
1021 1050 ACPI_HANDLE handle;
1022 1051 dev_info_t *dip;
1023 1052 int bus, device, func, devfn;
1024 1053
1025 1054 /*
1026 1055 * See above - avoid recursing during scanning_d2a_map.
1027 1056 */
1028 1057 if (scanning_d2a_map)
1029 1058 return;
1030 1059
1031 1060 /*
1032 1061 * Get the OpRegion's parent
1033 1062 */
1034 1063 if (AcpiGetParent(chandle, &handle) != AE_OK)
1035 1064 return;
1036 1065
1037 1066 /*
1038 1067 * If we've mapped the ACPI node to the devinfo
1039 1068 * tree, use the devinfo reg property
1040 1069 */
1041 1070 if (ACPI_SUCCESS(acpica_get_devinfo(handle, &dip)) &&
1042 1071 (acpica_get_bdf(dip, &bus, &device, &func) >= 0)) {
1043 1072 (*PciId)->Bus = bus;
1044 1073 (*PciId)->Device = device;
1045 1074 (*PciId)->Function = func;
1046 1075 }
1047 1076 }
1048 1077
1049 1078
1050 1079 /*ARGSUSED*/
1051 1080 BOOLEAN
1052 1081 AcpiOsReadable(void *Pointer, ACPI_SIZE Length)
1053 1082 {
1054 1083
1055 1084 /* Always says yes; all mapped memory assumed readable */
1056 1085 return (1);
1057 1086 }
1058 1087
1059 1088 /*ARGSUSED*/
1060 1089 BOOLEAN
1061 1090 AcpiOsWritable(void *Pointer, ACPI_SIZE Length)
1062 1091 {
1063 1092
1064 1093 /* Always says yes; all mapped memory assumed writable */
1065 1094 return (1);
1066 1095 }
1067 1096
1068 1097 UINT64
1069 1098 AcpiOsGetTimer(void)
1070 1099 {
1071 1100 /* gethrtime() returns 1nS resolution; convert to 100nS granules */
1072 1101 return ((gethrtime() + 50) / 100);
1073 1102 }
1074 1103
1075 1104 static struct AcpiOSIFeature_s {
1076 1105 uint64_t control_flag;
1077 1106 const char *feature_name;
1078 1107 } AcpiOSIFeatures[] = {
1079 1108 { ACPI_FEATURE_OSI_MODULE, "Module Device" },
1080 1109 { 0, "Processor Device" }
1081 1110 };
1082 1111
1083 1112 /*ARGSUSED*/
1084 1113 ACPI_STATUS
1085 1114 AcpiOsValidateInterface(char *feature)
1086 1115 {
1087 1116 int i;
1088 1117
1089 1118 ASSERT(feature != NULL);
1090 1119 for (i = 0; i < sizeof (AcpiOSIFeatures) / sizeof (AcpiOSIFeatures[0]);
1091 1120 i++) {
1092 1121 if (strcmp(feature, AcpiOSIFeatures[i].feature_name) != 0) {
1093 1122 continue;
1094 1123 }
1095 1124 /* Check whether required core features are available. */
1096 1125 if (AcpiOSIFeatures[i].control_flag != 0 &&
1097 1126 acpica_get_core_feature(AcpiOSIFeatures[i].control_flag) !=
1098 1127 AcpiOSIFeatures[i].control_flag) {
1099 1128 break;
1100 1129 }
1101 1130 /* Feature supported. */
1102 1131 return (AE_OK);
1103 1132 }
1104 1133
1105 1134 return (AE_SUPPORT);
1106 1135 }
1107 1136
1108 1137 /*ARGSUSED*/
1109 1138 ACPI_STATUS
1110 1139 AcpiOsValidateAddress(UINT8 spaceid, ACPI_PHYSICAL_ADDRESS addr,
1111 1140 ACPI_SIZE length)
1112 1141 {
1113 1142 return (AE_OK);
1114 1143 }
1115 1144
1116 1145 ACPI_STATUS
1117 1146 AcpiOsSignal(UINT32 Function, void *Info)
1118 1147 {
1119 1148 _NOTE(ARGUNUSED(Function, Info))
1120 1149
1121 1150 /* FUTUREWORK: debugger support */
1122 1151
1123 1152 cmn_err(CE_NOTE, "!OsSignal unimplemented");
1124 1153 return (AE_OK);
1125 1154 }
1126 1155
1127 1156 void ACPI_INTERNAL_VAR_XFACE
1128 1157 AcpiOsPrintf(const char *Format, ...)
1129 1158 {
1130 1159 va_list ap;
1131 1160
1132 1161 va_start(ap, Format);
1133 1162 AcpiOsVprintf(Format, ap);
1134 1163 va_end(ap);
1135 1164 }
1136 1165
1137 1166 /*
1138 1167 * When != 0, sends output to console
1139 1168 * Patchable with kmdb or /etc/system.
1140 1169 */
1141 1170 int acpica_console_out = 0;
1142 1171
1143 1172 #define ACPICA_OUTBUF_LEN 160
1144 1173 char acpica_outbuf[ACPICA_OUTBUF_LEN];
1145 1174 int acpica_outbuf_offset;
1146 1175
1147 1176 /*
1148 1177 *
1149 1178 */
1150 1179 static void
1151 1180 acpica_pr_buf(char *buf)
1152 1181 {
1153 1182 char c, *bufp, *outp;
1154 1183 int out_remaining;
1155 1184
1156 1185 /*
1157 1186 * copy the supplied buffer into the output buffer
1158 1187 * when we hit a '\n' or overflow the output buffer,
1159 1188 * output and reset the output buffer
1160 1189 */
1161 1190 bufp = buf;
1162 1191 outp = acpica_outbuf + acpica_outbuf_offset;
1163 1192 out_remaining = ACPICA_OUTBUF_LEN - acpica_outbuf_offset - 1;
1164 1193 while (c = *bufp++) {
1165 1194 *outp++ = c;
1166 1195 if (c == '\n' || --out_remaining == 0) {
1167 1196 *outp = '\0';
1168 1197 switch (acpica_console_out) {
1169 1198 case 1:
1170 1199 printf(acpica_outbuf);
1171 1200 break;
1172 1201 case 2:
1173 1202 prom_printf(acpica_outbuf);
1174 1203 break;
1175 1204 case 0:
1176 1205 default:
1177 1206 (void) strlog(0, 0, 0,
1178 1207 SL_CONSOLE | SL_NOTE | SL_LOGONLY,
1179 1208 acpica_outbuf);
1180 1209 break;
1181 1210 }
1182 1211 acpica_outbuf_offset = 0;
1183 1212 outp = acpica_outbuf;
1184 1213 out_remaining = ACPICA_OUTBUF_LEN - 1;
1185 1214 }
1186 1215 }
1187 1216
1188 1217 acpica_outbuf_offset = outp - acpica_outbuf;
1189 1218 }
1190 1219
1191 1220 void
1192 1221 AcpiOsVprintf(const char *Format, va_list Args)
1193 1222 {
1194 1223
1195 1224 /*
1196 1225 * If AcpiOsInitialize() failed to allocate a string buffer,
1197 1226 * resort to vprintf().
1198 1227 */
1199 1228 if (acpi_osl_pr_buffer == NULL) {
1200 1229 vprintf(Format, Args);
1201 1230 return;
1202 1231 }
1203 1232
1204 1233 /*
1205 1234 * It is possible that a very long debug output statement will
1206 1235 * be truncated; this is silently ignored.
1207 1236 */
1208 1237 (void) vsnprintf(acpi_osl_pr_buffer, acpi_osl_pr_buflen, Format, Args);
1209 1238 acpica_pr_buf(acpi_osl_pr_buffer);
1210 1239 }
1211 1240
1212 1241 void
1213 1242 AcpiOsRedirectOutput(void *Destination)
1214 1243 {
1215 1244 _NOTE(ARGUNUSED(Destination))
1216 1245
1217 1246 /* FUTUREWORK: debugger support */
1218 1247
1219 1248 #ifdef DEBUG
1220 1249 cmn_err(CE_WARN, "!acpica: AcpiOsRedirectOutput called");
1221 1250 #endif
1222 1251 }
1223 1252
1224 1253
1225 1254 UINT32
1226 1255 AcpiOsGetLine(char *Buffer, UINT32 len, UINT32 *BytesRead)
1227 1256 {
1228 1257 _NOTE(ARGUNUSED(Buffer))
1229 1258 _NOTE(ARGUNUSED(len))
1230 1259 _NOTE(ARGUNUSED(BytesRead))
1231 1260
1232 1261 /* FUTUREWORK: debugger support */
1233 1262
1234 1263 return (0);
1235 1264 }
1236 1265
1237 1266 /*
1238 1267 * Device tree binding
1239 1268 */
1240 1269 static ACPI_STATUS
1241 1270 acpica_find_pcibus_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
1242 1271 {
1243 1272 _NOTE(ARGUNUSED(lvl));
1244 1273
1245 1274 int sta, hid, bbn;
1246 1275 int busno = (intptr_t)ctxp;
1247 1276 ACPI_HANDLE *hdlp = (ACPI_HANDLE *)rvpp;
1248 1277
1249 1278 /* Check whether device exists. */
1250 1279 if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
1251 1280 !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
1252 1281 /*
1253 1282 * Skip object if device doesn't exist.
1254 1283 * According to ACPI Spec,
1255 1284 * 1) setting either bit 0 or bit 3 means that device exists.
1256 1285 * 2) Absence of _STA method means all status bits set.
1257 1286 */
1258 1287 return (AE_CTRL_DEPTH);
1259 1288 }
1260 1289
1261 1290 if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
1262 1291 (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
1263 1292 /* Non PCI/PCIe host bridge. */
1264 1293 return (AE_OK);
1265 1294 }
1266 1295
1267 1296 if (acpi_has_broken_bbn) {
1268 1297 ACPI_BUFFER rb;
1269 1298 rb.Pointer = NULL;
1270 1299 rb.Length = ACPI_ALLOCATE_BUFFER;
1271 1300
1272 1301 /* Decree _BBN == n from PCI<n> */
1273 1302 if (AcpiGetName(hdl, ACPI_SINGLE_NAME, &rb) != AE_OK) {
1274 1303 return (AE_CTRL_TERMINATE);
1275 1304 }
1276 1305 bbn = ((char *)rb.Pointer)[3] - '0';
1277 1306 AcpiOsFree(rb.Pointer);
1278 1307 if (bbn == busno || busno == 0) {
1279 1308 *hdlp = hdl;
1280 1309 return (AE_CTRL_TERMINATE);
1281 1310 }
1282 1311 } else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn))) {
1283 1312 if (bbn == busno) {
1284 1313 *hdlp = hdl;
1285 1314 return (AE_CTRL_TERMINATE);
1286 1315 }
1287 1316 } else if (busno == 0) {
1288 1317 *hdlp = hdl;
1289 1318 return (AE_CTRL_TERMINATE);
1290 1319 }
1291 1320
1292 1321 return (AE_CTRL_DEPTH);
1293 1322 }
1294 1323
1295 1324 static int
1296 1325 acpica_find_pcibus(int busno, ACPI_HANDLE *rh)
1297 1326 {
1298 1327 ACPI_HANDLE sbobj, busobj;
1299 1328
1300 1329 /* initialize static flag by querying ACPI namespace for bug */
1301 1330 if (acpi_has_broken_bbn == -1)
1302 1331 acpi_has_broken_bbn = acpica_query_bbn_problem();
1303 1332
1304 1333 if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1305 1334 busobj = NULL;
1306 1335 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1307 1336 acpica_find_pcibus_walker, NULL, (void *)(intptr_t)busno,
1308 1337 (void **)&busobj);
1309 1338 if (busobj != NULL) {
1310 1339 *rh = busobj;
1311 1340 return (AE_OK);
1312 1341 }
1313 1342 }
1314 1343
1315 1344 return (AE_ERROR);
1316 1345 }
1317 1346
1318 1347 static ACPI_STATUS
1319 1348 acpica_query_bbn_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
1320 1349 {
1321 1350 _NOTE(ARGUNUSED(lvl));
1322 1351 _NOTE(ARGUNUSED(rvpp));
1323 1352
1324 1353 int sta, hid, bbn;
1325 1354 int *cntp = (int *)ctxp;
1326 1355
1327 1356 /* Check whether device exists. */
1328 1357 if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
1329 1358 !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
1330 1359 /*
1331 1360 * Skip object if device doesn't exist.
1332 1361 * According to ACPI Spec,
1333 1362 * 1) setting either bit 0 or bit 3 means that device exists.
1334 1363 * 2) Absence of _STA method means all status bits set.
1335 1364 */
1336 1365 return (AE_CTRL_DEPTH);
1337 1366 }
1338 1367
1339 1368 if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
1340 1369 (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
1341 1370 /* Non PCI/PCIe host bridge. */
1342 1371 return (AE_OK);
1343 1372 } else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn)) &&
1344 1373 bbn == 0 && ++(*cntp) > 1) {
1345 1374 /*
1346 1375 * If we find more than one bus with a 0 _BBN
1347 1376 * we have the problem that BigBear's BIOS shows
1348 1377 */
1349 1378 return (AE_CTRL_TERMINATE);
1350 1379 } else {
1351 1380 /*
1352 1381 * Skip children of PCI/PCIe host bridge.
1353 1382 */
1354 1383 return (AE_CTRL_DEPTH);
1355 1384 }
1356 1385 }
1357 1386
1358 1387 /*
1359 1388 * Look for ACPI problem where _BBN is zero for multiple PCI buses
1360 1389 * This is a clear ACPI bug, but we have a workaround in acpica_find_pcibus()
1361 1390 * below if it exists.
1362 1391 */
1363 1392 static int
1364 1393 acpica_query_bbn_problem(void)
1365 1394 {
1366 1395 ACPI_HANDLE sbobj;
1367 1396 int zerobbncnt;
1368 1397 void *rv;
1369 1398
1370 1399 zerobbncnt = 0;
1371 1400 if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1372 1401 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1373 1402 acpica_query_bbn_walker, NULL, &zerobbncnt, &rv);
1374 1403 }
1375 1404
1376 1405 return (zerobbncnt > 1 ? 1 : 0);
1377 1406 }
1378 1407
1379 1408 static const char hextab[] = "0123456789ABCDEF";
1380 1409
1381 1410 static int
1382 1411 hexdig(int c)
1383 1412 {
1384 1413 /*
1385 1414 * Get hex digit:
1386 1415 *
1387 1416 * Returns the 4-bit hex digit named by the input character. Returns
1388 1417 * zero if the input character is not valid hex!
1389 1418 */
1390 1419
1391 1420 int x = ((c < 'a') || (c > 'z')) ? c : (c - ' ');
1392 1421 int j = sizeof (hextab);
1393 1422
1394 1423 while (--j && (x != hextab[j])) {
1395 1424 }
1396 1425 return (j);
1397 1426 }
1398 1427
1399 1428 static int
1400 1429 CompressEisaID(char *np)
1401 1430 {
1402 1431 /*
1403 1432 * Compress an EISA device name:
1404 1433 *
1405 1434 * This routine converts a 7-byte ASCII device name into the 4-byte
1406 1435 * compressed form used by EISA (50 bytes of ROM to save 1 byte of
1407 1436 * NV-RAM!)
1408 1437 */
1409 1438
1410 1439 union { char octets[4]; int retval; } myu;
1411 1440
1412 1441 myu.octets[0] = ((np[0] & 0x1F) << 2) + ((np[1] >> 3) & 0x03);
1413 1442 myu.octets[1] = ((np[1] & 0x07) << 5) + (np[2] & 0x1F);
1414 1443 myu.octets[2] = (hexdig(np[3]) << 4) + hexdig(np[4]);
1415 1444 myu.octets[3] = (hexdig(np[5]) << 4) + hexdig(np[6]);
1416 1445
1417 1446 return (myu.retval);
1418 1447 }
1419 1448
1420 1449 ACPI_STATUS
1421 1450 acpica_eval_int(ACPI_HANDLE dev, char *method, int *rint)
1422 1451 {
1423 1452 ACPI_STATUS status;
1424 1453 ACPI_BUFFER rb;
1425 1454 ACPI_OBJECT ro;
1426 1455
1427 1456 rb.Pointer = &ro;
1428 1457 rb.Length = sizeof (ro);
1429 1458 if ((status = AcpiEvaluateObjectTyped(dev, method, NULL, &rb,
1430 1459 ACPI_TYPE_INTEGER)) == AE_OK)
1431 1460 *rint = ro.Integer.Value;
1432 1461
1433 1462 return (status);
1434 1463 }
1435 1464
1436 1465 static int
1437 1466 acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint)
1438 1467 {
1439 1468 ACPI_BUFFER rb;
1440 1469 ACPI_OBJECT *rv;
1441 1470
1442 1471 rb.Pointer = NULL;
1443 1472 rb.Length = ACPI_ALLOCATE_BUFFER;
1444 1473 if (AcpiEvaluateObject(dev, method, NULL, &rb) == AE_OK &&
1445 1474 rb.Length != 0) {
1446 1475 rv = rb.Pointer;
1447 1476 if (rv->Type == ACPI_TYPE_INTEGER) {
1448 1477 *rint = rv->Integer.Value;
1449 1478 AcpiOsFree(rv);
1450 1479 return (AE_OK);
1451 1480 } else if (rv->Type == ACPI_TYPE_STRING) {
1452 1481 char *stringData;
1453 1482
1454 1483 /* Convert the string into an EISA ID */
1455 1484 if (rv->String.Pointer == NULL) {
1456 1485 AcpiOsFree(rv);
1457 1486 return (AE_ERROR);
1458 1487 }
1459 1488
1460 1489 stringData = rv->String.Pointer;
1461 1490
1462 1491 /*
1463 1492 * If the string is an EisaID, it must be 7
1464 1493 * characters; if it's an ACPI ID, it will be 8
1465 1494 * (and we don't care about ACPI ids here).
1466 1495 */
1467 1496 if (strlen(stringData) != 7) {
1468 1497 AcpiOsFree(rv);
1469 1498 return (AE_ERROR);
1470 1499 }
1471 1500
1472 1501 *rint = CompressEisaID(stringData);
1473 1502 AcpiOsFree(rv);
1474 1503 return (AE_OK);
1475 1504 } else
1476 1505 AcpiOsFree(rv);
1477 1506 }
1478 1507 return (AE_ERROR);
1479 1508 }
1480 1509
1481 1510 /*
1482 1511 * Create linkage between devinfo nodes and ACPI nodes
1483 1512 */
1484 1513 ACPI_STATUS
1485 1514 acpica_tag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1486 1515 {
1487 1516 ACPI_STATUS status;
1488 1517 ACPI_BUFFER rb;
1489 1518
1490 1519 /*
1491 1520 * Tag the devinfo node with the ACPI name
1492 1521 */
1493 1522 rb.Pointer = NULL;
1494 1523 rb.Length = ACPI_ALLOCATE_BUFFER;
1495 1524 status = AcpiGetName(acpiobj, ACPI_FULL_PATHNAME, &rb);
1496 1525 if (ACPI_FAILURE(status)) {
1497 1526 cmn_err(CE_WARN, "acpica: could not get ACPI path!");
1498 1527 } else {
1499 1528 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
1500 1529 "acpi-namespace", (char *)rb.Pointer);
1501 1530 AcpiOsFree(rb.Pointer);
1502 1531
1503 1532 /*
1504 1533 * Tag the ACPI node with the dip
1505 1534 */
1506 1535 status = acpica_set_devinfo(acpiobj, dip);
1507 1536 ASSERT(ACPI_SUCCESS(status));
1508 1537 }
1509 1538
1510 1539 return (status);
1511 1540 }
1512 1541
1513 1542 /*
1514 1543 * Destroy linkage between devinfo nodes and ACPI nodes
1515 1544 */
1516 1545 ACPI_STATUS
1517 1546 acpica_untag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1518 1547 {
1519 1548 (void) acpica_unset_devinfo(acpiobj);
1520 1549 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "acpi-namespace");
1521 1550
1522 1551 return (AE_OK);
1523 1552 }
1524 1553
1525 1554 /*
1526 1555 * Return the ACPI device node matching the CPU dev_info node.
1527 1556 */
1528 1557 ACPI_STATUS
1529 1558 acpica_get_handle_cpu(int cpu_id, ACPI_HANDLE *rh)
1530 1559 {
1531 1560 int i;
1532 1561
1533 1562 /*
1534 1563 * if cpu_map itself is NULL, we're a uppc system and
1535 1564 * acpica_build_processor_map() hasn't been called yet.
1536 1565 * So call it here
1537 1566 */
1538 1567 if (cpu_map == NULL) {
1539 1568 (void) acpica_build_processor_map();
1540 1569 if (cpu_map == NULL)
1541 1570 return (AE_ERROR);
1542 1571 }
1543 1572
1544 1573 if (cpu_id < 0) {
1545 1574 return (AE_ERROR);
1546 1575 }
1547 1576
1548 1577 /*
1549 1578 * search object with cpuid in cpu_map
1550 1579 */
1551 1580 mutex_enter(&cpu_map_lock);
1552 1581 for (i = 0; i < cpu_map_count; i++) {
1553 1582 if (cpu_map[i]->cpu_id == cpu_id) {
1554 1583 break;
1555 1584 }
1556 1585 }
1557 1586 if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
1558 1587 *rh = cpu_map[i]->obj;
1559 1588 mutex_exit(&cpu_map_lock);
1560 1589 return (AE_OK);
1561 1590 }
1562 1591
1563 1592 /* Handle special case for uppc-only systems. */
1564 1593 if (cpu_map_called == 0) {
1565 1594 uint32_t apicid = cpuid_get_apicid(CPU);
1566 1595 if (apicid != UINT32_MAX) {
1567 1596 for (i = 0; i < cpu_map_count; i++) {
1568 1597 if (cpu_map[i]->apic_id == apicid) {
1569 1598 break;
1570 1599 }
1571 1600 }
1572 1601 if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
1573 1602 *rh = cpu_map[i]->obj;
1574 1603 mutex_exit(&cpu_map_lock);
1575 1604 return (AE_OK);
1576 1605 }
1577 1606 }
1578 1607 }
1579 1608 mutex_exit(&cpu_map_lock);
1580 1609
1581 1610 return (AE_ERROR);
1582 1611 }
1583 1612
1584 1613 /*
1585 1614 * Determine if this object is a processor
1586 1615 */
1587 1616 static ACPI_STATUS
1588 1617 acpica_probe_processor(ACPI_HANDLE obj, UINT32 level, void *ctx, void **rv)
1589 1618 {
1590 1619 ACPI_STATUS status;
1591 1620 ACPI_OBJECT_TYPE objtype;
1592 1621 unsigned long acpi_id;
1593 1622 ACPI_BUFFER rb;
1594 1623 ACPI_DEVICE_INFO *di;
1595 1624
1596 1625 if (AcpiGetType(obj, &objtype) != AE_OK)
1597 1626 return (AE_OK);
1598 1627
1599 1628 if (objtype == ACPI_TYPE_PROCESSOR) {
1600 1629 /* process a Processor */
1601 1630 rb.Pointer = NULL;
1602 1631 rb.Length = ACPI_ALLOCATE_BUFFER;
1603 1632 status = AcpiEvaluateObjectTyped(obj, NULL, NULL, &rb,
1604 1633 ACPI_TYPE_PROCESSOR);
1605 1634 if (status != AE_OK) {
1606 1635 cmn_err(CE_WARN, "!acpica: error probing Processor");
1607 1636 return (status);
1608 1637 }
1609 1638 acpi_id = ((ACPI_OBJECT *)rb.Pointer)->Processor.ProcId;
1610 1639 AcpiOsFree(rb.Pointer);
1611 1640 } else if (objtype == ACPI_TYPE_DEVICE) {
1612 1641 /* process a processor Device */
1613 1642 status = AcpiGetObjectInfo(obj, &di);
1614 1643 if (status != AE_OK) {
1615 1644 cmn_err(CE_WARN,
1616 1645 "!acpica: error probing Processor Device\n");
1617 1646 return (status);
1618 1647 }
1619 1648
1620 1649 if (!(di->Valid & ACPI_VALID_UID) ||
1621 1650 ddi_strtoul(di->UniqueId.String, NULL, 10, &acpi_id) != 0) {
1622 1651 ACPI_FREE(di);
1623 1652 cmn_err(CE_WARN,
1624 1653 "!acpica: error probing Processor Device _UID\n");
1625 1654 return (AE_ERROR);
1626 1655 }
1627 1656 ACPI_FREE(di);
1628 1657 }
1629 1658 (void) acpica_add_processor_to_map(acpi_id, obj, UINT32_MAX);
1630 1659
1631 1660 return (AE_OK);
1632 1661 }
1633 1662
1634 1663 void
1635 1664 scan_d2a_map(void)
1636 1665 {
1637 1666 dev_info_t *dip, *cdip;
1638 1667 ACPI_HANDLE acpiobj;
1639 1668 char *device_type_prop;
1640 1669 int bus;
1641 1670 static int map_error = 0;
1642 1671
1643 1672 if (map_error || (d2a_done != 0))
1644 1673 return;
1645 1674
1646 1675 scanning_d2a_map = 1;
1647 1676
1648 1677 /*
1649 1678 * Find all child-of-root PCI buses, and find their corresponding
1650 1679 * ACPI child-of-root PCI nodes. For each one, add to the
1651 1680 * d2a table.
1652 1681 */
1653 1682
1654 1683 for (dip = ddi_get_child(ddi_root_node());
1655 1684 dip != NULL;
1656 1685 dip = ddi_get_next_sibling(dip)) {
1657 1686
1658 1687 /* prune non-PCI nodes */
1659 1688 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
1660 1689 DDI_PROP_DONTPASS,
1661 1690 "device_type", &device_type_prop) != DDI_PROP_SUCCESS)
1662 1691 continue;
1663 1692
1664 1693 if ((strcmp("pci", device_type_prop) != 0) &&
1665 1694 (strcmp("pciex", device_type_prop) != 0)) {
1666 1695 ddi_prop_free(device_type_prop);
1667 1696 continue;
1668 1697 }
1669 1698
1670 1699 ddi_prop_free(device_type_prop);
1671 1700
1672 1701 /*
1673 1702 * To get bus number of dip, get first child and get its
1674 1703 * bus number. If NULL, just continue, because we don't
1675 1704 * care about bus nodes with no children anyway.
1676 1705 */
1677 1706 if ((cdip = ddi_get_child(dip)) == NULL)
1678 1707 continue;
1679 1708
1680 1709 if (acpica_get_bdf(cdip, &bus, NULL, NULL) < 0) {
1681 1710 #ifdef D2ADEBUG
1682 1711 cmn_err(CE_WARN, "Can't get bus number of PCI child?");
1683 1712 #endif
1684 1713 map_error = 1;
1685 1714 scanning_d2a_map = 0;
1686 1715 d2a_done = 1;
1687 1716 return;
1688 1717 }
1689 1718
1690 1719 if (acpica_find_pcibus(bus, &acpiobj) == AE_ERROR) {
1691 1720 #ifdef D2ADEBUG
1692 1721 cmn_err(CE_WARN, "No ACPI bus obj for bus %d?\n", bus);
1693 1722 #endif
1694 1723 map_error = 1;
1695 1724 continue;
1696 1725 }
1697 1726
1698 1727 acpica_tag_devinfo(dip, acpiobj);
1699 1728
1700 1729 /* call recursively to enumerate subtrees */
1701 1730 scan_d2a_subtree(dip, acpiobj, bus);
1702 1731 }
1703 1732
1704 1733 scanning_d2a_map = 0;
1705 1734 d2a_done = 1;
1706 1735 }
1707 1736
1708 1737 /*
1709 1738 * For all acpi child devices of acpiobj, find their matching
1710 1739 * dip under "dip" argument. (matching means "matches dev/fn").
1711 1740 * bus is assumed to already be a match from caller, and is
1712 1741 * used here only to record in the d2a entry. Recurse if necessary.
1713 1742 */
1714 1743 static void
1715 1744 scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus)
1716 1745 {
1717 1746 int acpi_devfn, hid;
1718 1747 ACPI_HANDLE acld;
1719 1748 dev_info_t *dcld;
1720 1749 int dcld_b, dcld_d, dcld_f;
1721 1750 int dev, func;
1722 1751 char *device_type_prop;
1723 1752
1724 1753 acld = NULL;
1725 1754 while (AcpiGetNextObject(ACPI_TYPE_DEVICE, acpiobj, acld, &acld)
1726 1755 == AE_OK) {
1727 1756 /* get the dev/func we're looking for in the devinfo tree */
1728 1757 if (acpica_eval_int(acld, "_ADR", &acpi_devfn) != AE_OK)
1729 1758 continue;
1730 1759 dev = (acpi_devfn >> 16) & 0xFFFF;
1731 1760 func = acpi_devfn & 0xFFFF;
1732 1761
1733 1762 /* look through all the immediate children of dip */
1734 1763 for (dcld = ddi_get_child(dip); dcld != NULL;
1735 1764 dcld = ddi_get_next_sibling(dcld)) {
1736 1765 if (acpica_get_bdf(dcld, &dcld_b, &dcld_d, &dcld_f) < 0)
1737 1766 continue;
1738 1767
1739 1768 /* dev must match; function must match or wildcard */
1740 1769 if (dcld_d != dev ||
1741 1770 (func != 0xFFFF && func != dcld_f))
1742 1771 continue;
1743 1772 bus = dcld_b;
1744 1773
1745 1774 /* found a match, record it */
1746 1775 acpica_tag_devinfo(dcld, acld);
1747 1776
1748 1777 /* if we find a bridge, recurse from here */
1749 1778 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dcld,
1750 1779 DDI_PROP_DONTPASS, "device_type",
1751 1780 &device_type_prop) == DDI_PROP_SUCCESS) {
1752 1781 if ((strcmp("pci", device_type_prop) == 0) ||
1753 1782 (strcmp("pciex", device_type_prop) == 0))
1754 1783 scan_d2a_subtree(dcld, acld, bus);
1755 1784 ddi_prop_free(device_type_prop);
1756 1785 }
1757 1786
1758 1787 /* done finding a match, so break now */
1759 1788 break;
1760 1789 }
1761 1790 }
1762 1791 }
1763 1792
1764 1793 /*
1765 1794 * Return bus/dev/fn for PCI dip (note: not the parent "pci" node).
1766 1795 */
1767 1796 int
1768 1797 acpica_get_bdf(dev_info_t *dip, int *bus, int *device, int *func)
1769 1798 {
1770 1799 pci_regspec_t *pci_rp;
1771 1800 int len;
1772 1801
1773 1802 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1774 1803 "reg", (int **)&pci_rp, (uint_t *)&len) != DDI_SUCCESS)
1775 1804 return (-1);
1776 1805
1777 1806 if (len < (sizeof (pci_regspec_t) / sizeof (int))) {
1778 1807 ddi_prop_free(pci_rp);
1779 1808 return (-1);
1780 1809 }
1781 1810 if (bus != NULL)
1782 1811 *bus = (int)PCI_REG_BUS_G(pci_rp->pci_phys_hi);
1783 1812 if (device != NULL)
1784 1813 *device = (int)PCI_REG_DEV_G(pci_rp->pci_phys_hi);
1785 1814 if (func != NULL)
1786 1815 *func = (int)PCI_REG_FUNC_G(pci_rp->pci_phys_hi);
1787 1816 ddi_prop_free(pci_rp);
1788 1817 return (0);
1789 1818 }
1790 1819
1791 1820 /*
1792 1821 * Return the ACPI device node matching this dev_info node, if it
1793 1822 * exists in the ACPI tree.
1794 1823 */
1795 1824 ACPI_STATUS
1796 1825 acpica_get_handle(dev_info_t *dip, ACPI_HANDLE *rh)
1797 1826 {
1798 1827 ACPI_STATUS status;
1799 1828 char *acpiname;
1800 1829
1801 1830 #ifdef DEBUG
1802 1831 if (d2a_done == 0)
1803 1832 cmn_err(CE_WARN, "!acpica_get_handle:"
1804 1833 " no ACPI mapping for %s", ddi_node_name(dip));
1805 1834 #endif
1806 1835
1807 1836 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1808 1837 "acpi-namespace", &acpiname) != DDI_PROP_SUCCESS) {
1809 1838 return (AE_ERROR);
1810 1839 }
1811 1840
1812 1841 status = AcpiGetHandle(NULL, acpiname, rh);
1813 1842 ddi_prop_free((void *)acpiname);
1814 1843 return (status);
1815 1844 }
1816 1845
1817 1846
1818 1847
1819 1848 /*
1820 1849 * Manage OS data attachment to ACPI nodes
1821 1850 */
1822 1851
1823 1852 /*
1824 1853 * Return the (dev_info_t *) associated with the ACPI node.
1825 1854 */
1826 1855 ACPI_STATUS
1827 1856 acpica_get_devinfo(ACPI_HANDLE obj, dev_info_t **dipp)
1828 1857 {
1829 1858 ACPI_STATUS status;
1830 1859 void *ptr;
1831 1860
1832 1861 status = AcpiGetData(obj, acpica_devinfo_handler, &ptr);
1833 1862 if (status == AE_OK)
1834 1863 *dipp = (dev_info_t *)ptr;
1835 1864
1836 1865 return (status);
1837 1866 }
1838 1867
1839 1868 /*
1840 1869 * Set the dev_info_t associated with the ACPI node.
1841 1870 */
1842 1871 static ACPI_STATUS
1843 1872 acpica_set_devinfo(ACPI_HANDLE obj, dev_info_t *dip)
1844 1873 {
1845 1874 ACPI_STATUS status;
1846 1875
1847 1876 status = AcpiAttachData(obj, acpica_devinfo_handler, (void *)dip);
1848 1877 return (status);
1849 1878 }
1850 1879
1851 1880 /*
1852 1881 * Unset the dev_info_t associated with the ACPI node.
1853 1882 */
1854 1883 static ACPI_STATUS
1855 1884 acpica_unset_devinfo(ACPI_HANDLE obj)
1856 1885 {
1857 1886 return (AcpiDetachData(obj, acpica_devinfo_handler));
1858 1887 }
1859 1888
1860 1889 /*
1861 1890 *
1862 1891 */
1863 1892 void
1864 1893 acpica_devinfo_handler(ACPI_HANDLE obj, void *data)
1865 1894 {
1866 1895 /* no-op */
1867 1896 }
1868 1897
1869 1898 ACPI_STATUS
1870 1899 acpica_build_processor_map(void)
1871 1900 {
1872 1901 ACPI_STATUS status;
1873 1902 void *rv;
1874 1903
1875 1904 /*
1876 1905 * shouldn't be called more than once anyway
1877 1906 */
1878 1907 if (cpu_map_built)
1879 1908 return (AE_OK);
1880 1909
1881 1910 /*
1882 1911 * ACPI device configuration driver has built mapping information
1883 1912 * among processor id and object handle, no need to probe again.
1884 1913 */
1885 1914 if (acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
1886 1915 cpu_map_built = 1;
1887 1916 return (AE_OK);
1888 1917 }
1889 1918
1890 1919 /*
1891 1920 * Look for Processor objects
1892 1921 */
1893 1922 status = AcpiWalkNamespace(ACPI_TYPE_PROCESSOR,
1894 1923 ACPI_ROOT_OBJECT,
1895 1924 4,
1896 1925 acpica_probe_processor,
1897 1926 NULL,
1898 1927 NULL,
1899 1928 &rv);
1900 1929 ASSERT(status == AE_OK);
1901 1930
1902 1931 /*
1903 1932 * Look for processor Device objects
1904 1933 */
1905 1934 status = AcpiGetDevices("ACPI0007",
1906 1935 acpica_probe_processor,
1907 1936 NULL,
1908 1937 &rv);
1909 1938 ASSERT(status == AE_OK);
1910 1939 cpu_map_built = 1;
1911 1940
1912 1941 return (status);
1913 1942 }
1914 1943
1915 1944 /*
1916 1945 * Grow cpu map table on demand.
1917 1946 */
1918 1947 static void
1919 1948 acpica_grow_cpu_map(void)
1920 1949 {
1921 1950 if (cpu_map_count == cpu_map_count_max) {
1922 1951 size_t sz;
1923 1952 struct cpu_map_item **new_map;
1924 1953
1925 1954 ASSERT(cpu_map_count_max < INT_MAX / 2);
1926 1955 cpu_map_count_max += max_ncpus;
1927 1956 new_map = kmem_zalloc(sizeof (cpu_map[0]) * cpu_map_count_max,
1928 1957 KM_SLEEP);
1929 1958 if (cpu_map_count != 0) {
1930 1959 ASSERT(cpu_map != NULL);
1931 1960 sz = sizeof (cpu_map[0]) * cpu_map_count;
1932 1961 kcopy(cpu_map, new_map, sz);
1933 1962 kmem_free(cpu_map, sz);
1934 1963 }
1935 1964 cpu_map = new_map;
1936 1965 }
1937 1966 }
1938 1967
1939 1968 /*
1940 1969 * Maintain mapping information among (cpu id, ACPI processor id, APIC id,
1941 1970  * ACPI handle). The mapping table is set up in two steps:
1942 1971 * 1) acpica_add_processor_to_map() builds mapping among APIC id, ACPI
1943 1972 * processor id and ACPI object handle.
1944 1973 * 2) acpica_map_cpu() builds mapping among cpu id and ACPI processor id.
1945 1974  * On systems which have ACPI device configuration for CPUs enabled,
1946 1975  * acpica_map_cpu() will be called after acpica_add_processor_to_map();
1947 1976  * otherwise acpica_map_cpu() will be called before
1948 1977  * acpica_add_processor_to_map() (see the sketch after acpica_map_cpu()).
1949 1978 */
1950 1979 ACPI_STATUS
1951 1980 acpica_add_processor_to_map(UINT32 acpi_id, ACPI_HANDLE obj, UINT32 apic_id)
1952 1981 {
1953 1982 int i;
1954 1983 ACPI_STATUS rc = AE_OK;
1955 1984 struct cpu_map_item *item = NULL;
1956 1985
1957 1986 ASSERT(obj != NULL);
1958 1987 if (obj == NULL) {
1959 1988 return (AE_ERROR);
1960 1989 }
1961 1990
1962 1991 mutex_enter(&cpu_map_lock);
1963 1992
1964 1993 /*
1965 1994 * Special case for uppc
1966 1995  * If we're a uppc system and ACPI device configuration for CPUs has
1967 1996  * been disabled, there won't be a CPU map yet because the uppc psm
1968 1997  * doesn't call acpica_map_cpu(). So create one and use the passed-in
1969 1998  * processor as CPU 0.
1970 1999 * Assumption: the first CPU returned by
1971 2000 * AcpiGetDevices/AcpiWalkNamespace will be the BSP.
1972 2001 * Unfortunately there appears to be no good way to ASSERT this.
1973 2002 */
1974 2003 if (cpu_map == NULL &&
1975 2004 !acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
1976 2005 acpica_grow_cpu_map();
1977 2006 ASSERT(cpu_map != NULL);
1978 2007 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
1979 2008 item->cpu_id = 0;
1980 2009 item->proc_id = acpi_id;
1981 2010 item->apic_id = apic_id;
1982 2011 item->obj = obj;
1983 2012 cpu_map[0] = item;
1984 2013 cpu_map_count = 1;
1985 2014 mutex_exit(&cpu_map_lock);
1986 2015 return (AE_OK);
1987 2016 }
1988 2017
1989 2018 for (i = 0; i < cpu_map_count; i++) {
1990 2019 if (cpu_map[i]->obj == obj) {
1991 2020 rc = AE_ALREADY_EXISTS;
1992 2021 break;
1993 2022 } else if (cpu_map[i]->proc_id == acpi_id) {
1994 2023 ASSERT(item == NULL);
1995 2024 item = cpu_map[i];
1996 2025 }
1997 2026 }
1998 2027
1999 2028 if (rc == AE_OK) {
2000 2029 if (item != NULL) {
2001 2030 /*
2002 2031  * ACPI alias objects may cause more than one object
2003 2032  * with the same ACPI processor id; only remember
2004 2033  * the first object encountered.
2005 2034 */
2006 2035 if (item->obj == NULL) {
2007 2036 item->obj = obj;
2008 2037 item->apic_id = apic_id;
2009 2038 } else {
2010 2039 rc = AE_ALREADY_EXISTS;
2011 2040 }
2012 2041 } else if (cpu_map_count >= INT_MAX / 2) {
2013 2042 rc = AE_NO_MEMORY;
2014 2043 } else {
2015 2044 acpica_grow_cpu_map();
2016 2045 ASSERT(cpu_map != NULL);
2017 2046 ASSERT(cpu_map_count < cpu_map_count_max);
2018 2047 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2019 2048 item->cpu_id = -1;
2020 2049 item->proc_id = acpi_id;
2021 2050 item->apic_id = apic_id;
2022 2051 item->obj = obj;
2023 2052 cpu_map[cpu_map_count] = item;
2024 2053 cpu_map_count++;
2025 2054 }
2026 2055 }
2027 2056
2028 2057 mutex_exit(&cpu_map_lock);
2029 2058
2030 2059 return (rc);
2031 2060 }
2032 2061
2033 2062 ACPI_STATUS
2034 2063 acpica_remove_processor_from_map(UINT32 acpi_id)
2035 2064 {
2036 2065 int i;
2037 2066 ACPI_STATUS rc = AE_NOT_EXIST;
2038 2067
2039 2068 mutex_enter(&cpu_map_lock);
2040 2069 for (i = 0; i < cpu_map_count; i++) {
2041 2070 if (cpu_map[i]->proc_id != acpi_id) {
2042 2071 continue;
2043 2072 }
2044 2073 cpu_map[i]->obj = NULL;
2045 2074 		/* Free the item if there are no more references to it. */
2046 2075 if (cpu_map[i]->cpu_id == -1) {
2047 2076 kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
2048 2077 cpu_map[i] = NULL;
2049 2078 cpu_map_count--;
2050 2079 if (i != cpu_map_count) {
2051 2080 cpu_map[i] = cpu_map[cpu_map_count];
2052 2081 cpu_map[cpu_map_count] = NULL;
2053 2082 }
2054 2083 }
2055 2084 rc = AE_OK;
2056 2085 break;
2057 2086 }
2058 2087 mutex_exit(&cpu_map_lock);
2059 2088
2060 2089 return (rc);
2061 2090 }
2062 2091
2063 2092 ACPI_STATUS
2064 2093 acpica_map_cpu(processorid_t cpuid, UINT32 acpi_id)
2065 2094 {
2066 2095 int i;
2067 2096 ACPI_STATUS rc = AE_OK;
2068 2097 struct cpu_map_item *item = NULL;
2069 2098
2070 2099 ASSERT(cpuid != -1);
2071 2100 if (cpuid == -1) {
2072 2101 return (AE_ERROR);
2073 2102 }
2074 2103
2075 2104 mutex_enter(&cpu_map_lock);
2076 2105 cpu_map_called = 1;
2077 2106 for (i = 0; i < cpu_map_count; i++) {
2078 2107 if (cpu_map[i]->cpu_id == cpuid) {
2079 2108 rc = AE_ALREADY_EXISTS;
2080 2109 break;
2081 2110 } else if (cpu_map[i]->proc_id == acpi_id) {
2082 2111 ASSERT(item == NULL);
2083 2112 item = cpu_map[i];
2084 2113 }
2085 2114 }
2086 2115 if (rc == AE_OK) {
2087 2116 if (item != NULL) {
2088 2117 if (item->cpu_id == -1) {
2089 2118 item->cpu_id = cpuid;
2090 2119 } else {
2091 2120 rc = AE_ALREADY_EXISTS;
2092 2121 }
2093 2122 } else if (cpu_map_count >= INT_MAX / 2) {
2094 2123 rc = AE_NO_MEMORY;
2095 2124 } else {
2096 2125 acpica_grow_cpu_map();
2097 2126 ASSERT(cpu_map != NULL);
2098 2127 ASSERT(cpu_map_count < cpu_map_count_max);
2099 2128 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2100 2129 item->cpu_id = cpuid;
2101 2130 item->proc_id = acpi_id;
2102 2131 item->apic_id = UINT32_MAX;
2103 2132 item->obj = NULL;
2104 2133 cpu_map[cpu_map_count] = item;
2105 2134 cpu_map_count++;
2106 2135 }
2107 2136 }
2108 2137 mutex_exit(&cpu_map_lock);
2109 2138
2110 2139 return (rc);
2111 2140 }
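/*
 * Illustrative sketch, not part of the original file: the two-step mapping
 * described in the comment above acpica_add_processor_to_map().  Step 1
 * happens via acpica_build_processor_map() (or the ACPI device configuration
 * driver); a psm module then performs step 2 by calling acpica_map_cpu().
 * The cpu id and ACPI processor id used here are placeholders, and the
 * lookup assumes the prototypes exported through <sys/acpica.h>.
 */
static void
example_map_and_lookup(void)
{
	ACPI_HANDLE hdl;

	(void) acpica_build_processor_map();	/* step 1: ACPI ids -> handles */
	if (acpica_map_cpu(0, 1) != AE_OK)	/* step 2: cpu 0 <-> ACPI id 1 */
		return;
	if (acpica_get_cpu_object_by_cpuid(0, &hdl) == AE_OK) {
		/* hdl now refers to the processor object for cpu 0 */
	}
}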
2112 2141
2113 2142 ACPI_STATUS
2114 2143 acpica_unmap_cpu(processorid_t cpuid)
2115 2144 {
2116 2145 int i;
2117 2146 ACPI_STATUS rc = AE_NOT_EXIST;
2118 2147
2119 2148 ASSERT(cpuid != -1);
2120 2149 if (cpuid == -1) {
2121 2150 return (rc);
2122 2151 }
2123 2152
2124 2153 mutex_enter(&cpu_map_lock);
2125 2154 for (i = 0; i < cpu_map_count; i++) {
2126 2155 if (cpu_map[i]->cpu_id != cpuid) {
2127 2156 continue;
2128 2157 }
2129 2158 cpu_map[i]->cpu_id = -1;
2130 2159 		/* Free the item if there are no more references to it. */
2131 2160 if (cpu_map[i]->obj == NULL) {
2132 2161 kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
2133 2162 cpu_map[i] = NULL;
2134 2163 cpu_map_count--;
2135 2164 if (i != cpu_map_count) {
2136 2165 cpu_map[i] = cpu_map[cpu_map_count];
2137 2166 cpu_map[cpu_map_count] = NULL;
2138 2167 }
2139 2168 }
2140 2169 rc = AE_OK;
2141 2170 break;
2142 2171 }
2143 2172 mutex_exit(&cpu_map_lock);
2144 2173
2145 2174 return (rc);
2146 2175 }
2147 2176
2148 2177 ACPI_STATUS
2149 2178 acpica_get_cpu_object_by_cpuid(processorid_t cpuid, ACPI_HANDLE *hdlp)
2150 2179 {
2151 2180 int i;
2152 2181 ACPI_STATUS rc = AE_NOT_EXIST;
2153 2182
2154 2183 ASSERT(cpuid != -1);
2155 2184 if (cpuid == -1) {
2156 2185 return (rc);
2157 2186 }
2158 2187
2159 2188 mutex_enter(&cpu_map_lock);
2160 2189 for (i = 0; i < cpu_map_count; i++) {
2161 2190 if (cpu_map[i]->cpu_id == cpuid && cpu_map[i]->obj != NULL) {
2162 2191 *hdlp = cpu_map[i]->obj;
2163 2192 rc = AE_OK;
2164 2193 break;
2165 2194 }
2166 2195 }
2167 2196 mutex_exit(&cpu_map_lock);
2168 2197
2169 2198 return (rc);
2170 2199 }
2171 2200
2172 2201 ACPI_STATUS
2173 2202 acpica_get_cpu_object_by_procid(UINT32 procid, ACPI_HANDLE *hdlp)
2174 2203 {
2175 2204 int i;
2176 2205 ACPI_STATUS rc = AE_NOT_EXIST;
2177 2206
2178 2207 mutex_enter(&cpu_map_lock);
2179 2208 for (i = 0; i < cpu_map_count; i++) {
2180 2209 if (cpu_map[i]->proc_id == procid && cpu_map[i]->obj != NULL) {
2181 2210 *hdlp = cpu_map[i]->obj;
2182 2211 rc = AE_OK;
2183 2212 break;
2184 2213 }
2185 2214 }
2186 2215 mutex_exit(&cpu_map_lock);
2187 2216
2188 2217 return (rc);
2189 2218 }
2190 2219
2191 2220 ACPI_STATUS
2192 2221 acpica_get_cpu_object_by_apicid(UINT32 apicid, ACPI_HANDLE *hdlp)
2193 2222 {
2194 2223 int i;
2195 2224 ACPI_STATUS rc = AE_NOT_EXIST;
2196 2225
2197 2226 ASSERT(apicid != UINT32_MAX);
2198 2227 if (apicid == UINT32_MAX) {
2199 2228 return (rc);
2200 2229 }
2201 2230
2202 2231 mutex_enter(&cpu_map_lock);
2203 2232 for (i = 0; i < cpu_map_count; i++) {
2204 2233 if (cpu_map[i]->apic_id == apicid && cpu_map[i]->obj != NULL) {
2205 2234 *hdlp = cpu_map[i]->obj;
2206 2235 rc = AE_OK;
2207 2236 break;
2208 2237 }
2209 2238 }
2210 2239 mutex_exit(&cpu_map_lock);
2211 2240
2212 2241 return (rc);
2213 2242 }
2214 2243
2215 2244 ACPI_STATUS
2216 2245 acpica_get_cpu_id_by_object(ACPI_HANDLE hdl, processorid_t *cpuidp)
2217 2246 {
2218 2247 int i;
2219 2248 ACPI_STATUS rc = AE_NOT_EXIST;
2220 2249
2221 2250 ASSERT(cpuidp != NULL);
2222 2251 if (hdl == NULL || cpuidp == NULL) {
2223 2252 return (rc);
2224 2253 }
2225 2254
2226 2255 *cpuidp = -1;
2227 2256 mutex_enter(&cpu_map_lock);
2228 2257 for (i = 0; i < cpu_map_count; i++) {
2229 2258 if (cpu_map[i]->obj == hdl && cpu_map[i]->cpu_id != -1) {
2230 2259 *cpuidp = cpu_map[i]->cpu_id;
2231 2260 rc = AE_OK;
2232 2261 break;
2233 2262 }
2234 2263 }
2235 2264 mutex_exit(&cpu_map_lock);
2236 2265
2237 2266 return (rc);
2238 2267 }
2239 2268
2240 2269 ACPI_STATUS
2241 2270 acpica_get_apicid_by_object(ACPI_HANDLE hdl, UINT32 *rp)
2242 2271 {
2243 2272 int i;
2244 2273 ACPI_STATUS rc = AE_NOT_EXIST;
2245 2274
2246 2275 ASSERT(rp != NULL);
2247 2276 if (hdl == NULL || rp == NULL) {
2248 2277 return (rc);
2249 2278 }
2250 2279
2251 2280 *rp = UINT32_MAX;
2252 2281 mutex_enter(&cpu_map_lock);
2253 2282 for (i = 0; i < cpu_map_count; i++) {
2254 2283 if (cpu_map[i]->obj == hdl &&
2255 2284 cpu_map[i]->apic_id != UINT32_MAX) {
2256 2285 *rp = cpu_map[i]->apic_id;
2257 2286 rc = AE_OK;
2258 2287 break;
2259 2288 }
2260 2289 }
2261 2290 mutex_exit(&cpu_map_lock);
2262 2291
2263 2292 return (rc);
2264 2293 }
2265 2294
2266 2295 ACPI_STATUS
2267 2296 acpica_get_procid_by_object(ACPI_HANDLE hdl, UINT32 *rp)
2268 2297 {
2269 2298 int i;
2270 2299 ACPI_STATUS rc = AE_NOT_EXIST;
2271 2300
2272 2301 ASSERT(rp != NULL);
2273 2302 if (hdl == NULL || rp == NULL) {
2274 2303 return (rc);
2275 2304 }
2276 2305
2277 2306 *rp = UINT32_MAX;
2278 2307 mutex_enter(&cpu_map_lock);
2279 2308 for (i = 0; i < cpu_map_count; i++) {
2280 2309 if (cpu_map[i]->obj == hdl) {
2281 2310 *rp = cpu_map[i]->proc_id;
2282 2311 rc = AE_OK;
2283 2312 break;
2284 2313 }
2285 2314 }
2286 2315 mutex_exit(&cpu_map_lock);
2287 2316
2288 2317 return (rc);
2289 2318 }
2290 2319
2291 2320 void
2292 2321 acpica_set_core_feature(uint64_t features)
2293 2322 {
2294 2323 atomic_or_64(&acpica_core_features, features);
2295 2324 }
2296 2325
2297 2326 void
2298 2327 acpica_clear_core_feature(uint64_t features)
2299 2328 {
2300 2329 atomic_and_64(&acpica_core_features, ~features);
2301 2330 }
2302 2331
2303 2332 uint64_t
2304 2333 acpica_get_core_feature(uint64_t features)
2305 2334 {
2306 2335 return (acpica_core_features & features);
2307 2336 }
2308 2337
2309 2338 void
2310 2339 acpica_set_devcfg_feature(uint64_t features)
2311 2340 {
2312 2341 atomic_or_64(&acpica_devcfg_features, features);
2313 2342 }
2314 2343
2315 2344 void
2316 2345 acpica_clear_devcfg_feature(uint64_t features)
2317 2346 {
2318 2347 atomic_and_64(&acpica_devcfg_features, ~features);
2319 2348 }
2320 2349
2321 2350 uint64_t
2322 2351 acpica_get_devcfg_feature(uint64_t features)
2323 2352 {
2324 2353 return (acpica_devcfg_features & features);
2325 2354 }
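/*
 * Illustrative sketch, not part of the original file: the feature accessors
 * above are atomic bit-mask operations, so a caller typically tests a single
 * flag, as osl.c itself does with ACPI_DEVCFG_CPU.  The function name is
 * invented.
 */
static boolean_t
example_cpu_devcfg_enabled(void)
{
	return (acpica_get_devcfg_feature(ACPI_DEVCFG_CPU) != 0 ?
	    B_TRUE : B_FALSE);
}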
2326 2355
2327 2356 void
2328 2357 acpica_get_global_FADT(ACPI_TABLE_FADT **gbl_FADT)
2329 2358 {
2330 2359 *gbl_FADT = &AcpiGbl_FADT;
2331 2360 }
2332 2361
2333 2362 void
2334 2363 acpica_write_cpupm_capabilities(boolean_t pstates, boolean_t cstates)
2335 2364 {
2336 2365 if (pstates && AcpiGbl_FADT.PstateControl != 0)
2337 2366 (void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
2338 2367 AcpiGbl_FADT.PstateControl);
2339 2368
2340 2369 if (cstates && AcpiGbl_FADT.CstControl != 0)
2341 2370 (void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
2342 2371 AcpiGbl_FADT.CstControl);
2343 2372 }
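/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * power-management caller (example_claim_cpupm_control is an invented name)
 * claiming OS control of both P-states and C-states through the FADT SMI
 * command register, using the two helpers above.
 */
static void
example_claim_cpupm_control(void)
{
	ACPI_TABLE_FADT *fadt;

	acpica_get_global_FADT(&fadt);
	if (fadt->SmiCommand != 0)
		acpica_write_cpupm_capabilities(B_TRUE, B_TRUE);
}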
1409 lines elided |