1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2012 Joyent, Inc. All rights reserved.
26 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
27 * Copyright 2013 PALO, Richard. All rights reserved.
28 */
29
30 /*
31 * Copyright (c) 2009-2010, Intel Corporation.
32 * All rights reserved.
33 */
34
35 /*
36 * x86 ACPI CA OSL
37 */
38
39 #include <sys/types.h>
40 #include <sys/kmem.h>
41 #include <sys/psm.h>
42 #include <sys/pci_cfgspace.h>
43 #include <sys/apic.h>
44 #include <sys/ddi.h>
45 #include <sys/sunddi.h>
46 #include <sys/sunndi.h>
47 #include <sys/pci.h>
48 #include <sys/kobj.h>
49 #include <sys/taskq.h>
50 #include <sys/strlog.h>
51 #include <sys/x86_archext.h>
52 #include <sys/note.h>
53 #include <sys/promif.h>
54
55 #include <acpica/include/accommon.h>
56 #include <sys/acpica.h>
57
58 #define MAX_DAT_FILE_SIZE (64*1024)
59
60 /* local functions */
61 static int CompressEisaID(char *np);
62
63 static void scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus);
64 static int acpica_query_bbn_problem(void);
65 static int acpica_find_pcibus(int busno, ACPI_HANDLE *rh);
66 static int acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint);
67 static ACPI_STATUS acpica_set_devinfo(ACPI_HANDLE, dev_info_t *);
68 static ACPI_STATUS acpica_unset_devinfo(ACPI_HANDLE);
69 static void acpica_devinfo_handler(ACPI_HANDLE, void *);
70
71 /*
72 * Event queue vars
73 */
74 int acpica_eventq_init = 0;
75 ddi_taskq_t *osl_eventq[OSL_EC_BURST_HANDLER+1];
76
77 /*
 * Priorities relative to minclsyspri at which each taskq
 * runs; OSL_NOTIFY_HANDLER needs to run at a higher
80 * priority than OSL_GPE_HANDLER. There's an implicit
81 * assumption that no priority here results in exceeding
82 * maxclsyspri.
83 * Note: these initializations need to match the order of
84 * ACPI_EXECUTE_TYPE.
85 */
86 int osl_eventq_pri_delta[OSL_EC_BURST_HANDLER+1] = {
87 0, /* OSL_GLOBAL_LOCK_HANDLER */
88 2, /* OSL_NOTIFY_HANDLER */
89 0, /* OSL_GPE_HANDLER */
90 0, /* OSL_DEBUGGER_THREAD */
91 0, /* OSL_EC_POLL_HANDLER */
92 0 /* OSL_EC_BURST_HANDLER */
93 };
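/*
 * With the deltas above, init_event_queues() creates the
 * OSL_NOTIFY_HANDLER taskq at minclsyspri + 2 and all of the other
 * taskqs at minclsyspri.
 */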
94
95 /*
96 * Note, if you change this path, you need to update
97 * /boot/grub/filelist.ramdisk and pkg SUNWckr/prototype_i386
98 */
99 static char *acpi_table_path = "/boot/acpi/tables/";
100
101 /* non-zero while scan_d2a_map() is working */
102 static int scanning_d2a_map = 0;
103 static int d2a_done = 0;
104
105 /* features supported by ACPICA and ACPI device configuration. */
106 uint64_t acpica_core_features = ACPI_FEATURE_OSI_MODULE;
107 static uint64_t acpica_devcfg_features = 0;
108
109 /* set by acpi_poweroff() in PSMs and appm_ioctl() in acpippm for S3 */
110 int acpica_use_safe_delay = 0;
111
112 /* CPU mapping data */
113 struct cpu_map_item {
114 processorid_t cpu_id;
115 UINT32 proc_id;
116 UINT32 apic_id;
117 ACPI_HANDLE obj;
118 };
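/*
 * In a cpu_map_item, cpu_id == -1 means the entry has not yet been bound
 * to a kernel cpu id, apic_id == UINT32_MAX means the local APIC id is
 * not (yet) known, and obj == NULL means no ACPI processor object has
 * been associated with the entry.
 */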
119
120 kmutex_t cpu_map_lock;
121 static struct cpu_map_item **cpu_map = NULL;
122 static int cpu_map_count_max = 0;
123 static int cpu_map_count = 0;
124 static int cpu_map_built = 0;
125
126 /*
127 * On systems with the uppc PSM only, acpica_map_cpu() won't be called at all.
128 * This flag is used to check for uppc-only systems by detecting whether
129 * acpica_map_cpu() has been called or not.
130 */
131 static int cpu_map_called = 0;
132
133 static int acpi_has_broken_bbn = -1;
134
135 /* buffer for AcpiOsVprintf() */
136 #define ACPI_OSL_PR_BUFLEN 1024
137 static char *acpi_osl_pr_buffer = NULL;
138 static int acpi_osl_pr_buflen;
139
/* Define D2ADEBUG to enable devinfo-to-ACPI map debug output. */
141
/*
 * Destroy any event-handling taskqs that have been created.
 */
145 static void
146 discard_event_queues()
147 {
148 int i;
149
150 /*
151 * destroy event queues
152 */
153 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
154 if (osl_eventq[i])
155 ddi_taskq_destroy(osl_eventq[i]);
156 }
157 }
158
159
/*
 * Create one single-threaded taskq per ACPI_EXECUTE_TYPE for running
 * ACPI CA callbacks.
 */
163 static ACPI_STATUS
164 init_event_queues()
165 {
166 char namebuf[32];
167 int i, error = 0;
168
169 /*
170 * Initialize event queues
171 */
172
173 /* Always allocate only 1 thread per queue to force FIFO execution */
174 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
175 snprintf(namebuf, 32, "ACPI%d", i);
176 osl_eventq[i] = ddi_taskq_create(NULL, namebuf, 1,
177 osl_eventq_pri_delta[i] + minclsyspri, 0);
178 if (osl_eventq[i] == NULL)
179 error++;
180 }
181
182 if (error != 0) {
183 discard_event_queues();
184 #ifdef DEBUG
185 cmn_err(CE_WARN, "!acpica: could not initialize event queues");
186 #endif
187 return (AE_ERROR);
188 }
189
190 acpica_eventq_init = 1;
191 return (AE_OK);
192 }
193
194 /*
195 * One-time initialization of OSL layer
196 */
197 ACPI_STATUS
198 AcpiOsInitialize(void)
199 {
200 /*
201 * Allocate buffer for AcpiOsVprintf() here to avoid
202 * kmem_alloc()/kmem_free() at high PIL
203 */
204 acpi_osl_pr_buffer = kmem_alloc(ACPI_OSL_PR_BUFLEN, KM_SLEEP);
205 if (acpi_osl_pr_buffer != NULL)
206 acpi_osl_pr_buflen = ACPI_OSL_PR_BUFLEN;
207
208 return (AE_OK);
209 }
210
211 /*
212 * One-time shut-down of OSL layer
213 */
214 ACPI_STATUS
215 AcpiOsTerminate(void)
216 {
217
218 if (acpi_osl_pr_buffer != NULL)
219 kmem_free(acpi_osl_pr_buffer, acpi_osl_pr_buflen);
220
221 discard_event_queues();
222 return (AE_OK);
223 }
224
225
226 ACPI_PHYSICAL_ADDRESS
227 AcpiOsGetRootPointer()
228 {
229 ACPI_PHYSICAL_ADDRESS Address;
230
231 /*
 * For EFI firmware, the root pointer is defined in the EFI system
 * table. The boot code processes that table and puts the physical
 * address in the "acpi-root-tab" property.
235 */
236 Address = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_root_node(),
237 DDI_PROP_DONTPASS, "acpi-root-tab", NULL);
238
239 if ((Address == NULL) && ACPI_FAILURE(AcpiFindRootPointer(&Address)))
240 Address = NULL;
241
242 return (Address);
243 }
244
245 /*ARGSUSED*/
246 ACPI_STATUS
247 AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES *InitVal,
248 ACPI_STRING *NewVal)
249 {
250
251 *NewVal = 0;
252 return (AE_OK);
253 }
254
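/*
 * Copy at most 'len' bytes of 'src' into 'dest' and always NUL-terminate;
 * 'dest' must be able to hold 'len' + 1 bytes.
 */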
255 static void
256 acpica_strncpy(char *dest, const char *src, int len)
257 {
258
259 /*LINTED*/
260 while ((*dest++ = *src++) && (--len > 0))
261 /* copy the string */;
262 *dest = '\0';
263 }
264
265 ACPI_STATUS
266 AcpiOsTableOverride(ACPI_TABLE_HEADER *ExistingTable,
267 ACPI_TABLE_HEADER **NewTable)
268 {
269 char signature[5];
270 char oemid[7];
271 char oemtableid[9];
272 struct _buf *file;
273 char *buf1, *buf2;
274 int count;
275 char acpi_table_loc[128];
276
277 acpica_strncpy(signature, ExistingTable->Signature, 4);
278 acpica_strncpy(oemid, ExistingTable->OemId, 6);
279 acpica_strncpy(oemtableid, ExistingTable->OemTableId, 8);
280
281 #ifdef DEBUG
282 cmn_err(CE_NOTE, "!acpica: table [%s] v%d OEM ID [%s]"
283 " OEM TABLE ID [%s] OEM rev %x",
284 signature, ExistingTable->Revision, oemid, oemtableid,
285 ExistingTable->OemRevision);
286 #endif
287
288 /* File name format is "signature_oemid_oemtableid.dat" */
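	/* for example, /boot/acpi/tables/DSDT_IntelR_AWRDACPI.dat */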
289 (void) strcpy(acpi_table_loc, acpi_table_path);
290 (void) strcat(acpi_table_loc, signature); /* for example, DSDT */
291 (void) strcat(acpi_table_loc, "_");
292 (void) strcat(acpi_table_loc, oemid); /* for example, IntelR */
293 (void) strcat(acpi_table_loc, "_");
294 (void) strcat(acpi_table_loc, oemtableid); /* for example, AWRDACPI */
295 (void) strcat(acpi_table_loc, ".dat");
296
297 file = kobj_open_file(acpi_table_loc);
298 if (file == (struct _buf *)-1) {
299 *NewTable = 0;
300 return (AE_OK);
301 } else {
302 buf1 = (char *)kmem_alloc(MAX_DAT_FILE_SIZE, KM_SLEEP);
303 count = kobj_read_file(file, buf1, MAX_DAT_FILE_SIZE-1, 0);
304 if (count >= MAX_DAT_FILE_SIZE) {
305 cmn_err(CE_WARN, "!acpica: table %s file size too big",
306 acpi_table_loc);
307 *NewTable = 0;
308 } else {
309 buf2 = (char *)kmem_alloc(count, KM_SLEEP);
310 (void) memcpy(buf2, buf1, count);
311 *NewTable = (ACPI_TABLE_HEADER *)buf2;
312 cmn_err(CE_NOTE, "!acpica: replacing table: %s",
313 acpi_table_loc);
314 }
315 }
316 kobj_close_file(file);
317 kmem_free(buf1, MAX_DAT_FILE_SIZE);
318
319 return (AE_OK);
320 }
321
322 ACPI_STATUS
323 AcpiOsPhysicalTableOverride(ACPI_TABLE_HEADER *ExistingTable,
324 ACPI_PHYSICAL_ADDRESS *NewAddress, UINT32 *NewTableLength)
325 {
326 return (AE_SUPPORT);
327 }
328
329 /*
330 * ACPI semaphore implementation
331 */
332 typedef struct {
333 kmutex_t mutex;
334 kcondvar_t cv;
335 uint32_t available;
336 uint32_t initial;
337 uint32_t maximum;
338 } acpi_sema_t;
339
/*
 * Initialize a counting semaphore with 'count' of 'max' units
 * initially available.
 */
343 void
344 acpi_sema_init(acpi_sema_t *sp, unsigned max, unsigned count)
345 {
346 mutex_init(&sp->mutex, NULL, MUTEX_DRIVER, NULL);
347 cv_init(&sp->cv, NULL, CV_DRIVER, NULL);
348 /* no need to enter mutex here at creation */
349 sp->available = count;
350 sp->initial = count;
351 sp->maximum = max;
352 }
353
/*
 * Tear down a counting semaphore.
 */
357 void
358 acpi_sema_destroy(acpi_sema_t *sp)
359 {
360
361 cv_destroy(&sp->cv);
362 mutex_destroy(&sp->mutex);
363 }
364
/*
 * Acquire 'count' units, waiting up to 'wait_time' milliseconds
 * (ACPI_WAIT_FOREVER blocks indefinitely).
 */
368 ACPI_STATUS
369 acpi_sema_p(acpi_sema_t *sp, unsigned count, uint16_t wait_time)
370 {
371 ACPI_STATUS rv = AE_OK;
372 clock_t deadline;
373
374 mutex_enter(&sp->mutex);
375
376 if (sp->available >= count) {
377 /*
378 * Enough units available, no blocking
379 */
380 sp->available -= count;
381 mutex_exit(&sp->mutex);
382 return (rv);
383 } else if (wait_time == 0) {
384 /*
385 * Not enough units available and timeout
386 * specifies no blocking
387 */
388 rv = AE_TIME;
389 mutex_exit(&sp->mutex);
390 return (rv);
391 }
392
393 /*
394 * Not enough units available and timeout specifies waiting
395 */
396 if (wait_time != ACPI_WAIT_FOREVER)
397 deadline = ddi_get_lbolt() +
398 (clock_t)drv_usectohz(wait_time * 1000);
399
400 do {
401 if (wait_time == ACPI_WAIT_FOREVER)
402 cv_wait(&sp->cv, &sp->mutex);
403 else if (cv_timedwait(&sp->cv, &sp->mutex, deadline) < 0) {
404 rv = AE_TIME;
405 break;
406 }
407 } while (sp->available < count);
408
409 /* if we dropped out of the wait with AE_OK, we got the units */
410 if (rv == AE_OK)
411 sp->available -= count;
412
413 mutex_exit(&sp->mutex);
414 return (rv);
415 }
416
/*
 * Release 'count' units and wake any waiters.
 */
420 void
421 acpi_sema_v(acpi_sema_t *sp, unsigned count)
422 {
423 mutex_enter(&sp->mutex);
424 sp->available += count;
425 cv_broadcast(&sp->cv);
426 mutex_exit(&sp->mutex);
427 }
428
429
430 ACPI_STATUS
431 AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
432 ACPI_HANDLE *OutHandle)
433 {
434 acpi_sema_t *sp;
435
436 if ((OutHandle == NULL) || (InitialUnits > MaxUnits))
437 return (AE_BAD_PARAMETER);
438
439 sp = (acpi_sema_t *)kmem_alloc(sizeof (acpi_sema_t), KM_SLEEP);
440 acpi_sema_init(sp, MaxUnits, InitialUnits);
441 *OutHandle = (ACPI_HANDLE)sp;
442 return (AE_OK);
443 }
444
445
446 ACPI_STATUS
447 AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
448 {
449
450 if (Handle == NULL)
451 return (AE_BAD_PARAMETER);
452
453 acpi_sema_destroy((acpi_sema_t *)Handle);
454 kmem_free((void *)Handle, sizeof (acpi_sema_t));
455 return (AE_OK);
456 }
457
458 ACPI_STATUS
459 AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
460 {
461
462 if ((Handle == NULL) || (Units < 1))
463 return (AE_BAD_PARAMETER);
464
465 return (acpi_sema_p((acpi_sema_t *)Handle, Units, Timeout));
466 }
467
468 ACPI_STATUS
469 AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
470 {
471
472 if ((Handle == NULL) || (Units < 1))
473 return (AE_BAD_PARAMETER);
474
475 acpi_sema_v((acpi_sema_t *)Handle, Units);
476 return (AE_OK);
477 }
478
479 ACPI_STATUS
480 AcpiOsCreateLock(ACPI_HANDLE *OutHandle)
481 {
482 kmutex_t *mp;
483
484 if (OutHandle == NULL)
485 return (AE_BAD_PARAMETER);
486
487 mp = (kmutex_t *)kmem_alloc(sizeof (kmutex_t), KM_SLEEP);
488 mutex_init(mp, NULL, MUTEX_DRIVER, NULL);
489 *OutHandle = (ACPI_HANDLE)mp;
490 return (AE_OK);
491 }
492
493 void
494 AcpiOsDeleteLock(ACPI_HANDLE Handle)
495 {
496
497 if (Handle == NULL)
498 return;
499
500 mutex_destroy((kmutex_t *)Handle);
501 kmem_free((void *)Handle, sizeof (kmutex_t));
502 }
503
504 ACPI_CPU_FLAGS
505 AcpiOsAcquireLock(ACPI_HANDLE Handle)
506 {
507
508
509 if (Handle == NULL)
510 return (AE_BAD_PARAMETER);
511
512 if (curthread == CPU->cpu_idle_thread) {
513 while (!mutex_tryenter((kmutex_t *)Handle))
514 /* spin */;
515 } else
516 mutex_enter((kmutex_t *)Handle);
517 return (AE_OK);
518 }
519
520 void
521 AcpiOsReleaseLock(ACPI_HANDLE Handle, ACPI_CPU_FLAGS Flags)
522 {
523 _NOTE(ARGUNUSED(Flags))
524
525 mutex_exit((kmutex_t *)Handle);
526 }
527
528
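/*
 * AcpiOsAllocate() stores the total allocation size in a hidden
 * ACPI_SIZE header immediately before the pointer it returns, so that
 * AcpiOsFree() can recover the size to pass to kmem_free():
 *
 *	+---------------------+------------------------------+
 *	| total size (header) | 'Size' bytes for the caller  |
 *	+---------------------+------------------------------+
 *	                      ^
 *	                      returned pointer
 */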
529 void *
530 AcpiOsAllocate(ACPI_SIZE Size)
531 {
532 ACPI_SIZE *tmp_ptr;
533
534 Size += sizeof (Size);
535 tmp_ptr = (ACPI_SIZE *)kmem_zalloc(Size, KM_SLEEP);
536 *tmp_ptr++ = Size;
537 return (tmp_ptr);
538 }
539
540 void
541 AcpiOsFree(void *Memory)
542 {
543 ACPI_SIZE size, *tmp_ptr;
544
545 tmp_ptr = (ACPI_SIZE *)Memory;
546 tmp_ptr -= 1;
547 size = *tmp_ptr;
548 kmem_free(tmp_ptr, size);
549 }
550
551 static int napics_found; /* number of ioapic addresses in array */
552 static ACPI_PHYSICAL_ADDRESS ioapic_paddr[MAX_IO_APIC];
553 static ACPI_TABLE_MADT *acpi_mapic_dtp = NULL;
554 static void *dummy_ioapicadr;
555
556 void
557 acpica_find_ioapics(void)
558 {
559 int madt_seen, madt_size;
560 ACPI_SUBTABLE_HEADER *ap;
561 ACPI_MADT_IO_APIC *mia;
562
563 if (acpi_mapic_dtp != NULL)
564 return; /* already parsed table */
565 if (AcpiGetTable(ACPI_SIG_MADT, 1,
566 (ACPI_TABLE_HEADER **) &acpi_mapic_dtp) != AE_OK)
567 return;
568
569 napics_found = 0;
570
571 /*
572 * Search the MADT for ioapics
573 */
574 ap = (ACPI_SUBTABLE_HEADER *) (acpi_mapic_dtp + 1);
575 madt_size = acpi_mapic_dtp->Header.Length;
576 madt_seen = sizeof (*acpi_mapic_dtp);
577
578 while (madt_seen < madt_size) {
579
580 switch (ap->Type) {
581 case ACPI_MADT_TYPE_IO_APIC:
582 mia = (ACPI_MADT_IO_APIC *) ap;
583 if (napics_found < MAX_IO_APIC) {
584 ioapic_paddr[napics_found++] =
585 (ACPI_PHYSICAL_ADDRESS)
586 (mia->Address & PAGEMASK);
587 }
588 break;
589
590 default:
591 break;
592 }
593
594 /* advance to next entry */
595 madt_seen += ap->Length;
596 ap = (ACPI_SUBTABLE_HEADER *)(((char *)ap) + ap->Length);
597 }
598 if (dummy_ioapicadr == NULL)
599 dummy_ioapicadr = kmem_zalloc(PAGESIZE, KM_SLEEP);
600 }
601
602
603 void *
604 AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Size)
605 {
606 int i;
607
608 /*
	 * If the ioapic address table is populated, check whether the
	 * caller is trying to map an ioapic; if so, return a pointer to
	 * the dummy ioapic page instead.
611 */
612 for (i = 0; i < napics_found; i++) {
613 if ((PhysicalAddress & PAGEMASK) == ioapic_paddr[i])
614 return (dummy_ioapicadr);
615 }
616 /* FUTUREWORK: test PhysicalAddress for > 32 bits */
617 return (psm_map_new((paddr_t)PhysicalAddress,
618 (size_t)Size, PSM_PROT_WRITE | PSM_PROT_READ));
619 }
620
621 void
622 AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Size)
623 {
624 /*
625 * Check if trying to unmap dummy ioapic address.
626 */
627 if (LogicalAddress == dummy_ioapicadr)
628 return;
629
630 psm_unmap((caddr_t)LogicalAddress, (size_t)Size);
631 }
632
633 /*ARGSUSED*/
634 ACPI_STATUS
635 AcpiOsGetPhysicalAddress(void *LogicalAddress,
636 ACPI_PHYSICAL_ADDRESS *PhysicalAddress)
637 {
638
639 /* UNIMPLEMENTED: not invoked by ACPI CA code */
640 return (AE_NOT_IMPLEMENTED);
641 }
642
643
644 ACPI_OSD_HANDLER acpi_isr;
645 void *acpi_isr_context;
646
647 uint_t
648 acpi_wrapper_isr(char *arg)
649 {
650 _NOTE(ARGUNUSED(arg))
651
652 int status;
653
654 status = (*acpi_isr)(acpi_isr_context);
655
656 if (status == ACPI_INTERRUPT_HANDLED) {
657 return (DDI_INTR_CLAIMED);
658 } else {
659 return (DDI_INTR_UNCLAIMED);
660 }
661 }
662
663 static int acpi_intr_hooked = 0;
664
665 ACPI_STATUS
666 AcpiOsInstallInterruptHandler(UINT32 InterruptNumber,
667 ACPI_OSD_HANDLER ServiceRoutine,
668 void *Context)
669 {
670 _NOTE(ARGUNUSED(InterruptNumber))
671
672 int retval;
673 int sci_vect;
674 iflag_t sci_flags;
675
676 acpi_isr = ServiceRoutine;
677 acpi_isr_context = Context;
678
679 /*
680 * Get SCI (adjusted for PIC/APIC mode if necessary)
681 */
682 if (acpica_get_sci(&sci_vect, &sci_flags) != AE_OK) {
683 return (AE_ERROR);
684 }
685
686 #ifdef DEBUG
687 cmn_err(CE_NOTE, "!acpica: attaching SCI %d", sci_vect);
688 #endif
689
690 retval = add_avintr(NULL, SCI_IPL, (avfunc)acpi_wrapper_isr,
691 "ACPI SCI", sci_vect, NULL, NULL, NULL, NULL);
692 if (retval) {
693 acpi_intr_hooked = 1;
694 return (AE_OK);
695 } else
696 return (AE_BAD_PARAMETER);
697 }
698
699 ACPI_STATUS
700 AcpiOsRemoveInterruptHandler(UINT32 InterruptNumber,
701 ACPI_OSD_HANDLER ServiceRoutine)
702 {
703 _NOTE(ARGUNUSED(ServiceRoutine))
704
705 #ifdef DEBUG
706 cmn_err(CE_NOTE, "!acpica: detaching SCI %d", InterruptNumber);
707 #endif
708 if (acpi_intr_hooked) {
709 rem_avintr(NULL, LOCK_LEVEL - 1, (avfunc)acpi_wrapper_isr,
710 InterruptNumber);
711 acpi_intr_hooked = 0;
712 }
713 return (AE_OK);
714 }
715
716
717 ACPI_THREAD_ID
718 AcpiOsGetThreadId(void)
719 {
720 /*
721 * ACPI CA doesn't care what actual value is returned as long
722 * as it is non-zero and unique to each existing thread.
723 * ACPI CA assumes that thread ID is castable to a pointer,
724 * so we use the current thread pointer.
725 */
726 return (ACPI_CAST_PTHREAD_T((uintptr_t)curthread));
727 }
728
/*
 * Queue 'Function' for asynchronous execution on the taskq that
 * corresponds to 'Type'.
 */
732 ACPI_STATUS
733 AcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK Function,
734 void *Context)
735 {
736
737 if (!acpica_eventq_init) {
738 /*
739 * Create taskqs for event handling
740 */
741 if (init_event_queues() != AE_OK)
742 return (AE_ERROR);
743 }
744
745 if (ddi_taskq_dispatch(osl_eventq[Type], Function, Context,
746 DDI_NOSLEEP) == DDI_FAILURE) {
747 #ifdef DEBUG
748 cmn_err(CE_WARN, "!acpica: unable to dispatch event");
749 #endif
750 return (AE_ERROR);
751 }
752 return (AE_OK);
753
754 }
755
void
AcpiOsWaitEventsComplete(void)
{
	if (acpica_eventq_init) {
		int i;

		/*
		 * Block until all events initiated by AcpiOsExecute
		 * have completed.
		 */
		for (i = OSL_GLOBAL_LOCK_HANDLER;
		    i <= OSL_EC_BURST_HANDLER; i++) {
			if (osl_eventq[i])
				ddi_taskq_wait(osl_eventq[i]);
		}
	}
}
771
772 void
773 AcpiOsSleep(ACPI_INTEGER Milliseconds)
774 {
775 /*
776 * During kernel startup, before the first tick interrupt
777 * has taken place, we can't call delay; very late in
778 * kernel shutdown or suspend/resume, clock interrupts
779 * are blocked, so delay doesn't work then either.
780 * So we busy wait if lbolt == 0 (kernel startup)
781 * or if acpica_use_safe_delay has been set to a
782 * non-zero value.
783 */
784 if ((ddi_get_lbolt() == 0) || acpica_use_safe_delay)
785 drv_usecwait(Milliseconds * 1000);
786 else
787 delay(drv_usectohz(Milliseconds * 1000));
788 }
789
790 void
791 AcpiOsStall(UINT32 Microseconds)
792 {
793 drv_usecwait(Microseconds);
794 }
795
796
797 /*
798 * Implementation of "Windows 2001" compatible I/O permission map
799 *
800 */
801 #define OSL_IO_NONE (0)
802 #define OSL_IO_READ (1<<0)
803 #define OSL_IO_WRITE (1<<1)
804 #define OSL_IO_RW (OSL_IO_READ | OSL_IO_WRITE)
805 #define OSL_IO_TERM (1<<2)
806 #define OSL_IO_DEFAULT OSL_IO_RW
807
808 static struct io_perm {
809 ACPI_IO_ADDRESS low;
810 ACPI_IO_ADDRESS high;
811 uint8_t perm;
812 } osl_io_perm[] = {
813 { 0xcf8, 0xd00, OSL_IO_TERM | OSL_IO_RW}
814 };
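/*
 * Only addresses covered by a table entry are checked; an address that
 * matches no entry falls out of osl_io_find_perm() with a NULL result
 * and is granted the default read/write access. For example, port 0xcf8
 * (PCI config address) matches the entry above and is explicitly
 * read/write, while port 0x80 matches nothing and is also permitted.
 */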
815
816
/*
 * Look up the permission entry, if any, covering an I/O port address.
 */
820 static struct io_perm *
821 osl_io_find_perm(ACPI_IO_ADDRESS addr)
822 {
823 struct io_perm *p;
824
825 p = osl_io_perm;
826 while (p != NULL) {
827 if ((p->low <= addr) && (addr <= p->high))
828 break;
829 p = (p->perm & OSL_IO_TERM) ? NULL : p+1;
830 }
831
832 return (p);
833 }
834
/*
 * Port I/O, checked against the permission map above.
 */
838 ACPI_STATUS
839 AcpiOsReadPort(ACPI_IO_ADDRESS Address, UINT32 *Value, UINT32 Width)
840 {
841 struct io_perm *p;
842
843 /* verify permission */
844 p = osl_io_find_perm(Address);
845 if (p && (p->perm & OSL_IO_READ) == 0) {
846 cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u not permitted",
847 (long)Address, Width);
848 *Value = 0xffffffff;
849 return (AE_ERROR);
850 }
851
852 switch (Width) {
853 case 8:
854 *Value = inb(Address);
855 break;
856 case 16:
857 *Value = inw(Address);
858 break;
859 case 32:
860 *Value = inl(Address);
861 break;
862 default:
863 cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u failed",
864 (long)Address, Width);
865 return (AE_BAD_PARAMETER);
866 }
867 return (AE_OK);
868 }
869
870 ACPI_STATUS
871 AcpiOsWritePort(ACPI_IO_ADDRESS Address, UINT32 Value, UINT32 Width)
872 {
873 struct io_perm *p;
874
875 /* verify permission */
876 p = osl_io_find_perm(Address);
877 if (p && (p->perm & OSL_IO_WRITE) == 0) {
878 cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u not permitted",
879 (long)Address, Width);
880 return (AE_ERROR);
881 }
882
883 switch (Width) {
884 case 8:
885 outb(Address, Value);
886 break;
887 case 16:
888 outw(Address, Value);
889 break;
890 case 32:
891 outl(Address, Value);
892 break;
893 default:
894 cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u failed",
895 (long)Address, Width);
896 return (AE_BAD_PARAMETER);
897 }
898 return (AE_OK);
899 }
900
901
/*
 * Memory-mapped access helpers for AcpiOsReadMemory()/AcpiOsWriteMemory().
 */
905
906 #define OSL_RW(ptr, val, type, rw) \
907 { if (rw) *((type *)(ptr)) = *((type *) val); \
908 else *((type *) val) = *((type *)(ptr)); }
909
910
911 static void
912 osl_rw_memory(ACPI_PHYSICAL_ADDRESS Address, UINT64 *Value,
913 UINT32 Width, int write)
914 {
915 size_t maplen = Width / 8;
916 caddr_t ptr;
917
918 ptr = psm_map_new((paddr_t)Address, maplen,
919 PSM_PROT_WRITE | PSM_PROT_READ);
920
921 switch (maplen) {
922 case 1:
923 OSL_RW(ptr, Value, uint8_t, write);
924 break;
925 case 2:
926 OSL_RW(ptr, Value, uint16_t, write);
927 break;
928 case 4:
929 OSL_RW(ptr, Value, uint32_t, write);
930 break;
931 case 8:
932 OSL_RW(ptr, Value, uint64_t, write);
933 break;
934 default:
935 cmn_err(CE_WARN, "!osl_rw_memory: invalid size %d",
936 Width);
937 break;
938 }
939
940 psm_unmap(ptr, maplen);
941 }
942
943 ACPI_STATUS
944 AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS Address,
945 UINT64 *Value, UINT32 Width)
946 {
947 osl_rw_memory(Address, Value, Width, 0);
948 return (AE_OK);
949 }
950
951 ACPI_STATUS
952 AcpiOsWriteMemory(ACPI_PHYSICAL_ADDRESS Address,
953 UINT64 Value, UINT32 Width)
954 {
955 osl_rw_memory(Address, &Value, Width, 1);
956 return (AE_OK);
957 }
958
959
960 ACPI_STATUS
961 AcpiOsReadPciConfiguration(ACPI_PCI_ID *PciId, UINT32 Reg,
962 UINT64 *Value, UINT32 Width)
963 {
964
965 switch (Width) {
966 case 8:
967 *Value = (UINT64)(*pci_getb_func)
968 (PciId->Bus, PciId->Device, PciId->Function, Reg);
969 break;
970 case 16:
971 *Value = (UINT64)(*pci_getw_func)
972 (PciId->Bus, PciId->Device, PciId->Function, Reg);
973 break;
974 case 32:
975 *Value = (UINT64)(*pci_getl_func)
976 (PciId->Bus, PciId->Device, PciId->Function, Reg);
977 break;
978 case 64:
979 default:
980 cmn_err(CE_WARN, "!AcpiOsReadPciConfiguration: %x %u failed",
981 Reg, Width);
982 return (AE_BAD_PARAMETER);
983 }
984 return (AE_OK);
985 }
986
/*
 * When zero, AML writes to PCI config space are logged and suppressed.
 */
990 int acpica_write_pci_config_ok = 1;
991
992 ACPI_STATUS
993 AcpiOsWritePciConfiguration(ACPI_PCI_ID *PciId, UINT32 Reg,
994 UINT64 Value, UINT32 Width)
995 {
996
997 if (!acpica_write_pci_config_ok) {
998 cmn_err(CE_NOTE, "!write to PCI cfg %x/%x/%x %x"
999 " %lx %d not permitted", PciId->Bus, PciId->Device,
1000 PciId->Function, Reg, (long)Value, Width);
1001 return (AE_OK);
1002 }
1003
1004 switch (Width) {
1005 case 8:
1006 (*pci_putb_func)(PciId->Bus, PciId->Device, PciId->Function,
1007 Reg, (uint8_t)Value);
1008 break;
1009 case 16:
1010 (*pci_putw_func)(PciId->Bus, PciId->Device, PciId->Function,
1011 Reg, (uint16_t)Value);
1012 break;
1013 case 32:
1014 (*pci_putl_func)(PciId->Bus, PciId->Device, PciId->Function,
1015 Reg, (uint32_t)Value);
1016 break;
1017 case 64:
1018 default:
1019 cmn_err(CE_WARN, "!AcpiOsWritePciConfiguration: %x %u failed",
1020 Reg, Width);
1021 return (AE_BAD_PARAMETER);
1022 }
1023 return (AE_OK);
1024 }
1025
1026 /*
1027 * Called with ACPI_HANDLEs for both a PCI Config Space
1028 * OpRegion and (what ACPI CA thinks is) the PCI device
1029 * to which this ConfigSpace OpRegion belongs.
1030 *
1031 * ACPI CA uses _BBN and _ADR objects to determine the default
1032 * values for bus, segment, device and function; anything ACPI CA
1033 * can't figure out from the ACPI tables will be 0. One very
1034 * old 32-bit x86 system is known to have broken _BBN; this is
1035 * not addressed here.
1036 *
1037 * Some BIOSes implement _BBN() by reading PCI config space
1038 * on bus #0 - which means that we'll recurse when we attempt
1039 * to create the devinfo-to-ACPI map. If Derive is called during
1040 * scan_d2a_map, we don't translate the bus # and return.
1041 *
1042 * We get the parent of the OpRegion, which must be a PCI
1043 * node, fetch the associated devinfo node and snag the
1044 * b/d/f from it.
1045 */
1046 void
1047 AcpiOsDerivePciId(ACPI_HANDLE rhandle, ACPI_HANDLE chandle,
1048 ACPI_PCI_ID **PciId)
1049 {
1050 ACPI_HANDLE handle;
1051 dev_info_t *dip;
1052 int bus, device, func, devfn;
1053
1054 /*
1055 * See above - avoid recursing during scanning_d2a_map.
1056 */
1057 if (scanning_d2a_map)
1058 return;
1059
1060 /*
1061 * Get the OpRegion's parent
1062 */
1063 if (AcpiGetParent(chandle, &handle) != AE_OK)
1064 return;
1065
1066 /*
1067 * If we've mapped the ACPI node to the devinfo
1068 * tree, use the devinfo reg property
1069 */
1070 if (ACPI_SUCCESS(acpica_get_devinfo(handle, &dip)) &&
1071 (acpica_get_bdf(dip, &bus, &device, &func) >= 0)) {
1072 (*PciId)->Bus = bus;
1073 (*PciId)->Device = device;
1074 (*PciId)->Function = func;
1075 }
1076 }
1077
1078
1079 /*ARGSUSED*/
1080 BOOLEAN
1081 AcpiOsReadable(void *Pointer, ACPI_SIZE Length)
1082 {
1083
1084 /* Always says yes; all mapped memory assumed readable */
1085 return (1);
1086 }
1087
1088 /*ARGSUSED*/
1089 BOOLEAN
1090 AcpiOsWritable(void *Pointer, ACPI_SIZE Length)
1091 {
1092
1093 /* Always says yes; all mapped memory assumed writable */
1094 return (1);
1095 }
1096
1097 UINT64
1098 AcpiOsGetTimer(void)
1099 {
1100 /* gethrtime() returns 1nS resolution; convert to 100nS granules */
1101 return ((gethrtime() + 50) / 100);
1102 }
1103
1104 static struct AcpiOSIFeature_s {
1105 uint64_t control_flag;
1106 const char *feature_name;
1107 } AcpiOSIFeatures[] = {
1108 { ACPI_FEATURE_OSI_MODULE, "Module Device" },
1109 { 0, "Processor Device" }
1110 };
1111
1112 /*ARGSUSED*/
1113 ACPI_STATUS
1114 AcpiOsValidateInterface(char *feature)
1115 {
1116 int i;
1117
1118 ASSERT(feature != NULL);
1119 for (i = 0; i < sizeof (AcpiOSIFeatures) / sizeof (AcpiOSIFeatures[0]);
1120 i++) {
1121 if (strcmp(feature, AcpiOSIFeatures[i].feature_name) != 0) {
1122 continue;
1123 }
1124 /* Check whether required core features are available. */
1125 if (AcpiOSIFeatures[i].control_flag != 0 &&
1126 acpica_get_core_feature(AcpiOSIFeatures[i].control_flag) !=
1127 AcpiOSIFeatures[i].control_flag) {
1128 break;
1129 }
1130 /* Feature supported. */
1131 return (AE_OK);
1132 }
1133
1134 return (AE_SUPPORT);
1135 }
1136
1137 /*ARGSUSED*/
1138 ACPI_STATUS
1139 AcpiOsValidateAddress(UINT8 spaceid, ACPI_PHYSICAL_ADDRESS addr,
1140 ACPI_SIZE length)
1141 {
1142 return (AE_OK);
1143 }
1144
1145 ACPI_STATUS
1146 AcpiOsSignal(UINT32 Function, void *Info)
1147 {
1148 _NOTE(ARGUNUSED(Function, Info))
1149
1150 /* FUTUREWORK: debugger support */
1151
1152 cmn_err(CE_NOTE, "!OsSignal unimplemented");
1153 return (AE_OK);
1154 }
1155
1156 void ACPI_INTERNAL_VAR_XFACE
1157 AcpiOsPrintf(const char *Format, ...)
1158 {
1159 va_list ap;
1160
1161 va_start(ap, Format);
1162 AcpiOsVprintf(Format, ap);
1163 va_end(ap);
1164 }
1165
1166 /*
 * When != 0, sends output to the console (1 = printf, 2 = prom_printf);
 * otherwise output goes to the system log only.
 * Patchable with kmdb or /etc/system.
1169 */
1170 int acpica_console_out = 0;
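/*
 * For example, assuming this file is built into the "acpica" kernel
 * module, adding
 *	set acpica:acpica_console_out = 1
 * to /etc/system sends ACPI CA output to the console via printf().
 */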
1171
1172 #define ACPICA_OUTBUF_LEN 160
1173 char acpica_outbuf[ACPICA_OUTBUF_LEN];
1174 int acpica_outbuf_offset;
1175
/*
 * Accumulate output and emit it one line at a time.
 */
1179 static void
1180 acpica_pr_buf(char *buf)
1181 {
1182 char c, *bufp, *outp;
1183 int out_remaining;
1184
1185 /*
	 * Copy the supplied buffer into the output buffer.
	 * When we hit a '\n' or fill the output buffer, emit the
	 * accumulated output and reset the output buffer.
1189 */
1190 bufp = buf;
1191 outp = acpica_outbuf + acpica_outbuf_offset;
1192 out_remaining = ACPICA_OUTBUF_LEN - acpica_outbuf_offset - 1;
1193 while (c = *bufp++) {
1194 *outp++ = c;
1195 if (c == '\n' || --out_remaining == 0) {
1196 *outp = '\0';
1197 switch (acpica_console_out) {
1198 case 1:
1199 printf(acpica_outbuf);
1200 break;
1201 case 2:
1202 prom_printf(acpica_outbuf);
1203 break;
1204 case 0:
1205 default:
1206 (void) strlog(0, 0, 0,
1207 SL_CONSOLE | SL_NOTE | SL_LOGONLY,
1208 acpica_outbuf);
1209 break;
1210 }
1211 acpica_outbuf_offset = 0;
1212 outp = acpica_outbuf;
1213 out_remaining = ACPICA_OUTBUF_LEN - 1;
1214 }
1215 }
1216
1217 acpica_outbuf_offset = outp - acpica_outbuf;
1218 }
1219
1220 void
1221 AcpiOsVprintf(const char *Format, va_list Args)
1222 {
1223
1224 /*
1225 * If AcpiOsInitialize() failed to allocate a string buffer,
1226 * resort to vprintf().
1227 */
1228 if (acpi_osl_pr_buffer == NULL) {
1229 vprintf(Format, Args);
1230 return;
1231 }
1232
1233 /*
1234 * It is possible that a very long debug output statement will
1235 * be truncated; this is silently ignored.
1236 */
1237 (void) vsnprintf(acpi_osl_pr_buffer, acpi_osl_pr_buflen, Format, Args);
1238 acpica_pr_buf(acpi_osl_pr_buffer);
1239 }
1240
1241 void
1242 AcpiOsRedirectOutput(void *Destination)
1243 {
1244 _NOTE(ARGUNUSED(Destination))
1245
1246 /* FUTUREWORK: debugger support */
1247
1248 #ifdef DEBUG
1249 cmn_err(CE_WARN, "!acpica: AcpiOsRedirectOutput called");
1250 #endif
1251 }
1252
1253
1254 UINT32
1255 AcpiOsGetLine(char *Buffer, UINT32 len, UINT32 *BytesRead)
1256 {
1257 _NOTE(ARGUNUSED(Buffer))
1258 _NOTE(ARGUNUSED(len))
1259 _NOTE(ARGUNUSED(BytesRead))
1260
1261 /* FUTUREWORK: debugger support */
1262
1263 return (0);
1264 }
1265
1266 /*
1267 * Device tree binding
1268 */
1269 static ACPI_STATUS
1270 acpica_find_pcibus_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
1271 {
1272 _NOTE(ARGUNUSED(lvl));
1273
1274 int sta, hid, bbn;
1275 int busno = (intptr_t)ctxp;
1276 ACPI_HANDLE *hdlp = (ACPI_HANDLE *)rvpp;
1277
1278 /* Check whether device exists. */
1279 if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
1280 !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
1281 /*
1282 * Skip object if device doesn't exist.
1283 * According to ACPI Spec,
1284 * 1) setting either bit 0 or bit 3 means that device exists.
1285 * 2) Absence of _STA method means all status bits set.
1286 */
1287 return (AE_CTRL_DEPTH);
1288 }
1289
1290 if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
1291 (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
1292 /* Non PCI/PCIe host bridge. */
1293 return (AE_OK);
1294 }
1295
1296 if (acpi_has_broken_bbn) {
1297 ACPI_BUFFER rb;
1298 rb.Pointer = NULL;
1299 rb.Length = ACPI_ALLOCATE_BUFFER;
1300
1301 /* Decree _BBN == n from PCI<n> */
1302 if (AcpiGetName(hdl, ACPI_SINGLE_NAME, &rb) != AE_OK) {
1303 return (AE_CTRL_TERMINATE);
1304 }
1305 bbn = ((char *)rb.Pointer)[3] - '0';
1306 AcpiOsFree(rb.Pointer);
1307 if (bbn == busno || busno == 0) {
1308 *hdlp = hdl;
1309 return (AE_CTRL_TERMINATE);
1310 }
1311 } else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn))) {
1312 if (bbn == busno) {
1313 *hdlp = hdl;
1314 return (AE_CTRL_TERMINATE);
1315 }
1316 } else if (busno == 0) {
1317 *hdlp = hdl;
1318 return (AE_CTRL_TERMINATE);
1319 }
1320
1321 return (AE_CTRL_DEPTH);
1322 }
1323
1324 static int
1325 acpica_find_pcibus(int busno, ACPI_HANDLE *rh)
1326 {
1327 ACPI_HANDLE sbobj, busobj;
1328
1329 /* initialize static flag by querying ACPI namespace for bug */
1330 if (acpi_has_broken_bbn == -1)
1331 acpi_has_broken_bbn = acpica_query_bbn_problem();
1332
1333 if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1334 busobj = NULL;
1335 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1336 acpica_find_pcibus_walker, NULL, (void *)(intptr_t)busno,
1337 (void **)&busobj);
1338 if (busobj != NULL) {
1339 *rh = busobj;
1340 return (AE_OK);
1341 }
1342 }
1343
1344 return (AE_ERROR);
1345 }
1346
1347 static ACPI_STATUS
1348 acpica_query_bbn_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
1349 {
1350 _NOTE(ARGUNUSED(lvl));
1351 _NOTE(ARGUNUSED(rvpp));
1352
1353 int sta, hid, bbn;
1354 int *cntp = (int *)ctxp;
1355
1356 /* Check whether device exists. */
1357 if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
1358 !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
1359 /*
1360 * Skip object if device doesn't exist.
1361 * According to ACPI Spec,
1362 * 1) setting either bit 0 or bit 3 means that device exists.
1363 * 2) Absence of _STA method means all status bits set.
1364 */
1365 return (AE_CTRL_DEPTH);
1366 }
1367
1368 if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
1369 (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
1370 /* Non PCI/PCIe host bridge. */
1371 return (AE_OK);
1372 } else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn)) &&
1373 bbn == 0 && ++(*cntp) > 1) {
1374 /*
1375 * If we find more than one bus with a 0 _BBN
1376 * we have the problem that BigBear's BIOS shows
1377 */
1378 return (AE_CTRL_TERMINATE);
1379 } else {
1380 /*
1381 * Skip children of PCI/PCIe host bridge.
1382 */
1383 return (AE_CTRL_DEPTH);
1384 }
1385 }
1386
1387 /*
 * Look for the ACPI problem where _BBN is zero for multiple PCI buses.
 * This is a clear ACPI bug, but acpica_find_pcibus() above has a
 * workaround for it if it exists.
1391 */
1392 static int
1393 acpica_query_bbn_problem(void)
1394 {
1395 ACPI_HANDLE sbobj;
1396 int zerobbncnt;
1397 void *rv;
1398
1399 zerobbncnt = 0;
1400 if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1401 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1402 acpica_query_bbn_walker, NULL, &zerobbncnt, &rv);
1403 }
1404
1405 return (zerobbncnt > 1 ? 1 : 0);
1406 }
1407
1408 static const char hextab[] = "0123456789ABCDEF";
1409
1410 static int
1411 hexdig(int c)
1412 {
1413 /*
1414 * Get hex digit:
1415 *
1416 * Returns the 4-bit hex digit named by the input character. Returns
1417 * zero if the input character is not valid hex!
1418 */
1419
1420 int x = ((c < 'a') || (c > 'z')) ? c : (c - ' ');
1421 int j = sizeof (hextab);
1422
1423 while (--j && (x != hextab[j])) {
1424 }
1425 return (j);
1426 }
1427
1428 static int
1429 CompressEisaID(char *np)
1430 {
1431 /*
1432 * Compress an EISA device name:
1433 *
1434 * This routine converts a 7-byte ASCII device name into the 4-byte
1435 * compressed form used by EISA (50 bytes of ROM to save 1 byte of
1436 * NV-RAM!)
1437 */
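	/*
	 * For example, "PNP0A03" (the PCI host bridge _HID) compresses to
	 * the octets 0x41 0xD0 0x0A 0x03, i.e. the integer 0x030AD041 on
	 * little-endian x86.
	 */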
1438
1439 union { char octets[4]; int retval; } myu;
1440
1441 myu.octets[0] = ((np[0] & 0x1F) << 2) + ((np[1] >> 3) & 0x03);
1442 myu.octets[1] = ((np[1] & 0x07) << 5) + (np[2] & 0x1F);
1443 myu.octets[2] = (hexdig(np[3]) << 4) + hexdig(np[4]);
1444 myu.octets[3] = (hexdig(np[5]) << 4) + hexdig(np[6]);
1445
1446 return (myu.retval);
1447 }
1448
1449 ACPI_STATUS
1450 acpica_eval_int(ACPI_HANDLE dev, char *method, int *rint)
1451 {
1452 ACPI_STATUS status;
1453 ACPI_BUFFER rb;
1454 ACPI_OBJECT ro;
1455
1456 rb.Pointer = &ro;
1457 rb.Length = sizeof (ro);
1458 if ((status = AcpiEvaluateObjectTyped(dev, method, NULL, &rb,
1459 ACPI_TYPE_INTEGER)) == AE_OK)
1460 *rint = ro.Integer.Value;
1461
1462 return (status);
1463 }
1464
1465 static int
1466 acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint)
1467 {
1468 ACPI_BUFFER rb;
1469 ACPI_OBJECT *rv;
1470
1471 rb.Pointer = NULL;
1472 rb.Length = ACPI_ALLOCATE_BUFFER;
1473 if (AcpiEvaluateObject(dev, method, NULL, &rb) == AE_OK &&
1474 rb.Length != 0) {
1475 rv = rb.Pointer;
1476 if (rv->Type == ACPI_TYPE_INTEGER) {
1477 *rint = rv->Integer.Value;
1478 AcpiOsFree(rv);
1479 return (AE_OK);
1480 } else if (rv->Type == ACPI_TYPE_STRING) {
1481 char *stringData;
1482
1483 /* Convert the string into an EISA ID */
1484 if (rv->String.Pointer == NULL) {
1485 AcpiOsFree(rv);
1486 return (AE_ERROR);
1487 }
1488
1489 stringData = rv->String.Pointer;
1490
1491 /*
1492 * If the string is an EisaID, it must be 7
1493 * characters; if it's an ACPI ID, it will be 8
1494 * (and we don't care about ACPI ids here).
1495 */
1496 if (strlen(stringData) != 7) {
1497 AcpiOsFree(rv);
1498 return (AE_ERROR);
1499 }
1500
1501 *rint = CompressEisaID(stringData);
1502 AcpiOsFree(rv);
1503 return (AE_OK);
1504 } else
1505 AcpiOsFree(rv);
1506 }
1507 return (AE_ERROR);
1508 }
1509
1510 /*
1511 * Create linkage between devinfo nodes and ACPI nodes
1512 */
1513 ACPI_STATUS
1514 acpica_tag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1515 {
1516 ACPI_STATUS status;
1517 ACPI_BUFFER rb;
1518
1519 /*
1520 * Tag the devinfo node with the ACPI name
1521 */
1522 rb.Pointer = NULL;
1523 rb.Length = ACPI_ALLOCATE_BUFFER;
1524 status = AcpiGetName(acpiobj, ACPI_FULL_PATHNAME, &rb);
1525 if (ACPI_FAILURE(status)) {
1526 cmn_err(CE_WARN, "acpica: could not get ACPI path!");
1527 } else {
1528 (void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
1529 "acpi-namespace", (char *)rb.Pointer);
1530 AcpiOsFree(rb.Pointer);
1531
1532 /*
1533 * Tag the ACPI node with the dip
1534 */
1535 status = acpica_set_devinfo(acpiobj, dip);
1536 ASSERT(ACPI_SUCCESS(status));
1537 }
1538
1539 return (status);
1540 }
1541
1542 /*
1543 * Destroy linkage between devinfo nodes and ACPI nodes
1544 */
1545 ACPI_STATUS
1546 acpica_untag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1547 {
1548 (void) acpica_unset_devinfo(acpiobj);
1549 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "acpi-namespace");
1550
1551 return (AE_OK);
1552 }
1553
1554 /*
1555 * Return the ACPI device node matching the CPU dev_info node.
1556 */
1557 ACPI_STATUS
1558 acpica_get_handle_cpu(int cpu_id, ACPI_HANDLE *rh)
1559 {
1560 int i;
1561
1562 /*
	 * If cpu_map itself is NULL, we're a uppc system and
	 * acpica_build_processor_map() hasn't been called yet,
	 * so call it here.
1566 */
1567 if (cpu_map == NULL) {
1568 (void) acpica_build_processor_map();
1569 if (cpu_map == NULL)
1570 return (AE_ERROR);
1571 }
1572
1573 if (cpu_id < 0) {
1574 return (AE_ERROR);
1575 }
1576
1577 /*
1578 * search object with cpuid in cpu_map
1579 */
1580 mutex_enter(&cpu_map_lock);
1581 for (i = 0; i < cpu_map_count; i++) {
1582 if (cpu_map[i]->cpu_id == cpu_id) {
1583 break;
1584 }
1585 }
1586 if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
1587 *rh = cpu_map[i]->obj;
1588 mutex_exit(&cpu_map_lock);
1589 return (AE_OK);
1590 }
1591
1592 /* Handle special case for uppc-only systems. */
1593 if (cpu_map_called == 0) {
1594 uint32_t apicid = cpuid_get_apicid(CPU);
1595 if (apicid != UINT32_MAX) {
1596 for (i = 0; i < cpu_map_count; i++) {
1597 if (cpu_map[i]->apic_id == apicid) {
1598 break;
1599 }
1600 }
1601 if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
1602 *rh = cpu_map[i]->obj;
1603 mutex_exit(&cpu_map_lock);
1604 return (AE_OK);
1605 }
1606 }
1607 }
1608 mutex_exit(&cpu_map_lock);
1609
1610 return (AE_ERROR);
1611 }
1612
1613 /*
1614 * Determine if this object is a processor
1615 */
1616 static ACPI_STATUS
1617 acpica_probe_processor(ACPI_HANDLE obj, UINT32 level, void *ctx, void **rv)
1618 {
1619 ACPI_STATUS status;
1620 ACPI_OBJECT_TYPE objtype;
1621 unsigned long acpi_id;
1622 ACPI_BUFFER rb;
1623 ACPI_DEVICE_INFO *di;
1624
1625 if (AcpiGetType(obj, &objtype) != AE_OK)
1626 return (AE_OK);
1627
1628 if (objtype == ACPI_TYPE_PROCESSOR) {
1629 /* process a Processor */
1630 rb.Pointer = NULL;
1631 rb.Length = ACPI_ALLOCATE_BUFFER;
1632 status = AcpiEvaluateObjectTyped(obj, NULL, NULL, &rb,
1633 ACPI_TYPE_PROCESSOR);
1634 if (status != AE_OK) {
1635 cmn_err(CE_WARN, "!acpica: error probing Processor");
1636 return (status);
1637 }
1638 acpi_id = ((ACPI_OBJECT *)rb.Pointer)->Processor.ProcId;
1639 AcpiOsFree(rb.Pointer);
1640 } else if (objtype == ACPI_TYPE_DEVICE) {
1641 /* process a processor Device */
1642 status = AcpiGetObjectInfo(obj, &di);
1643 if (status != AE_OK) {
1644 cmn_err(CE_WARN,
1645 "!acpica: error probing Processor Device\n");
1646 return (status);
1647 }
1648
1649 if (!(di->Valid & ACPI_VALID_UID) ||
1650 ddi_strtoul(di->UniqueId.String, NULL, 10, &acpi_id) != 0) {
1651 ACPI_FREE(di);
1652 cmn_err(CE_WARN,
1653 "!acpica: error probing Processor Device _UID\n");
1654 return (AE_ERROR);
1655 }
1656 ACPI_FREE(di);
1657 }
1658 (void) acpica_add_processor_to_map(acpi_id, obj, UINT32_MAX);
1659
1660 return (AE_OK);
1661 }
1662
1663 void
1664 scan_d2a_map(void)
1665 {
1666 dev_info_t *dip, *cdip;
1667 ACPI_HANDLE acpiobj;
1668 char *device_type_prop;
1669 int bus;
1670 static int map_error = 0;
1671
1672 if (map_error || (d2a_done != 0))
1673 return;
1674
1675 scanning_d2a_map = 1;
1676
1677 /*
1678 * Find all child-of-root PCI buses, and find their corresponding
1679 * ACPI child-of-root PCI nodes. For each one, add to the
1680 * d2a table.
1681 */
1682
1683 for (dip = ddi_get_child(ddi_root_node());
1684 dip != NULL;
1685 dip = ddi_get_next_sibling(dip)) {
1686
1687 /* prune non-PCI nodes */
1688 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
1689 DDI_PROP_DONTPASS,
1690 "device_type", &device_type_prop) != DDI_PROP_SUCCESS)
1691 continue;
1692
1693 if ((strcmp("pci", device_type_prop) != 0) &&
1694 (strcmp("pciex", device_type_prop) != 0)) {
1695 ddi_prop_free(device_type_prop);
1696 continue;
1697 }
1698
1699 ddi_prop_free(device_type_prop);
1700
1701 /*
1702 * To get bus number of dip, get first child and get its
1703 * bus number. If NULL, just continue, because we don't
1704 * care about bus nodes with no children anyway.
1705 */
1706 if ((cdip = ddi_get_child(dip)) == NULL)
1707 continue;
1708
1709 if (acpica_get_bdf(cdip, &bus, NULL, NULL) < 0) {
1710 #ifdef D2ADEBUG
1711 cmn_err(CE_WARN, "Can't get bus number of PCI child?");
1712 #endif
1713 map_error = 1;
1714 scanning_d2a_map = 0;
1715 d2a_done = 1;
1716 return;
1717 }
1718
1719 if (acpica_find_pcibus(bus, &acpiobj) == AE_ERROR) {
1720 #ifdef D2ADEBUG
1721 cmn_err(CE_WARN, "No ACPI bus obj for bus %d?\n", bus);
1722 #endif
1723 map_error = 1;
1724 continue;
1725 }
1726
1727 acpica_tag_devinfo(dip, acpiobj);
1728
1729 /* call recursively to enumerate subtrees */
1730 scan_d2a_subtree(dip, acpiobj, bus);
1731 }
1732
1733 scanning_d2a_map = 0;
1734 d2a_done = 1;
1735 }
1736
1737 /*
1738 * For all acpi child devices of acpiobj, find their matching
1739 * dip under "dip" argument. (matching means "matches dev/fn").
1740 * bus is assumed to already be a match from caller, and is
1741 * used here only to record in the d2a entry. Recurse if necessary.
1742 */
1743 static void
1744 scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus)
1745 {
1746 int acpi_devfn, hid;
1747 ACPI_HANDLE acld;
1748 dev_info_t *dcld;
1749 int dcld_b, dcld_d, dcld_f;
1750 int dev, func;
1751 char *device_type_prop;
1752
1753 acld = NULL;
1754 while (AcpiGetNextObject(ACPI_TYPE_DEVICE, acpiobj, acld, &acld)
1755 == AE_OK) {
1756 /* get the dev/func we're looking for in the devinfo tree */
1757 if (acpica_eval_int(acld, "_ADR", &acpi_devfn) != AE_OK)
1758 continue;
1759 dev = (acpi_devfn >> 16) & 0xFFFF;
1760 func = acpi_devfn & 0xFFFF;
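		/*
		 * Per the ACPI spec, _ADR for PCI encodes the device in the
		 * high word and the function in the low word; for example,
		 * 0x001F0003 is device 0x1f, function 3, and a function of
		 * 0xFFFF means "any/all functions".
		 */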
1761
1762 /* look through all the immediate children of dip */
1763 for (dcld = ddi_get_child(dip); dcld != NULL;
1764 dcld = ddi_get_next_sibling(dcld)) {
1765 if (acpica_get_bdf(dcld, &dcld_b, &dcld_d, &dcld_f) < 0)
1766 continue;
1767
1768 /* dev must match; function must match or wildcard */
1769 if (dcld_d != dev ||
1770 (func != 0xFFFF && func != dcld_f))
1771 continue;
1772 bus = dcld_b;
1773
1774 /* found a match, record it */
1775 acpica_tag_devinfo(dcld, acld);
1776
1777 /* if we find a bridge, recurse from here */
1778 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dcld,
1779 DDI_PROP_DONTPASS, "device_type",
1780 &device_type_prop) == DDI_PROP_SUCCESS) {
1781 if ((strcmp("pci", device_type_prop) == 0) ||
1782 (strcmp("pciex", device_type_prop) == 0))
1783 scan_d2a_subtree(dcld, acld, bus);
1784 ddi_prop_free(device_type_prop);
1785 }
1786
1787 /* done finding a match, so break now */
1788 break;
1789 }
1790 }
1791 }
1792
1793 /*
1794 * Return bus/dev/fn for PCI dip (note: not the parent "pci" node).
1795 */
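/*
 * PCI_REG_BUS_G/DEV_G/FUNC_G extract the bus, device and function from
 * the bbbbbbbb.dddddfff bits in the middle of pci_phys_hi (per the PCI
 * binding to IEEE 1275); for example, a pci_phys_hi of 0x00010800
 * decodes to bus 1, device 1, function 0.
 */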
1796 int
1797 acpica_get_bdf(dev_info_t *dip, int *bus, int *device, int *func)
1798 {
1799 pci_regspec_t *pci_rp;
1800 int len;
1801
1802 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1803 "reg", (int **)&pci_rp, (uint_t *)&len) != DDI_SUCCESS)
1804 return (-1);
1805
1806 if (len < (sizeof (pci_regspec_t) / sizeof (int))) {
1807 ddi_prop_free(pci_rp);
1808 return (-1);
1809 }
1810 if (bus != NULL)
1811 *bus = (int)PCI_REG_BUS_G(pci_rp->pci_phys_hi);
1812 if (device != NULL)
1813 *device = (int)PCI_REG_DEV_G(pci_rp->pci_phys_hi);
1814 if (func != NULL)
1815 *func = (int)PCI_REG_FUNC_G(pci_rp->pci_phys_hi);
1816 ddi_prop_free(pci_rp);
1817 return (0);
1818 }
1819
1820 /*
1821 * Return the ACPI device node matching this dev_info node, if it
1822 * exists in the ACPI tree.
1823 */
1824 ACPI_STATUS
1825 acpica_get_handle(dev_info_t *dip, ACPI_HANDLE *rh)
1826 {
1827 ACPI_STATUS status;
1828 char *acpiname;
1829
1830 #ifdef DEBUG
1831 if (d2a_done == 0)
1832 cmn_err(CE_WARN, "!acpica_get_handle:"
1833 " no ACPI mapping for %s", ddi_node_name(dip));
1834 #endif
1835
1836 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1837 "acpi-namespace", &acpiname) != DDI_PROP_SUCCESS) {
1838 return (AE_ERROR);
1839 }
1840
1841 status = AcpiGetHandle(NULL, acpiname, rh);
1842 ddi_prop_free((void *)acpiname);
1843 return (status);
1844 }
1845
1846
1847
1848 /*
1849 * Manage OS data attachment to ACPI nodes
1850 */
1851
1852 /*
1853 * Return the (dev_info_t *) associated with the ACPI node.
1854 */
1855 ACPI_STATUS
1856 acpica_get_devinfo(ACPI_HANDLE obj, dev_info_t **dipp)
1857 {
1858 ACPI_STATUS status;
1859 void *ptr;
1860
1861 status = AcpiGetData(obj, acpica_devinfo_handler, &ptr);
1862 if (status == AE_OK)
1863 *dipp = (dev_info_t *)ptr;
1864
1865 return (status);
1866 }
1867
1868 /*
1869 * Set the dev_info_t associated with the ACPI node.
1870 */
1871 static ACPI_STATUS
1872 acpica_set_devinfo(ACPI_HANDLE obj, dev_info_t *dip)
1873 {
1874 ACPI_STATUS status;
1875
1876 status = AcpiAttachData(obj, acpica_devinfo_handler, (void *)dip);
1877 return (status);
1878 }
1879
1880 /*
1881 * Unset the dev_info_t associated with the ACPI node.
1882 */
1883 static ACPI_STATUS
1884 acpica_unset_devinfo(ACPI_HANDLE obj)
1885 {
1886 return (AcpiDetachData(obj, acpica_devinfo_handler));
1887 }
1888
/*
 * Handler required by AcpiAttachData(); there is nothing to clean up
 * when the attached dip reference is discarded.
 */
1892 void
1893 acpica_devinfo_handler(ACPI_HANDLE obj, void *data)
1894 {
1895 /* no-op */
1896 }
1897
1898 ACPI_STATUS
1899 acpica_build_processor_map(void)
1900 {
1901 ACPI_STATUS status;
1902 void *rv;
1903
1904 /*
1905 * shouldn't be called more than once anyway
1906 */
1907 if (cpu_map_built)
1908 return (AE_OK);
1909
1910 /*
 * The ACPI device configuration driver has already built the mapping
 * between processor ids and object handles; there is no need to probe
 * again.
1913 */
1914 if (acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
1915 cpu_map_built = 1;
1916 return (AE_OK);
1917 }
1918
1919 /*
1920 * Look for Processor objects
1921 */
1922 status = AcpiWalkNamespace(ACPI_TYPE_PROCESSOR,
1923 ACPI_ROOT_OBJECT,
1924 4,
1925 acpica_probe_processor,
1926 NULL,
1927 NULL,
1928 &rv);
1929 ASSERT(status == AE_OK);
1930
1931 /*
1932 * Look for processor Device objects
1933 */
1934 status = AcpiGetDevices("ACPI0007",
1935 acpica_probe_processor,
1936 NULL,
1937 &rv);
1938 ASSERT(status == AE_OK);
1939 cpu_map_built = 1;
1940
1941 return (status);
1942 }
1943
1944 /*
1945 * Grow cpu map table on demand.
1946 */
1947 static void
1948 acpica_grow_cpu_map(void)
1949 {
1950 if (cpu_map_count == cpu_map_count_max) {
1951 size_t sz;
1952 struct cpu_map_item **new_map;
1953
1954 ASSERT(cpu_map_count_max < INT_MAX / 2);
1955 cpu_map_count_max += max_ncpus;
1956 new_map = kmem_zalloc(sizeof (cpu_map[0]) * cpu_map_count_max,
1957 KM_SLEEP);
1958 if (cpu_map_count != 0) {
1959 ASSERT(cpu_map != NULL);
1960 sz = sizeof (cpu_map[0]) * cpu_map_count;
1961 kcopy(cpu_map, new_map, sz);
1962 kmem_free(cpu_map, sz);
1963 }
1964 cpu_map = new_map;
1965 }
1966 }
1967
1968 /*
1969 * Maintain mapping information among (cpu id, ACPI processor id, APIC id,
1970 * ACPI handle). The mapping table will be setup in two steps:
1971 * 1) acpica_add_processor_to_map() builds mapping among APIC id, ACPI
1972 * processor id and ACPI object handle.
 * 2) acpica_map_cpu() builds mapping between cpu id and ACPI processor id.
 * On systems which have ACPI device configuration for CPUs enabled,
1975 * acpica_map_cpu() will be called after acpica_add_processor_to_map(),
1976 * otherwise acpica_map_cpu() will be called before
1977 * acpica_add_processor_to_map().
1978 */
1979 ACPI_STATUS
1980 acpica_add_processor_to_map(UINT32 acpi_id, ACPI_HANDLE obj, UINT32 apic_id)
1981 {
1982 int i;
1983 ACPI_STATUS rc = AE_OK;
1984 struct cpu_map_item *item = NULL;
1985
1986 ASSERT(obj != NULL);
1987 if (obj == NULL) {
1988 return (AE_ERROR);
1989 }
1990
1991 mutex_enter(&cpu_map_lock);
1992
1993 /*
1994 * Special case for uppc
1995 * If we're a uppc system and ACPI device configuration for CPU has
1996 * been disabled, there won't be a CPU map yet because uppc psm doesn't
1997 * call acpica_map_cpu(). So create one and use the passed-in processor
1998 * as CPU 0
1999 * Assumption: the first CPU returned by
2000 * AcpiGetDevices/AcpiWalkNamespace will be the BSP.
2001 * Unfortunately there appears to be no good way to ASSERT this.
2002 */
2003 if (cpu_map == NULL &&
2004 !acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
2005 acpica_grow_cpu_map();
2006 ASSERT(cpu_map != NULL);
2007 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2008 item->cpu_id = 0;
2009 item->proc_id = acpi_id;
2010 item->apic_id = apic_id;
2011 item->obj = obj;
2012 cpu_map[0] = item;
2013 cpu_map_count = 1;
2014 mutex_exit(&cpu_map_lock);
2015 return (AE_OK);
2016 }
2017
2018 for (i = 0; i < cpu_map_count; i++) {
2019 if (cpu_map[i]->obj == obj) {
2020 rc = AE_ALREADY_EXISTS;
2021 break;
2022 } else if (cpu_map[i]->proc_id == acpi_id) {
2023 ASSERT(item == NULL);
2024 item = cpu_map[i];
2025 }
2026 }
2027
2028 if (rc == AE_OK) {
2029 if (item != NULL) {
2030 /*
			 * ACPI alias objects may cause more than one object
			 * with the same ACPI processor id; only remember
			 * the first object encountered.
2034 */
2035 if (item->obj == NULL) {
2036 item->obj = obj;
2037 item->apic_id = apic_id;
2038 } else {
2039 rc = AE_ALREADY_EXISTS;
2040 }
2041 } else if (cpu_map_count >= INT_MAX / 2) {
2042 rc = AE_NO_MEMORY;
2043 } else {
2044 acpica_grow_cpu_map();
2045 ASSERT(cpu_map != NULL);
2046 ASSERT(cpu_map_count < cpu_map_count_max);
2047 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2048 item->cpu_id = -1;
2049 item->proc_id = acpi_id;
2050 item->apic_id = apic_id;
2051 item->obj = obj;
2052 cpu_map[cpu_map_count] = item;
2053 cpu_map_count++;
2054 }
2055 }
2056
2057 mutex_exit(&cpu_map_lock);
2058
2059 return (rc);
2060 }
2061
2062 ACPI_STATUS
2063 acpica_remove_processor_from_map(UINT32 acpi_id)
2064 {
2065 int i;
2066 ACPI_STATUS rc = AE_NOT_EXIST;
2067
2068 mutex_enter(&cpu_map_lock);
2069 for (i = 0; i < cpu_map_count; i++) {
2070 if (cpu_map[i]->proc_id != acpi_id) {
2071 continue;
2072 }
2073 cpu_map[i]->obj = NULL;
2074 /* Free item if no more reference to it. */
2075 if (cpu_map[i]->cpu_id == -1) {
2076 kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
2077 cpu_map[i] = NULL;
2078 cpu_map_count--;
2079 if (i != cpu_map_count) {
2080 cpu_map[i] = cpu_map[cpu_map_count];
2081 cpu_map[cpu_map_count] = NULL;
2082 }
2083 }
2084 rc = AE_OK;
2085 break;
2086 }
2087 mutex_exit(&cpu_map_lock);
2088
2089 return (rc);
2090 }
2091
2092 ACPI_STATUS
2093 acpica_map_cpu(processorid_t cpuid, UINT32 acpi_id)
2094 {
2095 int i;
2096 ACPI_STATUS rc = AE_OK;
2097 struct cpu_map_item *item = NULL;
2098
2099 ASSERT(cpuid != -1);
2100 if (cpuid == -1) {
2101 return (AE_ERROR);
2102 }
2103
2104 mutex_enter(&cpu_map_lock);
2105 cpu_map_called = 1;
2106 for (i = 0; i < cpu_map_count; i++) {
2107 if (cpu_map[i]->cpu_id == cpuid) {
2108 rc = AE_ALREADY_EXISTS;
2109 break;
2110 } else if (cpu_map[i]->proc_id == acpi_id) {
2111 ASSERT(item == NULL);
2112 item = cpu_map[i];
2113 }
2114 }
2115 if (rc == AE_OK) {
2116 if (item != NULL) {
2117 if (item->cpu_id == -1) {
2118 item->cpu_id = cpuid;
2119 } else {
2120 rc = AE_ALREADY_EXISTS;
2121 }
2122 } else if (cpu_map_count >= INT_MAX / 2) {
2123 rc = AE_NO_MEMORY;
2124 } else {
2125 acpica_grow_cpu_map();
2126 ASSERT(cpu_map != NULL);
2127 ASSERT(cpu_map_count < cpu_map_count_max);
2128 item = kmem_zalloc(sizeof (*item), KM_SLEEP);
2129 item->cpu_id = cpuid;
2130 item->proc_id = acpi_id;
2131 item->apic_id = UINT32_MAX;
2132 item->obj = NULL;
2133 cpu_map[cpu_map_count] = item;
2134 cpu_map_count++;
2135 }
2136 }
2137 mutex_exit(&cpu_map_lock);
2138
2139 return (rc);
2140 }
2141
2142 ACPI_STATUS
2143 acpica_unmap_cpu(processorid_t cpuid)
2144 {
2145 int i;
2146 ACPI_STATUS rc = AE_NOT_EXIST;
2147
2148 ASSERT(cpuid != -1);
2149 if (cpuid == -1) {
2150 return (rc);
2151 }
2152
2153 mutex_enter(&cpu_map_lock);
2154 for (i = 0; i < cpu_map_count; i++) {
2155 if (cpu_map[i]->cpu_id != cpuid) {
2156 continue;
2157 }
2158 cpu_map[i]->cpu_id = -1;
2159 /* Free item if no more reference. */
2160 if (cpu_map[i]->obj == NULL) {
2161 kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
2162 cpu_map[i] = NULL;
2163 cpu_map_count--;
2164 if (i != cpu_map_count) {
2165 cpu_map[i] = cpu_map[cpu_map_count];
2166 cpu_map[cpu_map_count] = NULL;
2167 }
2168 }
2169 rc = AE_OK;
2170 break;
2171 }
2172 mutex_exit(&cpu_map_lock);
2173
2174 return (rc);
2175 }
2176
2177 ACPI_STATUS
2178 acpica_get_cpu_object_by_cpuid(processorid_t cpuid, ACPI_HANDLE *hdlp)
2179 {
2180 int i;
2181 ACPI_STATUS rc = AE_NOT_EXIST;
2182
2183 ASSERT(cpuid != -1);
2184 if (cpuid == -1) {
2185 return (rc);
2186 }
2187
2188 mutex_enter(&cpu_map_lock);
2189 for (i = 0; i < cpu_map_count; i++) {
2190 if (cpu_map[i]->cpu_id == cpuid && cpu_map[i]->obj != NULL) {
2191 *hdlp = cpu_map[i]->obj;
2192 rc = AE_OK;
2193 break;
2194 }
2195 }
2196 mutex_exit(&cpu_map_lock);
2197
2198 return (rc);
2199 }
2200
2201 ACPI_STATUS
2202 acpica_get_cpu_object_by_procid(UINT32 procid, ACPI_HANDLE *hdlp)
2203 {
2204 int i;
2205 ACPI_STATUS rc = AE_NOT_EXIST;
2206
2207 mutex_enter(&cpu_map_lock);
2208 for (i = 0; i < cpu_map_count; i++) {
2209 if (cpu_map[i]->proc_id == procid && cpu_map[i]->obj != NULL) {
2210 *hdlp = cpu_map[i]->obj;
2211 rc = AE_OK;
2212 break;
2213 }
2214 }
2215 mutex_exit(&cpu_map_lock);
2216
2217 return (rc);
2218 }
2219
2220 ACPI_STATUS
2221 acpica_get_cpu_object_by_apicid(UINT32 apicid, ACPI_HANDLE *hdlp)
2222 {
2223 int i;
2224 ACPI_STATUS rc = AE_NOT_EXIST;
2225
2226 ASSERT(apicid != UINT32_MAX);
2227 if (apicid == UINT32_MAX) {
2228 return (rc);
2229 }
2230
2231 mutex_enter(&cpu_map_lock);
2232 for (i = 0; i < cpu_map_count; i++) {
2233 if (cpu_map[i]->apic_id == apicid && cpu_map[i]->obj != NULL) {
2234 *hdlp = cpu_map[i]->obj;
2235 rc = AE_OK;
2236 break;
2237 }
2238 }
2239 mutex_exit(&cpu_map_lock);
2240
2241 return (rc);
2242 }
2243
2244 ACPI_STATUS
2245 acpica_get_cpu_id_by_object(ACPI_HANDLE hdl, processorid_t *cpuidp)
2246 {
2247 int i;
2248 ACPI_STATUS rc = AE_NOT_EXIST;
2249
2250 ASSERT(cpuidp != NULL);
2251 if (hdl == NULL || cpuidp == NULL) {
2252 return (rc);
2253 }
2254
2255 *cpuidp = -1;
2256 mutex_enter(&cpu_map_lock);
2257 for (i = 0; i < cpu_map_count; i++) {
2258 if (cpu_map[i]->obj == hdl && cpu_map[i]->cpu_id != -1) {
2259 *cpuidp = cpu_map[i]->cpu_id;
2260 rc = AE_OK;
2261 break;
2262 }
2263 }
2264 mutex_exit(&cpu_map_lock);
2265
2266 return (rc);
2267 }
2268
2269 ACPI_STATUS
2270 acpica_get_apicid_by_object(ACPI_HANDLE hdl, UINT32 *rp)
2271 {
2272 int i;
2273 ACPI_STATUS rc = AE_NOT_EXIST;
2274
2275 ASSERT(rp != NULL);
2276 if (hdl == NULL || rp == NULL) {
2277 return (rc);
2278 }
2279
2280 *rp = UINT32_MAX;
2281 mutex_enter(&cpu_map_lock);
2282 for (i = 0; i < cpu_map_count; i++) {
2283 if (cpu_map[i]->obj == hdl &&
2284 cpu_map[i]->apic_id != UINT32_MAX) {
2285 *rp = cpu_map[i]->apic_id;
2286 rc = AE_OK;
2287 break;
2288 }
2289 }
2290 mutex_exit(&cpu_map_lock);
2291
2292 return (rc);
2293 }
2294
2295 ACPI_STATUS
2296 acpica_get_procid_by_object(ACPI_HANDLE hdl, UINT32 *rp)
2297 {
2298 int i;
2299 ACPI_STATUS rc = AE_NOT_EXIST;
2300
2301 ASSERT(rp != NULL);
2302 if (hdl == NULL || rp == NULL) {
2303 return (rc);
2304 }
2305
2306 *rp = UINT32_MAX;
2307 mutex_enter(&cpu_map_lock);
2308 for (i = 0; i < cpu_map_count; i++) {
2309 if (cpu_map[i]->obj == hdl) {
2310 *rp = cpu_map[i]->proc_id;
2311 rc = AE_OK;
2312 break;
2313 }
2314 }
2315 mutex_exit(&cpu_map_lock);
2316
2317 return (rc);
2318 }
2319
2320 void
2321 acpica_set_core_feature(uint64_t features)
2322 {
2323 atomic_or_64(&acpica_core_features, features);
2324 }
2325
2326 void
2327 acpica_clear_core_feature(uint64_t features)
2328 {
2329 atomic_and_64(&acpica_core_features, ~features);
2330 }
2331
2332 uint64_t
2333 acpica_get_core_feature(uint64_t features)
2334 {
2335 return (acpica_core_features & features);
2336 }
2337
2338 void
2339 acpica_set_devcfg_feature(uint64_t features)
2340 {
2341 atomic_or_64(&acpica_devcfg_features, features);
2342 }
2343
2344 void
2345 acpica_clear_devcfg_feature(uint64_t features)
2346 {
2347 atomic_and_64(&acpica_devcfg_features, ~features);
2348 }
2349
2350 uint64_t
2351 acpica_get_devcfg_feature(uint64_t features)
2352 {
2353 return (acpica_devcfg_features & features);
2354 }
2355
2356 void
2357 acpica_get_global_FADT(ACPI_TABLE_FADT **gbl_FADT)
2358 {
2359 *gbl_FADT = &AcpiGbl_FADT;
2360 }
2361
2362 void
2363 acpica_write_cpupm_capabilities(boolean_t pstates, boolean_t cstates)
2364 {
2365 if (pstates && AcpiGbl_FADT.PstateControl != 0)
2366 (void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
2367 AcpiGbl_FADT.PstateControl);
2368
2369 if (cstates && AcpiGbl_FADT.CstControl != 0)
2370 (void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
2371 AcpiGbl_FADT.CstControl);
2372 }