1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2011 Joyent, Inc. All rights reserved.
26 */
27 /*
28 * Copyright (c) 2009-2010, Intel Corporation.
29 * All rights reserved.
30 */
31 /*
32 * ACPI CA OSL for Solaris x86
33 */
34
35 #include <sys/types.h>
36 #include <sys/kmem.h>
37 #include <sys/psm.h>
38 #include <sys/pci_cfgspace.h>
39 #include <sys/apic.h>
40 #include <sys/ddi.h>
41 #include <sys/sunddi.h>
42 #include <sys/sunndi.h>
43 #include <sys/pci.h>
44 #include <sys/kobj.h>
45 #include <sys/taskq.h>
46 #include <sys/strlog.h>
47 #include <sys/x86_archext.h>
48 #include <sys/note.h>
49 #include <sys/promif.h>
50
51 #include <sys/acpi/accommon.h>
52 #include <sys/acpica.h>
53
54 #define MAX_DAT_FILE_SIZE (64*1024)
55
56 /* local functions */
57 static int CompressEisaID(char *np);
58
59 static void scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus);
60 static int acpica_query_bbn_problem(void);
61 static int acpica_find_pcibus(int busno, ACPI_HANDLE *rh);
62 static int acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint);
63 static ACPI_STATUS acpica_set_devinfo(ACPI_HANDLE, dev_info_t *);
64 static ACPI_STATUS acpica_unset_devinfo(ACPI_HANDLE);
65 static void acpica_devinfo_handler(ACPI_HANDLE, void *);
66
67 /*
68 * Event queue vars
69 */
70 int acpica_eventq_init = 0;
71 ddi_taskq_t *osl_eventq[OSL_EC_BURST_HANDLER+1];
72
73 /*
74 * Priorities relative to minclsyspri that each taskq
75 * run at; OSL_NOTIFY_HANDLER needs to run at a higher
76 * priority than OSL_GPE_HANDLER. There's an implicit
77 * assumption that no priority here results in exceeding
78 * maxclsyspri.
79 * Note: these initializations need to match the order of
80 * ACPI_EXECUTE_TYPE.
81 */
82 int osl_eventq_pri_delta[OSL_EC_BURST_HANDLER+1] = {
83 0, /* OSL_GLOBAL_LOCK_HANDLER */
84 2, /* OSL_NOTIFY_HANDLER */
85 0, /* OSL_GPE_HANDLER */
86 0, /* OSL_DEBUGGER_THREAD */
87 0, /* OSL_EC_POLL_HANDLER */
88 0 /* OSL_EC_BURST_HANDLER */
89 };
90
91 /*
92 * Note, if you change this path, you need to update
93 * /boot/grub/filelist.ramdisk and pkg SUNWckr/prototype_i386
94 */
95 static char *acpi_table_path = "/boot/acpi/tables/";
96
97 /* non-zero while scan_d2a_map() is working */
98 static int scanning_d2a_map = 0;
99 static int d2a_done = 0;
100
101 /* features supported by ACPICA and ACPI device configuration. */
102 uint64_t acpica_core_features = ACPI_FEATURE_OSI_MODULE;
103 static uint64_t acpica_devcfg_features = 0;
104
105 /* set by acpi_poweroff() in PSMs and appm_ioctl() in acpippm for S3 */
106 int acpica_use_safe_delay = 0;
107
108 /* CPU mapping data */
109 struct cpu_map_item {
110 processorid_t cpu_id;
111 UINT32 proc_id;
112 UINT32 apic_id;
113 ACPI_HANDLE obj;
114 };
115
116 kmutex_t cpu_map_lock;
117 static struct cpu_map_item **cpu_map = NULL;
118 static int cpu_map_count_max = 0;
119 static int cpu_map_count = 0;
120 static int cpu_map_built = 0;
121
122 /*
123 * On systems with the uppc PSM only, acpica_map_cpu() won't be called at all.
124 * This flag is used to check for uppc-only systems by detecting whether
125 * acpica_map_cpu() has been called or not.
126 */
127 static int cpu_map_called = 0;
128
129 static int acpi_has_broken_bbn = -1;
130
131 /* buffer for AcpiOsVprintf() */
132 #define ACPI_OSL_PR_BUFLEN 1024
133 static char *acpi_osl_pr_buffer = NULL;
134 static int acpi_osl_pr_buflen;
135
136 #define D2A_DEBUG
137
138 /*
139 *
140 */
141 static void
142 discard_event_queues()
143 {
144 int i;
145
146 /*
147 * destroy event queues
148 */
149 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
150 if (osl_eventq[i])
151 ddi_taskq_destroy(osl_eventq[i]);
152 }
153 }
154
155
156 /*
157 *
158 */
159 static ACPI_STATUS
160 init_event_queues()
161 {
162 char namebuf[32];
163 int i, error = 0;
164
165 /*
166 * Initialize event queues
167 */
168
169 /* Always allocate only 1 thread per queue to force FIFO execution */
170 for (i = OSL_GLOBAL_LOCK_HANDLER; i <= OSL_EC_BURST_HANDLER; i++) {
171 snprintf(namebuf, 32, "ACPI%d", i);
172 osl_eventq[i] = ddi_taskq_create(NULL, namebuf, 1,
173 osl_eventq_pri_delta[i] + minclsyspri, 0);
174 if (osl_eventq[i] == NULL)
175 error++;
176 }
177
178 if (error != 0) {
179 discard_event_queues();
180 #ifdef DEBUG
181 cmn_err(CE_WARN, "!acpica: could not initialize event queues");
182 #endif
183 return (AE_ERROR);
184 }
185
186 acpica_eventq_init = 1;
187 return (AE_OK);
188 }
189
190 /*
191 * One-time initialization of OSL layer
192 */
193 ACPI_STATUS
194 AcpiOsInitialize(void)
195 {
196 /*
197 * Allocate buffer for AcpiOsVprintf() here to avoid
198 * kmem_alloc()/kmem_free() at high PIL
199 */
200 acpi_osl_pr_buffer = kmem_alloc(ACPI_OSL_PR_BUFLEN, KM_SLEEP);
201 if (acpi_osl_pr_buffer != NULL)
202 acpi_osl_pr_buflen = ACPI_OSL_PR_BUFLEN;
203
204 return (AE_OK);
205 }
206
207 /*
208 * One-time shut-down of OSL layer
209 */
210 ACPI_STATUS
211 AcpiOsTerminate(void)
212 {
213
214 if (acpi_osl_pr_buffer != NULL)
215 kmem_free(acpi_osl_pr_buffer, acpi_osl_pr_buflen);
216
217 discard_event_queues();
218 return (AE_OK);
219 }
220
221
222 ACPI_PHYSICAL_ADDRESS
223 AcpiOsGetRootPointer()
224 {
225 ACPI_PHYSICAL_ADDRESS Address;
226
227 /*
228 * For EFI firmware, the root pointer is defined in EFI systab.
229 * The boot code process the table and put the physical address
230 * in the acpi-root-tab property.
231 */
232 Address = ddi_prop_get_int(DDI_DEV_T_ANY, ddi_root_node(),
233 DDI_PROP_DONTPASS, "acpi-root-tab", NULL);
234
235 if ((Address == NULL) && ACPI_FAILURE(AcpiFindRootPointer(&Address)))
236 Address = NULL;
237
238 return (Address);
239 }
240
241 /*ARGSUSED*/
242 ACPI_STATUS
243 AcpiOsPredefinedOverride(const ACPI_PREDEFINED_NAMES *InitVal,
244 ACPI_STRING *NewVal)
245 {
246
247 *NewVal = 0;
248 return (AE_OK);
249 }
250
/*
 * Copy at most len bytes of src into dest and always NUL-terminate.
 * dest must be able to hold len + 1 bytes.
 */
static void
acpica_strncpy(char *dest, const char *src, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		dest[i] = src[i];
		if (src[i] == '\0') {
			i++;
			break;
		}
	}
	dest[i] = '\0';
}
260
261 ACPI_STATUS
262 AcpiOsTableOverride(ACPI_TABLE_HEADER *ExistingTable,
263 ACPI_TABLE_HEADER **NewTable)
264 {
265 char signature[5];
266 char oemid[7];
267 char oemtableid[9];
268 struct _buf *file;
269 char *buf1, *buf2;
270 int count;
271 char acpi_table_loc[128];
272
273 acpica_strncpy(signature, ExistingTable->Signature, 4);
274 acpica_strncpy(oemid, ExistingTable->OemId, 6);
275 acpica_strncpy(oemtableid, ExistingTable->OemTableId, 8);
276
277 #ifdef DEBUG
278 cmn_err(CE_NOTE, "!acpica: table [%s] v%d OEM ID [%s]"
279 " OEM TABLE ID [%s] OEM rev %x",
280 signature, ExistingTable->Revision, oemid, oemtableid,
281 ExistingTable->OemRevision);
282 #endif
283
284 /* File name format is "signature_oemid_oemtableid.dat" */
285 (void) strcpy(acpi_table_loc, acpi_table_path);
286 (void) strcat(acpi_table_loc, signature); /* for example, DSDT */
287 (void) strcat(acpi_table_loc, "_");
288 (void) strcat(acpi_table_loc, oemid); /* for example, IntelR */
289 (void) strcat(acpi_table_loc, "_");
290 (void) strcat(acpi_table_loc, oemtableid); /* for example, AWRDACPI */
291 (void) strcat(acpi_table_loc, ".dat");
292
293 file = kobj_open_file(acpi_table_loc);
294 if (file == (struct _buf *)-1) {
295 *NewTable = 0;
296 return (AE_OK);
297 } else {
298 buf1 = (char *)kmem_alloc(MAX_DAT_FILE_SIZE, KM_SLEEP);
299 count = kobj_read_file(file, buf1, MAX_DAT_FILE_SIZE-1, 0);
300 if (count >= MAX_DAT_FILE_SIZE) {
301 cmn_err(CE_WARN, "!acpica: table %s file size too big",
302 acpi_table_loc);
303 *NewTable = 0;
304 } else {
305 buf2 = (char *)kmem_alloc(count, KM_SLEEP);
306 (void) memcpy(buf2, buf1, count);
307 *NewTable = (ACPI_TABLE_HEADER *)buf2;
308 cmn_err(CE_NOTE, "!acpica: replacing table: %s",
309 acpi_table_loc);
310 }
311 }
312 kobj_close_file(file);
313 kmem_free(buf1, MAX_DAT_FILE_SIZE);
314
315 return (AE_OK);
316 }
317
318
319 /*
320 * ACPI semaphore implementation
321 */
322 typedef struct {
323 kmutex_t mutex;
324 kcondvar_t cv;
325 uint32_t available;
326 uint32_t initial;
327 uint32_t maximum;
328 } acpi_sema_t;
329
330 /*
331 *
332 */
333 void
334 acpi_sema_init(acpi_sema_t *sp, unsigned max, unsigned count)
335 {
336 mutex_init(&sp->mutex, NULL, MUTEX_DRIVER, NULL);
337 cv_init(&sp->cv, NULL, CV_DRIVER, NULL);
338 /* no need to enter mutex here at creation */
339 sp->available = count;
340 sp->initial = count;
341 sp->maximum = max;
342 }
343
344 /*
345 *
346 */
347 void
348 acpi_sema_destroy(acpi_sema_t *sp)
349 {
350
351 cv_destroy(&sp->cv);
352 mutex_destroy(&sp->mutex);
353 }
354
355 /*
356 *
357 */
358 ACPI_STATUS
359 acpi_sema_p(acpi_sema_t *sp, unsigned count, uint16_t wait_time)
360 {
361 ACPI_STATUS rv = AE_OK;
362 clock_t deadline;
363
364 mutex_enter(&sp->mutex);
365
366 if (sp->available >= count) {
367 /*
368 * Enough units available, no blocking
369 */
370 sp->available -= count;
371 mutex_exit(&sp->mutex);
372 return (rv);
373 } else if (wait_time == 0) {
374 /*
375 * Not enough units available and timeout
376 * specifies no blocking
377 */
378 rv = AE_TIME;
379 mutex_exit(&sp->mutex);
380 return (rv);
381 }
382
383 /*
384 * Not enough units available and timeout specifies waiting
385 */
386 if (wait_time != ACPI_WAIT_FOREVER)
387 deadline = ddi_get_lbolt() +
388 (clock_t)drv_usectohz(wait_time * 1000);
389
390 do {
391 if (wait_time == ACPI_WAIT_FOREVER)
392 cv_wait(&sp->cv, &sp->mutex);
393 else if (cv_timedwait(&sp->cv, &sp->mutex, deadline) < 0) {
394 rv = AE_TIME;
395 break;
396 }
397 } while (sp->available < count);
398
399 /* if we dropped out of the wait with AE_OK, we got the units */
400 if (rv == AE_OK)
401 sp->available -= count;
402
403 mutex_exit(&sp->mutex);
404 return (rv);
405 }
406
407 /*
408 *
409 */
410 void
411 acpi_sema_v(acpi_sema_t *sp, unsigned count)
412 {
413 mutex_enter(&sp->mutex);
414 sp->available += count;
415 cv_broadcast(&sp->cv);
416 mutex_exit(&sp->mutex);
417 }
418
419
420 ACPI_STATUS
421 AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
422 ACPI_HANDLE *OutHandle)
423 {
424 acpi_sema_t *sp;
425
426 if ((OutHandle == NULL) || (InitialUnits > MaxUnits))
427 return (AE_BAD_PARAMETER);
428
429 sp = (acpi_sema_t *)kmem_alloc(sizeof (acpi_sema_t), KM_SLEEP);
430 acpi_sema_init(sp, MaxUnits, InitialUnits);
431 *OutHandle = (ACPI_HANDLE)sp;
432 return (AE_OK);
433 }
434
435
436 ACPI_STATUS
437 AcpiOsDeleteSemaphore(ACPI_HANDLE Handle)
438 {
439
440 if (Handle == NULL)
441 return (AE_BAD_PARAMETER);
442
443 acpi_sema_destroy((acpi_sema_t *)Handle);
444 kmem_free((void *)Handle, sizeof (acpi_sema_t));
445 return (AE_OK);
446 }
447
448 ACPI_STATUS
449 AcpiOsWaitSemaphore(ACPI_HANDLE Handle, UINT32 Units, UINT16 Timeout)
450 {
451
452 if ((Handle == NULL) || (Units < 1))
453 return (AE_BAD_PARAMETER);
454
455 return (acpi_sema_p((acpi_sema_t *)Handle, Units, Timeout));
456 }
457
458 ACPI_STATUS
459 AcpiOsSignalSemaphore(ACPI_HANDLE Handle, UINT32 Units)
460 {
461
462 if ((Handle == NULL) || (Units < 1))
463 return (AE_BAD_PARAMETER);
464
465 acpi_sema_v((acpi_sema_t *)Handle, Units);
466 return (AE_OK);
467 }
468
469 ACPI_STATUS
470 AcpiOsCreateLock(ACPI_HANDLE *OutHandle)
471 {
472 kmutex_t *mp;
473
474 if (OutHandle == NULL)
475 return (AE_BAD_PARAMETER);
476
477 mp = (kmutex_t *)kmem_alloc(sizeof (kmutex_t), KM_SLEEP);
478 mutex_init(mp, NULL, MUTEX_DRIVER, NULL);
479 *OutHandle = (ACPI_HANDLE)mp;
480 return (AE_OK);
481 }
482
483 void
484 AcpiOsDeleteLock(ACPI_HANDLE Handle)
485 {
486
487 if (Handle == NULL)
488 return;
489
490 mutex_destroy((kmutex_t *)Handle);
491 kmem_free((void *)Handle, sizeof (kmutex_t));
492 }
493
/*
 * Acquire an ACPI CA "spinlock".  The returned value is the cookie
 * later passed back to AcpiOsReleaseLock() (unused on this platform).
 * NOTE(review): AE_BAD_PARAMETER/AE_OK are ACPI_STATUS values being
 * returned through an ACPI_CPU_FLAGS return type -- confirm callers
 * ignore the value as this implies.
 */
ACPI_CPU_FLAGS
AcpiOsAcquireLock(ACPI_HANDLE Handle)
{


	if (Handle == NULL)
		return (AE_BAD_PARAMETER);

	/*
	 * The idle thread may not block, so spin on mutex_tryenter()
	 * instead of using a potentially sleeping mutex_enter().
	 */
	if (curthread == CPU->cpu_idle_thread) {
		while (!mutex_tryenter((kmutex_t *)Handle))
			/* spin */;
	} else
		mutex_enter((kmutex_t *)Handle);
	return (AE_OK);
}
509
/*
 * Release a lock taken by AcpiOsAcquireLock(); the Flags cookie is
 * ignored on this platform.
 */
void
AcpiOsReleaseLock(ACPI_HANDLE Handle, ACPI_CPU_FLAGS Flags)
{
	_NOTE(ARGUNUSED(Flags))

	mutex_exit((kmutex_t *)Handle);
}
517
518
519 void *
520 AcpiOsAllocate(ACPI_SIZE Size)
521 {
522 ACPI_SIZE *tmp_ptr;
523
524 Size += sizeof (Size);
525 tmp_ptr = (ACPI_SIZE *)kmem_zalloc(Size, KM_SLEEP);
526 *tmp_ptr++ = Size;
527 return (tmp_ptr);
528 }
529
530 void
531 AcpiOsFree(void *Memory)
532 {
533 ACPI_SIZE size, *tmp_ptr;
534
535 tmp_ptr = (ACPI_SIZE *)Memory;
536 tmp_ptr -= 1;
537 size = *tmp_ptr;
538 kmem_free(tmp_ptr, size);
539 }
540
static int napics_found;	/* number of ioapic addresses in array */
static ACPI_PHYSICAL_ADDRESS ioapic_paddr[MAX_IO_APIC];	/* page-aligned */
static ACPI_TABLE_MADT *acpi_mapic_dtp = NULL;	/* cached MADT, once parsed */
static void *dummy_ioapicadr;	/* scratch page returned for ioapic maps */
545
546 void
547 acpica_find_ioapics(void)
548 {
549 int madt_seen, madt_size;
550 ACPI_SUBTABLE_HEADER *ap;
551 ACPI_MADT_IO_APIC *mia;
552
553 if (acpi_mapic_dtp != NULL)
554 return; /* already parsed table */
555 if (AcpiGetTable(ACPI_SIG_MADT, 1,
556 (ACPI_TABLE_HEADER **) &acpi_mapic_dtp) != AE_OK)
557 return;
558
559 napics_found = 0;
560
561 /*
562 * Search the MADT for ioapics
563 */
564 ap = (ACPI_SUBTABLE_HEADER *) (acpi_mapic_dtp + 1);
565 madt_size = acpi_mapic_dtp->Header.Length;
566 madt_seen = sizeof (*acpi_mapic_dtp);
567
568 while (madt_seen < madt_size) {
569
570 switch (ap->Type) {
571 case ACPI_MADT_TYPE_IO_APIC:
572 mia = (ACPI_MADT_IO_APIC *) ap;
573 if (napics_found < MAX_IO_APIC) {
574 ioapic_paddr[napics_found++] =
575 (ACPI_PHYSICAL_ADDRESS)
576 (mia->Address & PAGEMASK);
577 }
578 break;
579
580 default:
581 break;
582 }
583
584 /* advance to next entry */
585 madt_seen += ap->Length;
586 ap = (ACPI_SUBTABLE_HEADER *)(((char *)ap) + ap->Length);
587 }
588 if (dummy_ioapicadr == NULL)
589 dummy_ioapicadr = kmem_zalloc(PAGESIZE, KM_SLEEP);
590 }
591
592
593 void *
594 AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Size)
595 {
596 int i;
597
598 /*
599 * If the iopaic address table is populated, check if trying
600 * to access an ioapic. Instead, return a pointer to a dummy ioapic.
601 */
602 for (i = 0; i < napics_found; i++) {
603 if ((PhysicalAddress & PAGEMASK) == ioapic_paddr[i])
604 return (dummy_ioapicadr);
605 }
606 /* FUTUREWORK: test PhysicalAddress for > 32 bits */
607 return (psm_map_new((paddr_t)PhysicalAddress,
608 (size_t)Size, PSM_PROT_WRITE | PSM_PROT_READ));
609 }
610
611 void
612 AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Size)
613 {
614 /*
615 * Check if trying to unmap dummy ioapic address.
616 */
617 if (LogicalAddress == dummy_ioapicadr)
618 return;
619
620 psm_unmap((caddr_t)LogicalAddress, (size_t)Size);
621 }
622
/*ARGSUSED*/
ACPI_STATUS
AcpiOsGetPhysicalAddress(void *LogicalAddress,
    ACPI_PHYSICAL_ADDRESS *PhysicalAddress)
{

	/* UNIMPLEMENTED: not invoked by ACPI CA code */
	/* Virtual-to-physical translation is not needed by this port. */
	return (AE_NOT_IMPLEMENTED);
}
632
633
634 ACPI_OSD_HANDLER acpi_isr;
635 void *acpi_isr_context;
636
637 uint_t
638 acpi_wrapper_isr(char *arg)
639 {
640 _NOTE(ARGUNUSED(arg))
641
642 int status;
643
644 status = (*acpi_isr)(acpi_isr_context);
645
646 if (status == ACPI_INTERRUPT_HANDLED) {
647 return (DDI_INTR_CLAIMED);
648 } else {
649 return (DDI_INTR_UNCLAIMED);
650 }
651 }
652
static int acpi_intr_hooked = 0;	/* non-zero once add_avintr() succeeded */
654
/*
 * Register ServiceRoutine as the SCI handler.  The InterruptNumber
 * ACPI CA passes is ignored; the actual vector comes from
 * acpica_get_sci(), which accounts for PIC/APIC mode.
 */
ACPI_STATUS
AcpiOsInstallInterruptHandler(UINT32 InterruptNumber,
    ACPI_OSD_HANDLER ServiceRoutine,
    void *Context)
{
	_NOTE(ARGUNUSED(InterruptNumber))

	int retval;
	int sci_vect;
	iflag_t sci_flags;

	/* stash the handler/context for acpi_wrapper_isr() */
	acpi_isr = ServiceRoutine;
	acpi_isr_context = Context;

	/*
	 * Get SCI (adjusted for PIC/APIC mode if necessary)
	 */
	if (acpica_get_sci(&sci_vect, &sci_flags) != AE_OK) {
		return (AE_ERROR);
	}

#ifdef DEBUG
	cmn_err(CE_NOTE, "!acpica: attaching SCI %d", sci_vect);
#endif

	/* add_avintr() returns non-zero on success */
	retval = add_avintr(NULL, SCI_IPL, (avfunc)acpi_wrapper_isr,
	    "ACPI SCI", sci_vect, NULL, NULL, NULL, NULL);
	if (retval) {
		acpi_intr_hooked = 1;
		return (AE_OK);
	} else
		return (AE_BAD_PARAMETER);
}
688
689 ACPI_STATUS
690 AcpiOsRemoveInterruptHandler(UINT32 InterruptNumber,
691 ACPI_OSD_HANDLER ServiceRoutine)
692 {
693 _NOTE(ARGUNUSED(ServiceRoutine))
694
695 #ifdef DEBUG
696 cmn_err(CE_NOTE, "!acpica: detaching SCI %d", InterruptNumber);
697 #endif
698 if (acpi_intr_hooked) {
699 rem_avintr(NULL, LOCK_LEVEL - 1, (avfunc)acpi_wrapper_isr,
700 InterruptNumber);
701 acpi_intr_hooked = 0;
702 }
703 return (AE_OK);
704 }
705
706
ACPI_THREAD_ID
AcpiOsGetThreadId(void)
{
	/*
	 * ACPI CA doesn't care what actual value is returned as long
	 * as it is non-zero and unique to each existing thread.
	 * ACPI CA assumes that thread ID is castable to a pointer,
	 * so we use the current thread pointer.
	 */
	return (ACPI_CAST_PTHREAD_T((uintptr_t)curthread));
}
718
719 /*
720 *
721 */
722 ACPI_STATUS
723 AcpiOsExecute(ACPI_EXECUTE_TYPE Type, ACPI_OSD_EXEC_CALLBACK Function,
724 void *Context)
725 {
726
727 if (!acpica_eventq_init) {
728 /*
729 * Create taskqs for event handling
730 */
731 if (init_event_queues() != AE_OK)
732 return (AE_ERROR);
733 }
734
735 if (ddi_taskq_dispatch(osl_eventq[Type], Function, Context,
736 DDI_NOSLEEP) == DDI_FAILURE) {
737 #ifdef DEBUG
738 cmn_err(CE_WARN, "!acpica: unable to dispatch event");
739 #endif
740 return (AE_ERROR);
741 }
742 return (AE_OK);
743
744 }
745
746 void
747 AcpiOsSleep(ACPI_INTEGER Milliseconds)
748 {
749 /*
750 * During kernel startup, before the first tick interrupt
751 * has taken place, we can't call delay; very late in
752 * kernel shutdown or suspend/resume, clock interrupts
753 * are blocked, so delay doesn't work then either.
754 * So we busy wait if lbolt == 0 (kernel startup)
755 * or if acpica_use_safe_delay has been set to a
756 * non-zero value.
757 */
758 if ((ddi_get_lbolt() == 0) || acpica_use_safe_delay)
759 drv_usecwait(Milliseconds * 1000);
760 else
761 delay(drv_usectohz(Milliseconds * 1000));
762 }
763
/*
 * Busy-wait for Microseconds; used by ACPI CA for short delays
 * where sleeping is not permitted.
 */
void
AcpiOsStall(UINT32 Microseconds)
{
	drv_usecwait(Microseconds);
}
769
770
771 /*
772 * Implementation of "Windows 2001" compatible I/O permission map
773 *
774 */
775 #define OSL_IO_NONE (0)
776 #define OSL_IO_READ (1<<0)
777 #define OSL_IO_WRITE (1<<1)
778 #define OSL_IO_RW (OSL_IO_READ | OSL_IO_WRITE)
779 #define OSL_IO_TERM (1<<2)
780 #define OSL_IO_DEFAULT OSL_IO_RW
781
782 static struct io_perm {
783 ACPI_IO_ADDRESS low;
784 ACPI_IO_ADDRESS high;
785 uint8_t perm;
786 } osl_io_perm[] = {
787 { 0xcf8, 0xd00, OSL_IO_TERM | OSL_IO_RW}
788 };
789
790
791 /*
792 *
793 */
794 static struct io_perm *
795 osl_io_find_perm(ACPI_IO_ADDRESS addr)
796 {
797 struct io_perm *p;
798
799 p = osl_io_perm;
800 while (p != NULL) {
801 if ((p->low <= addr) && (addr <= p->high))
802 break;
803 p = (p->perm & OSL_IO_TERM) ? NULL : p+1;
804 }
805
806 return (p);
807 }
808
809 /*
810 *
811 */
812 ACPI_STATUS
813 AcpiOsReadPort(ACPI_IO_ADDRESS Address, UINT32 *Value, UINT32 Width)
814 {
815 struct io_perm *p;
816
817 /* verify permission */
818 p = osl_io_find_perm(Address);
819 if (p && (p->perm & OSL_IO_READ) == 0) {
820 cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u not permitted",
821 (long)Address, Width);
822 *Value = 0xffffffff;
823 return (AE_ERROR);
824 }
825
826 switch (Width) {
827 case 8:
828 *Value = inb(Address);
829 break;
830 case 16:
831 *Value = inw(Address);
832 break;
833 case 32:
834 *Value = inl(Address);
835 break;
836 default:
837 cmn_err(CE_WARN, "!AcpiOsReadPort: %lx %u failed",
838 (long)Address, Width);
839 return (AE_BAD_PARAMETER);
840 }
841 return (AE_OK);
842 }
843
844 ACPI_STATUS
845 AcpiOsWritePort(ACPI_IO_ADDRESS Address, UINT32 Value, UINT32 Width)
846 {
847 struct io_perm *p;
848
849 /* verify permission */
850 p = osl_io_find_perm(Address);
851 if (p && (p->perm & OSL_IO_WRITE) == 0) {
852 cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u not permitted",
853 (long)Address, Width);
854 return (AE_ERROR);
855 }
856
857 switch (Width) {
858 case 8:
859 outb(Address, Value);
860 break;
861 case 16:
862 outw(Address, Value);
863 break;
864 case 32:
865 outl(Address, Value);
866 break;
867 default:
868 cmn_err(CE_WARN, "!AcpiOsWritePort: %lx %u failed",
869 (long)Address, Width);
870 return (AE_BAD_PARAMETER);
871 }
872 return (AE_OK);
873 }
874
875
876 /*
877 *
878 */
879
880 #define OSL_RW(ptr, val, type, rw) \
881 { if (rw) *((type *)(ptr)) = *((type *) val); \
882 else *((type *) val) = *((type *)(ptr)); }
883
884
885 static void
886 osl_rw_memory(ACPI_PHYSICAL_ADDRESS Address, UINT32 *Value,
887 UINT32 Width, int write)
888 {
889 size_t maplen = Width / 8;
890 caddr_t ptr;
891
892 ptr = psm_map_new((paddr_t)Address, maplen,
893 PSM_PROT_WRITE | PSM_PROT_READ);
894
895 switch (maplen) {
896 case 1:
897 OSL_RW(ptr, Value, uint8_t, write);
898 break;
899 case 2:
900 OSL_RW(ptr, Value, uint16_t, write);
901 break;
902 case 4:
903 OSL_RW(ptr, Value, uint32_t, write);
904 break;
905 default:
906 cmn_err(CE_WARN, "!osl_rw_memory: invalid size %d",
907 Width);
908 break;
909 }
910
911 psm_unmap(ptr, maplen);
912 }
913
/*
 * Read Width bits of physical memory at Address into *Value.
 */
ACPI_STATUS
AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS Address,
    UINT32 *Value, UINT32 Width)
{
	osl_rw_memory(Address, Value, Width, 0);
	return (AE_OK);
}
921
/*
 * Write Width bits of Value to physical memory at Address.
 */
ACPI_STATUS
AcpiOsWriteMemory(ACPI_PHYSICAL_ADDRESS Address,
    UINT32 Value, UINT32 Width)
{
	osl_rw_memory(Address, &Value, Width, 1);
	return (AE_OK);
}
929
930
/*
 * Read Width bits of PCI config space for PciId at register Reg.
 * 64-bit accesses are not supported and return AE_BAD_PARAMETER.
 */
ACPI_STATUS
AcpiOsReadPciConfiguration(ACPI_PCI_ID *PciId, UINT32 Reg,
    UINT64 *Value, UINT32 Width)
{

	switch (Width) {
	case 8:
		*Value = (UINT64)(*pci_getb_func)
		    (PciId->Bus, PciId->Device, PciId->Function, Reg);
		break;
	case 16:
		*Value = (UINT64)(*pci_getw_func)
		    (PciId->Bus, PciId->Device, PciId->Function, Reg);
		break;
	case 32:
		*Value = (UINT64)(*pci_getl_func)
		    (PciId->Bus, PciId->Device, PciId->Function, Reg);
		break;
	case 64:	/* unsupported */
		/* FALLTHROUGH */
	default:
		cmn_err(CE_WARN, "!AcpiOsReadPciConfiguration: %x %u failed",
		    Reg, Width);
		return (AE_BAD_PARAMETER);
	}
	return (AE_OK);
}
957
958 /*
959 *
960 */
961 int acpica_write_pci_config_ok = 1;
962
963 ACPI_STATUS
964 AcpiOsWritePciConfiguration(ACPI_PCI_ID *PciId, UINT32 Reg,
965 UINT64 Value, UINT32 Width)
966 {
967
968 if (!acpica_write_pci_config_ok) {
969 cmn_err(CE_NOTE, "!write to PCI cfg %x/%x/%x %x"
970 " %lx %d not permitted", PciId->Bus, PciId->Device,
971 PciId->Function, Reg, (long)Value, Width);
972 return (AE_OK);
973 }
974
975 switch (Width) {
976 case 8:
977 (*pci_putb_func)(PciId->Bus, PciId->Device, PciId->Function,
978 Reg, (uint8_t)Value);
979 break;
980 case 16:
981 (*pci_putw_func)(PciId->Bus, PciId->Device, PciId->Function,
982 Reg, (uint16_t)Value);
983 break;
984 case 32:
985 (*pci_putl_func)(PciId->Bus, PciId->Device, PciId->Function,
986 Reg, (uint32_t)Value);
987 break;
988 case 64:
989 default:
990 cmn_err(CE_WARN, "!AcpiOsWritePciConfiguration: %x %u failed",
991 Reg, Width);
992 return (AE_BAD_PARAMETER);
993 }
994 return (AE_OK);
995 }
996
997 /*
998 * Called with ACPI_HANDLEs for both a PCI Config Space
999 * OpRegion and (what ACPI CA thinks is) the PCI device
1000 * to which this ConfigSpace OpRegion belongs.
1001 *
1002 * ACPI CA uses _BBN and _ADR objects to determine the default
1003 * values for bus, segment, device and function; anything ACPI CA
1004 * can't figure out from the ACPI tables will be 0. One very
1005 * old 32-bit x86 system is known to have broken _BBN; this is
1006 * not addressed here.
1007 *
1008 * Some BIOSes implement _BBN() by reading PCI config space
1009 * on bus #0 - which means that we'll recurse when we attempt
1010 * to create the devinfo-to-ACPI map. If Derive is called during
1011 * scan_d2a_map, we don't translate the bus # and return.
1012 *
1013 * We get the parent of the OpRegion, which must be a PCI
1014 * node, fetch the associated devinfo node and snag the
1015 * b/d/f from it.
1016 */
1017 void
1018 AcpiOsDerivePciId(ACPI_HANDLE rhandle, ACPI_HANDLE chandle,
1019 ACPI_PCI_ID **PciId)
1020 {
1021 ACPI_HANDLE handle;
1022 dev_info_t *dip;
1023 int bus, device, func, devfn;
1024
1025 /*
1026 * See above - avoid recursing during scanning_d2a_map.
1027 */
1028 if (scanning_d2a_map)
1029 return;
1030
1031 /*
1032 * Get the OpRegion's parent
1033 */
1034 if (AcpiGetParent(chandle, &handle) != AE_OK)
1035 return;
1036
1037 /*
1038 * If we've mapped the ACPI node to the devinfo
1039 * tree, use the devinfo reg property
1040 */
1041 if (ACPI_SUCCESS(acpica_get_devinfo(handle, &dip)) &&
1042 (acpica_get_bdf(dip, &bus, &device, &func) >= 0)) {
1043 (*PciId)->Bus = bus;
1044 (*PciId)->Device = device;
1045 (*PciId)->Function = func;
1046 }
1047 }
1048
1049
1050 /*ARGSUSED*/
1051 BOOLEAN
1052 AcpiOsReadable(void *Pointer, ACPI_SIZE Length)
1053 {
1054
1055 /* Always says yes; all mapped memory assumed readable */
1056 return (1);
1057 }
1058
1059 /*ARGSUSED*/
1060 BOOLEAN
1061 AcpiOsWritable(void *Pointer, ACPI_SIZE Length)
1062 {
1063
1064 /* Always says yes; all mapped memory assumed writable */
1065 return (1);
1066 }
1067
1068 UINT64
1069 AcpiOsGetTimer(void)
1070 {
1071 /* gethrtime() returns 1nS resolution; convert to 100nS granules */
1072 return ((gethrtime() + 50) / 100);
1073 }
1074
/* Map of _OSI feature strings to the core features they require */
static struct AcpiOSIFeature_s {
	uint64_t control_flag;		/* required core feature; 0 == none */
	const char *feature_name;	/* string AML passes to _OSI */
} AcpiOSIFeatures[] = {
	{ ACPI_FEATURE_OSI_MODULE,	"Module Device" },
	{ 0,				"Processor Device" }
};
1082
1083 /*ARGSUSED*/
1084 ACPI_STATUS
1085 AcpiOsValidateInterface(char *feature)
1086 {
1087 int i;
1088
1089 ASSERT(feature != NULL);
1090 for (i = 0; i < sizeof (AcpiOSIFeatures) / sizeof (AcpiOSIFeatures[0]);
1091 i++) {
1092 if (strcmp(feature, AcpiOSIFeatures[i].feature_name) != 0) {
1093 continue;
1094 }
1095 /* Check whether required core features are available. */
1096 if (AcpiOSIFeatures[i].control_flag != 0 &&
1097 acpica_get_core_feature(AcpiOSIFeatures[i].control_flag) !=
1098 AcpiOSIFeatures[i].control_flag) {
1099 break;
1100 }
1101 /* Feature supported. */
1102 return (AE_OK);
1103 }
1104
1105 return (AE_SUPPORT);
1106 }
1107
/*
 * Address validation hook: every OpRegion address is accepted on
 * this platform.
 */
/*ARGSUSED*/
ACPI_STATUS
AcpiOsValidateAddress(UINT8 spaceid, ACPI_PHYSICAL_ADDRESS addr,
    ACPI_SIZE length)
{
	return (AE_OK);
}
1115
/*
 * Notification from ACPI CA (fatal error / breakpoint); currently
 * just logged.
 */
ACPI_STATUS
AcpiOsSignal(UINT32 Function, void *Info)
{
	_NOTE(ARGUNUSED(Function, Info))

	/* FUTUREWORK: debugger support */

	cmn_err(CE_NOTE, "!OsSignal unimplemented");
	return (AE_OK);
}
1126
/*
 * printf-style entry point; formatting and output are delegated to
 * AcpiOsVprintf().
 */
void ACPI_INTERNAL_VAR_XFACE
AcpiOsPrintf(const char *Format, ...)
{
	va_list ap;

	va_start(ap, Format);
	AcpiOsVprintf(Format, ap);
	va_end(ap);
}
1136
1137 /*
1138 * When != 0, sends output to console
1139 * Patchable with kmdb or /etc/system.
1140 */
1141 int acpica_console_out = 0;
1142
1143 #define ACPICA_OUTBUF_LEN 160
1144 char acpica_outbuf[ACPICA_OUTBUF_LEN];
1145 int acpica_outbuf_offset;
1146
1147 /*
1148 *
1149 */
1150 static void
1151 acpica_pr_buf(char *buf)
1152 {
1153 char c, *bufp, *outp;
1154 int out_remaining;
1155
1156 /*
1157 * copy the supplied buffer into the output buffer
1158 * when we hit a '\n' or overflow the output buffer,
1159 * output and reset the output buffer
1160 */
1161 bufp = buf;
1162 outp = acpica_outbuf + acpica_outbuf_offset;
1163 out_remaining = ACPICA_OUTBUF_LEN - acpica_outbuf_offset - 1;
1164 while (c = *bufp++) {
1165 *outp++ = c;
1166 if (c == '\n' || --out_remaining == 0) {
1167 *outp = '\0';
1168 switch (acpica_console_out) {
1169 case 1:
1170 printf(acpica_outbuf);
1171 break;
1172 case 2:
1173 prom_printf(acpica_outbuf);
1174 break;
1175 case 0:
1176 default:
1177 (void) strlog(0, 0, 0,
1178 SL_CONSOLE | SL_NOTE | SL_LOGONLY,
1179 acpica_outbuf);
1180 break;
1181 }
1182 acpica_outbuf_offset = 0;
1183 outp = acpica_outbuf;
1184 out_remaining = ACPICA_OUTBUF_LEN - 1;
1185 }
1186 }
1187
1188 acpica_outbuf_offset = outp - acpica_outbuf;
1189 }
1190
/*
 * Format ACPI CA output into the preallocated staging buffer and
 * hand it to acpica_pr_buf() for line-oriented emission.
 */
void
AcpiOsVprintf(const char *Format, va_list Args)
{

	/*
	 * If AcpiOsInitialize() failed to allocate a string buffer,
	 * resort to vprintf().
	 */
	if (acpi_osl_pr_buffer == NULL) {
		vprintf(Format, Args);
		return;
	}

	/*
	 * It is possible that a very long debug output statement will
	 * be truncated; this is silently ignored.
	 */
	(void) vsnprintf(acpi_osl_pr_buffer, acpi_osl_pr_buflen, Format, Args);
	acpica_pr_buf(acpi_osl_pr_buffer);
}
1211
/*
 * Output-redirection hook for the ACPI CA debugger; unimplemented,
 * all output continues through AcpiOsVprintf().
 */
void
AcpiOsRedirectOutput(void *Destination)
{
	_NOTE(ARGUNUSED(Destination))

	/* FUTUREWORK: debugger support */

#ifdef DEBUG
	cmn_err(CE_WARN, "!acpica: AcpiOsRedirectOutput called");
#endif
}
1223
1224
1225 UINT32
1226 AcpiOsGetLine(char *Buffer, UINT32 len, UINT32 *BytesRead)
1227 {
1228 _NOTE(ARGUNUSED(Buffer))
1229 _NOTE(ARGUNUSED(len))
1230 _NOTE(ARGUNUSED(BytesRead))
1231
1232 /* FUTUREWORK: debugger support */
1233
1234 return (0);
1235 }
1236
1237 /*
1238 * Device tree binding
1239 */
1240 static ACPI_STATUS
1241 acpica_find_pcibus_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
1242 {
1243 _NOTE(ARGUNUSED(lvl));
1244
1245 int sta, hid, bbn;
1246 int busno = (intptr_t)ctxp;
1247 ACPI_HANDLE *hdlp = (ACPI_HANDLE *)rvpp;
1248
1249 /* Check whether device exists. */
1250 if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
1251 !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
1252 /*
1253 * Skip object if device doesn't exist.
1254 * According to ACPI Spec,
1255 * 1) setting either bit 0 or bit 3 means that device exists.
1256 * 2) Absence of _STA method means all status bits set.
1257 */
1258 return (AE_CTRL_DEPTH);
1259 }
1260
1261 if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
1262 (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
1263 /* Non PCI/PCIe host bridge. */
1264 return (AE_OK);
1265 }
1266
1267 if (acpi_has_broken_bbn) {
1268 ACPI_BUFFER rb;
1269 rb.Pointer = NULL;
1270 rb.Length = ACPI_ALLOCATE_BUFFER;
1271
1272 /* Decree _BBN == n from PCI<n> */
1273 if (AcpiGetName(hdl, ACPI_SINGLE_NAME, &rb) != AE_OK) {
1274 return (AE_CTRL_TERMINATE);
1275 }
1276 bbn = ((char *)rb.Pointer)[3] - '0';
1277 AcpiOsFree(rb.Pointer);
1278 if (bbn == busno || busno == 0) {
1279 *hdlp = hdl;
1280 return (AE_CTRL_TERMINATE);
1281 }
1282 } else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn))) {
1283 if (bbn == busno) {
1284 *hdlp = hdl;
1285 return (AE_CTRL_TERMINATE);
1286 }
1287 } else if (busno == 0) {
1288 *hdlp = hdl;
1289 return (AE_CTRL_TERMINATE);
1290 }
1291
1292 return (AE_CTRL_DEPTH);
1293 }
1294
1295 static int
1296 acpica_find_pcibus(int busno, ACPI_HANDLE *rh)
1297 {
1298 ACPI_HANDLE sbobj, busobj;
1299
1300 /* initialize static flag by querying ACPI namespace for bug */
1301 if (acpi_has_broken_bbn == -1)
1302 acpi_has_broken_bbn = acpica_query_bbn_problem();
1303
1304 if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1305 busobj = NULL;
1306 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1307 acpica_find_pcibus_walker, NULL, (void *)(intptr_t)busno,
1308 (void **)&busobj);
1309 if (busobj != NULL) {
1310 *rh = busobj;
1311 return (AE_OK);
1312 }
1313 }
1314
1315 return (AE_ERROR);
1316 }
1317
/*
 * AcpiWalkNamespace() callback used by acpica_query_bbn_problem():
 * count, in *ctxp, the PCI/PCIe host bridges whose _BBN evaluates to 0,
 * and terminate the walk as soon as a second one is found.
 */
static ACPI_STATUS
acpica_query_bbn_walker(ACPI_HANDLE hdl, UINT32 lvl, void *ctxp, void **rvpp)
{
	_NOTE(ARGUNUSED(lvl));
	_NOTE(ARGUNUSED(rvpp));

	int sta, hid, bbn;
	int *cntp = (int *)ctxp;	/* running count of zero-_BBN bridges */

	/* Check whether device exists. */
	if (ACPI_SUCCESS(acpica_eval_int(hdl, "_STA", &sta)) &&
	    !(sta & (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_FUNCTIONING))) {
		/*
		 * Skip object if device doesn't exist.
		 * According to ACPI Spec,
		 * 1) setting either bit 0 or bit 3 means that device exists.
		 * 2) Absence of _STA method means all status bits set.
		 */
		return (AE_CTRL_DEPTH);
	}

	if (ACPI_FAILURE(acpica_eval_hid(hdl, "_HID", &hid)) ||
	    (hid != HID_PCI_BUS && hid != HID_PCI_EXPRESS_BUS)) {
		/* Non PCI/PCIe host bridge. */
		return (AE_OK);
	} else if (ACPI_SUCCESS(acpica_eval_int(hdl, "_BBN", &bbn)) &&
	    bbn == 0 && ++(*cntp) > 1) {
		/*
		 * If we find more than one bus with a 0 _BBN
		 * we have the problem that BigBear's BIOS shows
		 */
		return (AE_CTRL_TERMINATE);
	} else {
		/*
		 * Skip children of PCI/PCIe host bridge.
		 */
		return (AE_CTRL_DEPTH);
	}
}
1357
1358 /*
1359 * Look for ACPI problem where _BBN is zero for multiple PCI buses
1360 * This is a clear ACPI bug, but we have a workaround in acpica_find_pcibus()
1361 * below if it exists.
1362 */
1363 static int
1364 acpica_query_bbn_problem(void)
1365 {
1366 ACPI_HANDLE sbobj;
1367 int zerobbncnt;
1368 void *rv;
1369
1370 zerobbncnt = 0;
1371 if (ACPI_SUCCESS(AcpiGetHandle(NULL, "\\_SB", &sbobj))) {
1372 (void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, sbobj, UINT32_MAX,
1373 acpica_query_bbn_walker, NULL, &zerobbncnt, &rv);
1374 }
1375
1376 return (zerobbncnt > 1 ? 1 : 0);
1377 }
1378
static const char hextab[] = "0123456789ABCDEF";

/*
 * Map one ASCII character to its 4-bit hex value.
 *
 * Lowercase letters are folded to uppercase first.  Any character that
 * is not a valid hex digit yields 0 (indistinguishable from '0' itself,
 * which is fine for the EISA-ID use below).
 */
static int
hexdig(int c)
{
	int uc = (c >= 'a' && c <= 'z') ? c - ' ' : c;
	int i;

	/* Scan the table top-down; index 0 doubles as the "not found" value. */
	for (i = sizeof (hextab) - 1; i > 0; i--) {
		if (hextab[i] == uc)
			break;
	}
	return (i);
}
1398
/*
 * Compress a 7-character ASCII EISA device name (three vendor letters
 * followed by four hex product digits, e.g. "PNP0A03") into the 4-byte
 * packed form EISA uses: 5 bits per vendor letter across the first two
 * octets, then the four hex digits packed two per octet.
 *
 * The int/char[4] union reproduces the historical byte layout; the
 * resulting integer value is therefore host-endian.
 */
static int
CompressEisaID(char *np)
{
	union {
		char octets[4];
		int retval;
	} eisa;

	/* Vendor letters: low 5 bits of each char, packed across 2 bytes. */
	eisa.octets[0] = ((np[0] & 0x1F) << 2) + ((np[1] >> 3) & 0x03);
	eisa.octets[1] = ((np[1] & 0x07) << 5) + (np[2] & 0x1F);

	/* Product id: four hex digits, two per byte. */
	eisa.octets[2] = (hexdig(np[3]) << 4) + hexdig(np[4]);
	eisa.octets[3] = (hexdig(np[5]) << 4) + hexdig(np[6]);

	return (eisa.retval);
}
1419
1420 ACPI_STATUS
1421 acpica_eval_int(ACPI_HANDLE dev, char *method, int *rint)
1422 {
1423 ACPI_STATUS status;
1424 ACPI_BUFFER rb;
1425 ACPI_OBJECT ro;
1426
1427 rb.Pointer = &ro;
1428 rb.Length = sizeof (ro);
1429 if ((status = AcpiEvaluateObjectTyped(dev, method, NULL, &rb,
1430 ACPI_TYPE_INTEGER)) == AE_OK)
1431 *rint = ro.Integer.Value;
1432
1433 return (status);
1434 }
1435
1436 static int
1437 acpica_eval_hid(ACPI_HANDLE dev, char *method, int *rint)
1438 {
1439 ACPI_BUFFER rb;
1440 ACPI_OBJECT *rv;
1441
1442 rb.Pointer = NULL;
1443 rb.Length = ACPI_ALLOCATE_BUFFER;
1444 if (AcpiEvaluateObject(dev, method, NULL, &rb) == AE_OK &&
1445 rb.Length != 0) {
1446 rv = rb.Pointer;
1447 if (rv->Type == ACPI_TYPE_INTEGER) {
1448 *rint = rv->Integer.Value;
1449 AcpiOsFree(rv);
1450 return (AE_OK);
1451 } else if (rv->Type == ACPI_TYPE_STRING) {
1452 char *stringData;
1453
1454 /* Convert the string into an EISA ID */
1455 if (rv->String.Pointer == NULL) {
1456 AcpiOsFree(rv);
1457 return (AE_ERROR);
1458 }
1459
1460 stringData = rv->String.Pointer;
1461
1462 /*
1463 * If the string is an EisaID, it must be 7
1464 * characters; if it's an ACPI ID, it will be 8
1465 * (and we don't care about ACPI ids here).
1466 */
1467 if (strlen(stringData) != 7) {
1468 AcpiOsFree(rv);
1469 return (AE_ERROR);
1470 }
1471
1472 *rint = CompressEisaID(stringData);
1473 AcpiOsFree(rv);
1474 return (AE_OK);
1475 } else
1476 AcpiOsFree(rv);
1477 }
1478 return (AE_ERROR);
1479 }
1480
1481 /*
1482 * Create linkage between devinfo nodes and ACPI nodes
1483 */
ACPI_STATUS
acpica_tag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
{
	ACPI_STATUS status;
	ACPI_BUFFER rb;

	/*
	 * Tag the devinfo node with the ACPI name
	 */
	rb.Pointer = NULL;
	rb.Length = ACPI_ALLOCATE_BUFFER;	/* ACPICA allocates the path */
	status = AcpiGetName(acpiobj, ACPI_FULL_PATHNAME, &rb);
	if (ACPI_FAILURE(status)) {
		cmn_err(CE_WARN, "acpica: could not get ACPI path!");
	} else {
		/* Read back later by acpica_get_handle() */
		(void) ndi_prop_update_string(DDI_DEV_T_NONE, dip,
		    "acpi-namespace", (char *)rb.Pointer);
		AcpiOsFree(rb.Pointer);

		/*
		 * Tag the ACPI node with the dip
		 */
		status = acpica_set_devinfo(acpiobj, dip);
		/* NB: ASSERT compiles out in non-DEBUG; failure is returned */
		ASSERT(ACPI_SUCCESS(status));
	}

	return (status);
}
1512
1513 /*
1514 * Destroy linkage between devinfo nodes and ACPI nodes
1515 */
1516 ACPI_STATUS
1517 acpica_untag_devinfo(dev_info_t *dip, ACPI_HANDLE acpiobj)
1518 {
1519 (void) acpica_unset_devinfo(acpiobj);
1520 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "acpi-namespace");
1521
1522 return (AE_OK);
1523 }
1524
1525 /*
1526 * Return the ACPI device node matching the CPU dev_info node.
1527 */
ACPI_STATUS
acpica_get_handle_cpu(int cpu_id, ACPI_HANDLE *rh)
{
	int i;

	/*
	 * if cpu_map itself is NULL, we're a uppc system and
	 * acpica_build_processor_map() hasn't been called yet.
	 * So call it here
	 */
	if (cpu_map == NULL) {
		(void) acpica_build_processor_map();
		if (cpu_map == NULL)
			return (AE_ERROR);
	}

	if (cpu_id < 0) {
		return (AE_ERROR);
	}

	/*
	 * search object with cpuid in cpu_map
	 */
	mutex_enter(&cpu_map_lock);
	for (i = 0; i < cpu_map_count; i++) {
		if (cpu_map[i]->cpu_id == cpu_id) {
			break;
		}
	}
	/* Only return entries that still have a live ACPI object. */
	if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
		*rh = cpu_map[i]->obj;
		mutex_exit(&cpu_map_lock);
		return (AE_OK);
	}

	/* Handle special case for uppc-only systems. */
	/*
	 * acpica_map_cpu() was never called (cpu_map_called == 0), so no
	 * entry carries a cpu id; fall back to matching the current CPU's
	 * APIC id instead.
	 */
	if (cpu_map_called == 0) {
		uint32_t apicid = cpuid_get_apicid(CPU);
		if (apicid != UINT32_MAX) {
			for (i = 0; i < cpu_map_count; i++) {
				if (cpu_map[i]->apic_id == apicid) {
					break;
				}
			}
			if (i < cpu_map_count && (cpu_map[i]->obj != NULL)) {
				*rh = cpu_map[i]->obj;
				mutex_exit(&cpu_map_lock);
				return (AE_OK);
			}
		}
	}
	mutex_exit(&cpu_map_lock);

	return (AE_ERROR);
}
1583
1584 /*
1585 * Determine if this object is a processor
1586 */
1587 static ACPI_STATUS
1588 acpica_probe_processor(ACPI_HANDLE obj, UINT32 level, void *ctx, void **rv)
1589 {
1590 ACPI_STATUS status;
1591 ACPI_OBJECT_TYPE objtype;
1592 unsigned long acpi_id;
1593 ACPI_BUFFER rb;
1594 ACPI_DEVICE_INFO *di;
1595
1596 if (AcpiGetType(obj, &objtype) != AE_OK)
1597 return (AE_OK);
1598
1599 if (objtype == ACPI_TYPE_PROCESSOR) {
1600 /* process a Processor */
1601 rb.Pointer = NULL;
1602 rb.Length = ACPI_ALLOCATE_BUFFER;
1603 status = AcpiEvaluateObjectTyped(obj, NULL, NULL, &rb,
1604 ACPI_TYPE_PROCESSOR);
1605 if (status != AE_OK) {
1606 cmn_err(CE_WARN, "!acpica: error probing Processor");
1607 return (status);
1608 }
1609 acpi_id = ((ACPI_OBJECT *)rb.Pointer)->Processor.ProcId;
1610 AcpiOsFree(rb.Pointer);
1611 } else if (objtype == ACPI_TYPE_DEVICE) {
1612 /* process a processor Device */
1613 status = AcpiGetObjectInfo(obj, &di);
1614 if (status != AE_OK) {
1615 cmn_err(CE_WARN,
1616 "!acpica: error probing Processor Device\n");
1617 return (status);
1618 }
1619
1620 if (!(di->Valid & ACPI_VALID_UID) ||
1621 ddi_strtoul(di->UniqueId.String, NULL, 10, &acpi_id) != 0) {
1622 ACPI_FREE(di);
1623 cmn_err(CE_WARN,
1624 "!acpica: error probing Processor Device _UID\n");
1625 return (AE_ERROR);
1626 }
1627 ACPI_FREE(di);
1628 }
1629 (void) acpica_add_processor_to_map(acpi_id, obj, UINT32_MAX);
1630
1631 return (AE_OK);
1632 }
1633
/*
 * Build the devinfo <-> ACPI linkage ("d2a" map) for all child-of-root
 * PCI buses.  Runs at most once: d2a_done latches completion and the
 * static map_error latches a permanent failure.
 */
void
scan_d2a_map(void)
{
	dev_info_t *dip, *cdip;
	ACPI_HANDLE acpiobj;
	char *device_type_prop;
	int bus;
	static int map_error = 0;	/* sticky: never retry after failure */

	if (map_error || (d2a_done != 0))
		return;

	scanning_d2a_map = 1;

	/*
	 * Find all child-of-root PCI buses, and find their corresponding
	 * ACPI child-of-root PCI nodes. For each one, add to the
	 * d2a table.
	 */

	for (dip = ddi_get_child(ddi_root_node());
	    dip != NULL;
	    dip = ddi_get_next_sibling(dip)) {

		/* prune non-PCI nodes */
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS,
		    "device_type", &device_type_prop) != DDI_PROP_SUCCESS)
			continue;

		if ((strcmp("pci", device_type_prop) != 0) &&
		    (strcmp("pciex", device_type_prop) != 0)) {
			ddi_prop_free(device_type_prop);
			continue;
		}

		ddi_prop_free(device_type_prop);

		/*
		 * To get bus number of dip, get first child and get its
		 * bus number. If NULL, just continue, because we don't
		 * care about bus nodes with no children anyway.
		 */
		if ((cdip = ddi_get_child(dip)) == NULL)
			continue;

		if (acpica_get_bdf(cdip, &bus, NULL, NULL) < 0) {
#ifdef D2ADEBUG
			cmn_err(CE_WARN, "Can't get bus number of PCI child?");
#endif
			/* Fatal: abandon the scan and never retry. */
			map_error = 1;
			scanning_d2a_map = 0;
			d2a_done = 1;
			return;
		}

		if (acpica_find_pcibus(bus, &acpiobj) == AE_ERROR) {
#ifdef D2ADEBUG
			cmn_err(CE_WARN, "No ACPI bus obj for bus %d?\n", bus);
#endif
			/* Non-fatal for other buses: mark and keep going. */
			map_error = 1;
			continue;
		}

		acpica_tag_devinfo(dip, acpiobj);

		/* call recursively to enumerate subtrees */
		scan_d2a_subtree(dip, acpiobj, bus);
	}

	scanning_d2a_map = 0;
	d2a_done = 1;
}
1707
1708 /*
1709 * For all acpi child devices of acpiobj, find their matching
1710 * dip under "dip" argument. (matching means "matches dev/fn").
1711 * bus is assumed to already be a match from caller, and is
1712 * used here only to record in the d2a entry. Recurse if necessary.
1713 */
static void
scan_d2a_subtree(dev_info_t *dip, ACPI_HANDLE acpiobj, int bus)
{
	int acpi_devfn, hid;
	ACPI_HANDLE acld;	/* current ACPI child being matched */
	dev_info_t *dcld;	/* current devinfo child candidate */
	int dcld_b, dcld_d, dcld_f;
	int dev, func;
	char *device_type_prop;

	acld = NULL;
	/* Iterate over every ACPI child device of acpiobj. */
	while (AcpiGetNextObject(ACPI_TYPE_DEVICE, acpiobj, acld, &acld)
	    == AE_OK) {
		/* get the dev/func we're looking for in the devinfo tree */
		if (acpica_eval_int(acld, "_ADR", &acpi_devfn) != AE_OK)
			continue;
		/* _ADR encodes device in the high word, function in the low */
		dev = (acpi_devfn >> 16) & 0xFFFF;
		func = acpi_devfn & 0xFFFF;

		/* look through all the immediate children of dip */
		for (dcld = ddi_get_child(dip); dcld != NULL;
		    dcld = ddi_get_next_sibling(dcld)) {
			if (acpica_get_bdf(dcld, &dcld_b, &dcld_d, &dcld_f) < 0)
				continue;

			/* dev must match; function must match or wildcard */
			if (dcld_d != dev ||
			    (func != 0xFFFF && func != dcld_f))
				continue;
			/*
			 * Take the matched child's bus number for the
			 * recursion below (it may differ from the parent's).
			 */
			bus = dcld_b;

			/* found a match, record it */
			acpica_tag_devinfo(dcld, acld);

			/* if we find a bridge, recurse from here */
			if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dcld,
			    DDI_PROP_DONTPASS, "device_type",
			    &device_type_prop) == DDI_PROP_SUCCESS) {
				if ((strcmp("pci", device_type_prop) == 0) ||
				    (strcmp("pciex", device_type_prop) == 0))
					scan_d2a_subtree(dcld, acld, bus);
				ddi_prop_free(device_type_prop);
			}

			/* done finding a match, so break now */
			break;
		}
	}
}
1763
1764 /*
1765 * Return bus/dev/fn for PCI dip (note: not the parent "pci" node).
1766 */
1767 int
1768 acpica_get_bdf(dev_info_t *dip, int *bus, int *device, int *func)
1769 {
1770 pci_regspec_t *pci_rp;
1771 int len;
1772
1773 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1774 "reg", (int **)&pci_rp, (uint_t *)&len) != DDI_SUCCESS)
1775 return (-1);
1776
1777 if (len < (sizeof (pci_regspec_t) / sizeof (int))) {
1778 ddi_prop_free(pci_rp);
1779 return (-1);
1780 }
1781 if (bus != NULL)
1782 *bus = (int)PCI_REG_BUS_G(pci_rp->pci_phys_hi);
1783 if (device != NULL)
1784 *device = (int)PCI_REG_DEV_G(pci_rp->pci_phys_hi);
1785 if (func != NULL)
1786 *func = (int)PCI_REG_FUNC_G(pci_rp->pci_phys_hi);
1787 ddi_prop_free(pci_rp);
1788 return (0);
1789 }
1790
1791 /*
1792 * Return the ACPI device node matching this dev_info node, if it
1793 * exists in the ACPI tree.
1794 */
1795 ACPI_STATUS
1796 acpica_get_handle(dev_info_t *dip, ACPI_HANDLE *rh)
1797 {
1798 ACPI_STATUS status;
1799 char *acpiname;
1800
1801 #ifdef DEBUG
1802 if (d2a_done == 0)
1803 cmn_err(CE_WARN, "!acpica_get_handle:"
1804 " no ACPI mapping for %s", ddi_node_name(dip));
1805 #endif
1806
1807 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1808 "acpi-namespace", &acpiname) != DDI_PROP_SUCCESS) {
1809 return (AE_ERROR);
1810 }
1811
1812 status = AcpiGetHandle(NULL, acpiname, rh);
1813 ddi_prop_free((void *)acpiname);
1814 return (status);
1815 }
1816
1817
1818
1819 /*
1820 * Manage OS data attachment to ACPI nodes
1821 */
1822
1823 /*
1824 * Return the (dev_info_t *) associated with the ACPI node.
1825 */
1826 ACPI_STATUS
1827 acpica_get_devinfo(ACPI_HANDLE obj, dev_info_t **dipp)
1828 {
1829 ACPI_STATUS status;
1830 void *ptr;
1831
1832 status = AcpiGetData(obj, acpica_devinfo_handler, &ptr);
1833 if (status == AE_OK)
1834 *dipp = (dev_info_t *)ptr;
1835
1836 return (status);
1837 }
1838
1839 /*
1840 * Set the dev_info_t associated with the ACPI node.
1841 */
1842 static ACPI_STATUS
1843 acpica_set_devinfo(ACPI_HANDLE obj, dev_info_t *dip)
1844 {
1845 ACPI_STATUS status;
1846
1847 status = AcpiAttachData(obj, acpica_devinfo_handler, (void *)dip);
1848 return (status);
1849 }
1850
1851 /*
1852 * Unset the dev_info_t associated with the ACPI node.
1853 */
1854 static ACPI_STATUS
1855 acpica_unset_devinfo(ACPI_HANDLE obj)
1856 {
1857 return (AcpiDetachData(obj, acpica_devinfo_handler));
1858 }
1859
1860 /*
1861 *
1862 */
void
acpica_devinfo_handler(ACPI_HANDLE obj, void *data)
{
	/*
	 * no-op: the attached dip is owned by the DDI tree, so there is
	 * nothing for ACPICA to clean up when the node goes away.
	 */
}
1868
/*
 * Populate cpu_map by probing the ACPI namespace for all processors,
 * both legacy Processor objects and ACPI0007 processor Devices.
 * Idempotent: cpu_map_built latches completion.
 */
ACPI_STATUS
acpica_build_processor_map(void)
{
	ACPI_STATUS status;
	void *rv;

	/*
	 * shouldn't be called more than once anyway
	 */
	if (cpu_map_built)
		return (AE_OK);

	/*
	 * ACPI device configuration driver has built mapping information
	 * among processor id and object handle, no need to probe again.
	 */
	if (acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
		cpu_map_built = 1;
		return (AE_OK);
	}

	/*
	 * Look for Processor objects
	 */
	/* depth 4: Processor objects live near the top of the namespace */
	status = AcpiWalkNamespace(ACPI_TYPE_PROCESSOR,
	    ACPI_ROOT_OBJECT,
	    4,
	    acpica_probe_processor,
	    NULL,
	    NULL,
	    &rv);
	/* NB: ASSERTs compile out in non-DEBUG; errors are not fatal here */
	ASSERT(status == AE_OK);

	/*
	 * Look for processor Device objects
	 */
	status = AcpiGetDevices("ACPI0007",
	    acpica_probe_processor,
	    NULL,
	    &rv);
	ASSERT(status == AE_OK);
	cpu_map_built = 1;

	return (status);
}
1914
1915 /*
1916 * Grow cpu map table on demand.
1917 */
1918 static void
1919 acpica_grow_cpu_map(void)
1920 {
1921 if (cpu_map_count == cpu_map_count_max) {
1922 size_t sz;
1923 struct cpu_map_item **new_map;
1924
1925 ASSERT(cpu_map_count_max < INT_MAX / 2);
1926 cpu_map_count_max += max_ncpus;
1927 new_map = kmem_zalloc(sizeof (cpu_map[0]) * cpu_map_count_max,
1928 KM_SLEEP);
1929 if (cpu_map_count != 0) {
1930 ASSERT(cpu_map != NULL);
1931 sz = sizeof (cpu_map[0]) * cpu_map_count;
1932 kcopy(cpu_map, new_map, sz);
1933 kmem_free(cpu_map, sz);
1934 }
1935 cpu_map = new_map;
1936 }
1937 }
1938
1939 /*
1940 * Maintain mapping information among (cpu id, ACPI processor id, APIC id,
1941 * ACPI handle). The mapping table will be setup in two steps:
1942 * 1) acpica_add_processor_to_map() builds mapping among APIC id, ACPI
1943 * processor id and ACPI object handle.
1944 * 2) acpica_map_cpu() builds mapping among cpu id and ACPI processor id.
1945 * On systems with which have ACPI device configuration for CPUs enabled,
1946 * acpica_map_cpu() will be called after acpica_add_processor_to_map(),
1947 * otherwise acpica_map_cpu() will be called before
1948 * acpica_add_processor_to_map().
1949 */
ACPI_STATUS
acpica_add_processor_to_map(UINT32 acpi_id, ACPI_HANDLE obj, UINT32 apic_id)
{
	int i;
	ACPI_STATUS rc = AE_OK;
	struct cpu_map_item *item = NULL;

	ASSERT(obj != NULL);
	if (obj == NULL) {
		return (AE_ERROR);
	}

	mutex_enter(&cpu_map_lock);

	/*
	 * Special case for uppc
	 * If we're a uppc system and ACPI device configuration for CPU has
	 * been disabled, there won't be a CPU map yet because uppc psm doesn't
	 * call acpica_map_cpu(). So create one and use the passed-in processor
	 * as CPU 0
	 * Assumption: the first CPU returned by
	 * AcpiGetDevices/AcpiWalkNamespace will be the BSP.
	 * Unfortunately there appears to be no good way to ASSERT this.
	 */
	if (cpu_map == NULL &&
	    !acpica_get_devcfg_feature(ACPI_DEVCFG_CPU)) {
		acpica_grow_cpu_map();
		ASSERT(cpu_map != NULL);
		item = kmem_zalloc(sizeof (*item), KM_SLEEP);
		item->cpu_id = 0;	/* decree this processor to be the BSP */
		item->proc_id = acpi_id;
		item->apic_id = apic_id;
		item->obj = obj;
		cpu_map[0] = item;
		cpu_map_count = 1;
		mutex_exit(&cpu_map_lock);
		return (AE_OK);
	}

	/*
	 * Scan for a duplicate object (error) or an existing entry with
	 * the same ACPI processor id (created earlier by acpica_map_cpu()).
	 */
	for (i = 0; i < cpu_map_count; i++) {
		if (cpu_map[i]->obj == obj) {
			rc = AE_ALREADY_EXISTS;
			break;
		} else if (cpu_map[i]->proc_id == acpi_id) {
			ASSERT(item == NULL);
			item = cpu_map[i];
		}
	}

	if (rc == AE_OK) {
		if (item != NULL) {
			/*
			 * ACPI alias objects may cause more than one objects
			 * with the same ACPI processor id, only remember the
			 * the first object encountered.
			 */
			if (item->obj == NULL) {
				item->obj = obj;
				item->apic_id = apic_id;
			} else {
				rc = AE_ALREADY_EXISTS;
			}
		} else if (cpu_map_count >= INT_MAX / 2) {
			/* Guard against pathological growth of the map. */
			rc = AE_NO_MEMORY;
		} else {
			acpica_grow_cpu_map();
			ASSERT(cpu_map != NULL);
			ASSERT(cpu_map_count < cpu_map_count_max);
			item = kmem_zalloc(sizeof (*item), KM_SLEEP);
			item->cpu_id = -1;	/* cpu id filled in later */
			item->proc_id = acpi_id;
			item->apic_id = apic_id;
			item->obj = obj;
			cpu_map[cpu_map_count] = item;
			cpu_map_count++;
		}
	}

	mutex_exit(&cpu_map_lock);

	return (rc);
}
2032
/*
 * Drop the ACPI object reference for the entry with the given ACPI
 * processor id; free the entry outright when no cpu id references it.
 * Returns AE_OK on success, AE_NOT_EXIST if no entry matched.
 */
ACPI_STATUS
acpica_remove_processor_from_map(UINT32 acpi_id)
{
	int i;
	ACPI_STATUS rc = AE_NOT_EXIST;

	mutex_enter(&cpu_map_lock);
	for (i = 0; i < cpu_map_count; i++) {
		if (cpu_map[i]->proc_id != acpi_id) {
			continue;
		}
		cpu_map[i]->obj = NULL;
		/* Free item if no more reference to it. */
		if (cpu_map[i]->cpu_id == -1) {
			kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
			cpu_map[i] = NULL;
			cpu_map_count--;
			/* swap-remove: keep the array dense */
			if (i != cpu_map_count) {
				cpu_map[i] = cpu_map[cpu_map_count];
				cpu_map[cpu_map_count] = NULL;
			}
		}
		rc = AE_OK;
		break;
	}
	mutex_exit(&cpu_map_lock);

	return (rc);
}
2062
/*
 * Record the cpu id for an ACPI processor id, either by filling in an
 * existing entry created by acpica_add_processor_to_map() or by
 * creating a new placeholder entry (obj filled in later).
 */
ACPI_STATUS
acpica_map_cpu(processorid_t cpuid, UINT32 acpi_id)
{
	int i;
	ACPI_STATUS rc = AE_OK;
	struct cpu_map_item *item = NULL;

	ASSERT(cpuid != -1);
	if (cpuid == -1) {
		return (AE_ERROR);
	}

	mutex_enter(&cpu_map_lock);
	cpu_map_called = 1;	/* see acpica_get_handle_cpu() uppc fallback */
	for (i = 0; i < cpu_map_count; i++) {
		if (cpu_map[i]->cpu_id == cpuid) {
			rc = AE_ALREADY_EXISTS;
			break;
		} else if (cpu_map[i]->proc_id == acpi_id) {
			ASSERT(item == NULL);
			item = cpu_map[i];
		}
	}
	if (rc == AE_OK) {
		if (item != NULL) {
			/* Entry exists; claim it unless already claimed. */
			if (item->cpu_id == -1) {
				item->cpu_id = cpuid;
			} else {
				rc = AE_ALREADY_EXISTS;
			}
		} else if (cpu_map_count >= INT_MAX / 2) {
			/* Guard against pathological growth of the map. */
			rc = AE_NO_MEMORY;
		} else {
			acpica_grow_cpu_map();
			ASSERT(cpu_map != NULL);
			ASSERT(cpu_map_count < cpu_map_count_max);
			item = kmem_zalloc(sizeof (*item), KM_SLEEP);
			item->cpu_id = cpuid;
			item->proc_id = acpi_id;
			item->apic_id = UINT32_MAX;	/* not yet known */
			item->obj = NULL;		/* filled in later */
			cpu_map[cpu_map_count] = item;
			cpu_map_count++;
		}
	}
	mutex_exit(&cpu_map_lock);

	return (rc);
}
2112
2113 ACPI_STATUS
2114 acpica_unmap_cpu(processorid_t cpuid)
2115 {
2116 int i;
2117 ACPI_STATUS rc = AE_NOT_EXIST;
2118
2119 ASSERT(cpuid != -1);
2120 if (cpuid == -1) {
2121 return (rc);
2122 }
2123
2124 mutex_enter(&cpu_map_lock);
2125 for (i = 0; i < cpu_map_count; i++) {
2126 if (cpu_map[i]->cpu_id != cpuid) {
2127 continue;
2128 }
2129 cpu_map[i]->cpu_id = -1;
2130 /* Free item if no more reference. */
2131 if (cpu_map[i]->obj == NULL) {
2132 kmem_free(cpu_map[i], sizeof (struct cpu_map_item));
2133 cpu_map[i] = NULL;
2134 cpu_map_count--;
2135 if (i != cpu_map_count) {
2136 cpu_map[i] = cpu_map[cpu_map_count];
2137 cpu_map[cpu_map_count] = NULL;
2138 }
2139 }
2140 rc = AE_OK;
2141 break;
2142 }
2143 mutex_exit(&cpu_map_lock);
2144
2145 return (rc);
2146 }
2147
2148 ACPI_STATUS
2149 acpica_get_cpu_object_by_cpuid(processorid_t cpuid, ACPI_HANDLE *hdlp)
2150 {
2151 int i;
2152 ACPI_STATUS rc = AE_NOT_EXIST;
2153
2154 ASSERT(cpuid != -1);
2155 if (cpuid == -1) {
2156 return (rc);
2157 }
2158
2159 mutex_enter(&cpu_map_lock);
2160 for (i = 0; i < cpu_map_count; i++) {
2161 if (cpu_map[i]->cpu_id == cpuid && cpu_map[i]->obj != NULL) {
2162 *hdlp = cpu_map[i]->obj;
2163 rc = AE_OK;
2164 break;
2165 }
2166 }
2167 mutex_exit(&cpu_map_lock);
2168
2169 return (rc);
2170 }
2171
2172 ACPI_STATUS
2173 acpica_get_cpu_object_by_procid(UINT32 procid, ACPI_HANDLE *hdlp)
2174 {
2175 int i;
2176 ACPI_STATUS rc = AE_NOT_EXIST;
2177
2178 mutex_enter(&cpu_map_lock);
2179 for (i = 0; i < cpu_map_count; i++) {
2180 if (cpu_map[i]->proc_id == procid && cpu_map[i]->obj != NULL) {
2181 *hdlp = cpu_map[i]->obj;
2182 rc = AE_OK;
2183 break;
2184 }
2185 }
2186 mutex_exit(&cpu_map_lock);
2187
2188 return (rc);
2189 }
2190
2191 ACPI_STATUS
2192 acpica_get_cpu_object_by_apicid(UINT32 apicid, ACPI_HANDLE *hdlp)
2193 {
2194 int i;
2195 ACPI_STATUS rc = AE_NOT_EXIST;
2196
2197 ASSERT(apicid != UINT32_MAX);
2198 if (apicid == UINT32_MAX) {
2199 return (rc);
2200 }
2201
2202 mutex_enter(&cpu_map_lock);
2203 for (i = 0; i < cpu_map_count; i++) {
2204 if (cpu_map[i]->apic_id == apicid && cpu_map[i]->obj != NULL) {
2205 *hdlp = cpu_map[i]->obj;
2206 rc = AE_OK;
2207 break;
2208 }
2209 }
2210 mutex_exit(&cpu_map_lock);
2211
2212 return (rc);
2213 }
2214
2215 ACPI_STATUS
2216 acpica_get_cpu_id_by_object(ACPI_HANDLE hdl, processorid_t *cpuidp)
2217 {
2218 int i;
2219 ACPI_STATUS rc = AE_NOT_EXIST;
2220
2221 ASSERT(cpuidp != NULL);
2222 if (hdl == NULL || cpuidp == NULL) {
2223 return (rc);
2224 }
2225
2226 *cpuidp = -1;
2227 mutex_enter(&cpu_map_lock);
2228 for (i = 0; i < cpu_map_count; i++) {
2229 if (cpu_map[i]->obj == hdl && cpu_map[i]->cpu_id != -1) {
2230 *cpuidp = cpu_map[i]->cpu_id;
2231 rc = AE_OK;
2232 break;
2233 }
2234 }
2235 mutex_exit(&cpu_map_lock);
2236
2237 return (rc);
2238 }
2239
2240 ACPI_STATUS
2241 acpica_get_apicid_by_object(ACPI_HANDLE hdl, UINT32 *rp)
2242 {
2243 int i;
2244 ACPI_STATUS rc = AE_NOT_EXIST;
2245
2246 ASSERT(rp != NULL);
2247 if (hdl == NULL || rp == NULL) {
2248 return (rc);
2249 }
2250
2251 *rp = UINT32_MAX;
2252 mutex_enter(&cpu_map_lock);
2253 for (i = 0; i < cpu_map_count; i++) {
2254 if (cpu_map[i]->obj == hdl &&
2255 cpu_map[i]->apic_id != UINT32_MAX) {
2256 *rp = cpu_map[i]->apic_id;
2257 rc = AE_OK;
2258 break;
2259 }
2260 }
2261 mutex_exit(&cpu_map_lock);
2262
2263 return (rc);
2264 }
2265
2266 ACPI_STATUS
2267 acpica_get_procid_by_object(ACPI_HANDLE hdl, UINT32 *rp)
2268 {
2269 int i;
2270 ACPI_STATUS rc = AE_NOT_EXIST;
2271
2272 ASSERT(rp != NULL);
2273 if (hdl == NULL || rp == NULL) {
2274 return (rc);
2275 }
2276
2277 *rp = UINT32_MAX;
2278 mutex_enter(&cpu_map_lock);
2279 for (i = 0; i < cpu_map_count; i++) {
2280 if (cpu_map[i]->obj == hdl) {
2281 *rp = cpu_map[i]->proc_id;
2282 rc = AE_OK;
2283 break;
2284 }
2285 }
2286 mutex_exit(&cpu_map_lock);
2287
2288 return (rc);
2289 }
2290
/* Atomically set bits in the global ACPI core-feature mask. */
void
acpica_set_core_feature(uint64_t features)
{
	atomic_or_64(&acpica_core_features, features);
}
2296
/* Atomically clear bits in the global ACPI core-feature mask. */
void
acpica_clear_core_feature(uint64_t features)
{
	atomic_and_64(&acpica_core_features, ~features);
}
2302
/* Return the subset of "features" currently set (unlocked read). */
uint64_t
acpica_get_core_feature(uint64_t features)
{
	return (acpica_core_features & features);
}
2308
/* Atomically set bits in the ACPI device-configuration feature mask. */
void
acpica_set_devcfg_feature(uint64_t features)
{
	atomic_or_64(&acpica_devcfg_features, features);
}
2314
/* Atomically clear bits in the ACPI device-configuration feature mask. */
void
acpica_clear_devcfg_feature(uint64_t features)
{
	atomic_and_64(&acpica_devcfg_features, ~features);
}
2320
/* Return the subset of "features" currently set (unlocked read). */
uint64_t
acpica_get_devcfg_feature(uint64_t features)
{
	return (acpica_devcfg_features & features);
}
2326
/* Hand callers a pointer to ACPICA's global copy of the FADT. */
void
acpica_get_global_FADT(ACPI_TABLE_FADT **gbl_FADT)
{
	*gbl_FADT = &AcpiGbl_FADT;
}
2332
/*
 * Notify platform firmware that the OS is taking over P-state and/or
 * C-state control by writing the FADT-advertised command values to the
 * SMI command port.  A zero control value means the platform does not
 * require (or support) the handshake, so nothing is written.
 */
void
acpica_write_cpupm_capabilities(boolean_t pstates, boolean_t cstates)
{
	if (pstates && AcpiGbl_FADT.PstateControl != 0)
		(void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
		    AcpiGbl_FADT.PstateControl);

	if (cstates && AcpiGbl_FADT.CstControl != 0)
		(void) AcpiHwRegisterWrite(ACPI_REGISTER_SMI_COMMAND_BLOCK,
		    AcpiGbl_FADT.CstControl);
}