PANKOVs restructure
--- old/usr/src/uts/i86pc/io/apix/apix_utils.c
+++ new/usr/src/uts/i86pc/io/apix/apix_utils.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25 /*
26 26 * Copyright (c) 2010, Intel Corporation.
27 27 * All rights reserved.
28 28 */
29 29 /*
30 30 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
31 31 * Copyright 2013 Pluribus Networks, Inc.
32 32 */
33 33
34 34 #include <sys/processor.h>
35 35 #include <sys/time.h>
36 36 #include <sys/psm.h>
37 37 #include <sys/smp_impldefs.h>
38 38 #include <sys/cram.h>
39 -#include <sys/acpi/acpi.h>
39 +#include <acpica/include/acpi.h>
40 40 #include <sys/acpica.h>
41 41 #include <sys/psm_common.h>
42 42 #include <sys/pit.h>
43 43 #include <sys/ddi.h>
44 44 #include <sys/sunddi.h>
45 45 #include <sys/ddi_impldefs.h>
46 46 #include <sys/pci.h>
47 47 #include <sys/promif.h>
48 48 #include <sys/x86_archext.h>
49 49 #include <sys/cpc_impl.h>
50 50 #include <sys/uadmin.h>
51 51 #include <sys/panic.h>
52 52 #include <sys/debug.h>
53 53 #include <sys/archsystm.h>
54 54 #include <sys/trap.h>
55 55 #include <sys/machsystm.h>
56 56 #include <sys/sysmacros.h>
57 57 #include <sys/cpuvar.h>
58 58 #include <sys/rm_platter.h>
59 59 #include <sys/privregs.h>
60 60 #include <sys/note.h>
61 61 #include <sys/pci_intr_lib.h>
62 62 #include <sys/spl.h>
63 63 #include <sys/clock.h>
64 64 #include <sys/dditypes.h>
65 65 #include <sys/sunddi.h>
66 66 #include <sys/x_call.h>
67 67 #include <sys/reboot.h>
68 68 #include <sys/apix.h>
69 69
70 70 static int apix_get_avail_vector_oncpu(uint32_t, int, int);
71 71 static apix_vector_t *apix_init_vector(processorid_t, uchar_t);
72 72 static void apix_cleanup_vector(apix_vector_t *);
73 73 static void apix_insert_av(apix_vector_t *, void *, avfunc, caddr_t, caddr_t,
74 74 uint64_t *, int, dev_info_t *);
75 75 static void apix_remove_av(apix_vector_t *, struct autovec *);
76 76 static void apix_clear_dev_map(dev_info_t *, int, int);
77 77 static boolean_t apix_is_cpu_enabled(processorid_t);
78 78 static void apix_wait_till_seen(processorid_t, int);
79 79
80 80 #define GET_INTR_INUM(ihdlp) \
81 81 (((ihdlp) != NULL) ? ((ddi_intr_handle_impl_t *)(ihdlp))->ih_inum : 0)
82 82
83 83 apix_rebind_info_t apix_rebindinfo = {0, 0, 0, NULL, 0, NULL};
84 84
85 85 /*
86 86 * Allocate IPI
87 87 *
88 88 * Return vector number or 0 on error
89 89 */
90 90 uchar_t
91 91 apix_alloc_ipi(int ipl)
92 92 {
93 93 apix_vector_t *vecp;
94 94 uchar_t vector;
95 95 int cpun;
96 96 int nproc;
97 97
98 98 APIX_ENTER_CPU_LOCK(0);
99 99
100 100 vector = apix_get_avail_vector_oncpu(0, APIX_IPI_MIN, APIX_IPI_MAX);
101 101 if (vector == 0) {
102 102 APIX_LEAVE_CPU_LOCK(0);
103 103 cmn_err(CE_WARN, "apix: no available IPI\n");
104 104 apic_error |= APIC_ERR_GET_IPIVECT_FAIL;
105 105 return (0);
106 106 }
107 107
108 108 nproc = max(apic_nproc, apic_max_nproc);
109 109 for (cpun = 0; cpun < nproc; cpun++) {
110 110 vecp = xv_vector(cpun, vector);
111 111 if (vecp == NULL) {
112 112 vecp = kmem_zalloc(sizeof (apix_vector_t), KM_NOSLEEP);
113 113 if (vecp == NULL) {
114 114 cmn_err(CE_WARN, "apix: No memory for ipi");
115 115 goto fail;
116 116 }
117 117 xv_vector(cpun, vector) = vecp;
118 118 }
119 119 vecp->v_state = APIX_STATE_ALLOCED;
120 120 vecp->v_type = APIX_TYPE_IPI;
121 121 vecp->v_cpuid = vecp->v_bound_cpuid = cpun;
122 122 vecp->v_vector = vector;
123 123 vecp->v_pri = ipl;
124 124 }
125 125 APIX_LEAVE_CPU_LOCK(0);
126 126 return (vector);
127 127
128 128 fail:
129 129 while (--cpun >= 0)
130 130 apix_cleanup_vector(xv_vector(cpun, vector));
131 131 APIX_LEAVE_CPU_LOCK(0);
132 132 return (0);
133 133 }
134 134
135 135 /*
136 136 * Add IPI service routine
137 137 */
138 138 static int
139 139 apix_add_ipi(int ipl, avfunc xxintr, char *name, int vector,
140 140 caddr_t arg1, caddr_t arg2)
141 141 {
142 142 int cpun;
143 143 apix_vector_t *vecp;
144 144 int nproc;
145 145
146 146 ASSERT(vector >= APIX_IPI_MIN && vector <= APIX_IPI_MAX);
147 147
148 148 nproc = max(apic_nproc, apic_max_nproc);
149 149 for (cpun = 0; cpun < nproc; cpun++) {
150 150 APIX_ENTER_CPU_LOCK(cpun);
151 151 vecp = xv_vector(cpun, vector);
152 152 apix_insert_av(vecp, NULL, xxintr, arg1, arg2, NULL, ipl, NULL);
153 153 vecp->v_state = APIX_STATE_ENABLED;
154 154 APIX_LEAVE_CPU_LOCK(cpun);
155 155 }
156 156
157 157 APIC_VERBOSE(IPI, (CE_CONT, "apix: add ipi for %s, vector %x "
158 158 "ipl %x\n", name, vector, ipl));
159 159
160 160 return (1);
161 161 }
162 162
163 163 /*
164 164 * Find and return first free vector in range (start, end)
165 165 */
166 166 static int
167 167 apix_get_avail_vector_oncpu(uint32_t cpuid, int start, int end)
168 168 {
169 169 int i;
170 170 apix_impl_t *apixp = apixs[cpuid];
171 171
172 172 for (i = start; i <= end; i++) {
173 173 if (APIC_CHECK_RESERVE_VECTORS(i))
174 174 continue;
175 175 if (IS_VECT_FREE(apixp->x_vectbl[i]))
176 176 return (i);
177 177 }
178 178
179 179 return (0);
180 180 }
181 181
182 182 /*
183 183 * Allocate a vector on specified cpu
184 184 *
185 185 * Return NULL on error
186 186 */
187 187 static apix_vector_t *
188 188 apix_alloc_vector_oncpu(uint32_t cpuid, dev_info_t *dip, int inum, int type)
189 189 {
190 190 processorid_t tocpu = cpuid & ~IRQ_USER_BOUND;
191 191 apix_vector_t *vecp;
192 192 int vector;
193 193
194 194 ASSERT(APIX_CPU_LOCK_HELD(tocpu));
195 195
196 196 /* find free vector */
197 197 vector = apix_get_avail_vector_oncpu(tocpu, APIX_AVINTR_MIN,
198 198 APIX_AVINTR_MAX);
199 199 if (vector == 0)
200 200 return (NULL);
201 201
202 202 vecp = apix_init_vector(tocpu, vector);
203 203 vecp->v_type = (ushort_t)type;
204 204 vecp->v_inum = inum;
205 205 vecp->v_flags = (cpuid & IRQ_USER_BOUND) ? APIX_VECT_USER_BOUND : 0;
206 206
207 207 if (dip != NULL)
208 208 apix_set_dev_map(vecp, dip, inum);
209 209
210 210 return (vecp);
211 211 }
212 212
213 213 /*
214 214 * Allocates "count" contiguous MSI vectors starting at the proper alignment.
 215  215   * The caller must ensure that count is a power of 2 and
 216  216   * is not < 1.
217 217 *
218 218 * Return first vector number
219 219 */
220 220 apix_vector_t *
221 221 apix_alloc_nvectors_oncpu(uint32_t cpuid, dev_info_t *dip, int inum,
222 222 int count, int type)
223 223 {
224 224 int i, msibits, start = 0, navail = 0;
225 225 apix_vector_t *vecp, *startp = NULL;
226 226 processorid_t tocpu = cpuid & ~IRQ_USER_BOUND;
227 227 uint_t flags;
228 228
229 229 ASSERT(APIX_CPU_LOCK_HELD(tocpu));
230 230
231 231 /*
232 232 * msibits is the no. of lower order message data bits for the
233 233 * allocated MSI vectors and is used to calculate the aligned
234 234 * starting vector
235 235 */
236 236 msibits = count - 1;
237 237
238 238 /* It has to be contiguous */
239 239 for (i = APIX_AVINTR_MIN; i <= APIX_AVINTR_MAX; i++) {
240 240 if (!IS_VECT_FREE(xv_vector(tocpu, i)))
241 241 continue;
242 242
243 243 /*
244 244 * starting vector has to be aligned accordingly for
245 245 * multiple MSIs
246 246 */
247 247 if (msibits)
248 248 i = (i + msibits) & ~msibits;
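			/*
			 * Illustrative only (values assumed, not taken from
			 * this change): for count = 4, msibits = 0x3, so a
			 * free vector found at i = 0x31 is rounded up to
			 * (0x31 + 0x3) & ~0x3 = 0x34, the next 4-aligned
			 * vector, as multi-message MSI requires.
			 */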
249 249
250 250 for (navail = 0, start = i; i <= APIX_AVINTR_MAX; i++) {
251 251 if (!IS_VECT_FREE(xv_vector(tocpu, i)))
252 252 break;
253 253 if (APIC_CHECK_RESERVE_VECTORS(i))
254 254 break;
255 255 if (++navail == count)
256 256 goto done;
257 257 }
258 258 }
259 259
260 260 return (NULL);
261 261
262 262 done:
263 263 flags = (cpuid & IRQ_USER_BOUND) ? APIX_VECT_USER_BOUND : 0;
264 264
265 265 for (i = 0; i < count; i++) {
266 266 if ((vecp = apix_init_vector(tocpu, start + i)) == NULL)
267 267 goto fail;
268 268
269 269 vecp->v_type = (ushort_t)type;
270 270 vecp->v_inum = inum + i;
271 271 vecp->v_flags = flags;
272 272
273 273 if (dip != NULL)
274 274 apix_set_dev_map(vecp, dip, inum + i);
275 275
276 276 if (i == 0)
277 277 startp = vecp;
278 278 }
279 279
280 280 return (startp);
281 281
282 282 fail:
283 283 while (i-- > 0) { /* Free allocated vectors */
284 284 vecp = xv_vector(tocpu, start + i);
285 285 apix_clear_dev_map(dip, inum + i, type);
286 286 apix_cleanup_vector(vecp);
287 287 }
288 288 return (NULL);
289 289 }
290 290
291 291 #define APIX_WRITE_MSI_DATA(_hdl, _cap, _ctrl, _v)\
292 292 do {\
293 293 if ((_ctrl) & PCI_MSI_64BIT_MASK)\
294 294 pci_config_put16((_hdl), (_cap) + PCI_MSI_64BIT_DATA, (_v));\
295 295 else\
296 296 pci_config_put16((_hdl), (_cap) + PCI_MSI_32BIT_DATA, (_v));\
297 297 _NOTE(CONSTCOND)} while (0)
298 298
299 299 static void
300 300 apix_pci_msi_enable_vector(apix_vector_t *vecp, dev_info_t *dip, int type,
301 301 int inum, int count, uchar_t vector, int target_apic_id)
302 302 {
303 303 uint64_t msi_addr, msi_data;
304 304 ushort_t msi_ctrl;
305 305 int i, cap_ptr = i_ddi_get_msi_msix_cap_ptr(dip);
306 306 ddi_acc_handle_t handle = i_ddi_get_pci_config_handle(dip);
307 307 msi_regs_t msi_regs;
308 308 void *intrmap_tbl[PCI_MSI_MAX_INTRS];
309 309
310 310 DDI_INTR_IMPLDBG((CE_CONT, "apix_pci_msi_enable_vector: dip=0x%p\n"
311 311 "\tdriver = %s, inum=0x%x vector=0x%x apicid=0x%x\n", (void *)dip,
312 312 ddi_driver_name(dip), inum, vector, target_apic_id));
313 313
314 314 ASSERT((handle != NULL) && (cap_ptr != 0));
315 315
316 316 msi_regs.mr_data = vector;
317 317 msi_regs.mr_addr = target_apic_id;
318 318
319 319 for (i = 0; i < count; i++)
320 320 intrmap_tbl[i] = xv_intrmap_private(vecp->v_cpuid, vector + i);
321 321 apic_vt_ops->apic_intrmap_alloc_entry(intrmap_tbl, dip, type,
322 322 count, 0xff);
323 323 for (i = 0; i < count; i++)
324 324 xv_intrmap_private(vecp->v_cpuid, vector + i) = intrmap_tbl[i];
325 325
326 326 apic_vt_ops->apic_intrmap_map_entry(vecp->v_intrmap_private,
327 327 (void *)&msi_regs, type, count);
328 328 apic_vt_ops->apic_intrmap_record_msi(vecp->v_intrmap_private,
329 329 &msi_regs);
330 330
331 331 /* MSI Address */
332 332 msi_addr = msi_regs.mr_addr;
333 333
334 334 /* MSI Data: MSI is edge triggered according to spec */
335 335 msi_data = msi_regs.mr_data;
336 336
337 337 DDI_INTR_IMPLDBG((CE_CONT, "apix_pci_msi_enable_vector: addr=0x%lx "
338 338 "data=0x%lx\n", (long)msi_addr, (long)msi_data));
339 339
340 340 if (type == APIX_TYPE_MSI) {
341 341 msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
342 342
343 343 /* Set the bits to inform how many MSIs are enabled */
344 344 msi_ctrl |= ((highbit(count) - 1) << PCI_MSI_MME_SHIFT);
345 345 pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);
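		/*
		 * For illustration (assumed count): count = 4 gives
		 * highbit(4) - 1 = 2 in the MME field above, which the
		 * PCI spec decodes as 2^2 = 4 enabled messages.
		 */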
346 346
347 347 if ((vecp->v_flags & APIX_VECT_MASKABLE) == 0)
348 348 APIX_WRITE_MSI_DATA(handle, cap_ptr, msi_ctrl,
349 349 APIX_RESV_VECTOR);
350 350
351 351 pci_config_put32(handle,
352 352 cap_ptr + PCI_MSI_ADDR_OFFSET, msi_addr);
353 353 if (msi_ctrl & PCI_MSI_64BIT_MASK)
354 354 pci_config_put32(handle,
355 355 cap_ptr + PCI_MSI_ADDR_OFFSET + 4, msi_addr >> 32);
356 356
357 357 APIX_WRITE_MSI_DATA(handle, cap_ptr, msi_ctrl, msi_data);
358 358 } else if (type == APIX_TYPE_MSIX) {
359 359 uintptr_t off;
360 360 ddi_intr_msix_t *msix_p = i_ddi_get_msix(dip);
361 361
362 362 /* Offset into the "inum"th entry in the MSI-X table */
363 363 off = (uintptr_t)msix_p->msix_tbl_addr +
364 364 (inum * PCI_MSIX_VECTOR_SIZE);
365 365
366 366 ddi_put32(msix_p->msix_tbl_hdl,
367 367 (uint32_t *)(off + PCI_MSIX_DATA_OFFSET), msi_data);
368 368 ddi_put32(msix_p->msix_tbl_hdl,
369 369 (uint32_t *)(off + PCI_MSIX_LOWER_ADDR_OFFSET), msi_addr);
370 370 ddi_put32(msix_p->msix_tbl_hdl,
371 371 (uint32_t *)(off + PCI_MSIX_UPPER_ADDR_OFFSET),
372 372 msi_addr >> 32);
373 373 }
374 374 }
375 375
376 376 static void
377 377 apix_pci_msi_enable_mode(dev_info_t *dip, int type, int inum)
378 378 {
379 379 ushort_t msi_ctrl;
380 380 int cap_ptr = i_ddi_get_msi_msix_cap_ptr(dip);
381 381 ddi_acc_handle_t handle = i_ddi_get_pci_config_handle(dip);
382 382
383 383 ASSERT((handle != NULL) && (cap_ptr != 0));
384 384
385 385 if (type == APIX_TYPE_MSI) {
386 386 msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
387 387 if ((msi_ctrl & PCI_MSI_ENABLE_BIT))
388 388 return;
389 389
390 390 msi_ctrl |= PCI_MSI_ENABLE_BIT;
391 391 pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);
392 392
393 393 } else if (type == DDI_INTR_TYPE_MSIX) {
394 394 uintptr_t off;
395 395 uint32_t mask;
396 396 ddi_intr_msix_t *msix_p;
397 397
398 398 msix_p = i_ddi_get_msix(dip);
399 399
400 400 /* Offset into "inum"th entry in the MSI-X table & clear mask */
401 401 off = (uintptr_t)msix_p->msix_tbl_addr + (inum *
402 402 PCI_MSIX_VECTOR_SIZE) + PCI_MSIX_VECTOR_CTRL_OFFSET;
403 403
404 404 mask = ddi_get32(msix_p->msix_tbl_hdl, (uint32_t *)off);
405 405
406 406 ddi_put32(msix_p->msix_tbl_hdl, (uint32_t *)off, (mask & ~1));
407 407
408 408 msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSIX_CTRL);
409 409
410 410 if (!(msi_ctrl & PCI_MSIX_ENABLE_BIT)) {
411 411 msi_ctrl |= PCI_MSIX_ENABLE_BIT;
412 412 pci_config_put16(handle, cap_ptr + PCI_MSIX_CTRL,
413 413 msi_ctrl);
414 414 }
415 415 }
416 416 }
417 417
418 418 /*
419 419 * Setup interrupt, pogramming IO-APIC or MSI/X address/data.
420 420 */
421 421 void
422 422 apix_enable_vector(apix_vector_t *vecp)
423 423 {
424 424 int tocpu = vecp->v_cpuid, type = vecp->v_type;
425 425 apic_cpus_info_t *cpu_infop;
426 426 ulong_t iflag;
427 427
428 428 ASSERT(tocpu < apic_nproc);
429 429
430 430 cpu_infop = &apic_cpus[tocpu];
431 431 if (vecp->v_flags & APIX_VECT_USER_BOUND)
432 432 cpu_infop->aci_bound++;
433 433 else
434 434 cpu_infop->aci_temp_bound++;
435 435
436 436 iflag = intr_clear();
437 437 lock_set(&apic_ioapic_lock);
438 438
439 439 if (!DDI_INTR_IS_MSI_OR_MSIX(type)) { /* fixed */
440 440 apix_intx_enable(vecp->v_inum);
441 441 } else {
442 442 int inum = vecp->v_inum;
443 443 dev_info_t *dip = APIX_GET_DIP(vecp);
444 444 int count = i_ddi_intr_get_current_nintrs(dip);
445 445
446 446 if (type == APIX_TYPE_MSI) { /* MSI */
447 447 if (inum == apix_get_max_dev_inum(dip, type)) {
448 448 /* last one */
449 449 uchar_t start_inum = inum + 1 - count;
450 450 uchar_t start_vect = vecp->v_vector + 1 - count;
451 451 apix_vector_t *start_vecp =
452 452 xv_vector(vecp->v_cpuid, start_vect);
453 453
454 454 APIC_VERBOSE(INTR, (CE_CONT, "apix: call "
455 455 "apix_pci_msi_enable_vector\n"));
456 456 apix_pci_msi_enable_vector(start_vecp, dip,
457 457 type, start_inum, count, start_vect,
458 458 cpu_infop->aci_local_id);
459 459
460 460 APIC_VERBOSE(INTR, (CE_CONT, "apix: call "
461 461 "apix_pci_msi_enable_mode\n"));
462 462 apix_pci_msi_enable_mode(dip, type, inum);
463 463 }
464 464 } else { /* MSI-X */
465 465 apix_pci_msi_enable_vector(vecp, dip,
466 466 type, inum, 1, vecp->v_vector,
467 467 cpu_infop->aci_local_id);
468 468 apix_pci_msi_enable_mode(dip, type, inum);
469 469 }
470 470 }
471 471 vecp->v_state = APIX_STATE_ENABLED;
472 472 apic_redist_cpu_skip &= ~(1 << tocpu);
473 473
474 474 lock_clear(&apic_ioapic_lock);
475 475 intr_restore(iflag);
476 476 }
477 477
478 478 /*
479 479 * Disable the interrupt
480 480 */
481 481 void
482 482 apix_disable_vector(apix_vector_t *vecp)
483 483 {
484 484 struct autovec *avp = vecp->v_autovect;
485 485 ulong_t iflag;
486 486
487 487 ASSERT(avp != NULL);
488 488
489 489 iflag = intr_clear();
490 490 lock_set(&apic_ioapic_lock);
491 491
492 492 switch (vecp->v_type) {
493 493 case APIX_TYPE_MSI:
494 494 ASSERT(avp->av_vector != NULL && avp->av_dip != NULL);
495 495 /*
496 496 * Disable the MSI vector
497 497 * Make sure we only disable on the last
498 498 * of the multi-MSI support
499 499 */
500 500 if (i_ddi_intr_get_current_nenables(avp->av_dip) == 1) {
501 501 apic_pci_msi_disable_mode(avp->av_dip,
502 502 DDI_INTR_TYPE_MSI);
503 503 }
504 504 break;
505 505 case APIX_TYPE_MSIX:
506 506 ASSERT(avp->av_vector != NULL && avp->av_dip != NULL);
507 507 /*
508 508 * Disable the MSI-X vector
509 509 * needs to clear its mask and addr/data for each MSI-X
510 510 */
511 511 apic_pci_msi_unconfigure(avp->av_dip, DDI_INTR_TYPE_MSIX,
512 512 vecp->v_inum);
513 513 /*
514 514 * Make sure we only disable on the last MSI-X
515 515 */
516 516 if (i_ddi_intr_get_current_nenables(avp->av_dip) == 1) {
517 517 apic_pci_msi_disable_mode(avp->av_dip,
518 518 DDI_INTR_TYPE_MSIX);
519 519 }
520 520 break;
521 521 default:
522 522 apix_intx_disable(vecp->v_inum);
523 523 break;
524 524 }
525 525
526 526 if (!(apic_cpus[vecp->v_cpuid].aci_status & APIC_CPU_SUSPEND))
527 527 vecp->v_state = APIX_STATE_DISABLED;
528 528 apic_vt_ops->apic_intrmap_free_entry(&vecp->v_intrmap_private);
529 529 vecp->v_intrmap_private = NULL;
530 530
531 531 lock_clear(&apic_ioapic_lock);
532 532 intr_restore(iflag);
533 533 }
534 534
535 535 /*
536 536 * Mark vector as obsoleted or freed. The vector is marked
537 537 * obsoleted if there are pending requests on it. Otherwise,
538 538 * free the vector. The obsoleted vectors get freed after
539 539 * being serviced.
540 540 *
 541  541   * Return 1 if the vector was obsoleted, 0 if it was freed.
542 542 */
543 543 #define INTR_BUSY(_avp)\
544 544 ((((volatile ushort_t)(_avp)->av_flags) &\
545 545 (AV_PENTRY_PEND | AV_PENTRY_ONPROC)) != 0)
546 546 #define LOCAL_WITH_INTR_DISABLED(_cpuid)\
547 547 ((_cpuid) == psm_get_cpu_id() && !interrupts_enabled())
548 548 static uint64_t dummy_tick;
549 549
550 550 int
551 551 apix_obsolete_vector(apix_vector_t *vecp)
552 552 {
553 553 struct autovec *avp = vecp->v_autovect;
554 554 int repeats, tries, ipl, busy = 0, cpuid = vecp->v_cpuid;
555 555 apix_impl_t *apixp = apixs[cpuid];
556 556
557 557 ASSERT(APIX_CPU_LOCK_HELD(cpuid));
558 558
559 559 for (avp = vecp->v_autovect; avp != NULL; avp = avp->av_link) {
560 560 if (avp->av_vector == NULL)
561 561 continue;
562 562
563 563 if (LOCAL_WITH_INTR_DISABLED(cpuid)) {
564 564 int bit, index, irr;
565 565
566 566 if (INTR_BUSY(avp)) {
567 567 busy++;
568 568 continue;
569 569 }
570 570
571 571 /* check IRR for pending interrupts */
572 572 index = vecp->v_vector / 32;
573 573 bit = vecp->v_vector % 32;
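			/*
			 * Each IRR register covers 32 vectors; an (assumed)
			 * vector 0x45 (69) would map to index 2, bit 5,
			 * i.e. the third 32-bit IRR word.
			 */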
574 574 irr = apic_reg_ops->apic_read(APIC_IRR_REG + index);
575 575 if ((irr & (1 << bit)) != 0)
576 576 busy++;
577 577
578 578 if (!busy)
579 579 apix_remove_av(vecp, avp);
580 580
581 581 continue;
582 582 }
583 583
584 584 repeats = 0;
585 585 do {
586 586 repeats++;
587 587 for (tries = 0; tries < apic_max_reps_clear_pending;
588 588 tries++)
589 589 if (!INTR_BUSY(avp))
590 590 break;
591 591 } while (INTR_BUSY(avp) &&
592 592 (repeats < apic_max_reps_clear_pending));
593 593
594 594 if (INTR_BUSY(avp))
595 595 busy++;
596 596 else {
597 597 /*
598 598 * Interrupt is not in pending list or being serviced.
599 599 * However it might be cached in Local APIC's IRR
600 600 * register. It's impossible to check another CPU's
601 601 * IRR register. Then wait till lower levels finish
602 602 * running.
603 603 */
604 604 for (ipl = 1; ipl < MIN(LOCK_LEVEL, vecp->v_pri); ipl++)
605 605 apix_wait_till_seen(cpuid, ipl);
606 606 if (INTR_BUSY(avp))
607 607 busy++;
608 608 }
609 609
610 610 if (!busy)
611 611 apix_remove_av(vecp, avp);
612 612 }
613 613
614 614 if (busy) {
615 615 apix_vector_t *tp = apixp->x_obsoletes;
616 616
617 617 if (vecp->v_state == APIX_STATE_OBSOLETED)
618 618 return (1);
619 619
620 620 vecp->v_state = APIX_STATE_OBSOLETED;
621 621 vecp->v_next = NULL;
622 622 if (tp == NULL)
623 623 apixp->x_obsoletes = vecp;
624 624 else {
625 625 while (tp->v_next != NULL)
626 626 tp = tp->v_next;
627 627 tp->v_next = vecp;
628 628 }
629 629 return (1);
630 630 }
631 631
632 632 /* interrupt is not busy */
633 633 if (vecp->v_state == APIX_STATE_OBSOLETED) {
634 634 /* remove from obsoleted list */
635 635 apixp->x_obsoletes = vecp->v_next;
636 636 vecp->v_next = NULL;
637 637 }
638 638 apix_cleanup_vector(vecp);
639 639 return (0);
640 640 }
641 641
642 642 /*
 643  643   * Duplicate a number of contiguous vectors to the specified target vectors.
644 644 */
645 645 static void
646 646 apix_dup_vectors(apix_vector_t *oldp, apix_vector_t *newp, int count)
647 647 {
648 648 struct autovec *avp;
649 649 apix_vector_t *fromp, *top;
650 650 processorid_t oldcpu = oldp->v_cpuid, newcpu = newp->v_cpuid;
651 651 uchar_t oldvec = oldp->v_vector, newvec = newp->v_vector;
652 652 int i, inum;
653 653
654 654 ASSERT(oldp->v_type != APIX_TYPE_IPI);
655 655
656 656 for (i = 0; i < count; i++) {
657 657 fromp = xv_vector(oldcpu, oldvec + i);
658 658 top = xv_vector(newcpu, newvec + i);
659 659 ASSERT(fromp != NULL && top != NULL);
660 660
661 661 /* copy over original one */
662 662 top->v_state = fromp->v_state;
663 663 top->v_type = fromp->v_type;
664 664 top->v_bound_cpuid = fromp->v_bound_cpuid;
665 665 top->v_inum = fromp->v_inum;
666 666 top->v_flags = fromp->v_flags;
667 667 top->v_intrmap_private = fromp->v_intrmap_private;
668 668
669 669 for (avp = fromp->v_autovect; avp != NULL; avp = avp->av_link) {
670 670 if (avp->av_vector == NULL)
671 671 continue;
672 672
673 673 apix_insert_av(top, avp->av_intr_id, avp->av_vector,
674 674 avp->av_intarg1, avp->av_intarg2, avp->av_ticksp,
675 675 avp->av_prilevel, avp->av_dip);
676 676
677 677 if (fromp->v_type == APIX_TYPE_FIXED &&
678 678 avp->av_dip != NULL) {
679 679 inum = GET_INTR_INUM(avp->av_intr_id);
680 680 apix_set_dev_map(top, avp->av_dip, inum);
681 681 }
682 682 }
683 683
684 684 if (DDI_INTR_IS_MSI_OR_MSIX(fromp->v_type) &&
685 685 fromp->v_devp != NULL)
686 686 apix_set_dev_map(top, fromp->v_devp->dv_dip,
687 687 fromp->v_devp->dv_inum);
688 688 }
689 689 }
690 690
691 691 static apix_vector_t *
692 692 apix_init_vector(processorid_t cpuid, uchar_t vector)
693 693 {
694 694 apix_impl_t *apixp = apixs[cpuid];
695 695 apix_vector_t *vecp = apixp->x_vectbl[vector];
696 696
697 697 ASSERT(IS_VECT_FREE(vecp));
698 698
699 699 if (vecp == NULL) {
700 700 vecp = kmem_zalloc(sizeof (apix_vector_t), KM_NOSLEEP);
701 701 if (vecp == NULL) {
702 702 cmn_err(CE_WARN, "apix: no memory to allocate vector");
703 703 return (NULL);
704 704 }
705 705 apixp->x_vectbl[vector] = vecp;
706 706 }
707 707 vecp->v_state = APIX_STATE_ALLOCED;
708 708 vecp->v_cpuid = vecp->v_bound_cpuid = cpuid;
709 709 vecp->v_vector = vector;
710 710
711 711 return (vecp);
712 712 }
713 713
714 714 static void
715 715 apix_cleanup_vector(apix_vector_t *vecp)
716 716 {
717 717 ASSERT(vecp->v_share == 0);
718 718 vecp->v_bound_cpuid = IRQ_UNINIT;
719 719 vecp->v_state = APIX_STATE_FREED;
720 720 vecp->v_type = 0;
721 721 vecp->v_flags = 0;
722 722 vecp->v_busy = 0;
723 723 vecp->v_intrmap_private = NULL;
724 724 }
725 725
726 726 static void
727 727 apix_dprint_vector(apix_vector_t *vecp, dev_info_t *dip, int count)
728 728 {
729 729 #ifdef DEBUG
730 730 major_t major;
731 731 char *name, *drv_name;
732 732 int instance, len, t_len;
733 733 char mesg[1024] = "apix: ";
734 734
735 735 t_len = sizeof (mesg);
736 736 len = strlen(mesg);
737 737 if (dip != NULL) {
738 738 name = ddi_get_name(dip);
739 739 major = ddi_name_to_major(name);
740 740 drv_name = ddi_major_to_name(major);
741 741 instance = ddi_get_instance(dip);
742 742 (void) snprintf(mesg + len, t_len - len, "%s (%s) instance %d ",
743 743 name, drv_name, instance);
744 744 }
745 745 len = strlen(mesg);
746 746
747 747 switch (vecp->v_type) {
748 748 case APIX_TYPE_FIXED:
749 749 (void) snprintf(mesg + len, t_len - len, "irqno %d",
750 750 vecp->v_inum);
751 751 break;
752 752 case APIX_TYPE_MSI:
753 753 (void) snprintf(mesg + len, t_len - len,
754 754 "msi inum %d (count %d)", vecp->v_inum, count);
755 755 break;
756 756 case APIX_TYPE_MSIX:
757 757 (void) snprintf(mesg + len, t_len - len, "msi-x inum %d",
758 758 vecp->v_inum);
759 759 break;
760 760 default:
761 761 break;
762 762
763 763 }
764 764
765 765 APIC_VERBOSE(ALLOC, (CE_CONT, "%s allocated with vector 0x%x on "
766 766 "cpu %d\n", mesg, vecp->v_vector, vecp->v_cpuid));
767 767 #endif /* DEBUG */
768 768 }
769 769
770 770 /*
771 771 * Operations on avintr
772 772 */
773 773
774 774 #define INIT_AUTOVEC(p, intr_id, f, arg1, arg2, ticksp, ipl, dip) \
775 775 do { \
776 776 (p)->av_intr_id = intr_id; \
777 777 (p)->av_vector = f; \
778 778 (p)->av_intarg1 = arg1; \
779 779 (p)->av_intarg2 = arg2; \
780 780 (p)->av_ticksp = ticksp; \
781 781 (p)->av_prilevel = ipl; \
782 782 (p)->av_dip = dip; \
783 783 (p)->av_flags = 0; \
784 784 _NOTE(CONSTCOND)} while (0)
785 785
786 786 /*
787 787 * Insert an interrupt service routine into chain by its priority from
788 788 * high to low
789 789 */
790 790 static void
791 791 apix_insert_av(apix_vector_t *vecp, void *intr_id, avfunc f, caddr_t arg1,
792 792 caddr_t arg2, uint64_t *ticksp, int ipl, dev_info_t *dip)
793 793 {
794 794 struct autovec *p, *prep, *mem;
795 795
796 796 APIC_VERBOSE(INTR, (CE_CONT, "apix_insert_av: dip %p, vector 0x%x, "
797 797 "cpu %d\n", (void *)dip, vecp->v_vector, vecp->v_cpuid));
798 798
799 799 mem = kmem_zalloc(sizeof (struct autovec), KM_SLEEP);
800 800 INIT_AUTOVEC(mem, intr_id, f, arg1, arg2, ticksp, ipl, dip);
801 801 if (vecp->v_type == APIX_TYPE_FIXED && apic_level_intr[vecp->v_inum])
802 802 mem->av_flags |= AV_PENTRY_LEVEL;
803 803
804 804 vecp->v_share++;
805 805 vecp->v_pri = (ipl > vecp->v_pri) ? ipl : vecp->v_pri;
806 806 if (vecp->v_autovect == NULL) { /* Nothing on list - put it at head */
807 807 vecp->v_autovect = mem;
808 808 return;
809 809 }
810 810
811 811 if (DDI_INTR_IS_MSI_OR_MSIX(vecp->v_type)) { /* MSI/X */
812 812 ASSERT(vecp->v_share == 1); /* No sharing for MSI/X */
813 813
814 814 INIT_AUTOVEC(vecp->v_autovect, intr_id, f, arg1, arg2, ticksp,
815 815 ipl, dip);
816 816 prep = vecp->v_autovect->av_link;
817 817 vecp->v_autovect->av_link = NULL;
818 818
819 819 /* Free the following autovect chain */
820 820 while (prep != NULL) {
821 821 ASSERT(prep->av_vector == NULL);
822 822
823 823 p = prep;
824 824 prep = prep->av_link;
825 825 kmem_free(p, sizeof (struct autovec));
826 826 }
827 827
828 828 kmem_free(mem, sizeof (struct autovec));
829 829 return;
830 830 }
831 831
832 832 /* find where it goes in list */
833 833 prep = NULL;
834 834 for (p = vecp->v_autovect; p != NULL; p = p->av_link) {
835 835 if (p->av_vector && p->av_prilevel <= ipl)
836 836 break;
837 837 prep = p;
838 838 }
839 839 if (prep != NULL) {
840 840 if (prep->av_vector == NULL) { /* freed struct available */
841 841 INIT_AUTOVEC(prep, intr_id, f, arg1, arg2,
842 842 ticksp, ipl, dip);
843 843 prep->av_flags = mem->av_flags;
844 844 kmem_free(mem, sizeof (struct autovec));
845 845 return;
846 846 }
847 847
848 848 mem->av_link = prep->av_link;
849 849 prep->av_link = mem;
850 850 } else {
851 851 /* insert new intpt at beginning of chain */
852 852 mem->av_link = vecp->v_autovect;
853 853 vecp->v_autovect = mem;
854 854 }
855 855 }
856 856
857 857 /*
858 858 * After having made a change to an autovector list, wait until we have
859 859 * seen specified cpu not executing an interrupt at that level--so we
860 860 * know our change has taken effect completely (no old state in registers,
861 861 * etc).
862 862 */
863 863 #define APIX_CPU_ENABLED(_cp) \
864 864 (quiesce_active == 0 && \
865 865 (((_cp)->cpu_flags & (CPU_QUIESCED|CPU_OFFLINE)) == 0))
866 866
867 867 static void
868 868 apix_wait_till_seen(processorid_t cpuid, int ipl)
869 869 {
870 870 struct cpu *cp = cpu[cpuid];
871 871
872 872 if (cp == NULL || LOCAL_WITH_INTR_DISABLED(cpuid))
873 873 return;
874 874
875 875 /*
876 876 * Don't wait if the CPU is quiesced or offlined. This can happen
877 877 * when a CPU is running pause thread but hardware triggered an
878 878 * interrupt and the interrupt gets queued.
879 879 */
880 880 for (;;) {
881 881 if (!INTR_ACTIVE((volatile struct cpu *)cpu[cpuid], ipl) &&
882 882 (!APIX_CPU_ENABLED(cp) ||
883 883 !INTR_PENDING((volatile apix_impl_t *)apixs[cpuid], ipl)))
884 884 return;
885 885 }
886 886 }
887 887
888 888 static void
889 889 apix_remove_av(apix_vector_t *vecp, struct autovec *target)
890 890 {
891 891 int hi_pri = 0;
892 892 struct autovec *p;
893 893
894 894 if (target == NULL)
895 895 return;
896 896
897 897 APIC_VERBOSE(INTR, (CE_CONT, "apix_remove_av: dip %p, vector 0x%x, "
898 898 "cpu %d\n", (void *)target->av_dip, vecp->v_vector, vecp->v_cpuid));
899 899
900 900 for (p = vecp->v_autovect; p; p = p->av_link) {
901 901 if (p == target || p->av_vector == NULL)
902 902 continue;
903 903 hi_pri = (p->av_prilevel > hi_pri) ? p->av_prilevel : hi_pri;
904 904 }
905 905
906 906 vecp->v_share--;
907 907 vecp->v_pri = hi_pri;
908 908
909 909 /*
910 910 * This drops the handler from the chain, it can no longer be called.
911 911 * However, there is no guarantee that the handler is not currently
912 912 * still executing.
913 913 */
914 914 target->av_vector = NULL;
915 915 /*
916 916 * There is a race where we could be just about to pick up the ticksp
917 917 * pointer to increment it after returning from the service routine
918 918 * in av_dispatch_autovect. Rather than NULL it out let's just point
919 919 * it off to something safe so that any final tick update attempt
920 920 * won't fault.
921 921 */
922 922 target->av_ticksp = &dummy_tick;
923 923 apix_wait_till_seen(vecp->v_cpuid, target->av_prilevel);
924 924 }
925 925
926 926 static struct autovec *
927 927 apix_find_av(apix_vector_t *vecp, void *intr_id, avfunc f)
928 928 {
929 929 struct autovec *p;
930 930
931 931 for (p = vecp->v_autovect; p; p = p->av_link) {
932 932 if ((p->av_vector == f) && (p->av_intr_id == intr_id)) {
933 933 /* found the handler */
934 934 return (p);
935 935 }
936 936 }
937 937
938 938 return (NULL);
939 939 }
940 940
941 941 static apix_vector_t *
942 942 apix_find_vector_by_avintr(void *intr_id, avfunc f)
943 943 {
944 944 apix_vector_t *vecp;
945 945 processorid_t n;
946 946 uchar_t v;
947 947
948 948 for (n = 0; n < apic_nproc; n++) {
949 949 if (!apix_is_cpu_enabled(n))
950 950 continue;
951 951
 952  952  		for (v = APIX_AVINTR_MIN; v <= APIX_AVINTR_MAX; v++) {
953 953 vecp = xv_vector(n, v);
954 954 if (vecp == NULL ||
955 955 vecp->v_state <= APIX_STATE_OBSOLETED)
956 956 continue;
957 957
958 958 if (apix_find_av(vecp, intr_id, f) != NULL)
959 959 return (vecp);
960 960 }
961 961 }
962 962
963 963 return (NULL);
964 964 }
965 965
966 966 /*
967 967 * Add interrupt service routine.
968 968 *
969 969 * For legacy interrupts (HPET timer, ACPI SCI), the vector is actually
970 970 * IRQ no. A vector is then allocated. Otherwise, the vector is already
971 971 * allocated. The input argument virt_vect is virtual vector of format
972 972 * APIX_VIRTVEC_VECTOR(cpuid, vector).
973 973 *
974 974 * Return 1 on success, 0 on failure.
975 975 */
976 976 int
977 977 apix_add_avintr(void *intr_id, int ipl, avfunc xxintr, char *name,
978 978 int virt_vect, caddr_t arg1, caddr_t arg2, uint64_t *ticksp,
979 979 dev_info_t *dip)
980 980 {
981 981 int cpuid;
982 982 uchar_t v = (uchar_t)APIX_VIRTVEC_VECTOR(virt_vect);
983 983 apix_vector_t *vecp;
984 984
985 985 if (xxintr == NULL) {
986 986 cmn_err(CE_WARN, "Attempt to add null for %s "
987 987 "on vector 0x%x,0x%x", name,
988 988 APIX_VIRTVEC_CPU(virt_vect),
989 989 APIX_VIRTVEC_VECTOR(virt_vect));
990 990 return (0);
991 991 }
992 992
993 993 if (v >= APIX_IPI_MIN) /* IPIs */
994 994 return (apix_add_ipi(ipl, xxintr, name, v, arg1, arg2));
995 995
996 996 if (!APIX_IS_VIRTVEC(virt_vect)) { /* got irq */
997 997 int irqno = virt_vect;
998 998 int inum = GET_INTR_INUM(intr_id);
999 999
1000 1000 /*
1001 1001  		 * Scenarios include:
1002 1002 * a. add_avintr() is called before irqp initialized (legacy)
1003 1003 * b. irqp is initialized, vector is not allocated (fixed)
1004 1004 * c. irqp is initialized, vector is allocated (fixed & shared)
1005 1005 */
1006 1006 if ((vecp = apix_alloc_intx(dip, inum, irqno)) == NULL)
1007 1007 return (0);
1008 1008
1009 1009 cpuid = vecp->v_cpuid;
1010 1010 v = vecp->v_vector;
1011 1011 virt_vect = APIX_VIRTVECTOR(cpuid, v);
1012 1012 } else { /* got virtual vector */
1013 1013 cpuid = APIX_VIRTVEC_CPU(virt_vect);
1014 1014 vecp = xv_vector(cpuid, v);
1015 1015 ASSERT(vecp != NULL);
1016 1016 }
1017 1017
1018 1018 lock_set(&apix_lock);
1019 1019 if (vecp->v_state <= APIX_STATE_OBSOLETED) {
1020 1020 vecp = NULL;
1021 1021
1022 1022 /*
1023 1023 * Basically the allocated but not enabled interrupts
1024 1024 * will not get re-targeted. But MSIs in allocated state
1025 1025 * could be re-targeted due to group re-targeting.
1026 1026 */
1027 1027 if (intr_id != NULL && dip != NULL) {
1028 1028 ddi_intr_handle_impl_t *hdlp = intr_id;
1029 1029 vecp = apix_get_dev_map(dip, hdlp->ih_inum,
1030 1030 hdlp->ih_type);
1031 1031 ASSERT(vecp->v_state == APIX_STATE_ALLOCED);
1032 1032 }
1033 1033 if (vecp == NULL) {
1034 1034 lock_clear(&apix_lock);
1035 1035 cmn_err(CE_WARN, "Invalid interrupt 0x%x,0x%x "
1036 1036 " for %p to add", cpuid, v, intr_id);
1037 1037 return (0);
1038 1038 }
1039 1039 cpuid = vecp->v_cpuid;
1040 1040 virt_vect = APIX_VIRTVECTOR(cpuid, vecp->v_vector);
1041 1041 }
1042 1042
1043 1043 APIX_ENTER_CPU_LOCK(cpuid);
1044 1044 apix_insert_av(vecp, intr_id, xxintr, arg1, arg2, ticksp, ipl, dip);
1045 1045 APIX_LEAVE_CPU_LOCK(cpuid);
1046 1046
1047 1047 (void) apix_addspl(virt_vect, ipl, 0, 0);
1048 1048
1049 1049 lock_clear(&apix_lock);
1050 1050
1051 1051 return (1);
1052 1052 }
1053 1053
1054 1054 /*
1055 1055 * Remove avintr
1056 1056 *
1057 1057 * For fixed, if it's the last one of shared interrupts, free the vector.
1058 1058 * For msi/x, only disable the interrupt but not free the vector, which
1059 1059 * is freed by PSM_XXX_FREE_XXX.
1060 1060 */
1061 1061 void
1062 1062 apix_rem_avintr(void *intr_id, int ipl, avfunc xxintr, int virt_vect)
1063 1063 {
1064 1064 avfunc f;
1065 1065 apix_vector_t *vecp;
1066 1066 struct autovec *avp;
1067 1067 processorid_t cpuid;
1068 1068
1069 1069 if ((f = xxintr) == NULL)
1070 1070 return;
1071 1071
1072 1072 lock_set(&apix_lock);
1073 1073
1074 1074 if (!APIX_IS_VIRTVEC(virt_vect)) { /* got irq */
1075 1075 vecp = apix_intx_get_vector(virt_vect);
1076 1076 virt_vect = APIX_VIRTVECTOR(vecp->v_cpuid, vecp->v_vector);
1077 1077 } else /* got virtual vector */
1078 1078 vecp = xv_vector(APIX_VIRTVEC_CPU(virt_vect),
1079 1079 APIX_VIRTVEC_VECTOR(virt_vect));
1080 1080
1081 1081 if (vecp == NULL) {
1082 1082 lock_clear(&apix_lock);
1083 1083 cmn_err(CE_CONT, "Invalid interrupt 0x%x,0x%x to remove",
1084 1084 APIX_VIRTVEC_CPU(virt_vect),
1085 1085 APIX_VIRTVEC_VECTOR(virt_vect));
1086 1086 return;
1087 1087 }
1088 1088
1089 1089 if (vecp->v_state <= APIX_STATE_OBSOLETED ||
1090 1090 ((avp = apix_find_av(vecp, intr_id, f)) == NULL)) {
1091 1091 /*
1092 1092 * It's possible that the interrupt is rebound to a
1093 1093 * different cpu before rem_avintr() is called. Search
1094 1094 * through all vectors once it happens.
1095 1095 */
1096 1096 if ((vecp = apix_find_vector_by_avintr(intr_id, f))
1097 1097 == NULL) {
1098 1098 lock_clear(&apix_lock);
1099 1099 cmn_err(CE_CONT, "Unknown interrupt 0x%x,0x%x "
1100 1100 "for %p to remove", APIX_VIRTVEC_CPU(virt_vect),
1101 1101 APIX_VIRTVEC_VECTOR(virt_vect), intr_id);
1102 1102 return;
1103 1103 }
1104 1104 virt_vect = APIX_VIRTVECTOR(vecp->v_cpuid, vecp->v_vector);
1105 1105 avp = apix_find_av(vecp, intr_id, f);
1106 1106 }
1107 1107 cpuid = vecp->v_cpuid;
1108 1108
1109 1109 /* disable interrupt */
1110 1110 (void) apix_delspl(virt_vect, ipl, 0, 0);
1111 1111
1112 1112 /* remove ISR entry */
1113 1113 APIX_ENTER_CPU_LOCK(cpuid);
1114 1114 apix_remove_av(vecp, avp);
1115 1115 APIX_LEAVE_CPU_LOCK(cpuid);
1116 1116
1117 1117 lock_clear(&apix_lock);
1118 1118 }
1119 1119
1120 1120 /*
1121 1121 * Device to vector mapping table
1122 1122 */
1123 1123
1124 1124 static void
1125 1125 apix_clear_dev_map(dev_info_t *dip, int inum, int type)
1126 1126 {
1127 1127 char *name;
1128 1128 major_t major;
1129 1129 apix_dev_vector_t *dvp, *prev = NULL;
1130 1130 int found = 0;
1131 1131
1132 1132 name = ddi_get_name(dip);
1133 1133 major = ddi_name_to_major(name);
1134 1134
1135 1135 mutex_enter(&apix_mutex);
1136 1136
1137 1137 for (dvp = apix_dev_vector[major]; dvp != NULL;
1138 1138 prev = dvp, dvp = dvp->dv_next) {
1139 1139 if (dvp->dv_dip == dip && dvp->dv_inum == inum &&
1140 1140 dvp->dv_type == type) {
1141 1141 found++;
1142 1142 break;
1143 1143 }
1144 1144 }
1145 1145
1146 1146 if (!found) {
1147 1147 mutex_exit(&apix_mutex);
1148 1148 return;
1149 1149 }
1150 1150
1151 1151 if (prev != NULL)
1152 1152 prev->dv_next = dvp->dv_next;
1153 1153
1154 1154 if (apix_dev_vector[major] == dvp)
1155 1155 apix_dev_vector[major] = dvp->dv_next;
1156 1156
1157 1157 dvp->dv_vector->v_devp = NULL;
1158 1158
1159 1159 mutex_exit(&apix_mutex);
1160 1160
1161 1161 kmem_free(dvp, sizeof (apix_dev_vector_t));
1162 1162 }
1163 1163
1164 1164 void
1165 1165 apix_set_dev_map(apix_vector_t *vecp, dev_info_t *dip, int inum)
1166 1166 {
1167 1167 apix_dev_vector_t *dvp;
1168 1168 char *name;
1169 1169 major_t major;
1170 1170 uint32_t found = 0;
1171 1171
1172 1172 ASSERT(dip != NULL);
1173 1173 name = ddi_get_name(dip);
1174 1174 major = ddi_name_to_major(name);
1175 1175
1176 1176 mutex_enter(&apix_mutex);
1177 1177
1178 1178 for (dvp = apix_dev_vector[major]; dvp != NULL;
1179 1179 dvp = dvp->dv_next) {
1180 1180 if (dvp->dv_dip == dip && dvp->dv_inum == inum &&
1181 1181 dvp->dv_type == vecp->v_type) {
1182 1182 found++;
1183 1183 break;
1184 1184 }
1185 1185 }
1186 1186
1187 1187 if (found == 0) { /* not found */
1188 1188 dvp = kmem_zalloc(sizeof (apix_dev_vector_t), KM_SLEEP);
1189 1189 dvp->dv_dip = dip;
1190 1190 dvp->dv_inum = inum;
1191 1191 dvp->dv_type = vecp->v_type;
1192 1192
1193 1193 dvp->dv_next = apix_dev_vector[major];
1194 1194 apix_dev_vector[major] = dvp;
1195 1195 }
1196 1196 dvp->dv_vector = vecp;
1197 1197 vecp->v_devp = dvp;
1198 1198
1199 1199 mutex_exit(&apix_mutex);
1200 1200
1201 1201 DDI_INTR_IMPLDBG((CE_CONT, "apix_set_dev_map: dip=0x%p "
1202 1202 "inum=0x%x vector=0x%x/0x%x\n",
1203 1203 (void *)dip, inum, vecp->v_cpuid, vecp->v_vector));
1204 1204 }
1205 1205
1206 1206 apix_vector_t *
1207 1207 apix_get_dev_map(dev_info_t *dip, int inum, int type)
1208 1208 {
1209 1209 char *name;
1210 1210 major_t major;
1211 1211 apix_dev_vector_t *dvp;
1212 1212 apix_vector_t *vecp;
1213 1213
1214 1214 name = ddi_get_name(dip);
1215 1215 if ((major = ddi_name_to_major(name)) == DDI_MAJOR_T_NONE)
1216 1216 return (NULL);
1217 1217
1218 1218 mutex_enter(&apix_mutex);
1219 1219 for (dvp = apix_dev_vector[major]; dvp != NULL;
1220 1220 dvp = dvp->dv_next) {
1221 1221 if (dvp->dv_dip == dip && dvp->dv_inum == inum &&
1222 1222 dvp->dv_type == type) {
1223 1223 vecp = dvp->dv_vector;
1224 1224 mutex_exit(&apix_mutex);
1225 1225 return (vecp);
1226 1226 }
1227 1227 }
1228 1228 mutex_exit(&apix_mutex);
1229 1229
1230 1230 return (NULL);
1231 1231 }
1232 1232
1233 1233 /*
1234 1234 * Get minimum inum for specified device, used for MSI
1235 1235 */
1236 1236 int
1237 1237 apix_get_min_dev_inum(dev_info_t *dip, int type)
1238 1238 {
1239 1239 char *name;
1240 1240 major_t major;
1241 1241 apix_dev_vector_t *dvp;
1242 1242 int inum = -1;
1243 1243
1244 1244 name = ddi_get_name(dip);
1245 1245 major = ddi_name_to_major(name);
1246 1246
1247 1247 mutex_enter(&apix_mutex);
1248 1248 for (dvp = apix_dev_vector[major]; dvp != NULL;
1249 1249 dvp = dvp->dv_next) {
1250 1250 if (dvp->dv_dip == dip && dvp->dv_type == type) {
1251 1251 if (inum == -1)
1252 1252 inum = dvp->dv_inum;
1253 1253 else
1254 1254 inum = (dvp->dv_inum < inum) ?
1255 1255 dvp->dv_inum : inum;
1256 1256 }
1257 1257 }
1258 1258 mutex_exit(&apix_mutex);
1259 1259
1260 1260 return (inum);
1261 1261 }
1262 1262
1263 1263 int
1264 1264 apix_get_max_dev_inum(dev_info_t *dip, int type)
1265 1265 {
1266 1266 char *name;
1267 1267 major_t major;
1268 1268 apix_dev_vector_t *dvp;
1269 1269 int inum = -1;
1270 1270
1271 1271 name = ddi_get_name(dip);
1272 1272 major = ddi_name_to_major(name);
1273 1273
1274 1274 mutex_enter(&apix_mutex);
1275 1275 for (dvp = apix_dev_vector[major]; dvp != NULL;
1276 1276 dvp = dvp->dv_next) {
1277 1277 if (dvp->dv_dip == dip && dvp->dv_type == type) {
1278 1278 if (inum == -1)
1279 1279 inum = dvp->dv_inum;
1280 1280 else
1281 1281 inum = (dvp->dv_inum > inum) ?
1282 1282 dvp->dv_inum : inum;
1283 1283 }
1284 1284 }
1285 1285 mutex_exit(&apix_mutex);
1286 1286
1287 1287 return (inum);
1288 1288 }
1289 1289
1290 1290 /*
1291 1291 * Major to cpu binding, for INTR_ROUND_ROBIN_WITH_AFFINITY cpu
1292 1292 * binding policy
1293 1293 */
1294 1294
1295 1295 static uint32_t
1296 1296 apix_get_dev_binding(dev_info_t *dip)
1297 1297 {
1298 1298 major_t major;
1299 1299 char *name;
1300 1300 uint32_t cpu = IRQ_UNINIT;
1301 1301
1302 1302 name = ddi_get_name(dip);
1303 1303 major = ddi_name_to_major(name);
1304 1304 if (major < devcnt) {
1305 1305 mutex_enter(&apix_mutex);
1306 1306 cpu = apix_major_to_cpu[major];
1307 1307 mutex_exit(&apix_mutex);
1308 1308 }
1309 1309
1310 1310 return (cpu);
1311 1311 }
1312 1312
1313 1313 static void
1314 1314 apix_set_dev_binding(dev_info_t *dip, uint32_t cpu)
1315 1315 {
1316 1316 major_t major;
1317 1317 char *name;
1318 1318
1319 1319 /* setup major to cpu mapping */
1320 1320 name = ddi_get_name(dip);
1321 1321 major = ddi_name_to_major(name);
1322 1322 if (apix_major_to_cpu[major] == IRQ_UNINIT) {
1323 1323 mutex_enter(&apix_mutex);
1324 1324 apix_major_to_cpu[major] = cpu;
1325 1325 mutex_exit(&apix_mutex);
1326 1326 }
1327 1327 }
1328 1328
1329 1329 /*
1330 1330 * return the cpu to which this intr should be bound.
1331 1331 * Check properties or any other mechanism to see if user wants it
1332 1332 * bound to a specific CPU. If so, return the cpu id with high bit set.
1333 1333 * If not, use the policy to choose a cpu and return the id.
1334 1334 */
1335 1335 uint32_t
1336 1336 apix_bind_cpu(dev_info_t *dip)
1337 1337 {
1338 1338 int instance, instno, prop_len, bind_cpu, count;
1339 1339 uint_t i, rc;
1340 1340 major_t major;
1341 1341 char *name, *drv_name, *prop_val, *cptr;
1342 1342 char prop_name[32];
1343 1343
1344 1344 lock_set(&apix_lock);
1345 1345
1346 1346 if (apic_intr_policy == INTR_LOWEST_PRIORITY) {
1347 1347 cmn_err(CE_WARN, "apix: unsupported interrupt binding policy "
1348 1348 "LOWEST PRIORITY, use ROUND ROBIN instead");
1349 1349 apic_intr_policy = INTR_ROUND_ROBIN;
1350 1350 }
1351 1351
1352 1352 if (apic_nproc == 1) {
1353 1353 lock_clear(&apix_lock);
1354 1354 return (0);
1355 1355 }
1356 1356
1357 1357 drv_name = NULL;
1358 1358 rc = DDI_PROP_NOT_FOUND;
1359 1359 major = (major_t)-1;
1360 1360 if (dip != NULL) {
1361 1361 name = ddi_get_name(dip);
1362 1362 major = ddi_name_to_major(name);
1363 1363 drv_name = ddi_major_to_name(major);
1364 1364 instance = ddi_get_instance(dip);
1365 1365 if (apic_intr_policy == INTR_ROUND_ROBIN_WITH_AFFINITY) {
1366 1366 bind_cpu = apix_get_dev_binding(dip);
1367 1367 if (bind_cpu != IRQ_UNINIT) {
1368 1368 lock_clear(&apix_lock);
1369 1369 return (bind_cpu);
1370 1370 }
1371 1371 }
1372 1372 /*
1373 1373 * search for "drvname"_intpt_bind_cpus property first, the
1374 1374 * syntax of the property should be "a[,b,c,...]" where
1375 1375 * instance 0 binds to cpu a, instance 1 binds to cpu b,
1376 1376  		 * instance 2 binds to cpu c...
1377 1377 * ddi_getlongprop() will search /option first, then /
1378 1378 * if "drvname"_intpt_bind_cpus doesn't exist, then find
1379 1379 * intpt_bind_cpus property. The syntax is the same, and
1380 1380 * it applies to all the devices if its "drvname" specific
1381 1381 * property doesn't exist
1382 1382 */
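		/*
		 * For example (hypothetical driver name and values):
		 * mydrv_intpt_bind_cpus = "0,2,4" binds mydrv instance 0
		 * to cpu 0, instance 1 to cpu 2, instance 2 to cpu 4, and
		 * instance 3 wraps around to cpu 0 again.
		 */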
1383 1383 (void) strcpy(prop_name, drv_name);
1384 1384 (void) strcat(prop_name, "_intpt_bind_cpus");
1385 1385 rc = ddi_getlongprop(DDI_DEV_T_ANY, dip, 0, prop_name,
1386 1386 (caddr_t)&prop_val, &prop_len);
1387 1387 if (rc != DDI_PROP_SUCCESS) {
1388 1388 rc = ddi_getlongprop(DDI_DEV_T_ANY, dip, 0,
1389 1389 "intpt_bind_cpus", (caddr_t)&prop_val, &prop_len);
1390 1390 }
1391 1391 }
1392 1392 if (rc == DDI_PROP_SUCCESS) {
1393 1393 for (i = count = 0; i < (prop_len - 1); i++)
1394 1394 if (prop_val[i] == ',')
1395 1395 count++;
1396 1396 if (prop_val[i-1] != ',')
1397 1397 count++;
1398 1398 /*
1399 1399 * if somehow the binding instances defined in the
1400 1400 * property are not enough for this instno., then
1401 1401 * reuse the pattern for the next instance until
1402 1402 * it reaches the requested instno
1403 1403 */
1404 1404 instno = instance % count;
1405 1405 i = 0;
1406 1406 cptr = prop_val;
1407 1407 while (i < instno)
1408 1408 if (*cptr++ == ',')
1409 1409 i++;
1410 1410 bind_cpu = stoi(&cptr);
1411 1411 kmem_free(prop_val, prop_len);
1412 1412 /* if specific cpu is bogus, then default to cpu 0 */
1413 1413 if (bind_cpu >= apic_nproc) {
1414 1414 cmn_err(CE_WARN, "apix: %s=%s: CPU %d not present",
1415 1415 prop_name, prop_val, bind_cpu);
1416 1416 bind_cpu = 0;
1417 1417 } else {
1418 1418 /* indicate that we are bound at user request */
1419 1419 bind_cpu |= IRQ_USER_BOUND;
1420 1420 }
1421 1421 /*
1422 1422 * no need to check apic_cpus[].aci_status, if specific cpu is
1423 1423 * not up, then post_cpu_start will handle it.
1424 1424 */
1425 1425 } else {
1426 1426 bind_cpu = apic_get_next_bind_cpu();
1427 1427 }
1428 1428
1429 1429 lock_clear(&apix_lock);
1430 1430
1431 1431 return ((uint32_t)bind_cpu);
1432 1432 }
1433 1433
1434 1434 static boolean_t
1435 1435 apix_is_cpu_enabled(processorid_t cpuid)
1436 1436 {
1437 1437 apic_cpus_info_t *cpu_infop;
1438 1438
1439 1439 cpu_infop = &apic_cpus[cpuid];
1440 1440
1441 1441 if ((cpu_infop->aci_status & APIC_CPU_INTR_ENABLE) == 0)
1442 1442 return (B_FALSE);
1443 1443
1444 1444 return (B_TRUE);
1445 1445 }
1446 1446
1447 1447 /*
1448 1448 * Must be called with apix_lock held. This function can be
1449 1449 * called from above lock level by apix_intr_redistribute().
1450 1450 *
1451 1451 * Arguments:
1452 1452 * vecp : Vector to be rebound
1453 1453 * tocpu : Target cpu. IRQ_UNINIT means target is vecp->v_cpuid.
1454 1454   *	count	: Number of contiguous vectors
1455 1455 *
1456 1456 * Return new vector being bound to
1457 1457 */
1458 1458 apix_vector_t *
1459 1459 apix_rebind(apix_vector_t *vecp, processorid_t newcpu, int count)
1460 1460 {
1461 1461 apix_vector_t *newp, *oldp;
1462 1462 processorid_t oldcpu = vecp->v_cpuid;
1463 1463 uchar_t newvec, oldvec = vecp->v_vector;
1464 1464 int i;
1465 1465
1466 1466 ASSERT(LOCK_HELD(&apix_lock) && count > 0);
1467 1467
1468 1468 if (!apix_is_cpu_enabled(newcpu))
1469 1469 return (NULL);
1470 1470
1471 1471 if (vecp->v_cpuid == newcpu) /* rebind to the same cpu */
1472 1472 return (vecp);
1473 1473
1474 1474 APIX_ENTER_CPU_LOCK(oldcpu);
1475 1475 APIX_ENTER_CPU_LOCK(newcpu);
1476 1476
1477 1477 /* allocate vector */
1478 1478 if (count == 1)
1479 1479 newp = apix_alloc_vector_oncpu(newcpu, NULL, 0, vecp->v_type);
1480 1480 else {
1481 1481 ASSERT(vecp->v_type == APIX_TYPE_MSI);
1482 1482 newp = apix_alloc_nvectors_oncpu(newcpu, NULL, 0, count,
1483 1483 vecp->v_type);
1484 1484 }
1485 1485 if (newp == NULL) {
1486 1486 APIX_LEAVE_CPU_LOCK(newcpu);
1487 1487 APIX_LEAVE_CPU_LOCK(oldcpu);
1488 1488 return (NULL);
1489 1489 }
1490 1490
1491 1491 newvec = newp->v_vector;
1492 1492 apix_dup_vectors(vecp, newp, count);
1493 1493
1494 1494 APIX_LEAVE_CPU_LOCK(newcpu);
1495 1495 APIX_LEAVE_CPU_LOCK(oldcpu);
1496 1496
1497 1497 if (!DDI_INTR_IS_MSI_OR_MSIX(vecp->v_type)) {
1498 1498 ASSERT(count == 1);
1499 1499 if (apix_intx_rebind(vecp->v_inum, newcpu, newvec) != 0) {
1500 1500 struct autovec *avp;
1501 1501 int inum;
1502 1502
1503 1503 /* undo duplication */
1504 1504 APIX_ENTER_CPU_LOCK(oldcpu);
1505 1505 APIX_ENTER_CPU_LOCK(newcpu);
1506 1506 for (avp = newp->v_autovect; avp != NULL;
1507 1507 avp = avp->av_link) {
1508 1508 if (avp->av_dip != NULL) {
1509 1509 inum = GET_INTR_INUM(avp->av_intr_id);
1510 1510 apix_set_dev_map(vecp, avp->av_dip,
1511 1511 inum);
1512 1512 }
1513 1513 apix_remove_av(newp, avp);
1514 1514 }
1515 1515 apix_cleanup_vector(newp);
1516 1516 APIX_LEAVE_CPU_LOCK(newcpu);
1517 1517 APIX_LEAVE_CPU_LOCK(oldcpu);
1518 1518 APIC_VERBOSE(REBIND, (CE_CONT, "apix: rebind fixed "
1519 1519 "interrupt 0x%x to cpu %d failed\n",
1520 1520 vecp->v_inum, newcpu));
1521 1521 return (NULL);
1522 1522 }
1523 1523
1524 1524 APIX_ENTER_CPU_LOCK(oldcpu);
1525 1525 (void) apix_obsolete_vector(vecp);
1526 1526 APIX_LEAVE_CPU_LOCK(oldcpu);
1527 1527 APIC_VERBOSE(REBIND, (CE_CONT, "apix: rebind fixed interrupt"
1528 1528 " 0x%x/0x%x to 0x%x/0x%x\n",
1529 1529 oldcpu, oldvec, newcpu, newvec));
1530 1530 return (newp);
1531 1531 }
1532 1532
1533 1533 for (i = 0; i < count; i++) {
1534 1534 oldp = xv_vector(oldcpu, oldvec + i);
1535 1535 newp = xv_vector(newcpu, newvec + i);
1536 1536
1537 1537 if (newp->v_share > 0) {
1538 1538 APIX_SET_REBIND_INFO(oldp, newp);
1539 1539
1540 1540 apix_enable_vector(newp);
1541 1541
1542 1542 APIX_CLR_REBIND_INFO();
1543 1543 }
1544 1544
1545 1545 APIX_ENTER_CPU_LOCK(oldcpu);
1546 1546 (void) apix_obsolete_vector(oldp);
1547 1547 APIX_LEAVE_CPU_LOCK(oldcpu);
1548 1548 }
1549 1549 APIC_VERBOSE(REBIND, (CE_CONT, "apix: rebind vector 0x%x/0x%x "
1550 1550 "to 0x%x/0x%x, count=%d\n",
1551 1551 oldcpu, oldvec, newcpu, newvec, count));
1552 1552
1553 1553 return (xv_vector(newcpu, newvec));
1554 1554 }
1555 1555
1556 1556 /*
1557 1557   * Scenarios include:
1558 1558 * a. add_avintr() is called before irqp initialized (legacy)
1559 1559 * b. irqp is initialized, vector is not allocated (fixed interrupts)
1560 1560 * c. irqp is initialized, vector is allocated (shared interrupts)
1561 1561 */
1562 1562 apix_vector_t *
1563 1563 apix_alloc_intx(dev_info_t *dip, int inum, int irqno)
1564 1564 {
1565 1565 apic_irq_t *irqp;
1566 1566 apix_vector_t *vecp;
1567 1567
1568 1568 /*
1569 1569 * Allocate IRQ. Caller is later responsible for the
1570 1570 * initialization
1571 1571 */
1572 1572 mutex_enter(&airq_mutex);
1573 1573 if ((irqp = apic_irq_table[irqno]) == NULL) {
1574 1574 /* allocate irq */
1575 1575 irqp = kmem_zalloc(sizeof (apic_irq_t), KM_SLEEP);
1576 1576 irqp->airq_mps_intr_index = FREE_INDEX;
1577 1577 apic_irq_table[irqno] = irqp;
1578 1578 }
1579 1579 if (irqp->airq_mps_intr_index == FREE_INDEX) {
1580 1580 irqp->airq_mps_intr_index = DEFAULT_INDEX;
1581 1581 irqp->airq_cpu = IRQ_UNINIT;
1582 1582 irqp->airq_origirq = (uchar_t)irqno;
1583 1583 }
1584 1584
1585 1585 mutex_exit(&airq_mutex);
1586 1586
1587 1587 /*
1588 1588 * allocate vector
1589 1589 */
1590 1590 if (irqp->airq_cpu == IRQ_UNINIT) {
1591 1591 uint32_t bindcpu, cpuid;
1592 1592
1593 1593 /* select cpu by system policy */
1594 1594 bindcpu = apix_bind_cpu(dip);
1595 1595 cpuid = bindcpu & ~IRQ_USER_BOUND;
1596 1596
1597 1597 /* allocate vector */
1598 1598 APIX_ENTER_CPU_LOCK(cpuid);
1599 1599
1600 1600 if ((vecp = apix_alloc_vector_oncpu(bindcpu, dip, inum,
1601 1601 APIX_TYPE_FIXED)) == NULL) {
1602 1602 cmn_err(CE_WARN, "No interrupt vector for irq %x",
1603 1603 irqno);
1604 1604 APIX_LEAVE_CPU_LOCK(cpuid);
1605 1605 return (NULL);
1606 1606 }
1607 1607 vecp->v_inum = irqno;
1608 1608 vecp->v_flags |= APIX_VECT_MASKABLE;
1609 1609
1610 1610 apix_intx_set_vector(irqno, vecp->v_cpuid, vecp->v_vector);
1611 1611
1612 1612 APIX_LEAVE_CPU_LOCK(cpuid);
1613 1613 } else {
1614 1614 vecp = xv_vector(irqp->airq_cpu, irqp->airq_vector);
1615 1615 ASSERT(!IS_VECT_FREE(vecp));
1616 1616
1617 1617 if (dip != NULL)
1618 1618 apix_set_dev_map(vecp, dip, inum);
1619 1619 }
1620 1620
1621 1621 if ((dip != NULL) &&
1622 1622 (apic_intr_policy == INTR_ROUND_ROBIN_WITH_AFFINITY) &&
1623 1623 ((vecp->v_flags & APIX_VECT_USER_BOUND) == 0))
1624 1624 apix_set_dev_binding(dip, vecp->v_cpuid);
1625 1625
1626 1626 apix_dprint_vector(vecp, dip, 1);
1627 1627
1628 1628 return (vecp);
1629 1629 }
1630 1630
1631 1631 int
1632 1632 apix_alloc_msi(dev_info_t *dip, int inum, int count, int behavior)
1633 1633 {
1634 1634 int i, cap_ptr, rcount = count;
1635 1635 apix_vector_t *vecp;
1636 1636 processorid_t bindcpu, cpuid;
1637 1637 ushort_t msi_ctrl;
1638 1638 ddi_acc_handle_t handle;
1639 1639
1640 1640 DDI_INTR_IMPLDBG((CE_CONT, "apix_alloc_msi_vectors: dip=0x%p "
1641 1641 "inum=0x%x count=0x%x behavior=%d\n",
1642 1642 (void *)dip, inum, count, behavior));
1643 1643
1644 1644 if (count > 1) {
1645 1645 if (behavior == DDI_INTR_ALLOC_STRICT &&
1646 1646 apic_multi_msi_enable == 0)
1647 1647 return (0);
1648 1648 if (apic_multi_msi_enable == 0)
1649 1649 count = 1;
1650 1650 }
1651 1651
1652 1652 /* Check whether it supports per-vector masking */
1653 1653 cap_ptr = i_ddi_get_msi_msix_cap_ptr(dip);
1654 1654 handle = i_ddi_get_pci_config_handle(dip);
1655 1655 msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
1656 1656
1657 1657 /* bind to cpu */
1658 1658 bindcpu = apix_bind_cpu(dip);
1659 1659 cpuid = bindcpu & ~IRQ_USER_BOUND;
1660 1660
1661 1661 /* if not ISP2, then round it down */
1662 1662 if (!ISP2(rcount))
1663 1663 rcount = 1 << (highbit(rcount) - 1);
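	/*
	 * E.g. an (assumed) request of rcount = 6 rounds down to
	 * 1 << (highbit(6) - 1) = 4, since MSI grants only
	 * power-of-two message counts.
	 */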
1664 1664
1665 1665 APIX_ENTER_CPU_LOCK(cpuid);
1666 1666 for (vecp = NULL; rcount > 0; rcount >>= 1) {
1667 1667 vecp = apix_alloc_nvectors_oncpu(bindcpu, dip, inum, rcount,
1668 1668 APIX_TYPE_MSI);
1669 1669 if (vecp != NULL || behavior == DDI_INTR_ALLOC_STRICT)
1670 1670 break;
1671 1671 }
1672 1672 for (i = 0; vecp && i < rcount; i++)
1673 1673 xv_vector(vecp->v_cpuid, vecp->v_vector + i)->v_flags |=
1674 1674 (msi_ctrl & PCI_MSI_PVM_MASK) ? APIX_VECT_MASKABLE : 0;
1675 1675 APIX_LEAVE_CPU_LOCK(cpuid);
1676 1676 if (vecp == NULL) {
1677 1677 APIC_VERBOSE(INTR, (CE_CONT,
1678 1678 "apix_alloc_msi: no %d cont vectors found on cpu 0x%x\n",
1679 1679 count, bindcpu));
1680 1680 return (0);
1681 1681 }
1682 1682
1683 1683 /* major to cpu binding */
1684 1684 if ((apic_intr_policy == INTR_ROUND_ROBIN_WITH_AFFINITY) &&
1685 1685 ((vecp->v_flags & APIX_VECT_USER_BOUND) == 0))
1686 1686 apix_set_dev_binding(dip, vecp->v_cpuid);
1687 1687
1688 1688 apix_dprint_vector(vecp, dip, rcount);
1689 1689
1690 1690 return (rcount);
1691 1691 }
1692 1692
1693 1693 int
1694 1694 apix_alloc_msix(dev_info_t *dip, int inum, int count, int behavior)
1695 1695 {
1696 1696 apix_vector_t *vecp;
1697 1697 processorid_t bindcpu, cpuid;
1698 1698 int i;
1699 1699
1700 1700 for (i = 0; i < count; i++) {
1701 1701 /* select cpu by system policy */
1702 1702 bindcpu = apix_bind_cpu(dip);
1703 1703 cpuid = bindcpu & ~IRQ_USER_BOUND;
1704 1704
1705 1705 /* allocate vector */
1706 1706 APIX_ENTER_CPU_LOCK(cpuid);
1707 1707 if ((vecp = apix_alloc_vector_oncpu(bindcpu, dip, inum + i,
1708 1708 APIX_TYPE_MSIX)) == NULL) {
1709 1709 APIX_LEAVE_CPU_LOCK(cpuid);
1710 1710 APIC_VERBOSE(INTR, (CE_CONT, "apix_alloc_msix: "
1711 1711 "allocate msix for device dip=%p, inum=%d on"
1712 1712 " cpu %d failed", (void *)dip, inum + i, bindcpu));
1713 1713 break;
1714 1714 }
1715 1715 vecp->v_flags |= APIX_VECT_MASKABLE;
1716 1716 APIX_LEAVE_CPU_LOCK(cpuid);
1717 1717
1718 1718 /* major to cpu mapping */
1719 1719 if ((i == 0) &&
1720 1720 (apic_intr_policy == INTR_ROUND_ROBIN_WITH_AFFINITY) &&
1721 1721 ((vecp->v_flags & APIX_VECT_USER_BOUND) == 0))
1722 1722 apix_set_dev_binding(dip, vecp->v_cpuid);
1723 1723
1724 1724 apix_dprint_vector(vecp, dip, 1);
1725 1725 }
1726 1726
1727 1727 if (i < count && behavior == DDI_INTR_ALLOC_STRICT) {
1728 1728 APIC_VERBOSE(INTR, (CE_WARN, "apix_alloc_msix: "
1729 1729 "strictly allocate %d vectors failed, got %d\n",
1730 1730 count, i));
1731 1731 apix_free_vectors(dip, inum, i, APIX_TYPE_MSIX);
1732 1732 i = 0;
1733 1733 }
1734 1734
1735 1735 return (i);
1736 1736 }
1737 1737
1738 1738 /*
1739 1739 * A rollback free for vectors allocated by apix_alloc_xxx().
1740 1740 */
1741 1741 void
1742 1742 apix_free_vectors(dev_info_t *dip, int inum, int count, int type)
1743 1743 {
1744 1744 int i, cpuid;
1745 1745 apix_vector_t *vecp;
1746 1746
1747 1747 DDI_INTR_IMPLDBG((CE_CONT, "apix_free_vectors: dip: %p inum: %x "
1748 1748 "count: %x type: %x\n",
1749 1749 (void *)dip, inum, count, type));
1750 1750
1751 1751 lock_set(&apix_lock);
1752 1752
1753 1753 for (i = 0; i < count; i++, inum++) {
1754 1754 if ((vecp = apix_get_dev_map(dip, inum, type)) == NULL) {
1755 1755 lock_clear(&apix_lock);
1756 1756 DDI_INTR_IMPLDBG((CE_CONT, "apix_free_vectors: "
1757 1757 "dip=0x%p inum=0x%x type=0x%x apix_find_intr() "
1758 1758 "failed\n", (void *)dip, inum, type));
1759 1759 continue;
1760 1760 }
1761 1761
1762 1762 APIX_ENTER_CPU_LOCK(vecp->v_cpuid);
1763 1763 cpuid = vecp->v_cpuid;
1764 1764
1765 1765 DDI_INTR_IMPLDBG((CE_CONT, "apix_free_vectors: "
1766 1766 "dip=0x%p inum=0x%x type=0x%x vector 0x%x (share %d)\n",
1767 1767 (void *)dip, inum, type, vecp->v_vector, vecp->v_share));
1768 1768
1769 1769 /* tear down device interrupt to vector mapping */
1770 1770 apix_clear_dev_map(dip, inum, type);
1771 1771
1772 1772 if (vecp->v_type == APIX_TYPE_FIXED) {
1773 1773 if (vecp->v_share > 0) { /* share IRQ line */
1774 1774 APIX_LEAVE_CPU_LOCK(cpuid);
1775 1775 continue;
1776 1776 }
1777 1777
1778 1778 /* Free apic_irq_table entry */
1779 1779 apix_intx_free(vecp->v_inum);
1780 1780 }
1781 1781
1782 1782 /* free vector */
1783 1783 apix_cleanup_vector(vecp);
1784 1784
1785 1785 APIX_LEAVE_CPU_LOCK(cpuid);
1786 1786 }
1787 1787
1788 1788 lock_clear(&apix_lock);
1789 1789 }
1790 1790
1791 1791 /*
1792 1792 * Must be called with apix_lock held
1793 1793 */
1794 1794 apix_vector_t *
1795 1795 apix_setup_io_intr(apix_vector_t *vecp)
1796 1796 {
1797 1797 processorid_t bindcpu;
1798 1798 int ret;
1799 1799
1800 1800 ASSERT(LOCK_HELD(&apix_lock));
1801 1801
1802 1802 /*
1803 1803  	 * If interrupts are enabled on the CPU, program the IOAPIC RDT
1804 1804  	 * entry or MSI/X address/data to enable the interrupt.
1805 1805 */
1806 1806 if (apix_is_cpu_enabled(vecp->v_cpuid)) {
1807 1807 apix_enable_vector(vecp);
1808 1808 return (vecp);
1809 1809 }
1810 1810
1811 1811 /*
1812 1812 * CPU is not up or interrupts are disabled. Fall back to the
1813 1813  	 * first available CPU.
1814 1814 */
1815 1815 bindcpu = apic_find_cpu(APIC_CPU_INTR_ENABLE);
1816 1816
1817 1817 if (vecp->v_type == APIX_TYPE_MSI)
1818 1818 return (apix_grp_set_cpu(vecp, bindcpu, &ret));
1819 1819
1820 1820 return (apix_set_cpu(vecp, bindcpu, &ret));
1821 1821 }
1822 1822
1823 1823 /*
1824 1824 * For interrupts which call add_avintr() before apic is initialized.
1825 1825 * ioapix_setup_intr() will
1826 1826 * - allocate vector
1827 1827 * - copy over ISR
1828 1828 */
1829 1829 static void
1830 1830 ioapix_setup_intr(int irqno, iflag_t *flagp)
1831 1831 {
1832 1832 extern struct av_head autovect[];
1833 1833 apix_vector_t *vecp;
1834 1834 apic_irq_t *irqp;
1835 1835 uchar_t ioapicindex, ipin;
1836 1836 ulong_t iflag;
1837 1837 struct autovec *avp;
1838 1838
1839 1839 ioapicindex = acpi_find_ioapic(irqno);
1840 1840 ASSERT(ioapicindex != 0xFF);
1841 1841 ipin = irqno - apic_io_vectbase[ioapicindex];
1842 1842
1843 1843 mutex_enter(&airq_mutex);
1844 1844 irqp = apic_irq_table[irqno];
1845 1845
1846 1846 /*
1847 1847 * The irq table entry shouldn't exist unless the interrupts are shared.
1848 1848 * In that case, make sure it matches what we would initialize it to.
1849 1849 */
1850 1850 if (irqp != NULL) {
1851 1851 ASSERT(irqp->airq_mps_intr_index == ACPI_INDEX);
1852 1852 ASSERT(irqp->airq_intin_no == ipin &&
1853 1853 irqp->airq_ioapicindex == ioapicindex);
1854 1854 vecp = xv_vector(irqp->airq_cpu, irqp->airq_vector);
1855 1855 ASSERT(!IS_VECT_FREE(vecp));
1856 1856 mutex_exit(&airq_mutex);
1857 1857 } else {
1858 1858 irqp = kmem_zalloc(sizeof (apic_irq_t), KM_SLEEP);
1859 1859
1860 1860 irqp->airq_cpu = IRQ_UNINIT;
1861 1861 irqp->airq_origirq = (uchar_t)irqno;
1862 1862 irqp->airq_mps_intr_index = ACPI_INDEX;
1863 1863 irqp->airq_ioapicindex = ioapicindex;
1864 1864 irqp->airq_intin_no = ipin;
1865 1865 irqp->airq_iflag = *flagp;
1866 1866 irqp->airq_share++;
1867 1867
1868 1868 apic_irq_table[irqno] = irqp;
1869 1869 mutex_exit(&airq_mutex);
1870 1870
1871 1871 vecp = apix_alloc_intx(NULL, 0, irqno);
1872 1872 }
1873 1873
1874 1874 /* copy over autovect */
1875 1875 for (avp = autovect[irqno].avh_link; avp; avp = avp->av_link)
1876 1876 apix_insert_av(vecp, avp->av_intr_id, avp->av_vector,
1877 1877 avp->av_intarg1, avp->av_intarg2, avp->av_ticksp,
1878 1878 avp->av_prilevel, avp->av_dip);
1879 1879
1880 1880 /* Program I/O APIC */
1881 1881 iflag = intr_clear();
1882 1882 lock_set(&apix_lock);
1883 1883
1884 1884 (void) apix_setup_io_intr(vecp);
1885 1885
1886 1886 lock_clear(&apix_lock);
1887 1887 intr_restore(iflag);
1888 1888
1889 1889 APIC_VERBOSE_IOAPIC((CE_CONT, "apix: setup ioapic, irqno %x "
1890 1890 "(ioapic %x, ipin %x) is bound to cpu %x, vector %x\n",
1891 1891 irqno, ioapicindex, ipin, irqp->airq_cpu, irqp->airq_vector));
1892 1892 }
1893 1893
1894 1894 void
1895 1895 ioapix_init_intr(int mask_apic)
1896 1896 {
1897 1897 int ioapicindex;
1898 1898 int i, j;
1899 1899
1900 1900 /* mask interrupt vectors */
1901 1901 for (j = 0; j < apic_io_max && mask_apic; j++) {
1902 1902 int intin_max;
1903 1903
1904 1904 ioapicindex = j;
1905 1905 /* Bits 23-16 define the maximum redirection entries */
1906 1906 intin_max = (ioapic_read(ioapicindex, APIC_VERS_CMD) >> 16)
1907 1907 & 0xff;
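		/*
		 * E.g. an (assumed) APIC_VERS_CMD value of 0x00170011
		 * yields intin_max = 0x17, i.e. redirection entries 0
		 * through 23, typical of a 24-pin I/O APIC.
		 */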
1908 1908 for (i = 0; i <= intin_max; i++)
1909 1909 ioapic_write(ioapicindex, APIC_RDT_CMD + 2 * i,
1910 1910 AV_MASK);
1911 1911 }
1912 1912
1913 1913 /*
1914 1914 * Hack alert: deal with ACPI SCI interrupt chicken/egg here
1915 1915 */
1916 1916 if (apic_sci_vect > 0)
1917 1917 ioapix_setup_intr(apic_sci_vect, &apic_sci_flags);
1918 1918
1919 1919 /*
1920 1920 * Hack alert: deal with ACPI HPET interrupt chicken/egg here.
1921 1921 */
1922 1922 if (apic_hpet_vect > 0)
1923 1923 ioapix_setup_intr(apic_hpet_vect, &apic_hpet_flags);
1924 1924 }
[ 1875 lines elided ]