Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/avintr.c
+++ new/usr/src/uts/common/io/avintr.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /*
26 26 * Autovectored Interrupt Configuration and Deconfiguration
27 27 */
28 28
29 29 #include <sys/param.h>
30 30 #include <sys/cmn_err.h>
31 31 #include <sys/trap.h>
32 32 #include <sys/t_lock.h>
33 33 #include <sys/avintr.h>
34 34 #include <sys/kmem.h>
35 35 #include <sys/machlock.h>
36 36 #include <sys/systm.h>
37 37 #include <sys/machsystm.h>
38 38 #include <sys/sunddi.h>
39 39 #include <sys/x_call.h>
40 40 #include <sys/cpuvar.h>
41 41 #include <sys/atomic.h>
42 42 #include <sys/smp_impldefs.h>
43 43 #include <sys/sdt.h>
44 44 #include <sys/stack.h>
45 45 #include <sys/ddi_impldefs.h>
46 46 #ifdef __xpv
47 47 #include <sys/evtchn_impl.h>
48 48 #endif
49 49
50 50 typedef struct av_softinfo {
51 51 cpuset_t av_pending; /* pending bitmasks */
52 52 } av_softinfo_t;
53 53
54 54 static void insert_av(void *intr_id, struct av_head *vectp, avfunc f,
55 55 caddr_t arg1, caddr_t arg2, uint64_t *ticksp, int pri_level,
56 56 dev_info_t *dip);
57 57 static void remove_av(void *intr_id, struct av_head *vectp, avfunc f,
58 58 int pri_level, int vect);
59 59
60 60 /*
61 61 * Arrange for a driver to be called when a particular
62 62 * auto-vectored interrupt occurs.
63 63 * NOTE: if a device can generate interrupts on more than
64 64 * one level, or if a driver services devices that interrupt
65 65 * on more than one level, then the driver should install
66 66 * itself on each of those levels.
67 67 */
68 68 static char badsoft[] =
69 69 "add_avintr: bad soft interrupt level %d for driver '%s'\n";
70 70 static char multilevel[] =
71 71 "!IRQ%d is being shared by drivers with different interrupt levels.\n"
72 72 "This may result in reduced system performance.";
73 73 static char multilevel2[] =
74 74 "Cannot register interrupt for '%s' device at IPL %d because it\n"
75 75 "conflicts with another device using the same vector %d with an IPL\n"
76 76 "of %d. Reconfigure the conflicting devices to use different vectors.";
77 77
78 78 #ifdef __xpv
79 79 #define MAX_VECT NR_IRQS
80 80 #else
81 81 #define MAX_VECT 256
82 82 #endif
83 83
↓ open down ↓ |
83 lines elided |
↑ open up ↑ |
84 84 struct autovec *nmivect = NULL;
85 85 struct av_head autovect[MAX_VECT];
86 86 struct av_head softvect[LOCK_LEVEL + 1];
87 87 kmutex_t av_lock;
88 88 /*
89 89 * These are software interrupt handlers dedicated to ddi timer.
90 90 * The interrupt levels up to 10 are supported, but high interrupts
91 91 * must not be used there.
92 92 */
ddi_softint_hdl_impl_t softlevel_hdl[DDI_IPL_10] = {
	/*
	 * Fully-braced zero initializers: the {{NULL}} nesting mirrors the
	 * embedded aggregate member so the file compiles without
	 * -Wno-missing-braces.  NOTE(review): field order assumed to match
	 * ddi_softint_hdl_impl_t -- confirm against sys/ddi_impldefs.h.
	 */
	{NULL, 0, {{NULL}}, NULL, NULL, NULL, NULL, NULL},	/* level 1 */
	{NULL, 0, {{NULL}}, NULL, NULL, NULL, NULL, NULL},	/* level 2 */
	{NULL, 0, {{NULL}}, NULL, NULL, NULL, NULL, NULL},	/* level 3 */
	{NULL, 0, {{NULL}}, NULL, NULL, NULL, NULL, NULL},	/* level 4 */
	{NULL, 0, {{NULL}}, NULL, NULL, NULL, NULL, NULL},	/* level 5 */
	{NULL, 0, {{NULL}}, NULL, NULL, NULL, NULL, NULL},	/* level 6 */
	{NULL, 0, {{NULL}}, NULL, NULL, NULL, NULL, NULL},	/* level 7 */
	{NULL, 0, {{NULL}}, NULL, NULL, NULL, NULL, NULL},	/* level 8 */
	{NULL, 0, {{NULL}}, NULL, NULL, NULL, NULL, NULL},	/* level 9 */
	{NULL, 0, {{NULL}}, NULL, NULL, NULL, NULL, NULL},	/* level 10 */
};
/* dedicated handle for the level-1 softint raised by siron()/kdi_siron() */
ddi_softint_hdl_impl_t softlevel1_hdl =
	{NULL, 0, {{NULL}}, NULL, NULL, NULL, NULL, NULL};
107 107
108 108 /*
109 109 * clear/check softint pending flag corresponding for
110 110 * the current CPU
111 111 */
void
av_clear_softint_pending(av_softinfo_t *infop)
{
	/* each CPU owns one bit in av_pending; clear only ours */
	CPUSET_ATOMIC_DEL(infop->av_pending, CPU->cpu_seqid);
}
117 117
118 118 boolean_t
119 119 av_check_softint_pending(av_softinfo_t *infop, boolean_t check_all)
120 120 {
121 121 if (check_all)
122 122 return (!CPUSET_ISNULL(infop->av_pending));
123 123 else
124 124 return (CPU_IN_SET(infop->av_pending, CPU->cpu_seqid) != 0);
125 125 }
126 126
127 127 /*
128 128 * This is the wrapper function which is generally used to set a softint
129 129 * pending
130 130 */
void
av_set_softint_pending(int pri, av_softinfo_t *infop)
{
	/* traceable wrapper; kmdb uses the kdi_ entry point directly */
	kdi_av_set_softint_pending(pri, infop);
}
136 136
137 137 /*
138 138 * This is kmdb's private entry point to setsoftint called from kdi_siron
139 139 * It first sets our av softint pending bit for the current CPU,
140 140 * then it sets the CPU softint pending bit for pri.
141 141 */
void
kdi_av_set_softint_pending(int pri, av_softinfo_t *infop)
{
	/* mark this handler pending on the current CPU first ... */
	CPUSET_ATOMIC_ADD(infop->av_pending, CPU->cpu_seqid);

	/* ... then raise pri in the CPU-wide softint pending mask */
	atomic_or_32((uint32_t *)&CPU->cpu_softinfo.st_pending, 1 << pri);
}
149 149
150 150 /*
151 151 * register nmi interrupt routine. The first arg is used only to order
152 152 * various nmi interrupt service routines in the chain. Higher lvls will
153 153 * be called first
154 154 */
/*
 * Returns 1 on success, 0 on failure (null handler or duplicate
 * registration).  The chain is kept sorted by descending lvl.
 */
int
add_nmintr(int lvl, avfunc nmintr, char *name, caddr_t arg)
{
	struct autovec *mem;
	struct autovec *p, *prev = NULL;

	if (nmintr == NULL) {
		printf("Attempt to add null vect for %s on nmi\n", name);
		return (0);

	}

	/* build the new entry before taking the lock (KM_SLEEP may block) */
	mem = kmem_zalloc(sizeof (struct autovec), KM_SLEEP);
	mem->av_vector = nmintr;
	mem->av_intarg1 = arg;
	mem->av_intarg2 = NULL;
	mem->av_intr_id = NULL;
	mem->av_prilevel = lvl;
	mem->av_dip = NULL;
	mem->av_link = NULL;

	mutex_enter(&av_lock);

	if (!nmivect) {
		nmivect = mem;
		mutex_exit(&av_lock);
		return (1);
	}
	/* find where it goes in list */
	for (p = nmivect; p != NULL; p = p->av_link) {
		if (p->av_vector == nmintr && p->av_intarg1 == arg) {
			/*
			 * already in list
			 * So? Somebody added the same interrupt twice.
			 */
			cmn_err(CE_WARN, "Driver already registered '%s'",
			    name);
			kmem_free(mem, sizeof (struct autovec));
			mutex_exit(&av_lock);
			return (0);
		}
		if (p->av_prilevel < lvl) {
			/* insert before the first lower-priority entry */
			if (p == nmivect) {	/* it's at head of list */
				mem->av_link = p;
				nmivect = mem;
			} else {
				mem->av_link = p;
				prev->av_link = mem;
			}
			mutex_exit(&av_lock);
			return (1);
		}
		prev = p;

	}
	/* didn't find it, add it to the end */
	prev->av_link = mem;
	mutex_exit(&av_lock);
	return (1);

}
216 216
217 217 /*
218 218 * register a hardware interrupt handler.
219 219 *
220 220 * The autovect data structure only supports globally 256 interrupts.
221 221 * In order to support 256 * #LocalAPIC interrupts, a new PSM module
222 222 * apix is introduced. It defines PSM private data structures for the
223 223 * interrupt handlers. The PSM module initializes addintr to a PSM
224 224 * private function so that it could override add_avintr() to operate
225 225 * on its private data structures.
226 226 */
/*
 * Returns 1 on success, 0 on failure (null handler, or an IPL conflict
 * with handlers already sharing this vector).
 */
int
add_avintr(void *intr_id, int lvl, avfunc xxintr, char *name, int vect,
    caddr_t arg1, caddr_t arg2, uint64_t *ticksp, dev_info_t *dip)
{
	struct av_head *vecp = (struct av_head *)0;
	avfunc f;
	int s, vectindex;	/* save old spl value */
	ushort_t hi_pri;

	/* PSM override (e.g. apix) -- see block comment above */
	if (addintr) {
		return ((*addintr)(intr_id, lvl, xxintr, name, vect,
		    arg1, arg2, ticksp, dip));
	}

	if ((f = xxintr) == NULL) {
		printf("Attempt to add null vect for %s on vector %d\n",
		    name, vect);
		return (0);

	}
	vectindex = vect % MAX_VECT;

	vecp = &autovect[vectindex];

	/*
	 * "hi_pri == 0" implies all entries on list are "unused",
	 * which means that it's OK to just insert this one.
	 */
	hi_pri = vecp->avh_hi_pri;
	if (vecp->avh_link && (hi_pri != 0)) {
		/* refuse to mix above- and below-LOCK_LEVEL IPLs on a vector */
		if (((hi_pri > LOCK_LEVEL) && (lvl < LOCK_LEVEL)) ||
		    ((hi_pri < LOCK_LEVEL) && (lvl > LOCK_LEVEL))) {
			cmn_err(CE_WARN, multilevel2, name, lvl, vect,
			    hi_pri);
			return (0);
		}
		if ((vecp->avh_lo_pri != lvl) || (hi_pri != lvl))
			cmn_err(CE_NOTE, multilevel, vect);
	}

	insert_av(intr_id, vecp, f, arg1, arg2, ticksp, lvl, dip);
	s = splhi();
	/*
	 * do what ever machine specific things are necessary
	 * to set priority level (e.g. set picmasks)
	 */
	mutex_enter(&av_lock);
	(*addspl)(vect, lvl, vecp->avh_lo_pri, vecp->avh_hi_pri);
	mutex_exit(&av_lock);
	splx(s);
	return (1);

}
280 280
281 281 void
282 282 update_avsoftintr_args(void *intr_id, int lvl, caddr_t arg2)
283 283 {
284 284 struct autovec *p;
285 285 struct autovec *target = NULL;
286 286 struct av_head *vectp = (struct av_head *)&softvect[lvl];
287 287
288 288 for (p = vectp->avh_link; p && p->av_vector; p = p->av_link) {
289 289 if (p->av_intr_id == intr_id) {
290 290 target = p;
291 291 break;
292 292 }
293 293 }
294 294
295 295 if (target == NULL)
296 296 return;
297 297 target->av_intarg2 = arg2;
298 298 }
299 299
300 300 /*
301 301 * Register a software interrupt handler
302 302 */
/*
 * Returns 1 on success, 0 on failure.  Valid soft levels are
 * 1..LOCK_LEVEL; platforms that map a soft level to a hardware vector
 * (slvltovect() != -1) register through add_avintr() instead.
 */
int
add_avsoftintr(void *intr_id, int lvl, avfunc xxintr, char *name,
    caddr_t arg1, caddr_t arg2)
{
	int slvl;
	ddi_softint_hdl_impl_t *hdlp = (ddi_softint_hdl_impl_t *)intr_id;

	if ((slvl = slvltovect(lvl)) != -1)
		return (add_avintr(intr_id, lvl, xxintr,
		    name, slvl, arg1, arg2, NULL, NULL));

	if (intr_id == NULL) {
		printf("Attempt to add null intr_id for %s on level %d\n",
		    name, lvl);
		return (0);
	}

	if (xxintr == NULL) {
		printf("Attempt to add null handler for %s on level %d\n",
		    name, lvl);
		return (0);
	}

	if (lvl <= 0 || lvl > LOCK_LEVEL) {
		printf(badsoft, lvl, name);
		return (0);
	}

	/* allocate per-handler pending state on first registration */
	if (hdlp->ih_pending == NULL) {
		hdlp->ih_pending =
		    kmem_zalloc(sizeof (av_softinfo_t), KM_SLEEP);
	}

	insert_av(intr_id, &softvect[lvl], xxintr, arg1, arg2, NULL, lvl, NULL);

	return (1);
}
340 340
341 341 /*
342 342 * insert an interrupt vector into chain by its priority from high
343 343 * to low
344 344 */
static void
insert_av(void *intr_id, struct av_head *vectp, avfunc f, caddr_t arg1,
    caddr_t arg2, uint64_t *ticksp, int pri_level, dev_info_t *dip)
{
	/*
	 * Protect rewrites of the list
	 */
	struct autovec *p, *prep, *mem;

	/* preallocate before taking av_lock (KM_SLEEP may block) */
	mem = kmem_zalloc(sizeof (struct autovec), KM_SLEEP);
	mem->av_vector = f;
	mem->av_intarg1 = arg1;
	mem->av_intarg2 = arg2;
	mem->av_ticksp = ticksp;
	mem->av_intr_id = intr_id;
	mem->av_prilevel = pri_level;
	mem->av_dip = dip;
	mem->av_link = NULL;

	mutex_enter(&av_lock);

	if (vectp->avh_link == NULL) {	/* Nothing on list - put it at head */
		vectp->avh_link = mem;
		vectp->avh_hi_pri = vectp->avh_lo_pri = (ushort_t)pri_level;

		mutex_exit(&av_lock);
		return;
	}

	/* find where it goes in list (kept sorted by descending priority) */
	prep = NULL;
	for (p = vectp->avh_link; p != NULL; p = p->av_link) {
		if (p->av_vector && p->av_prilevel <= pri_level)
			break;
		prep = p;
	}
	if (prep != NULL) {
		if (prep->av_vector == NULL) {	/* freed struct available */
			/* recycle the vacated slot instead of linking mem */
			p = prep;
			p->av_intarg1 = arg1;
			p->av_intarg2 = arg2;
			p->av_ticksp = ticksp;
			p->av_intr_id = intr_id;
			p->av_prilevel = pri_level;
			p->av_dip = dip;
			if (pri_level > (int)vectp->avh_hi_pri) {
				vectp->avh_hi_pri = (ushort_t)pri_level;
			}
			if (pri_level < (int)vectp->avh_lo_pri) {
				vectp->avh_lo_pri = (ushort_t)pri_level;
			}
			/*
			 * To prevent calling service routine before args
			 * and ticksp are ready fill in vector last.
			 */
			p->av_vector = f;
			mutex_exit(&av_lock);
			kmem_free(mem, sizeof (struct autovec));
			return;
		}

		mem->av_link = prep->av_link;
		prep->av_link = mem;
	} else {
		/* insert new intpt at beginning of chain */
		mem->av_link = vectp->avh_link;
		vectp->avh_link = mem;
	}
	/* widen the head's cached priority range to cover the new entry */
	if (pri_level > (int)vectp->avh_hi_pri) {
		vectp->avh_hi_pri = (ushort_t)pri_level;
	}
	if (pri_level < (int)vectp->avh_lo_pri) {
		vectp->avh_lo_pri = (ushort_t)pri_level;
	}
	mutex_exit(&av_lock);
}
421 421
422 422 static int
423 423 av_rem_softintr(void *intr_id, int lvl, avfunc xxintr, boolean_t rem_softinfo)
424 424 {
425 425 struct av_head *vecp = (struct av_head *)0;
426 426 int slvl;
427 427 ddi_softint_hdl_impl_t *hdlp = (ddi_softint_hdl_impl_t *)intr_id;
428 428 av_softinfo_t *infop = (av_softinfo_t *)hdlp->ih_pending;
429 429
430 430 if (xxintr == NULL)
431 431 return (0);
432 432
433 433 if ((slvl = slvltovect(lvl)) != -1) {
434 434 rem_avintr(intr_id, lvl, xxintr, slvl);
435 435 return (1);
436 436 }
437 437
438 438 if (lvl <= 0 && lvl >= LOCK_LEVEL) {
439 439 return (0);
440 440 }
441 441 vecp = &softvect[lvl];
442 442 remove_av(intr_id, vecp, xxintr, lvl, 0);
443 443
444 444 if (rem_softinfo) {
445 445 kmem_free(infop, sizeof (av_softinfo_t));
446 446 hdlp->ih_pending = NULL;
447 447 }
448 448
449 449 return (1);
450 450 }
451 451
452 452 int
453 453 av_softint_movepri(void *intr_id, int old_lvl)
454 454 {
455 455 int ret;
456 456 ddi_softint_hdl_impl_t *hdlp = (ddi_softint_hdl_impl_t *)intr_id;
457 457
458 458 ret = add_avsoftintr(intr_id, hdlp->ih_pri, hdlp->ih_cb_func,
459 459 DEVI(hdlp->ih_dip)->devi_name, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);
460 460
461 461 if (ret) {
462 462 (void) av_rem_softintr(intr_id, old_lvl, hdlp->ih_cb_func,
463 463 B_FALSE);
464 464 }
465 465
466 466 return (ret);
467 467 }
468 468
469 469 /*
470 470 * Remove a driver from the autovector list.
471 471 */
int
rem_avsoftintr(void *intr_id, int lvl, avfunc xxintr)
{
	/* B_TRUE: also free the handler's av_softinfo_t pending state */
	return (av_rem_softintr(intr_id, lvl, xxintr, B_TRUE));
}
477 477
478 478 /*
479 479 * Remove specified interrupt handler.
480 480 *
481 481 * PSM module could initialize remintr to some PSM private function
482 482 * so that it could override rem_avintr() to operate on its private
483 483 * data structures.
484 484 */
void
rem_avintr(void *intr_id, int lvl, avfunc xxintr, int vect)
{
	struct av_head *vecp = (struct av_head *)0;
	avfunc f;
	int s, vectindex;	/* save old spl value */

	/* PSM override (e.g. apix) -- see block comment above */
	if (remintr) {
		(*remintr)(intr_id, lvl, xxintr, vect);
		return;
	}

	if ((f = xxintr) == NULL)
		return;

	vectindex = vect % MAX_VECT;
	vecp = &autovect[vectindex];
	remove_av(intr_id, vecp, f, lvl, vect);
	/* tell the platform about the (possibly) changed priority range */
	s = splhi();
	mutex_enter(&av_lock);
	(*delspl)(vect, lvl, vecp->avh_lo_pri, vecp->avh_hi_pri);
	mutex_exit(&av_lock);
	splx(s);
}
509 509
510 510
511 511 /*
512 512 * After having made a change to an autovector list, wait until we have
513 513 * seen each cpu not executing an interrupt at that level--so we know our
514 514 * change has taken effect completely (no old state in registers, etc).
515 515 */
516 516 void
517 517 wait_till_seen(int ipl)
518 518 {
519 519 int cpu_in_chain, cix;
520 520 struct cpu *cpup;
521 521 cpuset_t cpus_to_check;
522 522
523 523 CPUSET_ALL(cpus_to_check);
524 524 do {
525 525 cpu_in_chain = 0;
526 526 for (cix = 0; cix < NCPU; cix++) {
527 527 cpup = cpu[cix];
528 528 if (cpup != NULL && CPU_IN_SET(cpus_to_check, cix)) {
529 529 if (INTR_ACTIVE(cpup, ipl)) {
530 530 cpu_in_chain = 1;
531 531 } else {
532 532 CPUSET_DEL(cpus_to_check, cix);
533 533 }
534 534 }
535 535 }
536 536 } while (cpu_in_chain);
537 537 }
538 538
539 539 static uint64_t dummy_tick;
540 540
541 541 /* remove an interrupt vector from the chain */
static void
remove_av(void *intr_id, struct av_head *vectp, avfunc f, int pri_level,
    int vect)
{
	struct autovec *p, *target;
	int lo_pri, hi_pri;
	int ipl;
	/*
	 * Protect rewrites of the list
	 */
	target = NULL;

	mutex_enter(&av_lock);
	ipl = pri_level;
	lo_pri = MAXIPL;
	hi_pri = 0;
	/* locate the handler; recompute lo/hi pri over the survivors */
	for (p = vectp->avh_link; p; p = p->av_link) {
		if ((p->av_vector == f) && (p->av_intr_id == intr_id)) {
			/* found the handler */
			target = p;
			continue;
		}
		if (p->av_vector != NULL) {
			if (p->av_prilevel > hi_pri)
				hi_pri = p->av_prilevel;
			if (p->av_prilevel < lo_pri)
				lo_pri = p->av_prilevel;
		}
	}
	/* wait at the highest priority this chain still services */
	if (ipl < hi_pri)
		ipl = hi_pri;
	if (target == NULL) {	/* not found */
		printf("Couldn't remove function %p at %d, %d\n",
		    (void *)f, vect, pri_level);
		mutex_exit(&av_lock);
		return;
	}

	/*
	 * This drops the handler from the chain, it can no longer be called.
	 * However, there is no guarantee that the handler is not currently
	 * still executing.
	 */
	target->av_vector = NULL;
	/*
	 * There is a race where we could be just about to pick up the ticksp
	 * pointer to increment it after returning from the service routine
	 * in av_dispatch_autovect. Rather than NULL it out let's just point
	 * it off to something safe so that any final tick update attempt
	 * won't fault.
	 */
	target->av_ticksp = &dummy_tick;
	wait_till_seen(ipl);

	if (lo_pri > hi_pri) {	/* the chain is now empty */
		/* Leave the unused entries here for probable future use */
		vectp->avh_lo_pri = MAXIPL;
		vectp->avh_hi_pri = 0;
	} else {
		/* shrink the cached priority range to the survivors */
		if ((int)vectp->avh_lo_pri < lo_pri)
			vectp->avh_lo_pri = (ushort_t)lo_pri;
		if ((int)vectp->avh_hi_pri > hi_pri)
			vectp->avh_hi_pri = (ushort_t)hi_pri;
	}
	mutex_exit(&av_lock);
	/* wait again after dropping the lock -- see race note above */
	wait_till_seen(ipl);
}
609 609
610 610 /*
611 611 * kmdb uses siron (and thus setsoftint) while the world is stopped in order to
612 612 * inform its driver component that there's work to be done. We need to keep
613 613 * DTrace from instrumenting kmdb's siron and setsoftint. We duplicate siron,
614 614 * giving kmdb's version a kdi prefix to keep DTrace at bay. We also
615 615 * provide a version of the various setsoftint functions available for kmdb to
616 616 * use using a kdi_ prefix while the main *setsoftint() functionality is
617 617 * implemented as a wrapper. This allows tracing, while still providing a
618 618 * way for kmdb to sneak in unmolested.
619 619 */
void
kdi_siron(void)
{
	/* DTrace-safe level-1 softint used by kmdb (see comment above) */
	(*kdisetsoftint)(1, softlevel1_hdl.ih_pending);
}
625 625
626 626 /*
627 627 * Trigger a soft interrupt.
628 628 */
void
siron(void)
{
	/* Level 1 software interrupt */
	(*setsoftint)(1, softlevel1_hdl.ih_pending);
}
635 635
636 636 /*
637 637 * Trigger software interrupts dedicated to ddi timer.
638 638 */
void
sir_on(int level)
{
	ASSERT(level >= DDI_IPL_1 && level <= DDI_IPL_10);
	/* softlevel_hdl[] is indexed by level - 1 */
	(*setsoftint)(level, softlevel_hdl[level-1].ih_pending);
}
645 645
646 646 /*
647 647 * The handler which is executed on the target CPU.
648 648 */
/*ARGSUSED*/
static int
siron_poke_intr(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
{
	/* cross-call target: raise a level-1 softint on this CPU */
	siron();
	return (0);
}
656 656
657 657 /*
658 658 * May get called from softcall to poke CPUs.
659 659 */
void
siron_poke_cpu(cpuset_t poke)
{
	int cpuid = CPU->cpu_id;

	/*
	 * If we are poking to ourself then we can simply
	 * generate level1 using siron()
	 */
	if (CPU_IN_SET(poke, cpuid)) {
		siron();
		CPUSET_DEL(poke, cpuid);
		if (CPUSET_ISNULL(poke))
			return;
	}

	/* cross-call the remaining CPUs so each raises its own softint */
	xc_call(0, 0, 0, CPUSET2BV(poke), (xc_func_t)siron_poke_intr);
}
678 678
679 679 /*
680 680 * Walk the autovector table for this vector, invoking each
681 681 * interrupt handler as we go.
682 682 */
683 683
684 684 extern uint64_t intr_get_time(void);
685 685
void
av_dispatch_autovect(uint_t vec)
{
	struct autovec *av;

	ASSERT_STACK_ALIGNED();

	/*
	 * Re-walk the whole chain for as long as it is shared (more than
	 * one handler called) and somebody claimed the interrupt --
	 * presumably because a shared level-triggered line may have been
	 * re-asserted in the meantime (NOTE(review): confirm rationale).
	 */
	while ((av = autovect[vec].avh_link) != NULL) {
		uint_t numcalled = 0;
		uint_t claimed = 0;

		for (; av; av = av->av_link) {
			uint_t r;
			uint_t (*intr)() = av->av_vector;
			caddr_t arg1 = av->av_intarg1;
			caddr_t arg2 = av->av_intarg2;
			dev_info_t *dip = av->av_dip;

			/*
			 * We must walk the entire chain.  Removed handlers
			 * may be anywhere in the chain.
			 */
			if (intr == NULL)
				continue;

			DTRACE_PROBE4(interrupt__start, dev_info_t *, dip,
			    void *, intr, caddr_t, arg1, caddr_t, arg2);
			r = (*intr)(arg1, arg2);
			DTRACE_PROBE4(interrupt__complete, dev_info_t *, dip,
			    void *, intr, caddr_t, arg1, uint_t, r);
			numcalled++;
			claimed |= r;
			/* charge interrupt time to the handler's counter */
			if (av->av_ticksp && av->av_prilevel <= LOCK_LEVEL)
				atomic_add_64(av->av_ticksp, intr_get_time());
		}

		/*
		 * If there's only one interrupt handler in the chain,
		 * or if no-one claimed the interrupt at all give up now.
		 */
		if (numcalled == 1 || claimed == 0)
			break;
	}
}
730 730
731 731 /*
732 732 * Call every soft interrupt handler we can find at this level once.
733 733 */
void
av_dispatch_softvect(uint_t pil)
{
	struct autovec *av;
	ddi_softint_hdl_impl_t *hdlp;
	uint_t (*intr)();
	caddr_t arg1;
	caddr_t arg2;

	ASSERT_STACK_ALIGNED();
	ASSERT(pil >= 0 && pil <= PIL_MAX);

	for (av = softvect[pil].avh_link; av; av = av->av_link) {
		/*
		 * We must walk the entire chain.  Removed handlers
		 * may be anywhere in the chain.
		 */
		if ((intr = av->av_vector) == NULL)
			continue;
		arg1 = av->av_intarg1;
		arg2 = av->av_intarg2;

		/* soft handlers register with their DDI handle as intr_id */
		hdlp = (ddi_softint_hdl_impl_t *)av->av_intr_id;
		ASSERT(hdlp);

		/*
		 * Each cpu has its own pending bit in hdlp->ih_pending,
		 * here av_check/clear_softint_pending is just checking
		 * and clearing the pending bit for the current cpu, who
		 * has just triggered a softint.
		 */
		if (av_check_softint_pending(hdlp->ih_pending, B_FALSE)) {
			av_clear_softint_pending(hdlp->ih_pending);
			(void) (*intr)(arg1, arg2);
		}
	}
}
771 771
772 772 struct regs;
773 773
774 774 /*
775 775 * Call every NMI handler we know of once.
776 776 */
void
av_dispatch_nmivect(struct regs *rp)
{
	struct autovec *av;

	ASSERT_STACK_ALIGNED();

	/* every NMI handler gets its registration arg plus the trap regs */
	for (av = nmivect; av; av = av->av_link)
		(void) (av->av_vector)(av->av_intarg1, rp);
}
↓ open down ↓ |
670 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX