OS-7125 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
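
The diff below covers usr/src/uts/i86pc/io/apix/apix_intr.c. It pulls in the new <sys/ht.h> and brackets the points where the apix interrupt code enters and leaves interrupt context with ht_begin_intr(pil) and ht_end_intr(), in the softint, high-level and interrupt-thread prolog/epilog pairs; those hooks are part of the hyperthread-exclusion support introduced for the L1TF mitigation named in the synopsis above. The fragment that follows is only a condensed sketch of the calling pattern applied in this file, with kernel context assumed and illustrative function names; it is not the implementation of the hooks, and the high-level path calls them without any cpu_thread switch.

/*
 * Condensed sketch of the pattern applied below; example_intr_prolog and
 * example_intr_epilog are illustrative names only, not part of the change.
 */
#include <sys/cpuvar.h>
#include <sys/ht.h>

static void
example_intr_prolog(struct cpu *cpu, kthread_t *it, uint_t pil)
{
	cpu->cpu_thread = it;	/* interrupt thread becomes current */
	ht_begin_intr(pil);	/* tell HT exclusion we are running at this PIL */
}

static void
example_intr_epilog(struct cpu *cpu, kthread_t *t)
{
	ht_end_intr();		/* leaving interrupt context */
	cpu->cpu_thread = t;	/* restore the interrupted thread */
}
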
--- old/usr/src/uts/i86pc/io/apix/apix_intr.c
+++ new/usr/src/uts/i86pc/io/apix/apix_intr.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2018 Western Digital Corporation. All rights reserved.
25 + * Copyright 2018 Joyent, Inc.
25 26 */
26 27
27 28 #include <sys/cpuvar.h>
28 29 #include <sys/cpu_event.h>
29 30 #include <sys/param.h>
30 31 #include <sys/cmn_err.h>
31 32 #include <sys/t_lock.h>
32 33 #include <sys/kmem.h>
33 34 #include <sys/machlock.h>
34 35 #include <sys/systm.h>
35 36 #include <sys/archsystm.h>
36 37 #include <sys/atomic.h>
37 38 #include <sys/sdt.h>
38 39 #include <sys/processor.h>
39 40 #include <sys/time.h>
40 41 #include <sys/psm.h>
41 42 #include <sys/smp_impldefs.h>
42 43 #include <sys/cram.h>
43 44 #include <sys/apic.h>
44 45 #include <sys/pit.h>
45 46 #include <sys/ddi.h>
46 47 #include <sys/sunddi.h>
47 48 #include <sys/ddi_impldefs.h>
48 49 #include <sys/pci.h>
49 50 #include <sys/promif.h>
50 51 #include <sys/x86_archext.h>
51 52 #include <sys/cpc_impl.h>
52 53 #include <sys/uadmin.h>
53 54 #include <sys/panic.h>
54 55 #include <sys/debug.h>
55 56 #include <sys/trap.h>
56 57 #include <sys/machsystm.h>
57 58 #include <sys/sysmacros.h>
58 59 #include <sys/rm_platter.h>
59 60 #include <sys/privregs.h>
60 61 #include <sys/note.h>
61 62 #include <sys/pci_intr_lib.h>
62 63 #include <sys/spl.h>
63 64 #include <sys/clock.h>
64 65 #include <sys/dditypes.h>
65 66 #include <sys/sunddi.h>
66 67 #include <sys/x_call.h>
67 68 #include <sys/reboot.h>
68 69 #include <vm/hat_i86.h>
69 70 #include <sys/stack.h>
70 71 #include <sys/apix.h>
72 +#include <sys/ht.h>
71 73
72 74 static void apix_post_hardint(int);
73 75
74 76 /*
75 77 * Insert a vector into the tail of the interrupt pending list
76 78 */
77 79 static __inline__ void
78 80 apix_insert_pending_av(apix_impl_t *apixp, struct autovec *avp, int ipl)
79 81 {
80 82 struct autovec **head = apixp->x_intr_head;
81 83 struct autovec **tail = apixp->x_intr_tail;
82 84
83 85 avp->av_ipl_link = NULL;
84 86 if (tail[ipl] == NULL) {
85 87 head[ipl] = tail[ipl] = avp;
86 88 return;
87 89 }
88 90
89 91 tail[ipl]->av_ipl_link = avp;
90 92 tail[ipl] = avp;
91 93 }
92 94
93 95 /*
94 96 * Remove and return a vector from the head of the hardware interrupt
95 97 * pending list.
96 98 */
97 99 static __inline__ struct autovec *
98 100 apix_remove_pending_av(apix_impl_t *apixp, int ipl)
99 101 {
100 102 struct cpu *cpu = CPU;
101 103 struct autovec **head = apixp->x_intr_head;
102 104 struct autovec **tail = apixp->x_intr_tail;
103 105 struct autovec *avp = head[ipl];
104 106
105 107 if (avp == NULL)
106 108 return (NULL);
107 109
108 110 if (avp->av_vector != NULL && avp->av_prilevel < cpu->cpu_base_spl) {
109 111 /*
110 112 * If there are blocked higher-level interrupts, return
111 113 * NULL to quit handling the current IPL level.
112 114 */
113 115 apixp->x_intr_pending |= (1 << avp->av_prilevel);
114 116 return (NULL);
115 117 }
116 118
117 119 avp->av_flags &= ~AV_PENTRY_PEND;
118 120 avp->av_flags |= AV_PENTRY_ONPROC;
119 121 head[ipl] = avp->av_ipl_link;
120 122 avp->av_ipl_link = NULL;
121 123
122 124 if (head[ipl] == NULL)
123 125 tail[ipl] = NULL;
124 126
125 127 return (avp);
126 128 }
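
The two helpers above maintain a per-CPU, per-IPL FIFO of pending autovec entries: x_intr_head[ipl] and x_intr_tail[ipl] chain entries through av_ipl_link, and apix_add_pending_hardint() (below) records which IPLs have queued work in the x_intr_pending bitmask. For readers unfamiliar with the layout, here is a stand-alone user-land model of the enqueue step; the pendq/pend names are made up for illustration and do not exist in the kernel.

#include <stddef.h>

#define	NIPL	16

struct pend {
	struct pend	*link;		/* plays the role of av_ipl_link */
};

struct pendq {
	struct pend	*head[NIPL];	/* x_intr_head[] analogue */
	struct pend	*tail[NIPL];	/* x_intr_tail[] analogue */
	unsigned int	pending;	/* x_intr_pending analogue */
};

static void
pendq_insert(struct pendq *q, struct pend *p, int ipl)
{
	p->link = NULL;			/* new entry always goes at the tail */
	if (q->tail[ipl] == NULL)
		q->head[ipl] = p;
	else
		q->tail[ipl]->link = p;
	q->tail[ipl] = p;
	q->pending |= 1u << ipl;	/* mark this IPL as having pending work */
}
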
127 129
128 130 /*
129 131 * add_pending_hardint:
130 132 *
131 133 * Add hardware interrupts to the interrupt pending list.
132 134 */
133 135 static void
134 136 apix_add_pending_hardint(int vector)
135 137 {
136 138 uint32_t cpuid = psm_get_cpu_id();
137 139 apix_impl_t *apixp = apixs[cpuid];
138 140 apix_vector_t *vecp = apixp->x_vectbl[vector];
139 141 struct autovec *p, *prevp = NULL;
140 142 int ipl;
141 143
142 144 /*
143 145 * An MSI interrupt that does not support per-vector masking can
144 146 * be triggered on a false vector, because a rebind operation
145 147 * cannot program the MSI address and data atomically.
146 148 * Add the ISR of such a suspicious interrupt to the
147 149 * pending list.
148 150 */
149 151 APIX_DO_FAKE_INTR(cpuid, vector);
150 152 if (vecp == NULL)
151 153 return;
152 154
153 155 for (p = vecp->v_autovect; p != NULL; p = p->av_link) {
154 156 if (p->av_vector == NULL)
155 157 continue; /* skip freed entry */
156 158
157 159 ipl = p->av_prilevel;
158 160 prevp = p;
159 161
160 162 /* set pending at specified priority level */
161 163 apixp->x_intr_pending |= (1 << ipl);
162 164
163 165 if (p->av_flags & AV_PENTRY_PEND)
164 166 continue; /* already in the pending list */
165 167 p->av_flags |= AV_PENTRY_PEND;
166 168
167 169 /* insert into pending list by its original IPL */
168 170 apix_insert_pending_av(apixp, p, ipl);
169 171 }
170 172
171 173 /* last one of the linked list */
172 174 if (prevp && ((prevp->av_flags & AV_PENTRY_LEVEL) != 0))
173 175 prevp->av_flags |= (vector & AV_PENTRY_VECTMASK);
174 176 }
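
A note on the "last one of the linked list" step just above: for a level-triggered fixed interrupt (AV_PENTRY_LEVEL set), the vector number is stashed in the AV_PENTRY_VECTMASK bits of the last autovec on the chain. apix_dispatch_pending_autovect() below pulls that value back out and, once the handler has run, hands it to apix_post_hardint(), which unmasks the level-triggered source via apix_level_intr_post_dispatch(); the stashed bits are cleared first unless the entry has gone pending again in the meantime. None of this is changed by the webrev.
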
175 177
176 178 /*
177 179 * Walk pending hardware interrupts at given priority level, invoking
178 180 * each interrupt handler as we go.
179 181 */
180 182 extern uint64_t intr_get_time(void);
181 183
182 184 static void
183 185 apix_dispatch_pending_autovect(uint_t ipl)
184 186 {
185 187 uint32_t cpuid = psm_get_cpu_id();
186 188 apix_impl_t *apixp = apixs[cpuid];
187 189 struct autovec *av;
188 190
189 191 while ((av = apix_remove_pending_av(apixp, ipl)) != NULL) {
190 192 uint_t r;
191 193 uint_t (*intr)() = av->av_vector;
192 194 caddr_t arg1 = av->av_intarg1;
193 195 caddr_t arg2 = av->av_intarg2;
194 196 dev_info_t *dip = av->av_dip;
195 197 uchar_t vector = av->av_flags & AV_PENTRY_VECTMASK;
196 198
197 199 if (intr == NULL)
198 200 continue;
199 201
200 202 /* Don't enable interrupts during x-calls */
201 203 if (ipl != XC_HI_PIL)
202 204 sti();
203 205
204 206 DTRACE_PROBE4(interrupt__start, dev_info_t *, dip,
205 207 void *, intr, caddr_t, arg1, caddr_t, arg2);
206 208 r = (*intr)(arg1, arg2);
207 209 DTRACE_PROBE4(interrupt__complete, dev_info_t *, dip,
208 210 void *, intr, caddr_t, arg1, uint_t, r);
209 211
210 212 if (av->av_ticksp && av->av_prilevel <= LOCK_LEVEL)
211 213 atomic_add_64(av->av_ticksp, intr_get_time());
212 214
213 215 cli();
214 216
215 217 if (vector) {
216 218 if ((av->av_flags & AV_PENTRY_PEND) == 0)
217 219 av->av_flags &= ~AV_PENTRY_VECTMASK;
218 220
219 221 apix_post_hardint(vector);
220 222 }
221 223
222 224 /* mark it as idle */
223 225 av->av_flags &= ~AV_PENTRY_ONPROC;
224 226 }
225 227 }
226 228
227 229 static caddr_t
228 230 apix_do_softint_prolog(struct cpu *cpu, uint_t pil, uint_t oldpil,
229 231 caddr_t stackptr)
230 232 {
231 233 kthread_t *t, *volatile it;
232 234 struct machcpu *mcpu = &cpu->cpu_m;
233 235 hrtime_t now;
234 236
235 237 UNREFERENCED_1PARAMETER(oldpil);
236 238 ASSERT(pil > mcpu->mcpu_pri && pil > cpu->cpu_base_spl);
237 239
238 240 atomic_and_32((uint32_t *)&mcpu->mcpu_softinfo.st_pending, ~(1 << pil));
239 241
240 242 mcpu->mcpu_pri = pil;
241 243
242 244 now = tsc_read();
243 245
244 246 /*
245 247 * Get set to run interrupt thread.
246 248 * There should always be an interrupt thread since we
247 249 * allocate one for each level on the CPU.
248 250 */
249 251 it = cpu->cpu_intr_thread;
250 252 ASSERT(it != NULL);
251 253 cpu->cpu_intr_thread = it->t_link;
252 254
253 255 /* t_intr_start could be zero due to cpu_intr_swtch_enter. */
254 256 t = cpu->cpu_thread;
255 257 if ((t->t_flag & T_INTR_THREAD) && t->t_intr_start != 0) {
256 258 hrtime_t intrtime = now - t->t_intr_start;
257 259 mcpu->intrstat[pil][0] += intrtime;
258 260 cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
259 261 t->t_intr_start = 0;
260 262 }
261 263
262 264 /*
263 265 * Note that the code in kcpc_overflow_intr -relies- on the
264 266 * ordering of events here - in particular that t->t_lwp of
265 267 * the interrupt thread is set to the pinned thread *before*
266 268 * curthread is changed.
267 269 */
268 270 it->t_lwp = t->t_lwp;
269 271 it->t_state = TS_ONPROC;
270 272
271 273 /*
272 274 * Push interrupted thread onto list from new thread.
273 275 * Set the new thread as the current one.
274 276 * Set interrupted thread's T_SP because if it is the idle thread,
275 277 * resume() may use that stack between threads.
276 278 */
277 279
278 280 ASSERT(SA((uintptr_t)stackptr) == (uintptr_t)stackptr);
279 281 t->t_sp = (uintptr_t)stackptr;
280 282
281 283 it->t_intr = t;
282 284 cpu->cpu_thread = it;
285 + ht_begin_intr(pil);
283 286
284 287 /*
285 288 * Set bit for this pil in CPU's interrupt active bitmask.
286 289 */
287 290 ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);
288 291 cpu->cpu_intr_actv |= (1 << pil);
289 292
290 293 /*
291 294 * Initialize thread priority level from intr_pri
292 295 */
293 296 it->t_pil = (uchar_t)pil;
294 297 it->t_pri = (pri_t)pil + intr_pri;
295 298 it->t_intr_start = now;
296 299
297 300 return (it->t_stk);
298 301 }
299 302
300 303 static void
301 304 apix_do_softint_epilog(struct cpu *cpu, uint_t oldpil)
302 305 {
303 306 struct machcpu *mcpu = &cpu->cpu_m;
304 307 kthread_t *t, *it;
305 308 uint_t pil, basespl;
306 309 hrtime_t intrtime;
307 310 hrtime_t now = tsc_read();
308 311
309 312 it = cpu->cpu_thread;
310 313 pil = it->t_pil;
311 314
312 315 cpu->cpu_stats.sys.intr[pil - 1]++;
313 316
314 317 ASSERT(cpu->cpu_intr_actv & (1 << pil));
315 318 cpu->cpu_intr_actv &= ~(1 << pil);
316 319
317 320 intrtime = now - it->t_intr_start;
318 321 mcpu->intrstat[pil][0] += intrtime;
319 322 cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
320 323
321 324 /*
322 325 * If there is still an interrupted thread underneath this one
323 326 * then the interrupt was never blocked and the return is
324 327 * fairly simple. Otherwise it isn't.
325 328 */
326 329 if ((t = it->t_intr) == NULL) {
327 330 /*
328 331 * Put thread back on the interrupt thread list.
329 332 * This was an interrupt thread, so set CPU's base SPL.
330 333 */
331 334 set_base_spl();
332 335 /* mcpu->mcpu_pri = cpu->cpu_base_spl; */
333 336
334 337 /*
335 338 * If there are pending interrupts, send a softint to
336 339 * re-enter apix_do_interrupt() and get them processed.
337 340 */
338 341 if (apixs[cpu->cpu_id]->x_intr_pending)
339 342 siron();
340 343
341 344 it->t_state = TS_FREE;
342 345 it->t_link = cpu->cpu_intr_thread;
343 346 cpu->cpu_intr_thread = it;
344 347 (void) splhigh();
345 348 sti();
346 349 swtch();
347 350 /*NOTREACHED*/
348 351 panic("dosoftint_epilog: swtch returned");
349 352 }
350 353 it->t_link = cpu->cpu_intr_thread;
351 354 cpu->cpu_intr_thread = it;
352 355 it->t_state = TS_FREE;
356 + ht_end_intr();
353 357 cpu->cpu_thread = t;
358 +
354 359 if (t->t_flag & T_INTR_THREAD)
355 360 t->t_intr_start = now;
356 361 basespl = cpu->cpu_base_spl;
357 362 pil = MAX(oldpil, basespl);
358 363 mcpu->mcpu_pri = pil;
359 364 }
360 365
361 366 /*
362 367 * Dispatch a soft interrupt
363 368 */
364 369 static void
365 370 apix_dispatch_softint(uint_t oldpil, uint_t arg2)
366 371 {
367 372 struct cpu *cpu = CPU;
368 373
369 374 UNREFERENCED_1PARAMETER(arg2);
370 375
371 376 sti();
372 377 av_dispatch_softvect((int)cpu->cpu_thread->t_pil);
373 378 cli();
374 379
375 380 /*
376 381 * Must run softint_epilog() on the interrupt thread stack, since
377 382 * there may not be a return from it if the interrupt thread blocked.
378 383 */
379 384 apix_do_softint_epilog(cpu, oldpil);
380 385 }
381 386
382 387 /*
383 388 * Deliver any softints the current interrupt priority allows.
384 389 * Called with interrupts disabled.
385 390 */
386 391 int
387 392 apix_do_softint(struct regs *regs)
388 393 {
389 394 struct cpu *cpu = CPU;
390 395 int oldipl;
391 396 int newipl;
392 397 volatile uint16_t pending;
393 398 caddr_t newsp;
394 399
395 400 while ((pending = cpu->cpu_softinfo.st_pending) != 0) {
396 401 newipl = bsrw_insn(pending);
397 402 oldipl = cpu->cpu_pri;
398 403 if (newipl <= oldipl || newipl <= cpu->cpu_base_spl)
399 404 return (-1);
400 405
401 406 newsp = apix_do_softint_prolog(cpu, newipl, oldipl,
402 407 (caddr_t)regs);
403 408 ASSERT(newsp != NULL);
404 409 switch_sp_and_call(newsp, apix_dispatch_softint, oldipl, 0);
405 410 }
406 411
407 412 return (0);
408 413 }
409 414
410 415 static int
411 416 apix_hilevel_intr_prolog(struct cpu *cpu, uint_t pil, uint_t oldpil,
412 417 struct regs *rp)
413 418 {
414 419 struct machcpu *mcpu = &cpu->cpu_m;
415 420 hrtime_t intrtime;
416 421 hrtime_t now = tsc_read();
417 422 apix_impl_t *apixp = apixs[cpu->cpu_id];
418 423 uint_t mask;
419 424
420 425 ASSERT(pil > mcpu->mcpu_pri && pil > cpu->cpu_base_spl);
421 426
422 427 if (pil == CBE_HIGH_PIL) { /* 14 */
423 428 cpu->cpu_profile_pil = oldpil;
424 429 if (USERMODE(rp->r_cs)) {
425 430 cpu->cpu_profile_pc = 0;
426 431 cpu->cpu_profile_upc = rp->r_pc;
427 432 cpu->cpu_cpcprofile_pc = 0;
428 433 cpu->cpu_cpcprofile_upc = rp->r_pc;
429 434 } else {
430 435 cpu->cpu_profile_pc = rp->r_pc;
431 436 cpu->cpu_profile_upc = 0;
432 437 cpu->cpu_cpcprofile_pc = rp->r_pc;
433 438 cpu->cpu_cpcprofile_upc = 0;
434 439 }
435 440 }
436 441
437 442 mcpu->mcpu_pri = pil;
438 443
439 444 mask = cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK;
440 445 if (mask != 0) {
441 446 int nestpil;
442 447
443 448 /*
444 449 * We have interrupted another high-level interrupt.
445 450 * Load starting timestamp, compute interval, update
446 451 * cumulative counter.
447 452 */
448 453 nestpil = bsrw_insn((uint16_t)mask);
449 454 intrtime = now -
450 455 mcpu->pil_high_start[nestpil - (LOCK_LEVEL + 1)];
451 456 mcpu->intrstat[nestpil][0] += intrtime;
452 457 cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
453 458 } else {
454 459 kthread_t *t = cpu->cpu_thread;
455 460
456 461 /*
457 462 * See if we are interrupting a low-level interrupt thread.
458 463 * If so, account for its time slice only if its time stamp
459 464 * is non-zero.
460 465 */
461 466 if ((t->t_flag & T_INTR_THREAD) != 0 && t->t_intr_start != 0) {
462 467 intrtime = now - t->t_intr_start;
463 468 mcpu->intrstat[t->t_pil][0] += intrtime;
464 469 cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
465 470 t->t_intr_start = 0;
466 471 }
467 472 }
468 473
474 + ht_begin_intr(pil);
475 +
469 476 /* store starting timestamp in CPU structure for this IPL */
470 477 mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)] = now;
471 478
472 479 if (pil == 15) {
473 480 /*
474 481 * To support reentrant level 15 interrupts, we maintain a
475 482 * recursion count in the top half of cpu_intr_actv. Only
476 483 * when this count hits zero do we clear the PIL 15 bit from
477 484 * the lower half of cpu_intr_actv.
478 485 */
479 486 uint16_t *refcntp = (uint16_t *)&cpu->cpu_intr_actv + 1;
480 487 (*refcntp)++;
481 488 }
482 489
483 490 cpu->cpu_intr_actv |= (1 << pil);
484 491 /* clear pending ipl level bit */
485 492 apixp->x_intr_pending &= ~(1 << pil);
486 493
487 494 return (mask);
488 495 }
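
The PIL 15 recursion count that this prolog increments (and that apix_hilevel_intr_epilog() below decrements) lives in the upper sixteen bits of the 32-bit cpu_intr_actv word, above the per-PIL active bitmask in the lower half. The pointer arithmetic works because x86 is little-endian: the second uint16_t of the word is its high half. Below is a stand-alone illustration of the same type-punning, assuming a little-endian host as the kernel code does.

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t actv = 1u << 15;			/* PIL 15 marked active */
	uint16_t *refcntp = (uint16_t *)&actv + 1;	/* high half on little-endian */

	(*refcntp)++;					/* one nested PIL 15 entry */
	assert(actv == ((1u << 16) | (1u << 15)));

	if (--(*refcntp) == 0)				/* last exit drops the PIL bit */
		actv &= ~(1u << 15);
	assert(actv == 0);
	return (0);
}
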
489 496
490 497 static int
491 498 apix_hilevel_intr_epilog(struct cpu *cpu, uint_t oldpil)
492 499 {
493 500 struct machcpu *mcpu = &cpu->cpu_m;
494 501 uint_t mask, pil;
495 502 hrtime_t intrtime;
496 503 hrtime_t now = tsc_read();
497 504
498 505 pil = mcpu->mcpu_pri;
499 506 cpu->cpu_stats.sys.intr[pil - 1]++;
500 507
501 508 ASSERT(cpu->cpu_intr_actv & (1 << pil));
502 509
503 510 if (pil == 15) {
504 511 /*
505 512 * To support reentrant level 15 interrupts, we maintain a
506 513 * recursion count in the top half of cpu_intr_actv. Only
507 514 * when this count hits zero do we clear the PIL 15 bit from
508 515 * the lower half of cpu_intr_actv.
509 516 */
510 517 uint16_t *refcntp = (uint16_t *)&cpu->cpu_intr_actv + 1;
511 518
512 519 ASSERT(*refcntp > 0);
513 520
514 521 if (--(*refcntp) == 0)
515 522 cpu->cpu_intr_actv &= ~(1 << pil);
516 523 } else {
517 524 cpu->cpu_intr_actv &= ~(1 << pil);
518 525 }
519 526
520 527 ASSERT(mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)] != 0);
521 528
522 529 intrtime = now - mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)];
523 530 mcpu->intrstat[pil][0] += intrtime;
524 531 cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
525 532
526 533 /*
527 534 * Check for lower-pil nested high-level interrupt beneath
528 535 * current one. If so, place a starting timestamp in its
529 536 * pil_high_start entry.
530 537 */
531 538 mask = cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK;
532 539 if (mask != 0) {
533 540 int nestpil;
534 541
535 542 /*
536 543 * find PIL of nested interrupt
537 544 */
538 545 nestpil = bsrw_insn((uint16_t)mask);
539 546 ASSERT(nestpil < pil);
540 547 mcpu->pil_high_start[nestpil - (LOCK_LEVEL + 1)] = now;
541 548 /*
542 549 * (Another high-level interrupt is active below this one,
543 550 * so there is no need to check for an interrupt
544 551 * thread. That will be done by the lowest priority
545 552 * high-level interrupt active.)
546 553 */
547 554 } else {
548 555 /*
549 556 * Check to see if there is a low-level interrupt active.
550 557 * If so, place a starting timestamp in the thread
551 558 * structure.
552 559 */
553 560 kthread_t *t = cpu->cpu_thread;
554 561
555 562 if (t->t_flag & T_INTR_THREAD)
556 563 t->t_intr_start = now;
557 564 }
558 565
566 + ht_end_intr();
567 +
559 568 mcpu->mcpu_pri = oldpil;
560 569 if (pil < CBE_HIGH_PIL)
561 570 (void) (*setlvlx)(oldpil, 0);
562 571
563 572 return (mask);
564 573 }
565 574
566 575 /*
567 576 * Dispatch a hilevel interrupt (one above LOCK_LEVEL)
568 577 */
569 578 static void
570 579 apix_dispatch_pending_hilevel(uint_t ipl, uint_t arg2)
571 580 {
572 581 UNREFERENCED_1PARAMETER(arg2);
573 582
574 583 apix_dispatch_pending_autovect(ipl);
575 584 }
576 585
577 586 static __inline__ int
578 587 apix_do_pending_hilevel(struct cpu *cpu, struct regs *rp)
579 588 {
580 589 volatile uint16_t pending;
581 590 uint_t newipl, oldipl;
582 591 caddr_t newsp;
583 592
584 593 while ((pending = HILEVEL_PENDING(cpu)) != 0) {
585 594 newipl = bsrw_insn(pending);
586 595 ASSERT(newipl > LOCK_LEVEL && newipl > cpu->cpu_base_spl);
587 596 oldipl = cpu->cpu_pri;
588 597 if (newipl <= oldipl)
589 598 return (-1);
590 599
591 600 /*
592 601 * High priority interrupts run on this cpu's interrupt stack.
593 602 */
594 603 if (apix_hilevel_intr_prolog(cpu, newipl, oldipl, rp) == 0) {
595 604 newsp = cpu->cpu_intr_stack;
596 605 switch_sp_and_call(newsp, apix_dispatch_pending_hilevel,
597 606 newipl, 0);
598 607 } else { /* already on the interrupt stack */
599 608 apix_dispatch_pending_hilevel(newipl, 0);
600 609 }
601 610 (void) apix_hilevel_intr_epilog(cpu, oldipl);
602 611 }
603 612
604 613 return (0);
605 614 }
606 615
607 616 /*
608 617 * Get an interrupt thread and switch to it. It's called from do_interrupt().
609 618 * The IF flag is cleared and thus all maskable interrupts are blocked at
610 619 * the time of calling.
611 620 */
612 621 static caddr_t
613 622 apix_intr_thread_prolog(struct cpu *cpu, uint_t pil, caddr_t stackptr)
614 623 {
615 624 apix_impl_t *apixp = apixs[cpu->cpu_id];
616 625 struct machcpu *mcpu = &cpu->cpu_m;
617 626 hrtime_t now = tsc_read();
618 627 kthread_t *t, *volatile it;
619 628
620 629 ASSERT(pil > mcpu->mcpu_pri && pil > cpu->cpu_base_spl);
621 630
622 631 apixp->x_intr_pending &= ~(1 << pil);
623 632 ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);
624 633 cpu->cpu_intr_actv |= (1 << pil);
625 634 mcpu->mcpu_pri = pil;
626 635
627 636 /*
628 637 * Get set to run interrupt thread.
629 638 * There should always be an interrupt thread since we
630 639 * allocate one for each level on the CPU.
631 640 */
632 641 /* t_intr_start could be zero due to cpu_intr_swtch_enter. */
633 642 t = cpu->cpu_thread;
634 643 if ((t->t_flag & T_INTR_THREAD) && t->t_intr_start != 0) {
635 644 hrtime_t intrtime = now - t->t_intr_start;
636 645 mcpu->intrstat[pil][0] += intrtime;
637 646 cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
638 647 t->t_intr_start = 0;
639 648 }
640 649
641 650 /*
642 651 * Push interrupted thread onto list from new thread.
643 652 * Set the new thread as the current one.
644 653 * Set interrupted thread's T_SP because if it is the idle thread,
645 654 * resume() may use that stack between threads.
646 655 */
647 656
648 657 ASSERT(SA((uintptr_t)stackptr) == (uintptr_t)stackptr);
649 658
650 659 t->t_sp = (uintptr_t)stackptr; /* mark stack in curthread for resume */
651 660
652 661 /*
653 662 * Note that the code in kcpc_overflow_intr -relies- on the
654 663 * ordering of events here - in particular that t->t_lwp of
655 664 * the interrupt thread is set to the pinned thread *before*
656 665 * curthread is changed.
657 666 */
658 667 it = cpu->cpu_intr_thread;
659 668 cpu->cpu_intr_thread = it->t_link;
660 669 it->t_intr = t;
661 670 it->t_lwp = t->t_lwp;
662 671
663 672 /*
664 673 * (threads on the interrupt thread free list could have state
665 674 * preset to TS_ONPROC, but it helps in debugging if
666 675 * they're TS_FREE.)
667 676 */
668 677 it->t_state = TS_ONPROC;
669 678
670 679 cpu->cpu_thread = it;
680 + ht_begin_intr(pil);
671 681
672 682 /*
673 683 * Initialize thread priority level from intr_pri
674 684 */
675 685 it->t_pil = (uchar_t)pil;
676 686 it->t_pri = (pri_t)pil + intr_pri;
677 687 it->t_intr_start = now;
678 688
679 689 return (it->t_stk);
680 690 }
681 691
682 692 static void
683 693 apix_intr_thread_epilog(struct cpu *cpu, uint_t oldpil)
684 694 {
685 695 struct machcpu *mcpu = &cpu->cpu_m;
686 696 kthread_t *t, *it = cpu->cpu_thread;
687 697 uint_t pil, basespl;
688 698 hrtime_t intrtime;
689 699 hrtime_t now = tsc_read();
690 700
691 701 pil = it->t_pil;
692 702 cpu->cpu_stats.sys.intr[pil - 1]++;
693 703
694 704 ASSERT(cpu->cpu_intr_actv & (1 << pil));
695 705 cpu->cpu_intr_actv &= ~(1 << pil);
696 706
697 707 ASSERT(it->t_intr_start != 0);
698 708 intrtime = now - it->t_intr_start;
699 709 mcpu->intrstat[pil][0] += intrtime;
700 710 cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
701 711
702 712 /*
703 713 * If there is still an interrupted thread underneath this one
704 714 * then the interrupt was never blocked and the return is
705 715 * fairly simple. Otherwise it isn't.
706 716 */
707 717 if ((t = it->t_intr) == NULL) {
708 718 /*
709 719 * The interrupted thread is no longer pinned underneath
710 720 * the interrupt thread. This means the interrupt must
711 721 * have blocked, and the interrupted thread has been
712 722 * unpinned, and has probably been running around the
713 723 * system for a while.
714 724 *
715 725 * Since there is no longer a thread under this one, put
716 726 * this interrupt thread back on the CPU's free list and
717 727 * resume the idle thread which will dispatch the next
718 728 * thread to run.
719 729 */
720 730 cpu->cpu_stats.sys.intrblk++;
721 731
722 732 /*
723 733 * Put thread back on the interrupt thread list.
724 734 * This was an interrupt thread, so set CPU's base SPL.
725 735 */
726 736 set_base_spl();
727 737 basespl = cpu->cpu_base_spl;
728 738 mcpu->mcpu_pri = basespl;
729 739 (*setlvlx)(basespl, 0);
730 740
731 741 /*
732 742 * If there are pending interrupts, send a softint to
733 743 * re-enter apix_do_interrupt() and get them processed.
734 744 */
735 745 if (apixs[cpu->cpu_id]->x_intr_pending)
736 746 siron();
737 747
738 748 it->t_state = TS_FREE;
739 749 /*
740 750 * Return interrupt thread to pool
741 751 */
742 752 it->t_link = cpu->cpu_intr_thread;
743 753 cpu->cpu_intr_thread = it;
744 754
745 755 (void) splhigh();
746 756 sti();
747 757 swtch();
748 758 /*NOTREACHED*/
749 759 panic("dosoftint_epilog: swtch returned");
750 760 }
751 761
752 762 /*
753 763 * Return interrupt thread to the pool
754 764 */
755 765 it->t_link = cpu->cpu_intr_thread;
756 766 cpu->cpu_intr_thread = it;
757 767 it->t_state = TS_FREE;
758 768
769 + ht_end_intr();
759 770 cpu->cpu_thread = t;
771 +
760 772 if (t->t_flag & T_INTR_THREAD)
761 773 t->t_intr_start = now;
762 774 basespl = cpu->cpu_base_spl;
763 775 mcpu->mcpu_pri = MAX(oldpil, basespl);
764 776 (*setlvlx)(mcpu->mcpu_pri, 0);
765 777 }
766 778
767 779
768 780 static void
769 781 apix_dispatch_pending_hardint(uint_t oldpil, uint_t arg2)
770 782 {
771 783 struct cpu *cpu = CPU;
772 784
773 785 UNREFERENCED_1PARAMETER(arg2);
774 786
775 787 apix_dispatch_pending_autovect((int)cpu->cpu_thread->t_pil);
776 788
777 789 /*
778 790 * Must run intr_thread_epilog() on the interrupt thread stack, since
779 791 * there may not be a return from it if the interrupt thread blocked.
780 792 */
781 793 apix_intr_thread_epilog(cpu, oldpil);
782 794 }
783 795
784 796 static __inline__ int
785 797 apix_do_pending_hardint(struct cpu *cpu, struct regs *rp)
786 798 {
787 799 volatile uint16_t pending;
788 800 uint_t newipl, oldipl;
789 801 caddr_t newsp;
790 802
791 803 while ((pending = LOWLEVEL_PENDING(cpu)) != 0) {
792 804 newipl = bsrw_insn(pending);
793 805 ASSERT(newipl <= LOCK_LEVEL);
794 806 oldipl = cpu->cpu_pri;
795 807 if (newipl <= oldipl || newipl <= cpu->cpu_base_spl)
796 808 return (-1);
797 809
798 810 /*
799 811 * Run this interrupt in a separate thread.
800 812 */
801 813 newsp = apix_intr_thread_prolog(cpu, newipl, (caddr_t)rp);
802 814 ASSERT(newsp != NULL);
803 815 switch_sp_and_call(newsp, apix_dispatch_pending_hardint,
804 816 oldipl, 0);
805 817 }
806 818
807 819 return (0);
808 820 }
809 821
810 822 /*
811 823 * Unmask level triggered interrupts
812 824 */
813 825 static void
814 826 apix_post_hardint(int vector)
815 827 {
816 828 apix_vector_t *vecp = xv_vector(psm_get_cpu_id(), vector);
817 829 int irqno = vecp->v_inum;
818 830
819 831 ASSERT(vecp->v_type == APIX_TYPE_FIXED && apic_level_intr[irqno]);
820 832
821 833 apix_level_intr_post_dispatch(irqno);
822 834 }
823 835
824 836 static void
825 837 apix_dispatch_by_vector(uint_t vector)
826 838 {
827 839 struct cpu *cpu = CPU;
828 840 apix_vector_t *vecp = xv_vector(cpu->cpu_id, vector);
829 841 struct autovec *avp;
830 842 uint_t r, (*intr)();
831 843 caddr_t arg1, arg2;
832 844 dev_info_t *dip;
833 845
834 846 if (vecp == NULL ||
835 847 (avp = vecp->v_autovect) == NULL || avp->av_vector == NULL)
836 848 return;
837 849
838 850 avp->av_flags |= AV_PENTRY_ONPROC;
839 851 intr = avp->av_vector;
840 852 arg1 = avp->av_intarg1;
841 853 arg2 = avp->av_intarg2;
842 854 dip = avp->av_dip;
843 855
844 856 if (avp->av_prilevel != XC_HI_PIL)
845 857 sti();
846 858
847 859 DTRACE_PROBE4(interrupt__start, dev_info_t *, dip,
848 860 void *, intr, caddr_t, arg1, caddr_t, arg2);
849 861 r = (*intr)(arg1, arg2);
850 862 DTRACE_PROBE4(interrupt__complete, dev_info_t *, dip,
851 863 void *, intr, caddr_t, arg1, uint_t, r);
852 864
853 865 cli();
854 866 avp->av_flags &= ~AV_PENTRY_ONPROC;
855 867 }
856 868
857 869
858 870 static void
859 871 apix_dispatch_hilevel(uint_t vector, uint_t arg2)
860 872 {
861 873 UNREFERENCED_1PARAMETER(arg2);
862 874
863 875 apix_dispatch_by_vector(vector);
864 876 }
865 877
866 878 static void
867 879 apix_dispatch_lowlevel(uint_t vector, uint_t oldipl)
868 880 {
869 881 struct cpu *cpu = CPU;
870 882
871 883 apix_dispatch_by_vector(vector);
872 884
873 885 /*
874 886 * Must run intr_thread_epilog() on the interrupt thread stack, since
875 887 * there may not be a return from it if the interrupt thread blocked.
876 888 */
877 889 apix_intr_thread_epilog(cpu, oldipl);
878 890 }
879 891
880 892 /*
881 893 * Interrupt service routine, called with interrupts disabled.
882 894 */
883 895 void
884 896 apix_do_interrupt(struct regs *rp, trap_trace_rec_t *ttp)
885 897 {
886 898 struct cpu *cpu = CPU;
887 899 int vector = rp->r_trapno, newipl, oldipl = cpu->cpu_pri, ret;
888 900 apix_vector_t *vecp = NULL;
889 901
890 902 #ifdef TRAPTRACE
891 903 ttp->ttr_marker = TT_INTERRUPT;
892 904 ttp->ttr_cpuid = cpu->cpu_id;
893 905 ttp->ttr_ipl = 0xff;
894 906 ttp->ttr_pri = (uchar_t)oldipl;
895 907 ttp->ttr_spl = cpu->cpu_base_spl;
896 908 ttp->ttr_vector = 0xff;
897 909 #endif /* TRAPTRACE */
898 910
899 911 cpu_idle_exit(CPU_IDLE_CB_FLAG_INTR);
900 912
901 913 ++*(uint16_t *)&cpu->cpu_m.mcpu_istamp;
902 914
903 915 /*
904 916 * If it's a softint go do it now.
905 917 */
906 918 if (rp->r_trapno == T_SOFTINT) {
907 919 /*
908 920 * It might be the case that when an interrupt is triggered,
909 921 * the spl is raised to high by splhigh(). Later when do_splx()
910 922 * is called to restore the spl, both hardware and software
911 923 * interrupt pending flags are checked and a SOFTINT is faked
912 924 * accordingly.
913 925 */
914 926 (void) apix_do_pending_hilevel(cpu, rp);
915 927 (void) apix_do_pending_hardint(cpu, rp);
916 928 (void) apix_do_softint(rp);
917 929 ASSERT(!interrupts_enabled());
918 930 #ifdef TRAPTRACE
919 931 ttp->ttr_vector = T_SOFTINT;
920 932 #endif
921 933 /*
922 934 * We need to check again for pending interrupts that may have
923 935 * arrived while the softint was running.
924 936 */
925 937 goto do_pending;
926 938 }
927 939
928 940 /*
929 941 * Send EOI to local APIC
930 942 */
931 943 newipl = (*setlvl)(oldipl, (int *)&rp->r_trapno);
932 944 #ifdef TRAPTRACE
933 945 ttp->ttr_ipl = (uchar_t)newipl;
934 946 #endif /* TRAPTRACE */
935 947
936 948 /*
937 949 * Bail if it is a spurious interrupt
938 950 */
939 951 if (newipl == -1)
940 952 return;
941 953
942 954 vector = rp->r_trapno;
943 955 vecp = xv_vector(cpu->cpu_id, vector);
944 956 #ifdef TRAPTRACE
945 957 ttp->ttr_vector = (short)vector;
946 958 #endif /* TRAPTRACE */
947 959
948 960 /*
949 961 * Direct dispatch for IPI, MSI, MSI-X
950 962 */
951 963 if (vecp && vecp->v_type != APIX_TYPE_FIXED &&
952 964 newipl > MAX(oldipl, cpu->cpu_base_spl)) {
953 965 caddr_t newsp;
954 966
955 967 if (INTR_PENDING(apixs[cpu->cpu_id], newipl)) {
956 968 /*
957 969 * There are already vectors pending at newipl,
958 970 * queue this one and fall through to process
959 971 * all pending.
960 972 */
961 973 apix_add_pending_hardint(vector);
962 974 } else if (newipl > LOCK_LEVEL) {
963 975 if (apix_hilevel_intr_prolog(cpu, newipl, oldipl, rp)
964 976 == 0) {
965 977 newsp = cpu->cpu_intr_stack;
966 978 switch_sp_and_call(newsp, apix_dispatch_hilevel,
967 979 vector, 0);
968 980 } else {
969 981 apix_dispatch_hilevel(vector, 0);
970 982 }
971 983 (void) apix_hilevel_intr_epilog(cpu, oldipl);
972 984 } else {
973 985 newsp = apix_intr_thread_prolog(cpu, newipl,
974 986 (caddr_t)rp);
975 987 switch_sp_and_call(newsp, apix_dispatch_lowlevel,
976 988 vector, oldipl);
977 989 }
978 990 } else {
979 991 /* Add to per-pil pending queue */
980 992 apix_add_pending_hardint(vector);
981 993 if (newipl <= MAX(oldipl, cpu->cpu_base_spl) ||
982 994 !apixs[cpu->cpu_id]->x_intr_pending)
983 995 return;
984 996 }
985 997
986 998 do_pending:
987 999 if (apix_do_pending_hilevel(cpu, rp) < 0)
988 1000 return;
989 1001
990 1002 do {
991 1003 ret = apix_do_pending_hardint(cpu, rp);
992 1004
993 1005 /*
994 1006 * Deliver any pending soft interrupts.
995 1007 */
996 1008 (void) apix_do_softint(rp);
997 1009 } while (!ret && LOWLEVEL_PENDING(cpu));
998 1010 }
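
Taken together, the hooks added in this file pair up as follows: apix_do_softint_prolog() and apix_intr_thread_prolog() call ht_begin_intr(pil) immediately after pointing cpu_thread at the interrupt thread, and their epilogs call ht_end_intr() immediately before restoring cpu_thread; apix_hilevel_intr_prolog() and apix_hilevel_intr_epilog(), which never switch threads, call the hooks around the high-level handler in the same spirit. apix_do_interrupt() itself is otherwise unchanged and picks these pairings up through the existing prolog/epilog calls.
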
(229 lines elided)