10924 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Peter Tribble <peter.tribble@gmail.com>


=== Original file ===

   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
  24  * Copyright 2018 Western Digital Corporation.  All rights reserved.
  25  */
  26 
  27 #include <sys/cpuvar.h>
  28 #include <sys/cpu_event.h>
  29 #include <sys/param.h>
  30 #include <sys/cmn_err.h>
  31 #include <sys/t_lock.h>
  32 #include <sys/kmem.h>
  33 #include <sys/machlock.h>
  34 #include <sys/systm.h>
  35 #include <sys/archsystm.h>
  36 #include <sys/atomic.h>
  37 #include <sys/sdt.h>
  38 #include <sys/processor.h>
  39 #include <sys/time.h>
  40 #include <sys/psm.h>
  41 #include <sys/smp_impldefs.h>
  42 #include <sys/cram.h>
  43 #include <sys/apic.h>
  44 #include <sys/pit.h>


  51 #include <sys/cpc_impl.h>
  52 #include <sys/uadmin.h>
  53 #include <sys/panic.h>
  54 #include <sys/debug.h>
  55 #include <sys/trap.h>
  56 #include <sys/machsystm.h>
  57 #include <sys/sysmacros.h>
  58 #include <sys/rm_platter.h>
  59 #include <sys/privregs.h>
  60 #include <sys/note.h>
  61 #include <sys/pci_intr_lib.h>
  62 #include <sys/spl.h>
  63 #include <sys/clock.h>
  64 #include <sys/dditypes.h>
  65 #include <sys/sunddi.h>
  66 #include <sys/x_call.h>
  67 #include <sys/reboot.h>
  68 #include <vm/hat_i86.h>
  69 #include <sys/stack.h>
   70 #include <sys/apix.h>
   71 
  72 static void apix_post_hardint(int);
  73 
  74 /*
   75  * Insert a vector into the tail of the interrupt pending list
  76  */
  77 static __inline__ void
  78 apix_insert_pending_av(apix_impl_t *apixp, struct autovec *avp, int ipl)
  79 {
  80         struct autovec **head = apixp->x_intr_head;
  81         struct autovec **tail = apixp->x_intr_tail;
  82 
  83         avp->av_ipl_link = NULL;
  84         if (tail[ipl] == NULL) {
  85                 head[ipl] = tail[ipl] = avp;
  86                 return;
  87         }
  88 
  89         tail[ipl]->av_ipl_link = avp;
  90         tail[ipl] = avp;
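
For readers unfamiliar with this structure: x_intr_head[] and x_intr_tail[] hold one singly-linked pending list per IPL, so appending is O(1). Below is a minimal, self-contained sketch of the same technique; the names (av_node, pending_insert, NIPL) are illustrative stand-ins, not the kernel's.

#include <stddef.h>

#define	NIPL	16			/* illustrative number of IPLs */

struct av_node {
	struct av_node *link;		/* stands in for av_ipl_link */
};

static struct av_node *head[NIPL], *tail[NIPL];

static void
pending_insert(struct av_node *avp, int ipl)
{
	avp->link = NULL;
	if (tail[ipl] == NULL) {	/* empty list: avp is head and tail */
		head[ipl] = tail[ipl] = avp;
		return;
	}
	tail[ipl]->link = avp;		/* otherwise chain after the old tail */
	tail[ipl] = avp;
}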


 263          * Note that the code in kcpc_overflow_intr -relies- on the
 264          * ordering of events here - in particular that t->t_lwp of
 265          * the interrupt thread is set to the pinned thread *before*
 266          * curthread is changed.
 267          */
 268         it->t_lwp = t->t_lwp;
 269         it->t_state = TS_ONPROC;
 270 
 271         /*
  272          * Link the interrupted thread to the new thread via t_intr.
  273          * Set the new thread as the current one.
  274          * Set the interrupted thread's t_sp because if it is the idle thread,
 275          * resume() may use that stack between threads.
 276          */
 277 
 278         ASSERT(SA((uintptr_t)stackptr) == (uintptr_t)stackptr);
 279         t->t_sp = (uintptr_t)stackptr;
 280 
 281         it->t_intr = t;
  282         cpu->cpu_thread = it;
  283 
 284         /*
 285          * Set bit for this pil in CPU's interrupt active bitmask.
 286          */
 287         ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);
 288         cpu->cpu_intr_actv |= (1 << pil);
 289 
 290         /*
 291          * Initialize thread priority level from intr_pri
 292          */
 293         it->t_pil = (uchar_t)pil;
 294         it->t_pri = (pri_t)pil + intr_pri;
 295         it->t_intr_start = now;
 296 
 297         return (it->t_stk);
 298 }
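
The block comment above about kcpc_overflow_intr is the key ordering constraint in this prologue. Here is a toy model of the pinning sequence, in the same order as the code above; all types and names are illustrative stand-ins, not the kernel's.

/* Toy model; all names are illustrative stand-ins. */
struct toy_thread {
	struct toy_thread *t_intr;	/* thread this one has pinned */
	void		*t_lwp;		/* lwp borrowed from the pinned thread */
	unsigned long	t_sp;		/* saved stack pointer */
};

struct toy_cpu {
	struct toy_thread *cpu_thread;	/* current thread on this CPU */
};

static void
toy_pin(struct toy_cpu *cpu, struct toy_thread *it, unsigned long sp)
{
	struct toy_thread *t = cpu->cpu_thread;

	it->t_lwp = t->t_lwp;	/* 1: copy t_lwp first (kcpc relies on it) */
	t->t_sp = sp;		/* 2: save the interrupted stack pointer */
	it->t_intr = t;		/* 3: remember which thread is pinned */
	cpu->cpu_thread = it;	/* 4: only now change curthread */
}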
 299 
 300 static void
 301 apix_do_softint_epilog(struct cpu *cpu, uint_t oldpil)
 302 {


 333 
 334                 /*
 335                  * If there are pending interrupts, send a softint to
 336                  * re-enter apix_do_interrupt() and get them processed.
 337                  */
 338                 if (apixs[cpu->cpu_id]->x_intr_pending)
 339                         siron();
 340 
 341                 it->t_state = TS_FREE;
 342                 it->t_link = cpu->cpu_intr_thread;
 343                 cpu->cpu_intr_thread = it;
 344                 (void) splhigh();
 345                 sti();
 346                 swtch();
 347                 /*NOTREACHED*/
 348                 panic("dosoftint_epilog: swtch returned");
 349         }
 350         it->t_link = cpu->cpu_intr_thread;
 351         cpu->cpu_intr_thread = it;
  352         it->t_state = TS_FREE;
  353         cpu->cpu_thread = t;
 354         if (t->t_flag & T_INTR_THREAD)
 355                 t->t_intr_start = now;
 356         basespl = cpu->cpu_base_spl;
 357         pil = MAX(oldpil, basespl);
 358         mcpu->mcpu_pri = pil;
 359 }
 360 
 361 /*
 362  * Dispatch a soft interrupt
 363  */
 364 static void
 365 apix_dispatch_softint(uint_t oldpil, uint_t arg2)
 366 {
 367         struct cpu *cpu = CPU;
 368 
 369         UNREFERENCED_1PARAMETER(arg2);
 370 
 371         sti();
 372         av_dispatch_softvect((int)cpu->cpu_thread->t_pil);
 373         cli();


 449                 intrtime = now -
 450                     mcpu->pil_high_start[nestpil - (LOCK_LEVEL + 1)];
 451                 mcpu->intrstat[nestpil][0] += intrtime;
 452                 cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
 453         } else {
 454                 kthread_t *t = cpu->cpu_thread;
 455 
 456                 /*
 457                  * See if we are interrupting a low-level interrupt thread.
 458                  * If so, account for its time slice only if its time stamp
 459                  * is non-zero.
 460                  */
 461                 if ((t->t_flag & T_INTR_THREAD) != 0 && t->t_intr_start != 0) {
 462                         intrtime = now - t->t_intr_start;
 463                         mcpu->intrstat[t->t_pil][0] += intrtime;
 464                         cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
 465                         t->t_intr_start = 0;
 466                 }
 467         }
  468 
  469         /* store starting timestamp in CPU structure for this IPL */
 470         mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)] = now;
 471 
 472         if (pil == 15) {
 473                 /*
 474                  * To support reentrant level 15 interrupts, we maintain a
 475                  * recursion count in the top half of cpu_intr_actv.  Only
 476                  * when this count hits zero do we clear the PIL 15 bit from
 477                  * the lower half of cpu_intr_actv.
 478                  */
 479                 uint16_t *refcntp = (uint16_t *)&cpu->cpu_intr_actv + 1;
 480                 (*refcntp)++;
 481         }
 482 
 483         cpu->cpu_intr_actv |= (1 << pil);
 484         /* clear pending ipl level bit */
 485         apixp->x_intr_pending &= ~(1 << pil);
 486 
 487         return (mask);
 488 }
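
A standalone sketch of the PIL-15 encoding described in the comment above: the low half of a 32-bit word is the active-PIL bitmask, and the high half counts nested level-15 interrupts. The pointer arithmetic assumes a little-endian layout, as on x86; names here are illustrative, not the kernel's.

#include <stdint.h>

/* Low 16 bits: active-PIL mask.  High 16 bits: PIL-15 nesting depth. */
static uint32_t intr_actv;

static void
enter_pil15(void)
{
	/* +1 selects the high half on a little-endian CPU such as x86. */
	uint16_t *refcntp = (uint16_t *)&intr_actv + 1;

	(*refcntp)++;			/* one more nested level-15 handler */
	intr_actv |= (1 << 15);
}

static void
exit_pil15(void)
{
	uint16_t *refcntp = (uint16_t *)&intr_actv + 1;

	if (--(*refcntp) == 0)		/* clear mask bit only at depth zero */
		intr_actv &= ~(1 << 15);
}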


 539                 ASSERT(nestpil < pil);
 540                 mcpu->pil_high_start[nestpil - (LOCK_LEVEL + 1)] = now;
 541                 /*
 542                  * (Another high-level interrupt is active below this one,
 543                  * so there is no need to check for an interrupt
 544                  * thread.  That will be done by the lowest priority
 545                  * high-level interrupt active.)
 546                  */
 547         } else {
 548                 /*
 549                  * Check to see if there is a low-level interrupt active.
 550                  * If so, place a starting timestamp in the thread
 551                  * structure.
 552                  */
 553                 kthread_t *t = cpu->cpu_thread;
 554 
 555                 if (t->t_flag & T_INTR_THREAD)
 556                         t->t_intr_start = now;
 557         }
  558 
 559         mcpu->mcpu_pri = oldpil;
 560         if (pil < CBE_HIGH_PIL)
 561                 (void) (*setlvlx)(oldpil, 0);
 562 
 563         return (mask);
 564 }
 565 
 566 /*
 567  * Dispatch a hilevel interrupt (one above LOCK_LEVEL)
 568  */
 569 static void
 570 apix_dispatch_pending_hilevel(uint_t ipl, uint_t arg2)
 571 {
 572         UNREFERENCED_1PARAMETER(arg2);
 573 
 574         apix_dispatch_pending_autovect(ipl);
 575 }
 576 
 577 static __inline__ int
 578 apix_do_pending_hilevel(struct cpu *cpu, struct regs *rp)


 651 
 652         /*
 653          * Note that the code in kcpc_overflow_intr -relies- on the
 654          * ordering of events here - in particular that t->t_lwp of
 655          * the interrupt thread is set to the pinned thread *before*
 656          * curthread is changed.
 657          */
 658         it = cpu->cpu_intr_thread;
 659         cpu->cpu_intr_thread = it->t_link;
 660         it->t_intr = t;
 661         it->t_lwp = t->t_lwp;
 662 
 663         /*
 664          * (threads on the interrupt thread free list could have state
 665          * preset to TS_ONPROC, but it helps in debugging if
 666          * they're TS_FREE.)
 667          */
 668         it->t_state = TS_ONPROC;
 669 
  670         cpu->cpu_thread = it;
  671 
 672         /*
 673          * Initialize thread priority level from intr_pri
 674          */
 675         it->t_pil = (uchar_t)pil;
 676         it->t_pri = (pri_t)pil + intr_pri;
 677         it->t_intr_start = now;
 678 
 679         return (it->t_stk);
 680 }
 681 
 682 static void
 683 apix_intr_thread_epilog(struct cpu *cpu, uint_t oldpil)
 684 {
 685         struct machcpu *mcpu = &cpu->cpu_m;
 686         kthread_t *t, *it = cpu->cpu_thread;
 687         uint_t pil, basespl;
 688         hrtime_t intrtime;
 689         hrtime_t now = tsc_read();
 690 


 739                 /*
 740                  * Return interrupt thread to pool
 741                  */
 742                 it->t_link = cpu->cpu_intr_thread;
 743                 cpu->cpu_intr_thread = it;
 744 
 745                 (void) splhigh();
 746                 sti();
 747                 swtch();
 748                 /*NOTREACHED*/
  749                 panic("apix_intr_thread_epilog: swtch returned");
 750         }
 751 
 752         /*
 753          * Return interrupt thread to the pool
 754          */
 755         it->t_link = cpu->cpu_intr_thread;
 756         cpu->cpu_intr_thread = it;
 757         it->t_state = TS_FREE;
  758 
  759         cpu->cpu_thread = t;
 760         if (t->t_flag & T_INTR_THREAD)
 761                 t->t_intr_start = now;
 762         basespl = cpu->cpu_base_spl;
 763         mcpu->mcpu_pri = MAX(oldpil, basespl);
 764         (*setlvlx)(mcpu->mcpu_pri, 0);
 765 }
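
The "return interrupt thread to pool" steps above are a plain LIFO free list threaded through t_link; the prologue earlier in the file pops a thread off the same list. A minimal sketch of that discipline, with hypothetical types (the kernel's list head is the per-CPU cpu_intr_thread):

/* Hypothetical types; the kernel's list head is cpu->cpu_intr_thread. */
struct ithread {
	struct ithread *t_link;		/* next free interrupt thread */
};

static struct ithread *pool;		/* LIFO free list of interrupt threads */

static struct ithread *
ithread_get(void)			/* prologue side: pop one */
{
	struct ithread *it = pool;

	pool = it->t_link;		/* caller guarantees the pool is not empty */
	return (it);
}

static void
ithread_put(struct ithread *it)		/* epilog side: push it back */
{
	it->t_link = pool;
	pool = it;
}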
 766 
 767 
 768 static void
 769 apix_dispatch_pending_hardint(uint_t oldpil, uint_t arg2)
 770 {
 771         struct cpu *cpu = CPU;
 772 
 773         UNREFERENCED_1PARAMETER(arg2);
 774 
 775         apix_dispatch_pending_autovect((int)cpu->cpu_thread->t_pil);
 776 
 777         /*
 778          * Must run intr_thread_epilog() on the interrupt thread stack, since
 779          * there may not be a return from it if the interrupt thread blocked.




=== Updated file (adds #include <sys/ht.h> and the ht_begin_intr()/ht_end_intr() calls) ===

   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
  24  * Copyright 2018 Western Digital Corporation.  All rights reserved.
  25  * Copyright 2018 Joyent, Inc.
  26  */
  27 
  28 #include <sys/cpuvar.h>
  29 #include <sys/cpu_event.h>
  30 #include <sys/param.h>
  31 #include <sys/cmn_err.h>
  32 #include <sys/t_lock.h>
  33 #include <sys/kmem.h>
  34 #include <sys/machlock.h>
  35 #include <sys/systm.h>
  36 #include <sys/archsystm.h>
  37 #include <sys/atomic.h>
  38 #include <sys/sdt.h>
  39 #include <sys/processor.h>
  40 #include <sys/time.h>
  41 #include <sys/psm.h>
  42 #include <sys/smp_impldefs.h>
  43 #include <sys/cram.h>
  44 #include <sys/apic.h>
  45 #include <sys/pit.h>


  52 #include <sys/cpc_impl.h>
  53 #include <sys/uadmin.h>
  54 #include <sys/panic.h>
  55 #include <sys/debug.h>
  56 #include <sys/trap.h>
  57 #include <sys/machsystm.h>
  58 #include <sys/sysmacros.h>
  59 #include <sys/rm_platter.h>
  60 #include <sys/privregs.h>
  61 #include <sys/note.h>
  62 #include <sys/pci_intr_lib.h>
  63 #include <sys/spl.h>
  64 #include <sys/clock.h>
  65 #include <sys/dditypes.h>
  66 #include <sys/sunddi.h>
  67 #include <sys/x_call.h>
  68 #include <sys/reboot.h>
  69 #include <vm/hat_i86.h>
  70 #include <sys/stack.h>
  71 #include <sys/apix.h>
  72 #include <sys/ht.h>
  73 
  74 static void apix_post_hardint(int);
  75 
  76 /*
   77  * Insert a vector into the tail of the interrupt pending list
  78  */
  79 static __inline__ void
  80 apix_insert_pending_av(apix_impl_t *apixp, struct autovec *avp, int ipl)
  81 {
  82         struct autovec **head = apixp->x_intr_head;
  83         struct autovec **tail = apixp->x_intr_tail;
  84 
  85         avp->av_ipl_link = NULL;
  86         if (tail[ipl] == NULL) {
  87                 head[ipl] = tail[ipl] = avp;
  88                 return;
  89         }
  90 
  91         tail[ipl]->av_ipl_link = avp;
  92         tail[ipl] = avp;


 265          * Note that the code in kcpc_overflow_intr -relies- on the
 266          * ordering of events here - in particular that t->t_lwp of
 267          * the interrupt thread is set to the pinned thread *before*
 268          * curthread is changed.
 269          */
 270         it->t_lwp = t->t_lwp;
 271         it->t_state = TS_ONPROC;
 272 
 273         /*
  274          * Link the interrupted thread to the new thread via t_intr.
  275          * Set the new thread as the current one.
  276          * Set the interrupted thread's t_sp because if it is the idle thread,
 277          * resume() may use that stack between threads.
 278          */
 279 
 280         ASSERT(SA((uintptr_t)stackptr) == (uintptr_t)stackptr);
 281         t->t_sp = (uintptr_t)stackptr;
 282 
 283         it->t_intr = t;
 284         cpu->cpu_thread = it;
 285         ht_begin_intr(pil);
 286 
 287         /*
 288          * Set bit for this pil in CPU's interrupt active bitmask.
 289          */
 290         ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);
 291         cpu->cpu_intr_actv |= (1 << pil);
 292 
 293         /*
 294          * Initialize thread priority level from intr_pri
 295          */
 296         it->t_pil = (uchar_t)pil;
 297         it->t_pri = (pri_t)pil + intr_pri;
 298         it->t_intr_start = now;
 299 
 300         return (it->t_stk);
 301 }
 302 
 303 static void
 304 apix_do_softint_epilog(struct cpu *cpu, uint_t oldpil)
 305 {


 336 
 337                 /*
 338                  * If there are pending interrupts, send a softint to
 339                  * re-enter apix_do_interrupt() and get them processed.
 340                  */
 341                 if (apixs[cpu->cpu_id]->x_intr_pending)
 342                         siron();
 343 
 344                 it->t_state = TS_FREE;
 345                 it->t_link = cpu->cpu_intr_thread;
 346                 cpu->cpu_intr_thread = it;
 347                 (void) splhigh();
 348                 sti();
 349                 swtch();
 350                 /*NOTREACHED*/
 351                 panic("dosoftint_epilog: swtch returned");
 352         }
 353         it->t_link = cpu->cpu_intr_thread;
 354         cpu->cpu_intr_thread = it;
 355         it->t_state = TS_FREE;
 356         ht_end_intr();
 357         cpu->cpu_thread = t;
 358 
 359         if (t->t_flag & T_INTR_THREAD)
 360                 t->t_intr_start = now;
 361         basespl = cpu->cpu_base_spl;
 362         pil = MAX(oldpil, basespl);
 363         mcpu->mcpu_pri = pil;
 364 }
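
The heart of this change is the pairing visible above: ht_begin_intr(pil) runs right after cpu_thread switches onto the interrupt thread, and ht_end_intr() runs right before it switches back to the pinned thread, so the L1TF hyperthread-exclusion logic always knows when this CPU is executing interrupt code. A self-contained toy model of the bracketing order follows; the toy_* names are stand-ins, not kernel APIs.

/* Toy model; toy_* names are stand-ins, not kernel APIs. */
struct toy_cpu {
	int	cpu_thread;	/* 0 = pinned thread, 1 = interrupt thread */
	int	in_intr;	/* what ht_begin_intr()/ht_end_intr() track */
};

static void
toy_ht_begin_intr(struct toy_cpu *c, int pil)
{
	(void) pil;
	c->in_intr = 1;		/* sibling exclusion can engage from here... */
}

static void
toy_ht_end_intr(struct toy_cpu *c)
{
	c->in_intr = 0;		/* ...until here */
}

static void
toy_handle_intr(struct toy_cpu *c, int pil)
{
	c->cpu_thread = 1;	/* switch onto the interrupt thread first */
	toy_ht_begin_intr(c, pil);

	/* ... dispatch handlers at this PIL ... */

	toy_ht_end_intr(c);	/* leave interrupt context... */
	c->cpu_thread = 0;	/* ...before restoring the pinned thread */
}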
 365 
 366 /*
 367  * Dispatch a soft interrupt
 368  */
 369 static void
 370 apix_dispatch_softint(uint_t oldpil, uint_t arg2)
 371 {
 372         struct cpu *cpu = CPU;
 373 
 374         UNREFERENCED_1PARAMETER(arg2);
 375 
 376         sti();
 377         av_dispatch_softvect((int)cpu->cpu_thread->t_pil);
 378         cli();


 454                 intrtime = now -
 455                     mcpu->pil_high_start[nestpil - (LOCK_LEVEL + 1)];
 456                 mcpu->intrstat[nestpil][0] += intrtime;
 457                 cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
 458         } else {
 459                 kthread_t *t = cpu->cpu_thread;
 460 
 461                 /*
 462                  * See if we are interrupting a low-level interrupt thread.
 463                  * If so, account for its time slice only if its time stamp
 464                  * is non-zero.
 465                  */
 466                 if ((t->t_flag & T_INTR_THREAD) != 0 && t->t_intr_start != 0) {
 467                         intrtime = now - t->t_intr_start;
 468                         mcpu->intrstat[t->t_pil][0] += intrtime;
 469                         cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
 470                         t->t_intr_start = 0;
 471                 }
 472         }
 473 
 474         ht_begin_intr(pil);
 475 
  476         /* store starting timestamp in CPU structure for this IPL */
 477         mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)] = now;
 478 
 479         if (pil == 15) {
 480                 /*
 481                  * To support reentrant level 15 interrupts, we maintain a
 482                  * recursion count in the top half of cpu_intr_actv.  Only
 483                  * when this count hits zero do we clear the PIL 15 bit from
 484                  * the lower half of cpu_intr_actv.
 485                  */
 486                 uint16_t *refcntp = (uint16_t *)&cpu->cpu_intr_actv + 1;
 487                 (*refcntp)++;
 488         }
 489 
 490         cpu->cpu_intr_actv |= (1 << pil);
 491         /* clear pending ipl level bit */
 492         apixp->x_intr_pending &= ~(1 << pil);
 493 
 494         return (mask);
 495 }


 546                 ASSERT(nestpil < pil);
 547                 mcpu->pil_high_start[nestpil - (LOCK_LEVEL + 1)] = now;
 548                 /*
 549                  * (Another high-level interrupt is active below this one,
 550                  * so there is no need to check for an interrupt
 551                  * thread.  That will be done by the lowest priority
 552                  * high-level interrupt active.)
 553                  */
 554         } else {
 555                 /*
 556                  * Check to see if there is a low-level interrupt active.
 557                  * If so, place a starting timestamp in the thread
 558                  * structure.
 559                  */
 560                 kthread_t *t = cpu->cpu_thread;
 561 
 562                 if (t->t_flag & T_INTR_THREAD)
 563                         t->t_intr_start = now;
 564         }
 565 
 566         ht_end_intr();
 567 
 568         mcpu->mcpu_pri = oldpil;
 569         if (pil < CBE_HIGH_PIL)
 570                 (void) (*setlvlx)(oldpil, 0);
 571 
 572         return (mask);
 573 }
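
High-level interrupt time accounting above hinges on the pil_high_start[] indexing: slot 0 is PIL LOCK_LEVEL + 1, and each epilog charges now - start to the per-PIL counters. A small sketch of the arithmetic (simplified: the kernel's intrstat is two-dimensional, and LOCK_LEVEL is 10 on x86):

#include <stdint.h>

#define	LOCK_LEVEL	10			/* x86: PILs 11..15 are high-level */
#define	HIGH_LEVELS	(15 - LOCK_LEVEL)

static uint64_t pil_high_start[HIGH_LEVELS];	/* per-PIL start stamps */
static uint64_t intrstat[16];			/* simplified per-PIL totals */

static void
account_hilevel(unsigned pil, uint64_t now)
{
	/* Slot 0 corresponds to PIL LOCK_LEVEL + 1. */
	uint64_t start = pil_high_start[pil - (LOCK_LEVEL + 1)];

	intrstat[pil] += now - start;		/* charge elapsed TSC ticks */
}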
 574 
 575 /*
 576  * Dispatch a hilevel interrupt (one above LOCK_LEVEL)
 577  */
 578 static void
 579 apix_dispatch_pending_hilevel(uint_t ipl, uint_t arg2)
 580 {
 581         UNREFERENCED_1PARAMETER(arg2);
 582 
 583         apix_dispatch_pending_autovect(ipl);
 584 }
 585 
 586 static __inline__ int
 587 apix_do_pending_hilevel(struct cpu *cpu, struct regs *rp)


 660 
 661         /*
 662          * Note that the code in kcpc_overflow_intr -relies- on the
 663          * ordering of events here - in particular that t->t_lwp of
 664          * the interrupt thread is set to the pinned thread *before*
 665          * curthread is changed.
 666          */
 667         it = cpu->cpu_intr_thread;
 668         cpu->cpu_intr_thread = it->t_link;
 669         it->t_intr = t;
 670         it->t_lwp = t->t_lwp;
 671 
 672         /*
 673          * (threads on the interrupt thread free list could have state
 674          * preset to TS_ONPROC, but it helps in debugging if
 675          * they're TS_FREE.)
 676          */
 677         it->t_state = TS_ONPROC;
 678 
 679         cpu->cpu_thread = it;
 680         ht_begin_intr(pil);
 681 
 682         /*
 683          * Initialize thread priority level from intr_pri
 684          */
 685         it->t_pil = (uchar_t)pil;
 686         it->t_pri = (pri_t)pil + intr_pri;
 687         it->t_intr_start = now;
 688 
 689         return (it->t_stk);
 690 }
 691 
 692 static void
 693 apix_intr_thread_epilog(struct cpu *cpu, uint_t oldpil)
 694 {
 695         struct machcpu *mcpu = &cpu->cpu_m;
 696         kthread_t *t, *it = cpu->cpu_thread;
 697         uint_t pil, basespl;
 698         hrtime_t intrtime;
 699         hrtime_t now = tsc_read();
 700 


 749                 /*
 750                  * Return interrupt thread to pool
 751                  */
 752                 it->t_link = cpu->cpu_intr_thread;
 753                 cpu->cpu_intr_thread = it;
 754 
 755                 (void) splhigh();
 756                 sti();
 757                 swtch();
 758                 /*NOTREACHED*/
  759                 panic("apix_intr_thread_epilog: swtch returned");
 760         }
 761 
 762         /*
 763          * Return interrupt thread to the pool
 764          */
 765         it->t_link = cpu->cpu_intr_thread;
 766         cpu->cpu_intr_thread = it;
 767         it->t_state = TS_FREE;
 768 
 769         ht_end_intr();
 770         cpu->cpu_thread = t;
 771 
 772         if (t->t_flag & T_INTR_THREAD)
 773                 t->t_intr_start = now;
 774         basespl = cpu->cpu_base_spl;
 775         mcpu->mcpu_pri = MAX(oldpil, basespl);
 776         (*setlvlx)(mcpu->mcpu_pri, 0);
 777 }
 778 
 779 
 780 static void
 781 apix_dispatch_pending_hardint(uint_t oldpil, uint_t arg2)
 782 {
 783         struct cpu *cpu = CPU;
 784 
 785         UNREFERENCED_1PARAMETER(arg2);
 786 
 787         apix_dispatch_pending_autovect((int)cpu->cpu_thread->t_pil);
 788 
 789         /*
 790          * Must run intr_thread_epilog() on the interrupt thread stack, since
 791          * there may not be a return from it if the interrupt thread blocked.