10924 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Peter Tribble <peter.tribble@gmail.com>

*** 464,492 ****
  #include <sys/archsystm.h>
  #include <sys/machsystm.h>
  #include <sys/ontrap.h>
  #include <sys/x86_archext.h>
  #include <sys/promif.h>
  #include <vm/hat_i86.h>
  
  #if defined(__xpv)
  #include <sys/hypervisor.h>
  #endif
  
! #if defined(__amd64) && !defined(__xpv)
! /* If this fails, then the padding numbers in machcpuvar.h are wrong. */
! CTASSERT((offsetof(cpu_t, cpu_m) + offsetof(struct machcpu, mcpu_pad)) <
!     MMU_PAGESIZE);
! CTASSERT((offsetof(cpu_t, cpu_m) + offsetof(struct machcpu, mcpu_kpti)) >=
!     MMU_PAGESIZE);
! CTASSERT((offsetof(cpu_t, cpu_m) + offsetof(struct machcpu, mcpu_kpti_dbg)) <
!     2 * MMU_PAGESIZE);
! CTASSERT((offsetof(cpu_t, cpu_m) + offsetof(struct machcpu, mcpu_pad2)) <
!     2 * MMU_PAGESIZE);
  CTASSERT(((sizeof (struct kpti_frame)) & 0xF) == 0);
- CTASSERT(((offsetof(cpu_t, cpu_m) +
-     offsetof(struct machcpu, mcpu_kpti_dbg)) & 0xF) == 0);
  CTASSERT((offsetof(struct kpti_frame, kf_tr_rsp) & 0xF) == 0);
  #endif
  
  #if defined(__xpv) && defined(DEBUG)
  /*
--- 464,489 ----
  #include <sys/archsystm.h>
  #include <sys/machsystm.h>
  #include <sys/ontrap.h>
  #include <sys/x86_archext.h>
  #include <sys/promif.h>
+ #include <sys/ht.h>
  #include <vm/hat_i86.h>
  
  #if defined(__xpv)
  #include <sys/hypervisor.h>
  #endif
  
! /* If these fail, then the padding numbers in machcpuvar.h are wrong. */
! #if !defined(__xpv)
! #define	MCOFF(member)	\
! 	(offsetof(cpu_t, cpu_m) + offsetof(struct machcpu, member))
! CTASSERT(MCOFF(mcpu_pad) == MACHCPU_SIZE);
! CTASSERT(MCOFF(mcpu_pad2) == MMU_PAGESIZE);
! CTASSERT((MCOFF(mcpu_kpti) & 0xF) == 0);
  CTASSERT(((sizeof (struct kpti_frame)) & 0xF) == 0);
  CTASSERT((offsetof(struct kpti_frame, kf_tr_rsp) & 0xF) == 0);
+ CTASSERT(MCOFF(mcpu_pad3) < 2 * MMU_PAGESIZE);
  #endif
  
  #if defined(__xpv) && defined(DEBUG)
  /*
*** 598,607 ****
--- 595,606 ----
  			cpu->cpu_intracct[cpu->cpu_mstate] += intrtime;
  			t->t_intr_start = 0;
  		}
  	}
  
+ 	ht_begin_intr(pil);
+ 
  	/*
  	 * Store starting timestamp in CPU structure for this PIL.
  	 */
  	mcpu->pil_high_start[pil - (LOCK_LEVEL + 1)] = now;
*** 702,711 ****
--- 701,712 ----
  		if (t->t_flag & T_INTR_THREAD)
  			t->t_intr_start = now;
  	}
  
+ 	ht_end_intr();
+ 
  	mcpu->mcpu_pri = oldpil;
  	(void) (*setlvlx)(oldpil, vecnum);
  
  	return (cpu->cpu_intr_actv & CPU_INTR_ACTV_HIGH_LEVEL_MASK);
  }
*** 764,773 ****
--- 765,776 ----
  	 * they're TS_FREE.)
  	 */
  	it->t_state = TS_ONPROC;
  
  	cpu->cpu_thread = it;		/* new curthread on this cpu */
+ 	ht_begin_intr(pil);
+ 
  	it->t_pil = (uchar_t)pil;
  	it->t_pri = intr_pri + (pri_t)pil;
  	it->t_intr_start = now;
  
  	return (it->t_stk);
*** 854,863 ****
--- 857,867 ----
  	basespl = cpu->cpu_base_spl;
  	pil = MAX(oldpil, basespl);
  	mcpu->mcpu_pri = pil;
  	(*setlvlx)(pil, vec);
  	t->t_intr_start = now;
+ 	ht_end_intr();
  	cpu->cpu_thread = t;
  }
  
  /*
   * intr_get_time() is a resource for interrupt handlers to determine how
*** 1041,1050 ****
--- 1045,1055 ----
  	ASSERT(SA((uintptr_t)stackptr) == (uintptr_t)stackptr);
  	t->t_sp = (uintptr_t)stackptr;
  
  	it->t_intr = t;
  	cpu->cpu_thread = it;
+ 	ht_begin_intr(pil);
  
  	/*
  	 * Set bit for this pil in CPU's interrupt active bitmask.
  	 */
  	ASSERT((cpu->cpu_intr_actv & (1 << pil)) == 0);
*** 1101,1111 ****
--- 1106,1118 ----
  		panic("dosoftint_epilog: swtch returned");
  	}
  	it->t_link = cpu->cpu_intr_thread;
  	cpu->cpu_intr_thread = it;
  	it->t_state = TS_FREE;
+ 	ht_end_intr();
  	cpu->cpu_thread = t;
+ 
  	if (t->t_flag & T_INTR_THREAD)
  		t->t_intr_start = now;
  	basespl = cpu->cpu_base_spl;
  	pil = MAX(oldpil, basespl);
  	mcpu->mcpu_pri = pil;