10924 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Peter Tribble <peter.tribble@gmail.com>
@@ -73,10 +73,11 @@
#include <sys/schedctl.h>
#include <sys/waitq.h>
#include <sys/cpucaps.h>
#include <sys/kiconv.h>
#include <sys/ctype.h>
+#include <sys/ht.h>
struct kmem_cache *thread_cache; /* cache of free threads */
struct kmem_cache *lwp_cache; /* cache of free lwps */
struct kmem_cache *turnstile_cache; /* cache of free turnstiles */
@@ -485,41 +486,38 @@
	kpreempt_disable();
	curthread->t_prev->t_next = t;
	curthread->t_prev = t;

	/*
-	 * Threads should never have a NULL t_cpu pointer so assign it
-	 * here. If the thread is being created with state TS_RUN a
-	 * better CPU may be chosen when it is placed on the run queue.
-	 *
-	 * We need to keep kernel preemption disabled when setting all
-	 * three fields to keep them in sync. Also, always create in
-	 * the default partition since that's where kernel threads go
-	 * (if this isn't a kernel thread, t_cpupart will be changed
-	 * in lwp_create before setting the thread runnable).
+	 * We'll always create in the default partition since that's where
+	 * kernel threads go (we'll change this later if needed, in
+	 * lwp_create()).
	 */
	t->t_cpupart = &cp_default;

	/*
	 * For now, affiliate this thread with the root lgroup.
	 * Since the kernel does not (presently) allocate its memory
	 * in a locality aware fashion, the root is an appropriate home.
	 * If this thread is later associated with an lwp, it will have
-	 * it's lgroup re-assigned at that time.
+	 * its lgroup re-assigned at that time.
	 */
	lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);

	/*
-	 * Inherit the current cpu. If this cpu isn't part of the chosen
-	 * lgroup, a new cpu will be chosen by cpu_choose when the thread
-	 * is ready to run.
+	 * If the current CPU is in the default cpupart, use it. Otherwise,
+	 * pick one that is; before entering the dispatcher code, we'll
+	 * make sure to keep the invariant that ->t_cpu is set. (In fact, we
+	 * rely on this, in ht_should_run(), in the call tree of
+	 * disp_lowpri_cpu().)
	 */
-	if (CPU->cpu_part == &cp_default)
+	if (CPU->cpu_part == &cp_default) {
		t->t_cpu = CPU;
-	else
-		t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
-		    t->t_pri, NULL);
+	} else {
+		t->t_cpu = cp_default.cp_cpulist;
+		t->t_cpu = disp_lowpri_cpu(t->t_cpu, t, t->t_pri);
+	}

	t->t_disp_queue = t->t_cpu->cpu_disp;
	kpreempt_enable();

	/*
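Note on the disp_lowpri_cpu() change above: the function now takes the thread itself rather than an lpl/priority pair, because its call tree consults ht_should_run() to decide whether a CPU's hyperthread sibling makes it unsafe, and that check needs a valid t->t_cpu; hence the else branch seeds t->t_cpu with cp_default.cp_cpulist before asking the dispatcher to refine the choice. A minimal userland sketch of that shape follows; all types, fields, and the veto logic are illustrative stand-ins, not the committed kernel code.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Stand-in types; the real cpu_t/kthread_t live in the kernel headers. */
typedef struct cpu {
	struct cpu *cpu_next_part;	/* circular list of CPUs in partition */
	int cpu_dispri;			/* pri of best thread queued here */
	bool cpu_sibling_unsafe;	/* stand-in: sibling holds foreign state */
} cpu_t;

typedef struct kthread {
	cpu_t *t_cpu;			/* the invariant: never NULL here */
	int t_pri;
} kthread_t;

/* Stand-in for ht_should_run(): veto a CPU whose sibling is unsafe. */
static bool
ht_should_run(kthread_t *t, cpu_t *cp)
{
	(void)t;
	return (!cp->cpu_sibling_unsafe);
}

/*
 * Shape of the new disp_lowpri_cpu(): walk the partition starting at the
 * hint (t->t_cpu), skip vetoed CPUs, prefer the lowest disp priority.
 */
static cpu_t *
disp_lowpri_cpu(cpu_t *hint, kthread_t *t, int tpri)
{
	cpu_t *cp = hint;
	cpu_t *best = NULL;

	(void)tpri;			/* the sketch ignores preemption checks */
	assert(t->t_cpu != NULL);	/* why the caller seeds t_cpu first */

	do {
		if (ht_should_run(t, cp) &&
		    (best == NULL || cp->cpu_dispri < best->cpu_dispri))
			best = cp;
		cp = cp->cpu_next_part;
	} while (cp != hint);

	return (best != NULL ? best : hint);	/* all vetoed: keep the hint */
}

int
main(void)
{
	cpu_t c0 = { .cpu_dispri = 10 };
	cpu_t c1 = { .cpu_dispri = 5, .cpu_sibling_unsafe = true };
	kthread_t t = { .t_pri = 60 };

	c0.cpu_next_part = &c1;
	c1.cpu_next_part = &c0;

	t.t_cpu = &c0;			/* seed the invariant, as the hunk does */
	assert(disp_lowpri_cpu(t.t_cpu, &t, t.t_pri) == &c0);	/* c1 vetoed */
	return (0);
}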
@@ -1324,10 +1322,12 @@
	ASSERT(t->t_intr != NULL);

	itp = t->t_intr;		/* interrupted thread */
	t->t_intr = NULL;		/* clear interrupt ptr */

+	ht_end_intr();
+
	/*
	 * Get state from interrupt thread for the one
	 * it interrupted.
	 */
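Note on the ht_end_intr() call above: the mitigation tracks what each logical CPU is running so that a sibling hyperthread can be excluded while sensitive context is live; an interrupt thread borrows the CPU, so its exit must be reported when thread_unpin() restores the interrupted thread. A rough self-contained sketch of that bookkeeping, assuming a matching ht_begin_intr() entry hook; both bodies here are illustrative stand-ins, not the illumos implementation.

#include <assert.h>

/*
 * Stand-in per-CPU bookkeeping; the real state machine lives in the
 * mitigation code and coordinates with the sibling CPU.
 */
struct ht_state {
	int hs_intr_depth;	/* nested interrupts running on this CPU */
};

static struct ht_state cpu0_ht;

/* An interrupt thread has taken over the CPU (assumed counterpart hook). */
static void
ht_begin_intr(void)
{
	cpu0_ht.hs_intr_depth++;
	/* ...would mark this CPU's context so the sibling can be gated... */
}

/*
 * The interrupted thread is being restored (the hook the hunk adds):
 * once no interrupt context remains, the sibling may be released.
 */
static void
ht_end_intr(void)
{
	assert(cpu0_ht.hs_intr_depth > 0);
	cpu0_ht.hs_intr_depth--;
}

int
main(void)
{
	ht_begin_intr();	/* interrupt pins curthread */
	ht_end_intr();		/* thread_unpin() restores it, as above */
	assert(cpu0_ht.hs_intr_depth == 0);
	return (0);
}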