OS-7125 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>

@@ -397,10 +397,13 @@
  * CPU affinity is guaranteed after return of thread_affinity_set().  If a
  * caller setting affinity to CPU_CURRENT requires that its thread not migrate
  * CPUs prior to a successful return, it should take extra precautions (such as
  * its own call to kpreempt_disable) to ensure that safety.
  *
+ * CPU_BEST can be used to pick a "best" CPU to migrate to, including
+ * potentially the current CPU.
+ *
  * A CPU affinity reference count is maintained by thread_affinity_set and
  * thread_affinity_clear (incrementing and decrementing it, respectively),
  * maintaining CPU affinity while the count is non-zero, and allowing regions
  * of code which require affinity to be nested.
  */

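For reference, a minimal usage sketch of the interface this comment
describes (hypothetical caller; thread_affinity_set(),
thread_affinity_clear(), and the CPU_BEST token are the interfaces
touched by this change):

        /*
         * Sketch: move the current thread to a "best" CPU and hold it
         * there for a critical region.  The affinity reference count
         * lets this region nest inside another affinity hold.
         */
        thread_affinity_set(curthread, CPU_BEST);

        /* ... work that must not migrate off the chosen CPU ... */

        thread_affinity_clear(curthread);
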
@@ -413,10 +416,14 @@
 
         if (cpu_id == CPU_CURRENT) {
                 VERIFY3P(t, ==, curthread);
                 kpreempt_disable();
                 cp = CPU;
+        } else if (cpu_id == CPU_BEST) {
+                VERIFY3P(t, ==, curthread);
+                kpreempt_disable();
+                cp = disp_choose_best_cpu();
         } else {
                 /*
                  * We should be asserting that cpu_lock is held here, but
                  * the NCA code doesn't acquire it.  The following assert
                  * should be uncommented when the NCA code is fixed.

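Note that, as with CPU_CURRENT, the CPU_BEST path requires the calling
thread to be curthread and disables preemption before choosing a
target, so the chosen CPU cannot go stale before affinity is recorded.
The helper's prototype below is inferred from the call site, not
quoted from the dispatcher headers:

        /*
         * Assumed prototype, inferred from the call above; expected to
         * run with preemption disabled, and may return the current CPU.
         */
        extern cpu_t *disp_choose_best_cpu(void);
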
@@ -450,13 +457,12 @@
                 force_thread_migrate(t);        /* drops thread lock */
         } else {
                 thread_unlock(t);
         }
 
-        if (cpu_id == CPU_CURRENT) {
+        if (cpu_id == CPU_CURRENT || cpu_id == CPU_BEST)
                 kpreempt_enable();
-        }
 }
 
 /*
  *      Wrapper for backward compatibility.
  */

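A condensed view of how the preemption bracketing now pairs up across
thread_affinity_set(), assembled from the two hunks above with the
intervening affinity logic elided:

        if (cpu_id == CPU_CURRENT) {
                kpreempt_disable();
                cp = CPU;
        } else if (cpu_id == CPU_BEST) {
                kpreempt_disable();
                cp = disp_choose_best_cpu();
        }

        /* ... record affinity, possibly force a migration ... */

        if (cpu_id == CPU_CURRENT || cpu_id == CPU_BEST)
                kpreempt_enable();
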
@@ -1487,12 +1493,12 @@
 
                                 /*
                                  * Update CPU last ran on if it was this CPU
                                  */
                                 if (t->t_cpu == cp && t->t_bound_cpu != cp)
-                                        t->t_cpu = disp_lowpri_cpu(ncp,
-                                            t->t_lpl, t->t_pri, NULL);
+                                        t->t_cpu = disp_lowpri_cpu(ncp, t,
+                                            t->t_pri);
                                 ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
                                     t->t_weakbound_cpu == cp);
 
                                 t = t->t_forw;
                         } while (t != p->p_tlist);

@@ -1530,14 +1536,13 @@
 
                         /*
                          * Update CPU last ran on if it was this CPU
                          */
 
-                        if (t->t_cpu == cp && t->t_bound_cpu != cp) {
-                                t->t_cpu = disp_lowpri_cpu(ncp,
-                                    t->t_lpl, t->t_pri, NULL);
-                        }
+                        if (t->t_cpu == cp && t->t_bound_cpu != cp)
+                                t->t_cpu = disp_lowpri_cpu(ncp, t, t->t_pri);
+
                         ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
                             t->t_weakbound_cpu == cp);
                         t = t->t_next;
 
                 } while (t != curthread);
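
Both disp_lowpri_cpu() call sites now pass the thread itself rather
than its lpl, so the function can derive per-thread placement state
internally.  The prototypes below are inferred from the removed and
added call sites in this diff, not quoted from the dispatcher headers:

        /* Old, inferred from the removed lines: */
        extern cpu_t *disp_lowpri_cpu(cpu_t *, lpl_t *, pri_t, cpu_t *);

        /* New, inferred from the added lines: */
        extern cpu_t *disp_lowpri_cpu(cpu_t *, kthread_t *, pri_t);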