10924 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Peter Tribble <peter.tribble@gmail.com>

          --- old/usr/src/uts/common/disp/thread.c
          +++ new/usr/src/uts/common/disp/thread.c
[ 67 lines elided ]
  68   68  #include <sys/tsol/tndb.h>
  69   69  #include <sys/cpc_impl.h>
  70   70  #include <sys/sdt.h>
  71   71  #include <sys/reboot.h>
  72   72  #include <sys/kdi.h>
  73   73  #include <sys/schedctl.h>
  74   74  #include <sys/waitq.h>
  75   75  #include <sys/cpucaps.h>
  76   76  #include <sys/kiconv.h>
  77   77  #include <sys/ctype.h>
       78 +#include <sys/ht.h>
  78   79  
  79   80  struct kmem_cache *thread_cache;        /* cache of free threads */
  80   81  struct kmem_cache *lwp_cache;           /* cache of free lwps */
  81   82  struct kmem_cache *turnstile_cache;     /* cache of free turnstiles */
  82   83  
  83   84  /*
  84   85   * allthreads is only for use by kmem_readers.  All kernel loops can use
  85   86   * the current thread as a start/end point.
  86   87   */
  87   88  kthread_t *allthreads = &t0;    /* circular list of all threads */
[ 392 lines elided ]
 480  481           * cpu_offline walks the thread list looking for threads
 481  482           * with t_cpu pointing to the CPU being offlined.  We want
 482  483           * to make sure that the list is consistent and that if t_cpu
 483  484           * is set, the thread is on the list.
 484  485           */
 485  486          kpreempt_disable();
 486  487          curthread->t_prev->t_next = t;
 487  488          curthread->t_prev = t;
 488  489  
 489  490          /*
 490      -         * Threads should never have a NULL t_cpu pointer so assign it
 491      -         * here.  If the thread is being created with state TS_RUN a
 492      -         * better CPU may be chosen when it is placed on the run queue.
 493      -         *
 494      -         * We need to keep kernel preemption disabled when setting all
 495      -         * three fields to keep them in sync.  Also, always create in
 496      -         * the default partition since that's where kernel threads go
 497      -         * (if this isn't a kernel thread, t_cpupart will be changed
 498      -         * in lwp_create before setting the thread runnable).
      491 +         * We'll always create in the default partition since that's where
      492 +         * kernel threads go (we'll change this later if needed, in
      493 +         * lwp_create()).
 499  494           */
 500  495          t->t_cpupart = &cp_default;
 501  496  
 502  497          /*
 503  498           * For now, affiliate this thread with the root lgroup.
 504  499           * Since the kernel does not (presently) allocate its memory
 505  500           * in a locality aware fashion, the root is an appropriate home.
 506  501           * If this thread is later associated with an lwp, it will have
 507      -         * it's lgroup re-assigned at that time.
      502 +         * its lgroup re-assigned at that time.
 508  503           */
 509  504          lgrp_move_thread(t, &cp_default.cp_lgrploads[LGRP_ROOTID], 1);
 510  505  
 511  506          /*
 512      -         * Inherit the current cpu.  If this cpu isn't part of the chosen
 513      -         * lgroup, a new cpu will be chosen by cpu_choose when the thread
 514      -         * is ready to run.
      507 +         * If the current CPU is in the default cpupart, use it.  Otherwise,
      508 +         * pick one that is; before entering the dispatcher code, we'll
      509 +         * make sure to keep the invariant that ->t_cpu is set.  (In fact, we
      510 +         * rely on this, in ht_should_run(), in the call tree of
      511 +         * disp_lowpri_cpu().)
 515  512           */
 516      -        if (CPU->cpu_part == &cp_default)
      513 +        if (CPU->cpu_part == &cp_default) {
 517  514                  t->t_cpu = CPU;
 518      -        else
 519      -                t->t_cpu = disp_lowpri_cpu(cp_default.cp_cpulist, t->t_lpl,
 520      -                    t->t_pri, NULL);
      515 +        } else {
      516 +                t->t_cpu = cp_default.cp_cpulist;
      517 +                t->t_cpu = disp_lowpri_cpu(t->t_cpu, t, t->t_pri);
      518 +        }
 521  519  
 522  520          t->t_disp_queue = t->t_cpu->cpu_disp;
 523  521          kpreempt_enable();
 524  522  
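This hunk contains the substantive dispatcher change in this file: disp_lowpri_cpu() now takes the thread itself rather than a cpu list and lpl, and t->t_cpu is deliberately seeded with cp_default.cp_cpulist before the call. Below is a minimal userland sketch of why that seeding matters, assuming only what the new comment states: that ht_should_run(), in the call tree of disp_lowpri_cpu(), relies on t->t_cpu being set. The types and helpers are illustrative stand-ins, not the illumos implementation.

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative stand-ins for cpu_t and kthread_t (not the real layouts). */
    typedef struct cpu {
            int cpu_id;
            struct cpu *cpu_next;   /* next CPU in the partition's list */
    } cpu_t;

    typedef struct thread {
            cpu_t *t_cpu;           /* invariant: never NULL once visible */
            int t_pri;
    } thread_t;

    /*
     * Analogue of ht_should_run(): code in the dispatcher's call tree
     * dereferences t->t_cpu, so callers must uphold the invariant.
     */
    static int
    should_run(const thread_t *t, const cpu_t *cp)
    {
            assert(t->t_cpu != NULL);   /* faults if the caller didn't seed it */
            return (t->t_cpu == cp || t->t_pri > 0);
    }

    /* Analogue of disp_lowpri_cpu(): scan from a hint, pick an eligible CPU. */
    static cpu_t *
    pick_cpu(cpu_t *hint, thread_t *t)
    {
            cpu_t *cp;

            for (cp = hint; cp != NULL; cp = cp->cpu_next) {
                    if (should_run(t, cp))
                            return (cp);
            }
            return (hint);
    }

    int
    main(void)
    {
            cpu_t c1 = { 1, NULL };
            cpu_t c0 = { 0, &c1 };
            thread_t t = { NULL, 1 };

            t.t_cpu = &c0;                  /* seed first, as the new code does */
            t.t_cpu = pick_cpu(&c0, &t);    /* then let the picker refine it */
            (void) printf("chose cpu%d\n", t.t_cpu->cpu_id);
            return (0);
    }

The double assignment in the patch has the same shape: the first store establishes the invariant, the second refines the choice.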
 525  523          /*
 526  524           * Initialize thread state and the dispatcher lock pointer.
 527  525           * Need to hold onto pidlock to block allthreads walkers until
 528  526           * the state is set.
 529  527           */
 530  528          switch (state) {
[ 332 lines elided ]
 863  861          mutex_enter(&reaplock);
 864  862          /*
 865  863           * Pull threads and lwps associated with zone off deathrow lists.
 866  864           */
 867  865          t = thread_zone_cleanup(&thread_deathrow, &thread_reapcnt, zoneid);
 868  866          l = thread_zone_cleanup(&lwp_deathrow, &lwp_reapcnt, zoneid);
 869  867          mutex_exit(&reaplock);
 870  868  
 871  869          /*
 872  870           * Guard against race condition in mutex_owner_running:
 873      -         *      thread=owner(mutex)
 874      -         *      <interrupt>
 875      -         *                              thread exits mutex
 876      -         *                              thread exits
 877      -         *                              thread reaped
 878      -         *                              thread struct freed
      871 +         *      thread=owner(mutex)
      872 +         *      <interrupt>
      873 +         *                              thread exits mutex
      874 +         *                              thread exits
      875 +         *                              thread reaped
      876 +         *                              thread struct freed
 879  877           * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
 880  878           * A cross call to all cpus will cause the interrupt handler
 881  879           * to reset the PC if it is in mutex_owner_running, refreshing
 882  880           * stale thread pointers.
 883  881           */
 884  882          mutex_sync();   /* sync with mutex code */
 885  883  
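The deleted and re-added comment lines above are a whitespace-only cleanup; the race they describe is unchanged. For readers new to it, here is a minimal sketch of the shape of that race, assuming only what the comment itself says about mutex_owner_running(); the types are illustrative stand-ins, not the real kernel structures.

    #include <stddef.h>

    /* Illustrative stand-ins for the kernel types involved. */
    typedef struct cpu { int cpu_id; } cpu_t;
    typedef struct thread { cpu_t *t_cpu; } thread_t;
    typedef struct mutex { thread_t *m_owner; } mutex_t;

    /*
     * Between loading m_owner and dereferencing it, the owner thread can
     * exit, be reaped, and have its struct freed.  The real
     * mutex_owner_running() is written so that an interrupt handler can
     * detect a PC inside this window and restart the sequence; mutex_sync()
     * cross-calls all CPUs to flush any such window before the reaper
     * frees the thread structures.
     */
    cpu_t *
    owner_running_sketch(mutex_t *mp)
    {
            thread_t *owner = mp->m_owner;  /* step 1: load the owner pointer */

            if (owner == NULL)
                    return (NULL);
            /* <-- window: owner may be freed here without the cross-call */
            return (owner->t_cpu);          /* step 2: stale if owner was freed */
    }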
 886  884          /*
 887  885           * Reap threads
 888  886           */
[ 36 lines elided ]
 925  923                  t = thread_deathrow;
 926  924                  l = lwp_deathrow;
 927  925                  thread_deathrow = NULL;
 928  926                  lwp_deathrow = NULL;
 929  927                  thread_reapcnt = 0;
 930  928                  lwp_reapcnt = 0;
 931  929                  mutex_exit(&reaplock);
 932  930  
 933  931                  /*
 934  932                   * Guard against race condition in mutex_owner_running:
 935      -                 *      thread=owner(mutex)
 936      -                 *      <interrupt>
 937      -                 *                              thread exits mutex
 938      -                 *                              thread exits
 939      -                 *                              thread reaped
 940      -                 *                              thread struct freed
      933 +                 *      thread=owner(mutex)
      934 +                 *      <interrupt>
      935 +                 *                              thread exits mutex
      936 +                 *                              thread exits
      937 +                 *                              thread reaped
      938 +                 *                              thread struct freed
 941  939                   * cpu = thread->t_cpu <- BAD POINTER DEREFERENCE.
 942  940                   * A cross call to all cpus will cause the interrupt handler
 943  941                   * to reset the PC if it is in mutex_owner_running, refreshing
 944  942                   * stale thread pointers.
 945  943                   */
 946  944                  mutex_sync();   /* sync with mutex code */
 947  945                  /*
 948  946                   * Reap threads
 949  947                   */
 950  948                  thread_reap_list(t);
[ 368 lines elided ]
1319 1317          kthread_t       *t = curthread; /* current thread */
1320 1318          kthread_t       *itp;           /* interrupted thread */
1321 1319          int             i;              /* interrupt level */
1322 1320          extern int      intr_passivate();
1323 1321  
1324 1322          ASSERT(t->t_intr != NULL);
1325 1323  
1326 1324          itp = t->t_intr;                /* interrupted thread */
1327 1325          t->t_intr = NULL;               /* clear interrupt ptr */
1328 1326  
     1327 +        ht_end_intr();
     1328 +
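ht_end_intr() above is one of the new HT/L1TF hooks: thread_unpin() is where an interrupt thread hands control back to the thread it interrupted, so it is a natural point to tell the HT-exclusion machinery that interrupt processing has ended. Below is a rough sketch of the bracket-style bookkeeping such a hook implies, assuming it pairs with a corresponding call at interrupt entry; the real logic behind sys/ht.h also coordinates with the sibling hyperthread, and the names and fields here are illustrative only.

    #include <assert.h>

    /* Illustrative per-CPU HT state; not the real cpu_t layout. */
    typedef struct ht_state {
            unsigned hs_intr_depth;         /* nesting depth of interrupts */
    } ht_state_t;

    /* Interrupt entry: raise the depth so the sibling stays excluded. */
    static void
    begin_intr(ht_state_t *hs)
    {
            hs->hs_intr_depth++;
    }

    /* Interrupt exit: the point that thread_unpin() now marks. */
    static void
    end_intr(ht_state_t *hs)
    {
            assert(hs->hs_intr_depth > 0);  /* calls must pair up */
            hs->hs_intr_depth--;
    }

    int
    main(void)
    {
            ht_state_t hs = { 0 };

            begin_intr(&hs);        /* interrupt arrives */
            end_intr(&hs);          /* thread_unpin() analogue */
            assert(hs.hs_intr_depth == 0);
            return (0);
    }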
1329 1329          /*
1330 1330           * Get state from interrupt thread for the one
1331 1331           * it interrupted.
1332 1332           */
1333 1333  
1334 1334          i = intr_passivate(t, itp);
1335 1335  
1336 1336          TRACE_5(TR_FAC_INTR, TR_INTR_PASSIVATE,
1337 1337              "intr_passivate:level %d curthread %p (%T) ithread %p (%T)",
1338 1338              i, t, t, itp, itp);
[ 76 lines elided ]
1415 1415          tp->t_link = cp->cpu_intr_thread;
1416 1416          cp->cpu_intr_thread = tp;
1417 1417  }
1418 1418  
1419 1419  /*
1420 1420   * TSD -- THREAD SPECIFIC DATA
1421 1421   */
1422 1422  static kmutex_t         tsd_mutex;       /* linked list spin lock */
1423 1423  static uint_t           tsd_nkeys;       /* size of destructor array */
1424 1424  /* per-key destructor funcs */
1425      -static void             (**tsd_destructor)(void *);
     1425 +static void             (**tsd_destructor)(void *);
1426 1426  /* list of tsd_thread's */
1427 1427  static struct tsd_thread        *tsd_list;
1428 1428  
1429 1429  /*
1430 1430   * Default destructor
1431 1431   *      Needed because NULL destructor means that the key is unused
1432 1432   */
1433 1433  /* ARGSUSED */
1434 1434  void
1435 1435  tsd_defaultdestructor(void *value)
[ 772 lines elided ]