Print this page
10924 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Peter Tribble <peter.tribble@gmail.com>

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/os/cpu.c
          +++ new/usr/src/uts/common/os/cpu.c
↓ open down ↓ 392 lines elided ↑ open up ↑
 393  393   * curthread, will set affinity to the CPU on which the thread is currently
 394  394   * running.  For other cpu_id values, the caller must ensure that the
 395  395   * referenced CPU remains valid, which can be done by holding cpu_lock across
 396  396   * this call.
 397  397   *
 398  398   * CPU affinity is guaranteed after return of thread_affinity_set().  If a
 399  399   * caller setting affinity to CPU_CURRENT requires that its thread not migrate
 400  400   * CPUs prior to a successful return, it should take extra precautions (such as
 401  401   * its own call to kpreempt_disable) to ensure that safety.
 402  402   *
      403 + * CPU_BEST can be used to pick a "best" CPU to migrate to, including
      404 + * potentially the current CPU.
      405 + *
 403  406   * A CPU affinity reference count is maintained by thread_affinity_set and
 404  407   * thread_affinity_clear (incrementing and decrementing it, respectively),
 405  408   * maintaining CPU affinity while the count is non-zero, and allowing regions
 406  409   * of code which require affinity to be nested.
 407  410   */
 408  411  void
 409  412  thread_affinity_set(kthread_id_t t, int cpu_id)
 410  413  {
 411  414          cpu_t *cp;
 412  415  
 413  416          ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));
 414  417  
 415  418          if (cpu_id == CPU_CURRENT) {
 416  419                  VERIFY3P(t, ==, curthread);
 417  420                  kpreempt_disable();
 418  421                  cp = CPU;
      422 +        } else if (cpu_id == CPU_BEST) {
      423 +                VERIFY3P(t, ==, curthread);
      424 +                kpreempt_disable();
      425 +                cp = disp_choose_best_cpu();
 419  426          } else {
 420  427                  /*
 421  428                   * We should be asserting that cpu_lock is held here, but
 422  429                   * the NCA code doesn't acquire it.  The following assert
 423  430                   * should be uncommented when the NCA code is fixed.
 424  431                   *
 425  432                   * ASSERT(MUTEX_HELD(&cpu_lock));
 426  433                   */
 427  434                  VERIFY((cpu_id >= 0) && (cpu_id < NCPU));
 428  435                  cp = cpu[cpu_id];
↓ open down ↓ 17 lines elided ↑ open up ↑
 446  453          /*
 447  454           * Make sure we're running on the right CPU.
 448  455           */
 449  456          if (cp != t->t_cpu || t != curthread) {
 450  457                  ASSERT(cpu_id != CPU_CURRENT);
 451  458                  force_thread_migrate(t);        /* drops thread lock */
 452  459          } else {
 453  460                  thread_unlock(t);
 454  461          }
 455  462  
 456      -        if (cpu_id == CPU_CURRENT) {
      463 +        if (cpu_id == CPU_CURRENT || cpu_id == CPU_BEST)
 457  464                  kpreempt_enable();
 458      -        }
 459  465  }
 460  466  
 461  467  /*
 462  468   *      Wrapper for backward compatibility.
 463  469   */
 464  470  void
 465  471  affinity_set(int cpu_id)
 466  472  {
 467  473          thread_affinity_set(curthread, cpu_id);
 468  474  }
↓ open down ↓ 1014 lines elided ↑ open up ↑
1483 1489                                          else if (t->t_lpl->lpl_lgrpid ==
1484 1490                                              cpu_lpl->lpl_lgrpid)
1485 1491                                                  lgrp_diff_lpl++;
1486 1492                                  }
1487 1493                                  ASSERT(t->t_lpl->lpl_ncpu > 0);
1488 1494  
1489 1495                                  /*
1490 1496                                   * Update CPU last ran on if it was this CPU
1491 1497                                   */
1492 1498                                  if (t->t_cpu == cp && t->t_bound_cpu != cp)
1493      -                                        t->t_cpu = disp_lowpri_cpu(ncp,
1494      -                                            t->t_lpl, t->t_pri, NULL);
     1499 +                                        t->t_cpu = disp_lowpri_cpu(ncp, t,
     1500 +                                            t->t_pri);
1495 1501                                  ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
1496 1502                                      t->t_weakbound_cpu == cp);
1497 1503  
1498 1504                                  t = t->t_forw;
1499 1505                          } while (t != p->p_tlist);
1500 1506  
1501 1507                          /*
1502 1508                           * Didn't find any threads in the same lgroup as this
1503 1509                           * CPU with a different lpl, so remove the lgroup from
1504 1510                           * the process lgroup bitmask.
↓ open down ↓ 21 lines elided ↑ open up ↑
1526 1532                          if ((cpu_lpl->lpl_ncpu == 0) && (t->t_lpl == cpu_lpl))
1527 1533                                  lgrp_move_thread(t,
1528 1534                                      lgrp_choose(t, t->t_cpupart), 1);
1529 1535  
1530 1536                          ASSERT(t->t_lpl->lpl_ncpu > 0);
1531 1537  
1532 1538                          /*
1533 1539                           * Update CPU last ran on if it was this CPU
1534 1540                           */
1535 1541  
1536      -                        if (t->t_cpu == cp && t->t_bound_cpu != cp) {
1537      -                                t->t_cpu = disp_lowpri_cpu(ncp,
1538      -                                    t->t_lpl, t->t_pri, NULL);
1539      -                        }
     1542 +                        if (t->t_cpu == cp && t->t_bound_cpu != cp)
     1543 +                                t->t_cpu = disp_lowpri_cpu(ncp, t, t->t_pri);
     1544 +
1540 1545                          ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
1541 1546                              t->t_weakbound_cpu == cp);
1542 1547                          t = t->t_next;
1543 1548  
1544 1549                  } while (t != curthread);
1545 1550                  ASSERT((cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) == 0);
1546 1551                  cp->cpu_flags |= CPU_OFFLINE;
1547 1552                  disp_cpu_inactive(cp);
1548 1553                  if (!no_quiesce)
1549 1554                          cp->cpu_flags |= CPU_QUIESCED;
↓ open down ↓ 2015 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX