Print this page
OS-7125 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/os/cpu.c
          +++ new/usr/src/uts/common/os/cpu.c
↓ open down ↓ 391 lines elided ↑ open up ↑
 392  392   * curthread, will set affinity to the CPU on which the thread is currently
 393  393   * running.  For other cpu_id values, the caller must ensure that the
 394  394   * referenced CPU remains valid, which can be done by holding cpu_lock across
 395  395   * this call.
 396  396   *
 397  397   * CPU affinity is guaranteed after return of thread_affinity_set().  If a
 398  398   * caller setting affinity to CPU_CURRENT requires that its thread not migrate
 399  399   * CPUs prior to a successful return, it should take extra precautions (such as
 400  400   * their own call to kpreempt_disable) to ensure that safety.
 401  401   *
      402 + * CPU_BEST can be used to pick a "best" CPU to migrate to, including
      403 + * potentially the current CPU.
      404 + *
 402  405   * A CPU affinity reference count is maintained by thread_affinity_set and
 403  406   * thread_affinity_clear (incrementing and decrementing it, respectively),
 404  407   * maintaining CPU affinity while the count is non-zero, and allowing regions
 405  408   * of code which require affinity to be nested.
 406  409   */
 407  410  void
 408  411  thread_affinity_set(kthread_id_t t, int cpu_id)
 409  412  {
 410  413          cpu_t *cp;
 411  414  
 412  415          ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));
 413  416  
 414  417          if (cpu_id == CPU_CURRENT) {
 415  418                  VERIFY3P(t, ==, curthread);
 416  419                  kpreempt_disable();
 417  420                  cp = CPU;
      421 +        } else if (cpu_id == CPU_BEST) {
      422 +                VERIFY3P(t, ==, curthread);
      423 +                kpreempt_disable();
      424 +                cp = disp_choose_best_cpu();
 418  425          } else {
 419  426                  /*
 420  427                   * We should be asserting that cpu_lock is held here, but
 421  428                   * the NCA code doesn't acquire it.  The following assert
 422  429                   * should be uncommented when the NCA code is fixed.
 423  430                   *
 424  431                   * ASSERT(MUTEX_HELD(&cpu_lock));
 425  432                   */
 426  433                  VERIFY((cpu_id >= 0) && (cpu_id < NCPU));
 427  434                  cp = cpu[cpu_id];
↓ open down ↓ 17 lines elided ↑ open up ↑
 445  452          /*
 446  453           * Make sure we're running on the right CPU.
 447  454           */
 448  455          if (cp != t->t_cpu || t != curthread) {
 449  456                  ASSERT(cpu_id != CPU_CURRENT);
 450  457                  force_thread_migrate(t);        /* drops thread lock */
 451  458          } else {
 452  459                  thread_unlock(t);
 453  460          }
 454  461  
 455      -        if (cpu_id == CPU_CURRENT) {
      462 +        if (cpu_id == CPU_CURRENT || cpu_id == CPU_BEST)
 456  463                  kpreempt_enable();
 457      -        }
 458  464  }
 459  465  
 460  466  /*
 461  467   *      Wrapper for backward compatibility.  Sets CPU affinity for the
 *      calling thread (curthread); cpu_id accepts the same values as
 *      thread_affinity_set(), including CPU_CURRENT and CPU_BEST.
 462  468   */
 463  469  void
 464  470  affinity_set(int cpu_id)
 465  471  {
 466  472          thread_affinity_set(curthread, cpu_id);
 467  473  }
↓ open down ↓ 1014 lines elided ↑ open up ↑
1482 1488                                          else if (t->t_lpl->lpl_lgrpid ==
1483 1489                                              cpu_lpl->lpl_lgrpid)
1484 1490                                                  lgrp_diff_lpl++;
1485 1491                                  }
1486 1492                                  ASSERT(t->t_lpl->lpl_ncpu > 0);
1487 1493  
1488 1494                                  /*
1489 1495                                   * Update CPU last ran on if it was this CPU
1490 1496                                   */
1491 1497                                  if (t->t_cpu == cp && t->t_bound_cpu != cp)
1492      -                                        t->t_cpu = disp_lowpri_cpu(ncp,
1493      -                                            t->t_lpl, t->t_pri, NULL);
     1498 +                                        t->t_cpu = disp_lowpri_cpu(ncp, t,
     1499 +                                            t->t_pri);
1494 1500                                  ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
1495 1501                                      t->t_weakbound_cpu == cp);
1496 1502  
1497 1503                                  t = t->t_forw;
1498 1504                          } while (t != p->p_tlist);
1499 1505  
1500 1506                          /*
1501 1507                           * Didn't find any threads in the same lgroup as this
1502 1508                           * CPU with a different lpl, so remove the lgroup from
1503 1509                           * the process lgroup bitmask.
↓ open down ↓ 21 lines elided ↑ open up ↑
1525 1531                          if ((cpu_lpl->lpl_ncpu == 0) && (t->t_lpl == cpu_lpl))
1526 1532                                  lgrp_move_thread(t,
1527 1533                                      lgrp_choose(t, t->t_cpupart), 1);
1528 1534  
1529 1535                          ASSERT(t->t_lpl->lpl_ncpu > 0);
1530 1536  
1531 1537                          /*
1532 1538                           * Update CPU last ran on if it was this CPU
1533 1539                           */
1534 1540  
1535      -                        if (t->t_cpu == cp && t->t_bound_cpu != cp) {
1536      -                                t->t_cpu = disp_lowpri_cpu(ncp,
1537      -                                    t->t_lpl, t->t_pri, NULL);
1538      -                        }
     1541 +                        if (t->t_cpu == cp && t->t_bound_cpu != cp)
     1542 +                                t->t_cpu = disp_lowpri_cpu(ncp, t, t->t_pri);
     1543 +
1539 1544                          ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
1540 1545                              t->t_weakbound_cpu == cp);
1541 1546                          t = t->t_next;
1542 1547  
1543 1548                  } while (t != curthread);
1544 1549                  ASSERT((cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) == 0);
1545 1550                  cp->cpu_flags |= CPU_OFFLINE;
1546 1551                  disp_cpu_inactive(cp);
1547 1552                  if (!no_quiesce)
1548 1553                          cp->cpu_flags |= CPU_QUIESCED;
↓ open down ↓ 1919 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX