OS-7125 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
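The substantive change in this file is to the two disp_lowpri_cpu() call sites: the dispatcher is now handed the migrating thread itself rather than the thread's lpl plus an explicit hint CPU, presumably so placement decisions made as part of the L1TF work can take the whole thread into account. A minimal sketch of the call-site change, assuming the three-argument prototype implied by the new calls below (the real declaration lives in the dispatcher headers, not in this file, and the helper name here is hypothetical):

/*
 * Sketch only, not part of the webrev: illustrates the disp_lowpri_cpu()
 * call-site update shown in the diff below.  The three-argument form is
 * inferred from the new calls and is an assumption about the updated
 * prototype.
 */
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/disp.h>

static void
requeue_thread_sketch(cpu_t *ncp, kthread_t *t)
{
	/* Before: t->t_cpu = disp_lowpri_cpu(ncp, t->t_lpl, t->t_pri, NULL); */
	/* After: the thread is passed, so the dispatcher can derive its lpl itself. */
	t->t_cpu = disp_lowpri_cpu(ncp, t, t->t_pri);
}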

--- old/usr/src/uts/common/disp/cpupart.c
+++ new/usr/src/uts/common/disp/cpupart.c
... 12 lines elided ...
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 1996, 2010, Oracle and/or its affiliates. All rights reserved.
  23      - * Copyright (c) 2017 by Delphix. All rights reserved.
       23 + * Copyright 2018 Joyent, Inc.
  24   24   */
  25   25  
  26   26  #include <sys/types.h>
  27   27  #include <sys/systm.h>
  28   28  #include <sys/cmn_err.h>
  29   29  #include <sys/cpuvar.h>
  30   30  #include <sys/thread.h>
  31   31  #include <sys/disp.h>
  32   32  #include <sys/kmem.h>
  33   33  #include <sys/debug.h>
... 284 lines elided ...
 318  318  
 319  319  
 320  320  static int
 321  321  cpupart_move_cpu(cpu_t *cp, cpupart_t *newpp, int forced)
 322  322  {
 323  323          cpupart_t *oldpp;
 324  324          cpu_t   *ncp, *newlist;
 325  325          kthread_t *t;
 326  326          int     move_threads = 1;
 327  327          lgrp_id_t lgrpid;
 328      -        proc_t  *p;
      328 +        proc_t  *p;
 329  329          int lgrp_diff_lpl;
 330  330          lpl_t   *cpu_lpl;
 331  331          int     ret;
 332  332          boolean_t unbind_all_threads = (forced != 0);
 333  333  
 334  334          ASSERT(MUTEX_HELD(&cpu_lock));
 335  335          ASSERT(newpp != NULL);
 336  336  
 337  337          oldpp = cp->cpu_part;
 338  338          ASSERT(oldpp != NULL);
... 224 lines elided ...
 563  563                                   */
 564  564                                  ASSERT(t->t_lpl >= t->t_cpupart->cp_lgrploads &&
 565  565                                      (t->t_lpl < t->t_cpupart->cp_lgrploads +
 566  566                                      t->t_cpupart->cp_nlgrploads));
 567  567  
 568  568                                  ASSERT(t->t_lpl->lpl_ncpu > 0);
 569  569  
 570  570                                  /* Update CPU last ran on if it was this CPU */
 571  571                                  if (t->t_cpu == cp && t->t_cpupart == oldpp &&
 572  572                                      t->t_bound_cpu != cp) {
 573      -                                        t->t_cpu = disp_lowpri_cpu(ncp,
 574      -                                            t->t_lpl, t->t_pri, NULL);
      573 +                                        t->t_cpu = disp_lowpri_cpu(ncp, t,
      574 +                                            t->t_pri);
 575  575                                  }
 576  576                                  t = t->t_forw;
 577  577                          } while (t != p->p_tlist);
 578  578  
 579  579                          /*
 580  580                           * Didn't find any threads in the same lgroup as this
 581  581                           * CPU with a different lpl, so remove the lgroup from
 582  582                           * the process lgroup bitmask.
 583  583                           */
 584  584  
... 31 lines elided ...
 616  616                          /* make sure lpl points to our own partition */
 617  617                          ASSERT((t->t_lpl >= t->t_cpupart->cp_lgrploads) &&
 618  618                              (t->t_lpl < t->t_cpupart->cp_lgrploads +
 619  619                              t->t_cpupart->cp_nlgrploads));
 620  620  
 621  621                          ASSERT(t->t_lpl->lpl_ncpu > 0);
 622  622  
 623  623                          /* Update CPU last ran on if it was this CPU */
 624  624                          if (t->t_cpu == cp && t->t_cpupart == oldpp &&
 625  625                              t->t_bound_cpu != cp) {
 626      -                                t->t_cpu = disp_lowpri_cpu(ncp, t->t_lpl,
 627      -                                    t->t_pri, NULL);
      626 +                                t->t_cpu = disp_lowpri_cpu(ncp, t,
      627 +                                    t->t_pri);
 628  628                          }
 629  629  
 630  630                          t = t->t_next;
 631  631                  } while (t != curthread);
 632  632  
 633  633                  /*
 634  634                   * Clear off the CPU's run queue, and the kp queue if the
 635  635                   * partition is now empty.
 636  636                   */
 637  637                  disp_cpu_inactive(cp);
... 234 lines elided ...
 872  872          return (0);
 873  873  }
 874  874  
 875  875  /*
 876  876   * Move threads from specified partition to cp_default. If `force' is specified,
 877  877   * move all threads, otherwise move only soft-bound threads.
 878  878   */
 879  879  static int
 880  880  cpupart_unbind_threads(cpupart_t *pp, boolean_t unbind_all)
 881  881  {
 882      -        void    *projbuf, *zonebuf;
      882 +        void    *projbuf, *zonebuf;
 883  883          kthread_t *t;
 884  884          proc_t  *p;
 885  885          int     err = 0;
 886  886          psetid_t psid = pp->cp_id;
 887  887  
 888  888          ASSERT(pool_lock_held());
 889  889          ASSERT(MUTEX_HELD(&cpu_lock));
 890  890  
 891  891          if (pp == NULL || pp == &cp_default) {
 892  892                  return (EINVAL);
... 335 lines elided ...