OS-7125 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>

------- lgrp.c: old version -------

   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  */
  25 
  26 /*
  27  * Basic NUMA support in terms of locality groups
  28  *
  29  * Solaris needs to know which CPUs, memory, etc. are near each other to
  30  * provide good performance on NUMA machines by optimizing for locality.
  31  * In order to do this, a new abstraction called a "locality group (lgroup)"
  32  * has been introduced to keep track of which CPU-like and memory-like hardware
  33  * resources are close to each other.  Currently, latency is the only measure
  34  * used to determine how to group hardware resources into lgroups, but this
  35  * does not limit the groupings to be based solely on latency.  Other factors
  36  * may be used to determine the groupings in the future.
  37  *
  38  * Lgroups are organized into a hierarchy or topology that represents the
  39  * latency topology of the machine.  There is always at least a root lgroup in
  40  * the system.  It represents all the hardware resources in the machine at a
  41  * latency big enough that any hardware resource can at least access any other
  42  * hardware resource within that latency.  A Uniform Memory Access (UMA)
  43  * machine is represented with one lgroup (the root).  In contrast, a NUMA


  73 #include <sys/param.h>
  74 #include <sys/var.h>
  75 #include <sys/thread.h>
  76 #include <sys/cpuvar.h>
  77 #include <sys/cpupart.h>
  78 #include <sys/kmem.h>
  79 #include <vm/seg.h>
  80 #include <vm/seg_kmem.h>
  81 #include <vm/seg_spt.h>
  82 #include <vm/seg_vn.h>
  83 #include <vm/as.h>
  84 #include <sys/atomic.h>
  85 #include <sys/systm.h>
  86 #include <sys/errno.h>
  87 #include <sys/cmn_err.h>
  88 #include <sys/kstat.h>
  89 #include <sys/sysmacros.h>
  90 #include <sys/pg.h>
  91 #include <sys/promif.h>
  92 #include <sys/sdt.h>

  93 
  94 lgrp_gen_t      lgrp_gen = 0;           /* generation of lgroup hierarchy */
  95 lgrp_t *lgrp_table[NLGRPS_MAX]; /* table of all initialized lgrp_t structs */
  96                                 /* indexed by lgrp_id */
  97 int     nlgrps;                 /* number of lgroups in machine */
  98 int     lgrp_alloc_hint = -1;   /* hint for where to try to allocate next */
  99 int     lgrp_alloc_max = 0;     /* max lgroup ID allocated so far */
 100 
 101 /*
 102  * Kstat data for lgroups.
 103  *
 104  * Actual kstat data is collected in the lgrp_stats array.
 105  * The lgrp_kstat_data array of named kstats is used to extract data from
 106  * lgrp_stats and present it to the kstat framework. It is protected from
 107  * parallel modifications by lgrp_kstat_mutex. This may cause some contention
 108  * when several kstat commands run in parallel, but this is not a
 109  * performance-critical path.
 110  */
 111 extern struct lgrp_stats lgrp_stats[];  /* table of per-lgrp stats */
 112 


 503         lgrp_kstat_init();
 504         /*
 505          * cpu0 is finally where it should be, so create its lgroup's kstats
 506          */
 507         mutex_enter(&cpu_lock);
 508         lgrp_kstat_create(cp);
 509         mutex_exit(&cpu_lock);
 510 
 511         lgrp_initialized = 1;
 512 }
 513 
 514 /*
 515  * Finish lgrp initialization after all CPUs are brought on-line.
 516  * This routine is called after start_other_cpus().
 517  */
 518 static void
 519 lgrp_main_mp_init(void)
 520 {
 521         klgrpset_t changed;
 522 


 523         /*
 524          * Update lgroup topology (if necessary)
 525          */
 526         klgrpset_clear(changed);
 527         (void) lgrp_topo_update(lgrp_table, lgrp_alloc_max + 1, &changed);
 528         lgrp_topo_initialized = 1;
 529 }
 530 
 531 /*
 532  * Change latency of lgroup with specified lgroup platform handle (if one is
 533  * given) or change all lgroups with old latency to new latency
 534  */
 535 void
 536 lgrp_latency_change(lgrp_handle_t hand, u_longlong_t oldtime,
 537     u_longlong_t newtime)
 538 {
 539         lgrp_t          *lgrp;
 540         int             i;
 541 
 542         for (i = 0; i <= lgrp_alloc_max; i++) {


------- lgrp.c: new version (OS-7125 applied) -------

   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  23  * Use is subject to license terms.
  24  * Copyright 2018 Joyent, Inc.
  25  */
  26 
  27 /*
  28  * Basic NUMA support in terms of locality groups
  29  *
  30  * Solaris needs to know which CPUs, memory, etc. are near each other to
  31  * provide good performance on NUMA machines by optimizing for locality.
  32  * In order to do this, a new abstraction called a "locality group (lgroup)"
  33  * has been introduced to keep track of which CPU-like and memory-like hardware
  34  * resources are close to each other.  Currently, latency is the only measure
  35  * used to determine how to group hardware resources into lgroups, but this
  36  * does not limit the groupings to be based solely on latency.  Other factors
  37  * may be used to determine the groupings in the future.
  38  *
  39  * Lgroups are organized into a hierarchy or topology that represents the
  40  * latency topology of the machine.  There is always at least a root lgroup in
  41  * the system.  It represents all the hardware resources in the machine at a
  42  * latency big enough that any hardware resource can at least access any other
  43  * hardware resource within that latency.  A Uniform Memory Access (UMA)
  44  * machine is represented with one lgroup (the root).  In contrast, a NUMA

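The comment above describes the lgroup hierarchy: latency grows toward the root, which spans every resource in the machine. Purely as an illustration (not part of this change), walking from a leaf lgroup up to the root could look like the sketch below; lgrp_id, lgrp_latency, and lgrp_parent are existing lgrp_t fields, while the helper itself is hypothetical.

    #include <sys/lgrp.h>
    #include <sys/cmn_err.h>

    /*
     * Hypothetical helper: report the latency at each level from a
     * given lgroup up to the root (whose parent pointer is NULL).
     */
    static void
    lgrp_print_path_to_root(lgrp_t *lgrp)
    {
            while (lgrp != NULL) {
                    cmn_err(CE_CONT, "lgrp %d: latency %d\n",
                        (int)lgrp->lgrp_id, lgrp->lgrp_latency);
                    lgrp = lgrp->lgrp_parent;
            }
    }
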

  74 #include <sys/param.h>
  75 #include <sys/var.h>
  76 #include <sys/thread.h>
  77 #include <sys/cpuvar.h>
  78 #include <sys/cpupart.h>
  79 #include <sys/kmem.h>
  80 #include <vm/seg.h>
  81 #include <vm/seg_kmem.h>
  82 #include <vm/seg_spt.h>
  83 #include <vm/seg_vn.h>
  84 #include <vm/as.h>
  85 #include <sys/atomic.h>
  86 #include <sys/systm.h>
  87 #include <sys/errno.h>
  88 #include <sys/cmn_err.h>
  89 #include <sys/kstat.h>
  90 #include <sys/sysmacros.h>
  91 #include <sys/pg.h>
  92 #include <sys/promif.h>
  93 #include <sys/sdt.h>
  94 #include <sys/ht.h>
  95 
  96 lgrp_gen_t      lgrp_gen = 0;           /* generation of lgroup hierarchy */
  97 lgrp_t *lgrp_table[NLGRPS_MAX]; /* table of all initialized lgrp_t structs */
  98                                 /* indexed by lgrp_id */
  99 int     nlgrps;                 /* number of lgroups in machine */
 100 int     lgrp_alloc_hint = -1;   /* hint for where to try to allocate next */
 101 int     lgrp_alloc_max = 0;     /* max lgroup ID allocated so far */
 102 
 103 /*
 104  * Kstat data for lgroups.
 105  *
 106  * Actual kstat data is collected in the lgrp_stats array.
 107  * The lgrp_kstat_data array of named kstats is used to extract data from
 108  * lgrp_stats and present it to the kstat framework. It is protected from
 109  * parallel modifications by lgrp_kstat_mutex. This may cause some contention
 110  * when several kstat commands run in parallel, but this is not a
 111  * performance-critical path.
 112  */
 113 extern struct lgrp_stats lgrp_stats[];  /* table of per-lgrp stats */
 114 
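To make the locking that the comment describes concrete: a kstat update callback serialized by lgrp_kstat_mutex has roughly the shape below. This is a sketch only, not the actual extract routine (which appears later in this file); KSTAT_WRITE, ks_data, and the ks_update convention are the standard kstat framework interfaces.

    #include <sys/kstat.h>
    #include <sys/mutex.h>
    #include <sys/errno.h>

    static kmutex_t lgrp_kstat_mutex;       /* serializes kstat extraction */

    /* Sketch of a ks_update callback; illustrative, not the real routine. */
    static int
    lgrp_kstat_extract_sketch(kstat_t *ksp, int rw)
    {
            if (rw == KSTAT_WRITE)
                    return (EACCES);        /* lgroup kstats are read-only */

            mutex_enter(&lgrp_kstat_mutex);
            /* copy counters from lgrp_stats[] into ksp->ks_data here */
            mutex_exit(&lgrp_kstat_mutex);

            return (0);
    }
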


 505         lgrp_kstat_init();
 506         /*
 507          * cpu0 is finally where it should be, so create its lgroup's kstats
 508          */
 509         mutex_enter(&cpu_lock);
 510         lgrp_kstat_create(cp);
 511         mutex_exit(&cpu_lock);
 512 
 513         lgrp_initialized = 1;
 514 }
 515 
 516 /*
 517  * Finish lgrp initialization after all CPUs are brought on-line.
 518  * This routine is called after start_other_cpus().
 519  */
 520 static void
 521 lgrp_main_mp_init(void)
 522 {
 523         klgrpset_t changed;
 524 
 525         ht_init();      /* HT exclusion setup for the L1TF mitigation */
 526 
 527         /*
 528          * Update lgroup topology (if necessary)
 529          */
 530         klgrpset_clear(changed);
 531         (void) lgrp_topo_update(lgrp_table, lgrp_alloc_max + 1, &changed);
 532         lgrp_topo_initialized = 1;
 533 }
 534 
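ht_init() is the call this change introduces: it sets up the hyperthread-exclusion state behind the L1TF mitigation, and it runs here, after start_other_cpus(), because only then is the CPU list complete enough to pair every CPU with its HT sibling. The sketch below shows how such a pairing pass could work; cpu_list/cpu_next, pg_plat_cpus_share(), and PGHW_IPIPE are existing interfaces, but find_ht_sibling() itself is hypothetical and not taken from this change.

    #include <sys/cpuvar.h>
    #include <sys/pghw.h>

    /*
     * Hypothetical sketch: find the CPU sharing a core (instruction
     * pipeline) with cp.  It needs the complete cpu_list, which is
     * why a pass like this only makes sense after start_other_cpus().
     */
    static cpu_t *
    find_ht_sibling(cpu_t *cp)
    {
            cpu_t *scan = cp->cpu_next;

            while (scan != cp) {
                    if (pg_plat_cpus_share(cp, scan, PGHW_IPIPE))
                            return (scan);  /* HT sibling of cp */
                    scan = scan->cpu_next;
            }
            return (NULL);                  /* no sibling; HT disabled */
    }
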
 535 /*
 536  * Change latency of lgroup with specified lgroup platform handle (if one is
 537  * given) or change all lgroups with old latency to new latency
 538  */
 539 void
 540 lgrp_latency_change(lgrp_handle_t hand, u_longlong_t oldtime,
 541     u_longlong_t newtime)
 542 {
 543         lgrp_t          *lgrp;
 544         int             i;
 545 
 546         for (i = 0; i <= lgrp_alloc_max; i++) {
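
The listing is truncated at the loop header. For orientation only, the selection logic that the block comment describes would look roughly like the sketch below; LGRP_NULL_HANDLE and the lgrp_plathand/lgrp_latency fields exist in the lgrp code, but this is not the verbatim loop body.

    for (i = 0; i <= lgrp_alloc_max; i++) {
            lgrp = lgrp_table[i];
            if (lgrp == NULL)
                    continue;

            /*
             * A platform handle selects one lgroup; with no handle,
             * every lgroup still at the old latency is updated.
             */
            if ((hand != LGRP_NULL_HANDLE &&
                lgrp->lgrp_plathand == hand) ||
                (hand == LGRP_NULL_HANDLE &&
                lgrp->lgrp_latency == oldtime))
                    lgrp->lgrp_latency = (int)newtime;
    }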