Print this page
update

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/i86pc/os/cpuid.c
          +++ new/usr/src/uts/i86pc/os/cpuid.c
↓ open down ↓ 15 lines elided ↑ open up ↑
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  23   23   * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
  24   24   * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
  25   25   * Copyright 2014 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
       26 + * Copyright 2020 Joyent, Inc.
  26   27   */
  27   28  /*
  28   29   * Copyright (c) 2010, Intel Corporation.
  29   30   * All rights reserved.
  30   31   */
  31   32  /*
  32   33   * Portions Copyright 2009 Advanced Micro Devices, Inc.
  33   34   */
  34      -/*
  35      - * Copyright 2020 Joyent, Inc.
  36      - */
  37   35  
  38   36  /*
  39   37   * CPU Identification logic
  40   38   *
  41   39   * The purpose of this file and its companion, cpuid_subr.c, is to help deal
  42   40   * with the identification of CPUs, their features, and their topologies. More
  43   41   * specifically, this file helps drive the following:
  44   42   *
  45   43   * 1. Enumeration of features of the processor which are used by the kernel to
  46   44   *    determine what features to enable or disable. These may be instruction set
↓ open down ↓ 1173 lines elided ↑ open up ↑
1220 1218   * microcode during the first cpuid pass, then we'll disable TSX completely such
1221 1219   * that user land never has a chance to observe the bit. However, if we are late
1222 1220   * loading the microcode, then we must use the functionality to cause
1223 1221   * transactions to automatically abort. This is necessary for user land's sake.
1224 1222   * Once a program sees a cpuid bit, it must not be taken away.
1225 1223   *
1226 1224   * We track whether or not we should do this based on what cpuid pass we're in.
1227 1225   * Whenever we hit cpuid_scan_security() on the boot CPU and we're still on pass
1228 1226   * 1 of the cpuid logic, then we can completely turn off TSX. Notably this
1229 1227   * should happen twice. Once in the normal cpuid_pass1() code and then a second
1230      - * time after we do the initial microcode update.
     1228 + * time after we do the initial microcode update.  As a result, we need to be
     1229 + * careful in cpuid_apply_tsx() to only use the MSR if we've loaded a suitable
     1230 + * microcode on the current CPU (which happens prior to cpuid_pass_ucode()).
1231 1231   *
1232 1232   * If TAA has been fixed, then it will be enumerated in IA32_ARCH_CAPABILITIES
1233 1233   * as TAA_NO. In such a case, we will still disable TSX: it's proven to be an
1234 1234   * unfortunate feature in a number of ways, and taking the opportunity to
1235 1235   * finally be able to turn it off is likely to be of benefit in the future.
1236 1236   *
1237 1237   * SUMMARY
1238 1238   *
1239 1239   * The following table attempts to summarize the mitigations for various issues
1240 1240   * and what's done in various places:
↓ open down ↓ 1591 lines elided ↑ open up ↑
2832 2832           * Otherwise, we'll fall back to causing transactions to abort as our
2833 2833           * mitigation. TSX-using code will always take the fallback path.
2834 2834           */
2835 2835          if (cpi->cpi_pass < 4) {
2836 2836                  x86_taa_mitigation = X86_TAA_TSX_DISABLE;
2837 2837          } else {
2838 2838                  x86_taa_mitigation = X86_TAA_TSX_FORCE_ABORT;
2839 2839          }
2840 2840  }
2841 2841  
     2842 +/*
     2843 + * As mentioned, we should only touch the MSR when we've got a suitable
     2844 + * microcode loaded on this CPU.
     2845 + */
2842 2846  static void
2843      -cpuid_apply_tsx(x86_taa_mitigation_t taa)
     2847 +cpuid_apply_tsx(x86_taa_mitigation_t taa, uchar_t *featureset)
2844 2848  {
2845 2849          uint64_t val;
2846 2850  
2847 2851          switch (taa) {
2848 2852          case X86_TAA_TSX_DISABLE:
     2853 +                if (!is_x86_feature(featureset, X86FSET_TSX_CTRL))
     2854 +                        return;
2849 2855                  val = rdmsr(MSR_IA32_TSX_CTRL);
2850 2856                  val |= IA32_TSX_CTRL_CPUID_CLEAR | IA32_TSX_CTRL_RTM_DISABLE;
2851 2857                  wrmsr(MSR_IA32_TSX_CTRL, val);
2852 2858                  break;
2853 2859          case X86_TAA_TSX_FORCE_ABORT:
     2860 +                if (!is_x86_feature(featureset, X86FSET_TSX_CTRL))
     2861 +                        return;
2854 2862                  val = rdmsr(MSR_IA32_TSX_CTRL);
2855 2863                  val |= IA32_TSX_CTRL_RTM_DISABLE;
2856 2864                  wrmsr(MSR_IA32_TSX_CTRL, val);
2857 2865                  break;
2858 2866          case X86_TAA_HW_MITIGATED:
2859 2867          case X86_TAA_MD_CLEAR:
2860 2868          case X86_TAA_DISABLED:
2861 2869          case X86_TAA_NOTHING:
2862 2870                  break;
2863 2871          }
↓ open down ↓ 105 lines elided ↑ open up ↑
2969 2977                          add_x86_feature(featureset, X86FSET_SSBD);
2970 2978  
2971 2979                  if (ecp->cp_edx & CPUID_INTC_EDX_7_0_FLUSH_CMD)
2972 2980                          add_x86_feature(featureset, X86FSET_FLUSH_CMD);
2973 2981          }
2974 2982  
2975 2983          /*
2976 2984           * Take care of certain mitigations on the non-boot CPU. The boot CPU
2977 2985           * will have already run this function and determined what we need to
2978 2986           * do. This gives us a hook for per-HW thread mitigations such as
2979      -         * enhanced IBRS, or disabling TSX.  For TSX disabling, we need to be
2980      -         * careful that we've had a chance to load ucode that enables the new
2981      -         * MSRs.
     2987 +         * enhanced IBRS, or disabling TSX.
2982 2988           */
2983 2989          if (cpu->cpu_id != 0) {
2984 2990                  if (x86_spectrev2_mitigation == X86_SPECTREV2_ENHANCED_IBRS) {
2985 2991                          cpuid_enable_enhanced_ibrs();
2986 2992                  }
2987 2993  
2988      -                if (cpi->cpi_pass >= 1)
2989      -                        cpuid_apply_tsx(x86_taa_mitigation);
     2994 +                cpuid_apply_tsx(x86_taa_mitigation, featureset);
2990 2995                  return;
2991 2996          }
2992 2997  
2993 2998          /*
2994 2999           * Go through and initialize various security mechanisms that we should
2995 3000           * only do on a single CPU. This includes Spectre V2, L1TF, MDS, and
2996 3001           * TAA.
2997 3002           */
2998 3003  
2999 3004          /*
↓ open down ↓ 42 lines elided ↑ open up ↑
3042 3047           * Determine whether SMT exclusion is required and whether or not we
3043 3048           * need to perform an l1d flush.
3044 3049           */
3045 3050          cpuid_update_l1d_flush(cpu, featureset);
3046 3051  
3047 3052          /*
3048 3053           * Determine what our mitigation strategy should be for TAA and then
3049 3054           * also apply TAA mitigations.
3050 3055           */
3051 3056          cpuid_update_tsx(cpu, featureset);
3052      -        cpuid_apply_tsx(x86_taa_mitigation);
     3057 +        cpuid_apply_tsx(x86_taa_mitigation, featureset);
3053 3058  }
3054 3059  
3055 3060  /*
3056 3061   * Setup XFeature_Enabled_Mask register. Required by xsave feature.
3057 3062   */
3058 3063  void
3059 3064  setup_xfem(void)
3060 3065  {
3061 3066          uint64_t flags = XFEATURE_LEGACY_FP;
3062 3067  
↓ open down ↓ 4482 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX