/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2009,  Intel Corporation.
 * All Rights Reserved.
 */

#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/x_call.h>
#include <sys/acpi/acpi.h>
#include <sys/acpica.h>
#include <sys/speedstep.h>
#include <sys/cpu_acpi.h>
#include <sys/cpupm.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

static int speedstep_init(cpu_t *);
static void speedstep_fini(cpu_t *);
static void speedstep_power(cpuset_t, uint32_t);
static void speedstep_stop(cpu_t *);
static boolean_t speedstep_turbo_supported(void);

/*
 * Interfaces for modules implementing Intel's Enhanced SpeedStep.
 */
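/*
 * This ops vector is handed to the common cpupm code, which invokes the
 * init, fini, power (P-state transition) and stop entry points below on
 * behalf of each CPU it manages.
 */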
cpupm_state_ops_t speedstep_ops = {
        "Enhanced SpeedStep Technology",
        speedstep_init,
        speedstep_fini,
        speedstep_power,
        speedstep_stop
};

/*
 * Error returns
 */
#define ESS_RET_SUCCESS         0x00
#define ESS_RET_NO_PM           0x01
#define ESS_RET_UNSUP_STATE     0x02

/*
 * MSR registers for changing and reading processor power state.
 */
#define IA32_PERF_STAT_MSR              0x198
#define IA32_PERF_CTL_MSR               0x199
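/*
 * Bits 15:0 of IA32_PERF_CTL_MSR carry the target performance state
 * value (taken from the ACPI _PSS control field); IA32_PERF_STAT_MSR
 * reflects the processor's current operating point.
 */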

#define IA32_CPUID_TSC_CONSTANT         0xF30
#define IA32_MISC_ENABLE_MSR            0x1A0
#define IA32_MISC_ENABLE_EST            (1<<16)
#define IA32_MISC_ENABLE_CXE            (1<<25)

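/*
 * Bit 1 of CPUID leaf 6 (EAX) indicates that the processor supports
 * Intel Turbo Boost Technology.
 */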
#define CPUID_TURBO_SUPPORT             (1 << 1)

/*
 * Debugging support
 */
#ifdef  DEBUG
volatile int ess_debug = 0;
#define ESSDEBUG(arglist) if (ess_debug) printf arglist;
#else
#define ESSDEBUG(arglist)
#endif

/*
 * Write the ctrl register. How it is written depends upon the _PCT
 * ACPI object value.
 */
static void
write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
{
        cpu_acpi_pct_t *pct_ctrl;
        uint64_t reg;

        pct_ctrl = CPU_ACPI_PCT_CTRL(handle);

        switch (pct_ctrl->cr_addrspace_id) {
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                /*
                 * Read current power state because reserved bits must be
                 * preserved, compose new value, and write it.
                 */
                reg = rdmsr(IA32_PERF_CTL_MSR);
                reg &= ~((uint64_t)0xFFFF);
                reg |= ctrl;
                wrmsr(IA32_PERF_CTL_MSR, reg);
                break;

        case ACPI_ADR_SPACE_SYSTEM_IO:
                (void) cpu_acpi_write_port(pct_ctrl->cr_address, ctrl,
                    pct_ctrl->cr_width);
                break;

        default:
                DTRACE_PROBE1(ess_ctrl_unsupported_type, uint8_t,
                    pct_ctrl->cr_addrspace_id);
                return;
        }

        DTRACE_PROBE1(ess_ctrl_write, uint32_t, ctrl);
}

/*
 * Transition the current processor to the requested state.
 */
void
speedstep_pstate_transition(uint32_t req_state)
{
        cpupm_mach_state_t *mach_state =
            (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
        cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
        cpu_acpi_pstate_t *req_pstate;
        uint32_t ctrl;

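        /*
         * Look up the requested entry in the cached ACPI _PSS table.
         */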
        req_pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
        req_pstate += req_state;

        DTRACE_PROBE1(ess_transition, uint32_t, CPU_ACPI_FREQ(req_pstate));

        /*
         * Initiate the processor p-state change.
         */
        ctrl = CPU_ACPI_PSTATE_CTRL(req_pstate);
        write_ctrl(handle, ctrl);

        if (mach_state->ms_turbo != NULL)
                cpupm_record_turbo_info(mach_state->ms_turbo,
                    mach_state->ms_pstate.cma_state.pstate, req_state);

        mach_state->ms_pstate.cma_state.pstate = req_state;
        cpu_set_curr_clock(((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000));
}

static void
speedstep_power(cpuset_t set, uint32_t req_state)
{
        /*
         * If the current thread is running on one of the target CPUs,
         * make the transition request directly.  Any remaining CPUs in
         * the set are reached via cross-call.
         */
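        /*
         * Disable preemption so this thread cannot migrate to another
         * CPU while the set membership check and cross-calls are in
         * progress.
         */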
        kpreempt_disable();
        if (CPU_IN_SET(set, CPU->cpu_id)) {
                speedstep_pstate_transition(req_state);
                CPUSET_DEL(set, CPU->cpu_id);
        }
        if (!CPUSET_ISNULL(set)) {
                xc_call((xc_arg_t)req_state, NULL, NULL, CPUSET2BV(set),
                    (xc_func_t)speedstep_pstate_transition);
        }
        kpreempt_enable();
}

/*
 * Validate that this processor supports Enhanced SpeedStep and, if so,
 * get the P-state data from ACPI and cache it.
 */
static int
speedstep_init(cpu_t *cp)
{
        cpupm_mach_state_t *mach_state =
            (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
        cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
        cpu_acpi_pct_t *pct_stat;
        static int logged = 0;

        ESSDEBUG(("speedstep_init: processor %d\n", cp->cpu_id));

        /*
         * Cache the P-state specific ACPI data.
         */
        if (cpu_acpi_cache_pstate_data(handle) != 0) {
                if (!logged) {
                        cmn_err(CE_NOTE, "!SpeedStep support is being "
                            "disabled due to errors parsing ACPI P-state "
                            "objects exported by BIOS.");
                        logged = 1;
                }
                speedstep_fini(cp);
                return (ESS_RET_NO_PM);
        }

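        /*
         * Confirm that the _PCT status register lives in an address
         * space we know how to access (fixed-hardware MSRs or system
         * I/O ports); otherwise P-state transitions cannot be made.
         */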
        pct_stat = CPU_ACPI_PCT_STATUS(handle);
        switch (pct_stat->cr_addrspace_id) {
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                ESSDEBUG(("Transitions will use fixed hardware\n"));
                break;
        case ACPI_ADR_SPACE_SYSTEM_IO:
                ESSDEBUG(("Transitions will use system IO\n"));
                break;
        default:
                cmn_err(CE_WARN, "!_PCT configured for unsupported "
                    "addrspace = %d.", pct_stat->cr_addrspace_id);
                cmn_err(CE_NOTE, "!CPU power management will not function.");
                speedstep_fini(cp);
                return (ESS_RET_NO_PM);
        }

        cpupm_alloc_domains(cp, CPUPM_P_STATES);

        if (speedstep_turbo_supported())
                mach_state->ms_turbo = cpupm_turbo_init(cp);

        ESSDEBUG(("Processor %d succeeded.\n", cp->cpu_id));
        return (ESS_RET_SUCCESS);
}

/*
 * Free resources allocated by speedstep_init().
 */
static void
speedstep_fini(cpu_t *cp)
{
        cpupm_mach_state_t *mach_state =
            (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
        cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;

        cpupm_free_domains(&cpupm_pstate_domains);
        cpu_acpi_free_pstate_data(handle);

        if (mach_state->ms_turbo != NULL)
                cpupm_turbo_fini(mach_state->ms_turbo);
        mach_state->ms_turbo = NULL;
}

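/*
 * Stop P-state management on a CPU. Unlike speedstep_fini(), this
 * removes only this CPU from the P-state domain list rather than
 * freeing all of the cached domains.
 */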
static void
speedstep_stop(cpu_t *cp)
{
        cpupm_mach_state_t *mach_state =
            (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
        cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;

        cpupm_remove_domains(cp, CPUPM_P_STATES, &cpupm_pstate_domains);
        cpu_acpi_free_pstate_data(handle);

        if (mach_state->ms_turbo != NULL)
                cpupm_turbo_fini(mach_state->ms_turbo);
        mach_state->ms_turbo = NULL;
}

boolean_t
speedstep_supported(uint_t family, uint_t model)
{
        struct cpuid_regs cpu_regs;

        /* Required features */
        if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
            !is_x86_feature(x86_featureset, X86FSET_MSR)) {
                return (B_FALSE);
        }

        /*
         * We only support family/model combinations which
         * are P-state TSC invariant.
         */
        if (!((family == 0xf && model >= 0x3) ||
            (family == 0x6 && model >= 0xe))) {
                return (B_FALSE);
        }

        /*
         * Enhanced SpeedStep supported?
         */
        cpu_regs.cp_eax = 0x1;
        (void) __cpuid_insn(&cpu_regs);
        if (!(cpu_regs.cp_ecx & CPUID_INTC_ECX_EST)) {
                return (B_FALSE);
        }

        return (B_TRUE);
}

static boolean_t
speedstep_turbo_supported(void)
{
        struct cpuid_regs cpu_regs;

        /* Required features */
        if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
            !is_x86_feature(x86_featureset, X86FSET_MSR)) {
                return (B_FALSE);
        }

        /*
         * Turbo mode supported?
         */
        cpu_regs.cp_eax = 0x6;
        (void) __cpuid_insn(&cpu_regs);
        if (!(cpu_regs.cp_eax & CPUID_TURBO_SUPPORT)) {
                return (B_FALSE);
        }

        return (B_TRUE);
}