/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/x_call.h>
#include <acpica/include/acpi.h>
#include <sys/acpica.h>
#include <sys/pwrnow.h>
#include <sys/cpu_acpi.h>
#include <sys/cpupm.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>

static int pwrnow_init(cpu_t *);
static void pwrnow_fini(cpu_t *);
static void pwrnow_power(cpuset_t, uint32_t);
static void pwrnow_stop(cpu_t *);

static boolean_t pwrnow_cpb_supported(void);

/*
 * Interfaces for modules implementing AMD's PowerNow!.
 */
cpupm_state_ops_t pwrnow_ops = {
        "PowerNow! Technology",
        pwrnow_init,
        pwrnow_fini,
        pwrnow_power,
        pwrnow_stop
};

/*
 * Error returns
 */
#define PWRNOW_RET_SUCCESS              0x00
#define PWRNOW_RET_NO_PM                0x01
#define PWRNOW_RET_UNSUP_STATE          0x02
#define PWRNOW_RET_TRANS_INCOMPLETE     0x03

#define PWRNOW_LATENCY_WAIT             10

/*
 * MSR registers for changing and reading processor power state.
 */
#define PWRNOW_PERF_CTL_MSR             0xC0010062
#define PWRNOW_PERF_STATUS_MSR          0xC0010063

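/*
 * Feature bits reported in EDX by CPUID function 0x80000007
 * (Advanced Power Management Information).
 */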
#define AMD_CPUID_PSTATE_HARDWARE       (1<<7)
#define AMD_CPUID_TSC_CONSTANT          (1<<8)
#define AMD_CPUID_CPB                   (1<<9)

/*
 * Debugging support
 */
#ifdef  DEBUG
volatile int pwrnow_debug = 0;
#define PWRNOW_DEBUG(arglist) if (pwrnow_debug) printf arglist;
#else
#define PWRNOW_DEBUG(arglist)
#endif

/*
 * Write the ctrl register.
 */
static void
write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
{
        cpu_acpi_pct_t *pct_ctrl;
        uint64_t reg;

        pct_ctrl = CPU_ACPI_PCT_CTRL(handle);

        switch (pct_ctrl->cr_addrspace_id) {
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                reg = ctrl;
                wrmsr(PWRNOW_PERF_CTL_MSR, reg);
                break;

        default:
                DTRACE_PROBE1(pwrnow_ctrl_unsupported_type, uint8_t,
                    pct_ctrl->cr_addrspace_id);
                return;
        }

        DTRACE_PROBE1(pwrnow_ctrl_write, uint32_t, ctrl);
}

/*
 * Transition the current processor to the requested state.
 */
static void
pwrnow_pstate_transition(uint32_t req_state)
{
        cpupm_mach_state_t *mach_state =
            (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
        cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
        cpu_acpi_pstate_t *req_pstate;
        uint32_t ctrl;

        req_pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
        req_pstate += req_state;

        DTRACE_PROBE1(pwrnow_transition_freq, uint32_t,
            CPU_ACPI_FREQ(req_pstate));

        /*
         * Initiate the processor p-state change.
         */
        ctrl = CPU_ACPI_PSTATE_CTRL(req_pstate);
        write_ctrl(handle, ctrl);

        if (mach_state->ms_turbo != NULL)
                cpupm_record_turbo_info(mach_state->ms_turbo,
                    mach_state->ms_pstate.cma_state.pstate, req_state);

        mach_state->ms_pstate.cma_state.pstate = req_state;
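        /* CPU_ACPI_FREQ() reports MHz; cpu_set_curr_clock() expects Hz. */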
        cpu_set_curr_clock((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000);
}

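/*
 * Transition all CPUs in the given set to the requested P-state.
 */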
static void
pwrnow_power(cpuset_t set, uint32_t req_state)
{
        /*
         * If the current thread is already running on a CPU in the
         * target set, make the transition request directly.  Any
         * remaining CPUs are handled with a cross-call.
         */
        kpreempt_disable();
        if (CPU_IN_SET(set, CPU->cpu_id)) {
                pwrnow_pstate_transition(req_state);
                CPUSET_DEL(set, CPU->cpu_id);
        }
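        /* Cross-call any remaining CPUs so they transition themselves. */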
        if (!CPUSET_ISNULL(set)) {
                xc_call((xc_arg_t)req_state, NULL, NULL,
                    CPUSET2BV(set), (xc_func_t)pwrnow_pstate_transition);
        }
        kpreempt_enable();
}

/*
 * Validate that this processor supports PowerNow! and if so,
 * get the P-state data from ACPI and cache it.
 */
static int
pwrnow_init(cpu_t *cp)
{
        cpupm_mach_state_t *mach_state =
            (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
        cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
        cpu_acpi_pct_t *pct_stat;
        static int logged = 0;

        PWRNOW_DEBUG(("pwrnow_init: processor %d\n", cp->cpu_id));

        /*
         * Cache the P-state specific ACPI data.
         */
        if (cpu_acpi_cache_pstate_data(handle) != 0) {
                if (!logged) {
                        cmn_err(CE_NOTE, "!PowerNow! support is being "
                            "disabled due to errors parsing ACPI P-state "
                            "objects exported by BIOS.");
                        logged = 1;
                }
                pwrnow_fini(cp);
                return (PWRNOW_RET_NO_PM);
        }

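        /*
         * The _PCT status register must use the fixed-hardware (FFixedHW)
         * address space; other address spaces are not supported.
         */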
        pct_stat = CPU_ACPI_PCT_STATUS(handle);
        switch (pct_stat->cr_addrspace_id) {
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                PWRNOW_DEBUG(("Transitions will use fixed hardware\n"));
                break;
        default:
                cmn_err(CE_WARN, "!_PCT configured for unsupported "
                    "addrspace = %d.", pct_stat->cr_addrspace_id);
                cmn_err(CE_NOTE, "!CPU power management will not function.");
                pwrnow_fini(cp);
                return (PWRNOW_RET_NO_PM);
        }

        cpupm_alloc_domains(cp, CPUPM_P_STATES);

        /*
         * Check for Core Performance Boost support
         */
        if (pwrnow_cpb_supported())
                mach_state->ms_turbo = cpupm_turbo_init(cp);

        PWRNOW_DEBUG(("Processor %d succeeded.\n", cp->cpu_id))
        return (PWRNOW_RET_SUCCESS);
}

/*
 * Free resources allocated by pwrnow_init().
 */
static void
pwrnow_fini(cpu_t *cp)
{
        cpupm_mach_state_t *mach_state =
            (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
        cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;

        cpupm_free_domains(&cpupm_pstate_domains);
        cpu_acpi_free_pstate_data(handle);

        if (mach_state->ms_turbo != NULL)
                cpupm_turbo_fini(mach_state->ms_turbo);
        mach_state->ms_turbo = NULL;
}

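/*
 * Determine whether this processor advertises the features required for
 * PowerNow! P-state management: an invariant TSC and hardware P-state
 * control.
 */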
boolean_t
pwrnow_supported()
{
        struct cpuid_regs cpu_regs;

        /* Required features */
        if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
            !is_x86_feature(x86_featureset, X86FSET_MSR)) {
                PWRNOW_DEBUG(("No CPUID or MSR support."));
                return (B_FALSE);
        }

        /*
         * Get the Advanced Power Management Information.
         */
        cpu_regs.cp_eax = 0x80000007;
        (void) __cpuid_insn(&cpu_regs);

        /*
         * We currently only support CPU power management of
         * processors that are P-state TSC invariant
         */
        if (!(cpu_regs.cp_edx & AMD_CPUID_TSC_CONSTANT)) {
                PWRNOW_DEBUG(("No support for CPUs that are not P-state "
                    "TSC invariant.\n"));
                return (B_FALSE);
        }

        /*
         * We only support the "Fire and Forget" style of PowerNow! (i.e.,
         * single MSR write to change speed).
         */
        if (!(cpu_regs.cp_edx & AMD_CPUID_PSTATE_HARDWARE)) {
                PWRNOW_DEBUG(("Hardware P-State control is not supported.\n"));
                return (B_FALSE);
        }
        return (B_TRUE);
}

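/*
 * Determine whether this processor supports Core Performance Boost (CPB).
 */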
static boolean_t
pwrnow_cpb_supported(void)
{
        struct cpuid_regs cpu_regs;

        /* Required features */
        if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
            !is_x86_feature(x86_featureset, X86FSET_MSR)) {
                PWRNOW_DEBUG(("No CPUID or MSR support."));
                return (B_FALSE);
        }

        /*
         * Get the Advanced Power Management Information.
         */
        cpu_regs.cp_eax = 0x80000007;
        (void) __cpuid_insn(&cpu_regs);

        if (!(cpu_regs.cp_edx & AMD_CPUID_CPB))
                return (B_FALSE);

        return (B_TRUE);
}

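/*
 * Stop P-state management on this CPU and release its ACPI P-state data.
 */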
static void
pwrnow_stop(cpu_t *cp)
{
        cpupm_mach_state_t *mach_state =
            (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
        cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;

        cpupm_remove_domains(cp, CPUPM_P_STATES, &cpupm_pstate_domains);
        cpu_acpi_free_pstate_data(handle);

        if (mach_state->ms_turbo != NULL)
                cpupm_turbo_fini(mach_state->ms_turbo);
        mach_state->ms_turbo = NULL;
}