Print this page
PANKOV's restructure
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/i86pc/os/cpupm/speedstep.c
+++ new/usr/src/uts/i86pc/os/cpupm/speedstep.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
↓ open down ↓ |
22 lines elided |
↑ open up ↑ |
23 23 */
24 24 /*
25 25 * Copyright (c) 2009, Intel Corporation.
26 26 * All Rights Reserved.
27 27 */
28 28
29 29 #include <sys/x86_archext.h>
30 30 #include <sys/machsystm.h>
31 31 #include <sys/archsystm.h>
32 32 #include <sys/x_call.h>
33 -#include <sys/acpi/acpi.h>
33 +#include <acpica/include/acpi.h>
34 34 #include <sys/acpica.h>
35 35 #include <sys/speedstep.h>
36 36 #include <sys/cpu_acpi.h>
37 37 #include <sys/cpupm.h>
38 38 #include <sys/dtrace.h>
39 39 #include <sys/sdt.h>
40 40
/* Forward declarations for the cpupm_state_ops_t callbacks below. */
static int speedstep_init(cpu_t *);
static void speedstep_fini(cpu_t *);
static void speedstep_power(cpuset_t, uint32_t);
static void speedstep_stop(cpu_t *);
static boolean_t speedstep_turbo_supported(void);

/*
 * Interfaces for modules implementing Intel's Enhanced SpeedStep.
 */
cpupm_state_ops_t speedstep_ops = {
	"Enhanced SpeedStep Technology",
	speedstep_init,
	speedstep_fini,
	speedstep_power,
	speedstep_stop
};

/*
 * Error returns
 */
#define	ESS_RET_SUCCESS		0x00
#define	ESS_RET_NO_PM		0x01
#define	ESS_RET_UNSUP_STATE	0x02

/*
 * MSR registers for changing and reading processor power state.
 * (0x198/0x199 are IA32_PERF_STATUS/IA32_PERF_CTL per the Intel SDM.)
 */
#define	IA32_PERF_STAT_MSR		0x198
#define	IA32_PERF_CTL_MSR		0x199

#define	IA32_CPUID_TSC_CONSTANT		0xF30
#define	IA32_MISC_ENABLE_MSR		0x1A0
#define	IA32_MISC_ENABLE_EST		(1<<16)
#define	IA32_MISC_ENABLE_CXE		(1<<25)

/* Turbo-mode support bit tested in CPUID leaf 0x6 EAX (see below). */
#define	CPUID_TURBO_SUPPORT		(1 << 1)

/*
 * Debugging support: set ess_debug to non-zero on a DEBUG kernel to
 * get ESSDEBUG() trace output on the console.
 */
#ifdef DEBUG
volatile int ess_debug = 0;
#define	ESSDEBUG(arglist) if (ess_debug) printf arglist;
#else
#define	ESSDEBUG(arglist)
#endif
87 87
88 88 /*
89 89 * Write the ctrl register. How it is written, depends upon the _PCT
90 90 * APCI object value.
91 91 */
92 92 static void
93 93 write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
94 94 {
95 95 cpu_acpi_pct_t *pct_ctrl;
96 96 uint64_t reg;
97 97
98 98 pct_ctrl = CPU_ACPI_PCT_CTRL(handle);
99 99
100 100 switch (pct_ctrl->cr_addrspace_id) {
101 101 case ACPI_ADR_SPACE_FIXED_HARDWARE:
102 102 /*
103 103 * Read current power state because reserved bits must be
104 104 * preserved, compose new value, and write it.
105 105 */
106 106 reg = rdmsr(IA32_PERF_CTL_MSR);
107 107 reg &= ~((uint64_t)0xFFFF);
108 108 reg |= ctrl;
109 109 wrmsr(IA32_PERF_CTL_MSR, reg);
110 110 break;
111 111
112 112 case ACPI_ADR_SPACE_SYSTEM_IO:
113 113 (void) cpu_acpi_write_port(pct_ctrl->cr_address, ctrl,
114 114 pct_ctrl->cr_width);
115 115 break;
116 116
117 117 default:
118 118 DTRACE_PROBE1(ess_ctrl_unsupported_type, uint8_t,
119 119 pct_ctrl->cr_addrspace_id);
120 120 return;
121 121 }
122 122
123 123 DTRACE_PROBE1(ess_ctrl_write, uint32_t, ctrl);
124 124 }
125 125
126 126 /*
127 127 * Transition the current processor to the requested state.
128 128 */
129 129 void
130 130 speedstep_pstate_transition(uint32_t req_state)
131 131 {
132 132 cpupm_mach_state_t *mach_state =
133 133 (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
134 134 cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
135 135 cpu_acpi_pstate_t *req_pstate;
136 136 uint32_t ctrl;
137 137
138 138 req_pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
139 139 req_pstate += req_state;
140 140
141 141 DTRACE_PROBE1(ess_transition, uint32_t, CPU_ACPI_FREQ(req_pstate));
142 142
143 143 /*
144 144 * Initiate the processor p-state change.
145 145 */
146 146 ctrl = CPU_ACPI_PSTATE_CTRL(req_pstate);
147 147 write_ctrl(handle, ctrl);
148 148
149 149 if (mach_state->ms_turbo != NULL)
150 150 cpupm_record_turbo_info(mach_state->ms_turbo,
151 151 mach_state->ms_pstate.cma_state.pstate, req_state);
152 152
153 153 mach_state->ms_pstate.cma_state.pstate = req_state;
154 154 cpu_set_curr_clock(((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000));
155 155 }
156 156
157 157 static void
158 158 speedstep_power(cpuset_t set, uint32_t req_state)
159 159 {
160 160 /*
161 161 * If thread is already running on target CPU then just
162 162 * make the transition request. Otherwise, we'll need to
163 163 * make a cross-call.
164 164 */
165 165 kpreempt_disable();
166 166 if (CPU_IN_SET(set, CPU->cpu_id)) {
167 167 speedstep_pstate_transition(req_state);
168 168 CPUSET_DEL(set, CPU->cpu_id);
169 169 }
170 170 if (!CPUSET_ISNULL(set)) {
171 171 xc_call((xc_arg_t)req_state, NULL, NULL, CPUSET2BV(set),
172 172 (xc_func_t)speedstep_pstate_transition);
173 173 }
174 174 kpreempt_enable();
175 175 }
176 176
177 177 /*
178 178 * Validate that this processor supports Speedstep and if so,
179 179 * get the P-state data from ACPI and cache it.
180 180 */
181 181 static int
182 182 speedstep_init(cpu_t *cp)
183 183 {
184 184 cpupm_mach_state_t *mach_state =
185 185 (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
186 186 cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
187 187 cpu_acpi_pct_t *pct_stat;
188 188 static int logged = 0;
189 189
190 190 ESSDEBUG(("speedstep_init: processor %d\n", cp->cpu_id));
191 191
192 192 /*
193 193 * Cache the P-state specific ACPI data.
194 194 */
195 195 if (cpu_acpi_cache_pstate_data(handle) != 0) {
196 196 if (!logged) {
197 197 cmn_err(CE_NOTE, "!SpeedStep support is being "
198 198 "disabled due to errors parsing ACPI P-state "
199 199 "objects exported by BIOS.");
200 200 logged = 1;
201 201 }
202 202 speedstep_fini(cp);
203 203 return (ESS_RET_NO_PM);
204 204 }
205 205
206 206 pct_stat = CPU_ACPI_PCT_STATUS(handle);
207 207 switch (pct_stat->cr_addrspace_id) {
208 208 case ACPI_ADR_SPACE_FIXED_HARDWARE:
209 209 ESSDEBUG(("Transitions will use fixed hardware\n"));
210 210 break;
211 211 case ACPI_ADR_SPACE_SYSTEM_IO:
212 212 ESSDEBUG(("Transitions will use system IO\n"));
213 213 break;
214 214 default:
215 215 cmn_err(CE_WARN, "!_PCT conifgured for unsupported "
216 216 "addrspace = %d.", pct_stat->cr_addrspace_id);
217 217 cmn_err(CE_NOTE, "!CPU power management will not function.");
218 218 speedstep_fini(cp);
219 219 return (ESS_RET_NO_PM);
220 220 }
221 221
222 222 cpupm_alloc_domains(cp, CPUPM_P_STATES);
223 223
224 224 if (speedstep_turbo_supported())
225 225 mach_state->ms_turbo = cpupm_turbo_init(cp);
226 226
227 227 ESSDEBUG(("Processor %d succeeded.\n", cp->cpu_id))
228 228 return (ESS_RET_SUCCESS);
229 229 }
230 230
231 231 /*
232 232 * Free resources allocated by speedstep_init().
233 233 */
234 234 static void
235 235 speedstep_fini(cpu_t *cp)
236 236 {
237 237 cpupm_mach_state_t *mach_state =
238 238 (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
239 239 cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
240 240
241 241 cpupm_free_domains(&cpupm_pstate_domains);
242 242 cpu_acpi_free_pstate_data(handle);
243 243
244 244 if (mach_state->ms_turbo != NULL)
245 245 cpupm_turbo_fini(mach_state->ms_turbo);
246 246 mach_state->ms_turbo = NULL;
247 247 }
248 248
249 249 static void
250 250 speedstep_stop(cpu_t *cp)
251 251 {
252 252 cpupm_mach_state_t *mach_state =
253 253 (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
254 254 cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
255 255
256 256 cpupm_remove_domains(cp, CPUPM_P_STATES, &cpupm_pstate_domains);
257 257 cpu_acpi_free_pstate_data(handle);
258 258
259 259 if (mach_state->ms_turbo != NULL)
260 260 cpupm_turbo_fini(mach_state->ms_turbo);
261 261 mach_state->ms_turbo = NULL;
262 262 }
263 263
264 264 boolean_t
265 265 speedstep_supported(uint_t family, uint_t model)
266 266 {
267 267 struct cpuid_regs cpu_regs;
268 268
269 269 /* Required features */
270 270 if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
271 271 !is_x86_feature(x86_featureset, X86FSET_MSR)) {
272 272 return (B_FALSE);
273 273 }
274 274
275 275 /*
276 276 * We only support family/model combinations which
277 277 * are P-state TSC invariant.
278 278 */
279 279 if (!((family == 0xf && model >= 0x3) ||
280 280 (family == 0x6 && model >= 0xe))) {
281 281 return (B_FALSE);
282 282 }
283 283
284 284 /*
285 285 * Enhanced SpeedStep supported?
286 286 */
287 287 cpu_regs.cp_eax = 0x1;
288 288 (void) __cpuid_insn(&cpu_regs);
289 289 if (!(cpu_regs.cp_ecx & CPUID_INTC_ECX_EST)) {
290 290 return (B_FALSE);
291 291 }
292 292
293 293 return (B_TRUE);
294 294 }
295 295
296 296 boolean_t
297 297 speedstep_turbo_supported(void)
298 298 {
299 299 struct cpuid_regs cpu_regs;
300 300
301 301 /* Required features */
302 302 if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
303 303 !is_x86_feature(x86_featureset, X86FSET_MSR)) {
304 304 return (B_FALSE);
305 305 }
306 306
307 307 /*
308 308 * turbo mode supported?
309 309 */
310 310 cpu_regs.cp_eax = 0x6;
311 311 (void) __cpuid_insn(&cpu_regs);
312 312 if (!(cpu_regs.cp_eax & CPUID_TURBO_SUPPORT)) {
313 313 return (B_FALSE);
314 314 }
315 315
316 316 return (B_TRUE);
317 317 }
↓ open down ↓ |
274 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX