webrev extract: usr/src/uts/i86pc/os/cpupm/pwrnow.c ("PANKOVs restructure" — ACPI header include path change)
--- old/usr/src/uts/i86pc/os/cpupm/pwrnow.c
+++ new/usr/src/uts/i86pc/os/cpupm/pwrnow.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/x86_archext.h>
26 26 #include <sys/machsystm.h>
27 27 #include <sys/x_call.h>
28 -#include <sys/acpi/acpi.h>
28 +#include <acpica/include/acpi.h>
29 29 #include <sys/acpica.h>
30 30 #include <sys/pwrnow.h>
31 31 #include <sys/cpu_acpi.h>
32 32 #include <sys/cpupm.h>
33 33 #include <sys/dtrace.h>
34 34 #include <sys/sdt.h>
35 35
36 36 static int pwrnow_init(cpu_t *);
37 37 static void pwrnow_fini(cpu_t *);
38 38 static void pwrnow_power(cpuset_t, uint32_t);
39 39 static void pwrnow_stop(cpu_t *);
40 40
41 41 static boolean_t pwrnow_cpb_supported(void);
42 42
43 43 /*
44 44 * Interfaces for modules implementing AMD's PowerNow!.
45 45 */
46 46 cpupm_state_ops_t pwrnow_ops = {
47 47 "PowerNow! Technology",
48 48 pwrnow_init,
49 49 pwrnow_fini,
50 50 pwrnow_power,
51 51 pwrnow_stop
52 52 };
53 53
54 54 /*
55 55 * Error returns
56 56 */
57 57 #define PWRNOW_RET_SUCCESS 0x00
58 58 #define PWRNOW_RET_NO_PM 0x01
59 59 #define PWRNOW_RET_UNSUP_STATE 0x02
60 60 #define PWRNOW_RET_TRANS_INCOMPLETE 0x03
61 61
62 62 #define PWRNOW_LATENCY_WAIT 10
63 63
64 64 /*
65 65 * MSR registers for changing and reading processor power state.
66 66 */
67 67 #define PWRNOW_PERF_CTL_MSR 0xC0010062
68 68 #define PWRNOW_PERF_STATUS_MSR 0xC0010063
69 69
70 70 #define AMD_CPUID_PSTATE_HARDWARE (1<<7)
71 71 #define AMD_CPUID_TSC_CONSTANT (1<<8)
72 72 #define AMD_CPUID_CPB (1<<9)
73 73
74 74 /*
75 75 * Debugging support
76 76 */
77 77 #ifdef DEBUG
78 78 volatile int pwrnow_debug = 0;
79 79 #define PWRNOW_DEBUG(arglist) if (pwrnow_debug) printf arglist;
80 80 #else
81 81 #define PWRNOW_DEBUG(arglist)
82 82 #endif
83 83
84 84 /*
85 85 * Write the ctrl register.
86 86 */
87 87 static void
88 88 write_ctrl(cpu_acpi_handle_t handle, uint32_t ctrl)
89 89 {
90 90 cpu_acpi_pct_t *pct_ctrl;
91 91 uint64_t reg;
92 92
93 93 pct_ctrl = CPU_ACPI_PCT_CTRL(handle);
94 94
95 95 switch (pct_ctrl->cr_addrspace_id) {
96 96 case ACPI_ADR_SPACE_FIXED_HARDWARE:
97 97 reg = ctrl;
98 98 wrmsr(PWRNOW_PERF_CTL_MSR, reg);
99 99 break;
100 100
101 101 default:
102 102 DTRACE_PROBE1(pwrnow_ctrl_unsupported_type, uint8_t,
103 103 pct_ctrl->cr_addrspace_id);
104 104 return;
105 105 }
106 106
107 107 DTRACE_PROBE1(pwrnow_ctrl_write, uint32_t, ctrl);
108 108 }
109 109
110 110 /*
111 111 * Transition the current processor to the requested state.
112 112 */
113 113 static void
114 114 pwrnow_pstate_transition(uint32_t req_state)
115 115 {
116 116 cpupm_mach_state_t *mach_state =
117 117 (cpupm_mach_state_t *)CPU->cpu_m.mcpu_pm_mach_state;
118 118 cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
119 119 cpu_acpi_pstate_t *req_pstate;
120 120 uint32_t ctrl;
121 121
122 122 req_pstate = (cpu_acpi_pstate_t *)CPU_ACPI_PSTATES(handle);
123 123 req_pstate += req_state;
124 124
125 125 DTRACE_PROBE1(pwrnow_transition_freq, uint32_t,
126 126 CPU_ACPI_FREQ(req_pstate));
127 127
128 128 /*
129 129 * Initiate the processor p-state change.
130 130 */
131 131 ctrl = CPU_ACPI_PSTATE_CTRL(req_pstate);
132 132 write_ctrl(handle, ctrl);
133 133
134 134 if (mach_state->ms_turbo != NULL)
135 135 cpupm_record_turbo_info(mach_state->ms_turbo,
136 136 mach_state->ms_pstate.cma_state.pstate, req_state);
137 137
138 138 mach_state->ms_pstate.cma_state.pstate = req_state;
139 139 cpu_set_curr_clock((uint64_t)CPU_ACPI_FREQ(req_pstate) * 1000000);
140 140 }
141 141
142 142 static void
143 143 pwrnow_power(cpuset_t set, uint32_t req_state)
144 144 {
145 145 /*
146 146 * If thread is already running on target CPU then just
147 147 * make the transition request. Otherwise, we'll need to
148 148 * make a cross-call.
149 149 */
150 150 kpreempt_disable();
151 151 if (CPU_IN_SET(set, CPU->cpu_id)) {
152 152 pwrnow_pstate_transition(req_state);
153 153 CPUSET_DEL(set, CPU->cpu_id);
154 154 }
155 155 if (!CPUSET_ISNULL(set)) {
156 156 xc_call((xc_arg_t)req_state, NULL, NULL,
157 157 CPUSET2BV(set), (xc_func_t)pwrnow_pstate_transition);
158 158 }
159 159 kpreempt_enable();
160 160 }
161 161
162 162 /*
163 163 * Validate that this processor supports PowerNow! and if so,
164 164 * get the P-state data from ACPI and cache it.
165 165 */
166 166 static int
167 167 pwrnow_init(cpu_t *cp)
168 168 {
169 169 cpupm_mach_state_t *mach_state =
170 170 (cpupm_mach_state_t *)cp->cpu_m.mcpu_pm_mach_state;
171 171 cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
172 172 cpu_acpi_pct_t *pct_stat;
173 173 static int logged = 0;
174 174
175 175 PWRNOW_DEBUG(("pwrnow_init: processor %d\n", cp->cpu_id));
176 176
177 177 /*
178 178 * Cache the P-state specific ACPI data.
179 179 */
180 180 if (cpu_acpi_cache_pstate_data(handle) != 0) {
181 181 if (!logged) {
182 182 cmn_err(CE_NOTE, "!PowerNow! support is being "
183 183 "disabled due to errors parsing ACPI P-state "
184 184 "objects exported by BIOS.");
185 185 logged = 1;
186 186 }
187 187 pwrnow_fini(cp);
188 188 return (PWRNOW_RET_NO_PM);
189 189 }
190 190
191 191 pct_stat = CPU_ACPI_PCT_STATUS(handle);
192 192 switch (pct_stat->cr_addrspace_id) {
193 193 case ACPI_ADR_SPACE_FIXED_HARDWARE:
194 194 PWRNOW_DEBUG(("Transitions will use fixed hardware\n"));
195 195 break;
196 196 default:
197 197 cmn_err(CE_WARN, "!_PCT configured for unsupported "
198 198 "addrspace = %d.", pct_stat->cr_addrspace_id);
199 199 cmn_err(CE_NOTE, "!CPU power management will not function.");
200 200 pwrnow_fini(cp);
201 201 return (PWRNOW_RET_NO_PM);
202 202 }
203 203
204 204 cpupm_alloc_domains(cp, CPUPM_P_STATES);
205 205
206 206 /*
207 207 * Check for Core Performance Boost support
208 208 */
209 209 if (pwrnow_cpb_supported())
210 210 mach_state->ms_turbo = cpupm_turbo_init(cp);
211 211
212 212 PWRNOW_DEBUG(("Processor %d succeeded.\n", cp->cpu_id))
213 213 return (PWRNOW_RET_SUCCESS);
214 214 }
215 215
216 216 /*
217 217 * Free resources allocated by pwrnow_init().
218 218 */
219 219 static void
220 220 pwrnow_fini(cpu_t *cp)
221 221 {
222 222 cpupm_mach_state_t *mach_state =
223 223 (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
224 224 cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
225 225
226 226 cpupm_free_domains(&cpupm_pstate_domains);
227 227 cpu_acpi_free_pstate_data(handle);
228 228
229 229 if (mach_state->ms_turbo != NULL)
230 230 cpupm_turbo_fini(mach_state->ms_turbo);
231 231 mach_state->ms_turbo = NULL;
232 232 }
233 233
234 234 boolean_t
235 235 pwrnow_supported()
236 236 {
237 237 struct cpuid_regs cpu_regs;
238 238
239 239 /* Required features */
240 240 if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
241 241 !is_x86_feature(x86_featureset, X86FSET_MSR)) {
242 242 PWRNOW_DEBUG(("No CPUID or MSR support."));
243 243 return (B_FALSE);
244 244 }
245 245
246 246 /*
247 247 * Get the Advanced Power Management Information.
248 248 */
249 249 cpu_regs.cp_eax = 0x80000007;
250 250 (void) __cpuid_insn(&cpu_regs);
251 251
252 252 /*
253 253 * We currently only support CPU power management of
254 254 * processors that are P-state TSC invariant
255 255 */
256 256 if (!(cpu_regs.cp_edx & AMD_CPUID_TSC_CONSTANT)) {
257 257 PWRNOW_DEBUG(("No support for CPUs that are not P-state "
258 258 "TSC invariant.\n"));
259 259 return (B_FALSE);
260 260 }
261 261
262 262 /*
263 263 * We only support the "Fire and Forget" style of PowerNow! (i.e.,
264 264 * single MSR write to change speed).
265 265 */
266 266 if (!(cpu_regs.cp_edx & AMD_CPUID_PSTATE_HARDWARE)) {
267 267 PWRNOW_DEBUG(("Hardware P-State control is not supported.\n"));
268 268 return (B_FALSE);
269 269 }
270 270 return (B_TRUE);
271 271 }
272 272
273 273 static boolean_t
274 274 pwrnow_cpb_supported(void)
275 275 {
276 276 struct cpuid_regs cpu_regs;
277 277
278 278 /* Required features */
279 279 if (!is_x86_feature(x86_featureset, X86FSET_CPUID) ||
280 280 !is_x86_feature(x86_featureset, X86FSET_MSR)) {
281 281 PWRNOW_DEBUG(("No CPUID or MSR support."));
282 282 return (B_FALSE);
283 283 }
284 284
285 285 /*
286 286 * Get the Advanced Power Management Information.
287 287 */
288 288 cpu_regs.cp_eax = 0x80000007;
289 289 (void) __cpuid_insn(&cpu_regs);
290 290
291 291 if (!(cpu_regs.cp_edx & AMD_CPUID_CPB))
292 292 return (B_FALSE);
293 293
294 294 return (B_TRUE);
295 295 }
296 296
297 297 static void
298 298 pwrnow_stop(cpu_t *cp)
299 299 {
300 300 cpupm_mach_state_t *mach_state =
301 301 (cpupm_mach_state_t *)(cp->cpu_m.mcpu_pm_mach_state);
302 302 cpu_acpi_handle_t handle = mach_state->ms_acpi_handle;
303 303
304 304 cpupm_remove_domains(cp, CPUPM_P_STATES, &cpupm_pstate_domains);
305 305 cpu_acpi_free_pstate_data(handle);
306 306
307 307 if (mach_state->ms_turbo != NULL)
308 308 cpupm_turbo_fini(mach_state->ms_turbo);
309 309 mach_state->ms_turbo = NULL;
310 310 }
↓ open down ↓ |
272 lines elided |
↑ open up ↑ |
(end of webrev extract)