7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/cpudrv.c
+++ new/usr/src/uts/common/io/cpudrv.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25 /*
26 26 * Copyright (c) 2009, Intel Corporation.
27 27 * All Rights Reserved.
28 28 */
29 29
30 30 /*
31 31 * CPU Device driver. The driver is not DDI-compliant.
32 32 *
33 33 * The driver supports the following features:
34 34 * - Power management.
35 35 */
36 36
37 37 #include <sys/types.h>
38 38 #include <sys/param.h>
39 39 #include <sys/errno.h>
40 40 #include <sys/modctl.h>
41 41 #include <sys/kmem.h>
42 42 #include <sys/conf.h>
43 43 #include <sys/cmn_err.h>
44 44 #include <sys/stat.h>
45 45 #include <sys/debug.h>
46 46 #include <sys/systm.h>
47 47 #include <sys/ddi.h>
48 48 #include <sys/sunddi.h>
49 49 #include <sys/sdt.h>
50 50 #include <sys/epm.h>
51 51 #include <sys/machsystm.h>
52 52 #include <sys/x_call.h>
53 53 #include <sys/cpudrv_mach.h>
54 54 #include <sys/msacct.h>
55 55
56 56 /*
57 57 * CPU power management
58 58 *
59 59 * The supported power saving model is to slow down the CPU (on SPARC by
60 60 * dividing the CPU clock and on x86 by dropping down a P-state).
61 61 * Periodically we determine the amount of time the CPU spends running the
62 62 * idle thread and threads in user mode during the last quantum. If the idle
63 63 * thread ran less than its low water mark for the current speed for a
64 64 * number of consecutive sampling periods, or the number of threads running
65 65 * in user mode is above its high water mark, we arrange to go to the higher
66 66 * speed. If the idle thread ran more than its high water mark without
67 67 * dropping below the mark a number of consecutive times, and the number of
68 68 * threads running in user mode is below its low water mark, we arrange to go
69 69 * to the next lower speed. While going down, we go through all the speeds.
70 70 * While going up we go to the maximum speed to minimize impact on the user,
71 71 * but have provisions in the driver to go to other speeds.
72 72 *
73 73 * The driver does not have knowledge of a particular implementation of this
74 74 * scheme and will work with all CPUs supporting this model. On SPARC, the
75 75 * driver determines supported speeds by looking at 'clock-divisors' property
76 76 * created by OBP. On x86, the driver retrieves the supported speeds from
77 77 * ACPI.
78 78 */
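
A worked instance of the model above may help (numbers invented for illustration). If a CPU runs the idle thread I = 90% of the time at full speed and is then slowed by a factor of N = 2, its busy share doubles from 10% to 20%, so its idle share at the slower speed is

	idle% = 100 - (100 - I) * N = 100 - (100 - 90) * 2 = 80%

cpudrv_init() below applies this relation, through the machine-dependent CPUDRV_IDLE_CNT_PERCENT macro, when deriving each slower speed's idle high water mark from cpudrv_idle_hwm.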
79 79
80 80 /*
81 81 * Configuration function prototypes and data structures
82 82 */
83 83 static int cpudrv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
84 84 static int cpudrv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
85 85 static int cpudrv_power(dev_info_t *dip, int comp, int level);
86 86
87 87 struct dev_ops cpudrv_ops = {
88 88 DEVO_REV, /* rev */
89 89 0, /* refcnt */
90 90 nodev, /* getinfo */
91 91 nulldev, /* identify */
92 92 nulldev, /* probe */
93 93 cpudrv_attach, /* attach */
94 94 cpudrv_detach, /* detach */
95 95 nodev, /* reset */
96 96 (struct cb_ops *)NULL, /* cb_ops */
97 97 (struct bus_ops *)NULL, /* bus_ops */
98 98 cpudrv_power, /* power */
99 99 ddi_quiesce_not_needed, /* quiesce */
100 100 };
101 101
102 102 static struct modldrv modldrv = {
103 103 &mod_driverops, /* modops */
104 104 "CPU Driver", /* linkinfo */
105 105 &cpudrv_ops, /* dev_ops */
106 106 };
107 107
108 108 static struct modlinkage modlinkage = {
109 109 MODREV_1, /* rev */
110 - &modldrv, /* linkage */
111 - NULL
110 + { &modldrv, NULL } /* linkage */
112 111 };
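
This hunk is the entire change to this file, and it is what the issue synopsis is about: struct modlinkage ends in an array of linkage pointers, and the old flat initializer drew a -Wmissing-braces warning once that flag was no longer suppressed in Makefile.uts. A minimal standalone sketch of the warning and the fix (illustrative type and array size, not the real <sys/modctl.h> definitions):

	#include <stddef.h>

	struct demo_linkage {
		int	rev;
		void	*linkage[4];	/* stands in for ml_linkage[] */
	};

	static int payload;

	/* Flat initializer: valid C, but gcc -Wmissing-braces warns. */
	struct demo_linkage warns = { 1, &payload, NULL };

	/* Fully braced: identical layout and values, no warning. */
	struct demo_linkage clean = { 1, { &payload, NULL } };

Both forms initialize the same members; the inner braces only make the array initialization explicit.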
113 112
114 113 /*
115 114 * Function prototypes
116 115 */
117 116 static int cpudrv_init(cpudrv_devstate_t *cpudsp);
118 117 static void cpudrv_free(cpudrv_devstate_t *cpudsp);
119 118 static int cpudrv_comp_create(cpudrv_devstate_t *cpudsp);
120 119 static void cpudrv_monitor_disp(void *arg);
121 120 static void cpudrv_monitor(void *arg);
122 121
123 122 /*
124 123 * Driver global variables
125 124 */
126 125 uint_t cpudrv_debug = 0;
127 126 void *cpudrv_state;
128 127 static uint_t cpudrv_idle_hwm = CPUDRV_IDLE_HWM;
129 128 static uint_t cpudrv_idle_lwm = CPUDRV_IDLE_LWM;
130 129 static uint_t cpudrv_idle_buf_zone = CPUDRV_IDLE_BUF_ZONE;
131 130 static uint_t cpudrv_idle_bhwm_cnt_max = CPUDRV_IDLE_BHWM_CNT_MAX;
132 131 static uint_t cpudrv_idle_blwm_cnt_max = CPUDRV_IDLE_BLWM_CNT_MAX;
133 132 static uint_t cpudrv_user_hwm = CPUDRV_USER_HWM;
134 133
135 134 boolean_t cpudrv_enabled = B_TRUE;
136 135
137 136 /*
138 137 * cpudrv_direct_pm allows user applications to directly control the
139 138 * power state transitions (direct pm) without following the normal
140 139 * direct pm protocol. This is needed because the normal protocol
141 140 * requires that a device only be lowered when it is idle, and be
142 141 * brought up when it requests to do so by calling pm_raise_power().
143 142 * Ignoring this protocol is harmless for a CPU (other than speed).
144 143 * Moreover, it might be the case that the CPU is never idle or wants
145 144 * to be at a higher speed because of the additional CPU cycles required
146 145 * to run the user application.
147 146 *
148 147 * The driver will still report idle/busy status to the framework. Although
149 148 * the framework will ignore this information for direct pm devices and not
150 149 * try to bring them down when idle, user applications can still use this
151 150 * information if they want.
152 151 *
153 152 * In the future, provide an ioctl to control setting of this mode. In
154 153 * that case, this variable should move to the state structure and
155 154 * be protected by the lock in the state structure.
156 155 */
157 156 int cpudrv_direct_pm = 0;
158 157
159 158 /*
160 159 * Arranges for the handler function to be called at the interval suitable
161 160 * for current speed.
162 161 */
163 162 #define CPUDRV_MONITOR_INIT(cpudsp) { \
164 163 if (cpudrv_is_enabled(cpudsp)) { \
165 164 ASSERT(mutex_owned(&(cpudsp)->lock)); \
166 165 (cpudsp)->cpudrv_pm.timeout_id = \
167 166 timeout(cpudrv_monitor_disp, \
168 167 (cpudsp), (((cpudsp)->cpudrv_pm.cur_spd == NULL) ? \
169 168 CPUDRV_QUANT_CNT_OTHR : \
170 169 (cpudsp)->cpudrv_pm.cur_spd->quant_cnt)); \
171 170 } \
172 171 }
173 172
174 173 /*
175 174 * Arranges for the handler function not to be called back.
176 175 */
177 176 #define CPUDRV_MONITOR_FINI(cpudsp) { \
178 177 timeout_id_t tmp_tid; \
179 178 ASSERT(mutex_owned(&(cpudsp)->lock)); \
180 179 tmp_tid = (cpudsp)->cpudrv_pm.timeout_id; \
181 180 (cpudsp)->cpudrv_pm.timeout_id = 0; \
182 181 mutex_exit(&(cpudsp)->lock); \
183 182 if (tmp_tid != 0) { \
184 183 (void) untimeout(tmp_tid); \
185 184 mutex_enter(&(cpudsp)->cpudrv_pm.timeout_lock); \
186 185 while ((cpudsp)->cpudrv_pm.timeout_count != 0) \
187 186 cv_wait(&(cpudsp)->cpudrv_pm.timeout_cv, \
188 187 &(cpudsp)->cpudrv_pm.timeout_lock); \
189 188 mutex_exit(&(cpudsp)->cpudrv_pm.timeout_lock); \
190 189 } \
191 190 mutex_enter(&(cpudsp)->lock); \
192 191 }
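
CPUDRV_MONITOR_FINI above is the classic untimeout(9F) cancellation dance, and the mutex_exit() before untimeout() is essential: untimeout() waits for a callout that has already fired, and that callout (cpudrv_monitor_disp(), below) can itself try to take cpudsp->lock. Stripped to its shape (a sketch with hypothetical names, not compilable driver code):

	mutex_enter(&sp->lock);
	tid = sp->timeout_id;
	sp->timeout_id = 0;		/* late callouts see 0 and back off */
	mutex_exit(&sp->lock);		/* callout may also need sp->lock */
	if (tid != 0) {
		(void) untimeout(tid);	/* waits out a running callout */
		/* ...then wait for any taskq job the callout queued. */
	}
	mutex_enter(&sp->lock);

The final wait on timeout_cv/timeout_count covers the window where the callout ran but the taskq job it dispatched has not yet finished.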
193 192
194 193 int
195 194 _init(void)
196 195 {
197 196 int error;
198 197
199 198 DPRINTF(D_INIT, (" _init: function called\n"));
200 199 if ((error = ddi_soft_state_init(&cpudrv_state,
201 200 sizeof (cpudrv_devstate_t), 0)) != 0) {
202 201 return (error);
203 202 }
204 203
205 204 if ((error = mod_install(&modlinkage)) != 0) {
206 205 ddi_soft_state_fini(&cpudrv_state);
207 206 }
208 207
209 208 /*
210 209 * Callbacks used by the PPM driver.
211 210 */
212 211 CPUDRV_SET_PPM_CALLBACKS();
213 212 return (error);
214 213 }
215 214
216 215 int
217 216 _fini(void)
218 217 {
219 218 int error;
220 219
221 220 DPRINTF(D_FINI, (" _fini: function called\n"));
222 221 if ((error = mod_remove(&modlinkage)) == 0) {
223 222 ddi_soft_state_fini(&cpudrv_state);
224 223 }
225 224
226 225 return (error);
227 226 }
228 227
229 228 int
230 229 _info(struct modinfo *modinfop)
231 230 {
232 231 return (mod_info(&modlinkage, modinfop));
233 232 }
234 233
235 234 /*
236 235 * Driver attach(9e) entry point.
237 236 */
238 237 static int
239 238 cpudrv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
240 239 {
241 240 int instance;
242 241 cpudrv_devstate_t *cpudsp;
243 242
244 243 instance = ddi_get_instance(dip);
245 244
246 245 switch (cmd) {
247 246 case DDI_ATTACH:
248 247 DPRINTF(D_ATTACH, ("cpudrv_attach: instance %d: "
249 248 "DDI_ATTACH called\n", instance));
250 249 if (!cpudrv_is_enabled(NULL))
251 250 return (DDI_FAILURE);
252 251 if (ddi_soft_state_zalloc(cpudrv_state, instance) !=
253 252 DDI_SUCCESS) {
254 253 cmn_err(CE_WARN, "cpudrv_attach: instance %d: "
255 254 "can't allocate state", instance);
256 255 cpudrv_enabled = B_FALSE;
257 256 return (DDI_FAILURE);
258 257 }
259 258 if ((cpudsp = ddi_get_soft_state(cpudrv_state, instance)) ==
260 259 NULL) {
261 260 cmn_err(CE_WARN, "cpudrv_attach: instance %d: "
262 261 "can't get state", instance);
263 262 ddi_soft_state_free(cpudrv_state, instance);
264 263 cpudrv_enabled = B_FALSE;
265 264 return (DDI_FAILURE);
266 265 }
267 266 cpudsp->dip = dip;
268 267
269 268 /*
270 269 * Find CPU number for this dev_info node.
271 270 */
272 271 if (!cpudrv_get_cpu_id(dip, &(cpudsp->cpu_id))) {
273 272 cmn_err(CE_WARN, "cpudrv_attach: instance %d: "
274 273 "can't convert dip to cpu_id", instance);
275 274 ddi_soft_state_free(cpudrv_state, instance);
276 275 cpudrv_enabled = B_FALSE;
277 276 return (DDI_FAILURE);
278 277 }
279 278
280 279 mutex_init(&cpudsp->lock, NULL, MUTEX_DRIVER, NULL);
281 280 if (cpudrv_is_enabled(cpudsp)) {
282 281 if (cpudrv_init(cpudsp) != DDI_SUCCESS) {
283 282 cpudrv_enabled = B_FALSE;
284 283 cpudrv_free(cpudsp);
285 284 ddi_soft_state_free(cpudrv_state, instance);
286 285 return (DDI_FAILURE);
287 286 }
288 287 if (cpudrv_comp_create(cpudsp) != DDI_SUCCESS) {
289 288 cpudrv_enabled = B_FALSE;
290 289 cpudrv_free(cpudsp);
291 290 ddi_soft_state_free(cpudrv_state, instance);
292 291 return (DDI_FAILURE);
293 292 }
294 293 if (ddi_prop_update_string(DDI_DEV_T_NONE,
295 294 dip, "pm-class", "CPU") != DDI_PROP_SUCCESS) {
296 295 cpudrv_enabled = B_FALSE;
297 296 cpudrv_free(cpudsp);
298 297 ddi_soft_state_free(cpudrv_state, instance);
299 298 return (DDI_FAILURE);
300 299 }
301 300
302 301 /*
303 302 * A taskq is used to dispatch the routine that monitors
304 303 * CPU activity.
305 304 */
306 305 cpudsp->cpudrv_pm.tq = ddi_taskq_create(dip,
307 306 "cpudrv_monitor", CPUDRV_TASKQ_THREADS,
308 307 TASKQ_DEFAULTPRI, 0);
309 308
310 309 mutex_init(&cpudsp->cpudrv_pm.timeout_lock, NULL,
311 310 MUTEX_DRIVER, NULL);
312 311 cv_init(&cpudsp->cpudrv_pm.timeout_cv, NULL,
313 312 CV_DEFAULT, NULL);
314 313
315 314 /*
316 315 * The driver needs to assume that the CPU is running
317 316 * at an unknown speed at DDI_ATTACH and switch it to
318 317 * the needed speed. We assume that the initial needed
319 318 * speed is full speed for us.
320 319 */
321 320 /*
322 321 * We need to take the lock because cpudrv_monitor()
323 322 * will start running in parallel with attach().
324 323 */
325 324 mutex_enter(&cpudsp->lock);
326 325 cpudsp->cpudrv_pm.cur_spd = NULL;
327 326 cpudsp->cpudrv_pm.pm_started = B_FALSE;
328 327 /*
329 328 * We don't call pm_raise_power() directly from attach
330 329 * because driver attach for a slave CPU node can
331 330 * happen before the CPU is even initialized. We just
332 331 * start the monitoring system which understands
333 332 * unknown speed and moves CPU to top speed when it
334 333 * has been initialized.
335 334 */
336 335 CPUDRV_MONITOR_INIT(cpudsp);
337 336 mutex_exit(&cpudsp->lock);
338 337
339 338 }
340 339
341 340 if (!cpudrv_mach_init(cpudsp)) {
342 341 cmn_err(CE_WARN, "cpudrv_attach: instance %d: "
343 342 "cpudrv_mach_init failed", instance);
344 343 cpudrv_enabled = B_FALSE;
345 344 cpudrv_free(cpudsp);
346 345 ddi_soft_state_free(cpudrv_state, instance);
347 346 return (DDI_FAILURE);
348 347 }
349 348
350 349 CPUDRV_INSTALL_MAX_CHANGE_HANDLER(cpudsp);
351 350
352 351 (void) ddi_prop_update_int(DDI_DEV_T_NONE, dip,
353 352 DDI_NO_AUTODETACH, 1);
354 353 ddi_report_dev(dip);
355 354 return (DDI_SUCCESS);
356 355
357 356 case DDI_RESUME:
358 357 DPRINTF(D_ATTACH, ("cpudrv_attach: instance %d: "
359 358 "DDI_RESUME called\n", instance));
360 359
361 360 cpudsp = ddi_get_soft_state(cpudrv_state, instance);
362 361 ASSERT(cpudsp != NULL);
363 362
364 363 /*
365 364 * Nothing to do for resume, if not doing active PM.
366 365 */
367 366 if (!cpudrv_is_enabled(cpudsp))
368 367 return (DDI_SUCCESS);
369 368
370 369 mutex_enter(&cpudsp->lock);
371 370 /*
372 371 * The driver needs to assume that the CPU is running at an unknown
373 372 * speed at DDI_RESUME and switch it to the needed speed. We assume
374 373 * that the needed speed is full speed for us.
375 374 */
376 375 cpudsp->cpudrv_pm.cur_spd = NULL;
377 376 CPUDRV_MONITOR_INIT(cpudsp);
378 377 mutex_exit(&cpudsp->lock);
379 378 CPUDRV_REDEFINE_TOPSPEED(dip);
380 379 return (DDI_SUCCESS);
381 380
382 381 default:
383 382 return (DDI_FAILURE);
384 383 }
385 384 }
386 385
387 386 /*
388 387 * Driver detach(9e) entry point.
389 388 */
390 389 static int
391 390 cpudrv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
392 391 {
393 392 int instance;
394 393 cpudrv_devstate_t *cpudsp;
395 394 cpudrv_pm_t *cpupm;
396 395
397 396 instance = ddi_get_instance(dip);
398 397
399 398 switch (cmd) {
400 399 case DDI_DETACH:
401 400 DPRINTF(D_DETACH, ("cpudrv_detach: instance %d: "
402 401 "DDI_DETACH called\n", instance));
403 402
404 403 #if defined(__x86)
405 404 cpudsp = ddi_get_soft_state(cpudrv_state, instance);
406 405 ASSERT(cpudsp != NULL);
407 406
408 407 /*
409 408 * Nothing to do for detach, if no doing active PM.
410 409 */
411 410 if (!cpudrv_is_enabled(cpudsp))
412 411 return (DDI_SUCCESS);
413 412
414 413 /*
415 414 * uninstall PPC/_TPC change notification handler
416 415 */
417 416 CPUDRV_UNINSTALL_MAX_CHANGE_HANDLER(cpudsp);
418 417
419 418 /*
420 419 * destroy platform-specific resources
421 420 */
422 421 if (!cpudrv_mach_fini(cpudsp))
423 422 return (DDI_FAILURE);
424 423
425 424 mutex_enter(&cpudsp->lock);
426 425 CPUDRV_MONITOR_FINI(cpudsp);
427 426 cv_destroy(&cpudsp->cpudrv_pm.timeout_cv);
428 427 mutex_destroy(&cpudsp->cpudrv_pm.timeout_lock);
429 428 ddi_taskq_destroy(cpudsp->cpudrv_pm.tq);
430 429 cpudrv_free(cpudsp);
431 430 mutex_exit(&cpudsp->lock);
432 431 mutex_destroy(&cpudsp->lock);
433 432 ddi_soft_state_free(cpudrv_state, instance);
434 433 (void) ddi_prop_update_int(DDI_DEV_T_NONE, dip,
435 434 DDI_NO_AUTODETACH, 0);
436 435 return (DDI_SUCCESS);
437 436
438 437 #else
439 438 /*
440 439 * If the only thing supported by the driver is power
441 440 * management, we can in future enhance the driver and
442 441 * framework that loads it to unload the driver when
443 442 * user has disabled CPU power management.
444 443 */
445 444 return (DDI_FAILURE);
446 445 #endif
447 446
448 447 case DDI_SUSPEND:
449 448 DPRINTF(D_DETACH, ("cpudrv_detach: instance %d: "
450 449 "DDI_SUSPEND called\n", instance));
451 450
452 451 cpudsp = ddi_get_soft_state(cpudrv_state, instance);
453 452 ASSERT(cpudsp != NULL);
454 453
455 454 /*
456 455 * Nothing to do for suspend, if not doing active PM.
457 456 */
458 457 if (!cpudrv_is_enabled(cpudsp))
459 458 return (DDI_SUCCESS);
460 459
461 460 /*
462 461 * During a checkpoint-resume sequence, the framework will
463 462 * stop interrupts to quiesce kernel activity. This will
464 463 * leave our monitoring system ineffective. Handle this
465 464 * by stopping our monitoring system and bringing CPU
466 465 * to full speed. In case we are in special direct pm
467 466 * mode, we leave the CPU at whatever speed it is. This
468 467 * is harmless other than speed.
469 468 */
470 469 mutex_enter(&cpudsp->lock);
471 470 cpupm = &(cpudsp->cpudrv_pm);
472 471
473 472 DPRINTF(D_DETACH, ("cpudrv_detach: instance %d: DDI_SUSPEND - "
474 473 "cur_spd %d, topspeed %d\n", instance,
475 474 cpupm->cur_spd->pm_level,
476 475 CPUDRV_TOPSPEED(cpupm)->pm_level));
477 476
478 477 CPUDRV_MONITOR_FINI(cpudsp);
479 478
480 479 if (!cpudrv_direct_pm && (cpupm->cur_spd !=
481 480 CPUDRV_TOPSPEED(cpupm))) {
482 481 if (cpupm->pm_busycnt < 1) {
483 482 if ((pm_busy_component(dip, CPUDRV_COMP_NUM)
484 483 == DDI_SUCCESS)) {
485 484 cpupm->pm_busycnt++;
486 485 } else {
487 486 CPUDRV_MONITOR_INIT(cpudsp);
488 487 mutex_exit(&cpudsp->lock);
489 488 cmn_err(CE_WARN, "cpudrv_detach: "
490 489 "instance %d: can't busy CPU "
491 490 "component", instance);
492 491 return (DDI_FAILURE);
493 492 }
494 493 }
495 494 mutex_exit(&cpudsp->lock);
496 495 if (pm_raise_power(dip, CPUDRV_COMP_NUM,
497 496 CPUDRV_TOPSPEED(cpupm)->pm_level) !=
498 497 DDI_SUCCESS) {
499 498 mutex_enter(&cpudsp->lock);
500 499 CPUDRV_MONITOR_INIT(cpudsp);
501 500 mutex_exit(&cpudsp->lock);
502 501 cmn_err(CE_WARN, "cpudrv_detach: instance %d: "
503 502 "can't raise CPU power level to %d",
504 503 instance,
505 504 CPUDRV_TOPSPEED(cpupm)->pm_level);
506 505 return (DDI_FAILURE);
507 506 } else {
508 507 return (DDI_SUCCESS);
509 508 }
510 509 } else {
511 510 mutex_exit(&cpudsp->lock);
512 511 return (DDI_SUCCESS);
513 512 }
514 513
515 514 default:
516 515 return (DDI_FAILURE);
517 516 }
518 517 }
519 518
520 519 /*
521 520 * Driver power(9e) entry point.
522 521 *
523 522 * Driver's notion of current power is set *only* in power(9e) entry point
524 523 * after actual power change operation has been successfully completed.
525 524 */
526 525 /* ARGSUSED */
527 526 static int
528 527 cpudrv_power(dev_info_t *dip, int comp, int level)
529 528 {
530 529 int instance;
531 530 cpudrv_devstate_t *cpudsp;
532 531 cpudrv_pm_t *cpudrvpm;
533 532 cpudrv_pm_spd_t *new_spd;
534 533 boolean_t is_ready;
535 534 int ret;
536 535
537 536 instance = ddi_get_instance(dip);
538 537
539 538 DPRINTF(D_POWER, ("cpudrv_power: instance %d: level %d\n",
540 539 instance, level));
541 540
542 541 if ((cpudsp = ddi_get_soft_state(cpudrv_state, instance)) == NULL) {
543 542 cmn_err(CE_WARN, "cpudrv_power: instance %d: can't "
544 543 "get state", instance);
545 544 return (DDI_FAILURE);
546 545 }
547 546
548 547 /*
549 548 * We're not ready until we can get a cpu_t
550 549 */
551 550 is_ready = (cpudrv_get_cpu(cpudsp) == DDI_SUCCESS);
552 551
553 552 mutex_enter(&cpudsp->lock);
554 553 cpudrvpm = &(cpudsp->cpudrv_pm);
555 554
556 555 /*
557 556 * In normal operation, we fail if we are busy and request is
558 557 * to lower the power level. We let this go through if the driver
559 558 * is in special direct pm mode. On x86, we also let this through
560 559 * if the change is due to a request to govern the max speed.
561 560 */
562 561 if (!cpudrv_direct_pm && (cpudrvpm->pm_busycnt >= 1) &&
563 562 !cpudrv_is_governor_thread(cpudrvpm)) {
564 563 if ((cpudrvpm->cur_spd != NULL) &&
565 564 (level < cpudrvpm->cur_spd->pm_level)) {
566 565 mutex_exit(&cpudsp->lock);
567 566 return (DDI_FAILURE);
568 567 }
569 568 }
570 569
571 570 for (new_spd = cpudrvpm->head_spd; new_spd; new_spd =
572 571 new_spd->down_spd) {
573 572 if (new_spd->pm_level == level)
574 573 break;
575 574 }
576 575 if (!new_spd) {
577 576 CPUDRV_RESET_GOVERNOR_THREAD(cpudrvpm);
578 577 mutex_exit(&cpudsp->lock);
579 578 cmn_err(CE_WARN, "cpudrv_power: instance %d: "
580 579 "can't locate new CPU speed", instance);
581 580 return (DDI_FAILURE);
582 581 }
583 582
584 583 /*
585 584 * We currently refuse to power manage if the CPU is not ready to
586 585 * take cross calls (cross calls fail silently if CPU is not ready
587 586 * for it).
588 587 *
589 588 * Additionally, for x86 platforms we cannot power manage an instance
590 589 * until it has been initialized.
591 590 */
592 591 if (is_ready) {
593 592 is_ready = CPUDRV_XCALL_IS_READY(cpudsp->cpu_id);
594 593 if (!is_ready) {
595 594 DPRINTF(D_POWER, ("cpudrv_power: instance %d: "
596 595 "CPU not ready for x-calls\n", instance));
597 596 } else if (!(is_ready = cpudrv_power_ready(cpudsp->cp))) {
598 597 DPRINTF(D_POWER, ("cpudrv_power: instance %d: "
599 598 "waiting for all CPUs to be power manageable\n",
600 599 instance));
601 600 }
602 601 }
603 602 if (!is_ready) {
604 603 CPUDRV_RESET_GOVERNOR_THREAD(cpudrvpm);
605 604 mutex_exit(&cpudsp->lock);
606 605 return (DDI_FAILURE);
607 606 }
608 607
609 608 /*
610 609 * Execute CPU specific routine on the requested CPU to
611 610 * change its speed to normal-speed/divisor.
612 611 */
613 612 if ((ret = cpudrv_change_speed(cpudsp, new_spd)) != DDI_SUCCESS) {
614 613 cmn_err(CE_WARN, "cpudrv_power: "
615 614 "cpudrv_change_speed() return = %d", ret);
616 615 mutex_exit(&cpudsp->lock);
617 616 return (DDI_FAILURE);
618 617 }
619 618
620 619 /*
621 620 * Reset idle threshold time for the new power level.
622 621 */
623 622 if ((cpudrvpm->cur_spd != NULL) && (level <
624 623 cpudrvpm->cur_spd->pm_level)) {
625 624 if (pm_idle_component(dip, CPUDRV_COMP_NUM) ==
626 625 DDI_SUCCESS) {
627 626 if (cpudrvpm->pm_busycnt >= 1)
628 627 cpudrvpm->pm_busycnt--;
629 628 } else {
630 629 cmn_err(CE_WARN, "cpudrv_power: instance %d: "
631 630 "can't idle CPU component",
632 631 ddi_get_instance(dip));
633 632 }
634 633 }
635 634 /*
636 635 * Reset various parameters because we are now running at new speed.
637 636 */
638 637 cpudrvpm->lastquan_mstate[CMS_IDLE] = 0;
639 638 cpudrvpm->lastquan_mstate[CMS_SYSTEM] = 0;
640 639 cpudrvpm->lastquan_mstate[CMS_USER] = 0;
641 640 cpudrvpm->lastquan_ticks = 0;
642 641 cpudrvpm->cur_spd = new_spd;
643 642 CPUDRV_RESET_GOVERNOR_THREAD(cpudrvpm);
644 643 mutex_exit(&cpudsp->lock);
645 644
646 645 return (DDI_SUCCESS);
647 646 }
648 647
649 648 /*
650 649 * Initialize power management data.
651 650 */
652 651 static int
653 652 cpudrv_init(cpudrv_devstate_t *cpudsp)
654 653 {
655 654 cpudrv_pm_t *cpupm = &(cpudsp->cpudrv_pm);
656 655 cpudrv_pm_spd_t *cur_spd;
657 656 cpudrv_pm_spd_t *prev_spd = NULL;
658 657 int *speeds;
659 658 uint_t nspeeds;
660 659 int idle_cnt_percent;
661 660 int user_cnt_percent;
662 661 int i;
663 662
664 663 CPUDRV_GET_SPEEDS(cpudsp, speeds, nspeeds);
665 664 if (nspeeds < 2) {
666 665 /* Need at least two speeds to power manage */
667 666 CPUDRV_FREE_SPEEDS(speeds, nspeeds);
668 667 return (DDI_FAILURE);
669 668 }
670 669 cpupm->num_spd = nspeeds;
671 670
672 671 /*
673 672 * Calculate the watermarks and other parameters based on the
674 673 * supplied speeds.
675 674 *
676 675 * One of the basic assumptions is that for X amount of CPU work,
677 676 * if the CPU is slowed down by a factor of N, the time it takes to
678 677 * do the same work will be N * X.
679 678 *
680 679 * The driver declares that a CPU is idle and ready to be slowed down
681 680 * if the amount of idle time is more than the current speed's idle_hwm
682 681 * without dropping below idle_hwm for a number of consecutive sampling
683 682 * intervals, and the number of threads running in user mode is below
684 683 * user_lwm. We want to set the current user_lwm such that if we
685 684 * just switched to the next slower speed with no change in real work
686 685 * load, the amount of user threads at the slower speed will be such
687 686 * that it falls below the slower speed's user_hwm. If we didn't do
688 687 * that then we would just come back to the higher speed as soon as we
689 688 * went down, even with no change in work load.
690 689 * The user_hwm is a fixed percentage and is not calculated dynamically.
691 690 *
692 691 * We bring the CPU up if the idle time at the current speed is less
693 692 * than the current speed's idle_lwm for a number of consecutive
694 693 * sampling intervals, or user threads are above the user_hwm for the
695 694 * current speed.
696 695 */
697 696 for (i = 0; i < nspeeds; i++) {
698 697 cur_spd = kmem_zalloc(sizeof (cpudrv_pm_spd_t), KM_SLEEP);
699 698 cur_spd->speed = speeds[i];
700 699 if (i == 0) { /* normal speed */
701 700 cpupm->head_spd = cur_spd;
702 701 CPUDRV_TOPSPEED(cpupm) = cur_spd;
703 702 cur_spd->quant_cnt = CPUDRV_QUANT_CNT_NORMAL;
704 703 cur_spd->idle_hwm =
705 704 (cpudrv_idle_hwm * cur_spd->quant_cnt) / 100;
706 705 /* can't speed up anymore */
707 706 cur_spd->idle_lwm = 0;
708 707 cur_spd->user_hwm = UINT_MAX;
709 708 } else {
710 709 cur_spd->quant_cnt = CPUDRV_QUANT_CNT_OTHR;
711 710 ASSERT(prev_spd != NULL);
712 711 prev_spd->down_spd = cur_spd;
713 712 cur_spd->up_spd = cpupm->head_spd;
714 713
715 714 /*
716 715 * Let's assume a CPU is considered idle at full speed
717 716 * when it spends I% of its time running the idle
718 717 * thread. At full speed, the CPU will be busy (100 - I)%
719 718 * of the time. This busy percentage increases by a
720 719 * factor of N as the CPU slows down. A CPU that is
721 720 * idle I% of the time at full speed is idle
722 721 * (100 - ((100 - I) * N))% of the time at 1/N speed.
723 722 * The idle_lwm is a fixed percentage. A large value of
724 723 * N may cause idle_hwm to go below idle_lwm. We need
725 724 * to make sure that there is at least a buffer zone
726 725 * separation between the idle_lwm and idle_hwm values.
727 726 */
728 727 idle_cnt_percent = CPUDRV_IDLE_CNT_PERCENT(
729 728 cpudrv_idle_hwm, speeds, i);
730 729 idle_cnt_percent = max(idle_cnt_percent,
731 730 (cpudrv_idle_lwm + cpudrv_idle_buf_zone));
732 731 cur_spd->idle_hwm =
733 732 (idle_cnt_percent * cur_spd->quant_cnt) / 100;
734 733 cur_spd->idle_lwm =
735 734 (cpudrv_idle_lwm * cur_spd->quant_cnt) / 100;
736 735
737 736 /*
738 737 * The lwm for user threads is determined such that
739 738 * if the CPU slows down, the work load at the
740 739 * new speed would still keep the CPU at or below the
741 740 * user_hwm of the new speed. This is to prevent
742 741 * a quick jump back up to the higher speed.
743 742 */
744 743 cur_spd->user_hwm = (cpudrv_user_hwm *
745 744 cur_spd->quant_cnt) / 100;
746 745 user_cnt_percent = CPUDRV_USER_CNT_PERCENT(
747 746 cpudrv_user_hwm, speeds, i);
748 747 prev_spd->user_lwm =
749 748 (user_cnt_percent * prev_spd->quant_cnt) / 100;
750 749 }
751 750 prev_spd = cur_spd;
752 751 }
753 752 /* Slowest speed. Can't slow down anymore */
754 753 cur_spd->idle_hwm = UINT_MAX;
755 754 cur_spd->user_lwm = -1;
756 755 #ifdef DEBUG
757 756 DPRINTF(D_PM_INIT, ("cpudrv_init: instance %d: head_spd spd %d, "
758 757 "num_spd %d\n", ddi_get_instance(cpudsp->dip),
759 758 cpupm->head_spd->speed, cpupm->num_spd));
760 759 for (cur_spd = cpupm->head_spd; cur_spd; cur_spd = cur_spd->down_spd) {
761 760 DPRINTF(D_PM_INIT, ("cpudrv_init: instance %d: speed %d, "
762 761 "down_spd spd %d, idle_hwm %d, user_lwm %d, "
763 762 "up_spd spd %d, idle_lwm %d, user_hwm %d, "
764 763 "quant_cnt %d\n", ddi_get_instance(cpudsp->dip),
765 764 cur_spd->speed,
766 765 (cur_spd->down_spd ? cur_spd->down_spd->speed : 0),
767 766 cur_spd->idle_hwm, cur_spd->user_lwm,
768 767 (cur_spd->up_spd ? cur_spd->up_spd->speed : 0),
769 768 cur_spd->idle_lwm, cur_spd->user_hwm,
770 769 cur_spd->quant_cnt));
771 770 }
772 771 #endif /* DEBUG */
773 772 CPUDRV_FREE_SPEEDS(speeds, nspeeds);
774 773 return (DDI_SUCCESS);
775 774 }
776 775
777 776 /*
778 777 * Free CPU power management data.
779 778 */
780 779 static void
781 780 cpudrv_free(cpudrv_devstate_t *cpudsp)
782 781 {
783 782 cpudrv_pm_t *cpupm = &(cpudsp->cpudrv_pm);
784 783 cpudrv_pm_spd_t *cur_spd, *next_spd;
785 784
786 785 cur_spd = cpupm->head_spd;
787 786 while (cur_spd) {
788 787 next_spd = cur_spd->down_spd;
789 788 kmem_free(cur_spd, sizeof (cpudrv_pm_spd_t));
790 789 cur_spd = next_spd;
791 790 }
792 791 bzero(cpupm, sizeof (cpudrv_pm_t));
793 792 }
794 793
795 794 /*
796 795 * Create pm-components property.
797 796 */
798 797 static int
799 798 cpudrv_comp_create(cpudrv_devstate_t *cpudsp)
800 799 {
801 800 cpudrv_pm_t *cpupm = &(cpudsp->cpudrv_pm);
802 801 cpudrv_pm_spd_t *cur_spd;
803 802 char **pmc;
804 803 int size;
805 804 char name[] = "NAME=CPU Speed";
806 805 int i, j;
807 806 uint_t comp_spd;
808 807 int result = DDI_FAILURE;
809 808
810 809 pmc = kmem_zalloc((cpupm->num_spd + 1) * sizeof (char *), KM_SLEEP);
811 810 size = CPUDRV_COMP_SIZE();
812 811 if (cpupm->num_spd > CPUDRV_COMP_MAX_VAL) {
813 812 cmn_err(CE_WARN, "cpudrv_comp_create: instance %d: "
814 813 "number of speeds exceeded limits",
815 814 ddi_get_instance(cpudsp->dip));
816 815 kmem_free(pmc, (cpupm->num_spd + 1) * sizeof (char *));
817 816 return (result);
818 817 }
819 818
820 819 for (i = cpupm->num_spd, cur_spd = cpupm->head_spd; i > 0;
821 820 i--, cur_spd = cur_spd->down_spd) {
822 821 cur_spd->pm_level = i;
823 822 pmc[i] = kmem_zalloc((size * sizeof (char)), KM_SLEEP);
824 823 comp_spd = CPUDRV_COMP_SPEED(cpupm, cur_spd);
825 824 if (comp_spd > CPUDRV_COMP_MAX_VAL) {
826 825 cmn_err(CE_WARN, "cpudrv_comp_create: "
827 826 "instance %d: speed exceeded limits",
828 827 ddi_get_instance(cpudsp->dip));
829 828 for (j = cpupm->num_spd; j >= i; j--) {
830 829 kmem_free(pmc[j], size * sizeof (char));
831 830 }
832 831 kmem_free(pmc, (cpupm->num_spd + 1) *
833 832 sizeof (char *));
834 833 return (result);
835 834 }
836 835 CPUDRV_COMP_SPRINT(pmc[i], cpupm, cur_spd, comp_spd)
837 836 DPRINTF(D_PM_COMP_CREATE, ("cpudrv_comp_create: "
838 837 "instance %d: pm-components power level %d string '%s'\n",
839 838 ddi_get_instance(cpudsp->dip), i, pmc[i]));
840 839 }
841 840 pmc[0] = kmem_zalloc(sizeof (name), KM_SLEEP);
842 841 (void) strcat(pmc[0], name);
843 842 DPRINTF(D_PM_COMP_CREATE, ("cpudrv_comp_create: instance %d: "
844 843 "pm-components component name '%s'\n",
845 844 ddi_get_instance(cpudsp->dip), pmc[0]));
846 845
847 846 if (ddi_prop_update_string_array(DDI_DEV_T_NONE, cpudsp->dip,
848 847 "pm-components", pmc, cpupm->num_spd + 1) == DDI_PROP_SUCCESS) {
849 848 result = DDI_SUCCESS;
850 849 } else {
851 850 cmn_err(CE_WARN, "cpudrv_comp_create: instance %d: "
852 851 "can't create pm-components property",
853 852 ddi_get_instance(cpudsp->dip));
854 853 }
855 854
856 855 for (i = cpupm->num_spd; i > 0; i--) {
857 856 kmem_free(pmc[i], size * sizeof (char));
858 857 }
859 858 kmem_free(pmc[0], sizeof (name));
860 859 kmem_free(pmc, (cpupm->num_spd + 1) * sizeof (char *));
861 860 return (result);
862 861 }
863 862
864 863 /*
865 864 * Mark a component idle.
866 865 */
867 866 #define CPUDRV_MONITOR_PM_IDLE_COMP(dip, cpupm) { \
868 867 if ((cpupm)->pm_busycnt >= 1) { \
869 868 if (pm_idle_component((dip), CPUDRV_COMP_NUM) == \
870 869 DDI_SUCCESS) { \
871 870 DPRINTF(D_PM_MONITOR, ("cpudrv_monitor: " \
872 871 "instance %d: pm_idle_component called\n", \
873 872 ddi_get_instance((dip)))); \
874 873 (cpupm)->pm_busycnt--; \
875 874 } else { \
876 875 cmn_err(CE_WARN, "cpudrv_monitor: instance %d: " \
877 876 "can't idle CPU component", \
878 877 ddi_get_instance((dip))); \
879 878 } \
880 879 } \
881 880 }
882 881
883 882 /*
884 883 * Marks a component busy in both PM framework and driver state structure.
885 884 */
886 885 #define CPUDRV_MONITOR_PM_BUSY_COMP(dip, cpupm) { \
887 886 if ((cpupm)->pm_busycnt < 1) { \
888 887 if (pm_busy_component((dip), CPUDRV_COMP_NUM) == \
889 888 DDI_SUCCESS) { \
890 889 DPRINTF(D_PM_MONITOR, ("cpudrv_monitor: " \
891 890 "instance %d: pm_busy_component called\n", \
892 891 ddi_get_instance((dip)))); \
893 892 (cpupm)->pm_busycnt++; \
894 893 } else { \
895 894 cmn_err(CE_WARN, "cpudrv_monitor: instance %d: " \
896 895 "can't busy CPU component", \
897 896 ddi_get_instance((dip))); \
898 897 } \
899 898 } \
900 899 }
901 900
902 901 /*
903 902 * Marks a component busy and calls pm_raise_power().
904 903 */
905 904 #define CPUDRV_MONITOR_PM_BUSY_AND_RAISE(dip, cpudsp, cpupm, new_spd) { \
906 905 int ret; \
907 906 /* \
908 907 * Mark driver and PM framework busy first so framework doesn't try \
909 908 * to bring CPU to lower speed when we need to be at higher speed. \
910 909 */ \
911 910 CPUDRV_MONITOR_PM_BUSY_COMP((dip), (cpupm)); \
912 911 mutex_exit(&(cpudsp)->lock); \
913 912 DPRINTF(D_PM_MONITOR, ("cpudrv_monitor: instance %d: " \
914 913 "pm_raise_power called to %d\n", ddi_get_instance((dip)), \
915 914 (new_spd->pm_level))); \
916 915 ret = pm_raise_power((dip), CPUDRV_COMP_NUM, (new_spd->pm_level)); \
917 916 if (ret != DDI_SUCCESS) { \
918 917 cmn_err(CE_WARN, "cpudrv_monitor: instance %d: can't " \
919 918 "raise CPU power level", ddi_get_instance((dip))); \
920 919 } \
921 920 mutex_enter(&(cpudsp)->lock); \
922 921 if (ret == DDI_SUCCESS && cpudsp->cpudrv_pm.cur_spd == NULL) { \
923 922 cpudsp->cpudrv_pm.cur_spd = new_spd; \
924 923 } \
925 924 }
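
The mutex_exit()/mutex_enter() bracketing around pm_raise_power() in the macro above is load-bearing: raising power re-enters this driver synchronously through its power(9e) entry point, which takes the same per-device lock. The call chain, as visible in this file:

	pm_raise_power(dip, CPUDRV_COMP_NUM, level)	/* PM framework */
	    -> cpudrv_power(dip, comp, level)		/* power(9e), above */
	        -> mutex_enter(&cpudsp->lock)		/* deadlock if still held */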
926 925
927 926 /*
928 927 * In order to monitor a CPU, we need to hold cpu_lock to access CPU
929 928 * statistics. Holding cpu_lock is not allowed from a callout routine.
930 929 * We dispatch a taskq to do that job.
931 930 */
932 931 static void
933 932 cpudrv_monitor_disp(void *arg)
934 933 {
935 934 cpudrv_devstate_t *cpudsp = (cpudrv_devstate_t *)arg;
936 935
937 936 /*
938 937 * We are here because the last task has scheduled a timeout.
939 938 * The queue should be empty at this time.
940 939 */
941 940 mutex_enter(&cpudsp->cpudrv_pm.timeout_lock);
942 941 if ((ddi_taskq_dispatch(cpudsp->cpudrv_pm.tq, cpudrv_monitor, arg,
943 942 DDI_NOSLEEP)) != DDI_SUCCESS) {
944 943 mutex_exit(&cpudsp->cpudrv_pm.timeout_lock);
945 944 DPRINTF(D_PM_MONITOR, ("cpudrv_monitor_disp: failed to "
946 945 "dispatch the cpudrv_monitor taskq\n"));
947 946 mutex_enter(&cpudsp->lock);
948 947 CPUDRV_MONITOR_INIT(cpudsp);
949 948 mutex_exit(&cpudsp->lock);
950 949 return;
951 950 }
952 951 cpudsp->cpudrv_pm.timeout_count++;
953 952 mutex_exit(&cpudsp->cpudrv_pm.timeout_lock);
954 953 }
955 954
956 955 /*
957 956 * Monitors each CPU for the amount of time idle thread was running in the
958 957 * last quantum and arranges for the CPU to go to the lower or higher speed.
959 958 * Called at the time interval appropriate for the current speed. The
960 959 * time interval for normal speed is CPUDRV_QUANT_CNT_NORMAL. The time
961 960 * interval for other speeds (including unknown speed) is
962 961 * CPUDRV_QUANT_CNT_OTHR.
963 962 */
964 963 static void
965 964 cpudrv_monitor(void *arg)
966 965 {
967 966 cpudrv_devstate_t *cpudsp = (cpudrv_devstate_t *)arg;
968 967 cpudrv_pm_t *cpupm;
969 968 cpudrv_pm_spd_t *cur_spd, *new_spd;
970 969 dev_info_t *dip;
971 970 uint_t idle_cnt, user_cnt, system_cnt;
972 971 clock_t ticks;
973 972 uint_t tick_cnt;
974 973 hrtime_t msnsecs[NCMSTATES];
975 974 boolean_t is_ready;
976 975
977 976 #define GET_CPU_MSTATE_CNT(state, cnt) \
978 977 msnsecs[state] = NSEC_TO_TICK(msnsecs[state]); \
979 978 if (cpupm->lastquan_mstate[state] > msnsecs[state]) \
980 979 msnsecs[state] = cpupm->lastquan_mstate[state]; \
981 980 cnt = msnsecs[state] - cpupm->lastquan_mstate[state]; \
982 981 cpupm->lastquan_mstate[state] = msnsecs[state]
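
GET_CPU_MSTATE_CNT clamps before subtracting because the counters are unsigned: cnt is a uint_t, so a microstate snapshot that converts to slightly less than the remembered value would otherwise wrap around. A worked instance (invented numbers): if lastquan_mstate[CMS_IDLE] is 500 ticks and the fresh snapshot converts to 490, the macro substitutes 500, so

	cnt = 500 - 500 = 0	/* rather than (490 - 500) mod 2^32 */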
983 982
984 983 /*
985 984 * We're not ready until we can get a cpu_t
986 985 */
987 986 is_ready = (cpudrv_get_cpu(cpudsp) == DDI_SUCCESS);
988 987
989 988 mutex_enter(&cpudsp->lock);
990 989 cpupm = &(cpudsp->cpudrv_pm);
991 990 if (cpupm->timeout_id == 0) {
992 991 mutex_exit(&cpudsp->lock);
993 992 goto do_return;
994 993 }
995 994 cur_spd = cpupm->cur_spd;
996 995 dip = cpudsp->dip;
997 996
998 997 /*
999 998 * We assume that a CPU is initialized and has a valid cpu_t
1000 999 * structure, if it is ready for cross calls. If this changes,
1001 1000 * additional checks might be needed.
1002 1001 *
1003 1002 * Additionally, for x86 platforms we cannot power manage an
1004 1003 * instance until it has been initialized.
1005 1004 */
1006 1005 if (is_ready) {
1007 1006 is_ready = CPUDRV_XCALL_IS_READY(cpudsp->cpu_id);
1008 1007 if (!is_ready) {
1009 1008 DPRINTF(D_PM_MONITOR, ("cpudrv_monitor: instance %d: "
1010 1009 "CPU not ready for x-calls\n",
1011 1010 ddi_get_instance(dip)));
1012 1011 } else if (!(is_ready = cpudrv_power_ready(cpudsp->cp))) {
1013 1012 DPRINTF(D_PM_MONITOR, ("cpudrv_monitor: instance %d: "
1014 1013 "waiting for all CPUs to be power manageable\n",
1015 1014 ddi_get_instance(dip)));
1016 1015 }
1017 1016 }
1018 1017 if (!is_ready) {
1019 1018 /*
1020 1019 * Make sure that we are busy so that the framework doesn't
1021 1020 * try to bring us down in this situation.
1022 1021 */
1023 1022 CPUDRV_MONITOR_PM_BUSY_COMP(dip, cpupm);
1024 1023 CPUDRV_MONITOR_INIT(cpudsp);
1025 1024 mutex_exit(&cpudsp->lock);
1026 1025 goto do_return;
1027 1026 }
1028 1027
1029 1028 /*
1030 1029 * Make sure that we are still not at unknown power level.
1031 1030 */
1032 1031 if (cur_spd == NULL) {
1033 1032 DPRINTF(D_PM_MONITOR, ("cpudrv_monitor: instance %d: "
1034 1033 "cur_spd is unknown\n", ddi_get_instance(dip)));
1035 1034 CPUDRV_MONITOR_PM_BUSY_AND_RAISE(dip, cpudsp, cpupm,
1036 1035 CPUDRV_TOPSPEED(cpupm));
1037 1036 /*
1038 1037 * We just changed the speed. Wait till at least next
1039 1038 * call to this routine before proceeding ahead.
1040 1039 */
1041 1040 CPUDRV_MONITOR_INIT(cpudsp);
1042 1041 mutex_exit(&cpudsp->lock);
1043 1042 goto do_return;
1044 1043 }
1045 1044
1046 1045 if (!cpupm->pm_started) {
1047 1046 cpupm->pm_started = B_TRUE;
1048 1047 cpudrv_set_supp_freqs(cpudsp);
1049 1048 }
1050 1049
1051 1050 get_cpu_mstate(cpudsp->cp, msnsecs);
1052 1051 GET_CPU_MSTATE_CNT(CMS_IDLE, idle_cnt);
1053 1052 GET_CPU_MSTATE_CNT(CMS_USER, user_cnt);
1054 1053 GET_CPU_MSTATE_CNT(CMS_SYSTEM, system_cnt);
1055 1054
1056 1055 /*
1057 1056 * We can't do anything when we have just switched to a state
1058 1057 * because there is no valid timestamp.
1059 1058 */
1060 1059 if (cpupm->lastquan_ticks == 0) {
1061 1060 cpupm->lastquan_ticks = NSEC_TO_TICK(gethrtime());
1062 1061 CPUDRV_MONITOR_INIT(cpudsp);
1063 1062 mutex_exit(&cpudsp->lock);
1064 1063 goto do_return;
1065 1064 }
1066 1065
1067 1066 /*
1068 1067 * Various watermarks are based on this routine being called back
1069 1068 * exactly at the requested period. This is not guaranteed
1070 1069 * because this routine is called from a taskq that is dispatched
1071 1070 * from a timeout routine. Handle this by finding out how many
1072 1071 * ticks have elapsed since the last call and adjusting
1073 1072 * the idle_cnt based on the delay added to the requested period
1074 1073 * by timeout and taskq.
1075 1074 */
1076 1075 ticks = NSEC_TO_TICK(gethrtime());
1077 1076 tick_cnt = ticks - cpupm->lastquan_ticks;
1078 1077 ASSERT(tick_cnt != 0);
1079 1078 cpupm->lastquan_ticks = ticks;
1080 1079
1081 1080 /*
1082 1081 * Time taken between recording the current counts and
1083 1082 * arranging the next call of this routine is an error in our
1084 1083 * calculation. We minimize the error by calling
1085 1084 * CPUDRV_MONITOR_INIT() here instead of at the end of this routine.
1086 1085 */
1087 1086 CPUDRV_MONITOR_INIT(cpudsp);
1088 1087 DPRINTF(D_PM_MONITOR_VERBOSE, ("cpudrv_monitor: instance %d: "
1089 1088 "idle count %d, user count %d, system count %d, pm_level %d, "
1090 1089 "pm_busycnt %d\n", ddi_get_instance(dip), idle_cnt, user_cnt,
1091 1090 system_cnt, cur_spd->pm_level, cpupm->pm_busycnt));
1092 1091
1093 1092 #ifdef DEBUG
1094 1093 /*
1095 1094 * Notify that timeout and taskq have caused delays and we need to
1096 1095 * scale our parameters accordingly.
1097 1096 *
1098 1097 * To get an accurate result, don't turn on other DPRINTFs with
1099 1098 * the following DPRINTF. PROM calls generated by other
1100 1099 * DPRINTFs change the timing.
1101 1100 */
1102 1101 if (tick_cnt > cur_spd->quant_cnt) {
1103 1102 DPRINTF(D_PM_MONITOR_DELAY, ("cpudrv_monitor: instance %d: "
1104 1103 "tick count %d > quantum_count %u\n",
1105 1104 ddi_get_instance(dip), tick_cnt, cur_spd->quant_cnt));
1106 1105 }
1107 1106 #endif /* DEBUG */
1108 1107
1109 1108 /*
1110 1109 * Adjust counts based on the delay added by timeout and taskq.
1111 1110 */
1112 1111 idle_cnt = (idle_cnt * cur_spd->quant_cnt) / tick_cnt;
1113 1112 user_cnt = (user_cnt * cur_spd->quant_cnt) / tick_cnt;
1114 1113
1115 1114 if ((user_cnt > cur_spd->user_hwm) || (idle_cnt < cur_spd->idle_lwm &&
1116 1115 cur_spd->idle_blwm_cnt >= cpudrv_idle_blwm_cnt_max)) {
1117 1116 cur_spd->idle_blwm_cnt = 0;
1118 1117 cur_spd->idle_bhwm_cnt = 0;
1119 1118 /*
1120 1119 * In normal situation, arrange to go to next higher speed.
1121 1120 * If we are running in special direct pm mode, we just stay
1122 1121 * at the current speed.
1123 1122 */
1124 1123 if (cur_spd == cur_spd->up_spd || cpudrv_direct_pm) {
1125 1124 CPUDRV_MONITOR_PM_BUSY_COMP(dip, cpupm);
1126 1125 } else {
1127 1126 new_spd = cur_spd->up_spd;
1128 1127 CPUDRV_MONITOR_PM_BUSY_AND_RAISE(dip, cpudsp, cpupm,
1129 1128 new_spd);
1130 1129 }
1131 1130 } else if ((user_cnt <= cur_spd->user_lwm) &&
1132 1131 (idle_cnt >= cur_spd->idle_hwm) || !CPU_ACTIVE(cpudsp->cp)) {
1133 1132 cur_spd->idle_blwm_cnt = 0;
1134 1133 cur_spd->idle_bhwm_cnt = 0;
1135 1134 /*
1136 1135 * Arrange to go to next lower speed by informing our idle
1137 1136 * status to the power management framework.
1138 1137 */
1139 1138 CPUDRV_MONITOR_PM_IDLE_COMP(dip, cpupm);
1140 1139 } else {
1141 1140 /*
1142 1141 * If we are between the idle water marks and have not
1143 1142 * been here enough consecutive times to be considered
1144 1143 * busy, just increment the count and return.
1145 1144 */
1146 1145 if ((idle_cnt < cur_spd->idle_hwm) &&
1147 1146 (idle_cnt >= cur_spd->idle_lwm) &&
1148 1147 (cur_spd->idle_bhwm_cnt < cpudrv_idle_bhwm_cnt_max)) {
1149 1148 cur_spd->idle_blwm_cnt = 0;
1150 1149 cur_spd->idle_bhwm_cnt++;
1151 1150 mutex_exit(&cpudsp->lock);
1152 1151 goto do_return;
1153 1152 }
1154 1153 if (idle_cnt < cur_spd->idle_lwm) {
1155 1154 cur_spd->idle_blwm_cnt++;
1156 1155 cur_spd->idle_bhwm_cnt = 0;
1157 1156 }
1158 1157 /*
1159 1158 * Arranges to stay at the current speed.
1160 1159 */
1161 1160 CPUDRV_MONITOR_PM_BUSY_COMP(dip, cpupm);
1162 1161 }
1163 1162 mutex_exit(&cpudsp->lock);
1164 1163 do_return:
1165 1164 mutex_enter(&cpupm->timeout_lock);
1166 1165 ASSERT(cpupm->timeout_count > 0);
1167 1166 cpupm->timeout_count--;
1168 1167 cv_signal(&cpupm->timeout_cv);
1169 1168 mutex_exit(&cpupm->timeout_lock);
1170 1169 }
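
The delay normalization near the end of cpudrv_monitor() is plain proportional scaling; a worked instance (invented numbers): if cur_spd->quant_cnt is 100 ticks but tick_cnt shows 125 ticks actually elapsed, and the CPU was idle for 100 of them, then

	idle_cnt = (100 * 100) / 125 = 80

so the counts are rescaled to the requested sampling window before they are compared against the idle and user watermarks.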
1171 1170
1172 1171 /*
1173 1172 * get cpu_t structure for cpudrv_devstate_t
1174 1173 */
1175 1174 int
1176 1175 cpudrv_get_cpu(cpudrv_devstate_t *cpudsp)
1177 1176 {
1178 1177 ASSERT(cpudsp != NULL);
1179 1178
1180 1179 /*
1181 1180 * return DDI_SUCCESS if cpudrv_devstate_t
1182 1181 * already contains cpu_t structure
1183 1182 */
1184 1183 if (cpudsp->cp != NULL)
1185 1184 return (DDI_SUCCESS);
1186 1185
1187 1186 if (MUTEX_HELD(&cpu_lock)) {
1188 1187 cpudsp->cp = cpu_get(cpudsp->cpu_id);
1189 1188 } else {
1190 1189 mutex_enter(&cpu_lock);
1191 1190 cpudsp->cp = cpu_get(cpudsp->cpu_id);
1192 1191 mutex_exit(&cpu_lock);
1193 1192 }
1194 1193
1195 1194 if (cpudsp->cp == NULL)
1196 1195 return (DDI_FAILURE);
1197 1196
1198 1197 return (DDI_SUCCESS);
1199 1198 }
(1078 unchanged lines elided)