Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/pm.c
+++ new/usr/src/uts/common/io/pm.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /*
27 27 * pm This driver now only handles the ioctl interface. The scanning
28 28 * and policy stuff now lives in common/os/sunpm.c.
29 29 * Not DDI compliant
30 30 */
31 31
32 32 #include <sys/types.h>
33 33 #include <sys/errno.h>
34 34 #include <sys/modctl.h>
35 35 #include <sys/callb.h> /* callback registration for cpu_deep_idle */
36 36 #include <sys/conf.h> /* driver flags and functions */
37 37 #include <sys/open.h> /* OTYP_CHR definition */
38 38 #include <sys/stat.h> /* S_IFCHR definition */
39 39 #include <sys/pathname.h> /* name -> dev_info xlation */
40 40 #include <sys/kmem.h> /* memory alloc stuff */
41 41 #include <sys/debug.h>
42 42 #include <sys/pm.h>
43 43 #include <sys/ddi.h>
44 44 #include <sys/sunddi.h>
45 45 #include <sys/epm.h>
46 46 #include <sys/vfs.h>
47 47 #include <sys/mode.h>
48 48 #include <sys/mkdev.h>
49 49 #include <sys/promif.h>
50 50 #include <sys/consdev.h>
51 51 #include <sys/ddi_impldefs.h>
52 52 #include <sys/poll.h>
53 53 #include <sys/note.h>
54 54 #include <sys/taskq.h>
55 55 #include <sys/policy.h>
56 56 #include <sys/cpu_pm.h>
57 57
58 58 /*
 59  59 	 * Minor number is (instance << 8) + clone minor, from range 1-254
 60  60 	 * (0 reserved for "original")
61 61 */
62 62 #define PM_MINOR_TO_CLONE(minor) ((minor) & (PM_MAX_CLONE -1))
63 63
64 64 #define PM_NUMCMPTS(dip) (DEVI(dip)->devi_pm_num_components)
65 65 #define PM_IS_CFB(dip) (DEVI(dip)->devi_pm_flags & PMC_CONSOLE_FB)
66 66 #define PM_MAJOR(dip) ddi_driver_major(dip)
67 67 #define PM_RELE(dip) ddi_release_devi(dip)
68 68
69 69 #define PM_IDLEDOWN_TIME 10
70 70 #define MAXSMBIOSSTRLEN 64 /* from SMBIOS spec */
71 71 #define MAXCOPYBUF (MAXSMBIOSSTRLEN + 1)
72 72
73 73 extern kmutex_t pm_scan_lock; /* protects autopm_enable, pm_scans_disabled */
74 74 extern kmutex_t pm_clone_lock; /* protects pm_clones array */
75 75 extern int autopm_enabled;
76 76 extern pm_cpupm_t cpupm;
77 77 extern pm_cpupm_t cpupm_default_mode;
78 78 extern int pm_default_idle_threshold;
79 79 extern int pm_system_idle_threshold;
80 80 extern int pm_cpu_idle_threshold;
81 81 extern kcondvar_t pm_clones_cv[PM_MAX_CLONE];
82 82 extern uint_t pm_poll_cnt[PM_MAX_CLONE];
83 83 extern int autoS3_enabled;
84 84 extern void pm_record_thresh(pm_thresh_rec_t *);
85 85 extern void pm_register_watcher(int, dev_info_t *);
86 86 extern int pm_get_current_power(dev_info_t *, int, int *);
87 87 extern int pm_interest_registered(int);
88 88 extern void pm_all_to_default_thresholds(void);
89 89 extern int pm_current_threshold(dev_info_t *, int, int *);
90 90 extern void pm_deregister_watcher(int, dev_info_t *);
91 91 extern void pm_unrecord_threshold(char *);
92 92 extern int pm_S3_enabled;
93 93 extern int pm_ppm_searchlist(pm_searchargs_t *);
94 94 extern psce_t *pm_psc_clone_to_direct(int);
95 95 extern psce_t *pm_psc_clone_to_interest(int);
96 96
97 97 /*
98 98 * The soft state of the power manager. Since there will only
99 99 * one of these, just reference it through a static pointer.
100 100 */
101 101 static struct pmstate {
102 102 dev_info_t *pm_dip; /* ptr to our dev_info node */
103 103 int pm_instance; /* for ddi_get_instance() */
104 104 timeout_id_t pm_idledown_id; /* pm idledown timeout id */
105 105 uchar_t pm_clones[PM_MAX_CLONE]; /* uniqueify multiple opens */
106 106 struct cred *pm_cred[PM_MAX_CLONE]; /* cred for each unique open */
107 107 } pm_state = { NULL, -1, (timeout_id_t)0 };
108 108 typedef struct pmstate *pm_state_t;
109 109 static pm_state_t pmstp = &pm_state;
110 110
111 111 static int pm_open(dev_t *, int, int, cred_t *);
112 112 static int pm_close(dev_t, int, int, cred_t *);
113 113 static int pm_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
114 114 static int pm_chpoll(dev_t, short, int, short *, struct pollhead **);
115 115
116 116 static struct cb_ops pm_cb_ops = {
117 117 pm_open, /* open */
118 118 pm_close, /* close */
119 119 nodev, /* strategy */
120 120 nodev, /* print */
121 121 nodev, /* dump */
122 122 nodev, /* read */
123 123 nodev, /* write */
124 124 pm_ioctl, /* ioctl */
125 125 nodev, /* devmap */
126 126 nodev, /* mmap */
127 127 nodev, /* segmap */
128 128 pm_chpoll, /* poll */
129 129 ddi_prop_op, /* prop_op */
130 130 NULL, /* streamtab */
131 131 D_NEW | D_MP /* driver compatibility flag */
132 132 };
133 133
134 134 static int pm_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
135 135 void **result);
136 136 static int pm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
137 137 static int pm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
138 138
139 139 static struct dev_ops pm_ops = {
140 140 DEVO_REV, /* devo_rev */
141 141 0, /* refcnt */
142 142 pm_getinfo, /* info */
143 143 nulldev, /* identify */
144 144 nulldev, /* probe */
145 145 pm_attach, /* attach */
146 146 pm_detach, /* detach */
147 147 nodev, /* reset */
148 148 &pm_cb_ops, /* driver operations */
149 149 NULL, /* bus operations */
150 150 NULL, /* power */
↓ open down ↓ |
150 lines elided |
↑ open up ↑ |
151 151 ddi_quiesce_not_needed, /* quiesce */
152 152 };
153 153
154 154 static struct modldrv modldrv = {
155 155 &mod_driverops,
156 156 "power management driver",
157 157 &pm_ops
158 158 };
159 159
160 160 static struct modlinkage modlinkage = {
161 - MODREV_1, &modldrv, 0
161 + MODREV_1, { &modldrv, NULL }
162 162 };
163 163
164 164 /* Local functions */
165 165 #ifdef DEBUG
166 166 static int print_info(dev_info_t *, void *);
167 167
168 168 #endif
169 169
170 170 int
171 171 _init(void)
172 172 {
173 173 return (mod_install(&modlinkage));
174 174 }
175 175
176 176 int
177 177 _fini(void)
178 178 {
179 179 return (mod_remove(&modlinkage));
180 180 }
181 181
182 182 int
183 183 _info(struct modinfo *modinfop)
184 184 {
185 185 return (mod_info(&modlinkage, modinfop));
186 186 }
187 187
188 188 static int
189 189 pm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
190 190 {
191 191 int i;
192 192
193 193 switch (cmd) {
194 194
195 195 case DDI_ATTACH:
196 196 if (pmstp->pm_instance != -1) /* Only allow one instance */
197 197 return (DDI_FAILURE);
198 198 pmstp->pm_instance = ddi_get_instance(dip);
199 199 if (ddi_create_minor_node(dip, "pm", S_IFCHR,
200 200 (pmstp->pm_instance << 8) + 0,
201 201 DDI_PSEUDO, 0) != DDI_SUCCESS) {
202 202 return (DDI_FAILURE);
203 203 }
204 204 pmstp->pm_dip = dip; /* pm_init and getinfo depend on it */
205 205
206 206 for (i = 0; i < PM_MAX_CLONE; i++)
207 207 cv_init(&pm_clones_cv[i], NULL, CV_DEFAULT, NULL);
208 208
209 209 ddi_report_dev(dip);
210 210 return (DDI_SUCCESS);
211 211
212 212 default:
213 213 return (DDI_FAILURE);
214 214 }
215 215 }
216 216
217 217 /* ARGSUSED */
218 218 static int
219 219 pm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
220 220 {
221 221 int i;
222 222
223 223 switch (cmd) {
224 224 case DDI_DETACH:
225 225 /*
226 226 * Don't detach while idledown timeout is pending. Note that
227 227 * we already know we're not in pm_ioctl() due to framework
228 228 * synchronization, so this is a sufficient test
229 229 */
230 230 if (pmstp->pm_idledown_id)
231 231 return (DDI_FAILURE);
232 232
233 233 for (i = 0; i < PM_MAX_CLONE; i++)
234 234 cv_destroy(&pm_clones_cv[i]);
235 235
236 236 ddi_remove_minor_node(dip, NULL);
237 237 pmstp->pm_instance = -1;
238 238 return (DDI_SUCCESS);
239 239
240 240 default:
241 241 return (DDI_FAILURE);
242 242 }
243 243 }
244 244
245 245 static int
246 246 pm_close_direct_pm_device(dev_info_t *dip, void *arg)
247 247 {
248 248 int clone;
249 249 char *pathbuf;
250 250 pm_info_t *info = PM_GET_PM_INFO(dip);
251 251
252 252 clone = *((int *)arg);
253 253
254 254 if (!info)
255 255 return (DDI_WALK_CONTINUE);
256 256
257 257 pathbuf = kmem_alloc(MAXPATHLEN, KM_SLEEP);
258 258 PM_LOCK_DIP(dip);
259 259 if (clone == info->pmi_clone) {
260 260 PMD(PMD_CLOSE, ("pm_close: found %s@%s(%s#%d)\n",
261 261 PM_DEVICE(dip)))
262 262 ASSERT(PM_ISDIRECT(dip));
263 263 info->pmi_dev_pm_state &= ~PM_DIRECT;
264 264 PM_UNLOCK_DIP(dip);
265 265 pm_proceed(dip, PMP_RELEASE, -1, -1);
266 266 /* Bring ourselves up if there is a keeper that is up */
267 267 (void) ddi_pathname(dip, pathbuf);
268 268 pm_dispatch_to_dep_thread(PM_DEP_WK_BRINGUP_SELF, NULL,
269 269 pathbuf, PM_DEP_NOWAIT, NULL, 0);
270 270 PM_LOCK_DIP(dip);
271 271 info->pmi_clone = 0;
272 272 PM_UNLOCK_DIP(dip);
273 273 } else {
274 274 PM_UNLOCK_DIP(dip);
275 275 }
276 276 kmem_free(pathbuf, MAXPATHLEN);
277 277
278 278 /* restart autopm on device released from direct pm */
279 279 pm_rescan(dip);
280 280
281 281 return (DDI_WALK_CONTINUE);
282 282 }
283 283
284 284 #define PM_REQ 1
285 285 #define NOSTRUCT 2
286 286 #define DIP 3
287 287 #define NODIP 4
288 288 #define NODEP 5
289 289 #define DEP 6
290 290 #define PM_PSC 7
291 291 #define PM_SRCH 8
292 292
293 293 #define CHECKPERMS 0x001
294 294 #define SU 0x002
295 295 #define SG 0x004
296 296 #define OWNER 0x008
297 297
298 298 #define INWHO 0x001
299 299 #define INDATAINT 0x002
300 300 #define INDATASTRING 0x004
301 301 #define INDEP 0x008
302 302 #define INDATAOUT 0x010
303 303 #define INDATA (INDATAOUT | INDATAINT | INDATASTRING | INDEP)
304 304
305 305 struct pm_cmd_info {
306 306 int cmd; /* command code */
307 307 char *name; /* printable string */
308 308 int supported; /* true if still supported */
309 309 int str_type; /* PM_REQ or NOSTRUCT */
310 310 int inargs; /* INWHO, INDATAINT, INDATASTRING, INDEP, */
311 311 /* INDATAOUT */
312 312 int diptype; /* DIP or NODIP */
313 313 int deptype; /* DEP or NODEP */
 314 314 	int	permission;	/* SU, SG, or CHECKPERMS */
315 315 };
316 316
317 317 #ifdef DEBUG
318 318 char *pm_cmd_string;
319 319 int pm_cmd;
320 320 #endif
321 321
322 322 /*
323 323 * Returns true if permission granted by credentials
324 324 */
325 325 static int
326 326 pm_perms(int perm, cred_t *cr)
327 327 {
328 328 if (perm == 0) /* no restrictions */
329 329 return (1);
330 330 if (perm == CHECKPERMS) /* ok for now (is checked later) */
331 331 return (1);
332 332 if ((perm & SU) && secpolicy_power_mgmt(cr) == 0) /* privileged? */
333 333 return (1);
334 334 if ((perm & SG) && (crgetgid(cr) == 0)) /* group 0 is ok */
335 335 return (1);
336 336 return (0);
337 337 }
338 338
339 339 #ifdef DEBUG
340 340 static int
341 341 print_info(dev_info_t *dip, void *arg)
342 342 {
343 343 _NOTE(ARGUNUSED(arg))
344 344 pm_info_t *info;
345 345 int i, j;
346 346 struct pm_component *cp;
347 347 extern int pm_cur_power(pm_component_t *cp);
348 348
349 349 info = PM_GET_PM_INFO(dip);
350 350 if (!info)
351 351 return (DDI_WALK_CONTINUE);
352 352 cmn_err(CE_CONT, "pm_info for %s\n", ddi_node_name(dip));
353 353 for (i = 0; i < PM_NUMCMPTS(dip); i++) {
354 354 cp = PM_CP(dip, i);
355 355 cmn_err(CE_CONT, "\tThresholds[%d] =", i);
356 356 for (j = 0; j < cp->pmc_comp.pmc_numlevels; j++)
357 357 cmn_err(CE_CONT, " %d", cp->pmc_comp.pmc_thresh[i]);
358 358 cmn_err(CE_CONT, "\n");
359 359 cmn_err(CE_CONT, "\tCurrent power[%d] = %d\n", i,
360 360 pm_cur_power(cp));
361 361 }
362 362 if (PM_ISDIRECT(dip))
363 363 cmn_err(CE_CONT, "\tDirect power management\n");
364 364 return (DDI_WALK_CONTINUE);
365 365 }
366 366 #endif
367 367
368 368 /*
369 369 * command, name, supported, str_type, inargs, diptype, deptype, permission
370 370 */
371 371 static struct pm_cmd_info pmci[] = {
372 372 {PM_SCHEDULE, "PM_SCHEDULE", 0},
373 373 {PM_GET_IDLE_TIME, "PM_GET_IDLE_TIME", 0},
374 374 {PM_GET_NUM_CMPTS, "PM_GET_NUM_CMPTS", 0},
375 375 {PM_GET_THRESHOLD, "PM_GET_THRESHOLD", 0},
376 376 {PM_SET_THRESHOLD, "PM_SET_THRESHOLD", 0},
377 377 {PM_GET_NORM_PWR, "PM_GET_NORM_PWR", 0},
378 378 {PM_SET_CUR_PWR, "PM_SET_CUR_PWR", 0},
379 379 {PM_GET_CUR_PWR, "PM_GET_CUR_PWR", 0},
380 380 {PM_GET_NUM_DEPS, "PM_GET_NUM_DEPS", 0},
381 381 {PM_GET_DEP, "PM_GET_DEP", 0},
382 382 {PM_ADD_DEP, "PM_ADD_DEP", 0},
383 383 {PM_REM_DEP, "PM_REM_DEP", 0},
384 384 {PM_REM_DEVICE, "PM_REM_DEVICE", 0},
385 385 {PM_REM_DEVICES, "PM_REM_DEVICES", 0},
386 386 {PM_REPARSE_PM_PROPS, "PM_REPARSE_PM_PROPS", 1, PM_REQ, INWHO, DIP,
387 387 NODEP},
388 388 {PM_DISABLE_AUTOPM, "PM_DISABLE_AUTOPM", 0},
389 389 {PM_REENABLE_AUTOPM, "PM_REENABLE_AUTOPM", 0},
390 390 {PM_SET_NORM_PWR, "PM_SET_NORM_PWR", 0 },
391 391 {PM_SET_DEVICE_THRESHOLD, "PM_SET_DEVICE_THRESHOLD", 1, PM_REQ,
392 392 INWHO, NODIP, NODEP, SU},
393 393 {PM_GET_SYSTEM_THRESHOLD, "PM_GET_SYSTEM_THRESHOLD", 1, NOSTRUCT},
394 394 {PM_GET_DEFAULT_SYSTEM_THRESHOLD, "PM_GET_DEFAULT_SYSTEM_THRESHOLD",
395 395 1, NOSTRUCT},
396 396 {PM_SET_SYSTEM_THRESHOLD, "PM_SET_SYSTEM_THRESHOLD", 1, NOSTRUCT,
397 397 0, 0, 0, SU},
398 398 {PM_START_PM, "PM_START_PM", 1, NOSTRUCT, 0, 0, 0, SU},
399 399 {PM_STOP_PM, "PM_STOP_PM", 1, NOSTRUCT, 0, 0, 0, SU},
400 400 {PM_RESET_PM, "PM_RESET_PM", 1, NOSTRUCT, 0, 0, 0, SU},
401 401 {PM_GET_STATS, "PM_GET_STATS", 1, PM_REQ, INWHO | INDATAOUT,
402 402 DIP, NODEP},
403 403 {PM_GET_DEVICE_THRESHOLD, "PM_GET_DEVICE_THRESHOLD", 1, PM_REQ, INWHO,
404 404 DIP, NODEP},
405 405 {PM_GET_POWER_NAME, "PM_GET_POWER_NAME", 1, PM_REQ, INWHO | INDATAOUT,
406 406 DIP, NODEP},
407 407 {PM_GET_POWER_LEVELS, "PM_GET_POWER_LEVELS", 1, PM_REQ,
408 408 INWHO | INDATAOUT, DIP, NODEP},
409 409 {PM_GET_NUM_COMPONENTS, "PM_GET_NUM_COMPONENTS", 1, PM_REQ, INWHO,
410 410 DIP, NODEP},
411 411 {PM_GET_COMPONENT_NAME, "PM_GET_COMPONENT_NAME", 1, PM_REQ,
412 412 INWHO | INDATAOUT, DIP, NODEP},
413 413 {PM_GET_NUM_POWER_LEVELS, "PM_GET_NUM_POWER_LEVELS", 1, PM_REQ, INWHO,
414 414 DIP, NODEP},
415 415 {PM_GET_STATE_CHANGE, "PM_GET_STATE_CHANGE", 1, PM_PSC},
416 416 {PM_GET_STATE_CHANGE_WAIT, "PM_GET_STATE_CHANGE_WAIT", 1, PM_PSC},
417 417 {PM_DIRECT_PM, "PM_DIRECT_PM", 1, PM_REQ, INWHO, DIP, NODEP,
418 418 (SU | SG)},
419 419 {PM_RELEASE_DIRECT_PM, "PM_RELEASE_DIRECT_PM", 1, PM_REQ, INWHO,
420 420 DIP, NODEP},
421 421 {PM_DIRECT_NOTIFY, "PM_DIRECT_NOTIFY", 1, PM_PSC},
422 422 {PM_DIRECT_NOTIFY_WAIT, "PM_DIRECT_NOTIFY_WAIT", 1, PM_PSC},
423 423 {PM_RESET_DEVICE_THRESHOLD, "PM_RESET_DEVICE_THRESHOLD", 1, PM_REQ,
424 424 INWHO, DIP, NODEP, SU},
425 425 {PM_GET_PM_STATE, "PM_GET_PM_STATE", 1, NOSTRUCT},
426 426 {PM_GET_AUTOS3_STATE, "PM_GET_AUTOS3_STATE", 1, NOSTRUCT},
427 427 {PM_GET_S3_SUPPORT_STATE, "PM_GET_S3_SUPPORT_STATE", 1, NOSTRUCT},
428 428 {PM_GET_DEVICE_TYPE, "PM_GET_DEVICE_TYPE", 1, PM_REQ, INWHO,
429 429 DIP, NODEP},
430 430 {PM_SET_COMPONENT_THRESHOLDS, "PM_SET_COMPONENT_THRESHOLDS", 1, PM_REQ,
431 431 INWHO | INDATAINT, NODIP, NODEP, SU},
432 432 {PM_GET_COMPONENT_THRESHOLDS, "PM_GET_COMPONENT_THRESHOLDS", 1, PM_REQ,
433 433 INWHO | INDATAOUT, DIP, NODEP},
434 434 {PM_IDLE_DOWN, "PM_IDLE_DOWN", 1, NOSTRUCT, 0, 0, 0, SU},
435 435 {PM_GET_DEVICE_THRESHOLD_BASIS, "PM_GET_DEVICE_THRESHOLD_BASIS", 1,
436 436 PM_REQ, INWHO, DIP, NODEP},
437 437 {PM_SET_CURRENT_POWER, "PM_SET_CURRENT_POWER", 1, PM_REQ, INWHO, DIP,
438 438 NODEP},
439 439 {PM_GET_CURRENT_POWER, "PM_GET_CURRENT_POWER", 1, PM_REQ, INWHO, DIP,
440 440 NODEP},
441 441 {PM_GET_FULL_POWER, "PM_GET_FULL_POWER", 1, PM_REQ, INWHO, DIP,
442 442 NODEP},
443 443 {PM_ADD_DEPENDENT, "PM_ADD_DEPENDENT", 1, PM_REQ, INWHO | INDATASTRING,
444 444 DIP, DEP, SU},
445 445 {PM_GET_TIME_IDLE, "PM_GET_TIME_IDLE", 1, PM_REQ, INWHO, DIP, NODEP},
446 446 {PM_ADD_DEPENDENT_PROPERTY, "PM_ADD_DEPENDENT_PROPERTY", 1, PM_REQ,
447 447 INWHO | INDATASTRING, NODIP, DEP, SU},
448 448 {PM_START_CPUPM, "PM_START_CPUPM", 1, NOSTRUCT, 0, 0, 0, SU},
449 449 {PM_START_CPUPM_EV, "PM_START_CPUPM_EV", 1, NOSTRUCT, 0,
450 450 0, 0, SU},
451 451 {PM_START_CPUPM_POLL, "PM_START_CPUPM_POLL", 1, NOSTRUCT, 0,
452 452 0, 0, SU},
453 453 {PM_STOP_CPUPM, "PM_STOP_CPUPM", 1, NOSTRUCT, 0, 0, 0, SU},
454 454 {PM_GET_CPU_THRESHOLD, "PM_GET_CPU_THRESHOLD", 1, NOSTRUCT},
455 455 {PM_SET_CPU_THRESHOLD, "PM_SET_CPU_THRESHOLD", 1, NOSTRUCT,
456 456 0, 0, 0, SU},
457 457 {PM_GET_CPUPM_STATE, "PM_GET_CPUPM_STATE", 1, NOSTRUCT},
458 458 {PM_START_AUTOS3, "PM_START_AUTOS3", 1, NOSTRUCT, 0, 0, 0, SU},
459 459 {PM_STOP_AUTOS3, "PM_STOP_AUTOS3", 1, NOSTRUCT, 0, 0, 0, SU},
460 460 {PM_ENABLE_S3, "PM_ENABLE_S3", 1, NOSTRUCT, 0, 0, 0, SU},
461 461 {PM_DISABLE_S3, "PM_DISABLE_S3", 1, NOSTRUCT, 0, 0, 0, SU},
462 462 {PM_ENTER_S3, "PM_ENTER_S3", 1, NOSTRUCT, 0, 0, 0, SU},
463 463 {PM_SEARCH_LIST, "PM_SEARCH_LIST", 1, PM_SRCH, 0, 0, 0, SU},
464 464 {PM_GET_CMD_NAME, "PM_GET_CMD_NAME", 1, PM_REQ, INDATAOUT, NODIP,
465 465 NODEP, 0},
466 466 {PM_DISABLE_CPU_DEEP_IDLE, "PM_DISABLE_CPU_DEEP_IDLE", 1, NOSTRUCT, 0,
467 467 0, 0, SU},
468 468 {PM_ENABLE_CPU_DEEP_IDLE, "PM_START_CPU_DEEP_IDLE", 1, NOSTRUCT, 0,
469 469 0, 0, SU},
470 470 {PM_DEFAULT_CPU_DEEP_IDLE, "PM_DFLT_CPU_DEEP_IDLE", 1, NOSTRUCT, 0,
471 471 0, 0, SU},
472 472 {0, NULL}
473 473 };
474 474
475 475 struct pm_cmd_info *
476 476 pc_info(int cmd)
477 477 {
478 478 struct pm_cmd_info *pcip;
479 479
480 480 for (pcip = pmci; pcip->name; pcip++) {
481 481 if (cmd == pcip->cmd)
482 482 return (pcip);
483 483 }
484 484 return (NULL);
485 485 }
486 486
487 487 static char *
488 488 pm_decode_cmd(int cmd)
489 489 {
490 490 static char invbuf[64];
491 491 struct pm_cmd_info *pcip = pc_info(cmd);
492 492 if (pcip != NULL)
493 493 return (pcip->name);
494 494 (void) sprintf(invbuf, "ioctl: invalid command %d\n", cmd);
495 495 return (invbuf);
496 496 }
497 497
498 498 /*
499 499 * Allocate scan resource, create taskq, then dispatch scan,
500 500 * called only if autopm is enabled.
501 501 */
502 502 int
503 503 pm_start_pm_walk(dev_info_t *dip, void *arg)
504 504 {
505 505 int cmd = *((int *)arg);
506 506 #ifdef PMDDEBUG
507 507 char *cmdstr = pm_decode_cmd(cmd);
508 508 #endif
509 509
510 510 if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip))
511 511 return (DDI_WALK_CONTINUE);
512 512
513 513 switch (cmd) {
514 514 case PM_START_CPUPM:
515 515 case PM_START_CPUPM_POLL:
516 516 if (!PM_ISCPU(dip))
517 517 return (DDI_WALK_CONTINUE);
518 518 mutex_enter(&pm_scan_lock);
519 519 if (!PM_CPUPM_DISABLED && !PM_EVENT_CPUPM)
520 520 pm_scan_init(dip);
521 521 mutex_exit(&pm_scan_lock);
522 522 break;
523 523 case PM_START_PM:
524 524 mutex_enter(&pm_scan_lock);
525 525 if (PM_ISCPU(dip) && (PM_CPUPM_DISABLED || PM_EVENT_CPUPM)) {
526 526 mutex_exit(&pm_scan_lock);
527 527 return (DDI_WALK_CONTINUE);
528 528 }
529 529 if (autopm_enabled)
530 530 pm_scan_init(dip);
531 531 mutex_exit(&pm_scan_lock);
532 532 break;
533 533 }
534 534
535 535 /*
 536 536 	 * Start doing pm on device: ensure pm_scan data structure initialized,
537 537 * no need to guarantee a successful scan run.
538 538 */
539 539 PMD(PMD_SCAN | PMD_IOCTL, ("ioctl: %s: scan %s@%s(%s#%d)\n", cmdstr,
540 540 PM_DEVICE(dip)))
541 541 pm_rescan(dip);
542 542
543 543 return (DDI_WALK_CONTINUE);
544 544 }
545 545
546 546 /*
547 547 * Bring devices to full power level, then stop scan
548 548 */
549 549 int
550 550 pm_stop_pm_walk(dev_info_t *dip, void *arg)
551 551 {
552 552 pm_info_t *info = PM_GET_PM_INFO(dip);
553 553 int cmd = *((int *)arg);
554 554 #ifdef PMDDEBUG
555 555 char *cmdstr = pm_decode_cmd(cmd);
556 556 #endif
557 557
558 558 if (!info)
559 559 return (DDI_WALK_CONTINUE);
560 560
561 561 switch (cmd) {
562 562 case PM_STOP_PM:
563 563 /*
564 564 * If CPU devices are being managed independently, then don't
565 565 * stop them as part of PM_STOP_PM. Only stop them as part of
566 566 * PM_STOP_CPUPM and PM_RESET_PM.
567 567 */
568 568 if (PM_ISCPU(dip) && PM_POLLING_CPUPM)
569 569 return (DDI_WALK_CONTINUE);
570 570 break;
571 571 case PM_STOP_CPUPM:
572 572 /*
573 573 * If stopping CPU devices and this device is not marked
574 574 * as a CPU device, then skip.
575 575 */
576 576 if (!PM_ISCPU(dip))
577 577 return (DDI_WALK_CONTINUE);
578 578 break;
579 579 }
580 580
581 581 /*
582 582 * Stop the current scan, and then bring it back to normal power.
583 583 */
584 584 if (!PM_ISBC(dip)) {
585 585 PMD(PMD_SCAN | PMD_IOCTL, ("ioctl: %s: stop scan for "
586 586 "%s@%s(%s#%d)\n", cmdstr, PM_DEVICE(dip)))
587 587 pm_scan_stop(dip);
588 588 }
589 589
590 590 if (!PM_ISBC(dip) && !PM_ISDIRECT(dip) &&
591 591 !pm_all_at_normal(dip)) {
592 592 PM_LOCK_DIP(dip);
593 593 if (info->pmi_dev_pm_state & PM_DETACHING) {
594 594 PMD(PMD_ALLNORM, ("ioctl: %s: deferring "
595 595 "all_to_normal because %s@%s(%s#%d) is detaching\n",
596 596 cmdstr, PM_DEVICE(dip)))
597 597 info->pmi_dev_pm_state |= PM_ALLNORM_DEFERRED;
598 598 PM_UNLOCK_DIP(dip);
599 599 return (DDI_WALK_CONTINUE);
600 600 }
601 601 PM_UNLOCK_DIP(dip);
602 602 if (pm_all_to_normal(dip, PM_CANBLOCK_FAIL) != DDI_SUCCESS) {
603 603 PMD(PMD_ERROR, ("ioctl: %s: could not bring %s@%s"
604 604 "(%s#%d) to normal\n", cmdstr, PM_DEVICE(dip)))
605 605 }
606 606 }
607 607
608 608 return (DDI_WALK_CONTINUE);
609 609 }
610 610
611 611 static int
612 612 pm_start_idledown(dev_info_t *dip, void *arg)
613 613 {
614 614 int flag = (int)(intptr_t)arg;
615 615 pm_scan_t *scanp = PM_GET_PM_SCAN(dip);
616 616
617 617 if (!scanp)
618 618 return (DDI_WALK_CONTINUE);
619 619
620 620 PM_LOCK_DIP(dip);
621 621 scanp->ps_idle_down |= flag;
622 622 PM_UNLOCK_DIP(dip);
623 623 pm_rescan(dip);
624 624
625 625 return (DDI_WALK_CONTINUE);
626 626 }
627 627
628 628 /*ARGSUSED*/
629 629 static int
630 630 pm_end_idledown(dev_info_t *dip, void *ignore)
631 631 {
632 632 pm_scan_t *scanp = PM_GET_PM_SCAN(dip);
633 633
634 634 if (!scanp)
635 635 return (DDI_WALK_CONTINUE);
636 636
637 637 PM_LOCK_DIP(dip);
638 638 /*
639 639 * The PMID_TIMERS bits are place holder till idledown expires.
640 640 * The bits are also the base for regenerating PMID_SCANS bits.
641 641 * While it's up to scan thread to clear up the PMID_SCANS bits
642 642 * after each scan run, PMID_TIMERS ensure aggressive scan down
643 643 * performance throughout the idledown period.
644 644 */
645 645 scanp->ps_idle_down &= ~PMID_TIMERS;
646 646 PM_UNLOCK_DIP(dip);
647 647
648 648 return (DDI_WALK_CONTINUE);
649 649 }
650 650
651 651 /*ARGSUSED*/
652 652 static void
653 653 pm_end_idledown_walk(void *ignore)
654 654 {
655 655 PMD(PMD_IDLEDOWN, ("ioctl: end_idledown: idledown_id(%lx) timer is "
656 656 "off\n", (ulong_t)pmstp->pm_idledown_id));
657 657
658 658 mutex_enter(&pm_scan_lock);
659 659 pmstp->pm_idledown_id = 0;
660 660 mutex_exit(&pm_scan_lock);
661 661
662 662 ddi_walk_devs(ddi_root_node(), pm_end_idledown, NULL);
663 663 }
664 664
665 665 /*
666 666 * pm_timeout_idledown - keep idledown effect for 10 seconds.
667 667 *
668 668 * Return 0 if another competing caller scheduled idledown timeout,
669 669 * otherwise, return idledown timeout_id.
670 670 */
671 671 static timeout_id_t
672 672 pm_timeout_idledown(void)
673 673 {
674 674 timeout_id_t to_id;
675 675
676 676 /*
677 677 * Keep idle-down in effect for either 10 seconds
 678 678 	 * or the length of a scan interval, whichever is greater.
679 679 */
680 680 mutex_enter(&pm_scan_lock);
681 681 if (pmstp->pm_idledown_id != 0) {
682 682 to_id = pmstp->pm_idledown_id;
683 683 pmstp->pm_idledown_id = 0;
684 684 mutex_exit(&pm_scan_lock);
685 685 (void) untimeout(to_id);
686 686 mutex_enter(&pm_scan_lock);
687 687 if (pmstp->pm_idledown_id != 0) {
688 688 PMD(PMD_IDLEDOWN, ("ioctl: timeout_idledown: "
689 689 "another caller got it, idledown_id(%lx)!\n",
690 690 (ulong_t)pmstp->pm_idledown_id))
691 691 mutex_exit(&pm_scan_lock);
692 692 return (0);
693 693 }
694 694 }
695 695 pmstp->pm_idledown_id = timeout(pm_end_idledown_walk, NULL,
696 696 PM_IDLEDOWN_TIME * hz);
697 697 PMD(PMD_IDLEDOWN, ("ioctl: timeout_idledown: idledown_id(%lx)\n",
698 698 (ulong_t)pmstp->pm_idledown_id))
699 699 mutex_exit(&pm_scan_lock);
700 700
701 701 return (pmstp->pm_idledown_id);
702 702 }
703 703
704 704 static int
705 705 pm_chpoll(dev_t dev, short events, int anyyet, short *reventsp,
706 706 struct pollhead **phpp)
707 707 {
708 708 extern struct pollhead pm_pollhead; /* common/os/sunpm.c */
709 709 int clone;
710 710
711 711 clone = PM_MINOR_TO_CLONE(getminor(dev));
712 712 PMD(PMD_IOCTL, ("ioctl: pm_chpoll: clone %d\n", clone))
713 713 if ((events & (POLLIN | POLLRDNORM)) && pm_poll_cnt[clone]) {
714 714 *reventsp |= (POLLIN | POLLRDNORM);
715 715 PMD(PMD_IOCTL, ("ioctl: pm_chpoll: reventsp set\n"))
716 716 } else {
717 717 *reventsp = 0;
718 718 if (!anyyet) {
719 719 PMD(PMD_IOCTL, ("ioctl: pm_chpoll: not anyyet\n"))
720 720 *phpp = &pm_pollhead;
721 721 }
722 722 #ifdef DEBUG
723 723 else {
724 724 PMD(PMD_IOCTL, ("ioctl: pm_chpoll: anyyet\n"))
725 725 }
726 726 #endif
727 727 }
728 728 return (0);
729 729 }
730 730
731 731 /*
 732 732  * Called by pm_discard_entries to free up the memory.  It also decrements
 733 733  * pm_poll_cnt if direct is nonzero.
734 734 */
735 735 static void
736 736 pm_free_entries(psce_t *pscep, int clone, int direct)
737 737 {
738 738 pm_state_change_t *p;
739 739
740 740 if (pscep) {
741 741 p = pscep->psce_out;
742 742 while (p->size) {
743 743 if (direct) {
744 744 PMD(PMD_IOCTL, ("ioctl: discard: "
745 745 "pm_poll_cnt[%d] is %d before "
746 746 "ASSERT\n", clone,
747 747 pm_poll_cnt[clone]))
748 748 ASSERT(pm_poll_cnt[clone]);
749 749 pm_poll_cnt[clone]--;
750 750 }
751 751 kmem_free(p->physpath, p->size);
752 752 p->size = 0;
753 753 if (p == pscep->psce_last)
754 754 p = pscep->psce_first;
755 755 else
756 756 p++;
757 757 }
758 758 pscep->psce_out = pscep->psce_first;
759 759 pscep->psce_in = pscep->psce_first;
760 760 mutex_exit(&pscep->psce_lock);
761 761 }
762 762 }
763 763
764 764 /*
765 765 * Discard entries for this clone. Calls pm_free_entries to free up memory.
766 766 */
767 767 static void
768 768 pm_discard_entries(int clone)
769 769 {
770 770 psce_t *pscep;
771 771 int direct = 0;
772 772
773 773 mutex_enter(&pm_clone_lock);
774 774 if ((pscep = pm_psc_clone_to_direct(clone)) != NULL)
775 775 direct = 1;
776 776 pm_free_entries(pscep, clone, direct);
777 777 pscep = pm_psc_clone_to_interest(clone);
778 778 pm_free_entries(pscep, clone, 0);
779 779 mutex_exit(&pm_clone_lock);
780 780 }
781 781
782 782
783 783 static void
784 784 pm_set_idle_threshold(dev_info_t *dip, int thresh, int flag)
785 785 {
786 786 if (!PM_ISBC(dip) && !PM_ISDIRECT(dip)) {
787 787 switch (DEVI(dip)->devi_pm_flags & PMC_THRESH_ALL) {
788 788 case PMC_DEF_THRESH:
789 789 case PMC_CPU_THRESH:
790 790 PMD(PMD_IOCTL, ("ioctl: set_idle_threshold: set "
791 791 "%s@%s(%s#%d) default thresh to 0t%d\n",
792 792 PM_DEVICE(dip), thresh))
793 793 pm_set_device_threshold(dip, thresh, flag);
794 794 break;
795 795 default:
796 796 break;
797 797 }
798 798 }
799 799 }
800 800
801 801 static int
802 802 pm_set_idle_thresh_walk(dev_info_t *dip, void *arg)
803 803 {
804 804 int cmd = *((int *)arg);
805 805
806 806 if (!PM_GET_PM_INFO(dip))
807 807 return (DDI_WALK_CONTINUE);
808 808
809 809 switch (cmd) {
810 810 case PM_SET_SYSTEM_THRESHOLD:
811 811 if (DEVI(dip)->devi_pm_flags & PMC_CPU_THRESH)
812 812 break;
813 813 pm_set_idle_threshold(dip, pm_system_idle_threshold,
814 814 PMC_DEF_THRESH);
815 815 pm_rescan(dip);
816 816 break;
817 817 case PM_SET_CPU_THRESHOLD:
818 818 if (!PM_ISCPU(dip))
819 819 break;
820 820 pm_set_idle_threshold(dip, pm_cpu_idle_threshold,
821 821 PMC_CPU_THRESH);
822 822 pm_rescan(dip);
823 823 break;
824 824 }
825 825
826 826 return (DDI_WALK_CONTINUE);
827 827 }
828 828
829 829 /*ARGSUSED*/
830 830 static int
831 831 pm_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
832 832 {
833 833 dev_t dev;
834 834 int instance;
835 835
836 836 switch (infocmd) {
837 837 case DDI_INFO_DEVT2DEVINFO:
838 838 if (pmstp->pm_instance == -1)
839 839 return (DDI_FAILURE);
840 840 *result = pmstp->pm_dip;
841 841 return (DDI_SUCCESS);
842 842
843 843 case DDI_INFO_DEVT2INSTANCE:
844 844 dev = (dev_t)arg;
845 845 instance = getminor(dev) >> 8;
846 846 *result = (void *)(uintptr_t)instance;
847 847 return (DDI_SUCCESS);
848 848
849 849 default:
850 850 return (DDI_FAILURE);
851 851 }
852 852 }
853 853
854 854
855 855 /*ARGSUSED1*/
856 856 static int
857 857 pm_open(dev_t *devp, int flag, int otyp, cred_t *cr)
858 858 {
859 859 int clone;
860 860
861 861 if (otyp != OTYP_CHR)
862 862 return (EINVAL);
863 863
864 864 mutex_enter(&pm_clone_lock);
865 865 for (clone = 1; clone < PM_MAX_CLONE; clone++)
866 866 if (!pmstp->pm_clones[clone])
867 867 break;
868 868
869 869 if (clone == PM_MAX_CLONE) {
870 870 mutex_exit(&pm_clone_lock);
871 871 return (ENXIO);
872 872 }
873 873 pmstp->pm_cred[clone] = cr;
874 874 crhold(cr);
875 875
876 876 *devp = makedevice(getmajor(*devp), (pmstp->pm_instance << 8) + clone);
877 877 pmstp->pm_clones[clone] = 1;
878 878 mutex_exit(&pm_clone_lock);
879 879
880 880 return (0);
881 881 }
882 882
883 883 /*ARGSUSED1*/
884 884 static int
885 885 pm_close(dev_t dev, int flag, int otyp, cred_t *cr)
886 886 {
887 887 int clone;
888 888
889 889 if (otyp != OTYP_CHR)
890 890 return (EINVAL);
891 891
892 892 clone = PM_MINOR_TO_CLONE(getminor(dev));
893 893 PMD(PMD_CLOSE, ("pm_close: minor %x, clone %x\n", getminor(dev),
894 894 clone))
895 895
896 896 /*
897 897 * Walk the entire device tree to find the corresponding
898 898 * device and operate on it.
899 899 */
900 900 ddi_walk_devs(ddi_root_node(), pm_close_direct_pm_device,
901 901 (void *) &clone);
902 902
903 903 crfree(pmstp->pm_cred[clone]);
904 904 pmstp->pm_cred[clone] = 0;
905 905 pmstp->pm_clones[clone] = 0;
906 906 pm_discard_entries(clone);
907 907 ASSERT(pm_poll_cnt[clone] == 0);
908 908 pm_deregister_watcher(clone, NULL);
909 909 return (0);
910 910 }
911 911
912 912 /*ARGSUSED*/
913 913 static int
914 914 pm_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr, int *rval_p)
915 915 {
916 916 struct pm_cmd_info *pc_info(int);
917 917 struct pm_cmd_info *pcip = pc_info(cmd);
918 918 pm_req_t req;
919 919 dev_info_t *dip = NULL;
920 920 pm_info_t *info = NULL;
921 921 int clone;
922 922 char *cmdstr = pm_decode_cmd(cmd);
923 923 /*
924 924 * To keep devinfo nodes from going away while we're holding a
925 925 * pointer to their dip, pm_name_to_dip() optionally holds
926 926 * the devinfo node. If we've done that, we set dipheld
927 927 * so we know at the end of the ioctl processing to release the
928 928 * node again.
929 929 */
930 930 int dipheld = 0;
931 931 int icount = 0;
932 932 int i;
933 933 int comps;
934 934 size_t lencopied;
935 935 int ret = ENOTTY;
936 936 int curpower;
937 937 char who[MAXNAMELEN];
938 938 size_t wholen; /* copyinstr length */
939 939 size_t deplen = MAXNAMELEN;
940 940 char *dep, i_dep_buf[MAXNAMELEN];
941 941 char pathbuf[MAXNAMELEN];
942 942 struct pm_component *cp;
943 943 #ifdef _MULTI_DATAMODEL
944 944 pm_state_change32_t *pscp32;
945 945 pm_state_change32_t psc32;
946 946 pm_searchargs32_t psa32;
947 947 size_t copysize32;
948 948 #endif
949 949 pm_state_change_t *pscp;
950 950 pm_state_change_t psc;
951 951 pm_searchargs_t psa;
952 952 char listname[MAXCOPYBUF];
953 953 char manufacturer[MAXCOPYBUF];
954 954 char product[MAXCOPYBUF];
955 955 size_t copysize;
956 956
957 957 PMD(PMD_IOCTL, ("ioctl: %s: begin\n", cmdstr))
958 958
959 959 #ifdef DEBUG
960 960 if (cmd == 666) {
961 961 ddi_walk_devs(ddi_root_node(), print_info, NULL);
962 962 return (0);
963 963 }
964 964 ret = 0x0badcafe; /* sanity checking */
965 965 pm_cmd = cmd; /* for ASSERT debugging */
966 966 pm_cmd_string = cmdstr; /* for ASSERT debugging */
967 967 #endif
968 968
969 969
970 970 if (pcip == NULL) {
971 971 PMD(PMD_ERROR, ("ioctl: unknown command %d\n", cmd))
972 972 return (ENOTTY);
973 973 }
974 974 if (pcip == NULL || pcip->supported == 0) {
975 975 PMD(PMD_ERROR, ("ioctl: command %s no longer supported\n",
976 976 pcip->name))
977 977 return (ENOTTY);
978 978 }
979 979
980 980 wholen = 0;
981 981 dep = i_dep_buf;
982 982 i_dep_buf[0] = 0;
983 983 clone = PM_MINOR_TO_CLONE(getminor(dev));
984 984 if (!pm_perms(pcip->permission, pmstp->pm_cred[clone])) {
985 985 ret = EPERM;
986 986 return (ret);
987 987 }
988 988 switch (pcip->str_type) {
989 989 case PM_REQ:
990 990 {
991 991 #ifdef _MULTI_DATAMODEL
992 992 if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
993 993 pm_req32_t req32;
994 994
995 995 if (ddi_copyin((caddr_t)arg, &req32,
996 996 sizeof (req32), mode) != 0) {
997 997 PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
998 998 "EFAULT\n\n", cmdstr))
999 999 ret = EFAULT;
1000 1000 break;
1001 1001 }
1002 1002 req.component = req32.component;
1003 1003 req.value = req32.value;
1004 1004 req.datasize = req32.datasize;
1005 1005 if (pcip->inargs & INWHO) {
1006 1006 ret = copyinstr((char *)(uintptr_t)
1007 1007 req32.physpath, who, MAXNAMELEN, &wholen);
1008 1008 if (ret) {
1009 1009 PMD(PMD_ERROR, ("ioctl: %s: "
1010 1010 "copyinstr fails returning %d\n",
1011 1011 cmdstr, ret))
1012 1012 break;
1013 1013 }
1014 1014 req.physpath = who;
1015 1015 PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n",
1016 1016 cmdstr, req.physpath))
1017 1017 }
1018 1018 if (pcip->inargs & INDATA) {
1019 1019 req.data = (void *)(uintptr_t)req32.data;
1020 1020 req.datasize = req32.datasize;
1021 1021 } else {
1022 1022 req.data = NULL;
1023 1023 req.datasize = 0;
1024 1024 }
1025 1025 switch (pcip->diptype) {
1026 1026 case DIP:
1027 1027 if (!(dip =
1028 1028 pm_name_to_dip(req.physpath, 1))) {
1029 1029 PMD(PMD_ERROR, ("ioctl: %s: "
1030 1030 "pm_name_to_dip for %s failed\n",
1031 1031 cmdstr, req.physpath))
1032 1032 return (ENODEV);
1033 1033 }
1034 1034 ASSERT(!dipheld);
1035 1035 dipheld++;
1036 1036 break;
1037 1037 case NODIP:
1038 1038 break;
1039 1039 default:
1040 1040 /*
1041 1041 * Internal error, invalid ioctl description
1042 1042 * force debug entry even if pm_debug not set
1043 1043 */
1044 1044 #ifdef DEBUG
1045 1045 pm_log("invalid diptype %d for cmd %d (%s)\n",
1046 1046 pcip->diptype, cmd, pcip->name);
1047 1047 #endif
1048 1048 ASSERT(0);
1049 1049 return (EIO);
1050 1050 }
1051 1051 if (pcip->inargs & INDATAINT) {
1052 1052 int32_t int32buf;
1053 1053 int32_t *i32p;
1054 1054 int *ip;
1055 1055 icount = req32.datasize / sizeof (int32_t);
1056 1056 if (icount <= 0) {
1057 1057 PMD(PMD_ERROR, ("ioctl: %s: datasize"
1058 1058 " 0 or neg EFAULT\n\n", cmdstr))
1059 1059 ret = EFAULT;
1060 1060 break;
1061 1061 }
1062 1062 ASSERT(!(pcip->inargs & INDATASTRING));
1063 1063 req.datasize = icount * sizeof (int);
1064 1064 req.data = kmem_alloc(req.datasize, KM_SLEEP);
1065 1065 ip = req.data;
1066 1066 ret = 0;
1067 1067 for (i = 0,
1068 1068 i32p = (int32_t *)(uintptr_t)req32.data;
1069 1069 i < icount; i++, i32p++) {
1070 1070 if (ddi_copyin((void *)i32p, &int32buf,
1071 1071 sizeof (int32_t), mode)) {
1072 1072 kmem_free(req.data,
1073 1073 req.datasize);
1074 1074 PMD(PMD_ERROR, ("ioctl: %s: "
1075 1075 "entry %d EFAULT\n",
1076 1076 cmdstr, i))
1077 1077 ret = EFAULT;
1078 1078 break;
1079 1079 }
1080 1080 *ip++ = (int)int32buf;
1081 1081 }
1082 1082 if (ret)
1083 1083 break;
1084 1084 }
1085 1085 if (pcip->inargs & INDATASTRING) {
1086 1086 ASSERT(!(pcip->inargs & INDATAINT));
1087 1087 ASSERT(pcip->deptype == DEP);
1088 1088 if (req32.data != NULL) {
1089 1089 if (copyinstr((void *)(uintptr_t)
1090 1090 req32.data, dep, deplen, NULL)) {
1091 1091 PMD(PMD_ERROR, ("ioctl: %s: "
1092 1092 "0x%p dep size %lx, EFAULT"
1093 1093 "\n", cmdstr,
1094 1094 (void *)req.data, deplen))
1095 1095 ret = EFAULT;
1096 1096 break;
1097 1097 }
1098 1098 #ifdef DEBUG
1099 1099 else {
1100 1100 PMD(PMD_DEP, ("ioctl: %s: "
1101 1101 "dep %s\n", cmdstr, dep))
1102 1102 }
1103 1103 #endif
1104 1104 } else {
1105 1105 PMD(PMD_ERROR, ("ioctl: %s: no "
1106 1106 "dependent\n", cmdstr))
1107 1107 ret = EINVAL;
1108 1108 break;
1109 1109 }
1110 1110 }
1111 1111 } else
1112 1112 #endif /* _MULTI_DATAMODEL */
1113 1113 {
1114 1114 if (ddi_copyin((caddr_t)arg,
1115 1115 &req, sizeof (req), mode) != 0) {
1116 1116 PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
1117 1117 "EFAULT\n\n", cmdstr))
1118 1118 ret = EFAULT;
1119 1119 break;
1120 1120 }
1121 1121 if (pcip->inargs & INWHO) {
1122 1122 ret = copyinstr((char *)req.physpath, who,
1123 1123 MAXNAMELEN, &wholen);
1124 1124 if (ret) {
1125 1125 PMD(PMD_ERROR, ("ioctl: %s copyinstr"
1126 1126 " fails returning %d\n", cmdstr,
1127 1127 ret))
1128 1128 break;
1129 1129 }
1130 1130 req.physpath = who;
1131 1131 PMD(PMD_IOCTL, ("ioctl: %s: physpath=%s\n",
1132 1132 cmdstr, req.physpath))
1133 1133 }
1134 1134 if (!(pcip->inargs & INDATA)) {
1135 1135 req.data = NULL;
1136 1136 req.datasize = 0;
1137 1137 }
1138 1138 switch (pcip->diptype) {
1139 1139 case DIP:
1140 1140 if (!(dip =
1141 1141 pm_name_to_dip(req.physpath, 1))) {
1142 1142 PMD(PMD_ERROR, ("ioctl: %s: "
1143 1143 "pm_name_to_dip for %s failed\n",
1144 1144 cmdstr, req.physpath))
1145 1145 return (ENODEV);
1146 1146 }
1147 1147 ASSERT(!dipheld);
1148 1148 dipheld++;
1149 1149 break;
1150 1150 case NODIP:
1151 1151 break;
1152 1152 default:
1153 1153 /*
1154 1154 * Internal error, invalid ioctl description
1155 1155 * force debug entry even if pm_debug not set
1156 1156 */
1157 1157 #ifdef DEBUG
1158 1158 pm_log("invalid diptype %d for cmd %d (%s)\n",
1159 1159 pcip->diptype, cmd, pcip->name);
1160 1160 #endif
1161 1161 ASSERT(0);
1162 1162 return (EIO);
1163 1163 }
1164 1164 if (pcip->inargs & INDATAINT) {
1165 1165 int *ip;
1166 1166
1167 1167 ASSERT(!(pcip->inargs & INDATASTRING));
1168 1168 ip = req.data;
1169 1169 icount = req.datasize / sizeof (int);
1170 1170 if (icount <= 0) {
1171 1171 PMD(PMD_ERROR, ("ioctl: %s: datasize"
1172 1172 " 0 or neg EFAULT\n\n", cmdstr))
1173 1173 ret = EFAULT;
1174 1174 break;
1175 1175 }
1176 1176 req.data = kmem_alloc(req.datasize, KM_SLEEP);
1177 1177 if (ddi_copyin((caddr_t)ip, req.data,
1178 1178 req.datasize, mode) != 0) {
1179 1179 PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
1180 1180 "EFAULT\n\n", cmdstr))
1181 1181 ret = EFAULT;
1182 1182 break;
1183 1183 }
1184 1184 }
1185 1185 if (pcip->inargs & INDATASTRING) {
1186 1186 ASSERT(!(pcip->inargs & INDATAINT));
1187 1187 ASSERT(pcip->deptype == DEP);
1188 1188 if (req.data != NULL) {
1189 1189 if (copyinstr((caddr_t)req.data,
1190 1190 dep, deplen, NULL)) {
1191 1191 PMD(PMD_ERROR, ("ioctl: %s: "
1192 1192 "0x%p dep size %lu, "
1193 1193 "EFAULT\n", cmdstr,
1194 1194 (void *)req.data, deplen))
1195 1195 ret = EFAULT;
1196 1196 break;
1197 1197 }
1198 1198 #ifdef DEBUG
1199 1199 else {
1200 1200 PMD(PMD_DEP, ("ioctl: %s: "
1201 1201 "dep %s\n", cmdstr, dep))
1202 1202 }
1203 1203 #endif
1204 1204 } else {
1205 1205 PMD(PMD_ERROR, ("ioctl: %s: no "
1206 1206 "dependent\n", cmdstr))
1207 1207 ret = EINVAL;
1208 1208 break;
1209 1209 }
1210 1210 }
1211 1211 }
1212 1212 /*
1213 1213 * Now we've got all the args in for the commands that
1214 1214 * use the new pm_req struct.
1215 1215 */
1216 1216 switch (cmd) {
1217 1217 case PM_REPARSE_PM_PROPS:
1218 1218 {
1219 1219 struct dev_ops *drv;
1220 1220 struct cb_ops *cb;
1221 1221 void *propval;
1222 1222 int length;
1223 1223 /*
1224 1224 * This ioctl is provided only for the ddivs pm test.
1225 1225 * We only do it to a driver which explicitly allows
1226 1226 * us to do so by exporting a pm-reparse-ok property.
1227 1227 * We only care whether the property exists or not.
1228 1228 */
1229 1229 if ((drv = ddi_get_driver(dip)) == NULL) {
1230 1230 ret = EINVAL;
1231 1231 break;
1232 1232 }
1233 1233 if ((cb = drv->devo_cb_ops) != NULL) {
1234 1234 if ((*cb->cb_prop_op)(DDI_DEV_T_ANY, dip,
1235 1235 PROP_LEN_AND_VAL_ALLOC, (DDI_PROP_CANSLEEP |
1236 1236 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
1237 1237 "pm-reparse-ok", (caddr_t)&propval,
1238 1238 &length) != DDI_SUCCESS) {
1239 1239 ret = EINVAL;
1240 1240 break;
1241 1241 }
1242 1242 } else if (ddi_prop_op(DDI_DEV_T_ANY, dip,
1243 1243 PROP_LEN_AND_VAL_ALLOC, (DDI_PROP_CANSLEEP |
1244 1244 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
1245 1245 "pm-reparse-ok", (caddr_t)&propval,
1246 1246 &length) != DDI_SUCCESS) {
1247 1247 ret = EINVAL;
1248 1248 break;
1249 1249 }
1250 1250 kmem_free(propval, length);
1251 1251 ret = e_new_pm_props(dip);
1252 1252 break;
1253 1253 }
1254 1254
1255 1255 case PM_GET_DEVICE_THRESHOLD:
1256 1256 {
1257 1257 PM_LOCK_DIP(dip);
1258 1258 if (!PM_GET_PM_INFO(dip) || PM_ISBC(dip)) {
1259 1259 PM_UNLOCK_DIP(dip);
1260 1260 PMD(PMD_ERROR, ("ioctl: %s: ENODEV\n",
1261 1261 cmdstr))
1262 1262 ret = ENODEV;
1263 1263 break;
1264 1264 }
1265 1265 *rval_p = DEVI(dip)->devi_pm_dev_thresh;
1266 1266 PM_UNLOCK_DIP(dip);
1267 1267 ret = 0;
1268 1268 break;
1269 1269 }
1270 1270
1271 1271 case PM_DIRECT_PM:
1272 1272 {
1273 1273 int has_dep;
1274 1274 if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1275 1275 PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1276 1276 "ENODEV\n", cmdstr))
1277 1277 ret = ENODEV;
1278 1278 break;
1279 1279 }
1280 1280 /*
1281 1281 * Check to see if we are there is a dependency on
1282 1282 * this kept device, if so, return EBUSY.
1283 1283 */
1284 1284 (void) ddi_pathname(dip, pathbuf);
1285 1285 pm_dispatch_to_dep_thread(PM_DEP_WK_CHECK_KEPT,
1286 1286 NULL, pathbuf, PM_DEP_WAIT, &has_dep, 0);
1287 1287 if (has_dep) {
1288 1288 PMD(PMD_ERROR | PMD_DPM, ("%s EBUSY\n",
1289 1289 cmdstr))
1290 1290 ret = EBUSY;
1291 1291 break;
1292 1292 }
1293 1293 PM_LOCK_DIP(dip);
1294 1294 if (PM_ISDIRECT(dip) || (info->pmi_clone != 0)) {
1295 1295 PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1296 1296 "%s@%s(%s#%d): EBUSY\n", cmdstr,
1297 1297 PM_DEVICE(dip)))
1298 1298 PM_UNLOCK_DIP(dip);
1299 1299 ret = EBUSY;
1300 1300 break;
1301 1301 }
1302 1302 info->pmi_dev_pm_state |= PM_DIRECT;
1303 1303 info->pmi_clone = clone;
1304 1304 PM_UNLOCK_DIP(dip);
1305 1305 PMD(PMD_DPM, ("ioctl: %s: info %p, pmi_clone %d\n",
1306 1306 cmdstr, (void *)info, clone))
1307 1307 mutex_enter(&pm_clone_lock);
1308 1308 pm_register_watcher(clone, dip);
1309 1309 mutex_exit(&pm_clone_lock);
1310 1310 ret = 0;
1311 1311 break;
1312 1312 }
1313 1313
1314 1314 case PM_RELEASE_DIRECT_PM:
1315 1315 {
1316 1316 if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1317 1317 PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1318 1318 "ENODEV\n", cmdstr))
1319 1319 ret = ENODEV;
1320 1320 break;
1321 1321 }
1322 1322 PM_LOCK_DIP(dip);
1323 1323 if (info->pmi_clone != clone) {
1324 1324 PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1325 1325 "%s@%s(%s#%d) EINVAL\n", cmdstr,
1326 1326 PM_DEVICE(dip)))
1327 1327 ret = EINVAL;
1328 1328 PM_UNLOCK_DIP(dip);
1329 1329 break;
1330 1330 }
1331 1331 ASSERT(PM_ISDIRECT(dip));
1332 1332 info->pmi_dev_pm_state &= ~PM_DIRECT;
1333 1333 PM_UNLOCK_DIP(dip);
1334 1334 /* Bring ourselves up if there is a keeper. */
1335 1335 (void) ddi_pathname(dip, pathbuf);
1336 1336 pm_dispatch_to_dep_thread(PM_DEP_WK_BRINGUP_SELF,
1337 1337 NULL, pathbuf, PM_DEP_WAIT, NULL, 0);
1338 1338 pm_discard_entries(clone);
1339 1339 pm_deregister_watcher(clone, dip);
1340 1340 /*
1341 1341 * Now we could let the other threads that are
1342 1342 * trying to do a DIRECT_PM thru
1343 1343 */
1344 1344 PM_LOCK_DIP(dip);
1345 1345 info->pmi_clone = 0;
1346 1346 PM_UNLOCK_DIP(dip);
1347 1347 pm_proceed(dip, PMP_RELEASE, -1, -1);
1348 1348 PMD(PMD_RESCAN | PMD_DPM, ("ioctl: %s: rescan\n",
1349 1349 cmdstr))
1350 1350 pm_rescan(dip);
1351 1351 ret = 0;
1352 1352 break;
1353 1353 }
1354 1354
1355 1355 case PM_SET_CURRENT_POWER:
1356 1356 {
1357 1357 int comp = req.component;
1358 1358 int value = req.value;
1359 1359 PMD(PMD_DPM, ("ioctl: %s: %s component %d to value "
1360 1360 "%d\n", cmdstr, req.physpath, comp, value))
1361 1361 if (!e_pm_valid_comp(dip, comp, NULL) ||
1362 1362 !e_pm_valid_power(dip, comp, value)) {
1363 1363 PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1364 1364 "physpath=%s, comp=%d, level=%d, fails\n",
1365 1365 cmdstr, req.physpath, comp, value))
1366 1366 ret = EINVAL;
1367 1367 break;
1368 1368 }
1369 1369
1370 1370 if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1371 1371 PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1372 1372 "ENODEV\n", cmdstr))
1373 1373 ret = ENODEV;
1374 1374 break;
1375 1375 }
1376 1376 if (info->pmi_clone != clone) {
1377 1377 PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1378 1378 "(not owner) %s fails; clone %d, owner %d"
1379 1379 "\n", cmdstr, req.physpath, clone,
1380 1380 info->pmi_clone))
1381 1381 ret = EINVAL;
1382 1382 break;
1383 1383 }
1384 1384 ASSERT(PM_ISDIRECT(dip));
1385 1385
1386 1386 if (pm_set_power(dip, comp, value, PM_LEVEL_EXACT,
1387 1387 PM_CANBLOCK_BLOCK, 0, &ret) != DDI_SUCCESS) {
1388 1388 PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s: "
1389 1389 "pm_set_power for %s fails, errno=%d\n",
1390 1390 cmdstr, req.physpath, ret))
1391 1391 break;
1392 1392 }
1393 1393
1394 1394 pm_proceed(dip, PMP_SETPOWER, comp, value);
1395 1395
1396 1396 /*
1397 1397 * Power down all idle components if console framebuffer
1398 1398 * is powered off.
1399 1399 */
1400 1400 if (PM_IS_CFB(dip) && (pm_system_idle_threshold ==
1401 1401 pm_default_idle_threshold)) {
1402 1402 dev_info_t *root = ddi_root_node();
1403 1403 if (PM_ISBC(dip)) {
1404 1404 if (comp == 0 && value == 0 &&
1405 1405 (pm_timeout_idledown() != 0)) {
1406 1406 ddi_walk_devs(root,
1407 1407 pm_start_idledown,
1408 1408 (void *)PMID_CFB);
1409 1409 }
1410 1410 } else {
1411 1411 int count = 0;
1412 1412 for (i = 0; i < PM_NUMCMPTS(dip); i++) {
1413 1413 ret = pm_get_current_power(dip,
1414 1414 i, &curpower);
1415 1415 if (ret == DDI_SUCCESS &&
1416 1416 curpower == 0)
1417 1417 count++;
1418 1418 }
1419 1419 if ((count == PM_NUMCMPTS(dip)) &&
1420 1420 (pm_timeout_idledown() != 0)) {
1421 1421 ddi_walk_devs(root,
1422 1422 pm_start_idledown,
1423 1423 (void *)PMID_CFB);
1424 1424 }
1425 1425 }
1426 1426 }
1427 1427
1428 1428 PMD(PMD_RESCAN | PMD_DPM, ("ioctl: %s: rescan\n",
1429 1429 cmdstr))
1430 1430 pm_rescan(dip);
1431 1431 *rval_p = 0;
1432 1432 ret = 0;
1433 1433 break;
1434 1434 }
1435 1435
1436 1436 case PM_GET_FULL_POWER:
1437 1437 {
1438 1438 int normal;
1439 1439 ASSERT(dip);
1440 1440 PMD(PMD_NORM, ("ioctl: %s: %s component %d\n",
1441 1441 cmdstr, req.physpath, req.component))
1442 1442 normal = pm_get_normal_power(dip, req.component);
1443 1443
1444 1444 if (normal == DDI_FAILURE) {
1445 1445 PMD(PMD_ERROR | PMD_NORM, ("ioctl: %s: "
1446 1446 "returns EINVAL\n", cmdstr))
1447 1447 ret = EINVAL;
1448 1448 break;
1449 1449 }
1450 1450 *rval_p = normal;
1451 1451 PMD(PMD_NORM, ("ioctl: %s: returns %d\n",
1452 1452 cmdstr, normal))
1453 1453 ret = 0;
1454 1454 break;
1455 1455 }
1456 1456
1457 1457 case PM_GET_CURRENT_POWER:
1458 1458 {
1459 1459 if (pm_get_current_power(dip, req.component,
1460 1460 rval_p) != DDI_SUCCESS) {
1461 1461 PMD(PMD_ERROR | PMD_DPM, ("ioctl: %s "
1462 1462 "EINVAL\n", cmdstr))
1463 1463 ret = EINVAL;
1464 1464 break;
1465 1465 }
1466 1466 PMD(PMD_DPM, ("ioctl: %s: %s comp %d returns %d\n",
1467 1467 cmdstr, req.physpath, req.component, *rval_p))
1468 1468 if (*rval_p == PM_LEVEL_UNKNOWN)
1469 1469 ret = EAGAIN;
1470 1470 else
1471 1471 ret = 0;
1472 1472 break;
1473 1473 }
1474 1474
1475 1475 case PM_GET_TIME_IDLE:
1476 1476 {
1477 1477 time_t timestamp;
1478 1478 int comp = req.component;
1479 1479 pm_component_t *cp;
1480 1480 if (!e_pm_valid_comp(dip, comp, &cp)) {
1481 1481 PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1482 1482 "component %d > numcmpts - 1 %d--EINVAL\n",
1483 1483 cmdstr, PM_DEVICE(dip), comp,
1484 1484 PM_NUMCMPTS(dip) - 1))
1485 1485 ret = EINVAL;
1486 1486 break;
1487 1487 }
1488 1488 timestamp = cp->pmc_timestamp;
1489 1489 if (timestamp) {
1490 1490 time_t now;
1491 1491 (void) drv_getparm(TIME, &now);
1492 1492 *rval_p = (now - timestamp);
1493 1493 } else {
1494 1494 *rval_p = 0;
1495 1495 }
1496 1496 ret = 0;
1497 1497 break;
1498 1498 }
1499 1499
1500 1500 case PM_ADD_DEPENDENT:
1501 1501 {
1502 1502 dev_info_t *kept_dip;
1503 1503
1504 1504 PMD(PMD_KEEPS, ("%s, kept %s, keeper %s\n", cmdstr,
1505 1505 dep, req.physpath))
1506 1506
1507 1507 /*
1508 1508 * hold and install kept while processing dependency
1509 1509 * keeper (in .physpath) has already been held.
1510 1510 */
1511 1511 if (dep[0] == '\0') {
1512 1512 PMD(PMD_ERROR, ("kept NULL or null\n"))
1513 1513 ret = EINVAL;
1514 1514 break;
1515 1515 } else if ((kept_dip =
1516 1516 pm_name_to_dip(dep, 1)) == NULL) {
1517 1517 PMD(PMD_ERROR, ("no dip for kept %s\n", dep))
1518 1518 ret = ENODEV;
1519 1519 break;
1520 1520 } else if (kept_dip == dip) {
1521 1521 PMD(PMD_ERROR, ("keeper(%s, %p) - kept(%s, %p) "
1522 1522 "self-dependency not allowed.\n",
1523 1523 dep, (void *)kept_dip, req.physpath,
1524 1524 (void *) dip))
1525 1525 PM_RELE(dip); /* release "double" hold */
1526 1526 ret = EINVAL;
1527 1527 break;
1528 1528 }
1529 1529 ASSERT(!(strcmp(req.physpath, (char *)dep) == 0));
1530 1530
1531 1531 /*
1532 1532 * record dependency, then walk through device tree
1533 1533 * independently on behalf of kept and keeper to
1534 1534 * establish newly created dependency.
1535 1535 */
1536 1536 pm_dispatch_to_dep_thread(PM_DEP_WK_RECORD_KEEPER,
1537 1537 req.physpath, dep, PM_DEP_WAIT, NULL, 0);
1538 1538
1539 1539 /*
1540 1540 * release kept after establishing dependency, keeper
1541 1541 * is released as part of ioctl exit processing.
1542 1542 */
1543 1543 PM_RELE(kept_dip);
1544 1544 *rval_p = 0;
1545 1545 ret = 0;
1546 1546 break;
1547 1547 }
1548 1548
1549 1549 case PM_ADD_DEPENDENT_PROPERTY:
1550 1550 {
1551 1551 char *keeper, *kept;
1552 1552
1553 1553 if (dep[0] == '\0') {
1554 1554 PMD(PMD_ERROR, ("ioctl: %s: dep NULL or "
1555 1555 "null\n", cmdstr))
1556 1556 ret = EINVAL;
1557 1557 break;
1558 1558 }
1559 1559 kept = dep;
1560 1560 keeper = req.physpath;
1561 1561 /*
1562 1562 * record keeper - kept dependency, then walk through
1563 1563 * device tree to find out all attached keeper, walk
1564 1564 * through again to apply dependency to all the
1565 1565 * potential kept.
1566 1566 */
1567 1567 pm_dispatch_to_dep_thread(
1568 1568 PM_DEP_WK_RECORD_KEEPER_PROP, keeper, kept,
1569 1569 PM_DEP_WAIT, NULL, 0);
1570 1570
1571 1571 *rval_p = 0;
1572 1572 ret = 0;
1573 1573 break;
1574 1574 }
1575 1575
1576 1576 case PM_SET_DEVICE_THRESHOLD:
1577 1577 {
1578 1578 pm_thresh_rec_t *rp;
1579 1579 pm_pte_t *ep; /* threshold header storage */
1580 1580 int *tp; /* threshold storage */
1581 1581 size_t size;
1582 1582 extern int pm_thresh_specd(dev_info_t *);
1583 1583
1584 1584 /*
1585 1585 * The header struct plus one entry struct plus one
1586 1586 * threshold plus the length of the string
1587 1587 */
1588 1588 size = sizeof (pm_thresh_rec_t) +
1589 1589 (sizeof (pm_pte_t) * 1) +
1590 1590 (1 * sizeof (int)) +
1591 1591 strlen(req.physpath) + 1;
1592 1592
1593 1593 rp = kmem_zalloc(size, KM_SLEEP);
1594 1594 rp->ptr_size = size;
1595 1595 rp->ptr_numcomps = 0; /* means device threshold */
1596 1596 ep = (pm_pte_t *)((intptr_t)rp + sizeof (*rp));
1597 1597 rp->ptr_entries = ep;
1598 1598 tp = (int *)((intptr_t)ep +
1599 1599 (1 * sizeof (pm_pte_t)));
1600 1600 ep->pte_numthresh = 1;
1601 1601 ep->pte_thresh = tp;
1602 1602 *tp++ = req.value;
1603 1603 (void) strcat((char *)tp, req.physpath);
1604 1604 rp->ptr_physpath = (char *)tp;
1605 1605 ASSERT((intptr_t)tp + strlen(req.physpath) + 1 ==
1606 1606 (intptr_t)rp + rp->ptr_size);
1607 1607 PMD(PMD_THRESH, ("ioctl: %s: record thresh %d for "
1608 1608 "%s\n", cmdstr, req.value, req.physpath))
1609 1609 pm_record_thresh(rp);
1610 1610 /*
1611 1611 * Don't free rp, pm_record_thresh() keeps it.
1612 1612 * We don't try to apply it ourselves because we'd need
1613 1613 * to know too much about locking. Since we don't
1614 1614 * hold a lock the entry could be removed before
1615 1615 * we get here
1616 1616 */
1617 1617 ASSERT(dip == NULL);
1618 1618 ret = 0; /* can't fail now */
1619 1619 if (!(dip = pm_name_to_dip(req.physpath, 1))) {
1620 1620 break;
1621 1621 }
1622 1622 (void) pm_thresh_specd(dip);
1623 1623 PMD(PMD_DHR, ("ioctl: %s: releasing %s@%s(%s#%d)\n",
1624 1624 cmdstr, PM_DEVICE(dip)))
1625 1625 PM_RELE(dip);
1626 1626 break;
1627 1627 }
1628 1628
1629 1629 case PM_RESET_DEVICE_THRESHOLD:
1630 1630 {
1631 1631 /*
1632 1632 * This only applies to a currently attached and power
1633 1633 * managed node
1634 1634 */
1635 1635 /*
1636 1636 * We don't do this to old-style drivers
1637 1637 */
1638 1638 info = PM_GET_PM_INFO(dip);
1639 1639 if (info == NULL) {
1640 1640 PMD(PMD_ERROR, ("ioctl: %s: %s not power "
1641 1641 "managed\n", cmdstr, req.physpath))
1642 1642 ret = EINVAL;
1643 1643 break;
1644 1644 }
1645 1645 if (PM_ISBC(dip)) {
1646 1646 PMD(PMD_ERROR, ("ioctl: %s: %s is BC\n",
1647 1647 cmdstr, req.physpath))
1648 1648 ret = EINVAL;
1649 1649 break;
1650 1650 }
1651 1651 pm_unrecord_threshold(req.physpath);
1652 1652 if (DEVI(dip)->devi_pm_flags & PMC_CPU_THRESH)
1653 1653 pm_set_device_threshold(dip,
1654 1654 pm_cpu_idle_threshold, PMC_CPU_THRESH);
1655 1655 else
1656 1656 pm_set_device_threshold(dip,
1657 1657 pm_system_idle_threshold, PMC_DEF_THRESH);
1658 1658 ret = 0;
1659 1659 break;
1660 1660 }
1661 1661
1662 1662 case PM_GET_NUM_COMPONENTS:
1663 1663 {
1664 1664 ret = 0;
1665 1665 *rval_p = PM_NUMCMPTS(dip);
1666 1666 break;
1667 1667 }
1668 1668
1669 1669 case PM_GET_DEVICE_TYPE:
1670 1670 {
1671 1671 ret = 0;
1672 1672 if ((info = PM_GET_PM_INFO(dip)) == NULL) {
1673 1673 PMD(PMD_ERROR, ("ioctl: %s: "
1674 1674 "PM_NO_PM_COMPONENTS\n", cmdstr))
1675 1675 *rval_p = PM_NO_PM_COMPONENTS;
1676 1676 break;
1677 1677 }
1678 1678 if (PM_ISBC(dip)) {
1679 1679 *rval_p = PM_CREATE_COMPONENTS;
1680 1680 } else {
1681 1681 *rval_p = PM_AUTOPM;
1682 1682 }
1683 1683 break;
1684 1684 }
1685 1685
1686 1686 case PM_SET_COMPONENT_THRESHOLDS:
1687 1687 {
1688 1688 int comps = 0;
1689 1689 int *end = (int *)req.data + icount;
1690 1690 pm_thresh_rec_t *rp;
1691 1691 pm_pte_t *ep; /* threshold header storage */
1692 1692 int *tp; /* threshold storage */
1693 1693 int *ip;
1694 1694 int j;
1695 1695 size_t size;
1696 1696 extern int pm_thresh_specd(dev_info_t *);
1697 1697 extern int pm_valid_thresh(dev_info_t *,
1698 1698 pm_thresh_rec_t *);
1699 1699
1700 1700 for (ip = req.data; *ip; ip++) {
1701 1701 if (ip >= end) {
1702 1702 ret = EFAULT;
1703 1703 break;
1704 1704 }
1705 1705 comps++;
1706 1706 /* skip over indicated number of entries */
1707 1707 for (j = *ip; j; j--) {
1708 1708 if (++ip >= end) {
1709 1709 ret = EFAULT;
1710 1710 break;
1711 1711 }
1712 1712 }
1713 1713 if (ret)
1714 1714 break;
1715 1715 }
1716 1716 if (ret)
1717 1717 break;
1718 1718 if ((intptr_t)ip != (intptr_t)end - sizeof (int)) {
1719 1719 /* did not exactly fill buffer */
1720 1720 ret = EINVAL;
1721 1721 break;
1722 1722 }
1723 1723 if (comps == 0) {
1724 1724 PMD(PMD_ERROR, ("ioctl: %s: %s 0 components"
1725 1725 "--EINVAL\n", cmdstr, req.physpath))
1726 1726 ret = EINVAL;
1727 1727 break;
1728 1728 }
1729 1729 /*
1730 1730 * The header struct plus one entry struct per component
1731 1731 * plus the size of the lists minus the counts
1732 1732 * plus the length of the string
1733 1733 */
1734 1734 size = sizeof (pm_thresh_rec_t) +
1735 1735 (sizeof (pm_pte_t) * comps) + req.datasize -
1736 1736 ((comps + 1) * sizeof (int)) +
1737 1737 strlen(req.physpath) + 1;
1738 1738
1739 1739 rp = kmem_zalloc(size, KM_SLEEP);
1740 1740 rp->ptr_size = size;
1741 1741 rp->ptr_numcomps = comps;
1742 1742 ep = (pm_pte_t *)((intptr_t)rp + sizeof (*rp));
1743 1743 rp->ptr_entries = ep;
1744 1744 tp = (int *)((intptr_t)ep +
1745 1745 (comps * sizeof (pm_pte_t)));
1746 1746 for (ip = req.data; *ip; ep++) {
1747 1747 ep->pte_numthresh = *ip;
1748 1748 ep->pte_thresh = tp;
1749 1749 for (j = *ip++; j; j--) {
1750 1750 *tp++ = *ip++;
1751 1751 }
1752 1752 }
1753 1753 (void) strcat((char *)tp, req.physpath);
1754 1754 rp->ptr_physpath = (char *)tp;
1755 1755 ASSERT((intptr_t)end == (intptr_t)ip + sizeof (int));
1756 1756 ASSERT((intptr_t)tp + strlen(req.physpath) + 1 ==
1757 1757 (intptr_t)rp + rp->ptr_size);
1758 1758
1759 1759 ASSERT(dip == NULL);
1760 1760 /*
1761 1761 * If this is not a currently power managed node,
1762 1762 * then we can't check for validity of the thresholds
1763 1763 */
1764 1764 if (!(dip = pm_name_to_dip(req.physpath, 1))) {
1765 1765 /* don't free rp, pm_record_thresh uses it */
1766 1766 pm_record_thresh(rp);
1767 1767 PMD(PMD_ERROR, ("ioctl: %s: pm_name_to_dip "
1768 1768 "for %s failed\n", cmdstr, req.physpath))
1769 1769 ret = 0;
1770 1770 break;
1771 1771 }
1772 1772 ASSERT(!dipheld);
1773 1773 dipheld++;
1774 1774
1775 1775 if (!pm_valid_thresh(dip, rp)) {
1776 1776 PMD(PMD_ERROR, ("ioctl: %s: invalid thresh "
1777 1777 "for %s@%s(%s#%d)\n", cmdstr,
1778 1778 PM_DEVICE(dip)))
1779 1779 kmem_free(rp, size);
1780 1780 ret = EINVAL;
1781 1781 break;
1782 1782 }
1783 1783 /*
1784 1784 * We don't just apply it ourselves because we'd need
1785 1785 * to know too much about locking. Since we don't
1786 1786 * hold a lock the entry could be removed before
1787 1787 * we get here
1788 1788 */
1789 1789 pm_record_thresh(rp);
1790 1790 (void) pm_thresh_specd(dip);
1791 1791 ret = 0;
1792 1792 break;
1793 1793 }
1794 1794
1795 1795 case PM_GET_COMPONENT_THRESHOLDS:
1796 1796 {
1797 1797 int musthave;
1798 1798 int numthresholds = 0;
1799 1799 int wordsize;
1800 1800 int numcomps;
1801 1801 caddr_t uaddr = req.data; /* user address */
1802 1802 int val; /* int value to be copied out */
1803 1803 int32_t val32; /* int32 value to be copied out */
1804 1804 caddr_t vaddr; /* address to copyout from */
1805 1805 int j;
1806 1806
1807 1807 #ifdef _MULTI_DATAMODEL
1808 1808 if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
1809 1809 wordsize = sizeof (int32_t);
1810 1810 } else
1811 1811 #endif /* _MULTI_DATAMODEL */
1812 1812 {
1813 1813 wordsize = sizeof (int);
1814 1814 }
1815 1815
1816 1816 ASSERT(dip);
1817 1817
1818 1818 numcomps = PM_NUMCMPTS(dip);
1819 1819 for (i = 0; i < numcomps; i++) {
1820 1820 cp = PM_CP(dip, i);
1821 1821 numthresholds += cp->pmc_comp.pmc_numlevels - 1;
1822 1822 }
1823 1823 musthave = (numthresholds + numcomps + 1) * wordsize;
1824 1824 if (req.datasize < musthave) {
1825 1825 PMD(PMD_ERROR, ("ioctl: %s: size %ld, need "
1826 1826 "%d--EINVAL\n", cmdstr, req.datasize,
1827 1827 musthave))
1828 1828 ret = EINVAL;
1829 1829 break;
1830 1830 }
1831 1831 PM_LOCK_DIP(dip);
1832 1832 for (i = 0; i < numcomps; i++) {
1833 1833 int *thp;
1834 1834 cp = PM_CP(dip, i);
1835 1835 thp = cp->pmc_comp.pmc_thresh;
1836 1836 /* first copyout the count */
1837 1837 if (wordsize == sizeof (int32_t)) {
1838 1838 val32 = cp->pmc_comp.pmc_numlevels - 1;
1839 1839 vaddr = (caddr_t)&val32;
1840 1840 } else {
1841 1841 val = cp->pmc_comp.pmc_numlevels - 1;
1842 1842 vaddr = (caddr_t)&val;
1843 1843 }
1844 1844 if (ddi_copyout(vaddr, (void *)uaddr,
1845 1845 wordsize, mode) != 0) {
1846 1846 PM_UNLOCK_DIP(dip);
1847 1847 PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
1848 1848 "(%s#%d) vaddr %p EFAULT\n",
1849 1849 cmdstr, PM_DEVICE(dip),
1850 1850 (void*)vaddr))
1851 1851 ret = EFAULT;
1852 1852 break;
1853 1853 }
1854 1854 vaddr = uaddr;
1855 1855 vaddr += wordsize;
1856 1856 uaddr = (caddr_t)vaddr;
1857 1857 /* then copyout each threshold value */
1858 1858 for (j = 0; j < cp->pmc_comp.pmc_numlevels - 1;
1859 1859 j++) {
1860 1860 if (wordsize == sizeof (int32_t)) {
1861 1861 val32 = thp[j + 1];
1862 1862 vaddr = (caddr_t)&val32;
1863 1863 } else {
1864 1864 val = thp[i + 1];
1865 1865 vaddr = (caddr_t)&val;
1866 1866 }
1867 1867 if (ddi_copyout(vaddr, (void *) uaddr,
1868 1868 wordsize, mode) != 0) {
1869 1869 PM_UNLOCK_DIP(dip);
1870 1870 PMD(PMD_ERROR, ("ioctl: %s: "
1871 1871 "%s@%s(%s#%d) uaddr %p "
1872 1872 "EFAULT\n", cmdstr,
1873 1873 PM_DEVICE(dip),
1874 1874 (void *)uaddr))
1875 1875 ret = EFAULT;
1876 1876 break;
1877 1877 }
1878 1878 vaddr = uaddr;
1879 1879 vaddr += wordsize;
1880 1880 uaddr = (caddr_t)vaddr;
1881 1881 }
1882 1882 }
1883 1883 if (ret)
1884 1884 break;
1885 1885 /* last copyout a terminating 0 count */
1886 1886 if (wordsize == sizeof (int32_t)) {
1887 1887 val32 = 0;
1888 1888 vaddr = (caddr_t)&val32;
1889 1889 } else {
1890 1890 ASSERT(wordsize == sizeof (int));
1891 1891 val = 0;
1892 1892 vaddr = (caddr_t)&val;
1893 1893 }
1894 1894 if (ddi_copyout(vaddr, uaddr, wordsize, mode) != 0) {
1895 1895 PM_UNLOCK_DIP(dip);
1896 1896 PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
1897 1897 "vaddr %p (0 count) EFAULT\n", cmdstr,
1898 1898 PM_DEVICE(dip), (void *)vaddr))
1899 1899 ret = EFAULT;
1900 1900 break;
1901 1901 }
1902 1902 /* finished, so don't need to increment addresses */
1903 1903 PM_UNLOCK_DIP(dip);
1904 1904 ret = 0;
1905 1905 break;
1906 1906 }
1907 1907
1908 1908 case PM_GET_STATS:
1909 1909 {
1910 1910 time_t now;
1911 1911 time_t *timestamp;
1912 1912 extern int pm_cur_power(pm_component_t *cp);
1913 1913 int musthave;
1914 1914 int wordsize;
1915 1915
1916 1916 #ifdef _MULTI_DATAMODEL
1917 1917 if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
1918 1918 wordsize = sizeof (int32_t);
1919 1919 } else
1920 1920 #endif /* _MULTI_DATAMODEL */
1921 1921 {
1922 1922 wordsize = sizeof (int);
1923 1923 }
1924 1924
1925 1925 comps = PM_NUMCMPTS(dip);
1926 1926 if (comps == 0 || PM_GET_PM_INFO(dip) == NULL) {
1927 1927 PMD(PMD_ERROR, ("ioctl: %s: %s no components"
1928 1928 " or not power managed--EINVAL\n", cmdstr,
1929 1929 req.physpath))
1930 1930 ret = EINVAL;
1931 1931 break;
1932 1932 }
1933 1933 musthave = comps * 2 * wordsize;
1934 1934 if (req.datasize < musthave) {
1935 1935 PMD(PMD_ERROR, ("ioctl: %s: size %lu, need "
1936 1936 "%d--EINVAL\n", cmdstr, req.datasize,
1937 1937 musthave))
1938 1938 ret = EINVAL;
1939 1939 break;
1940 1940 }
1941 1941
1942 1942 PM_LOCK_DIP(dip);
1943 1943 (void) drv_getparm(TIME, &now);
1944 1944 timestamp = kmem_zalloc(comps * sizeof (time_t),
1945 1945 KM_SLEEP);
1946 1946 pm_get_timestamps(dip, timestamp);
1947 1947 /*
1948 1948 * First the current power levels
1949 1949 */
1950 1950 for (i = 0; i < comps; i++) {
1951 1951 int curpwr;
1952 1952 int32_t curpwr32;
1953 1953 caddr_t cpaddr;
1954 1954
1955 1955 cp = PM_CP(dip, i);
1956 1956 if (wordsize == sizeof (int)) {
1957 1957 curpwr = pm_cur_power(cp);
1958 1958 cpaddr = (caddr_t)&curpwr;
1959 1959 } else {
1960 1960 ASSERT(wordsize == sizeof (int32_t));
1961 1961 curpwr32 = pm_cur_power(cp);
1962 1962 cpaddr = (caddr_t)&curpwr32;
1963 1963 }
1964 1964 if (ddi_copyout(cpaddr, (void *) req.data,
1965 1965 wordsize, mode) != 0) {
1966 1966 PM_UNLOCK_DIP(dip);
1967 1967 PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
1968 1968 "(%s#%d) req.data %p EFAULT\n",
1969 1969 cmdstr, PM_DEVICE(dip),
1970 1970 (void *)req.data))
1971 1971 ASSERT(!dipheld);
1972 1972 return (EFAULT);
1973 1973 }
1974 1974 cpaddr = (caddr_t)req.data;
1975 1975 cpaddr += wordsize;
1976 1976 req.data = cpaddr;
1977 1977 }
1978 1978 /*
1979 1979 * Then the times remaining
1980 1980 */
1981 1981 for (i = 0; i < comps; i++) {
1982 1982 int retval;
1983 1983 int32_t retval32;
1984 1984 caddr_t rvaddr;
1985 1985 int curpwr;
1986 1986
1987 1987 cp = PM_CP(dip, i);
1988 1988 curpwr = cp->pmc_cur_pwr;
1989 1989 if (curpwr == 0 || timestamp[i] == 0) {
1990 1990 PMD(PMD_STATS, ("ioctl: %s: "
1991 1991 "cur_pwer %x, timestamp %lx\n",
1992 1992 cmdstr, curpwr, timestamp[i]))
1993 1993 retval = INT_MAX;
1994 1994 } else {
1995 1995 int thresh;
1996 1996 (void) pm_current_threshold(dip, i,
1997 1997 &thresh);
1998 1998 retval = thresh - (now - timestamp[i]);
1999 1999 PMD(PMD_STATS, ("ioctl: %s: current "
2000 2000 "thresh %x, now %lx, timestamp %lx,"
2001 2001 " retval %x\n", cmdstr, thresh, now,
2002 2002 timestamp[i], retval))
2003 2003 }
2004 2004 if (wordsize == sizeof (int)) {
2005 2005 rvaddr = (caddr_t)&retval;
2006 2006 } else {
2007 2007 ASSERT(wordsize == sizeof (int32_t));
2008 2008 retval32 = retval;
2009 2009 rvaddr = (caddr_t)&retval32;
2010 2010 }
2011 2011 if (ddi_copyout(rvaddr, (void *) req.data,
2012 2012 wordsize, mode) != 0) {
2013 2013 PM_UNLOCK_DIP(dip);
2014 2014 PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
2015 2015 "(%s#%d) req.data %p EFAULT\n",
2016 2016 cmdstr, PM_DEVICE(dip),
2017 2017 (void *)req.data))
2018 2018 ASSERT(!dipheld);
2019 2019 kmem_free(timestamp,
2020 2020 comps * sizeof (time_t));
2021 2021 return (EFAULT);
2022 2022 }
2023 2023 rvaddr = (caddr_t)req.data;
2024 2024 rvaddr += wordsize;
2025 2025 req.data = (int *)rvaddr;
2026 2026 }
2027 2027 PM_UNLOCK_DIP(dip);
2028 2028 *rval_p = comps;
2029 2029 ret = 0;
2030 2030 kmem_free(timestamp, comps * sizeof (time_t));
2031 2031 break;
2032 2032 }
2033 2033
2034 2034 case PM_GET_CMD_NAME:
2035 2035 {
2036 2036 PMD(PMD_IOCTL, ("%s: %s\n", cmdstr,
2037 2037 pm_decode_cmd(req.value)))
2038 2038 if (ret = copyoutstr(pm_decode_cmd(req.value),
2039 2039 (char *)req.data, req.datasize, &lencopied)) {
2040 2040 PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2041 2041 "copyoutstr %p failed--EFAULT\n", cmdstr,
2042 2042 PM_DEVICE(dip), (void *)req.data))
2043 2043 break;
2044 2044 }
2045 2045 *rval_p = lencopied;
2046 2046 ret = 0;
2047 2047 break;
2048 2048 }
2049 2049
2050 2050 case PM_GET_COMPONENT_NAME:
2051 2051 {
2052 2052 ASSERT(dip);
2053 2053 if (!e_pm_valid_comp(dip, req.component, &cp)) {
2054 2054 PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2055 2055 "component %d > numcmpts - 1 %d--EINVAL\n",
2056 2056 cmdstr, PM_DEVICE(dip), req.component,
2057 2057 PM_NUMCMPTS(dip) - 1))
2058 2058 ret = EINVAL;
2059 2059 break;
2060 2060 }
2061 2061 if (ret = copyoutstr(cp->pmc_comp.pmc_name,
2062 2062 (char *)req.data, req.datasize, &lencopied)) {
2063 2063 PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2064 2064 "copyoutstr %p failed--EFAULT\n", cmdstr,
2065 2065 PM_DEVICE(dip), (void *)req.data))
2066 2066 break;
2067 2067 }
2068 2068 *rval_p = lencopied;
2069 2069 ret = 0;
2070 2070 break;
2071 2071 }
2072 2072
2073 2073 case PM_GET_POWER_NAME:
2074 2074 {
2075 2075 int i;
2076 2076
2077 2077 ASSERT(dip);
2078 2078 if (!e_pm_valid_comp(dip, req.component, &cp)) {
2079 2079 PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2080 2080 "component %d > numcmpts - 1 %d--EINVAL\n",
2081 2081 cmdstr, PM_DEVICE(dip), req.component,
2082 2082 PM_NUMCMPTS(dip) - 1))
2083 2083 ret = EINVAL;
2084 2084 break;
2085 2085 }
2086 2086 if ((i = req.value) < 0 ||
2087 2087 i > cp->pmc_comp.pmc_numlevels - 1) {
2088 2088 PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2089 2089 "value %d > num_levels - 1 %d--EINVAL\n",
2090 2090 cmdstr, PM_DEVICE(dip), req.value,
2091 2091 cp->pmc_comp.pmc_numlevels - 1))
2092 2092 ret = EINVAL;
2093 2093 break;
2094 2094 }
2095 2095 dep = cp->pmc_comp.pmc_lnames[req.value];
2096 2096 if (ret = copyoutstr(dep,
2097 2097 req.data, req.datasize, &lencopied)) {
2098 2098 PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2099 2099 "copyoutstr %p failed--EFAULT\n", cmdstr,
2100 2100 PM_DEVICE(dip), (void *)req.data))
2101 2101 break;
2102 2102 }
2103 2103 *rval_p = lencopied;
2104 2104 ret = 0;
2105 2105 break;
2106 2106 }
2107 2107
2108 2108 case PM_GET_POWER_LEVELS:
2109 2109 {
2110 2110 int musthave;
2111 2111 int numlevels;
2112 2112 int wordsize;
2113 2113
2114 2114 #ifdef _MULTI_DATAMODEL
2115 2115 if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2116 2116 wordsize = sizeof (int32_t);
2117 2117 } else
2118 2118 #endif /* _MULTI_DATAMODEL */
2119 2119 {
2120 2120 wordsize = sizeof (int);
2121 2121 }
2122 2122 ASSERT(dip);
2123 2123
2124 2124 if (!e_pm_valid_comp(dip, req.component, &cp)) {
2125 2125 PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2126 2126 "has %d components, component %d requested"
2127 2127 "--EINVAL\n", cmdstr, PM_DEVICE(dip),
2128 2128 PM_NUMCMPTS(dip), req.component))
2129 2129 ret = EINVAL;
2130 2130 break;
2131 2131 }
2132 2132 numlevels = cp->pmc_comp.pmc_numlevels;
2133 2133 musthave = numlevels * wordsize;
2134 2134 if (req.datasize < musthave) {
2135 2135 PMD(PMD_ERROR, ("ioctl: %s: size %lu, need "
2136 2136 "%d--EINVAL\n", cmdstr, req.datasize,
2137 2137 musthave))
2138 2138 ret = EINVAL;
2139 2139 break;
2140 2140 }
2141 2141 PM_LOCK_DIP(dip);
2142 2142 for (i = 0; i < numlevels; i++) {
2143 2143 int level;
2144 2144 int32_t level32;
2145 2145 caddr_t laddr;
2146 2146
2147 2147 if (wordsize == sizeof (int)) {
2148 2148 level = cp->pmc_comp.pmc_lvals[i];
2149 2149 laddr = (caddr_t)&level;
2150 2150 } else {
2151 2151 level32 = cp->pmc_comp.pmc_lvals[i];
2152 2152 laddr = (caddr_t)&level32;
2153 2153 }
2154 2154 if (ddi_copyout(laddr, (void *) req.data,
2155 2155 wordsize, mode) != 0) {
2156 2156 PM_UNLOCK_DIP(dip);
2157 2157 PMD(PMD_ERROR, ("ioctl: %s: %s@%s"
2158 2158 "(%s#%d) laddr %p EFAULT\n",
2159 2159 cmdstr, PM_DEVICE(dip),
2160 2160 (void *)laddr))
2161 2161 ASSERT(!dipheld);
2162 2162 return (EFAULT);
2163 2163 }
2164 2164 laddr = (caddr_t)req.data;
2165 2165 laddr += wordsize;
2166 2166 req.data = (int *)laddr;
2167 2167 }
2168 2168 PM_UNLOCK_DIP(dip);
2169 2169 *rval_p = numlevels;
2170 2170 ret = 0;
2171 2171 break;
2172 2172 }
2173 2173
2174 2174
2175 2175 case PM_GET_NUM_POWER_LEVELS:
2176 2176 {
2177 2177 if (!e_pm_valid_comp(dip, req.component, &cp)) {
2178 2178 PMD(PMD_ERROR, ("ioctl: %s: %s@%s(%s#%d) "
2179 2179 "component %d > numcmpts - 1 %d--EINVAL\n",
2180 2180 cmdstr, PM_DEVICE(dip), req.component,
2181 2181 PM_NUMCMPTS(dip) - 1))
2182 2182 ret = EINVAL;
2183 2183 break;
2184 2184 }
2185 2185 *rval_p = cp->pmc_comp.pmc_numlevels;
2186 2186 ret = 0;
2187 2187 break;
2188 2188 }
2189 2189
2190 2190 case PM_GET_DEVICE_THRESHOLD_BASIS:
2191 2191 {
2192 2192 ret = 0;
2193 2193 PM_LOCK_DIP(dip);
2194 2194 if ((info = PM_GET_PM_INFO(dip)) == NULL) {
2195 2195 PM_UNLOCK_DIP(dip);
2196 2196 PMD(PMD_ERROR, ("ioctl: %s: "
2197 2197 "PM_NO_PM_COMPONENTS\n", cmdstr))
2198 2198 *rval_p = PM_NO_PM_COMPONENTS;
2199 2199 break;
2200 2200 }
2201 2201 if (PM_ISDIRECT(dip)) {
2202 2202 PM_UNLOCK_DIP(dip);
2203 2203 *rval_p = PM_DIRECTLY_MANAGED;
2204 2204 break;
2205 2205 }
2206 2206 switch (DEVI(dip)->devi_pm_flags & PMC_THRESH_ALL) {
2207 2207 case PMC_DEF_THRESH:
2208 2208 case PMC_NEXDEF_THRESH:
2209 2209 *rval_p = PM_DEFAULT_THRESHOLD;
2210 2210 break;
2211 2211 case PMC_DEV_THRESH:
2212 2212 *rval_p = PM_DEVICE_THRESHOLD;
2213 2213 break;
2214 2214 case PMC_COMP_THRESH:
2215 2215 *rval_p = PM_COMPONENT_THRESHOLD;
2216 2216 break;
2217 2217 case PMC_CPU_THRESH:
2218 2218 *rval_p = PM_CPU_THRESHOLD;
2219 2219 break;
2220 2220 default:
2221 2221 if (PM_ISBC(dip)) {
2222 2222 *rval_p = PM_OLD_THRESHOLD;
2223 2223 break;
2224 2224 }
2225 2225 PMD(PMD_ERROR, ("ioctl: %s: default, not "
2226 2226 "BC--EINVAL", cmdstr))
2227 2227 ret = EINVAL;
2228 2228 break;
2229 2229 }
2230 2230 PM_UNLOCK_DIP(dip);
2231 2231 break;
2232 2232 }
2233 2233 default:
2234 2234 /*
2235 2235 * Internal error, invalid ioctl description
2236 2236 * force debug entry even if pm_debug not set
2237 2237 */
2238 2238 #ifdef DEBUG
2239 2239 pm_log("invalid diptype %d for cmd %d (%s)\n",
2240 2240 pcip->diptype, cmd, pcip->name);
2241 2241 #endif
2242 2242 ASSERT(0);
2243 2243 return (EIO);
2244 2244 }
2245 2245 break;
2246 2246 }
2247 2247
2248 2248 case PM_PSC:
2249 2249 {
2250 2250 /*
2251 2251 * Commands that require pm_state_change_t as arg
2252 2252 */
2253 2253 #ifdef _MULTI_DATAMODEL
2254 2254 if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2255 2255 pscp32 = (pm_state_change32_t *)arg;
2256 2256 if (ddi_copyin((caddr_t)arg, &psc32,
2257 2257 sizeof (psc32), mode) != 0) {
2258 2258 PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2259 2259 "EFAULT\n\n", cmdstr))
2260 2260 ASSERT(!dipheld);
2261 2261 return (EFAULT);
2262 2262 }
2263 2263 psc.physpath = (caddr_t)(uintptr_t)psc32.physpath;
2264 2264 psc.size = psc32.size;
2265 2265 } else
2266 2266 #endif /* _MULTI_DATAMODEL */
2267 2267 {
2268 2268 pscp = (pm_state_change_t *)arg;
2269 2269 if (ddi_copyin((caddr_t)arg, &psc,
2270 2270 sizeof (psc), mode) != 0) {
2271 2271 PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2272 2272 "EFAULT\n\n", cmdstr))
2273 2273 ASSERT(!dipheld);
2274 2274 return (EFAULT);
2275 2275 }
2276 2276 }
2277 2277 switch (cmd) {
2278 2278
2279 2279 case PM_GET_STATE_CHANGE:
2280 2280 case PM_GET_STATE_CHANGE_WAIT:
2281 2281 {
2282 2282 psce_t *pscep;
2283 2283 pm_state_change_t *p;
2284 2284 caddr_t physpath;
2285 2285 size_t physlen;
2286 2286
2287 2287 /*
2288 2288 * We want to know if any device has changed state.
2289 2289 * We look up by clone. In case we have another thread
2290 2290 * from the same process, we loop.
2291 2291 * pm_psc_clone_to_interest() returns a locked entry.
2292 2292 * We create an internal copy of the event entry prior
2293 2293 * to copyout to user space because we don't want to
2294 2294 * hold the psce_lock while doing copyout as we might
2295 2295 * hit page fault which eventually brings us back
2296 2296 * here requesting the same lock.
2297 2297 */
2298 2298 mutex_enter(&pm_clone_lock);
2299 2299 if (!pm_interest_registered(clone))
2300 2300 pm_register_watcher(clone, NULL);
2301 2301 while ((pscep =
2302 2302 pm_psc_clone_to_interest(clone)) == NULL) {
2303 2303 if (cmd == PM_GET_STATE_CHANGE) {
2304 2304 PMD(PMD_IOCTL, ("ioctl: %s: "
2305 2305 "EWOULDBLOCK\n", cmdstr))
2306 2306 mutex_exit(&pm_clone_lock);
2307 2307 ASSERT(!dipheld);
2308 2308 return (EWOULDBLOCK);
2309 2309 } else {
2310 2310 if (cv_wait_sig(&pm_clones_cv[clone],
2311 2311 &pm_clone_lock) == 0) {
2312 2312 mutex_exit(&pm_clone_lock);
2313 2313 PMD(PMD_ERROR, ("ioctl: %s "
2314 2314 "EINTR\n", cmdstr))
2315 2315 ASSERT(!dipheld);
2316 2316 return (EINTR);
2317 2317 }
2318 2318 }
2319 2319 }
2320 2320 mutex_exit(&pm_clone_lock);
2321 2321
2322 2322 physlen = pscep->psce_out->size;
2323 2323 physpath = NULL;
2324 2324 /*
2325 2325 * If we were unable to store the path while bringing
2326 2326 * up the console fb upon entering the prom, we give
2327 2327 * a "" name with the overrun event set
2328 2328 */
2329 2329 if (physlen == (size_t)-1) { /* kmemalloc failed */
2330 2330 physpath = kmem_zalloc(1, KM_SLEEP);
2331 2331 physlen = 1;
2332 2332 }
2333 2333 if ((psc.physpath == NULL) || (psc.size < physlen)) {
2334 2334 PMD(PMD_ERROR, ("ioctl: %s: EFAULT\n", cmdstr))
2335 2335 mutex_exit(&pscep->psce_lock);
2336 2336 ret = EFAULT;
2337 2337 break;
2338 2338 }
2339 2339 if (physpath == NULL) {
2340 2340 physpath = kmem_zalloc(physlen, KM_SLEEP);
2341 2341 bcopy((const void *) pscep->psce_out->physpath,
2342 2342 (void *) physpath, physlen);
2343 2343 }
2344 2344
2345 2345 p = pscep->psce_out;
2346 2346 #ifdef _MULTI_DATAMODEL
2347 2347 if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2348 2348 #ifdef DEBUG
2349 2349 size_t usrcopysize;
2350 2350 #endif
2351 2351 psc32.flags = (ushort_t)p->flags;
2352 2352 psc32.event = (ushort_t)p->event;
2353 2353 psc32.timestamp = (int32_t)p->timestamp;
2354 2354 psc32.component = (int32_t)p->component;
2355 2355 psc32.old_level = (int32_t)p->old_level;
2356 2356 psc32.new_level = (int32_t)p->new_level;
2357 2357 copysize32 = ((intptr_t)&psc32.size -
2358 2358 (intptr_t)&psc32.component);
2359 2359 #ifdef DEBUG
2360 2360 usrcopysize = ((intptr_t)&pscp32->size -
2361 2361 (intptr_t)&pscp32->component);
2362 2362 ASSERT(usrcopysize == copysize32);
2363 2363 #endif
2364 2364 } else
2365 2365 #endif /* _MULTI_DATAMODEL */
2366 2366 {
2367 2367 psc.flags = p->flags;
2368 2368 psc.event = p->event;
2369 2369 psc.timestamp = p->timestamp;
2370 2370 psc.component = p->component;
2371 2371 psc.old_level = p->old_level;
2372 2372 psc.new_level = p->new_level;
2373 2373 copysize = ((long)&p->size -
2374 2374 (long)&p->component);
2375 2375 }
2376 2376 if (p->size != (size_t)-1)
2377 2377 kmem_free(p->physpath, p->size);
2378 2378 p->size = 0;
2379 2379 p->physpath = NULL;
2380 2380 if (pscep->psce_out == pscep->psce_last)
2381 2381 p = pscep->psce_first;
2382 2382 else
2383 2383 p++;
2384 2384 pscep->psce_out = p;
2385 2385 mutex_exit(&pscep->psce_lock);
2386 2386
2387 2387 ret = copyoutstr(physpath, psc.physpath,
2388 2388 physlen, &lencopied);
2389 2389 kmem_free(physpath, physlen);
2390 2390 if (ret) {
2391 2391 PMD(PMD_ERROR, ("ioctl: %s: copyoutstr %p "
2392 2392 "failed--EFAULT\n", cmdstr,
2393 2393 (void *)psc.physpath))
2394 2394 break;
2395 2395 }
2396 2396
2397 2397 #ifdef _MULTI_DATAMODEL
2398 2398 if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2399 2399 if (ddi_copyout(&psc32.component,
2400 2400 &pscp32->component, copysize32, mode)
2401 2401 != 0) {
2402 2402 PMD(PMD_ERROR, ("ioctl: %s: copyout "
2403 2403 "failed--EFAULT\n", cmdstr))
2404 2404 ret = EFAULT;
2405 2405 break;
2406 2406 }
2407 2407 } else
2408 2408 #endif /* _MULTI_DATAMODEL */
2409 2409 {
2410 2410 if (ddi_copyout(&psc.component,
2411 2411 &pscp->component, copysize, mode) != 0) {
2412 2412 PMD(PMD_ERROR, ("ioctl: %s: copyout "
2413 2413 "failed--EFAULT\n", cmdstr))
2414 2414 ret = EFAULT;
2415 2415 break;
2416 2416 }
2417 2417 }
2418 2418 ret = 0;
2419 2419 break;
2420 2420 }
2421 2421
2422 2422 case PM_DIRECT_NOTIFY:
2423 2423 case PM_DIRECT_NOTIFY_WAIT:
2424 2424 {
2425 2425 psce_t *pscep;
2426 2426 pm_state_change_t *p;
2427 2427 caddr_t physpath;
2428 2428 size_t physlen;
2429 2429 /*
2430 2430 * We want to know if any direct device of ours has
2431 2431 * something we should know about. We look up by clone.
2432 2432 * In case we have another thread from the same process,
2433 2433 * we loop.
2434 2434 * pm_psc_clone_to_direct() returns a locked entry.
2435 2435 */
2436 2436 mutex_enter(&pm_clone_lock);
2437 2437 while (pm_poll_cnt[clone] == 0 ||
2438 2438 (pscep = pm_psc_clone_to_direct(clone)) == NULL) {
2439 2439 if (cmd == PM_DIRECT_NOTIFY) {
2440 2440 PMD(PMD_IOCTL, ("ioctl: %s: "
2441 2441 "EWOULDBLOCK\n", cmdstr))
2442 2442 mutex_exit(&pm_clone_lock);
2443 2443 ASSERT(!dipheld);
2444 2444 return (EWOULDBLOCK);
2445 2445 } else {
2446 2446 if (cv_wait_sig(&pm_clones_cv[clone],
2447 2447 &pm_clone_lock) == 0) {
2448 2448 mutex_exit(&pm_clone_lock);
2449 2449 PMD(PMD_ERROR, ("ioctl: %s: "
2450 2450 "EINTR\n", cmdstr))
2451 2451 ASSERT(!dipheld);
2452 2452 return (EINTR);
2453 2453 }
2454 2454 }
2455 2455 }
2456 2456 mutex_exit(&pm_clone_lock);
2457 2457 physlen = pscep->psce_out->size;
2458 2458 if ((psc.physpath == NULL) || (psc.size < physlen)) {
2459 2459 mutex_exit(&pscep->psce_lock);
2460 2460 PMD(PMD_ERROR, ("ioctl: %s: EFAULT\n",
2461 2461 cmdstr))
2462 2462 ret = EFAULT;
2463 2463 break;
2464 2464 }
2465 2465 physpath = kmem_zalloc(physlen, KM_SLEEP);
2466 2466 bcopy((const void *) pscep->psce_out->physpath,
2467 2467 (void *) physpath, physlen);
2468 2468
2469 2469 p = pscep->psce_out;
2470 2470 #ifdef _MULTI_DATAMODEL
2471 2471 if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2472 2472 #ifdef DEBUG
2473 2473 size_t usrcopysize;
2474 2474 #endif
2475 2475 psc32.component = (int32_t)p->component;
2476 2476 psc32.flags = (ushort_t)p->flags;
2477 2477 psc32.event = (ushort_t)p->event;
2478 2478 psc32.timestamp = (int32_t)p->timestamp;
2479 2479 psc32.old_level = (int32_t)p->old_level;
2480 2480 psc32.new_level = (int32_t)p->new_level;
2481 2481 copysize32 = (intptr_t)&psc32.size -
2482 2482 (intptr_t)&psc32.component;
2483 2483 PMD(PMD_DPM, ("ioctl: %s: PDN32 %s, comp %d "
2484 2484 "%d -> %d\n", cmdstr, physpath,
2485 2485 p->component, p->old_level, p->new_level))
2486 2486 #ifdef DEBUG
2487 2487 usrcopysize = (intptr_t)&pscp32->size -
2488 2488 (intptr_t)&pscp32->component;
2489 2489 ASSERT(usrcopysize == copysize32);
2490 2490 #endif
2491 2491 } else
2492 2492 #endif
2493 2493 {
2494 2494 psc.component = p->component;
2495 2495 psc.flags = p->flags;
2496 2496 psc.event = p->event;
2497 2497 psc.timestamp = p->timestamp;
2498 2498 psc.old_level = p->old_level;
2499 2499 psc.new_level = p->new_level;
2500 2500 copysize = (intptr_t)&p->size -
2501 2501 (intptr_t)&p->component;
2502 2502 PMD(PMD_DPM, ("ioctl: %s: PDN %s, comp %d "
2503 2503 "%d -> %d\n", cmdstr, physpath,
2504 2504 p->component, p->old_level, p->new_level))
2505 2505 }
2506 2506 mutex_enter(&pm_clone_lock);
2507 2507 PMD(PMD_IOCTL, ("ioctl: %s: pm_poll_cnt[%d] is %d "
2508 2508 "before decrement\n", cmdstr, clone,
2509 2509 pm_poll_cnt[clone]))
2510 2510 pm_poll_cnt[clone]--;
2511 2511 mutex_exit(&pm_clone_lock);
2512 2512 kmem_free(p->physpath, p->size);
2513 2513 p->size = 0;
2514 2514 p->physpath = NULL;
2515 2515 if (pscep->psce_out == pscep->psce_last)
2516 2516 p = pscep->psce_first;
2517 2517 else
2518 2518 p++;
2519 2519 pscep->psce_out = p;
2520 2520 mutex_exit(&pscep->psce_lock);
2521 2521
2522 2522 ret = copyoutstr(physpath, psc.physpath,
2523 2523 physlen, &lencopied);
2524 2524 kmem_free(physpath, physlen);
2525 2525 if (ret) {
2526 2526 PMD(PMD_ERROR, ("ioctl: %s: copyoutstr %p "
2527 2527 "failed--EFAULT\n", cmdstr,
2528 2528 (void *)psc.physpath))
2529 2529 break;
2530 2530 }
2531 2531
2532 2532 #ifdef _MULTI_DATAMODEL
2533 2533 if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2534 2534 if (ddi_copyout(&psc32.component,
2535 2535 &pscp32->component, copysize32, mode)
2536 2536 != 0) {
2537 2537 PMD(PMD_ERROR, ("ioctl: %s: copyout "
2538 2538 "failed--EFAULT\n", cmdstr))
2539 2539 ret = EFAULT;
2540 2540 break;
2541 2541 }
2542 2542 } else
2543 2543 #endif /* _MULTI_DATAMODEL */
2544 2544 {
2545 2545 if (ddi_copyout(&psc.component,
2546 2546 &pscp->component, copysize, mode) != 0) {
2547 2547 PMD(PMD_ERROR, ("ioctl: %s: copyout "
2548 2548 "failed--EFAULT\n", cmdstr))
2549 2549 ret = EFAULT;
2550 2550 break;
2551 2551 }
2552 2552 }
2553 2553 ret = 0;
2554 2554 break;
2555 2555 }
2556 2556 default:
2557 2557 /*
2558 2558 * Internal error, invalid ioctl description
2559 2559 * force debug entry even if pm_debug not set
2560 2560 */
2561 2561 #ifdef DEBUG
2562 2562 pm_log("invalid diptype %d for cmd %d (%s)\n",
2563 2563 pcip->diptype, cmd, pcip->name);
2564 2564 #endif
2565 2565 ASSERT(0);
2566 2566 return (EIO);
2567 2567 }
2568 2568 break;
2569 2569 }
2570 2570
2571 2571 case PM_SRCH: /* command that takes a pm_searchargs_t arg */
2572 2572 {
2573 2573 /*
2574 2574 * If no ppm, then there is nothing to search.
2575 2575 */
2576 2576 if (DEVI(ddi_root_node())->devi_pm_ppm == NULL) {
2577 2577 ret = ENODEV;
2578 2578 break;
2579 2579 }
2580 2580
2581 2581 #ifdef _MULTI_DATAMODEL
2582 2582 if ((mode & DATAMODEL_MASK) == DATAMODEL_ILP32) {
2583 2583 if (ddi_copyin((caddr_t)arg, &psa32,
2584 2584 sizeof (psa32), mode) != 0) {
2585 2585 PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2586 2586 "EFAULT\n\n", cmdstr))
2587 2587 return (EFAULT);
2588 2588 }
2589 2589 if (copyinstr((void *)(uintptr_t)psa32.pms_listname,
2590 2590 listname, MAXCOPYBUF, NULL)) {
2591 2591 PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2592 2592 "%d, " "EFAULT\n", cmdstr,
2593 2593 (void *)(uintptr_t)psa32.pms_listname,
2594 2594 MAXCOPYBUF))
2595 2595 ret = EFAULT;
2596 2596 break;
2597 2597 }
2598 2598 if (copyinstr((void *)(uintptr_t)psa32.pms_manufacturer,
2599 2599 manufacturer, MAXCOPYBUF, NULL)) {
2600 2600 PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2601 2601 "%d, " "EFAULT\n", cmdstr,
2602 2602 (void *)(uintptr_t)psa32.pms_manufacturer,
2603 2603 MAXCOPYBUF))
2604 2604 ret = EFAULT;
2605 2605 break;
2606 2606 }
2607 2607 if (copyinstr((void *)(uintptr_t)psa32.pms_product,
2608 2608 product, MAXCOPYBUF, NULL)) {
2609 2609 PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2610 2610 "%d, " "EFAULT\n", cmdstr,
2611 2611 (void *)(uintptr_t)psa32.pms_product,
2612 2612 MAXCOPYBUF))
2613 2613 ret = EFAULT;
2614 2614 break;
2615 2615 }
2616 2616 } else
2617 2617 #endif /* _MULTI_DATAMODEL */
2618 2618 {
2619 2619 if (ddi_copyin((caddr_t)arg, &psa,
2620 2620 sizeof (psa), mode) != 0) {
2621 2621 PMD(PMD_ERROR, ("ioctl: %s: ddi_copyin "
2622 2622 "EFAULT\n\n", cmdstr))
2623 2623 return (EFAULT);
2624 2624 }
2625 2625 if (copyinstr(psa.pms_listname,
2626 2626 listname, MAXCOPYBUF, NULL)) {
2627 2627 PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2628 2628 "%d, " "EFAULT\n", cmdstr,
2629 2629 (void *)psa.pms_listname, MAXCOPYBUF))
2630 2630 ret = EFAULT;
2631 2631 break;
2632 2632 }
2633 2633 if (copyinstr(psa.pms_manufacturer,
2634 2634 manufacturer, MAXCOPYBUF, NULL)) {
2635 2635 PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2636 2636 "%d, " "EFAULT\n", cmdstr,
2637 2637 (void *)psa.pms_manufacturer, MAXCOPYBUF))
2638 2638 ret = EFAULT;
2639 2639 break;
2640 2640 }
2641 2641 if (copyinstr(psa.pms_product,
2642 2642 product, MAXCOPYBUF, NULL)) {
2643 2643 PMD(PMD_ERROR, ("ioctl: %s: 0x%p MAXCOPYBUF "
2644 2644 "%d, " "EFAULT\n", cmdstr,
2645 2645 (void *)psa.pms_product, MAXCOPYBUF))
2646 2646 ret = EFAULT;
2647 2647 break;
2648 2648 }
2649 2649 }
2650 2650 psa.pms_listname = listname;
2651 2651 psa.pms_manufacturer = manufacturer;
2652 2652 psa.pms_product = product;
2653 2653 switch (cmd) {
2654 2654 case PM_SEARCH_LIST:
2655 2655 ret = pm_ppm_searchlist(&psa);
2656 2656 break;
2657 2657
2658 2658 default:
2659 2659 /*
2660 2660 * Internal error, invalid ioctl description
2661 2661 * force debug entry even if pm_debug not set
2662 2662 */
2663 2663 #ifdef DEBUG
2664 2664 pm_log("invalid diptype %d for cmd %d (%s)\n",
2665 2665 pcip->diptype, cmd, pcip->name);
2666 2666 #endif
2667 2667 ASSERT(0);
2668 2668 return (EIO);
2669 2669 }
2670 2670 break;
2671 2671 }
2672 2672
2673 2673 case NOSTRUCT:
2674 2674 {
2675 2675 switch (cmd) {
2676 2676 case PM_START_PM:
2677 2677 case PM_START_CPUPM:
2678 2678 case PM_START_CPUPM_EV:
2679 2679 case PM_START_CPUPM_POLL:
2680 2680 {
2681 2681 pm_cpupm_t new_mode = PM_CPUPM_NOTSET;
2682 2682 pm_cpupm_t old_mode = PM_CPUPM_NOTSET;
2683 2683 int r;
2684 2684
2685 2685 mutex_enter(&pm_scan_lock);
2686 2686 if ((cmd == PM_START_PM && autopm_enabled) ||
2687 2687 (cmd == PM_START_CPUPM && PM_DEFAULT_CPUPM) ||
2688 2688 (cmd == PM_START_CPUPM_EV && PM_EVENT_CPUPM) ||
2689 2689 (cmd == PM_START_CPUPM_POLL && PM_POLLING_CPUPM)) {
2690 2690 mutex_exit(&pm_scan_lock);
2691 2691 PMD(PMD_ERROR, ("ioctl: %s: EBUSY\n", cmdstr))
2692 2692 ret = EBUSY;
2693 2693 break;
2694 2694 }
2695 2695
2696 2696 if (cmd == PM_START_PM) {
2697 2697 autopm_enabled = 1;
2698 2698 } else if (cmd == PM_START_CPUPM) {
2699 2699 old_mode = cpupm;
2700 2700 new_mode = cpupm = cpupm_default_mode;
2701 2701 } else if (cmd == PM_START_CPUPM_EV) {
2702 2702 old_mode = cpupm;
2703 2703 new_mode = cpupm = PM_CPUPM_EVENT;
2704 2704 } else if (cmd == PM_START_CPUPM_POLL) {
2705 2705 old_mode = cpupm;
2706 2706 new_mode = cpupm = PM_CPUPM_POLLING;
2707 2707 }
2708 2708
2709 2709 mutex_exit(&pm_scan_lock);
2710 2710
2711 2711 /*
2712 2712 * If we are changing CPUPM modes, and it is active,
2713 2713 * then stop it from operating in the old mode.
2714 2714 */
2715 2715 if (old_mode == PM_CPUPM_POLLING) {
2716 2716 int c = PM_STOP_CPUPM;
2717 2717 ddi_walk_devs(ddi_root_node(), pm_stop_pm_walk,
2718 2718 &c);
2719 2719 } else if (old_mode == PM_CPUPM_EVENT) {
2720 2720 r = cpupm_set_policy(CPUPM_POLICY_DISABLED);
2721 2721
2722 2722 /*
2723 2723 * Disabling CPUPM policy should always
2724 2724 * succeed
2725 2725 */
2726 2726 ASSERT(r == 0);
2727 2727 }
2728 2728
2729 2729 /*
2730 2730 * If we are changing to event based CPUPM, enable it.
2731 2731 * In the event it's not supported, fall back to
2732 2732 * polling based CPUPM.
2733 2733 */
2734 2734 if (new_mode == PM_CPUPM_EVENT &&
2735 2735 cpupm_set_policy(CPUPM_POLICY_ELASTIC) < 0) {
2736 2736 mutex_enter(&pm_scan_lock);
2737 2737 new_mode = cpupm = PM_CPUPM_POLLING;
2738 2738 cmd = PM_START_CPUPM_POLL;
2739 2739 mutex_exit(&pm_scan_lock);
2740 2740 }
2741 2741 if (new_mode == PM_CPUPM_POLLING ||
2742 2742 cmd == PM_START_PM) {
2743 2743 ddi_walk_devs(ddi_root_node(), pm_start_pm_walk,
2744 2744 &cmd);
2745 2745 }
2746 2746 ret = 0;
2747 2747 break;
2748 2748 }
2749 2749
2750 2750 case PM_RESET_PM:
2751 2751 case PM_STOP_PM:
2752 2752 case PM_STOP_CPUPM:
2753 2753 {
2754 2754 extern void pm_discard_thresholds(void);
2755 2755 pm_cpupm_t old_mode = PM_CPUPM_NOTSET;
2756 2756
2757 2757 mutex_enter(&pm_scan_lock);
2758 2758 if ((cmd == PM_STOP_PM && !autopm_enabled) ||
2759 2759 (cmd == PM_STOP_CPUPM && PM_CPUPM_DISABLED)) {
2760 2760 mutex_exit(&pm_scan_lock);
2761 2761 PMD(PMD_ERROR, ("ioctl: %s: EINVAL\n",
2762 2762 cmdstr))
2763 2763 ret = EINVAL;
2764 2764 break;
2765 2765 }
2766 2766
2767 2767 if (cmd == PM_STOP_PM) {
2768 2768 autopm_enabled = 0;
2769 2769 pm_S3_enabled = 0;
2770 2770 autoS3_enabled = 0;
2771 2771 } else if (cmd == PM_STOP_CPUPM) {
2772 2772 old_mode = cpupm;
2773 2773 cpupm = PM_CPUPM_DISABLE;
2774 2774 } else {
2775 2775 autopm_enabled = 0;
2776 2776 autoS3_enabled = 0;
2777 2777 old_mode = cpupm;
2778 2778 cpupm = PM_CPUPM_NOTSET;
2779 2779 }
2780 2780 mutex_exit(&pm_scan_lock);
2781 2781
2782 2782 /*
2783 2783 * bring devices to full power level, stop scan
2784 2784 * If CPUPM was operating in event driven mode, disable
2785 2785 * that.
2786 2786 */
2787 2787 if (old_mode == PM_CPUPM_EVENT) {
2788 2788 (void) cpupm_set_policy(CPUPM_POLICY_DISABLED);
2789 2789 }
2790 2790 ddi_walk_devs(ddi_root_node(), pm_stop_pm_walk, &cmd);
2791 2791 ret = 0;
2792 2792 if (cmd == PM_STOP_PM || cmd == PM_STOP_CPUPM)
2793 2793 break;
2794 2794 /*
2795 2795 * Now do only PM_RESET_PM stuff.
2796 2796 */
2797 2797 pm_system_idle_threshold = pm_default_idle_threshold;
2798 2798 pm_cpu_idle_threshold = 0;
2799 2799 pm_discard_thresholds();
2800 2800 pm_all_to_default_thresholds();
2801 2801 pm_dispatch_to_dep_thread(PM_DEP_WK_REMOVE_DEP,
2802 2802 NULL, NULL, PM_DEP_WAIT, NULL, 0);
2803 2803 break;
2804 2804 }
2805 2805
2806 2806 case PM_GET_SYSTEM_THRESHOLD:
2807 2807 {
2808 2808 *rval_p = pm_system_idle_threshold;
2809 2809 ret = 0;
2810 2810 break;
2811 2811 }
2812 2812
2813 2813 case PM_GET_DEFAULT_SYSTEM_THRESHOLD:
2814 2814 {
2815 2815 *rval_p = pm_default_idle_threshold;
2816 2816 ret = 0;
2817 2817 break;
2818 2818 }
2819 2819
2820 2820 case PM_GET_CPU_THRESHOLD:
2821 2821 {
2822 2822 *rval_p = pm_cpu_idle_threshold;
2823 2823 ret = 0;
2824 2824 break;
2825 2825 }
2826 2826
2827 2827 case PM_SET_SYSTEM_THRESHOLD:
2828 2828 case PM_SET_CPU_THRESHOLD:
2829 2829 {
2830 2830 if ((int)arg < 0) {
2831 2831 PMD(PMD_ERROR, ("ioctl: %s: arg 0x%x < 0"
2832 2832 "--EINVAL\n", cmdstr, (int)arg))
2833 2833 ret = EINVAL;
2834 2834 break;
2835 2835 }
2836 2836 PMD(PMD_IOCTL, ("ioctl: %s: 0x%x 0t%d\n", cmdstr,
2837 2837 (int)arg, (int)arg))
2838 2838 if (cmd == PM_SET_SYSTEM_THRESHOLD)
2839 2839 pm_system_idle_threshold = (int)arg;
2840 2840 else {
2841 2841 pm_cpu_idle_threshold = (int)arg;
2842 2842 }
2843 2843 ddi_walk_devs(ddi_root_node(), pm_set_idle_thresh_walk,
2844 2844 (void *) &cmd);
2845 2845
2846 2846 ret = 0;
2847 2847 break;
2848 2848 }
2849 2849
2850 2850 case PM_IDLE_DOWN:
2851 2851 {
2852 2852 if (pm_timeout_idledown() != 0) {
2853 2853 ddi_walk_devs(ddi_root_node(),
2854 2854 pm_start_idledown, (void *)PMID_IOC);
2855 2855 }
2856 2856 ret = 0;
2857 2857 break;
2858 2858 }
2859 2859
2860 2860 case PM_GET_PM_STATE:
2861 2861 {
2862 2862 if (autopm_enabled) {
2863 2863 *rval_p = PM_SYSTEM_PM_ENABLED;
2864 2864 } else {
2865 2865 *rval_p = PM_SYSTEM_PM_DISABLED;
2866 2866 }
2867 2867 ret = 0;
2868 2868 break;
2869 2869 }
2870 2870
2871 2871 case PM_GET_CPUPM_STATE:
2872 2872 {
2873 2873 if (PM_POLLING_CPUPM || PM_EVENT_CPUPM)
2874 2874 *rval_p = PM_CPU_PM_ENABLED;
2875 2875 else if (PM_CPUPM_DISABLED)
2876 2876 *rval_p = PM_CPU_PM_DISABLED;
2877 2877 else
2878 2878 *rval_p = PM_CPU_PM_NOTSET;
2879 2879 ret = 0;
2880 2880 break;
2881 2881 }
2882 2882
2883 2883 case PM_GET_AUTOS3_STATE:
2884 2884 {
2885 2885 if (autoS3_enabled) {
2886 2886 *rval_p = PM_AUTOS3_ENABLED;
2887 2887 } else {
2888 2888 *rval_p = PM_AUTOS3_DISABLED;
2889 2889 }
2890 2890 ret = 0;
2891 2891 break;
2892 2892 }
2893 2893
2894 2894 case PM_GET_S3_SUPPORT_STATE:
2895 2895 {
2896 2896 if (pm_S3_enabled) {
2897 2897 *rval_p = PM_S3_SUPPORT_ENABLED;
2898 2898 } else {
2899 2899 *rval_p = PM_S3_SUPPORT_DISABLED;
2900 2900 }
2901 2901 ret = 0;
2902 2902 break;
2903 2903 }
2904 2904
2905 2905 /*
2906 2906 * pmconfig tells us if the platform supports S3
2907 2907 */
2908 2908 case PM_ENABLE_S3:
2909 2909 {
2910 2910 mutex_enter(&pm_scan_lock);
2911 2911 if (pm_S3_enabled) {
2912 2912 mutex_exit(&pm_scan_lock);
2913 2913 PMD(PMD_ERROR, ("ioctl: %s: EBUSY\n",
2914 2914 cmdstr))
2915 2915 ret = EBUSY;
2916 2916 break;
2917 2917 }
2918 2918 pm_S3_enabled = 1;
2919 2919 mutex_exit(&pm_scan_lock);
2920 2920 ret = 0;
2921 2921 break;
2922 2922 }
2923 2923
2924 2924 case PM_DISABLE_S3:
2925 2925 {
2926 2926 mutex_enter(&pm_scan_lock);
2927 2927 pm_S3_enabled = 0;
2928 2928 mutex_exit(&pm_scan_lock);
2929 2929 ret = 0;
2930 2930 break;
2931 2931 }
2932 2932
2933 2933 case PM_START_AUTOS3:
2934 2934 {
2935 2935 mutex_enter(&pm_scan_lock);
2936 2936 if (autoS3_enabled) {
2937 2937 mutex_exit(&pm_scan_lock);
2938 2938 PMD(PMD_ERROR, ("ioctl: %s: EBUSY\n",
2939 2939 cmdstr))
2940 2940 ret = EBUSY;
2941 2941 break;
2942 2942 }
2943 2943 autoS3_enabled = 1;
2944 2944 mutex_exit(&pm_scan_lock);
2945 2945 ret = 0;
2946 2946 break;
2947 2947 }
2948 2948
2949 2949 case PM_STOP_AUTOS3:
2950 2950 {
2951 2951 mutex_enter(&pm_scan_lock);
2952 2952 autoS3_enabled = 0;
2953 2953 mutex_exit(&pm_scan_lock);
2954 2954 ret = 0;
2955 2955 break;
2956 2956 }
2957 2957
2958 2958 case PM_ENABLE_CPU_DEEP_IDLE:
2959 2959 {
2960 2960 if (callb_execute_class(CB_CL_CPU_DEEP_IDLE,
2961 2961 PM_ENABLE_CPU_DEEP_IDLE) == NULL)
2962 2962 ret = 0;
2963 2963 else
2964 2964 ret = EBUSY;
2965 2965 break;
2966 2966 }
2967 2967 case PM_DISABLE_CPU_DEEP_IDLE:
2968 2968 {
2969 2969 if (callb_execute_class(CB_CL_CPU_DEEP_IDLE,
2970 2970 PM_DISABLE_CPU_DEEP_IDLE) == NULL)
2971 2971 ret = 0;
2972 2972 else
2973 2973 ret = EINVAL;
2974 2974 break;
2975 2975 }
2976 2976 case PM_DEFAULT_CPU_DEEP_IDLE:
2977 2977 {
2978 2978 if (callb_execute_class(CB_CL_CPU_DEEP_IDLE,
2979 2979 PM_DEFAULT_CPU_DEEP_IDLE) == NULL)
2980 2980 ret = 0;
2981 2981 else
2982 2982 ret = EBUSY;
2983 2983 break;
2984 2984 }
2985 2985
2986 2986 default:
2987 2987 /*
2988 2988 * Internal error, invalid ioctl description
2989 2989 * force debug entry even if pm_debug not set
2990 2990 */
2991 2991 #ifdef DEBUG
2992 2992 pm_log("invalid diptype %d for cmd %d (%s)\n",
2993 2993 pcip->diptype, cmd, pcip->name);
2994 2994 #endif
2995 2995 ASSERT(0);
2996 2996 return (EIO);
2997 2997 }
2998 2998 break;
2999 2999 }
3000 3000
3001 3001 default:
3002 3002 /*
3003 3003 * Internal error, invalid ioctl description
3004 3004 * force debug entry even if pm_debug not set
3005 3005 */
3006 3006 #ifdef DEBUG
3007 3007 pm_log("ioctl: invalid str_type %d for cmd %d (%s)\n",
3008 3008 pcip->str_type, cmd, pcip->name);
3009 3009 #endif
3010 3010 ASSERT(0);
3011 3011 return (EIO);
3012 3012 }
3013 3013 ASSERT(ret != 0x0badcafe); /* some cmd in wrong case! */
3014 3014 if (dipheld) {
3015 3015 ASSERT(dip);
3016 3016 PMD(PMD_DHR, ("ioctl: %s: releasing %s@%s(%s#%d) for "
3017 3017 "exiting pm_ioctl\n", cmdstr, PM_DEVICE(dip)))
3018 3018 PM_RELE(dip);
3019 3019 }
3020 3020 PMD(PMD_IOCTL, ("ioctl: %s: end, ret=%d\n", cmdstr, ret))
3021 3021 return (ret);
3022 3022 }
↓ open down ↓ |
2851 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX