/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2013, Joyent, Inc.  All rights reserved.
 */

#include <sys/cmn_err.h>
#include <sys/ddi_timer.h>
#include <sys/id_space.h>
#include <sys/kobj.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/taskq.h>
#include <sys/taskq_impl.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/sdt.h>

/*
 * The ddi_periodic_add(9F) Implementation
 *
 * This file contains the implementation of the ddi_periodic_add(9F) interface.
 * It is a thin wrapper around the cyclic subsystem (see documentation in
 * uts/common/os/cyclic.c), providing a DDI interface for registering
 * (and unregistering) callbacks for periodic invocation at arbitrary
 * interrupt levels, or in kernel context.
 *
 * Each call to ddi_periodic_add(9F) results in a new opaque handle
 * (allocated from an id_space), a new "periodic" object
 * (ddi_periodic_impl_t), and a registered cyclic.
 *
 * Operation
 *
 * Whenever the cyclic fires, our cyclic handler checks that the particular
 * periodic is not already dispatched (we do not support overlapping execution
 * of the consumer's handler function), and not yet cancelled.  If both of
 * these conditions hold, we mark the periodic as DPF_DISPATCHED and enqueue
 * it either to the taskq (for DDI_IPL_0) or to one of the soft interrupt
 * queues (DDI_IPL_1 to DDI_IPL_10).
 *
 * While the taskq (or soft interrupt handler) is handling a particular
 * periodic, we mark it as DPF_EXECUTING.  When complete, we reset both
 * DPF_DISPATCHED and DPF_EXECUTING.
 *
 * Cancellation
 *
 * ddi_periodic_delete(9F) historically had spectacularly loose semantics with
 * respect to cancellation concurrent with handler execution.  These semantics
 * are now tighter:
 *
 *	1.  At most one invocation of ddi_periodic_delete(9F) will actually
 *	    perform the deletion; all others will return immediately.
 *	2.  The invocation that performs the deletion will _block_ until
 *	    the handler is no longer running, and all resources have been
 *	    released.
 *
 * We effect this model by removing the cancelling periodic from the
 * global list and marking it DPF_CANCELLED.  This will prevent further
 * execution of the handler.  We then wait on a CV until the DPF_EXECUTING
 * and DPF_DISPATCHED flags are clear, which means the periodic is removed
 * from all request queues, is no longer executing, and may be freed.  At this
 * point we return the opaque ID to the id_space and free the memory.
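 *
 * To illustrate the consumer-facing interface, a minimal sketch (my_tick
 * and my_softc_t are hypothetical driver code, not part of this file): a
 * driver that wants my_tick() invoked in kernel context roughly once per
 * second, with the stricter cancellation semantics above at detach time,
 * would do something like:
 *
 *	static void
 *	my_tick(void *arg)
 *	{
 *		my_softc_t *sc = arg;
 *		(do periodic work; a DDI_IPL_0 handler runs in kernel
 *		context, on the shared taskq, so it may block)
 *	}
 *
 *	sc->sc_periodic = ddi_periodic_add(my_tick, sc, NANOSEC,
 *	    DDI_IPL_0);
 *
 * and later, in detach:
 *
 *	ddi_periodic_delete(sc->sc_periodic);
 *
 * where ddi_periodic_delete(9F) blocks until any in-flight my_tick()
 * invocation has returned.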
 *
 * NOTE:
 * The ddi_periodic_add(9F) interface is presently limited to a minimum period
 * of 10ms between firings.
 */

/*
 * Tuneables:
 */
int ddi_periodic_max_id = 1024;			/* upper bound of the id_space */
int ddi_periodic_taskq_threadcount = 4;		/* threads for DDI_IPL_0 taskq */
hrtime_t ddi_periodic_resolution = 10000000;	/* 10ms, in nanoseconds */

/*
 * Globals:
 */
static kmem_cache_t *periodic_cache;
static id_space_t *periodic_id_space;
static taskq_t *periodic_taskq;

/*
 * periodics_lock protects the list of all periodics (periodics), and
 * each of the soft interrupt request queues (periodic_softint_queue).
 *
 * Do not hold an individual periodic's lock while obtaining periodics_lock.
 * While in the periodic_softint_queue list, the periodic will be marked
 * DPF_DISPATCHED, and thus safe from being freed.  Only the invocation of
 * i_untimeout() that removes the periodic from the global list is allowed
 * to free it.
 */
static kmutex_t periodics_lock;
static list_t periodics;
static list_t periodic_softint_queue[10];	/* for IPL1 up to IPL10 */
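
/*
 * A minimal sketch of the implied lock ordering (dpr stands for any
 * ddi_periodic_impl_t); the reverse order could deadlock against a thread
 * that already holds periodics_lock and is waiting on the periodic:
 *
 *	mutex_enter(&periodics_lock);	(list lock first, if both are needed)
 *	mutex_enter(&dpr->dpr_lock);
 *	...
 *	mutex_exit(&dpr->dpr_lock);
 *	mutex_exit(&periodics_lock);
 */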

typedef enum periodic_ipl {
	PERI_IPL_0 = 0,
	PERI_IPL_1,
	PERI_IPL_2,
	PERI_IPL_3,
	PERI_IPL_4,
	PERI_IPL_5,
	PERI_IPL_6,
	PERI_IPL_7,
	PERI_IPL_8,
	PERI_IPL_9,
	PERI_IPL_10,
} periodic_ipl_t;

/*
 * This function may be called either from a soft interrupt handler
 * (ddi_periodic_softintr), or as a taskq worker function.
 */
static void
periodic_execute(void *arg)
{
	ddi_periodic_impl_t *dpr = arg;
	mutex_enter(&dpr->dpr_lock);

	/*
	 * We must be DISPATCHED, but not yet EXECUTING:
	 */
	VERIFY((dpr->dpr_flags & (DPF_DISPATCHED | DPF_EXECUTING)) ==
	    DPF_DISPATCHED);

	if (!(dpr->dpr_flags & DPF_CANCELLED)) {
		int level = dpr->dpr_level;
		uint64_t count = dpr->dpr_fire_count;
		/*
		 * If we have not yet been cancelled, then
		 * mark us executing:
		 */
		dpr->dpr_flags |= DPF_EXECUTING;
		mutex_exit(&dpr->dpr_lock);

		/*
		 * Execute the handler, without holding locks:
		 */
		DTRACE_PROBE4(ddi__periodic__execute, void *, dpr->dpr_handler,
		    void *, dpr->dpr_arg, int, level, uint64_t, count);
		(*dpr->dpr_handler)(dpr->dpr_arg);
		DTRACE_PROBE4(ddi__periodic__done, void *, dpr->dpr_handler,
		    void *, dpr->dpr_arg, int, level, uint64_t, count);

		mutex_enter(&dpr->dpr_lock);
		dpr->dpr_fire_count++;
	}

	/*
	 * We're done with this periodic for now, so release it and
	 * wake anybody that was waiting for us to be finished:
	 */
	dpr->dpr_flags &= ~(DPF_DISPATCHED | DPF_EXECUTING);
	cv_broadcast(&dpr->dpr_cv);
	mutex_exit(&dpr->dpr_lock);
}
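
/*
 * To summarize the flag protocol that periodic_cyclic_handler() and
 * periodic_execute() maintain between them (a sketch of the existing
 * states, not a new mechanism):
 *
 *	(idle) --cyclic fires-->    DPF_DISPATCHED
 *	       --handler starts-->  DPF_DISPATCHED | DPF_EXECUTING
 *	       --handler returns--> (idle), with cv_broadcast(&dpr->dpr_cv)
 *
 * DPF_CANCELLED may additionally be set at any time by i_untimeout(); it
 * is never cleared, prevents any further dispatch, and causes
 * periodic_execute() to skip the consumer's handler.
 */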

void
ddi_periodic_softintr(int level)
{
	ddi_periodic_impl_t *dpr;
	VERIFY(level >= PERI_IPL_1 && level <= PERI_IPL_10);

	mutex_enter(&periodics_lock);
	/*
	 * Pull the first scheduled periodic off the queue for this priority
	 * level:
	 */
	while ((dpr = list_remove_head(&periodic_softint_queue[level - 1]))
	    != NULL) {
		mutex_exit(&periodics_lock);
		/*
		 * And execute it:
		 */
		periodic_execute(dpr);
		mutex_enter(&periodics_lock);
	}
	mutex_exit(&periodics_lock);
}

void
ddi_periodic_init(void)
{
	int i;

	/*
	 * Create a kmem_cache for request tracking objects, and a list
	 * to store them in so we can later delete based on opaque handles:
	 */
	periodic_cache = kmem_cache_create("ddi_periodic",
	    sizeof (ddi_periodic_impl_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
	list_create(&periodics, sizeof (ddi_periodic_impl_t),
	    offsetof(ddi_periodic_impl_t, dpr_link));

	/*
	 * Initialize the identifier space for ddi_periodic_add(9F):
	 */
	periodic_id_space = id_space_create("ddi_periodic", 1,
	    ddi_periodic_max_id);

	/*
	 * Initialize the request queue for each soft interrupt level:
	 */
	for (i = PERI_IPL_1; i <= PERI_IPL_10; i++) {
		list_create(&periodic_softint_queue[i - 1],
		    sizeof (ddi_periodic_impl_t), offsetof(ddi_periodic_impl_t,
		    dpr_softint_link));
	}

	/*
	 * Create the taskq for running PERI_IPL_0 handlers.  This taskq will
	 * _only_ be used with taskq_dispatch_ent(), and a taskq_ent_t
	 * pre-allocated with the ddi_periodic_impl_t.
	 */
	periodic_taskq = taskq_create_instance("ddi_periodic_taskq", -1,
	    ddi_periodic_taskq_threadcount, maxclsyspri, 0, 0, 0);

	/*
	 * Initialize the mutex lock used for the soft interrupt request
	 * queues.
	 */
	mutex_init(&periodics_lock, NULL, MUTEX_ADAPTIVE, NULL);
}

static void
periodic_cyclic_handler(void *arg)
{
	extern void sir_on(int);
	ddi_periodic_impl_t *dpr = arg;

	mutex_enter(&dpr->dpr_lock);
	/*
	 * If we've been cancelled, or we're already dispatched, then exit
	 * immediately:
	 */
	if (dpr->dpr_flags & (DPF_CANCELLED | DPF_DISPATCHED)) {
		mutex_exit(&dpr->dpr_lock);
		return;
	}
	VERIFY(!(dpr->dpr_flags & DPF_EXECUTING));

	/*
	 * This periodic is not presently dispatched, so dispatch it now:
	 */
	dpr->dpr_flags |= DPF_DISPATCHED;
	mutex_exit(&dpr->dpr_lock);

	if (dpr->dpr_level == PERI_IPL_0) {
		/*
		 * DDI_IPL_0 periodics are dispatched onto the taskq:
		 */
		taskq_dispatch_ent(periodic_taskq, periodic_execute,
		    dpr, 0, &dpr->dpr_taskq_ent);
	} else {
		/*
		 * Higher priority periodics are handled by a soft
		 * interrupt handler.  Enqueue us for processing and
		 * fire the soft interrupt:
		 */
		mutex_enter(&periodics_lock);
		list_insert_tail(&periodic_softint_queue[dpr->dpr_level - 1],
		    dpr);
		mutex_exit(&periodics_lock);

		/*
		 * Raise the soft interrupt level for this periodic:
		 */
		sir_on(dpr->dpr_level);
	}
}

static void
periodic_destroy(ddi_periodic_impl_t *dpr)
{
	if (dpr == NULL)
		return;

	/*
	 * By now, we should have a periodic that is not busy, and has been
	 * cancelled:
	 */
	VERIFY(dpr->dpr_flags == DPF_CANCELLED);

	id_free(periodic_id_space, dpr->dpr_id);
	kmem_cache_free(periodic_cache, dpr);
}

static ddi_periodic_impl_t *
periodic_create(void)
{
	ddi_periodic_impl_t *dpr;

	dpr = kmem_cache_alloc(periodic_cache, KM_SLEEP);
	bzero(dpr, sizeof (*dpr));
	dpr->dpr_id = id_alloc(periodic_id_space);
	mutex_init(&dpr->dpr_lock, NULL, MUTEX_ADAPTIVE, NULL);
	cv_init(&dpr->dpr_cv, NULL, CV_DEFAULT, NULL);

	return (dpr);
}

timeout_t
i_timeout(void (*func)(void *), void *arg, hrtime_t interval, int level)
{
	cyc_handler_t cyh;
	cyc_time_t cyt;
	ddi_periodic_impl_t *dpr;

	VERIFY(func != NULL);
	VERIFY(level >= 0 && level <= 10);

	/*
	 * Allocate an object to track this periodic:
	 */
	dpr = periodic_create();
	dpr->dpr_level = level;
	dpr->dpr_handler = func;
	dpr->dpr_arg = arg;

	/*
	 * The resolution must be finer than or equal to the requested
	 * interval.  If it is not, round the interval up to the resolution.
	 * Note that there is currently a restriction: regardless of the
	 * clock resolution in use, the timer resolution is fixed at 10ms,
	 * so even on a 1ms resolution timer the minimum interval is 10ms.
	 */
	if (ddi_periodic_resolution > interval) {
		uintptr_t pc = (uintptr_t)dpr->dpr_handler;
		ulong_t off;
		cmn_err(CE_WARN,
		    "The periodic timeout (handler=%s, interval=%lld) "
		    "requests a finer interval than the supported resolution. "
		    "It rounds up to %lld\n", kobj_getsymname(pc, &off),
		    interval, ddi_periodic_resolution);
		interval = ddi_periodic_resolution;
	}

	/*
	 * If the specified interval is already a multiple of the resolution,
	 * use it as is; otherwise, round it up to the next multiple of the
	 * timer resolution.
	 */
	dpr->dpr_interval = roundup(interval, ddi_periodic_resolution);

	/*
	 * Create the underlying cyclic:
	 */
	cyh.cyh_func = periodic_cyclic_handler;
	cyh.cyh_arg = dpr;
	cyh.cyh_level = CY_LOCK_LEVEL;

	cyt.cyt_when = roundup(gethrtime() + dpr->dpr_interval,
	    ddi_periodic_resolution);
	cyt.cyt_interval = dpr->dpr_interval;

	mutex_enter(&cpu_lock);
	dpr->dpr_cyclic_id = cyclic_add(&cyh, &cyt);
	mutex_exit(&cpu_lock);

	/*
	 * Make the id visible to ddi_periodic_delete(9F) before we
	 * return it:
	 */
	mutex_enter(&periodics_lock);
	list_insert_tail(&periodics, dpr);
	mutex_exit(&periodics_lock);

	return ((timeout_t)(uintptr_t)dpr->dpr_id);
}
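
/*
 * As a worked example of the rounding above, with the default 10ms
 * resolution (ddi_periodic_resolution == 10000000):
 *
 *	i_timeout(func, arg, 25000000, 0)	25ms is coarser than the
 *						resolution, so it simply
 *						rounds up to a 30ms period
 *	i_timeout(func, arg, 5000000, 0)	5ms is finer than the
 *						resolution; it is raised to
 *						10ms (with a warning) and
 *						the periodic fires every 10ms
 */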

/*
 * void
 * i_untimeout(timeout_t id)
 *
 * Overview
 *	i_untimeout() is an internal function that cancels a request
 *	previously issued by i_timeout().  It is used to implement
 *	ddi_periodic_delete(9F).
 *
 * Argument
 *	id: the opaque timeout_t value previously returned by i_timeout().
 *
 * Return value
 *	Nothing.
 *
 * Caller's context
 *	i_untimeout() can be called in user, kernel or interrupt context.
 *	It cannot be called in high interrupt context.
 *
 * Note.  This function is used by ddi_periodic_delete(), which cannot
 * be called in interrupt context.  As a result, in practice this function
 * is only called in user or kernel context.
 */
void
i_untimeout(timeout_t id)
{
	ddi_periodic_impl_t *dpr;

	/*
	 * Find the periodic in the list of all periodics and remove it.
	 * If we find it in (and remove it from) the global list, we have
	 * license to free it once it is no longer busy.
	 */
	mutex_enter(&periodics_lock);
	for (dpr = list_head(&periodics); dpr != NULL; dpr =
	    list_next(&periodics, dpr)) {
		if (dpr->dpr_id == (id_t)(uintptr_t)id) {
			list_remove(&periodics, dpr);
			break;
		}
	}
	mutex_exit(&periodics_lock);

	/*
	 * We could not find a periodic for this id, so bail out:
	 */
	if (dpr == NULL)
		return;

	mutex_enter(&dpr->dpr_lock);
	/*
	 * We should be the only one trying to cancel this periodic:
	 */
	VERIFY(!(dpr->dpr_flags & DPF_CANCELLED));
	/*
	 * Mark the periodic as cancelled:
	 */
	dpr->dpr_flags |= DPF_CANCELLED;
	mutex_exit(&dpr->dpr_lock);

	/*
	 * Cancel our cyclic.  cyclic_remove() guarantees that the cyclic
	 * handler will not run again after it returns.  Note that the cyclic
	 * handler merely _dispatches_ the periodic, so this does _not_ mean
	 * the periodic handler is also finished running.
	 */
	mutex_enter(&cpu_lock);
	cyclic_remove(dpr->dpr_cyclic_id);
	mutex_exit(&cpu_lock);

	/*
	 * Wait until the periodic handler is no longer running:
	 */
	mutex_enter(&dpr->dpr_lock);
	while (dpr->dpr_flags & (DPF_DISPATCHED | DPF_EXECUTING)) {
		cv_wait(&dpr->dpr_cv, &dpr->dpr_lock);
	}
	mutex_exit(&dpr->dpr_lock);

	periodic_destroy(dpr);
}