1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11 /*
12 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
13 */
14
15 #include <sys/cmn_err.h>
16 #include <sys/ddi_periodic.h>
17 #include <sys/id_space.h>
18 #include <sys/kobj.h>
19 #include <sys/sysmacros.h>
20 #include <sys/systm.h>
21 #include <sys/taskq.h>
22 #include <sys/taskq_impl.h>
23 #include <sys/time.h>
24 #include <sys/types.h>
25 #include <sys/sdt.h>
26
27 /*
28 * The ddi_periodic_add(9F) Implementation
29 *
30 * This file contains the implementation of the ddi_periodic_add(9F) interface.
31 * It is a thin wrapper around the cyclic subsystem (see documentation in
32 * uts/common/os/cyclic.c), providing a DDI interface for registering
33 * (and unregistering) callbacks for periodic invocation at arbitrary
34 * interrupt levels, or in kernel context.
35 *
36 * Each call to ddi_periodic_add will result in a new opaque handle, as
37 * allocated from an id_space, a new "periodic" object (ddi_periodic_impl_t)
38 * and a registered cyclic.
39 *
40 * Operation
41 *
42 * Whenever the cyclic fires, our cyclic handler checks that the particular
43 * periodic is not dispatched already (we do not support overlapping execution
44 * of the consumer's handler function), and not yet cancelled. If both of
45 * these conditions hold, we mark the periodic as DPF_DISPATCHED and enqueue it
46 * to either the taskq (for DDI_IPL_0) or to one of the soft interrupt queues
47 * (DDI_IPL_1 to DDI_IPL_10).
48 *
49 * While the taskq (or soft interrupt handler) is handling a particular
50 * periodic, we mark it as DPF_EXECUTING. When complete, we reset both
51 * DPF_DISPATCHED and DPF_EXECUTING.
52 *
53 * Cancellation
54 *
55 * ddi_periodic_delete(9F) historically had spectacularly loose semantics with
56 * respect to cancellation concurrent with handler execution. These semantics
57 * are now tighter:
58 *
59 * 1. At most one invocation of ddi_periodic_delete(9F) will actually
60 * perform the deletion, all others will return immediately.
61 * 2. The invocation that performs the deletion will _block_ until
62 * the handler is no longer running, and all resources have been
63 * released.
64 *
 * We effect this model by removing the cancelling periodic from the
66 * global list and marking it DPF_CANCELLED. This will prevent further
67 * execution of the handler. We then wait on a CV until the DPF_EXECUTING
68 * and DPF_DISPATCHED flags are clear, which means the periodic is removed
69 * from all request queues, is no longer executing, and may be freed. At this
70 * point we return the opaque ID to the id_space and free the memory.
71 *
72 * NOTE:
73 * The ddi_periodic_add(9F) interface is presently limited to a minimum period
74 * of 10ms between firings.
75 */
76
/*
 * Tuneables:
 */
int ddi_periodic_max_id = 1024;		/* size of the opaque handle id_space */
int ddi_periodic_taskq_threadcount = 4;	/* threads servicing DDI_IPL_0 handlers */
hrtime_t ddi_periodic_resolution = 10000000;	/* minimum period: 10ms, in ns */

/*
 * Globals:
 */
static kmem_cache_t *periodic_cache;	/* allocator for ddi_periodic_impl_t */
static id_space_t *periodic_id_space;	/* source of opaque timeout_t handles */
static taskq_t *periodic_taskq;		/* dispatch queue for DDI_IPL_0 handlers */

/*
 * periodics_lock protects the list of all periodics (periodics), and
 * each of the soft interrupt request queues (periodic_softint_queue).
 *
 * Do not hold an individual periodic's lock while obtaining periodics_lock.
 * While in the periodic_softint_queue list, the periodic will be marked
 * DPF_DISPATCHED, and thus safe from frees.  Only the invocation of
 * i_untimeout() that removes the periodic from the global list is allowed
 * to free it.
 */
static kmutex_t periodics_lock;
static list_t periodics;
static list_t periodic_softint_queue[10];	/* for IPL1 up to IPL10 */
104
/*
 * Interrupt priority levels at which a periodic handler may be invoked.
 * PERI_IPL_0 denotes kernel (taskq) context; PERI_IPL_1 through PERI_IPL_10
 * correspond to the soft interrupt levels DDI_IPL_1 through DDI_IPL_10, and
 * double as (1-based) indices into periodic_softint_queue[].
 */
typedef enum periodic_ipl {
	PERI_IPL_0 = 0,
	PERI_IPL_1,
	PERI_IPL_2,
	PERI_IPL_3,
	PERI_IPL_4,
	PERI_IPL_5,
	PERI_IPL_6,
	PERI_IPL_7,
	PERI_IPL_8,
	PERI_IPL_9,
	PERI_IPL_10
} periodic_ipl_t;
118
119 static char *
120 periodic_handler_symbol(ddi_periodic_impl_t *dpr)
121 {
122 ulong_t off;
123
124 return (kobj_getsymname((uintptr_t)dpr->dpr_handler, &off));
125 }
126
/*
 * Run the consumer's handler for a dispatched periodic, unless the periodic
 * was cancelled after dispatch.  On return, the DPF_DISPATCHED (and, if set,
 * DPF_EXECUTING) flags are clear and any waiters on dpr_cv have been woken.
 *
 * This function may be called either from a soft interrupt handler
 * (ddi_periodic_softintr), or as a taskq worker function.
 */
static void
periodic_execute(void *arg)
{
	ddi_periodic_impl_t *dpr = arg;
	mutex_enter(&dpr->dpr_lock);

	/*
	 * We must be DISPATCHED, but not yet EXECUTING:
	 */
	VERIFY((dpr->dpr_flags & (DPF_DISPATCHED | DPF_EXECUTING)) ==
	    DPF_DISPATCHED);
	VERIFY(dpr->dpr_thread == NULL);

	if (!(dpr->dpr_flags & DPF_CANCELLED)) {
		/*
		 * Snapshot level and fire count for the DTrace probes below,
		 * since we will drop dpr_lock around the handler call:
		 */
		int level = dpr->dpr_level;
		uint64_t count = dpr->dpr_fire_count;
		/*
		 * If we have not yet been cancelled, then
		 * mark us executing:
		 */
		dpr->dpr_flags |= DPF_EXECUTING;
		/*
		 * Record the executing thread, so that i_untimeout() can
		 * detect (and panic on) deletion from within the handler:
		 */
		dpr->dpr_thread = curthread;
		mutex_exit(&dpr->dpr_lock);

		/*
		 * Execute the handler, without holding locks:
		 */
		DTRACE_PROBE4(ddi__periodic__execute, void *, dpr->dpr_handler,
		    void *, dpr->dpr_arg, int, level, uint64_t, count);
		(*dpr->dpr_handler)(dpr->dpr_arg);
		DTRACE_PROBE4(ddi__periodic__done, void *, dpr->dpr_handler,
		    void *, dpr->dpr_arg, int, level, uint64_t, count);

		mutex_enter(&dpr->dpr_lock);
		dpr->dpr_thread = NULL;
		dpr->dpr_fire_count++;
	}

	/*
	 * We're done with this periodic for now, so release it and
	 * wake anybody that was waiting for us to be finished:
	 */
	dpr->dpr_flags &= ~(DPF_DISPATCHED | DPF_EXECUTING);
	cv_broadcast(&dpr->dpr_cv);
	mutex_exit(&dpr->dpr_lock);
}
177
178 void
179 ddi_periodic_softintr(int level)
180 {
181 ddi_periodic_impl_t *dpr;
182 VERIFY(level >= PERI_IPL_1 && level <= PERI_IPL_10);
183
184 mutex_enter(&periodics_lock);
185 /*
186 * Pull the first scheduled periodic off the queue for this priority
187 * level:
188 */
189 while ((dpr = list_remove_head(&periodic_softint_queue[level - 1])) !=
190 NULL) {
191 mutex_exit(&periodics_lock);
192 /*
193 * And execute it:
194 */
195 periodic_execute(dpr);
196 mutex_enter(&periodics_lock);
197 }
198 mutex_exit(&periodics_lock);
199 }
200
201 void
202 ddi_periodic_init(void)
203 {
204 int i;
205
206 /*
207 * Create a kmem_cache for request tracking objects, and a list
208 * to store them in so we can later delete based on opaque handles:
209 */
210 periodic_cache = kmem_cache_create("ddi_periodic",
211 sizeof (ddi_periodic_impl_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
212 list_create(&periodics, sizeof (ddi_periodic_impl_t),
213 offsetof(ddi_periodic_impl_t, dpr_link));
214
215 /*
216 * Initialise the identifier space for ddi_periodic_add(9F):
217 */
218 periodic_id_space = id_space_create("ddi_periodic", 1,
219 ddi_periodic_max_id);
220
221 /*
222 * Initialise the request queue for each soft interrupt level:
223 */
224 for (i = PERI_IPL_1; i <= PERI_IPL_10; i++) {
225 list_create(&periodic_softint_queue[i - 1],
226 sizeof (ddi_periodic_impl_t), offsetof(ddi_periodic_impl_t,
227 dpr_softint_link));
228 }
229
230 /*
231 * Create the taskq for running PERI_IPL_0 handlers. This taskq will
232 * _only_ be used with taskq_dispatch_ent(), and a taskq_ent_t
233 * pre-allocated with the ddi_periodic_impl_t.
234 */
235 periodic_taskq = taskq_create_instance("ddi_periodic_taskq", -1,
236 ddi_periodic_taskq_threadcount, maxclsyspri, 0, 0, 0);
237
238 /*
239 * Initialize the mutex lock used for the soft interrupt request
240 * queues.
241 */
242 mutex_init(&periodics_lock, NULL, MUTEX_ADAPTIVE, NULL);
243 }
244
/*
 * Tear down the ddi_periodic_add(9F) subsystem.  Any periodics still
 * registered are forcibly deleted (with a DEBUG-only leak warning) before
 * the shared resources are destroyed.
 */
void
ddi_periodic_fini(void)
{
	int i;
	ddi_periodic_impl_t *dpr;

	/*
	 * Find all periodics that have not yet been unregistered and,
	 * on DEBUG bits, print a warning about this resource leak.
	 */
	mutex_enter(&periodics_lock);
	while ((dpr = list_head(&periodics)) != NULL) {
#ifdef DEBUG
		printf("DDI periodic handler not deleted (id=%lx, hdlr=%s)\n",
		    (unsigned long)dpr->dpr_id, periodic_handler_symbol(dpr));
#endif

		/*
		 * Drop periodics_lock, since i_untimeout() takes it in order
		 * to remove the periodic from the global list.  Re-evaluating
		 * list_head() after reacquiring the lock keeps this safe.
		 */
		mutex_exit(&periodics_lock);
		/*
		 * Delete the periodic ourselves:
		 */
		i_untimeout((timeout_t)(uintptr_t)dpr->dpr_id);
		mutex_enter(&periodics_lock);
	}
	mutex_exit(&periodics_lock);

	/*
	 * At this point there are no remaining cyclics, so clean up the
	 * remaining resources:
	 */
	taskq_destroy(periodic_taskq);
	periodic_taskq = NULL;

	id_space_destroy(periodic_id_space);
	periodic_id_space = NULL;

	kmem_cache_destroy(periodic_cache);
	periodic_cache = NULL;

	list_destroy(&periodics);
	for (i = PERI_IPL_1; i <= PERI_IPL_10; i++)
		list_destroy(&periodic_softint_queue[i - 1]);

	mutex_destroy(&periodics_lock);
}
290
/*
 * Cyclic handler: fires once per interval for each registered periodic, and
 * dispatches the periodic's consumer handler for execution — onto the taskq
 * for PERI_IPL_0, or onto the matching soft interrupt queue otherwise.  It
 * does not run the consumer handler itself.
 */
static void
periodic_cyclic_handler(void *arg)
{
	extern void sir_on(int);
	ddi_periodic_impl_t *dpr = arg;

	mutex_enter(&dpr->dpr_lock);
	/*
	 * If we've been cancelled, or we're already dispatched, then exit
	 * immediately: we do not support overlapping execution of the
	 * consumer's handler function.
	 */
	if (dpr->dpr_flags & (DPF_CANCELLED | DPF_DISPATCHED)) {
		mutex_exit(&dpr->dpr_lock);
		return;
	}
	/* EXECUTING is only ever set while DISPATCHED is also set. */
	VERIFY(!(dpr->dpr_flags & DPF_EXECUTING));

	/*
	 * This periodic is not presently dispatched, so dispatch it now.
	 * DPF_DISPATCHED keeps the periodic safe from frees while it sits
	 * on a request queue.
	 */
	dpr->dpr_flags |= DPF_DISPATCHED;
	mutex_exit(&dpr->dpr_lock);

	if (dpr->dpr_level == PERI_IPL_0) {
		/*
		 * DDI_IPL_0 periodics are dispatched onto the taskq, using
		 * the taskq_ent_t pre-allocated in the periodic itself:
		 */
		taskq_dispatch_ent(periodic_taskq, periodic_execute,
		    dpr, 0, &dpr->dpr_taskq_ent);
	} else {
		/*
		 * Higher priority periodics are handled by a soft
		 * interrupt handler.  Enqueue us for processing and
		 * fire the soft interrupt:
		 */
		mutex_enter(&periodics_lock);
		list_insert_tail(&periodic_softint_queue[dpr->dpr_level - 1],
		    dpr);
		mutex_exit(&periodics_lock);

		/*
		 * Raise the soft interrupt level for this periodic:
		 */
		sir_on(dpr->dpr_level);
	}
}
337
338 static void
339 periodic_destroy(ddi_periodic_impl_t *dpr)
340 {
341 if (dpr == NULL)
342 return;
343
344 /*
345 * By now, we should have a periodic that is not busy, and has been
346 * cancelled:
347 */
348 VERIFY(dpr->dpr_flags == DPF_CANCELLED);
349 VERIFY(dpr->dpr_thread == NULL);
350
351 id_free(periodic_id_space, dpr->dpr_id);
352 cv_destroy(&dpr->dpr_cv);
353 mutex_destroy(&dpr->dpr_lock);
354 kmem_cache_free(periodic_cache, dpr);
355 }
356
357 static ddi_periodic_impl_t *
358 periodic_create(void)
359 {
360 ddi_periodic_impl_t *dpr;
361
362 dpr = kmem_cache_alloc(periodic_cache, KM_SLEEP);
363 bzero(dpr, sizeof (*dpr));
364 dpr->dpr_id = id_alloc(periodic_id_space);
365 mutex_init(&dpr->dpr_lock, NULL, MUTEX_ADAPTIVE, NULL);
366 cv_init(&dpr->dpr_cv, NULL, CV_DEFAULT, NULL);
367
368 return (dpr);
369 }
370
/*
 * This function provides the implementation for the ddi_periodic_add(9F)
 * interface.  It registers a periodic handler and returns an opaque identifier
 * that can be unregistered via ddi_periodic_delete(9F)/i_untimeout().
 *
 * func/arg:  the consumer's handler and its argument.
 * interval:  requested firing interval in nanoseconds; rounded up to a
 *            multiple of ddi_periodic_resolution.
 * level:     0 for kernel (taskq) context, or a soft interrupt level 1-10.
 *
 * It may be called in user or kernel context, provided cpu_lock is not held.
 */
timeout_t
i_timeout(void (*func)(void *), void *arg, hrtime_t interval, int level)
{
	cyc_handler_t cyh;
	cyc_time_t cyt;
	ddi_periodic_impl_t *dpr;

	VERIFY(func != NULL);
	VERIFY(level >= 0 && level <= 10);

	/*
	 * Allocate object to track this periodic:
	 */
	dpr = periodic_create();
	dpr->dpr_level = level;
	dpr->dpr_handler = func;
	dpr->dpr_arg = arg;

	/*
	 * The minimum supported interval between firings of the periodic
	 * handler is 10ms; see ddi_periodic_add(9F) for more details.  If a
	 * shorter interval is requested, round up.
	 */
	if (ddi_periodic_resolution > interval) {
		cmn_err(CE_WARN,
		    "The periodic timeout (handler=%s, interval=%lld) "
		    "requests a finer interval than the supported resolution. "
		    "It rounds up to %lld\n", periodic_handler_symbol(dpr),
		    interval, ddi_periodic_resolution);
		interval = ddi_periodic_resolution;
	}

	/*
	 * Ensure that the interval is an even multiple of the base resolution
	 * that is at least as long as the requested interval.
	 */
	dpr->dpr_interval = roundup(interval, ddi_periodic_resolution);

	/*
	 * Create the underlying cyclic.  The cyclic handler merely
	 * _dispatches_ the periodic (see periodic_cyclic_handler), so
	 * CY_LOCK_LEVEL is sufficient regardless of the consumer's level.
	 */
	cyh.cyh_func = periodic_cyclic_handler;
	cyh.cyh_arg = dpr;
	cyh.cyh_level = CY_LOCK_LEVEL;

	/* cyt_when = 0: no specific start time requested for the cyclic. */
	cyt.cyt_when = 0;
	cyt.cyt_interval = dpr->dpr_interval;

	/* cyclic_add() requires cpu_lock; hence the caller must not hold it. */
	mutex_enter(&cpu_lock);
	dpr->dpr_cyclic_id = cyclic_add(&cyh, &cyt);
	mutex_exit(&cpu_lock);

	/*
	 * Make the id visible to ddi_periodic_delete(9F) before we
	 * return it:
	 */
	mutex_enter(&periodics_lock);
	list_insert_tail(&periodics, dpr);
	mutex_exit(&periodics_lock);

	return ((timeout_t)(uintptr_t)dpr->dpr_id);
}
440
/*
 * This function provides the implementation for the ddi_periodic_delete(9F)
 * interface.  It cancels a periodic handler previously registered through
 * ddi_periodic_add(9F)/i_timeout(), blocking until the handler is no longer
 * running and all resources have been released.  An unknown id, or one
 * already being deleted by another thread, is a no-op.
 *
 * It may be called in user or kernel context, provided cpu_lock is not held.
 * It may NOT be called from within a periodic handler.
 */
void
i_untimeout(timeout_t id)
{
	ddi_periodic_impl_t *dpr;

	/*
	 * Find the periodic in the list of all periodics and remove it.
	 * If we find in (and remove it from) the global list, we have
	 * license to free it once it is no longer busy.
	 */
	mutex_enter(&periodics_lock);
	for (dpr = list_head(&periodics); dpr != NULL; dpr =
	    list_next(&periodics, dpr)) {
		if (dpr->dpr_id == (id_t)(uintptr_t)id) {
			list_remove(&periodics, dpr);
			break;
		}
	}
	mutex_exit(&periodics_lock);

	/*
	 * We could not find a periodic for this id, so bail out.  This also
	 * covers the case where another invocation already removed it; that
	 * invocation owns the deletion.
	 */
	if (dpr == NULL)
		return;

	mutex_enter(&dpr->dpr_lock);
	/*
	 * We should be the only one trying to cancel this periodic, since
	 * only the thread that removed it from the global list gets here:
	 */
	VERIFY(!(dpr->dpr_flags & DPF_CANCELLED));
	/*
	 * Removing a periodic from within its own handler function will
	 * cause a deadlock, so panic explicitly.
	 */
	if (dpr->dpr_thread == curthread) {
		panic("ddi_periodic_delete(%lx) called from its own handler\n",
		    (unsigned long)dpr->dpr_id);
	}
	/*
	 * Mark the periodic as cancelled, which prevents any further
	 * dispatch or execution of the handler:
	 */
	dpr->dpr_flags |= DPF_CANCELLED;
	mutex_exit(&dpr->dpr_lock);

	/*
	 * Cancel our cyclic.  cyclic_remove() guarantees that the cyclic
	 * handler will not run again after it returns.  Note that the cyclic
	 * handler merely _dispatches_ the periodic, so this does _not_ mean
	 * the periodic handler is also finished running.
	 */
	mutex_enter(&cpu_lock);
	cyclic_remove(dpr->dpr_cyclic_id);
	mutex_exit(&cpu_lock);

	/*
	 * Wait until the periodic handler is no longer running; once both
	 * DPF_DISPATCHED and DPF_EXECUTING are clear, the periodic is off
	 * all request queues and may be freed:
	 */
	mutex_enter(&dpr->dpr_lock);
	while (dpr->dpr_flags & (DPF_DISPATCHED | DPF_EXECUTING)) {
		cv_wait(&dpr->dpr_cv, &dpr->dpr_lock);
	}
	mutex_exit(&dpr->dpr_lock);

	periodic_destroy(dpr);
}