1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26 /*
27 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
28 */
29
30 #include <sys/cmn_err.h>
31 #include <sys/ddi_timer.h>
32 #include <sys/id_space.h>
33 #include <sys/kobj.h>
34 #include <sys/sysmacros.h>
35 #include <sys/systm.h>
36 #include <sys/taskq.h>
37 #include <sys/taskq_impl.h>
38 #include <sys/time.h>
39 #include <sys/types.h>
40 #include <sys/sdt.h>
41
42 /*
43 * The ddi_periodic_add(9F) Implementation
44 *
45 * This file contains the implementation of the ddi_periodic_add(9F) interface.
46 * It is a thin wrapper around the cyclic subsystem (see documentation in
47 * uts/common/os/cyclic.c), providing a DDI interface for registering
48 * (and unregistering) callbacks for periodic invocation at arbitrary
49 * interrupt levels, or in kernel context.
50 *
51 * Each call to ddi_periodic_add will result in a new opaque handle, as
111 * While in the periodic_softint_queue list, the periodic will be marked
112 * DPF_DISPATCHED, and thus safe from frees. Only the invocation of
113 * i_untimeout() that removes the periodic from the global list is allowed
114 * to free it.
115 */
116 static kmutex_t periodics_lock;
117 static list_t periodics;
118 static list_t periodic_softint_queue[10]; /* for IPL1 up to IPL10 */
119
/*
 * Soft interrupt priority levels at which periodic handlers may be
 * dispatched.  PERI_IPL_0 denotes dispatch via taskq in regular kernel
 * context rather than at an elevated interrupt level.
 *
 * Note: no comma after the final enumerator; a trailing enumerator
 * comma is a constraint violation in C90 and inconsistent with the
 * style used elsewhere in this code.
 */
typedef enum periodic_ipl {
	PERI_IPL_0 = 0,
	PERI_IPL_1,
	PERI_IPL_2,
	PERI_IPL_3,
	PERI_IPL_4,
	PERI_IPL_5,
	PERI_IPL_6,
	PERI_IPL_7,
	PERI_IPL_8,
	PERI_IPL_9,
	PERI_IPL_10
} periodic_ipl_t;
133
/*
 * Invoke a single dispatched periodic's handler.  This function may be
 * called either from a soft interrupt handler (ddi_periodic_softintr),
 * for handlers registered at IPL 1 through 10, or as a taskq worker
 * function, for handlers registered at level 0.
 *
 * On entry the periodic must be DPF_DISPATCHED but not DPF_EXECUTING;
 * on return both flags are clear and any waiters on dpr_cv (e.g. an
 * i_untimeout() waiting for the handler to finish) have been woken.
 */
static void
periodic_execute(void *arg)
{
	ddi_periodic_impl_t *dpr = arg;
	mutex_enter(&dpr->dpr_lock);

	/*
	 * We must be DISPATCHED, but not yet EXECUTING:
	 */
	VERIFY((dpr->dpr_flags & (DPF_DISPATCHED | DPF_EXECUTING)) ==
	    DPF_DISPATCHED);

	if (!(dpr->dpr_flags & DPF_CANCELLED)) {
		/*
		 * Snapshot the level and fire count while dpr_lock is held;
		 * the lock is dropped for the duration of the handler, so
		 * these copies keep the DTrace probe arguments consistent.
		 */
		int level = dpr->dpr_level;
		uint64_t count = dpr->dpr_fire_count;
		/*
		 * If we have not yet been cancelled, then
		 * mark us executing:
		 */
		dpr->dpr_flags |= DPF_EXECUTING;
		mutex_exit(&dpr->dpr_lock);

		/*
		 * Execute the handler, without holding locks:
		 */
		DTRACE_PROBE4(ddi__periodic__execute, void *, dpr->dpr_handler,
		    void *, dpr->dpr_arg, int, level, uint64_t, count);
		(*dpr->dpr_handler)(dpr->dpr_arg);
		DTRACE_PROBE4(ddi__periodic__done, void *, dpr->dpr_handler,
		    void *, dpr->dpr_arg, int, level, uint64_t, count);

		mutex_enter(&dpr->dpr_lock);
		dpr->dpr_fire_count++;
	}

	/*
	 * We're done with this periodic for now, so release it and
	 * wake anybody that was waiting for us to be finished:
	 */
	dpr->dpr_flags &= ~(DPF_DISPATCHED | DPF_EXECUTING);
	cv_broadcast(&dpr->dpr_cv);
	mutex_exit(&dpr->dpr_lock);
}
181
/*
 * Soft interrupt handler entry point for periodic callbacks registered
 * at IPL 1 through 10.  Drains every periodic currently queued at the
 * given level, running each via periodic_execute().
 *
 * periodics_lock is dropped around each invocation so the handler runs
 * without it held; the queue head is re-checked after reacquiring.
 */
void
ddi_periodic_softintr(int level)
{
	ddi_periodic_impl_t *dpr;
	VERIFY(level >= PERI_IPL_1 && level <= PERI_IPL_10);

	mutex_enter(&periodics_lock);
	/*
	 * Pull the first scheduled periodic off the queue for this priority
	 * level:
	 */
	while ((dpr = list_remove_head(&periodic_softint_queue[level - 1]))
	    != NULL) {
		mutex_exit(&periodics_lock);
		/*
		 * And execute it:
		 */
		periodic_execute(dpr);
		mutex_enter(&periodics_lock);
	}
	mutex_exit(&periodics_lock);
}
204
/*
 * Initialize the DDI periodic subsystem: the request-tracking cache,
 * the per-IPL soft interrupt queues, the PERI_IPL_0 taskq, and the lock
 * protecting the queues.  Presumably called once during early kernel
 * startup -- TODO confirm against the caller.
 */
void
ddi_periodic_init(void)
{
	int i;

	/*
	 * Create a kmem_cache for request tracking objects, and a list
	 * to store them in so we can later delete based on opaque handles:
	 */
	periodic_cache = kmem_cache_create("ddi_periodic",
	/*
	 * NOTE(review): the source appears truncated here -- the remaining
	 * kmem_cache_create() arguments, the creation of the global
	 * "periodics" list, and the head of the loop that creates the
	 * per-IPL soft interrupt queues are not visible in this view.
	 */
		list_create(&periodic_softint_queue[i - 1],
		    sizeof (ddi_periodic_impl_t), offsetof(ddi_periodic_impl_t,
		    dpr_softint_link));
	}

	/*
	 * Create the taskq for running PERI_IPL_0 handlers. This taskq will
	 * _only_ be used with taskq_dispatch_ent(), and a taskq_ent_t
	 * pre-allocated with the ddi_periodic_impl_t.
	 */
	periodic_taskq = taskq_create_instance("ddi_periodic_taskq", -1,
	    ddi_periodic_taskq_threadcount, maxclsyspri, 0, 0, 0);

	/*
	 * Initialize the mutex lock used for the soft interrupt request
	 * queues.
	 */
	mutex_init(&periodics_lock, NULL, MUTEX_ADAPTIVE, NULL);
}
248
/*
 * Cyclic subsystem handler: fires once per interval for each registered
 * periodic and dispatches the periodic for execution, unless it has
 * been cancelled or its previous dispatch has not yet completed.
 */
static void
periodic_cyclic_handler(void *arg)
{
	extern void sir_on(int);
	ddi_periodic_impl_t *dpr = arg;

	mutex_enter(&dpr->dpr_lock);
	/*
	 * If we've been cancelled, or we're already dispatched, then exit
	 * immediately:
	 */
	if (dpr->dpr_flags & (DPF_CANCELLED | DPF_DISPATCHED)) {
		mutex_exit(&dpr->dpr_lock);
		return;
	}
	VERIFY(!(dpr->dpr_flags & DPF_EXECUTING));

	/*
	 * This periodic is not presently dispatched, so dispatch it now:
	 */
	/*
	 * NOTE(review): the source appears truncated here -- the code that
	 * marks the periodic DPF_DISPATCHED and enqueues it (presumably to
	 * the taskq for PERI_IPL_0, or onto the per-IPL soft interrupt
	 * queue under periodics_lock otherwise) is not visible in this
	 * view.
	 */
		mutex_exit(&periodics_lock);

		/*
		 * Raise the soft interrupt level for this periodic:
		 */
		sir_on(dpr->dpr_level);
	}
}
295
296 static void
297 periodic_destroy(ddi_periodic_impl_t *dpr)
298 {
299 if (dpr == NULL)
300 return;
301
302 /*
303 * By now, we should have a periodic that is not busy, and has been
304 * cancelled:
305 */
306 VERIFY(dpr->dpr_flags == DPF_CANCELLED);
307
308 id_free(periodic_id_space, dpr->dpr_id);
309 kmem_cache_free(periodic_cache, dpr);
310 }
311
312 static ddi_periodic_impl_t *
313 periodic_create(void)
314 {
315 ddi_periodic_impl_t *dpr;
316
317 dpr = kmem_cache_alloc(periodic_cache, KM_SLEEP);
318 bzero(dpr, sizeof (*dpr));
319 dpr->dpr_id = id_alloc(periodic_id_space);
320 mutex_init(&dpr->dpr_lock, NULL, MUTEX_ADAPTIVE, NULL);
321 cv_init(&dpr->dpr_cv, NULL, CV_DEFAULT, NULL);
322
323 return (dpr);
324 }
325
/*
 * Register a new periodic callback; this is the implementation behind
 * ddi_periodic_add(9F).  Returns an opaque identifier that may later be
 * passed to i_untimeout()/ddi_periodic_delete(9F).
 *
 *   func:     handler to invoke periodically (must not be NULL)
 *   arg:      argument passed through to the handler
 *   interval: requested firing interval, in nanoseconds
 *   level:    0 for kernel context, or soft interrupt level 1 through 10
 *
 * Acquires cpu_lock internally, so it must not be called with cpu_lock
 * already held.
 */
timeout_t
i_timeout(void (*func)(void *), void *arg, hrtime_t interval, int level)
{
	cyc_handler_t cyh;
	cyc_time_t cyt;
	ddi_periodic_impl_t *dpr;

	VERIFY(func != NULL);
	VERIFY(level >= 0 && level <= 10);

	/*
	 * Allocate object to track this periodic:
	 */
	dpr = periodic_create();
	dpr->dpr_level = level;
	dpr->dpr_handler = func;
	dpr->dpr_arg = arg;

	/*
	 * The resolution must be finer than or equal to
	 * the requested interval. If it's not, set the resolution
	 * to the interval.
	 * Note. There is a restriction currently. Regardless of the
	 * clock resolution used here, 10ms is set as the timer resolution.
	 * Even on the 1ms resolution timer, the minimum interval is 10ms.
	 */
	if (ddi_periodic_resolution > interval) {
		uintptr_t pc = (uintptr_t)dpr->dpr_handler;
		ulong_t off;
		cmn_err(CE_WARN,
		    "The periodic timeout (handler=%s, interval=%lld) "
		    "requests a finer interval than the supported resolution. "
		    "It rounds up to %lld\n", kobj_getsymname(pc, &off),
		    interval, ddi_periodic_resolution);
		interval = ddi_periodic_resolution;
	}

	/*
	 * If the specified interval is already multiples of
	 * the resolution, use it as is. Otherwise, it rounds
	 * up to multiples of the timer resolution.
	 */
	dpr->dpr_interval = roundup(interval, ddi_periodic_resolution);

	/*
	 * Create the underlying cyclic:
	 */
	cyh.cyh_func = periodic_cyclic_handler;
	cyh.cyh_arg = dpr;
	cyh.cyh_level = CY_LOCK_LEVEL;

	/*
	 * Schedule the first expiry one full interval from now, aligned to
	 * the timer resolution:
	 */
	cyt.cyt_when = roundup(gethrtime() + dpr->dpr_interval,
	    ddi_periodic_resolution);
	cyt.cyt_interval = dpr->dpr_interval;

	/*
	 * cyclic_add() is called under cpu_lock:
	 */
	mutex_enter(&cpu_lock);
	dpr->dpr_cyclic_id = cyclic_add(&cyh, &cyt);
	mutex_exit(&cpu_lock);

	/*
	 * Make the id visible to ddi_periodic_delete(9F) before we
	 * return it:
	 */
	mutex_enter(&periodics_lock);
	list_insert_tail(&periodics, dpr);
	mutex_exit(&periodics_lock);

	return ((timeout_t)(uintptr_t)dpr->dpr_id);
}
395
396 /*
397 * void
398 * i_untimeout(timeout_t req)
399 *
400 * Overview
401 * i_untimeout() is an internal function canceling the i_timeout()
402 * request previously issued.
403 * This function is used for ddi_periodic_delete(9F).
404 *
405 * Argument
406 * req: timeout_t opaque value i_timeout() returned previously.
407 *
408 * Return value
409 * Nothing.
410 *
411 * Caller's context
412 * i_untimeout() can be called in user, kernel or interrupt context.
413 * It cannot be called in high interrupt context.
414 *
415 * Note. This function is used by ddi_periodic_delete(), which cannot
416 * be called in interrupt context. As a result, this function is called
417 * in user or kernel context only in practice.
418 */
419 void
420 i_untimeout(timeout_t id)
421 {
422 ddi_periodic_impl_t *dpr;
423
424 /*
425 * Find the periodic in the list of all periodics and remove it.
	 * If we find it in (and remove it from) the global list, we have
427 * license to free it once it is no longer busy.
428 */
429 mutex_enter(&periodics_lock);
430 for (dpr = list_head(&periodics); dpr != NULL; dpr =
431 list_next(&periodics, dpr)) {
432 if (dpr->dpr_id == (id_t)(uintptr_t)id) {
433 list_remove(&periodics, dpr);
434 break;
435 }
436 }
437 mutex_exit(&periodics_lock);
438
439 /*
440 * We could not find a periodic for this id, so bail out:
441 */
442 if (dpr == NULL)
443 return;
444
445 mutex_enter(&dpr->dpr_lock);
446 /*
447 * We should be the only one trying to cancel this periodic:
448 */
449 VERIFY(!(dpr->dpr_flags & DPF_CANCELLED));
450 /*
451 * Mark the periodic as cancelled:
452 */
453 dpr->dpr_flags |= DPF_CANCELLED;
454 mutex_exit(&dpr->dpr_lock);
455
456 /*
457 * Cancel our cyclic. cyclic_remove() guarantees that the cyclic
458 * handler will not run again after it returns. Note that the cyclic
459 * handler merely _dispatches_ the periodic, so this does _not_ mean
460 * the periodic handler is also finished running.
461 */
462 mutex_enter(&cpu_lock);
463 cyclic_remove(dpr->dpr_cyclic_id);
464 mutex_exit(&cpu_lock);
465
466 /*
467 * Wait until the periodic handler is no longer running:
468 */
469 mutex_enter(&dpr->dpr_lock);
470 while (dpr->dpr_flags & (DPF_DISPATCHED | DPF_EXECUTING)) {
|
1 /*
2 * This file and its contents are supplied under the terms of the
3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 * You may only use this file in accordance with the terms of version
5 * 1.0 of the CDDL.
6 *
7 * A full copy of the text of the CDDL should have accompanied this
8 * source. A copy of the CDDL is also available via the Internet at
9 * http://www.illumos.org/license/CDDL.
10 */
11 /*
12 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
13 */
14
15 #include <sys/cmn_err.h>
16 #include <sys/ddi_periodic.h>
17 #include <sys/id_space.h>
18 #include <sys/kobj.h>
19 #include <sys/sysmacros.h>
20 #include <sys/systm.h>
21 #include <sys/taskq.h>
22 #include <sys/taskq_impl.h>
23 #include <sys/time.h>
24 #include <sys/types.h>
25 #include <sys/sdt.h>
26
27 /*
28 * The ddi_periodic_add(9F) Implementation
29 *
30 * This file contains the implementation of the ddi_periodic_add(9F) interface.
31 * It is a thin wrapper around the cyclic subsystem (see documentation in
32 * uts/common/os/cyclic.c), providing a DDI interface for registering
33 * (and unregistering) callbacks for periodic invocation at arbitrary
34 * interrupt levels, or in kernel context.
35 *
36 * Each call to ddi_periodic_add will result in a new opaque handle, as
96 * While in the periodic_softint_queue list, the periodic will be marked
97 * DPF_DISPATCHED, and thus safe from frees. Only the invocation of
98 * i_untimeout() that removes the periodic from the global list is allowed
99 * to free it.
100 */
101 static kmutex_t periodics_lock;
102 static list_t periodics;
103 static list_t periodic_softint_queue[10]; /* for IPL1 up to IPL10 */
104
/*
 * Soft interrupt priority levels for periodic handler dispatch.  Level
 * zero corresponds to execution in regular kernel (taskq) context; the
 * values are spelled out explicitly to make the IPL mapping obvious.
 */
typedef enum periodic_ipl {
	PERI_IPL_0 = 0,
	PERI_IPL_1 = 1,
	PERI_IPL_2 = 2,
	PERI_IPL_3 = 3,
	PERI_IPL_4 = 4,
	PERI_IPL_5 = 5,
	PERI_IPL_6 = 6,
	PERI_IPL_7 = 7,
	PERI_IPL_8 = 8,
	PERI_IPL_9 = 9,
	PERI_IPL_10 = 10
} periodic_ipl_t;
118
119 static char *
120 periodic_handler_symbol(ddi_periodic_impl_t *dpr)
121 {
122 ulong_t off;
123
124 return (kobj_getsymname((uintptr_t)dpr->dpr_handler, &off));
125 }
126
/*
 * Invoke a single dispatched periodic's handler.  This function may be
 * called either from a soft interrupt handler (ddi_periodic_softintr),
 * or as a taskq worker function.
 *
 * While the handler runs, dpr_thread records the executing thread so
 * that i_untimeout() can detect (and panic on) a handler attempting to
 * delete its own periodic.
 */
static void
periodic_execute(void *arg)
{
	ddi_periodic_impl_t *dpr = arg;
	mutex_enter(&dpr->dpr_lock);

	/*
	 * We must be DISPATCHED, but not yet EXECUTING:
	 */
	VERIFY((dpr->dpr_flags & (DPF_DISPATCHED | DPF_EXECUTING)) ==
	    DPF_DISPATCHED);
	VERIFY(dpr->dpr_thread == NULL);

	if (!(dpr->dpr_flags & DPF_CANCELLED)) {
		/*
		 * Snapshot the level and fire count while dpr_lock is held;
		 * the lock is dropped for the duration of the handler, so
		 * these copies keep the DTrace probe arguments consistent.
		 */
		int level = dpr->dpr_level;
		uint64_t count = dpr->dpr_fire_count;
		/*
		 * If we have not yet been cancelled, then
		 * mark us executing:
		 */
		dpr->dpr_flags |= DPF_EXECUTING;
		dpr->dpr_thread = curthread;
		mutex_exit(&dpr->dpr_lock);

		/*
		 * Execute the handler, without holding locks:
		 */
		DTRACE_PROBE4(ddi__periodic__execute, void *, dpr->dpr_handler,
		    void *, dpr->dpr_arg, int, level, uint64_t, count);
		(*dpr->dpr_handler)(dpr->dpr_arg);
		DTRACE_PROBE4(ddi__periodic__done, void *, dpr->dpr_handler,
		    void *, dpr->dpr_arg, int, level, uint64_t, count);

		mutex_enter(&dpr->dpr_lock);
		dpr->dpr_thread = NULL;
		dpr->dpr_fire_count++;
	}

	/*
	 * We're done with this periodic for now, so release it and
	 * wake anybody that was waiting for us to be finished:
	 */
	dpr->dpr_flags &= ~(DPF_DISPATCHED | DPF_EXECUTING);
	cv_broadcast(&dpr->dpr_cv);
	mutex_exit(&dpr->dpr_lock);
}
177
/*
 * Soft interrupt handler entry point for periodic callbacks registered
 * at IPL 1 through 10.  Drains every periodic currently queued at the
 * given level, running each via periodic_execute().
 *
 * periodics_lock is dropped around each invocation so the handler runs
 * without it held; the queue head is re-checked after reacquiring.
 */
void
ddi_periodic_softintr(int level)
{
	ddi_periodic_impl_t *dpr;
	VERIFY(level >= PERI_IPL_1 && level <= PERI_IPL_10);

	mutex_enter(&periodics_lock);
	/*
	 * Pull the first scheduled periodic off the queue for this priority
	 * level:
	 */
	while ((dpr = list_remove_head(&periodic_softint_queue[level - 1])) !=
	    NULL) {
		mutex_exit(&periodics_lock);
		/*
		 * And execute it:
		 */
		periodic_execute(dpr);
		mutex_enter(&periodics_lock);
	}
	mutex_exit(&periodics_lock);
}
200
/*
 * Initialize the DDI periodic subsystem: the request-tracking cache,
 * the per-IPL soft interrupt queues, the PERI_IPL_0 taskq, and the lock
 * protecting the queues.  Presumably called once during early kernel
 * startup -- TODO confirm against the caller.
 */
void
ddi_periodic_init(void)
{
	int i;

	/*
	 * Create a kmem_cache for request tracking objects, and a list
	 * to store them in so we can later delete based on opaque handles:
	 */
	periodic_cache = kmem_cache_create("ddi_periodic",
	/*
	 * NOTE(review): the source appears truncated here -- the remaining
	 * kmem_cache_create() arguments, the creation of the global
	 * "periodics" list, and the head of the loop that creates the
	 * per-IPL soft interrupt queues are not visible in this view.
	 */
		list_create(&periodic_softint_queue[i - 1],
		    sizeof (ddi_periodic_impl_t), offsetof(ddi_periodic_impl_t,
		    dpr_softint_link));
	}

	/*
	 * Create the taskq for running PERI_IPL_0 handlers. This taskq will
	 * _only_ be used with taskq_dispatch_ent(), and a taskq_ent_t
	 * pre-allocated with the ddi_periodic_impl_t.
	 */
	periodic_taskq = taskq_create_instance("ddi_periodic_taskq", -1,
	    ddi_periodic_taskq_threadcount, maxclsyspri, 0, 0, 0);

	/*
	 * Initialize the mutex lock used for the soft interrupt request
	 * queues.
	 */
	mutex_init(&periodics_lock, NULL, MUTEX_ADAPTIVE, NULL);
}
244
245 void
246 ddi_periodic_fini(void)
247 {
248 ddi_periodic_impl_t *dpr;
249 /*
250 * Find all periodics that have not yet been unregistered and,
251 * on DEBUG bits, print a warning about this resource leak.
252 */
253 mutex_enter(&periodics_lock);
254 while ((dpr = list_head(&periodics)) != NULL) {
255 #ifdef DEBUG
256 printf("DDI periodic handler not deleted (id=%p, handler=%s)\n",
257 dpr->dpr_id, periodic_handler_symbol(dpr));
258 #endif
259
260 mutex_exit(&periodics_lock);
261 /*
262 * Delete the periodic ourselves:
263 */
264 i_untimeout(dpr->dpr_id);
265 mutex_enter(&periodics_lock);
266 }
267 mutex_exit(&periodics_lock);
268 }
269
/*
 * Cyclic subsystem handler: fires once per interval for each registered
 * periodic and dispatches the periodic for execution, unless it has
 * been cancelled or its previous dispatch has not yet completed.
 */
static void
periodic_cyclic_handler(void *arg)
{
	extern void sir_on(int);
	ddi_periodic_impl_t *dpr = arg;

	mutex_enter(&dpr->dpr_lock);
	/*
	 * If we've been cancelled, or we're already dispatched, then exit
	 * immediately:
	 */
	if (dpr->dpr_flags & (DPF_CANCELLED | DPF_DISPATCHED)) {
		mutex_exit(&dpr->dpr_lock);
		return;
	}
	VERIFY(!(dpr->dpr_flags & DPF_EXECUTING));

	/*
	 * This periodic is not presently dispatched, so dispatch it now:
	 */
	/*
	 * NOTE(review): the source appears truncated here -- the code that
	 * marks the periodic DPF_DISPATCHED and enqueues it (presumably to
	 * the taskq for PERI_IPL_0, or onto the per-IPL soft interrupt
	 * queue under periodics_lock otherwise) is not visible in this
	 * view.
	 */
		mutex_exit(&periodics_lock);

		/*
		 * Raise the soft interrupt level for this periodic:
		 */
		sir_on(dpr->dpr_level);
	}
}
316
317 static void
318 periodic_destroy(ddi_periodic_impl_t *dpr)
319 {
320 if (dpr == NULL)
321 return;
322
323 /*
324 * By now, we should have a periodic that is not busy, and has been
325 * cancelled:
326 */
327 VERIFY(dpr->dpr_flags == DPF_CANCELLED);
328 VERIFY(dpr->dpr_thread == NULL);
329
330 id_free(periodic_id_space, dpr->dpr_id);
331 cv_destroy(dpr->dpr_cv);
332 mutex_destroy(dpr->dpr_lock);
333 kmem_cache_free(periodic_cache, dpr);
334 }
335
336 static ddi_periodic_impl_t *
337 periodic_create(void)
338 {
339 ddi_periodic_impl_t *dpr;
340
341 dpr = kmem_cache_alloc(periodic_cache, KM_SLEEP);
342 bzero(dpr, sizeof (*dpr));
343 dpr->dpr_id = id_alloc(periodic_id_space);
344 mutex_init(&dpr->dpr_lock, NULL, MUTEX_ADAPTIVE, NULL);
345 cv_init(&dpr->dpr_cv, NULL, CV_DEFAULT, NULL);
346
347 return (dpr);
348 }
349
/*
 * This function provides the implementation for the ddi_periodic_add(9F)
 * interface. It registers a periodic handler and returns an opaque identifier
 * that can be unregistered via ddi_periodic_delete(9F)/i_untimeout().
 *
 *   func:     handler to invoke periodically (must not be NULL)
 *   arg:      argument passed through to the handler
 *   interval: requested firing interval, in nanoseconds
 *   level:    0 for kernel (taskq) context, or soft interrupt level 1-10
 *
 * It may be called in user or kernel context, provided cpu_lock is not held.
 */
timeout_t
i_timeout(void (*func)(void *), void *arg, hrtime_t interval, int level)
{
	cyc_handler_t cyh;
	cyc_time_t cyt;
	ddi_periodic_impl_t *dpr;

	VERIFY(func != NULL);
	VERIFY(level >= 0 && level <= 10);

	/*
	 * Allocate object to track this periodic:
	 */
	dpr = periodic_create();
	dpr->dpr_level = level;
	dpr->dpr_handler = func;
	dpr->dpr_arg = arg;

	/*
	 * The minimum supported interval between firings of the periodic
	 * handler is 10ms; see ddi_periodic_add(9F) for more details. If a
	 * shorter interval is requested, round up.
	 */
	if (ddi_periodic_resolution > interval) {
		cmn_err(CE_WARN,
		    "The periodic timeout (handler=%s, interval=%lld) "
		    "requests a finer interval than the supported resolution. "
		    "It rounds up to %lld\n", periodic_handler_symbol(dpr),
		    interval, ddi_periodic_resolution);
		interval = ddi_periodic_resolution;
	}

	/*
	 * Ensure that the interval is an even multiple of the base resolution
	 * that is at least as long as the requested interval.
	 */
	dpr->dpr_interval = roundup(interval, ddi_periodic_resolution);

	/*
	 * Create the underlying cyclic:
	 */
	cyh.cyh_func = periodic_cyclic_handler;
	cyh.cyh_arg = dpr;
	cyh.cyh_level = CY_LOCK_LEVEL;

	/*
	 * A cyt_when of 0 presumably leaves the scheduling of the first
	 * expiry to the cyclic subsystem -- confirm against
	 * uts/common/os/cyclic.c.  Thereafter the cyclic fires every
	 * dpr_interval nanoseconds.
	 */
	cyt.cyt_when = 0;
	cyt.cyt_interval = dpr->dpr_interval;

	/*
	 * cyclic_add() is called under cpu_lock:
	 */
	mutex_enter(&cpu_lock);
	dpr->dpr_cyclic_id = cyclic_add(&cyh, &cyt);
	mutex_exit(&cpu_lock);

	/*
	 * Make the id visible to ddi_periodic_delete(9F) before we
	 * return it:
	 */
	mutex_enter(&periodics_lock);
	list_insert_tail(&periodics, dpr);
	mutex_exit(&periodics_lock);

	return ((timeout_t)(uintptr_t)dpr->dpr_id);
}
419
420 /*
421 * This function provides the implementation for the ddi_periodic_delete(9F)
422 * interface. It cancels a periodic handler previously registered through
423 * ddi_periodic_add(9F)/i_timeout().
424 *
425 * It may be called in user or kernel context, provided cpu_lock is not held.
426 * It may NOT be called from within a periodic handler.
427 */
428 void
429 i_untimeout(timeout_t id)
430 {
431 ddi_periodic_impl_t *dpr;
432
433 /*
434 * Find the periodic in the list of all periodics and remove it.
	 * If we find it in (and remove it from) the global list, we have
436 * license to free it once it is no longer busy.
437 */
438 mutex_enter(&periodics_lock);
439 for (dpr = list_head(&periodics); dpr != NULL; dpr =
440 list_next(&periodics, dpr)) {
441 if (dpr->dpr_id == (id_t)(uintptr_t)id) {
442 list_remove(&periodics, dpr);
443 break;
444 }
445 }
446 mutex_exit(&periodics_lock);
447
448 /*
449 * We could not find a periodic for this id, so bail out:
450 */
451 if (dpr == NULL)
452 return;
453
454 mutex_enter(&dpr->dpr_lock);
455 /*
456 * We should be the only one trying to cancel this periodic:
457 */
458 VERIFY(!(dpr->dpr_flags & DPF_CANCELLED));
459 /*
460 * Removing a periodic from within its own handler function will
461 * cause a deadlock, so panic explicitly.
462 */
463 if (dpr->dpr_thread == curthread) {
464 panic("ddi_periodic_delete(%p) called from its own handler\n",
465 dpr->dpr_id);
466 }
467 /*
468 * Mark the periodic as cancelled:
469 */
470 dpr->dpr_flags |= DPF_CANCELLED;
471 mutex_exit(&dpr->dpr_lock);
472
473 /*
474 * Cancel our cyclic. cyclic_remove() guarantees that the cyclic
475 * handler will not run again after it returns. Note that the cyclic
476 * handler merely _dispatches_ the periodic, so this does _not_ mean
477 * the periodic handler is also finished running.
478 */
479 mutex_enter(&cpu_lock);
480 cyclic_remove(dpr->dpr_cyclic_id);
481 mutex_exit(&cpu_lock);
482
483 /*
484 * Wait until the periodic handler is no longer running:
485 */
486 mutex_enter(&dpr->dpr_lock);
487 while (dpr->dpr_flags & (DPF_DISPATCHED | DPF_EXECUTING)) {
|