7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/winlockio.c
+++ new/usr/src/uts/common/io/winlockio.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26
27 27 /*
28 28 * This is the lock device driver.
29 29 *
30 30 * The lock driver provides a variation of inter-process mutexes with the
31 31 * following twist in semantics:
32 32 * A waiter for a lock after a set timeout can "break" the lock and
33 33 * grab it from the current owner (without informing the owner).
34 34 *
35 35 * These semantics result in temporarily multiple processes thinking they
36 36 * own the lock. This usually does not make sense for cases where locks are
37 37 * used to protect a critical region and it is important to serialize access
38 38  * to data structures, since breaking the lock also loses the
39 39  * serialization and results in corrupt data structures.
40 40 *
41 41  * The usage for the winlock driver is primarily driven by the graphics system
42 42 * when doing DGA (direct graphics access) graphics. The locks are used to
43 43 * protect access to the frame buffer (presumably reflects back to the screen)
44 44 * between competing processes that directly write to the screen as opposed
45 45 * to going through the window server etc.
46 46 * In this case, the result of breaking the lock at worst causes the screen
47 47  * image to be distorted, which is easily fixed by doing a "refresh".
48 48 *
49 49 * In well-behaved applications, the lock is held for a very short time and
50 50 * the breaking semantics do not come into play. Not having this feature and
51 51  * using normal inter-process mutexes would allow a misbehaved application
52 52  * to grab the screen writing capability from the window manager and
53 53 * effectively make the system look like it is hung (mouse pointer does not
54 54 * move).
55 55 *
56 56 * A secondary aspect of the winlock driver is that it allows for extremely
57 57 * fast lock acquire/release in cases where there is low contention. A memory
58 58  * write is all that is needed (not even a function call). The window
59 59  * manager is usually the only DGA writer, and this case is optimized for. Occasionally
60 60 * some processes might do DGA graphics and cause kernel faults to handle
61 61 * the contention/locking (and that has got to be slow!).
62 62 *
63 63 * The following IOCTLs are supported:
64 64 *
65 65 * GRABPAGEALLOC:
66 66 * Compatibility with old cgsix device driver lockpage ioctls.
67 67 * Lockpages created this way must be an entire page for compatibility with
68 68 * older software. This ioctl allocates a lock context with its own
69 69 * private lock page. The unique "ident" that identifies this lock is
70 70 * returned.
71 71 *
72 72 * GRABPAGEFREE:
73 73 * Compatibility with cgsix device driver lockpage ioctls. This
74 74 * ioctl releases the lock context allocated by GRABPAGEALLOC.
75 75 *
76 76 * GRABLOCKINFO:
77 77 * Returns a one-word flag. '1' means that multiple clients may
78 78 * access this lock page. Older device drivers returned '0',
79 79 * meaning that only two clients could access a lock page.
80 80 *
81 81 * GRABATTACH:
82 82 * Not supported. This ioctl would have grabbed all lock pages
83 83 * on behalf of the calling program.
84 84 *
85 85 * WINLOCKALLOC:
86 86  *	Allocate a lock context. This ioctl accepts a key value as
87 87 * its argument. If the key is zero, a new lock context is
88 88 * created, and its "ident" is returned. If the key is nonzero,
89 89  *	all existing contexts are checked to see if they match the
90 90 * key. If a match is found, its reference count is incremented
91 91  *	and its ident is returned; otherwise a new context is created
92 92 * and its ident is returned.
93 93 *
94 94 * WINLOCKFREE:
95 95 * Free a lock context. This ioctl accepts the ident of a lock
96 96 * context and decrements its reference count. Once the reference
97 97 * count reaches zero *and* all mappings are released, the lock
98 98  *	context is freed. When all the lock contexts in a lock page are
99 99 * freed, the lock page is freed as well.
100 100 *
101 101 * WINLOCKSETTIMEOUT:
102 102 * Set lock timeout for a context. This ioctl accepts the ident
103 103 * of a lock context and a timeout value in milliseconds.
104 104 * Whenever lock contention occurs, the timer is started and the lock is
105 105 * broken after the timeout expires. If timeout value is zero, lock does
106 106 * not timeout. This value will be rounded to the nearest clock
107 107 * tick, so don't try to use it for real-time control or something.
108 108 *
109 109 * WINLOCKGETTIMEOUT:
110 110 * Get lock timeout from a context.
111 111 *
112 112 * WINLOCKDUMP:
113 113 * Dump state of this device.
114 114 *
115 115 *
116 116 * How /dev/winlock works:
117 117 *
118 118 * Every lock context consists of two mappings for the client to the lock
119 119 * page. These mappings are known as the "lock page" and "unlock page"
120 120 * to the client. The first mmap to the lock context (identified by the
121 121  * sy_ident field returned during alloc) allocates a mapping to the lock page,
122 122 * the second mmap allocates a mapping to the unlock page.
123 123  * The mappings don't have to be ordered in virtual address space, but do
124 124 * need to be ordered in time. Mapping and unmapping of these lock and unlock
125 125 * pages should happen in pairs. Doing them one at a time or unmapping one
126 126  * and leaving one mapped, etc., causes undefined behavior.
127 127 * The mappings are always of length PAGESIZE, and type MAP_SHARED.
128 128 *
129 129 * The first ioctl is to ALLOC a lock, either based on a key (if trying to
130 130 * grab a preexisting lock) or 0 (gets a default new one)
131 131 * This ioctl returns a value in sy_ident which is needed to do the
132 132 * later mmaps and FREE/other ioctls.
133 133 *
134 134 * The "page number" portion of the sy_ident needs to be passed as the
135 135  * file offset when doing an mmap for both the lock page and unlock page.
136 136 *
137 137 * The value returned by mmap ( a user virtual address) needs to be
138 138 * incremented by the "page offset" portion of sy_ident to obtain the
139 139  * pointer to the actual lock. (Skipping this step does not cause any
140 140 * visible error, but the process will be using the wrong lock!)
141 141 *
142 142 * On a fork(), the child process will inherit the mappings for free, but
143 143 * will not inherit the parent's lock ownership if any. The child should NOT
144 144 * do an explicit FREE on the lock context unless it did an explicit ALLOC.
145 145 * Only one process at a time is allowed to have a valid hat
146 146 * mapping to a lock page. This is enforced by this driver.
147 147 * A client acquires a lock by writing a '1' to the lock page.
148 148  * Note that it is not necessary to read and verify that the lock is '0'
149 149 * prior to writing a '1' in it.
150 150 * If it does not already have a valid mapping to that page, the driver
151 151 * takes a fault (devmap_access), loads the client mapping
152 152 * and allows the client to continue. The client releases the lock by
153 153 * writing a '0' to the unlock page. Again, if it does not have a valid
154 154 * mapping to the unlock page, the segment driver takes a fault,
155 155 * loads the mapping, and lets the client continue. From this point
156 156 * forward, the client can make as many locks and unlocks as it
157 157 * wants, without any more faults into the kernel.
158 158 *
159 159 * If a different process wants to acquire a lock, it takes a page fault
160 160 * when it writes the '1' to the lock page. If the segment driver sees
161 161 * that the lock page contained a zero, then it invalidates the owner's
162 162 * mappings and gives the mappings to this process.
163 163 *
164 164 * If there is already a '1' in the lock page when the second client
165 165 * tries to access the lock page, then a lock exists. The segment
166 166 * driver sleeps the second client and, if applicable, starts the
167 167 * timeout on the lock. The owner's mapping to the unlock page
168 168  * is invalidated so that the driver will take a fault when the owner
169 169 * releases the lock.
170 170 *
171 171 * When the locking client finally writes a '0' to the unlock page, the
172 172 * segment driver takes another fault. The client is given a valid
173 173 * mapping, not to the unlock page, but to the "trash page", and allowed
174 174 * to continue. Meanwhile, the sleeping client is given a valid mapping
175 175 * to the lock/unlock pages and allowed to continue as well.
176 176 *
177 177  * RFE: There is a leak if a process exits before freeing its allocated
178 178  * locks, but we are currently not tracking which locks were allocated by
179 179  * which process, and we do not have a clean entry point into the driver
180 180  * to do garbage collection. If the interface used a file descriptor for each
181 181  * lock it allocs, then the driver could free up stuff in the _close routine.
182 182 */
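/*
 * [Editor's sketch, not part of this file: a minimal user-space walk-through
 * of the alloc/mmap/lock/unlock protocol described above. It assumes the
 * WINLOCKALLOC/WINLOCKFREE ioctls and the struct winlockalloc sy_key/sy_ident
 * fields exactly as used by the driver code below; error handling is mostly
 * omitted for brevity.]
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/winlockio.h>
#include <fcntl.h>
#include <stropts.h>
#include <unistd.h>

static int
winlock_example(void)
{
	struct winlockalloc wla;
	volatile int *lockp, *unlockp;
	caddr_t lockpg, unlockpg;
	long pg;
	off_t pgoff;
	int fd;

	pg = sysconf(_SC_PAGESIZE);
	if ((fd = open("/dev/winlock", O_RDWR)) < 0)
		return (-1);

	wla.sy_key = 0;		/* zero key => always create a new context */
	if (ioctl(fd, WINLOCKALLOC, &wla) != 0)
		return (-1);

	/* the page-number bits of sy_ident are the mmap(2) offset ... */
	pgoff = (off_t)(wla.sy_ident & ~(pg - 1));

	/* ... the first mmap gives the lock page, the second the unlock page */
	lockpg = mmap(NULL, pg, PROT_READ | PROT_WRITE, MAP_SHARED, fd, pgoff);
	unlockpg = mmap(NULL, pg, PROT_READ | PROT_WRITE, MAP_SHARED, fd, pgoff);
	if (lockpg == MAP_FAILED || unlockpg == MAP_FAILED)
		return (-1);

	/* ... and the page-offset bits locate the lock word in each page */
	lockp = (volatile int *)(lockpg + (wla.sy_ident & (pg - 1)));
	unlockp = (volatile int *)(unlockpg + (wla.sy_ident & (pg - 1)));

	*lockp = 1;		/* acquire; may fault into the driver */
	/* ... critical section, e.g. DGA writes to the frame buffer ... */
	*unlockp = 0;		/* release; may fault into the driver */

	/* unmap both pages as a pair, then drop the allocation reference */
	(void) munmap(lockpg, pg);
	(void) munmap(unlockpg, pg);
	(void) ioctl(fd, WINLOCKFREE, &wla.sy_ident);
	return (close(fd));
}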
183 183
184 184 #include <sys/types.h> /* various type defn's */
185 185 #include <sys/debug.h>
186 186 #include <sys/param.h> /* various kernel limits */
187 187 #include <sys/time.h>
188 188 #include <sys/errno.h>
189 189 #include <sys/kmem.h> /* defines kmem_alloc() */
190 190 #include <sys/conf.h> /* defines cdevsw */
191 191 #include <sys/file.h> /* various file modes, etc. */
192 192 #include <sys/uio.h> /* UIO stuff */
193 193 #include <sys/ioctl.h>
194 194 #include <sys/cred.h> /* defines cred struct */
195 195 #include <sys/mman.h> /* defines mmap(2) parameters */
196 196 #include <sys/stat.h> /* defines S_IFCHR */
197 197 #include <sys/cmn_err.h> /* use cmn_err */
198 198 #include <sys/ddi.h> /* ddi stuff */
199 199 #include <sys/sunddi.h> /* ddi stuff */
200 200 #include <sys/ddi_impldefs.h> /* ddi stuff */
201 201 #include <sys/winlockio.h> /* defines ioctls, flags, data structs */
202 202
203 203 static int winlock_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
204 204 static int winlock_devmap(dev_t, devmap_cookie_t, offset_t, size_t,
205 205 size_t *, uint_t);
206 206 static int winlocksegmap(dev_t, off_t, struct as *, caddr_t *, off_t,
207 207 uint_t, uint_t, uint_t, cred_t *);
208 208
209 209 static struct cb_ops winlock_cb_ops = {
210 210 nulldev, /* open */
211 211 nulldev, /* close */
212 212 nodev, /* strategy */
213 213 nodev, /* print */
214 214 nodev, /* dump */
215 215 nodev, /* read */
216 216 nodev, /* write */
217 217 winlock_ioctl, /* ioctl */
218 218 winlock_devmap, /* devmap */
219 219 nodev, /* mmap */
220 220 winlocksegmap, /* segmap */
221 221 nochpoll, /* poll */
222 222 ddi_prop_op, /* prop_op */
223 223 NULL, /* streamtab */
224 224 D_NEW|D_MP|D_DEVMAP, /* Driver compatibility flag */
225 225 0, /* rev */
226 226 nodev, /* aread */
227 227 nodev /* awrite */
228 228 };
229 229
230 230 static int winlock_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
231 231 static int winlock_attach(dev_info_t *, ddi_attach_cmd_t);
232 232 static int winlock_detach(dev_info_t *, ddi_detach_cmd_t);
233 233
234 234 static struct dev_ops winlock_ops = {
235 235 DEVO_REV,
236 236 0, /* refcount */
237 237 winlock_info, /* info */
238 238 nulldev, /* identify */
239 239 nulldev, /* probe */
240 240 winlock_attach, /* attach */
241 241 winlock_detach, /* detach */
242 242 nodev, /* reset */
243 243 &winlock_cb_ops, /* driver ops */
244 244 NULL, /* bus ops */
245 245 NULL, /* power */
246 246 ddi_quiesce_not_needed, /* quiesce */
247 247 };
248 248
249 249 static int winlockmap_map(devmap_cookie_t, dev_t, uint_t, offset_t, size_t,
250 250 void **);
251 251 static void winlockmap_unmap(devmap_cookie_t, void *, offset_t, size_t,
252 252 devmap_cookie_t, void **, devmap_cookie_t, void **);
253 253 static int winlockmap_dup(devmap_cookie_t, void *,
254 254 devmap_cookie_t, void **);
255 255 static int winlockmap_access(devmap_cookie_t, void *, offset_t, size_t,
256 256 uint_t, uint_t);
257 257
258 258 static
259 259 struct devmap_callback_ctl winlockmap_ops = {
260 260 DEVMAP_OPS_REV,
261 261 winlockmap_map,
262 262 winlockmap_access,
263 263 winlockmap_dup,
264 264 winlockmap_unmap,
265 265 };
266 266
267 267 #if DEBUG
268 268 static int lock_debug = 0;
269 269 #define DEBUGF(level, args) { if (lock_debug >= (level)) cmn_err args; }
270 270 #else
271 271 #define DEBUGF(level, args)
272 272 #endif
273 273
274 274 /* Driver supports two styles of locks */
275 275 enum winlock_style { NEWSTYLE_LOCK, OLDSTYLE_LOCK };
276 276
277 277 /*
278 278 * These structures describe a lock context. We permit multiple
279 279 * clients (not just two) to access a lock page
280 280 *
281 281 * The "cookie" identifies the lock context. It is the page number portion
282 282  * of sy_ident returned on lock allocation. The cookie is used in later ioctls.
283 283 * "cookie" is lockid * PAGESIZE
284 284 * "lockptr" is the kernel virtual address to the lock itself
285 285 * The page offset portion of lockptr is the page offset portion of sy_ident
286 286 */
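/*
 * [Editor's sketch: the sy_ident encoding implied above, using the page
 * macros already used in this file. seglock_findlock masks with PAGEMASK
 * and seglock_graballoc builds sy_ident the same way below.]
 *
 *	cookie   = sy_ident & PAGEMASK;		(lock id * PAGESIZE)
 *	slot     = sy_ident & PAGEOFFSET;	(== (uintptr_t)lockptr & PAGEOFFSET)
 *	sy_ident = cookie + ((uintptr_t)lockptr & PAGEOFFSET);
 */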
287 287
288 288 /*
289 289 * per-process information about locks. This is the private field of
290 290 * a devmap mapping. Note that usually *two* mappings point to this.
291 291 */
292 292
293 293 /*
294 294 * Each process using winlock is associated with a segproc structure
295 295 * In various driver entry points, we need to search to find the right
296 296  * segproc structure (if we were using file handles for each lock, this
297 297 * would not have been necessary).
298 298  * It would have been simple to use the process pid (via ddi_get_pid).
299 299  * However, during fork, devmap_dup is called in the parent process context,
300 300  * and using the pid complicates the code by introducing orphans.
301 301 * Instead we use the as pointer for the process as a cookie
302 302 * which requires delving into various non-DDI kosher structs
303 303  * which requires delving into various non-DDI-kosher structs.
304 304 typedef struct segproc {
305 305 struct segproc *next; /* next client of this lock */
306 306 struct seglock *lp; /* associated lock context */
307 307 devmap_cookie_t lockseg; /* lock mapping, if any */
308 308 devmap_cookie_t unlockseg; /* unlock mapping, if any */
309 309 void *tag; /* process as pointer as tag */
310 310 uint_t flag; /* see "flag bits" in winlockio.h */
311 311 } SegProc;
312 312
313 313 #define ID(sdp) ((sdp)->tag)
314 314 #define CURPROC_ID (void *)(curproc->p_as)
315 315
316 316 /* per lock context information */
317 317
318 318 typedef struct seglock {
319 319 struct seglock *next; /* next lock */
320 320 uint_t sleepers; /* nthreads sleeping on this lock */
321 321 uint_t alloccount; /* how many times created? */
322 322 uint_t cookie; /* mmap() offset (page #) into device */
323 323 uint_t key; /* key, if any */
324 324 enum winlock_style style; /* style of lock - OLDSTYLE, NEWSTYLE */
325 325 clock_t timeout; /* sleep time in ticks */
326 326 ddi_umem_cookie_t umem_cookie; /* cookie for umem allocated memory */
327 327 int *lockptr; /* kernel virtual addr of lock */
328 328 struct segproc *clients; /* list of clients of this lock */
329 329 struct segproc *owner; /* current owner of lock */
330 330 kmutex_t mutex; /* mutex for lock */
331 331 kcondvar_t locksleep; /* for sleeping on lock */
332 332 } SegLock;
333 333
334 334 #define LOCK(lp) (*((lp)->lockptr))
335 335
336 336 /*
337 337 * Number of locks that can fit in a page. Driver can support only that many.
338 338  * For oldstyle locks, it is relatively easy to increase the limit, as each
339 339  * is in a separate page (MAX_LOCKS mostly serves to prevent runaway allocation).
340 340 * For newstyle locks, this is trickier as the code needs to allow for mapping
341 341 * into the second or third page of the cookie for some locks.
342 342 */
343 343 #define MAX_LOCKS (PAGESIZE/sizeof (int))
344 344
345 345 #define LOCKTIME 3 /* Default lock timeout in seconds */
346 346
347 347
348 348 /* Protections setting for winlock user mappings */
349 349 #define WINLOCK_PROT (PROT_READ|PROT_WRITE|PROT_USER)
350 350
351 351 /*
352 352 * The trash page is where unwanted writes go
353 353 * when a process is releasing a lock.
354 354 */
355 355 static ddi_umem_cookie_t trashpage_cookie = NULL;
356 356
357 357 /* For newstyle allocations a common page of locks is used */
358 358 static caddr_t lockpage = NULL;
359 359 static ddi_umem_cookie_t lockpage_cookie = NULL;
360 360
361 361 static dev_info_t *winlock_dip = NULL;
362 362 static kmutex_t winlock_mutex;
363 363
364 364 /*
365 365 * winlock_mutex protects
366 366 * lock_list
367 367 * lock_free_list
368 368 * "next" field in SegLock
369 369 * next_lock
370 370 * trashpage_cookie
371 371 * lockpage & lockpage_cookie
372 372 *
373 373 * SegLock_mutex protects
374 374 * rest of fields in SegLock
375 375 * All fields in list of SegProc (lp->clients)
376 376 *
377 377 * Lock ordering is winlock_mutex->SegLock_mutex
378 378  * During devmap/seg operations, SegLock_mutex is acquired without winlock_mutex.
379 379 *
380 380 * During devmap callbacks, the pointer to SegProc is stored as the private
381 381 * data in the devmap handle. This pointer will not go stale (i.e., the
382 382 * SegProc getting deleted) as the SegProc is not deleted until both the
383 383 * lockseg and unlockseg have been unmapped and the pointers stored in
384 384 * the devmap handles have been NULL'ed.
385 385 * But before this pointer is used to access any fields (other than the 'lp')
386 386 * lp->mutex must be held.
387 387 */
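/*
 * [Editor's sketch: the lock-ordering rule above in miniature. This is the
 * same drop-and-reacquire sequence seglock_destroylock performs below when
 * it needs winlock_mutex while already holding a SegLock mutex.]
 *
 *	mutex_exit(&lp->mutex);
 *	mutex_enter(&winlock_mutex);	ordering: winlock_mutex -> lp->mutex
 *	mutex_enter(&lp->mutex);
 */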
388 388
389 389 /*
390 390 * The allocation code tries to allocate from lock_free_list
391 391  * first, otherwise it uses kmem_zalloc. When the lock list is empty, all
392 392 * locks in lock_free_list are kmem_freed
393 393 */
394 394 static SegLock *lock_list = NULL; /* in-use locks */
395 395 static SegLock *lock_free_list = NULL; /* free locks */
396 396 static int next_lock = 0; /* next lock cookie */
397 397
398 398 /* Routines to find a lock in lock_list based on offset or key */
399 399 static SegLock *seglock_findlock(uint_t);
400 400 static SegLock *seglock_findkey(uint_t);
401 401
402 402 /* Routines to find and allocate SegProc structures */
403 403 static SegProc *seglock_find_specific(SegLock *, void *);
404 404 static SegProc *seglock_alloc_specific(SegLock *, void *);
405 405 #define seglock_findclient(lp) seglock_find_specific((lp), CURPROC_ID)
406 406 #define seglock_allocclient(lp) seglock_alloc_specific((lp), CURPROC_ID)
407 407
408 408 /* Delete client from lock's client list */
409 409 static void seglock_deleteclient(SegLock *, SegProc *);
410 410 static void garbage_collect_lock(SegLock *, SegProc *);
411 411
412 412 /* Create a new lock */
413 413 static SegLock *seglock_createlock(enum winlock_style);
414 414 /* Destroy lock */
415 415 static void seglock_destroylock(SegLock *);
416 416 static void lock_destroyall(void);
417 417
418 418 /* Helper functions in winlockmap_access */
419 419 static int give_mapping(SegLock *, SegProc *, uint_t);
420 420 static int lock_giveup(SegLock *, int);
421 421 static int seglock_lockfault(devmap_cookie_t, SegProc *, SegLock *, uint_t);
422 422
423 423 /* routines called from ioctl */
424 424 static int seglock_graballoc(intptr_t, enum winlock_style, int);
425 425 static int seglock_grabinfo(intptr_t, int);
426 426 static int seglock_grabfree(intptr_t, int);
427 427 static int seglock_gettimeout(intptr_t, int);
428 428 static int seglock_settimeout(intptr_t, int);
429 429 static void seglock_dump_all(void);
430 430
431 431 static int
432 432 winlock_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
433 433 {
434 434 DEBUGF(1, (CE_CONT, "winlock_attach, devi=%p, cmd=%d\n",
435 435 (void *)devi, (int)cmd));
436 436 if (cmd != DDI_ATTACH)
437 437 return (DDI_FAILURE);
438 438 if (ddi_create_minor_node(devi, "winlock", S_IFCHR, 0, DDI_PSEUDO, 0)
439 439 == DDI_FAILURE) {
440 440 return (DDI_FAILURE);
441 441 }
442 442 winlock_dip = devi;
443 443 ddi_report_dev(devi);
444 444 return (DDI_SUCCESS);
445 445 }
446 446
447 447 /*ARGSUSED*/
448 448 static int
449 449 winlock_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
450 450 {
451 451 DEBUGF(1, (CE_CONT, "winlock_detach, devi=%p, cmd=%d\n",
452 452 (void *)devi, (int)cmd));
453 453 if (cmd != DDI_DETACH)
454 454 return (DDI_FAILURE);
455 455
456 456 mutex_enter(&winlock_mutex);
457 457 if (lock_list != NULL) {
458 458 mutex_exit(&winlock_mutex);
459 459 return (DDI_FAILURE);
460 460 }
461 461 ASSERT(lock_free_list == NULL);
462 462
463 463 DEBUGF(1, (CE_CONT, "detach freeing trashpage and lockpage\n"));
464 464 /* destroy any common stuff created */
465 465 if (trashpage_cookie != NULL) {
466 466 ddi_umem_free(trashpage_cookie);
467 467 trashpage_cookie = NULL;
468 468 }
469 469 if (lockpage != NULL) {
470 470 ddi_umem_free(lockpage_cookie);
471 471 lockpage = NULL;
472 472 lockpage_cookie = NULL;
473 473 }
474 474 winlock_dip = NULL;
475 475 mutex_exit(&winlock_mutex);
476 476 return (DDI_SUCCESS);
477 477 }
478 478
479 479 /*ARGSUSED*/
480 480 static int
481 481 winlock_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
482 482 {
483 483 register int error;
484 484
485 485 /* initialize result */
486 486 *result = NULL;
487 487
488 488 /* only valid instance (i.e., getminor) is 0 */
489 489 if (getminor((dev_t)arg) >= 1)
490 490 return (DDI_FAILURE);
491 491
492 492 switch (infocmd) {
493 493 case DDI_INFO_DEVT2DEVINFO:
494 494 if (winlock_dip == NULL)
495 495 error = DDI_FAILURE;
496 496 else {
497 497 *result = (void *)winlock_dip;
498 498 error = DDI_SUCCESS;
499 499 }
500 500 break;
501 501 case DDI_INFO_DEVT2INSTANCE:
502 502 *result = (void *)0;
503 503 error = DDI_SUCCESS;
504 504 break;
505 505 default:
506 506 error = DDI_FAILURE;
507 507 }
508 508 return (error);
509 509 }
510 510
511 511
512 512 /*ARGSUSED*/
513 513 int
514 514 winlock_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
515 515 cred_t *cred, int *rval)
516 516 {
517 517 DEBUGF(1, (CE_CONT, "winlockioctl: cmd=%d, arg=0x%p\n",
518 518 cmd, (void *)arg));
519 519
520 520 switch (cmd) {
521 521 /*
522 522 * ioctls that used to be handled by framebuffers (defined in fbio.h)
523 523 * RFE: No code really calls the GRAB* ioctls now. Should EOL.
524 524 */
525 525
526 526 case GRABPAGEALLOC:
527 527 return (seglock_graballoc(arg, OLDSTYLE_LOCK, mode));
528 528 case GRABPAGEFREE:
529 529 return (seglock_grabfree(arg, mode));
530 530 case GRABLOCKINFO:
531 531 return (seglock_grabinfo(arg, mode));
532 532 case GRABATTACH:
533 533 return (EINVAL); /* GRABATTACH is not supported (never was) */
534 534
535 535 case WINLOCKALLOC:
536 536 return (seglock_graballoc(arg, NEWSTYLE_LOCK, mode));
537 537 case WINLOCKFREE:
538 538 return (seglock_grabfree(arg, mode));
539 539 case WINLOCKSETTIMEOUT:
540 540 return (seglock_settimeout(arg, mode));
541 541 case WINLOCKGETTIMEOUT:
542 542 return (seglock_gettimeout(arg, mode));
543 543 case WINLOCKDUMP:
544 544 seglock_dump_all();
545 545 return (0);
546 546
547 547 #ifdef DEBUG
548 548 case (WIOC|255):
549 549 lock_debug = arg;
550 550 return (0);
551 551 #endif
552 552
553 553 default:
554 554 return (ENOTTY); /* Why is this not EINVAL */
555 555 }
556 556 }
557 557
558 558 int
559 559 winlocksegmap(
560 560 dev_t dev, /* major:minor */
561 561 off_t off, /* device offset from mmap(2) */
562 562 struct as *as, /* user's address space. */
563 563 caddr_t *addr, /* address from mmap(2) */
564 564 off_t len, /* length from mmap(2) */
565 565 uint_t prot, /* user wants this access */
566 566 uint_t maxprot, /* this is the maximum the user can have */
567 567 uint_t flags, /* flags from mmap(2) */
568 568 cred_t *cred)
569 569 {
570 570 DEBUGF(1, (CE_CONT, "winlock_segmap off=%lx, len=0x%lx\n", off, len));
571 571
572 572 /* Only MAP_SHARED mappings are supported */
573 573 if ((flags & MAP_TYPE) == MAP_PRIVATE) {
574 574 return (EINVAL);
575 575 }
576 576
577 577 /* Use devmap_setup to setup the mapping */
578 578 return (devmap_setup(dev, (offset_t)off, as, addr, (size_t)len, prot,
579 579 maxprot, flags, cred));
580 580 }
581 581
582 582 /*ARGSUSED*/
583 583 int
584 584 winlock_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
585 585 size_t *maplen, uint_t model)
586 586 {
587 587 SegLock *lp;
588 588 int err;
589 589
590 590 DEBUGF(1, (CE_CONT, "winlock devmap: off=%llx, len=%lx, dhp=%p\n",
591 591 off, len, (void *)dhp));
592 592
593 593 *maplen = 0;
594 594
595 595 /* Check if the lock exists, i.e., has been created by alloc */
596 596 /* off is the sy_ident returned in the alloc ioctl */
597 597 if ((lp = seglock_findlock((uint_t)off)) == NULL) {
598 598 return (ENXIO);
599 599 }
600 600
601 601 /*
602 602  * The offset bits in the mmap(2) offset have to be the same as in lockptr,
603 603  * OR the offset should be 0 (i.e., masked off).
604 604 */
605 605 if (((off & PAGEOFFSET) != 0) &&
606 606 ((off ^ (uintptr_t)(lp->lockptr)) & (offset_t)PAGEOFFSET) != 0) {
607 607 DEBUGF(2, (CE_CONT,
608 608 "mmap offset %llx mismatch with lockptr %p\n",
609 609 off, (void *)lp->lockptr));
610 610 mutex_exit(&lp->mutex); /* mutex held by seglock_findlock */
611 611 return (EINVAL);
612 612 }
613 613
614 614 /* Only supports PAGESIZE length mappings */
615 615 if (len != PAGESIZE) {
616 616 mutex_exit(&lp->mutex); /* mutex held by seglock_findlock */
617 617 return (EINVAL);
618 618 }
619 619
620 620 /*
621 621 * Set up devmap to point at page associated with lock
622 622  * RFE: At this point we don't know if this is a lockpage or unlockpage;
623 623  * a lockpage would not need the DEVMAP_ALLOW_REMAP setting.
624 624 * We could have kept track of the mapping order here,
625 625 * but devmap framework does not support storing any state in this
626 626 * devmap callback as it does not callback for error cleanup if some
627 627 * other error happens in the framework.
628 628 * RFE: We should modify the winlock mmap interface so that the
629 629 * user process marks in the offset passed in whether this is for a
630 630  * lock or unlock mapping instead of guessing based on the order of maps.
631 631  * This would clean up other things (such as in fork).
632 632 */
633 633 if ((err = devmap_umem_setup(dhp, winlock_dip, &winlockmap_ops,
634 634 lp->umem_cookie, 0, PAGESIZE, WINLOCK_PROT,
635 635 DEVMAP_ALLOW_REMAP, 0)) < 0) {
636 636 mutex_exit(&lp->mutex); /* held by seglock_findlock */
637 637 return (err);
638 638 }
639 639 /*
640 640 * No mappings are loaded to those segments yet. The correctness
641 641 * of the winlock semantics depends on the devmap framework/seg_dev NOT
642 642 * loading the translations without calling _access callback.
643 643 */
644 644
645 645 mutex_exit(&lp->mutex); /* mutex held by seglock_findlock */
646 646 *maplen = PAGESIZE;
647 647 return (0);
648 648 }
649 649
650 650 /*
651 651 * This routine is called by the devmap framework after the devmap entry point
652 652 * above and the mapping is setup in seg_dev.
653 653 * We store the pointer to the per-process context in the devmap private data.
654 654 */
655 655 /*ARGSUSED*/
656 656 static int
657 657 winlockmap_map(devmap_cookie_t dhp, dev_t dev, uint_t flags, offset_t off,
658 658 size_t len, void **pvtp)
659 659 {
660 660 SegLock *lp = seglock_findlock((uint_t)off); /* returns w/ mutex held */
661 661 SegProc *sdp;
662 662
663 663 ASSERT(len == PAGESIZE);
664 664
665 665 /* Find the per-process context for this lock, alloc one if not found */
666 666 sdp = seglock_allocclient(lp);
667 667
668 668 /*
669 669 * RFE: Determining which is a lock vs unlock seg is based on order
670 670  * of mmaps; we should change that to be derivable from off.
671 671 */
672 672 if (sdp->lockseg == NULL) {
673 673 sdp->lockseg = dhp;
674 674 } else if (sdp->unlockseg == NULL) {
675 675 sdp->unlockseg = dhp;
676 676 } else {
677 677 /* attempting to map lock more than twice */
678 678 mutex_exit(&lp->mutex); /* mutex held by seglock_findlock */
679 679 return (ENOMEM);
680 680 }
681 681
682 682 *pvtp = sdp;
683 683 mutex_exit(&lp->mutex); /* mutex held by seglock_findlock */
684 684 return (DDI_SUCCESS);
685 685 }
686 686
687 687 /*
688 688 * duplicate a segment, as in fork()
689 689 * On fork, the child inherits the mappings to the lock
690 690 * lp->alloccount is NOT incremented, so child should not do a free().
691 691 * Semantics same as if done an alloc(), map(), map().
692 692 * This way it would work fine if doing an exec() variant later
693 693 * Child does not inherit any UFLAGS set in parent
694 694 * The lock and unlock pages are started off unmapped, i.e., child does not
695 695 * own the lock.
696 696 * The code assumes that the child process has a valid pid at this point
697 697  * RFE: These semantics depend on fork not duplicating the hat mappings
698 698 * (which is the current implementation). To enforce it would need to
699 699 * call devmap_unload from here - not clear if that is allowed.
700 700 */
701 701
702 702 static int
703 703 winlockmap_dup(devmap_cookie_t dhp, void *oldpvt, devmap_cookie_t new_dhp,
704 704 void **newpvt)
705 705 {
706 706 SegProc *sdp = (SegProc *)oldpvt;
707 707 SegProc *ndp;
708 708 SegLock *lp = sdp->lp;
709 709
710 710 mutex_enter(&lp->mutex);
711 711 ASSERT((dhp == sdp->lockseg) || (dhp == sdp->unlockseg));
712 712
713 713 /*
714 714 * Note: At this point, the child process does have a pid, but
715 715  * the arguments passed to as_dup and hence to devmap_dup don't pass it
716 716 * down. So we cannot use normal seglock_findclient - which finds the
717 717 * parent sdp itself!
718 718  * Instead we allocate the child's SegProc using the child's "as" pointer.
719 719  * RFE: we are using the as structure, which means peeking into the
720 720 * devmap_cookie. This is not DDI-compliant. Need a compliant way of
721 721 * getting at either the as or, better, a way to get the child's new pid
722 722 */
723 723 ndp = seglock_alloc_specific(lp,
724 724 (void *)((devmap_handle_t *)new_dhp)->dh_seg->s_as);
725 725 ASSERT(ndp != sdp);
726 726
727 727 if (sdp->lockseg == dhp) {
728 728 ASSERT(ndp->lockseg == NULL);
729 729 ndp->lockseg = new_dhp;
730 730 } else {
731 731 ASSERT(sdp->unlockseg == dhp);
732 732 ASSERT(ndp->unlockseg == NULL);
733 733 ndp->unlockseg = new_dhp;
734 734 if (sdp->flag & TRASHPAGE) {
735 735 ndp->flag |= TRASHPAGE;
736 736 }
737 737 }
738 738 mutex_exit(&lp->mutex);
739 739 *newpvt = (void *)ndp;
740 740 return (0);
741 741 }
742 742
743 743
744 744 /*ARGSUSED*/
745 745 static void
746 746 winlockmap_unmap(devmap_cookie_t dhp, void *pvtp, offset_t off, size_t len,
747 747 devmap_cookie_t new_dhp1, void **newpvtp1,
748 748 devmap_cookie_t new_dhp2, void **newpvtp2)
749 749 {
750 750 SegProc *sdp = (SegProc *)pvtp;
751 751 SegLock *lp = sdp->lp;
752 752
753 753 /*
754 754 * We always create PAGESIZE length mappings, so there should never
755 755 * be a partial unmapping case
756 756 */
757 757 ASSERT((new_dhp1 == NULL) && (new_dhp2 == NULL));
758 758
759 759 mutex_enter(&lp->mutex);
760 760 ASSERT((dhp == sdp->lockseg) || (dhp == sdp->unlockseg));
761 761 /* make sure this process doesn't own the lock */
762 762 if (sdp == lp->owner) {
763 763 /*
764 764  * Not handling errors - i.e., errors in unloading the mapping.
765 765  * As part of unmapping, the hat/seg structures get torn down anyway.
766 766 */
767 767 (void) lock_giveup(lp, 0);
768 768 }
769 769
770 770 ASSERT(sdp != lp->owner);
771 771 if (sdp->lockseg == dhp) {
772 772 sdp->lockseg = NULL;
773 773 } else {
774 774 ASSERT(sdp->unlockseg == dhp);
775 775 sdp->unlockseg = NULL;
776 776 sdp->flag &= ~TRASHPAGE; /* clear flag if set */
777 777 }
778 778
779 779 garbage_collect_lock(lp, sdp);
780 780 }
781 781
782 782 /*ARGSUSED*/
783 783 static int
784 784 winlockmap_access(devmap_cookie_t dhp, void *pvt, offset_t off, size_t len,
785 785 uint_t type, uint_t rw)
786 786 {
787 787 SegProc *sdp = (SegProc *)pvt;
788 788 SegLock *lp = sdp->lp;
789 789 int err;
790 790
791 791 /* Driver handles only DEVMAP_ACCESS type of faults */
792 792 if (type != DEVMAP_ACCESS)
793 793 return (-1);
794 794
795 795 mutex_enter(&lp->mutex);
796 796 ASSERT((dhp == sdp->lockseg) || (dhp == sdp->unlockseg));
797 797
798 798 /* should be using a SegProc that corresponds to current process */
799 799 ASSERT(ID(sdp) == CURPROC_ID);
800 800
801 801 /*
802 802 * If process is faulting but does not have both segments mapped
803 803 * return error (should cause a segv).
804 804 * RFE: could give it a permanent trashpage
805 805 */
806 806 if ((sdp->lockseg == NULL) || (sdp->unlockseg == NULL)) {
807 807 err = -1;
808 808 } else {
809 809 err = seglock_lockfault(dhp, sdp, lp, rw);
810 810 }
811 811 mutex_exit(&lp->mutex);
812 812 return (err);
813 813 }
814 814
815 815 /* INTERNAL ROUTINES START HERE */
816 816
817 817
818 818
819 819 /*
820 820 * search the lock_list list for the specified cookie
821 821  * The cookie is the sy_ident field returned by the ALLOC ioctl.
822 822 * This has two parts:
823 823 * the pageoffset bits contain offset into the lock page.
824 824 * the pagenumber bits contain the lock id.
825 825 * The user code is supposed to pass in only the pagenumber portion
826 826  * (i.e., mask off the pageoffset bits). However, the code below
827 827  * does the masking in case the users are not diligent.
828 828  * If found, returns with the mutex for the SegLock structure held.
829 829 */
830 830 static SegLock *
831 831 seglock_findlock(uint_t cookie)
832 832 {
833 833 SegLock *lp;
834 834
835 835 cookie &= (uint_t)PAGEMASK; /* remove pageoffset bits to get cookie */
836 836 mutex_enter(&winlock_mutex);
837 837 for (lp = lock_list; lp != NULL; lp = lp->next) {
838 838 mutex_enter(&lp->mutex);
839 839 if (cookie == lp->cookie) {
840 840 break; /* return with lp->mutex held */
841 841 }
842 842 mutex_exit(&lp->mutex);
843 843 }
844 844 mutex_exit(&winlock_mutex);
845 845 return (lp);
846 846 }
847 847
848 848 /*
849 849 * search the lock_list list for the specified non-zero key
850 850  * If found, returns with the mutex for the SegLock structure held
851 851 */
852 852 static SegLock *
853 853 seglock_findkey(uint_t key)
854 854 {
855 855 SegLock *lp;
856 856
857 857 ASSERT(MUTEX_HELD(&winlock_mutex));
858 858  	/* The driver allows multiple locks with key 0, don't search */
859 859 if (key == 0)
860 860 return (NULL);
861 861 for (lp = lock_list; lp != NULL; lp = lp->next) {
862 862 mutex_enter(&lp->mutex);
863 863 if (key == lp->key)
864 864 break;
865 865 mutex_exit(&lp->mutex);
866 866 }
867 867 return (lp);
868 868 }
869 869
870 870 /*
871 871 * Create a new lock context.
872 872 * Returns with SegLock mutex held
873 873 */
874 874
875 875 static SegLock *
876 876 seglock_createlock(enum winlock_style style)
877 877 {
878 878 SegLock *lp;
879 879
880 880 DEBUGF(3, (CE_CONT, "seglock_createlock: free_list=%p, next_lock %d\n",
881 881 (void *)lock_free_list, next_lock));
882 882
883 883 ASSERT(MUTEX_HELD(&winlock_mutex));
884 884 if (lock_free_list != NULL) {
885 885 lp = lock_free_list;
886 886 lock_free_list = lp->next;
887 887 } else if (next_lock >= MAX_LOCKS) {
888 888 return (NULL);
889 889 } else {
890 890 lp = kmem_zalloc(sizeof (SegLock), KM_SLEEP);
891 891 lp->cookie = (next_lock + 1) * (uint_t)PAGESIZE;
892 892 mutex_init(&lp->mutex, NULL, MUTEX_DEFAULT, NULL);
893 893 cv_init(&lp->locksleep, NULL, CV_DEFAULT, NULL);
894 894 ++next_lock;
895 895 }
896 896
897 897 mutex_enter(&lp->mutex);
898 898 ASSERT((lp->cookie/PAGESIZE) <= next_lock);
899 899
900 900 if (style == OLDSTYLE_LOCK) {
901 901 lp->lockptr = (int *)ddi_umem_alloc(PAGESIZE,
902 902 DDI_UMEM_SLEEP, &(lp->umem_cookie));
903 903 } else {
904 904 lp->lockptr = ((int *)lockpage) + ((lp->cookie/PAGESIZE) - 1);
905 905 lp->umem_cookie = lockpage_cookie;
906 906 }
907 907
908 908 ASSERT(lp->lockptr != NULL);
909 909 lp->style = style;
910 910 lp->sleepers = 0;
911 911 lp->alloccount = 1;
912 912 lp->timeout = LOCKTIME*hz;
913 913 lp->clients = NULL;
914 914 lp->owner = NULL;
915 915 LOCK(lp) = 0;
916 916 lp->next = lock_list;
917 917 lock_list = lp;
918 918 return (lp);
919 919 }
920 920
921 921 /*
922 922  * Routine to destroy a lock structure.
923 923 * This routine is called while holding the lp->mutex but not the
924 924 * winlock_mutex.
925 925 */
926 926
927 927 static void
928 928 seglock_destroylock(SegLock *lp)
929 929 {
930 930 ASSERT(MUTEX_HELD(&lp->mutex));
931 931 ASSERT(!MUTEX_HELD(&winlock_mutex));
932 932
933 933 DEBUGF(3, (CE_CONT, "destroying lock cookie %d key %d\n",
934 934 lp->cookie, lp->key));
935 935
936 936 ASSERT(lp->alloccount == 0);
937 937 ASSERT(lp->clients == NULL);
938 938 ASSERT(lp->owner == NULL);
939 939 ASSERT(lp->sleepers == 0);
940 940
941 941 /* clean up/release fields in lp */
942 942 if (lp->style == OLDSTYLE_LOCK) {
943 943 ddi_umem_free(lp->umem_cookie);
944 944 }
945 945 lp->umem_cookie = NULL;
946 946 lp->lockptr = NULL;
947 947 lp->key = 0;
948 948
949 949 /*
950 950 * Reduce cookie by 1, makes it non page-aligned and invalid
951 951 * This prevents any valid lookup from finding this lock
952 952  	 * so when we drop the lock and regrab it, it will still
953 953  	 * be there and nobody else will have attached to it.
954 954 */
955 955 lp->cookie--;
956 956
957 957 /* Drop and reacquire mutexes in right order */
958 958 mutex_exit(&lp->mutex);
959 959 mutex_enter(&winlock_mutex);
960 960 mutex_enter(&lp->mutex);
961 961
962 962 /* reincrement the cookie to get the original valid cookie */
963 963 lp->cookie++;
964 964 ASSERT((lp->cookie & PAGEOFFSET) == 0);
965 965 ASSERT(lp->alloccount == 0);
966 966 ASSERT(lp->clients == NULL);
967 967 ASSERT(lp->owner == NULL);
968 968 ASSERT(lp->sleepers == 0);
969 969
970 970 /* Remove lp from lock_list */
971 971 if (lock_list == lp) {
972 972 lock_list = lp->next;
973 973 } else {
974 974 SegLock *tmp = lock_list;
975 975 while (tmp->next != lp) {
976 976 tmp = tmp->next;
977 977 ASSERT(tmp != NULL);
978 978 }
979 979 tmp->next = lp->next;
980 980 }
981 981
982 982 /* Add to lock_free_list */
983 983 lp->next = lock_free_list;
984 984 lock_free_list = lp;
985 985 mutex_exit(&lp->mutex);
986 986
987 987 /* Check if all locks deleted and cleanup */
988 988 if (lock_list == NULL) {
989 989 lock_destroyall();
990 990 }
991 991
992 992 mutex_exit(&winlock_mutex);
993 993 }
994 994
995 995 /* Routine to find a SegProc corresponding to the tag */
996 996
997 997 static SegProc *
998 998 seglock_find_specific(SegLock *lp, void *tag)
999 999 {
1000 1000 SegProc *sdp;
1001 1001
1002 1002 ASSERT(MUTEX_HELD(&lp->mutex));
1003 1003 ASSERT(tag != NULL);
1004 1004 for (sdp = lp->clients; sdp != NULL; sdp = sdp->next) {
1005 1005 if (ID(sdp) == tag)
1006 1006 break;
1007 1007 }
1008 1008 return (sdp);
1009 1009 }
1010 1010
1011 1011 /* Routine to find (and if needed allocate) a SegProc corresponding to tag */
1012 1012
1013 1013 static SegProc *
1014 1014 seglock_alloc_specific(SegLock *lp, void *tag)
1015 1015 {
1016 1016 SegProc *sdp;
1017 1017
1018 1018 ASSERT(MUTEX_HELD(&lp->mutex));
1019 1019 ASSERT(tag != NULL);
1020 1020
1021 1021 /* Search and return if existing one found */
1022 1022 sdp = seglock_find_specific(lp, tag);
1023 1023 if (sdp != NULL)
1024 1024 return (sdp);
1025 1025
1026 1026 DEBUGF(3, (CE_CONT, "Allocating segproc structure for tag %p lock %d\n",
1027 1027 tag, lp->cookie));
1028 1028
1029 1029 /* Allocate a new SegProc */
1030 1030 sdp = kmem_zalloc(sizeof (SegProc), KM_SLEEP);
1031 1031 sdp->next = lp->clients;
1032 1032 lp->clients = sdp;
1033 1033 sdp->lp = lp;
1034 1034 ID(sdp) = tag;
1035 1035 return (sdp);
1036 1036 }
1037 1037
1038 1038 /*
1039 1039 * search a context's client list for the given client and delete
1040 1040 */
1041 1041
1042 1042 static void
1043 1043 seglock_deleteclient(SegLock *lp, SegProc *sdp)
1044 1044 {
1045 1045 ASSERT(MUTEX_HELD(&lp->mutex));
1046 1046 ASSERT(lp->owner != sdp); /* Not current owner of lock */
1047 1047 ASSERT(sdp->lockseg == NULL); /* Mappings torn down */
1048 1048 ASSERT(sdp->unlockseg == NULL);
1049 1049
1050 1050 DEBUGF(3, (CE_CONT, "Deleting segproc structure for pid %d lock %d\n",
1051 1051 ddi_get_pid(), lp->cookie));
1052 1052 if (lp->clients == sdp) {
1053 1053 lp->clients = sdp->next;
1054 1054 } else {
1055 1055 SegProc *tmp = lp->clients;
1056 1056 while (tmp->next != sdp) {
1057 1057 tmp = tmp->next;
1058 1058 ASSERT(tmp != NULL);
1059 1059 }
1060 1060 tmp->next = sdp->next;
1061 1061 }
1062 1062 kmem_free(sdp, sizeof (SegProc));
1063 1063 }
1064 1064
1065 1065 /*
1066 1066 * Routine to verify if a SegProc and SegLock
1067 1067 * structures are empty/idle.
1068 1068 * Destroys the structures if they are ready
1069 1069  * Can be called with sdp == NULL to verify only the lock state.
1070 1070  * The caller should hold lp->mutex,
1071 1071  * and this routine drops the mutex.
1072 1072 */
1073 1073 static void
1074 1074 garbage_collect_lock(SegLock *lp, SegProc *sdp)
1075 1075 {
1076 1076 ASSERT(MUTEX_HELD(&lp->mutex));
1077 1077 /* see if both segments unmapped from client structure */
1078 1078 if ((sdp != NULL) && (sdp->lockseg == NULL) && (sdp->unlockseg == NULL))
1079 1079 seglock_deleteclient(lp, sdp);
1080 1080
1081 1081 /* see if this is last client in the entire lock context */
1082 1082 if ((lp->clients == NULL) && (lp->alloccount == 0)) {
1083 1083 seglock_destroylock(lp);
1084 1084 } else {
1085 1085 mutex_exit(&lp->mutex);
1086 1086 }
1087 1087 }
1088 1088
1089 1089
1090 1090 /* IOCTLS START HERE */
1091 1091
1092 1092 static int
1093 1093 seglock_grabinfo(intptr_t arg, int mode)
1094 1094 {
1095 1095 int i = 1;
1096 1096
1097 1097 /* multiple clients per lock supported - see comments up top */
1098 1098 if (ddi_copyout((caddr_t)&i, (caddr_t)arg, sizeof (int), mode) != 0)
1099 1099 return (EFAULT);
1100 1100 return (0);
1101 1101 }
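/*
 * [Editor's sketch: GRABLOCKINFO usage; headers as in the first sketch
 * above, "fd" an open /dev/winlock descriptor.]
 */
static int
winlock_multiclient(int fd)
{
	int multi = 0;

	if (ioctl(fd, GRABLOCKINFO, &multi) != 0)
		return (-1);
	return (multi);		/* 1 => more than two clients per lock page */
}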
1102 1102
1103 1103 static int
1104 1104 seglock_graballoc(intptr_t arg, enum winlock_style style, int mode) /* IOCTL */
1105 1105 {
1106 1106 struct seglock *lp;
1107 1107 uint_t key;
1108 1108 struct winlockalloc wla;
1109 1109 int err;
1110 1110
1111 1111 if (style == OLDSTYLE_LOCK) {
1112 1112 key = 0;
1113 1113 } else {
1114 1114 if (ddi_copyin((caddr_t)arg, (caddr_t)&wla, sizeof (wla),
1115 1115 mode)) {
1116 1116 return (EFAULT);
1117 1117 }
1118 1118 key = wla.sy_key;
1119 1119 }
1120 1120
1121 1121 DEBUGF(3, (CE_CONT,
1122 1122 "seglock_graballoc: key=%u, style=%d\n", key, style));
1123 1123
1124 1124 mutex_enter(&winlock_mutex);
1125 1125 /* Allocate lockpage on first new style alloc */
1126 1126 if ((lockpage == NULL) && (style == NEWSTYLE_LOCK)) {
1127 1127 lockpage = ddi_umem_alloc(PAGESIZE, DDI_UMEM_SLEEP,
1128 1128 &lockpage_cookie);
1129 1129 }
1130 1130
1131 1131 /* Allocate trashpage on first alloc (any style) */
1132 1132 if (trashpage_cookie == NULL) {
1133 1133 (void) ddi_umem_alloc(PAGESIZE, DDI_UMEM_TRASH | DDI_UMEM_SLEEP,
1134 1134 &trashpage_cookie);
1135 1135 }
1136 1136
1137 1137 if ((lp = seglock_findkey(key)) != NULL) {
1138 1138 DEBUGF(2, (CE_CONT, "alloc: found lock key %d cookie %d\n",
1139 1139 key, lp->cookie));
1140 1140 ++lp->alloccount;
1141 1141 } else if ((lp = seglock_createlock(style)) != NULL) {
1142 1142 DEBUGF(2, (CE_CONT, "alloc: created lock key %d cookie %d\n",
1143 1143 key, lp->cookie));
1144 1144 lp->key = key;
1145 1145 } else {
1146 1146 DEBUGF(2, (CE_CONT, "alloc: cannot create lock key %d\n", key));
1147 1147 mutex_exit(&winlock_mutex);
1148 1148 return (ENOMEM);
1149 1149 }
1150 1150 ASSERT((lp != NULL) && MUTEX_HELD(&lp->mutex));
1151 1151
1152 1152 mutex_exit(&winlock_mutex);
1153 1153
1154 1154 if (style == OLDSTYLE_LOCK) {
1155 1155 err = ddi_copyout((caddr_t)&lp->cookie, (caddr_t)arg,
1156 1156 sizeof (lp->cookie), mode);
1157 1157 } else {
1158 1158 wla.sy_ident = lp->cookie +
1159 1159 (uint_t)((uintptr_t)(lp->lockptr) & PAGEOFFSET);
1160 1160 err = ddi_copyout((caddr_t)&wla, (caddr_t)arg,
1161 1161 sizeof (wla), mode);
1162 1162 }
1163 1163
1164 1164 if (err) {
1165 1165 /* On error, should undo allocation */
1166 1166 lp->alloccount--;
1167 1167
1168 1168 /* Verify and delete if lock is unused now */
1169 1169 garbage_collect_lock(lp, NULL);
1170 1170 return (EFAULT);
1171 1171 }
1172 1172
1173 1173 mutex_exit(&lp->mutex);
1174 1174 return (0);
1175 1175 }
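/*
 * [Editor's sketch: keyed allocation per the WINLOCKALLOC comment up top.
 * Processes passing the same nonzero key share one lock context; the second
 * caller just bumps alloccount and gets the same sy_ident back. Headers as
 * in the first sketch above; "fd" and the key value are assumptions.]
 */
static int
winlock_alloc_shared(int fd, uint_t key, uint_t *identp)
{
	struct winlockalloc wla;

	wla.sy_key = key;	/* nonzero, agreed upon by the processes */
	if (ioctl(fd, WINLOCKALLOC, &wla) != 0)
		return (-1);
	*identp = wla.sy_ident;
	return (0);
}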
1176 1176
1177 1177 static int
1178 1178 seglock_grabfree(intptr_t arg, int mode) /* IOCTL */
1179 1179 {
1180 1180 struct seglock *lp;
1181 1181 uint_t offset;
1182 1182
1183 1183 if (ddi_copyin((caddr_t)arg, &offset, sizeof (offset), mode)
1184 1184 != 0) {
1185 1185 return (EFAULT);
1186 1186 }
1187 1187 DEBUGF(2, (CE_CONT, "seglock_grabfree: offset=%u", offset));
1188 1188
1189 1189 if ((lp = seglock_findlock(offset)) == NULL) {
1190 1190 DEBUGF(2, (CE_CONT, "did not find lock\n"));
1191 1191 return (EINVAL);
1192 1192 }
1193 1193 DEBUGF(3, (CE_CONT, " lock key %d, cookie %d, alloccount %d\n",
1194 1194 lp->key, lp->cookie, lp->alloccount));
1195 1195
1196 1196 if (lp->alloccount > 0)
1197 1197 lp->alloccount--;
1198 1198
1199 1199 /* Verify and delete if lock is unused now */
1200 1200 garbage_collect_lock(lp, NULL);
1201 1201 return (0);
1202 1202 }
1203 1203
1204 1204
1205 1205 /*
1206 1206  * Sets the timeout in the lock and the UFLAGS in the client.
1207 1207  * The UFLAGS are stored in the client structure and persist only
1208 1208  * until the unmap of the lock pages. If the process sets UFLAGS,
1209 1209  * does a map of the lock/unlock pages, and unmaps them, the client
1210 1210  * structure will get deleted and the UFLAGS will be lost. The process
1211 1211  * will need to set up the flags again.
1212 1212 */
1213 1213 static int
1214 1214 seglock_settimeout(intptr_t arg, int mode) /* IOCTL */
1215 1215 {
1216 1216 SegLock *lp;
1217 1217 SegProc *sdp;
1218 1218 struct winlocktimeout wlt;
1219 1219
1220 1220 if (ddi_copyin((caddr_t)arg, &wlt, sizeof (wlt), mode) != 0) {
1221 1221 return (EFAULT);
1222 1222 }
1223 1223
1224 1224 if ((lp = seglock_findlock(wlt.sy_ident)) == NULL)
1225 1225 return (EINVAL);
1226 1226
1227 1227 lp->timeout = MSEC_TO_TICK_ROUNDUP(wlt.sy_timeout);
1228 1228 /* if timeout modified, wake up any sleepers */
1229 1229 if (lp->sleepers > 0) {
1230 1230 cv_broadcast(&lp->locksleep);
1231 1231 }
1232 1232
1233 1233 /*
1234 1234 * If the process is trying to set UFLAGS,
1235 1235 * Find the client segproc and allocate one if needed
1236 1236 * Set the flags preserving the kernel flags
1237 1237 * If the process is clearing UFLAGS
1238 1238  *	Find the client segproc but don't allocate one if it does not exist
1239 1239 */
1240 1240 if (wlt.sy_flags & UFLAGS) {
1241 1241 sdp = seglock_allocclient(lp);
1242 1242 sdp->flag = sdp->flag & KFLAGS | wlt.sy_flags & UFLAGS;
1243 1243 } else if ((sdp = seglock_findclient(lp)) != NULL) {
1244 1244 sdp->flag = sdp->flag & KFLAGS;
1245 1245 /* If clearing UFLAGS leaves the segment or lock idle, delete */
1246 1246 garbage_collect_lock(lp, sdp);
1247 1247 return (0);
1248 1248 }
1249 1249 mutex_exit(&lp->mutex); /* mutex held by seglock_findlock */
1250 1250 return (0);
1251 1251 }
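/*
 * [Editor's sketch: setting the break timeout via struct winlocktimeout,
 * whose sy_ident/sy_timeout/sy_flags fields are the ones the driver code
 * above copies in and out. Headers as in the first sketch; SY_NOTIMEOUT is
 * the per-client flag checked by the lockfault code below.]
 */
static int
winlock_set_timeout(int fd, uint_t ident, uint_t ms)
{
	struct winlocktimeout wlt;

	wlt.sy_ident = ident;
	wlt.sy_timeout = ms;	/* 0 => never time out; else rounded to ticks */
	wlt.sy_flags = 0;	/* or e.g. SY_NOTIMEOUT to exempt this client */
	return (ioctl(fd, WINLOCKSETTIMEOUT, &wlt));
}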
1252 1252
1253 1253 static int
1254 1254 seglock_gettimeout(intptr_t arg, int mode)
1255 1255 {
1256 1256 SegLock *lp;
1257 1257 SegProc *sdp;
1258 1258 struct winlocktimeout wlt;
1259 1259
1260 1260 if (ddi_copyin((caddr_t)arg, &wlt, sizeof (wlt), mode) != 0)
1261 1261 return (EFAULT);
1262 1262
1263 1263 if ((lp = seglock_findlock(wlt.sy_ident)) == NULL)
1264 1264 return (EINVAL);
1265 1265
1266 1266 wlt.sy_timeout = TICK_TO_MSEC(lp->timeout);
1267 1267 /*
1268 1268  * If this process has an active allocated lock, return those flags;
1269 1269  * if not, return 0.
1270 1270  * Don't allocate a client structure on gettimeout.
1271 1271 */
1272 1272 if ((sdp = seglock_findclient(lp)) != NULL) {
1273 1273 wlt.sy_flags = sdp->flag & UFLAGS;
1274 1274 } else {
1275 1275 wlt.sy_flags = 0;
1276 1276 }
1277 1277 mutex_exit(&lp->mutex); /* mutex held by seglock_findlock */
1278 1278
1279 1279 if (ddi_copyout(&wlt, (caddr_t)arg, sizeof (wlt), mode) != 0)
1280 1280 return (EFAULT);
1281 1281
1282 1282 return (0);
1283 1283 }
1284 1284
1285 1285 /*
1286 1286 * Handle lock segment faults here...
1287 1287 *
1288 1288 * This is where the magic happens.
1289 1289 */
1290 1290
1291 1291 /* ARGSUSED */
1292 1292 static int
1293 1293 seglock_lockfault(devmap_cookie_t dhp, SegProc *sdp, SegLock *lp, uint_t rw)
1294 1294 {
1295 1295 SegProc *owner = lp->owner;
1296 1296 int err;
1297 1297
1298 1298 ASSERT(MUTEX_HELD(&lp->mutex));
1299 1299 DEBUGF(3, (CE_CONT,
1300 1300 "seglock_lockfault: hdl=%p, sdp=%p, lp=%p owner=%p\n",
1301 1301 (void *)dhp, (void *)sdp, (void *)lp, (void *)owner));
1302 1302
1303 1303 /* lockfault is always called with sdp in current process context */
1304 1304 ASSERT(ID(sdp) == CURPROC_ID);
1305 1305
1306 1306 /* If Lock has no current owner, give the mapping to new owner */
1307 1307 if (owner == NULL) {
1308 1308 DEBUGF(4, (CE_CONT, " lock has no current owner\n"));
1309 1309 return (give_mapping(lp, sdp, rw));
1310 1310 }
1311 1311
1312 1312 if (owner == sdp) {
1313 1313 /*
1314 1314 * Current owner is faulting on owned lock segment OR
1315 1315 * Current owner is faulting on unlock page and has no waiters
1316 1316 * Then can give the mapping to current owner
1317 1317 */
1318 1318 if ((sdp->lockseg == dhp) || (lp->sleepers == 0)) {
1319 1319 DEBUGF(4, (CE_CONT, "lock owner faulting\n"));
1320 1320 return (give_mapping(lp, sdp, rw));
1321 1321 } else {
1322 1322 /*
1323 1323 * Owner must be writing to unlock page and there are waiters.
1324 1324  	 * Other cases have been checked earlier.
1325 1325 * Release the lock, owner, and owners mappings
1326 1326 * As the owner is trying to write to the unlock page, leave
1327 1327 * it with a trashpage mapping and wake up the sleepers
1328 1328 */
1329 1329 ASSERT((dhp == sdp->unlockseg) && (lp->sleepers != 0));
1330 1330 DEBUGF(4, (CE_CONT, " owner fault on unlock seg w/ sleeper\n"));
1331 1331 return (lock_giveup(lp, 1));
1332 1332 }
1333 1333 }
1334 1334
1335 1335 ASSERT(owner != sdp);
1336 1336
1337 1337 /*
1338 1338 * If old owner faulting on trash unlock mapping,
1339 1339 * load hat mappings to trash page
1340 1340  * RFE: non-owners should NOT be faulting on the unlock mapping, as they
1341 1341  * are supposed to fault on the lock seg first. We could give them
1342 1342 * a trash page or return error.
1343 1343 */
1344 1344 if ((sdp->unlockseg == dhp) && (sdp->flag & TRASHPAGE)) {
1345 1345 DEBUGF(4, (CE_CONT, " old owner reloads trash mapping\n"));
1346 1346 return (devmap_load(sdp->unlockseg, lp->cookie, PAGESIZE,
1347 1347 DEVMAP_ACCESS, rw));
1348 1348 }
1349 1349
1350 1350 /*
1351 1351 * Non-owner faulting. Need to check current LOCK state.
1352 1352 *
1353 1353 * Before reading lock value in LOCK(lp), we must make sure that
1354 1354 * the owner cannot change its value before we change mappings
1355 1355 * or else we could end up either with a hung process
1356 1356 * or more than one process thinking they have the lock.
1357 1357 * We do that by unloading the owner's mappings
1358 1358 */
1359 1359 DEBUGF(4, (CE_CONT, " owner loses mappings to check lock state\n"));
1360 1360 err = devmap_unload(owner->lockseg, lp->cookie, PAGESIZE);
1361 1361 err |= devmap_unload(owner->unlockseg, lp->cookie, PAGESIZE);
1362 1362 if (err != 0)
1363 1363 return (err); /* unable to remove owner mapping */
1364 1364
1365 1365 /*
1366 1366 * If lock is not held, then current owner mappings were
1367 1367 * unloaded above and we can give the lock to the new owner
1368 1368 */
1369 1369 if (LOCK(lp) == 0) {
1370 1370 DEBUGF(4, (CE_CONT,
1371 1371 "Free lock (%p): Giving mapping to new owner %d\n",
1372 1372 (void *)lp, ddi_get_pid()));
1373 1373 return (give_mapping(lp, sdp, rw));
1374 1374 }
1375 1375
1376 1376 DEBUGF(4, (CE_CONT, " lock held, sleeping\n"));
1377 1377
1378 1378 /*
1379 1379 * A non-owning process tried to write (presumably to the lockpage,
1380 1380 * but it doesn't matter) but the lock is held; we need to sleep for
1381 1381 * the lock while there is an owner.
1382 1382 */
1383 1383
1384 1384 lp->sleepers++;
1385 1385 while ((owner = lp->owner) != NULL) {
1386 1386 int rval;
1387 1387
1388 1388 if ((lp->timeout == 0) || (owner->flag & SY_NOTIMEOUT)) {
1389 1389 /*
1390 1390 * No timeout has been specified for this lock;
1391 1391 * we'll simply sleep on the condition variable.
1392 1392 */
1393 1393 rval = cv_wait_sig(&lp->locksleep, &lp->mutex);
1394 1394 } else {
1395 1395 /*
1396 1396 * A timeout _has_ been specified for this lock. We need
1397 1397 * to wake up and possibly steal this lock if the owner
1398 1398 * does not let it go. Note that all sleepers on a lock
1399 1399 * with a timeout wait; the sleeper with the earliest
1400 1400  	 * timeout will wake up and potentially steal the lock.
1401 1401 * Stealing the lock will cause a broadcast on the
1402 1402 * locksleep cv and thus kick the other timed waiters
1403 1403 * and cause everyone to restart in a new timedwait
1404 1404 */
1405 1405 rval = cv_reltimedwait_sig(&lp->locksleep,
1406 1406 &lp->mutex, lp->timeout, TR_CLOCK_TICK);
1407 1407 }
1408 1408
1409 1409 /*
1410 1410 * Timeout and still old owner - steal lock
1411 1411 * Force-Release lock and give old owner a trashpage mapping
1412 1412 */
1413 1413 if ((rval == -1) && (lp->owner == owner)) {
1414 1414 /*
1415 1415 * if any errors in lock_giveup, go back and sleep/retry
1416 1416 * If successful, will break out of loop
1417 1417 */
1418 1418 cmn_err(CE_NOTE, "Process %d timed out on lock %d\n",
1419 1419 ddi_get_pid(), lp->cookie);
1420 1420 (void) lock_giveup(lp, 1);
1421 1421 } else if (rval == 0) { /* signal pending */
1422 1422 cmn_err(CE_NOTE,
1423 1423 "Process %d signalled while waiting on lock %d\n",
1424 1424 ddi_get_pid(), lp->cookie);
1425 1425 lp->sleepers--;
1426 1426 return (FC_MAKE_ERR(EINTR));
1427 1427 }
1428 1428 }
1429 1429
1430 1430 lp->sleepers--;
1431 1431 /*
1432 1432 * Give mapping to this process and save a fault later
1433 1433 */
1434 1434 return (give_mapping(lp, sdp, rw));
1435 1435 }
1436 1436
1437 1437 /*
1438 1438 * Utility: give a valid mapping to lock and unlock pages to current process.
1439 1439 * Caller responsible for unloading old owner's mappings
1440 1440 */
1441 1441
1442 1442 static int
1443 1443 give_mapping(SegLock *lp, SegProc *sdp, uint_t rw)
1444 1444 {
1445 1445 int err = 0;
1446 1446
1447 1447 ASSERT(MUTEX_HELD(&lp->mutex));
1448 1448 ASSERT(!((lp->owner == NULL) && (LOCK(lp) != 0)));
1449 1449 /* give_mapping is always called with sdp in current process context */
1450 1450 ASSERT(ID(sdp) == CURPROC_ID);
1451 1451
1452 1452 /* remap any old trash mappings */
1453 1453 if (sdp->flag & TRASHPAGE) {
1454 1454 /* current owner should not have a trash mapping */
1455 1455 ASSERT(sdp != lp->owner);
1456 1456
1457 1457 DEBUGF(4, (CE_CONT,
1458 1458 "new owner %d remapping old trash mapping\n",
1459 1459 ddi_get_pid()));
1460 1460 if ((err = devmap_umem_remap(sdp->unlockseg, winlock_dip,
1461 1461 lp->umem_cookie, 0, PAGESIZE, WINLOCK_PROT, 0, 0)) != 0) {
1462 1462 /*
1463 1463 * unable to remap old trash page,
1464 1464 * abort before changing owner
1465 1465 */
1466 1466 DEBUGF(4, (CE_CONT,
1467 1467 "aborting: error in umem_remap %d\n", err));
1468 1468 return (err);
1469 1469 }
1470 1470 sdp->flag &= ~TRASHPAGE;
1471 1471 }
1472 1472
1473 1473 /* we have a new owner now */
1474 1474 lp->owner = sdp;
1475 1475
1476 1476 if ((err = devmap_load(sdp->lockseg, lp->cookie, PAGESIZE,
1477 1477 DEVMAP_ACCESS, rw)) != 0) {
1478 1478 return (err);
1479 1479 }
1480 1480 DEBUGF(4, (CE_CONT, "new owner %d gets lock mapping", ddi_get_pid()));
1481 1481
1482 1482 if (lp->sleepers) {
1483 1483 /* Force unload unlock mapping if there are waiters */
1484 1484 DEBUGF(4, (CE_CONT,
1485 1485 " lock has %d sleepers => remove unlock mapping\n",
1486 1486 lp->sleepers));
1487 1487 err = devmap_unload(sdp->unlockseg, lp->cookie, PAGESIZE);
1488 1488 } else {
1489 1489 /*
1490 1490 * while here, give new owner a valid mapping to unlock
1491 1491 * page so we don't get called again.
1492 1492 */
1493 1493 DEBUGF(4, (CE_CONT, " and unlock mapping\n"));
1494 1494 err = devmap_load(sdp->unlockseg, lp->cookie, PAGESIZE,
1495 1495 DEVMAP_ACCESS, PROT_WRITE);
1496 1496 }
1497 1497 return (err);
1498 1498 }
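
The sleepers check above is what keeps the fast path fast: with no waiters,
the owner's unlock is a plain store to the preloaded unlock page; with
waiters, the unloaded mapping forces that store to fault back into the
driver so the lock can be handed over. A user-level sketch of that
asymmetry, assuming the usual winlock protocol of a store to the mmap'ed
unlock page (the variable and value here are assumptions for illustration):

	/*
	 * Hypothetical user-level view of the unlock-page policy in
	 * give_mapping().  unlockp is assumed to point into the mmap'ed
	 * unlock page for one winlock lock.
	 */
	volatile unsigned int *unlockp;	/* mapped unlock page word */

	static void
	dga_unlock(void)
	{
		/*
		 * If give_mapping() preloaded the unlock mapping (no
		 * sleepers), this store completes with no kernel work.
		 * If the mapping was unloaded (sleepers waiting), the
		 * store faults into the driver, which wakes a waiter.
		 */
		*unlockp = 0;
	}
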
1499 1499
1500 1500 /*
1501 1501  * Unload the owner's mappings, release the lock and wake up any sleepers.
1502 1502  * If trash is set, the old owner is given a trash mapping
1503 1503  * => the old owner held the lock too long and caused a timeout.
1504 1504  */
1505 1505 static int
1506 1506 lock_giveup(SegLock *lp, int trash)
1507 1507 {
1508 1508 SegProc *owner = lp->owner;
1509 1509
1510 1510 DEBUGF(4, (CE_CONT, "winlock_giveup: lp=%p, owner=%p, trash %d\n",
1511 1511 (void *)lp, (void *)ID(lp->owner), trash));
1512 1512
1513 1513 ASSERT(MUTEX_HELD(&lp->mutex));
1514 1514 ASSERT(owner != NULL);
1515 1515
1516 1516 /*
1517 1517 * owner loses lockpage/unlockpage mappings and gains a
1518 1518 * trashpage mapping, if needed.
1519 1519 */
1520 1520 if (!trash) {
1521 1521 /*
1522 1522 * We do not handle errors in devmap_unload in the !trash case,
1523 1523 * as the process is attempting to unmap/exit or otherwise
1524 1524 * release the lock. Errors in unloading the mapping are not
1525 1525 * going to affect that (unmap does not take error return).
1526 1526 */
1527 1527 (void) devmap_unload(owner->lockseg, lp->cookie, PAGESIZE);
1528 1528 (void) devmap_unload(owner->unlockseg, lp->cookie, PAGESIZE);
1529 1529 } else {
1530 1530 int err;
1531 1531
1532 1532 		err = devmap_unload(owner->lockseg, lp->cookie, PAGESIZE);
1533 1533 		if (err != 0) {	/* error unloading lockseg mapping; abort giveup */
1534 1534 return (err);
1535 1535 }
1536 1536
1537 1537 		/*
1538 1538 		 * The old owner gets a mapping to the trash page so it can
1539 1539 		 * continue. devmap_umem_remap does a hat_unload (holding the
1540 1540 		 * right locks), so there is no need to devmap_unload unlockseg.
1541 1541 		 */
1542 1542 if ((err = devmap_umem_remap(owner->unlockseg, winlock_dip,
1543 1543 trashpage_cookie, 0, PAGESIZE, WINLOCK_PROT, 0, 0)) != 0) {
1544 1544 /* error remapping to trash page, abort giveup */
1545 1545 return (err);
1546 1546 }
1547 1547 owner->flag |= TRASHPAGE;
1548 1548 		/*
1549 1549 		 * Preload the trash page mapping by calling devmap_load.
1550 1550 		 * However, devmap_load can only be called in the faulting
1551 1551 		 * process's context, not the owner's, so we preload only
1552 1552 		 * if we happen to be in the owner's process context.
1553 1553 		 * Other processes will fault on the unlock mapping
1554 1554 		 * and be given a trash mapping at that time.
1555 1555 		 */
1556 1556 if (ID(owner) == CURPROC_ID) {
1557 1557 (void) devmap_load(owner->unlockseg, lp->cookie,
1558 1558 PAGESIZE, DEVMAP_ACCESS, PROT_WRITE);
1559 1559 }
1560 1560 }
1561 1561
1562 1562 lp->owner = NULL;
1563 1563
1564 1564 /* Clear the lock value in underlying page so new owner can grab it */
1565 1565 LOCK(lp) = 0;
1566 1566
1567 1567 if (lp->sleepers) {
1568 1568 DEBUGF(4, (CE_CONT, " waking up, lp=%p\n", (void *)lp));
1569 1569 cv_broadcast(&lp->locksleep);
1570 1570 }
1571 1571 return (0);
1572 1572 }
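
The trashpage_cookie consumed above is allocated elsewhere in the driver.
The sketch below shows how such a cookie is plausibly obtained with the
DDI_UMEM_TRASH flag, which yields a page that silently discards writes;
the helper name and the exact call site are assumptions, not existing code:

	/*
	 * Plausible allocation of a trash page cookie (assumption: the
	 * actual allocation is in a part of winlockio.c not shown here).
	 * DDI_UMEM_TRASH gives a page that discards writes and returns
	 * undefined data on reads; no kernel mapping is created, so the
	 * returned address is not used.
	 */
	static ddi_umem_cookie_t
	alloc_trash_cookie(void)
	{
		ddi_umem_cookie_t cookie;

		(void) ddi_umem_alloc(PAGESIZE,
		    DDI_UMEM_TRASH | DDI_UMEM_SLEEP, &cookie);
		return (cookie);
	}
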
1573 1573
1574 1574 /*
1575 1575 * destroy all allocated memory.
1576 1576 */
1577 1577
1578 1578 static void
1579 1579 lock_destroyall(void)
1580 1580 {
1581 1581 SegLock *lp, *lpnext;
1582 1582
1583 1583 ASSERT(MUTEX_HELD(&winlock_mutex));
1584 1584 ASSERT(lock_list == NULL);
1585 1585
1586 1586 DEBUGF(1, (CE_CONT, "Lock list empty. Releasing free list\n"));
1587 1587 for (lp = lock_free_list; lp != NULL; lp = lpnext) {
1588 1588 mutex_enter(&lp->mutex);
1589 1589 lpnext = lp->next;
1590 1590 ASSERT(lp->clients == NULL);
1591 1591 ASSERT(lp->owner == NULL);
1592 1592 ASSERT(lp->alloccount == 0);
1593 1593 mutex_destroy(&lp->mutex);
1594 1594 cv_destroy(&lp->locksleep);
1595 1595 kmem_free(lp, sizeof (SegLock));
1596 1596 }
1597 1597 lock_free_list = NULL;
1598 1598 next_lock = 0;
1599 1599 }
1600 1600
1601 1601
1602 1602 /* RFE: create mdb walkers instead of dump routines? */
1603 1603 static void
1604 1604 seglock_dump_all(void)
1605 1605 {
1606 1606 SegLock *lp;
1607 1607
1608 1608 mutex_enter(&winlock_mutex);
1609 1609 cmn_err(CE_CONT, "ID\tKEY\tNALLOC\tATTCH\tOWNED\tLOCK\tWAITER\n");
1610 1610
1611 1611 cmn_err(CE_CONT, "Lock List:\n");
1612 1612 for (lp = lock_list; lp != NULL; lp = lp->next) {
1613 1613 mutex_enter(&lp->mutex);
1614 1614 cmn_err(CE_CONT, "%d\t%d\t%u\t%c\t%c\t%c\t%d\n",
1615 1615 lp->cookie, lp->key, lp->alloccount,
1616 1616 lp->clients ? 'Y' : 'N',
1617 1617 lp->owner ? 'Y' : 'N',
1618 1618 lp->lockptr != 0 && LOCK(lp) ? 'Y' : 'N',
1619 1619 lp->sleepers);
1620 1620 mutex_exit(&lp->mutex);
1621 1621 }
1622 1622 cmn_err(CE_CONT, "Free Lock List:\n");
1623 1623 for (lp = lock_free_list; lp != NULL; lp = lp->next) {
1624 1624 mutex_enter(&lp->mutex);
1625 1625 cmn_err(CE_CONT, "%d\t%d\t%u\t%c\t%c\t%c\t%d\n",
1626 1626 lp->cookie, lp->key, lp->alloccount,
1627 1627 lp->clients ? 'Y' : 'N',
1628 1628 lp->owner ? 'Y' : 'N',
1629 1629 lp->lockptr != 0 && LOCK(lp) ? 'Y' : 'N',
1630 1630 lp->sleepers);
1631 1631 mutex_exit(&lp->mutex);
1632 1632 }
1633 1633
1634 1634 #ifdef DEBUG
1635 1635 if (lock_debug < 3) {
1636 1636 mutex_exit(&winlock_mutex);
1637 1637 return;
1638 1638 }
1639 1639
1640 1640 for (lp = lock_list; lp != NULL; lp = lp->next) {
1641 1641 SegProc *sdp;
1642 1642
1643 1643 mutex_enter(&lp->mutex);
1644 1644 cmn_err(CE_CONT,
1645 1645 "lock %p, key=%d, cookie=%d, nalloc=%u, lock=%d, wait=%d\n",
1646 1646 (void *)lp, lp->key, lp->cookie, lp->alloccount,
1647 1647 lp->lockptr != 0 ? LOCK(lp) : -1, lp->sleepers);
1648 1648
1649 1649 cmn_err(CE_CONT,
1650 1650 "style=%d, lockptr=%p, timeout=%ld, clients=%p, owner=%p\n",
1651 1651 lp->style, (void *)lp->lockptr, lp->timeout,
1652 1652 (void *)lp->clients, (void *)lp->owner);
1653 1653
1654 1654
1655 1655 for (sdp = lp->clients; sdp != NULL; sdp = sdp->next) {
1656 1656 cmn_err(CE_CONT, " client %p%s, lp=%p, flag=%x, "
1657 1657 "process tag=%p, lockseg=%p, unlockseg=%p\n",
1658 1658 (void *)sdp, sdp == lp->owner ? " (owner)" : "",
1659 1659 (void *)sdp->lp, sdp->flag, (void *)ID(sdp),
1660 1660 (void *)sdp->lockseg, (void *)sdp->unlockseg);
1661 1661 }
1662 1662 mutex_exit(&lp->mutex);
1663 1663 }
1664 1664 #endif
1665 1665 mutex_exit(&winlock_mutex);
1666 1666 }
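
The RFE comment above suggests mdb walkers instead of dump routines. A
sketch of what a lock_list walker might look like, following the standard
MDB module API; this is an assumption (registration boilerplate omitted,
and the SegLock definition is assumed visible to the module), not code
that exists in the gate:

	#include <sys/mdb_modapi.h>

	/* Hypothetical mdb walker for the lock_list chain (per the RFE). */
	static int
	seglock_walk_init(mdb_walk_state_t *wsp)
	{
		/* Start at the global lock_list head unless given an address. */
		if (wsp->walk_addr == NULL &&
		    mdb_readvar(&wsp->walk_addr, "lock_list") == -1) {
			mdb_warn("failed to read lock_list");
			return (WALK_ERR);
		}
		return (WALK_NEXT);
	}

	static int
	seglock_walk_step(mdb_walk_state_t *wsp)
	{
		SegLock lp;
		uintptr_t addr = wsp->walk_addr;

		if (addr == NULL)
			return (WALK_DONE);
		if (mdb_vread(&lp, sizeof (lp), addr) == -1) {
			mdb_warn("failed to read SegLock at %p", addr);
			return (WALK_ERR);
		}
		wsp->walk_addr = (uintptr_t)lp.next;
		return (wsp->walk_callback(addr, &lp, wsp->walk_cbdata));
	}
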
1667 1667
1668 1668 #include <sys/modctl.h>
1669 1669
1670 1670 static struct modldrv modldrv = {
1671 1671 &mod_driverops, /* Type of module. This one is a driver */
1672 1672 "Winlock Driver", /* Name of the module */
1673 1673 &winlock_ops, /* driver ops */
1674 1674 };
1675 1675
1676 1676 static struct modlinkage modlinkage = {
1677 1677 MODREV_1,
1678 - (void *)&modldrv,
1679 - 0,
1680 - 0,
1681 - 0
1678 + { (void *)&modldrv, NULL }
1682 1679 };
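
This initializer change is the substance of the webrev: the second member
of struct modlinkage is an array of linkage-structure pointers, so the old
flat initializer relied on implicit brace elision, which -Wmissing-braces
flags once -Wno-missing-braces is dropped from Makefile.uts. A standalone
illustration using a hypothetical struct (not the real modlinkage
definition, whose array size differs by data model):

	/* Hypothetical struct mirroring the shape of modlinkage. */
	struct linkage {
		int	rev;
		void	*ml_linkage[4];
	};

	/* Flagged by -Wmissing-braces: array member lacks its own braces. */
	struct linkage bad = { 1, (void *)0, (void *)0, (void *)0, (void *)0 };

	/* Clean: inner braces; remaining elements are zero-initialized. */
	struct linkage good = { 1, { (void *)0 } };
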
1683 1680
1684 1681 int
1685 1682 _init(void)
1686 1683 {
1687 1684 int e;
1688 1685
1689 1686 mutex_init(&winlock_mutex, NULL, MUTEX_DEFAULT, NULL);
1690 1687 e = mod_install(&modlinkage);
1691 1688 if (e) {
1692 1689 mutex_destroy(&winlock_mutex);
1693 1690 }
1694 1691 return (e);
1695 1692 }
1696 1693
1697 1694
1698 1695 int
1699 1696 _info(struct modinfo *modinfop)
1700 1697 {
1701 1698 return (mod_info(&modlinkage, modinfop));
1702 1699 }
1703 1700
1704 1701 int
1705 1702 _fini(void)
1706 1703 {
1707 1704 int e;
1708 1705
1709 1706 e = mod_remove(&modlinkage);
1710 1707 if (e == 0) {
1711 1708 mutex_destroy(&winlock_mutex);
1712 1709 }
1713 1710 return (e);
1714 1711 }