7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/gssapi/mechs/krb5/include/k5-thread.h
+++ new/usr/src/uts/common/gssapi/mechs/krb5/include/k5-thread.h
1 1 /*
2 2 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
3 3 */
4 4
5 5 /*
6 6 * include/k5-thread.h
7 7 *
8 8 * Copyright 2004,2005,2006 by the Massachusetts Institute of Technology.
9 9 * All Rights Reserved.
10 10 *
11 11 * Export of this software from the United States of America may
12 12 * require a specific license from the United States Government.
13 13 * It is the responsibility of any person or organization contemplating
14 14 * export to obtain such a license before exporting.
15 15 *
16 16 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
17 17 * distribute this software and its documentation for any purpose and
18 18 * without fee is hereby granted, provided that the above copyright
19 19 * notice appear in all copies and that both that copyright notice and
20 20 * this permission notice appear in supporting documentation, and that
21 21 * the name of M.I.T. not be used in advertising or publicity pertaining
22 22 * to distribution of the software without specific, written prior
23 23 * permission. Furthermore if you modify this software you must label
24 24 * your software as modified software and not distribute it in such a
25 25 * fashion that it might be confused with the original M.I.T. software.
26 26 * M.I.T. makes no representations about the suitability of
27 27 * this software for any purpose. It is provided "as is" without express
28 28 * or implied warranty.
29 29 *
30 30 *
31 31 * Preliminary thread support.
32 32 */
33 33
34 34 #ifndef K5_THREAD_H
35 35 #define K5_THREAD_H
36 36
37 37 #ifdef _KERNEL
38 38
39 39 #include <sys/ksynch.h>
40 40
41 41 typedef kmutex_t k5_mutex_t;
42 42
43 -#define K5_MUTEX_PARTIAL_INITIALIZER {0}
43 +#define K5_MUTEX_PARTIAL_INITIALIZER {{NULL}}
44 44
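This changed initializer is why the file appears in this webrev: with -Wno-missing-braces removed from Makefile.uts, the compiler is allowed to warn when an aggregate whose first member is itself an aggregate is initialized with only one level of braces. Both the old and new initializers zero the structure; only the bracing differs. A minimal stand-alone sketch of the warning and the fix, using a stand-in struct (the layout below is assumed for illustration, not the real <sys/mutex.h> definition of kmutex_t):

#include <stddef.h>

/* Stand-in: an opaque lock whose only member is itself an aggregate. */
struct opaque_lock {
	void *_opaque[1];
};

/*
 * One level of braces: same zero bytes, but a compiler with
 * -Wmissing-braces in effect may warn that the inner braces are missing.
 */
struct opaque_lock partly_braced = { 0 };

/* Fully braced, as in the new K5_MUTEX_PARTIAL_INITIALIZER: no warning. */
struct opaque_lock fully_braced = { { NULL } };

int
main(void)
{
	(void) partly_braced;
	(void) fully_braced;
	return (0);
}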
45 45 /* ARGSUSED */
46 46 static void k5_mutex_assert_locked(k5_mutex_t *m) { }
47 47
48 48 static int
49 49 k5_mutex_lock(k5_mutex_t *m)
50 50 {
51 51 mutex_enter(m);
52 52 return (0);
53 53 }
54 54
55 55 static int
56 56 k5_mutex_unlock(k5_mutex_t *m)
57 57 {
58 58 mutex_exit(m);
59 59 return(0);
60 60 }
61 61
62 62
63 63 #else /* _KERNEL */
64 64
65 65 #include "autoconf.h"
66 66 #ifndef KRB5_CALLCONV
67 67 # define KRB5_CALLCONV
68 68 #endif
69 69 #ifndef KRB5_CALLCONV_C
70 70 # define KRB5_CALLCONV_C
71 71 #endif
72 72
73 73 /* Interface (tentative):
74 74
75 75 Mutex support:
76 76
77 77 // Between these two, we should be able to do pure compile-time
78 78 // and pure run-time initialization.
79 79 // POSIX: partial initializer is PTHREAD_MUTEX_INITIALIZER,
80 80 // finish does nothing
81 81 // Windows: partial initializer is an invalid handle,
82 82 // finish does the real initialization work
83 83 // debug: partial initializer sets one magic value,
84 84 // finish verifies and sets a new magic value for
85 85 // lock/unlock to check
86 86 k5_mutex_t foo_mutex = K5_MUTEX_PARTIAL_INITIALIZER;
87 87 int k5_mutex_finish_init(k5_mutex_t *);
88 88 // for dynamic allocation
89 89 int k5_mutex_init(k5_mutex_t *);
90 90 // Must work for both kinds of alloc, even if it means adding flags.
91 91 int k5_mutex_destroy(k5_mutex_t *);
92 92
93 93 // As before.
94 94 int k5_mutex_lock(k5_mutex_t *);
95 95 int k5_mutex_unlock(k5_mutex_t *);
96 96
97 97 In each library, one new function to finish the static mutex init,
98 98 and any other library-wide initialization that might be desired.
99 99 On POSIX, this function would be called via the second support
100 100 function (see below). On Windows, it would be called at library
101 101 load time. These functions, or functions they call, should be the
102 102 only places that k5_mutex_finish_init gets called.
103 103
104 104 A second function or macro called at various possible "first" entry
105 105 points which either calls pthread_once on the first function
106 106 (POSIX), or checks some flag set by the first function (Windows,
107 107 debug support), and possibly returns an error. (In the
108 108 non-threaded case, a simple flag can be used to avoid multiple
109 109 invocations, and the mutexes don't need run-time initialization
110 110 anyways.)
111 111
112 112 A third function for library termination calls mutex_destroy on
113 113 each mutex for the library. This function would be called
114 114 automatically at library unload time. If it turns out to be needed
115 115 at exit time for libraries that don't get unloaded, perhaps we
116 116 should also use atexit(). Any static mutexes should be cleaned up
117 117 with k5_mutex_destroy here.
118 118
119 119 How does that second support function invoke the first support
120 120 function only once? Through something modelled on pthread_once
121 121 that I haven't written up yet. Probably:
122 122
123 123 k5_once_t foo_once = K5_ONCE_INIT;
124 124 k5_once(k5_once_t *, void (*)(void));
125 125
126 126 For POSIX: Map onto pthread_once facility.
127 127 For non-threaded case: A simple flag.
128 128 For Windows: Not needed; library init code takes care of it.
129 129
130 130 XXX: A general k5_once mechanism isn't possible for Windows,
131 131 without faking it through named mutexes or mutexes initialized at
132 132 startup. I was only using it in one place outside these headers,
133 133 so I'm dropping the general scheme. Eventually the existing uses
134 134 in k5-thread.h and k5-platform.h will be converted to pthread_once
135 135 or static variables.
136 136
137 137
138 138 Thread-specific data:
139 139
140 140 // TSD keys are limited in number in gssapi/krb5/com_err; enumerate
141 141 // them all. This allows support code init to allocate the
142 142 // necessary storage for pointers all at once, and avoids any
143 143 // possible error in key creation.
144 144 enum { ... } k5_key_t;
145 145 // Register destructor function. Called in library init code.
146 146 int k5_key_register(k5_key_t, void (*destructor)(void *));
147 147 // Returns NULL or data.
148 148 void *k5_getspecific(k5_key_t);
149 149 // Returns error if key out of bounds, or the pointer table can't
150 150 // be allocated. A call to k5_key_register must have happened first.
151 151 // This may trigger the calling of pthread_setspecific on POSIX.
152 152 int k5_setspecific(k5_key_t, void *);
153 153 // Called in library termination code.
154 154 // Trashes data in all threads, calling the registered destructor
155 155 // (but calling it from the current thread).
156 156 int k5_key_delete(k5_key_t);
157 157
158 158 For the non-threaded version, the support code will have a static
159 159 array indexed by k5_key_t values, and get/setspecific simply access
160 160 the array elements.
161 161
162 162 The TSD destructor table is global state, protected by a mutex if
163 163 threads are enabled.
164 164
165 165 Debug support: Not much. Might check if k5_key_register has been
166 166 called and abort if not.
167 167
168 168
169 169 Any actual external symbols will use the krb5int_ prefix. The k5_
170 170 names will be simple macros or inline functions to rename the
171 171 external symbols, or slightly more complex ones to expand the
172 172 implementation inline (e.g., map to POSIX versions and/or debug
173 173 code using __FILE__ and the like).
174 174
175 175
176 176 More to be added, perhaps. */
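Read as a usage sketch, the tentative interface described in the comment above is meant to be consumed roughly as follows; the foo_* names are hypothetical, this header is assumed to be included, and error handling is abbreviated:

static k5_mutex_t foo_mutex = K5_MUTEX_PARTIAL_INITIALIZER;

/* Library-wide init: the one place k5_mutex_finish_init() is called. */
int
foo_library_init(void)
{
	return (k5_mutex_finish_init(&foo_mutex));
}

/* Ordinary entry point: lock, touch shared state, unlock. */
int
foo_do_work(void)
{
	int err;

	err = k5_mutex_lock(&foo_mutex);
	if (err)
		return (err);
	/* ... operate on data guarded by foo_mutex ... */
	return (k5_mutex_unlock(&foo_mutex));
}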
177 177
178 178 #undef DEBUG_THREADS /* SUNW14resync XXX */
179 179 #undef DEBUG_THREADS_LOC /* SUNW14resync XXX */
180 180 #undef DEBUG_THREADS_SLOW /* debugging stuff that'll slow things down? */
181 181 #undef DEBUG_THREADS_STATS
182 182
183 183 #ifndef _KERNEL
184 184 #include <assert.h>
185 185 #include <stdarg.h>
186 186 #define ASSERT assert
187 187 #endif
188 188
189 189 /* For tracking locations, of (e.g.) last lock or unlock of mutex. */
190 190 #ifdef DEBUG_THREADS_LOC
191 191 typedef struct {
192 192 const char *filename;
193 193 int lineno;
194 194 } k5_debug_loc;
195 195 #define K5_DEBUG_LOC_INIT { __FILE__, __LINE__ }
196 196 #if __GNUC__ >= 2
197 197 #define K5_DEBUG_LOC (__extension__ (k5_debug_loc)K5_DEBUG_LOC_INIT)
198 198 #else
199 199 static inline k5_debug_loc k5_debug_make_loc(const char *file, int line)
200 200 {
201 201 k5_debug_loc l;
202 202 l.filename = file;
203 203 l.lineno = line;
204 204 return l;
205 205 }
206 206 #define K5_DEBUG_LOC (k5_debug_make_loc(__FILE__,__LINE__))
207 207 #endif
208 208 #else /* ! DEBUG_THREADS_LOC */
209 209 typedef char k5_debug_loc;
210 210 #define K5_DEBUG_LOC_INIT 0
211 211 #define K5_DEBUG_LOC 0
212 212 #endif
213 213
214 214 #define k5_debug_update_loc(L) ((L) = K5_DEBUG_LOC)
215 215
216 216
217 217
218 218 /* Statistics gathering:
219 219
220 220 Currently incomplete, don't try enabling it.
221 221
222 222 Eventually: Report number of times locked, total and standard
223 223 deviation of the time the lock was held, total and std dev time
224 224 spent waiting for the lock. "Report" will probably mean "write a
225 225 line to a file if a magic environment variable is set." */
226 226
227 227 #ifdef DEBUG_THREADS_STATS
228 228
229 229 #if HAVE_TIME_H && (!defined(HAVE_SYS_TIME_H) || defined(TIME_WITH_SYS_TIME))
230 230 # include <time.h>
231 231 #endif
232 232 #if HAVE_SYS_TIME_H
233 233 # include <sys/time.h>
234 234 #endif
235 235 #ifdef HAVE_STDINT_H
236 236 # include <stdint.h>
237 237 #endif
238 238 /* for memset */
239 239 #include <string.h>
240 240 /* for uint64_t */
241 241 #include <inttypes.h>
242 242 typedef uint64_t k5_debug_timediff_t; /* or long double */
243 243 typedef struct timeval k5_debug_time_t;
244 244 static inline k5_debug_timediff_t
245 245 timediff(k5_debug_time_t t2, k5_debug_time_t t1)
246 246 {
247 247 return (t2.tv_sec - t1.tv_sec) * 1000000 + (t2.tv_usec - t1.tv_usec);
248 248 }
249 249 static inline k5_debug_time_t get_current_time(void)
250 250 {
251 251 struct timeval tv;
252 252 if (gettimeofday(&tv,0) < 0) { tv.tv_sec = tv.tv_usec = 0; }
253 253 return tv;
254 254 }
255 255 struct k5_timediff_stats {
256 256 k5_debug_timediff_t valmin, valmax, valsum, valsqsum;
257 257 };
258 258 typedef struct {
259 259 int count;
260 260 k5_debug_time_t time_acquired, time_created;
261 261 struct k5_timediff_stats lockwait, lockheld;
262 262 } k5_debug_mutex_stats;
263 263 #define k5_mutex_init_stats(S) \
264 264 (memset((S), 0, sizeof(k5_debug_mutex_stats)), \
265 265 (S)->time_created = get_current_time(), \
266 266 0)
267 267 #define k5_mutex_finish_init_stats(S) (0)
268 268 #define K5_MUTEX_STATS_INIT { 0, {0}, {0}, {0}, {0} }
269 269 typedef k5_debug_time_t k5_mutex_stats_tmp;
270 270 #define k5_mutex_stats_start() get_current_time()
271 271 void KRB5_CALLCONV krb5int_mutex_lock_update_stats(k5_debug_mutex_stats *m,
272 272 k5_mutex_stats_tmp start);
273 273 void KRB5_CALLCONV krb5int_mutex_unlock_update_stats(k5_debug_mutex_stats *m);
274 274 #define k5_mutex_lock_update_stats krb5int_mutex_lock_update_stats
275 275 #define k5_mutex_unlock_update_stats krb5int_mutex_unlock_update_stats
276 276 void KRB5_CALLCONV krb5int_mutex_report_stats(/* k5_mutex_t *m */);
277 277
278 278 #else
279 279
280 280 typedef char k5_debug_mutex_stats;
281 281 #define k5_mutex_init_stats(S) (*(S) = 's', 0)
282 282 #define k5_mutex_finish_init_stats(S) (0)
283 283 #define K5_MUTEX_STATS_INIT 's'
284 284 typedef int k5_mutex_stats_tmp;
285 285 #define k5_mutex_stats_start() (0)
286 286 #ifdef __GNUC__
287 287 static void
288 288 k5_mutex_lock_update_stats(k5_debug_mutex_stats *m, k5_mutex_stats_tmp t)
289 289 {
290 290 }
291 291 #else
292 292 # define k5_mutex_lock_update_stats(M,S) (S)
293 293 #endif
294 294 #define k5_mutex_unlock_update_stats(M) (*(M) = 's')
295 295
296 296 /* If statistics tracking isn't enabled, these functions don't actually
297 297 do anything. Declare anyways so we can do type checking etc. */
298 298 void KRB5_CALLCONV krb5int_mutex_lock_update_stats(k5_debug_mutex_stats *m,
299 299 k5_mutex_stats_tmp start);
300 300 void KRB5_CALLCONV krb5int_mutex_unlock_update_stats(k5_debug_mutex_stats *m);
301 301 void KRB5_CALLCONV krb5int_mutex_report_stats(/* k5_mutex_t *m */);
302 302
303 303 #define krb5int_mutex_report_stats(M) ((M)->stats = 'd')
304 304
305 305 #endif
306 306
307 307
308 308
309 309 /* Define the OS mutex bit. */
310 310
311 311 /* First, if we're not actually doing multiple threads, do we
312 312 want the debug support or not? */
313 313
314 314 #ifdef DEBUG_THREADS
315 315
316 316 enum k5_mutex_init_states {
317 317 K5_MUTEX_DEBUG_PARTLY_INITIALIZED = 0x12,
318 318 K5_MUTEX_DEBUG_INITIALIZED,
319 319 K5_MUTEX_DEBUG_DESTROYED
320 320 };
321 321 enum k5_mutex_flag_states {
322 322 K5_MUTEX_DEBUG_UNLOCKED = 0x23,
323 323 K5_MUTEX_DEBUG_LOCKED
324 324 };
325 325
326 326 typedef struct {
327 327 enum k5_mutex_init_states initialized;
328 328 enum k5_mutex_flag_states locked;
329 329 } k5_os_nothread_mutex;
330 330
331 331 # define K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER \
332 332 { K5_MUTEX_DEBUG_PARTLY_INITIALIZED, K5_MUTEX_DEBUG_UNLOCKED }
333 333
334 334 # define k5_os_nothread_mutex_finish_init(M) \
335 335 (ASSERT((M)->initialized != K5_MUTEX_DEBUG_INITIALIZED), \
336 336 ASSERT((M)->initialized == K5_MUTEX_DEBUG_PARTLY_INITIALIZED), \
337 337 ASSERT((M)->locked == K5_MUTEX_DEBUG_UNLOCKED), \
338 338 (M)->initialized = K5_MUTEX_DEBUG_INITIALIZED, 0)
339 339 # define k5_os_nothread_mutex_init(M) \
340 340 ((M)->initialized = K5_MUTEX_DEBUG_INITIALIZED, \
341 341 (M)->locked = K5_MUTEX_DEBUG_UNLOCKED, 0)
342 342 # define k5_os_nothread_mutex_destroy(M) \
343 343 (ASSERT((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED), \
344 344 (M)->initialized = K5_MUTEX_DEBUG_DESTROYED, 0)
345 345
346 346 # define k5_os_nothread_mutex_lock(M) \
347 347 (k5_os_nothread_mutex_assert_unlocked(M), \
348 348 (M)->locked = K5_MUTEX_DEBUG_LOCKED, 0)
349 349 # define k5_os_nothread_mutex_unlock(M) \
350 350 (k5_os_nothread_mutex_assert_locked(M), \
351 351 (M)->locked = K5_MUTEX_DEBUG_UNLOCKED, 0)
352 352
353 353 # define k5_os_nothread_mutex_assert_locked(M) \
354 354 (ASSERT((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED), \
355 355 ASSERT((M)->locked != K5_MUTEX_DEBUG_UNLOCKED), \
356 356 ASSERT((M)->locked == K5_MUTEX_DEBUG_LOCKED))
357 357 # define k5_os_nothread_mutex_assert_unlocked(M) \
358 358 (ASSERT((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED), \
359 359 ASSERT((M)->locked != K5_MUTEX_DEBUG_LOCKED), \
360 360 ASSERT((M)->locked == K5_MUTEX_DEBUG_UNLOCKED))
361 361
362 362 #else /* threads disabled and not debugging */
363 363 typedef char k5_os_nothread_mutex;
364 364 # define K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER 0
365 365 /* Empty inline functions avoid the "statement with no effect"
366 366 warnings, and do better type-checking than functions that don't use
367 367 their arguments. */
368 368 /* SUNW 1.4resync, remove "inline" to avoid warning */
369 369 /* ARGSUSED */
370 370 /* LINTED */
371 371 static int k5_os_nothread_mutex_finish_init(k5_os_nothread_mutex *m) {
372 372 return 0;
373 373 }
374 374 /* ARGSUSED */
375 375 /* LINTED */
376 376 static int k5_os_nothread_mutex_init(k5_os_nothread_mutex *m) {
377 377 return 0;
378 378 }
379 379 /* ARGSUSED */
380 380 /* LINTED */
381 381 static int k5_os_nothread_mutex_destroy(k5_os_nothread_mutex *m) {
382 382 return 0;
383 383 }
384 384 /* ARGSUSED */
385 385 /* LINTED */
386 386 static int k5_os_nothread_mutex_lock(k5_os_nothread_mutex *m) {
387 387 return 0;
388 388 }
389 389 /* ARGSUSED */
390 390 /* LINTED */
391 391 static int k5_os_nothread_mutex_unlock(k5_os_nothread_mutex *m) {
392 392 return 0;
393 393 }
394 394 # define k5_os_nothread_mutex_assert_locked(M) ((void)0)
395 395 # define k5_os_nothread_mutex_assert_unlocked(M) ((void)0)
396 396
397 397 #endif
398 398
399 399 /* Values:
400 400 2 - function has not been run
401 401 3 - function has been run
402 402 4 - function is being run -- deadlock detected */
403 403 typedef unsigned char k5_os_nothread_once_t;
404 404 # define K5_OS_NOTHREAD_ONCE_INIT 2
405 405 # define k5_os_nothread_once(O,F) \
406 406 (*(O) == 3 ? 0 \
407 407 : *(O) == 2 ? (*(O) = 4, (F)(), *(O) = 3, 0) \
408 408 : (ASSERT(*(O) != 4), ASSERT(*(O) == 2 || *(O) == 3), 0))
409 409
410 410
411 411
412 412 #ifndef ENABLE_THREADS
413 413 typedef k5_os_nothread_mutex k5_os_mutex;
414 414 # define K5_OS_MUTEX_PARTIAL_INITIALIZER \
415 415 K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER
416 416 # define k5_os_mutex_finish_init k5_os_nothread_mutex_finish_init
417 417 # define k5_os_mutex_init k5_os_nothread_mutex_init
418 418 # define k5_os_mutex_destroy k5_os_nothread_mutex_destroy
419 419 # define k5_os_mutex_lock k5_os_nothread_mutex_lock
420 420 # define k5_os_mutex_unlock k5_os_nothread_mutex_unlock
421 421 # define k5_os_mutex_assert_locked k5_os_nothread_mutex_assert_locked
422 422 # define k5_os_mutex_assert_unlocked k5_os_nothread_mutex_assert_unlocked
423 423
424 424 # define k5_once_t k5_os_nothread_once_t
425 425 # define K5_ONCE_INIT K5_OS_NOTHREAD_ONCE_INIT
426 426 # define k5_once k5_os_nothread_once
427 427
428 428 #elif HAVE_PTHREAD
429 429
430 430 # include <pthread.h>
431 431
432 432 /* Weak reference support, etc.
433 433
434 434 Linux: Stub mutex routines exist, but pthread_once does not.
435 435
436 436 Solaris: In libc there's a pthread_once that doesn't seem to do
437 437 anything. Bleah. But pthread_mutexattr_setrobust_np is defined
438 438 only in libpthread. However, some version of GNU libc (Red Hat's
439 439 Fedora Core 5, reportedly) seems to have that function, but no
440 440 declaration, so we'd have to declare it in order to test for its
441 441 address. We now have tests to see if pthread_once actually works,
442 442 so stick with that for now.
443 443
444 444 IRIX 6.5 stub pthread support in libc is really annoying. The
445 445 pthread_mutex_lock function returns ENOSYS for a program not linked
446 446 against -lpthread. No link-time failure, no weak symbols, etc.
447 447 The C library doesn't provide pthread_once; we can use weak
448 448 reference support for that.
449 449
450 450 If weak references are not available, then for now, we assume that
451 451 the pthread support routines will always be available -- either the
452 452 real thing, or functional stubs that merely prohibit creating
453 453 threads.
454 454
455 455 If we find a platform with non-functional stubs and no weak
456 456 references, we may have to resort to some hack like dlsym on the
457 457 symbol tables of the current process. */
458 458 #ifdef HAVE_PRAGMA_WEAK_REF
459 459 # pragma weak pthread_once
460 460 # pragma weak pthread_mutex_lock
461 461 # pragma weak pthread_mutex_unlock
462 462 # pragma weak pthread_mutex_destroy
463 463 # pragma weak pthread_mutex_init
464 464 # pragma weak pthread_self
465 465 # pragma weak pthread_equal
466 466 # ifdef HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP_IN_THREAD_LIB
467 467 # pragma weak pthread_mutexattr_setrobust_np
468 468 # endif
469 469 # if !defined HAVE_PTHREAD_ONCE
470 470 # define K5_PTHREADS_LOADED (&pthread_once != 0)
471 471 # elif !defined HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP \
472 472 && defined HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP_IN_THREAD_LIB
473 473 # define K5_PTHREADS_LOADED (&pthread_mutexattr_setrobust_np != 0)
474 474 # else
475 475 # define K5_PTHREADS_LOADED (1)
476 476 # endif
477 477 #else
478 478 /* no pragma weak support */
479 479 # define K5_PTHREADS_LOADED (1)
480 480 #endif
481 481
482 482 #if defined(__mips) && defined(__sgi) && (defined(_SYSTYPE_SVR4) || defined(__SYSTYPE_SVR4__))
483 483 /* IRIX 6.5 stub pthread support in libc is really annoying. The
484 484 pthread_mutex_lock function returns ENOSYS for a program not linked
485 485 against -lpthread. No link-time failure, no weak reference tests,
486 486 etc.
487 487
488 488 The C library doesn't provide pthread_once; we can use weak
489 489 reference support for that. */
490 490 # ifndef HAVE_PRAGMA_WEAK_REF
491 491 # if defined(__GNUC__) && __GNUC__ < 3
492 492 # error "Please update to a newer gcc with weak symbol support, or switch to native cc, reconfigure and recompile."
493 493 # else
494 494 # error "Weak reference support is required"
495 495 # endif
496 496 # endif
497 497 # define USE_PTHREAD_LOCK_ONLY_IF_LOADED
498 498 #endif
499 499
500 500 #if !defined(HAVE_PTHREAD_MUTEX_LOCK) && !defined(USE_PTHREAD_LOCK_ONLY_IF_LOADED)
501 501 # define USE_PTHREAD_LOCK_ONLY_IF_LOADED
502 502 #endif
503 503
504 504 #ifdef HAVE_PRAGMA_WEAK_REF
505 505 /* Can't rely on useful stubs -- see above regarding Solaris. */
506 506 typedef struct {
507 507 pthread_once_t o;
508 508 k5_os_nothread_once_t n;
509 509 } k5_once_t;
510 510 # define K5_ONCE_INIT { PTHREAD_ONCE_INIT, K5_OS_NOTHREAD_ONCE_INIT }
511 511 # define k5_once(O,F) (K5_PTHREADS_LOADED \
512 512 ? pthread_once(&(O)->o,F) \
513 513 : k5_os_nothread_once(&(O)->n,F))
514 514 #else
515 515 typedef pthread_once_t k5_once_t;
516 516 # define K5_ONCE_INIT PTHREAD_ONCE_INIT
517 517 # define k5_once pthread_once
518 518 #endif
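The k5_once wrapper defined just above is what lets the library-init routine run exactly once regardless of which entry point reaches it first. A hedged sketch of the intended pattern (foo_* names hypothetical, this header assumed included):

static k5_once_t foo_once = K5_ONCE_INIT;
static int foo_ready;

/* Runs exactly once; library-wide one-time setup would go here. */
static void
foo_init_once(void)
{
	foo_ready = 1;
}

int
foo_entry_point(void)
{
	int err = k5_once(&foo_once, foo_init_once);

	if (err)
		return (err);
	ASSERT(foo_ready);
	return (0);
}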
519 519
520 520 typedef struct {
521 521 pthread_mutex_t p;
522 522 #ifdef DEBUG_THREADS
523 523 pthread_t owner;
524 524 #endif
525 525 #ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED
526 526 k5_os_nothread_mutex n;
527 527 #endif
528 528 } k5_os_mutex;
529 529
530 530 #ifdef DEBUG_THREADS
531 531 # ifdef __GNUC__
532 532 # define k5_pthread_mutex_lock(M) \
533 533 ({ \
534 534 k5_os_mutex *_m2 = (M); \
535 535 int _r2 = pthread_mutex_lock(&_m2->p); \
536 536 if (_r2 == 0) _m2->owner = pthread_self(); \
537 537 _r2; \
538 538 })
539 539 # else
540 540 static int
541 541 k5_pthread_mutex_lock(k5_os_mutex *m)
542 542 {
543 543 int r = pthread_mutex_lock(&m->p);
544 544 if (r)
545 545 return r;
546 546 m->owner = pthread_self();
547 547 return 0;
548 548 }
549 549 # endif
550 550 # define k5_pthread_assert_locked(M) \
551 551 (K5_PTHREADS_LOADED \
552 552 ? ASSERT(pthread_equal((M)->owner, pthread_self())) \
553 553 : (void)0)
554 554 # define k5_pthread_mutex_unlock(M) \
555 555 (k5_pthread_assert_locked(M), \
556 556 (M)->owner = (pthread_t) 0, \
557 557 pthread_mutex_unlock(&(M)->p))
558 558 #else
559 559 # define k5_pthread_mutex_lock(M) pthread_mutex_lock(&(M)->p)
560 560 /* LINTED */
561 561 static void k5_pthread_assert_locked(k5_os_mutex *m) { }
562 562 # define k5_pthread_mutex_unlock(M) pthread_mutex_unlock(&(M)->p)
563 563 #endif
564 564
565 565 /* Define as functions to:
566 566 (1) eliminate "statement with no effect" warnings for "0"
567 567 (2) encourage type-checking in calling code */
568 568
569 569 /* LINTED */
570 570 static void k5_pthread_assert_unlocked(pthread_mutex_t *m) { }
571 571
572 572 #if defined(DEBUG_THREADS_SLOW) && HAVE_SCHED_H && (HAVE_SCHED_YIELD || HAVE_PRAGMA_WEAK_REF)
573 573 # include <sched.h>
574 574 # if !HAVE_SCHED_YIELD
575 575 # pragma weak sched_yield
576 576 # define MAYBE_SCHED_YIELD() ((void)((&sched_yield != NULL) ? sched_yield() : 0))
577 577 # else
578 578 # define MAYBE_SCHED_YIELD() ((void)sched_yield())
579 579 # endif
580 580 #else
581 581 # define MAYBE_SCHED_YIELD() ((void)0)
582 582 #endif
583 583
584 584 /* It may not be obvious why this function is desirable.
585 585
586 586 I want to call pthread_mutex_lock, then sched_yield, then look at
587 587 the return code from pthread_mutex_lock. That can't be implemented
588 588 in a macro without a temporary variable, or GNU C extensions.
589 589
590 590 There used to be an inline function which did it, with both
591 591 functions called from the inline function. But that messes with
592 592 the debug information on a lot of configurations, and you can't
593 593 tell where the inline function was called from. (Typically, gdb
594 594 gives you the name of the function from which the inline function
595 595 was called, and a line number within the inline function itself.)
596 596
597 597 With this auxiliary function, pthread_mutex_lock can be called at
598 598 the invoking site via a macro; once it returns, the inline function
599 599 is called (with messed-up line-number info for gdb hopefully
600 600 localized to just that call). */
601 601 #ifdef __GNUC__
602 602 #define return_after_yield(R) \
603 603 __extension__ ({ \
604 604 int _r = (R); \
605 605 MAYBE_SCHED_YIELD(); \
606 606 _r; \
607 607 })
608 608 #else
609 609 static int return_after_yield(int r)
610 610 {
611 611 MAYBE_SCHED_YIELD();
612 612 return r;
613 613 }
614 614 #endif
615 615
616 616 #ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED
617 617
618 618 # if defined(PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP) && defined(DEBUG_THREADS)
619 619 # define K5_OS_MUTEX_PARTIAL_INITIALIZER \
620 620 { PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0, \
621 621 K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
622 622 # elif defined(DEBUG_THREADS)
623 623 # define K5_OS_MUTEX_PARTIAL_INITIALIZER \
624 624 { PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0, \
625 625 K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
626 626 # else
627 627 # define K5_OS_MUTEX_PARTIAL_INITIALIZER \
628 628 { PTHREAD_MUTEX_INITIALIZER, K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
629 629 # endif
630 630
631 631 # define k5_os_mutex_finish_init(M) \
632 632 k5_os_nothread_mutex_finish_init(&(M)->n)
633 633 # define k5_os_mutex_init(M) \
634 634 (k5_os_nothread_mutex_init(&(M)->n), \
635 635 (K5_PTHREADS_LOADED \
636 636 ? pthread_mutex_init(&(M)->p, 0) \
637 637 : 0))
638 638 # define k5_os_mutex_destroy(M) \
639 639 (k5_os_nothread_mutex_destroy(&(M)->n), \
640 640 (K5_PTHREADS_LOADED \
641 641 ? pthread_mutex_destroy(&(M)->p) \
642 642 : 0))
643 643
644 644 # define k5_os_mutex_lock(M) \
645 645 return_after_yield(K5_PTHREADS_LOADED \
646 646 ? k5_pthread_mutex_lock(M) \
647 647 : k5_os_nothread_mutex_lock(&(M)->n))
648 648 # define k5_os_mutex_unlock(M) \
649 649 (MAYBE_SCHED_YIELD(), \
650 650 (K5_PTHREADS_LOADED \
651 651 ? k5_pthread_mutex_unlock(M) \
652 652 : k5_os_nothread_mutex_unlock(&(M)->n)))
653 653
654 654 # define k5_os_mutex_assert_unlocked(M) \
655 655 (K5_PTHREADS_LOADED \
656 656 ? k5_pthread_assert_unlocked(&(M)->p) \
657 657 : k5_os_nothread_mutex_assert_unlocked(&(M)->n))
658 658 # define k5_os_mutex_assert_locked(M) \
659 659 (K5_PTHREADS_LOADED \
660 660 ? k5_pthread_assert_locked(M) \
661 661 : k5_os_nothread_mutex_assert_locked(&(M)->n))
662 662
663 663 #else
664 664
665 665 # ifdef DEBUG_THREADS
666 666 # ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
667 667 # define K5_OS_MUTEX_PARTIAL_INITIALIZER \
668 668 { PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0 }
669 669 # else
670 670 # define K5_OS_MUTEX_PARTIAL_INITIALIZER \
671 671 { PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0 }
672 672 # endif
673 673 # else
674 674 # define K5_OS_MUTEX_PARTIAL_INITIALIZER \
675 675 { PTHREAD_MUTEX_INITIALIZER }
676 676 # endif
677 677
678 678 /* LINTED */
679 679 static int k5_os_mutex_finish_init(k5_os_mutex *m) { return 0; }
680 680 # define k5_os_mutex_init(M) pthread_mutex_init(&(M)->p, 0)
681 681 # define k5_os_mutex_destroy(M) pthread_mutex_destroy(&(M)->p)
682 682 # define k5_os_mutex_lock(M) return_after_yield(k5_pthread_mutex_lock(M))
683 683 # define k5_os_mutex_unlock(M) (MAYBE_SCHED_YIELD(),k5_pthread_mutex_unlock(M))
684 684
685 685 # define k5_os_mutex_assert_unlocked(M) k5_pthread_assert_unlocked(&(M)->p)
686 686 # define k5_os_mutex_assert_locked(M) k5_pthread_assert_locked(M)
687 687
688 688 #endif /* is pthreads always available? */
689 689
690 690 #elif defined _WIN32
691 691
692 692 typedef struct {
693 693 HANDLE h;
694 694 int is_locked;
695 695 } k5_os_mutex;
696 696
697 697 # define K5_OS_MUTEX_PARTIAL_INITIALIZER { INVALID_HANDLE_VALUE, 0 }
698 698
699 699 # define k5_os_mutex_finish_init(M) \
700 700 (ASSERT((M)->h == INVALID_HANDLE_VALUE), \
701 701 ((M)->h = CreateMutex(NULL, FALSE, NULL)) ? 0 : GetLastError())
702 702 # define k5_os_mutex_init(M) \
703 703 ((M)->is_locked = 0, \
704 704 ((M)->h = CreateMutex(NULL, FALSE, NULL)) ? 0 : GetLastError())
705 705 # define k5_os_mutex_destroy(M) \
706 706 (CloseHandle((M)->h) ? ((M)->h = 0, 0) : GetLastError())
707 707
708 708 static int k5_os_mutex_lock(k5_os_mutex *m)
709 709 {
710 710 DWORD res;
711 711 res = WaitForSingleObject(m->h, INFINITE);
712 712 if (res == WAIT_FAILED)
713 713 return GetLastError();
714 714 /* Eventually these should be turned into some reasonable error
715 715 code. */
716 716 ASSERT(res != WAIT_TIMEOUT);
717 717 ASSERT(res != WAIT_ABANDONED);
718 718 ASSERT(res == WAIT_OBJECT_0);
719 719 /* Avoid locking twice. */
720 720 ASSERT(m->is_locked == 0);
721 721 m->is_locked = 1;
722 722 return 0;
723 723 }
724 724
725 725 # define k5_os_mutex_unlock(M) \
726 726 (ASSERT((M)->is_locked == 1), \
727 727 (M)->is_locked = 0, \
728 728 ReleaseMutex((M)->h) ? 0 : GetLastError())
729 729
730 730 # define k5_os_mutex_assert_unlocked(M) ((void)0)
731 731 # define k5_os_mutex_assert_locked(M) ((void)0)
732 732
733 733 #else
734 734
735 735 # error "Thread support enabled, but thread system unknown"
736 736
737 737 #endif
738 738
739 739
740 740
741 741
742 742 typedef struct {
743 743 k5_debug_loc loc_last, loc_created;
744 744 k5_os_mutex os;
745 745 k5_debug_mutex_stats stats;
746 746 } k5_mutex_t;
747 747 #define K5_MUTEX_PARTIAL_INITIALIZER \
748 748 { K5_DEBUG_LOC_INIT, K5_DEBUG_LOC_INIT, \
749 749 K5_OS_MUTEX_PARTIAL_INITIALIZER, K5_MUTEX_STATS_INIT }
750 750 /* LINTED */
751 751 static int k5_mutex_init_1(k5_mutex_t *m, k5_debug_loc l)
752 752 {
753 753 int err = k5_os_mutex_init(&m->os);
754 754 if (err) return err;
755 755 m->loc_created = m->loc_last = l;
756 756 err = k5_mutex_init_stats(&m->stats);
757 757 ASSERT(err == 0);
758 758 return 0;
759 759 }
760 760 #define k5_mutex_init(M) k5_mutex_init_1((M), K5_DEBUG_LOC)
761 761 /* LINTED */
762 762 static int k5_mutex_finish_init_1(k5_mutex_t *m, k5_debug_loc l)
763 763 {
764 764 int err = k5_os_mutex_finish_init(&m->os);
765 765 if (err) return err;
766 766 m->loc_created = m->loc_last = l;
767 767 err = k5_mutex_finish_init_stats(&m->stats);
768 768 ASSERT(err == 0);
769 769 return 0;
770 770 }
771 771 #define k5_mutex_finish_init(M) k5_mutex_finish_init_1((M), K5_DEBUG_LOC)
772 772 #define k5_mutex_destroy(M) \
773 773 (k5_os_mutex_assert_unlocked(&(M)->os), \
774 774 k5_mutex_lock(M), (M)->loc_last = K5_DEBUG_LOC, k5_mutex_unlock(M), \
775 775 k5_os_mutex_destroy(&(M)->os))
776 776 #ifdef __GNUC__
777 777 #define k5_mutex_lock(M) \
778 778 __extension__ ({ \
779 779 int _err = 0; \
780 780 k5_mutex_t *_m = (M); \
781 781 _err = k5_os_mutex_lock(&_m->os); \
782 782 if (_err == 0) _m->loc_last = K5_DEBUG_LOC; \
783 783 _err; \
784 784 })
785 785 #else
786 786 /* LINTED */
787 787 static int k5_mutex_lock_1(k5_mutex_t *m, k5_debug_loc l)
788 788 {
789 789 int err = 0;
790 790 err = k5_os_mutex_lock(&m->os);
791 791 if (err)
792 792 return err;
793 793 m->loc_last = l;
794 794 return err;
795 795 }
796 796 #define k5_mutex_lock(M) k5_mutex_lock_1(M, K5_DEBUG_LOC)
797 797 #endif
798 798 #define k5_mutex_unlock(M) \
799 799 (k5_mutex_assert_locked(M), \
800 800 (M)->loc_last = K5_DEBUG_LOC, \
801 801 k5_os_mutex_unlock(&(M)->os))
802 802
803 803 #define k5_mutex_assert_locked(M) k5_os_mutex_assert_locked(&(M)->os)
804 804 #define k5_mutex_assert_unlocked(M) k5_os_mutex_assert_unlocked(&(M)->os)
805 805
806 806 #define k5_assert_locked k5_mutex_assert_locked
807 807 #define k5_assert_unlocked k5_mutex_assert_unlocked
808 808
809 809
810 810 /* Thread-specific data; implemented in a support file, because we'll
811 811 need to keep track of some global data for cleanup purposes.
812 812
813 813 Note that the callback function type is such that the C library
814 814 routine free() is a valid callback. */
815 815 typedef enum {
816 816 K5_KEY_COM_ERR,
817 817 K5_KEY_GSS_KRB5_SET_CCACHE_OLD_NAME,
818 818 K5_KEY_GSS_KRB5_CCACHE_NAME,
819 819 K5_KEY_GSS_KRB5_ERROR_MESSAGE,
820 820 K5_KEY_GSS_SPNEGO_ERROR_MESSAGE,
821 821 K5_KEY_MAX
822 822 } k5_key_t;
823 823 /* rename shorthand symbols for export */
824 824 #define k5_key_register krb5int_key_register
825 825 #define k5_getspecific krb5int_getspecific
826 826 #define k5_setspecific krb5int_setspecific
827 827 #define k5_key_delete krb5int_key_delete
828 828 extern int k5_key_register(k5_key_t, void (*)(void *));
829 829 extern void *k5_getspecific(k5_key_t);
830 830 extern int k5_setspecific(k5_key_t, void *);
831 831 extern int k5_key_delete(k5_key_t);
832 832
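A hedged sketch of how the thread-specific-data calls above fit together, using the real K5_KEY_COM_ERR key and free() as the destructor (the comment above notes free() is a valid callback); the com_err_* names are hypothetical, and a real caller would also free any previously stored value before replacing it:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* At library init time: register the destructor for the key. */
int
com_err_tsd_init(void)
{
	return (k5_key_register(K5_KEY_COM_ERR, free));
}

/* Per thread: stash a heap-allocated message for later retrieval. */
int
com_err_remember(const char *msg)
{
	char *copy = strdup(msg);

	if (copy == NULL)
		return (ENOMEM);
	return (k5_setspecific(K5_KEY_COM_ERR, copy));
}

const char *
com_err_recall(void)
{
	return (k5_getspecific(K5_KEY_COM_ERR));
}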
833 833 extern int KRB5_CALLCONV krb5int_mutex_alloc (k5_mutex_t **);
834 834 extern void KRB5_CALLCONV krb5int_mutex_free (k5_mutex_t *);
835 835 extern int KRB5_CALLCONV krb5int_mutex_lock (k5_mutex_t *);
836 836 extern int KRB5_CALLCONV krb5int_mutex_unlock (k5_mutex_t *);
837 837
838 838 /* In time, many of the definitions above should move into the support
839 839 library, and this file should be greatly simplified. For type
840 840 definitions, that'll take some work, since other data structures
841 841 incorporate mutexes directly, and our mutex type is dependent on
842 842 configuration options and system attributes. For most functions,
843 843 though, it should be relatively easy.
844 844
845 845 For now, plugins should use the exported functions, and not the
846 846 above macros, and use krb5int_mutex_alloc for allocations. */
847 847 #ifdef PLUGIN
848 848 #undef k5_mutex_lock
849 849 #define k5_mutex_lock krb5int_mutex_lock
850 850 #undef k5_mutex_unlock
851 851 #define k5_mutex_unlock krb5int_mutex_unlock
852 852 #endif
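For plugin code, the exported krb5int_mutex_* functions above replace the macros entirely; a minimal sketch of the allocation-based pattern the comment describes (plugin_* names hypothetical, this header assumed included):

static k5_mutex_t *plugin_lock;

int
plugin_init(void)
{
	return (krb5int_mutex_alloc(&plugin_lock));
}

void
plugin_fini(void)
{
	krb5int_mutex_free(plugin_lock);
}

int
plugin_do_work(void)
{
	int err = krb5int_mutex_lock(plugin_lock);

	if (err)
		return (err);
	/* ... plugin-private state ... */
	return (krb5int_mutex_unlock(plugin_lock));
}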
853 853
854 854 #endif /* _KERNEL */
855 855
856 856
857 857 #endif /* multiple inclusion? */