1 /*
   2  * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
   3  */
   4 
   5 /*
   6  * include/k5-thread.h
   7  *
   8  * Copyright 2004,2005,2006 by the Massachusetts Institute of Technology.
   9  * All Rights Reserved.
  10  *
  11  * Export of this software from the United States of America may
  12  *   require a specific license from the United States Government.
  13  *   It is the responsibility of any person or organization contemplating
  14  *   export to obtain such a license before exporting.
  15  * 
  16  * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
  17  * distribute this software and its documentation for any purpose and
  18  * without fee is hereby granted, provided that the above copyright
  19  * notice appear in all copies and that both that copyright notice and
  20  * this permission notice appear in supporting documentation, and that
  21  * the name of M.I.T. not be used in advertising or publicity pertaining
  22  * to distribution of the software without specific, written prior
  23  * permission.  Furthermore if you modify this software you must label
  24  * your software as modified software and not distribute it in such a
  25  * fashion that it might be confused with the original M.I.T. software.
  26  * M.I.T. makes no representations about the suitability of
  27  * this software for any purpose.  It is provided "as is" without express
  28  * or implied warranty.
  29  * 
  30  *
  31  * Preliminary thread support.
  32  */
  33 
  34 #ifndef K5_THREAD_H
  35 #define K5_THREAD_H
  36 
  37 #ifdef _KERNEL
  38 
  39 #include <sys/ksynch.h>
  40 
  41 typedef kmutex_t k5_mutex_t;
  42 
  43 #define K5_MUTEX_PARTIAL_INITIALIZER {0}
  44 
  45 /* ARGSUSED */
  46 static void k5_mutex_assert_locked(k5_mutex_t *m) { }
  47 
  48 static int
  49 k5_mutex_lock(k5_mutex_t *m)
  50 {
  51   mutex_enter(m);
  52   return (0);
  53 }
  54 
  55 static int
  56 k5_mutex_unlock(k5_mutex_t *m)
  57 {
  58   mutex_exit(m);
  59   return(0);
  60 }
  61 
  62 
  63 #else /* _KERNEL */
  64 
  65 #include "autoconf.h"
  66 #ifndef KRB5_CALLCONV
  67 # define KRB5_CALLCONV
  68 #endif
  69 #ifndef KRB5_CALLCONV_C
  70 # define KRB5_CALLCONV_C
  71 #endif
  72 
  73 /* Interface (tentative):
  74 
  75    Mutex support:
  76 
  77    // Between these two, we should be able to do pure compile-time
  78    // and pure run-time initialization.
  79    //   POSIX:   partial initializer is PTHREAD_MUTEX_INITIALIZER,
  80    //            finish does nothing
  81    //   Windows: partial initializer is an invalid handle,
  82    //            finish does the real initialization work
  83    //   debug:   partial initializer sets one magic value,
  84    //            finish verifies and sets a new magic value for
  85    //              lock/unlock to check
  86    k5_mutex_t foo_mutex = K5_MUTEX_PARTIAL_INITIALIZER;
  87    int k5_mutex_finish_init(k5_mutex_t *);
  88    // for dynamic allocation
  89    int k5_mutex_init(k5_mutex_t *);
  90    // Must work for both kinds of alloc, even if it means adding flags.
  91    int k5_mutex_destroy(k5_mutex_t *);
  92 
  93    // As before.
  94    int k5_mutex_lock(k5_mutex_t *);
  95    int k5_mutex_unlock(k5_mutex_t *);
  96 
  97    In each library, one new function to finish the static mutex init,
  98    and any other library-wide initialization that might be desired.
  99    On POSIX, this function would be called via the second support
 100    function (see below).  On Windows, it would be called at library
   load time.  These functions, or functions they call, should be the
 102    only places that k5_mutex_finish_init gets called.
 103 
 104    A second function or macro called at various possible "first" entry
 105    points which either calls pthread_once on the first function
 106    (POSIX), or checks some flag set by the first function (Windows,
 107    debug support), and possibly returns an error.  (In the
 108    non-threaded case, a simple flag can be used to avoid multiple
 109    invocations, and the mutexes don't need run-time initialization
   anyway.)
 111 
 112    A third function for library termination calls mutex_destroy on
 113    each mutex for the library.  This function would be called
 114    automatically at library unload time.  If it turns out to be needed
 115    at exit time for libraries that don't get unloaded, perhaps we
 116    should also use atexit().  Any static mutexes should be cleaned up
 117    with k5_mutex_destroy here.
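
   A hedged sketch of how these pieces fit together, reusing the
   foo_mutex declared above (foo_library_init and foo_library_fini are
   hypothetical names, not taken from any actual library):

   // "first" function: finish the static mutex init; run exactly once
   static int foo_library_init(void)
   {
       return k5_mutex_finish_init(&foo_mutex);
   }
   // "third" function: library termination; clean up static mutexes
   static void foo_library_fini(void)
   {
       (void) k5_mutex_destroy(&foo_mutex);
   }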
 118 
 119    How does that second support function invoke the first support
 120    function only once?  Through something modelled on pthread_once
 121    that I haven't written up yet.  Probably:
 122 
 123    k5_once_t foo_once = K5_ONCE_INIT;
   int k5_once(k5_once_t *, void (*)(void));
 125 
 126    For POSIX: Map onto pthread_once facility.
 127    For non-threaded case: A simple flag.
 128    For Windows: Not needed; library init code takes care of it.
 129 
 130    XXX: A general k5_once mechanism isn't possible for Windows,
 131    without faking it through named mutexes or mutexes initialized at
 132    startup.  I was only using it in one place outside these headers,
 133    so I'm dropping the general scheme.  Eventually the existing uses
 134    in k5-thread.h and k5-platform.h will be converted to pthread_once
 135    or static variables.
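
   A hedged usage sketch, reusing the foo_once declared above
   (foo_init_once and the error check are illustrative only):

   static void foo_init_once(void)
   {
       // one-time setup, e.g. k5_mutex_finish_init on static mutexes
   }
   ...
   err = k5_once(&foo_once, foo_init_once);
   if (err)
       return err;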
 136 
 137 
 138    Thread-specific data:
 139 
 140    // TSD keys are limited in number in gssapi/krb5/com_err; enumerate
 141    // them all.  This allows support code init to allocate the
 142    // necessary storage for pointers all at once, and avoids any
 143    // possible error in key creation.
 144    enum { ... } k5_key_t;
 145    // Register destructor function.  Called in library init code.
 146    int k5_key_register(k5_key_t, void (*destructor)(void *));
 147    // Returns NULL or data.
 148    void *k5_getspecific(k5_key_t);
 149    // Returns error if key out of bounds, or the pointer table can't
 150    // be allocated.  A call to k5_key_register must have happened first.
 151    // This may trigger the calling of pthread_setspecific on POSIX.
 152    int k5_setspecific(k5_key_t, void *);
 153    // Called in library termination code.
 154    // Trashes data in all threads, calling the registered destructor
 155    // (but calling it from the current thread).
 156    int k5_key_delete(k5_key_t);
 157 
 158    For the non-threaded version, the support code will have a static
 159    array indexed by k5_key_t values, and get/setspecific simply access
 160    the array elements.
 161 
 162    The TSD destructor table is global state, protected by a mutex if
 163    threads are enabled.
 164 
 165    Debug support: Not much.  Might check if k5_key_register has been
 166    called and abort if not.
 167 
 168 
 169    Any actual external symbols will use the krb5int_ prefix.  The k5_
 170    names will be simple macros or inline functions to rename the
 171    external symbols, or slightly more complex ones to expand the
 172    implementation inline (e.g., map to POSIX versions and/or debug
 173    code using __FILE__ and the like).
 174 
 175 
 176    More to be added, perhaps.  */
 177 
 178 #undef DEBUG_THREADS /* SUNW14resync XXX */
 179 #undef DEBUG_THREADS_LOC /* SUNW14resync XXX */
 180 #undef DEBUG_THREADS_SLOW /* debugging stuff that'll slow things down? */
 181 #undef DEBUG_THREADS_STATS
 182 
 183 #ifndef _KERNEL
 184 #include <assert.h>
 185 #include <stdarg.h> 
 186 #define ASSERT assert 
 187 #endif
 188 
 189 /* For tracking locations, of (e.g.) last lock or unlock of mutex.  */
 190 #ifdef DEBUG_THREADS_LOC
 191 typedef struct {
 192     const char *filename;
 193     int lineno;
 194 } k5_debug_loc;
 195 #define K5_DEBUG_LOC_INIT       { __FILE__, __LINE__ }
 196 #if __GNUC__ >= 2
 197 #define K5_DEBUG_LOC            (__extension__ (k5_debug_loc)K5_DEBUG_LOC_INIT)
 198 #else
 199 static inline k5_debug_loc k5_debug_make_loc(const char *file, int line)
 200 {
 201     k5_debug_loc l;
 202     l.filename = file;
 203     l.lineno = line;
 204     return l;
 205 }
 206 #define K5_DEBUG_LOC            (k5_debug_make_loc(__FILE__,__LINE__))
 207 #endif
 208 #else /* ! DEBUG_THREADS_LOC */
 209 typedef char k5_debug_loc;
 210 #define K5_DEBUG_LOC_INIT       0
 211 #define K5_DEBUG_LOC            0
 212 #endif
 213 
 214 #define k5_debug_update_loc(L)  ((L) = K5_DEBUG_LOC)
 215 
 216 
 217 
 218 /* Statistics gathering:
 219 
 220    Currently incomplete, don't try enabling it.
 221 
 222    Eventually: Report number of times locked, total and standard
 223    deviation of the time the lock was held, total and std dev time
 224    spent waiting for the lock.  "Report" will probably mean "write a
 225    line to a file if a magic environment variable is set."  */
 226 
 227 #ifdef DEBUG_THREADS_STATS
 228 
 229 #if HAVE_TIME_H && (!defined(HAVE_SYS_TIME_H) || defined(TIME_WITH_SYS_TIME))
 230 # include <time.h>
 231 #endif
 232 #if HAVE_SYS_TIME_H
 233 # include <sys/time.h>
 234 #endif
 235 #ifdef HAVE_STDINT_H
 236 # include <stdint.h>
 237 #endif
 238 /* for memset */
 239 #include <string.h>
 240 /* for uint64_t */
 241 #include <inttypes.h>
 242 typedef uint64_t k5_debug_timediff_t; /* or long double */
 243 typedef struct timeval k5_debug_time_t;
 244 static inline k5_debug_timediff_t
 245 timediff(k5_debug_time_t t2, k5_debug_time_t t1)
 246 {
 247     return (t2.tv_sec - t1.tv_sec) * 1000000 + (t2.tv_usec - t1.tv_usec);
 248 }
 249 static inline k5_debug_time_t get_current_time(void)
 250 {
 251     struct timeval tv;
 252     if (gettimeofday(&tv,0) < 0) { tv.tv_sec = tv.tv_usec = 0; }
 253     return tv;
 254 }
 255 struct k5_timediff_stats {
 256     k5_debug_timediff_t valmin, valmax, valsum, valsqsum;
 257 };
 258 typedef struct {
 259     int count;
 260     k5_debug_time_t time_acquired, time_created;
 261     struct k5_timediff_stats lockwait, lockheld;
 262 } k5_debug_mutex_stats;
 263 #define k5_mutex_init_stats(S)                                  \
 264         (memset((S), 0, sizeof(k5_debug_mutex_stats)),  \
 265          (S)->time_created = get_current_time(),             \
 266          0)
 267 #define k5_mutex_finish_init_stats(S)   (0)
 268 #define K5_MUTEX_STATS_INIT     { 0, {0}, {0}, {0}, {0} }
 269 typedef k5_debug_time_t k5_mutex_stats_tmp;
 270 #define k5_mutex_stats_start()  get_current_time()
 271 void KRB5_CALLCONV krb5int_mutex_lock_update_stats(k5_debug_mutex_stats *m,
 272                                                    k5_mutex_stats_tmp start);
 273 void KRB5_CALLCONV krb5int_mutex_unlock_update_stats(k5_debug_mutex_stats *m);
 274 #define k5_mutex_lock_update_stats      krb5int_mutex_lock_update_stats
 275 #define k5_mutex_unlock_update_stats    krb5int_mutex_unlock_update_stats
 276 void KRB5_CALLCONV krb5int_mutex_report_stats(/* k5_mutex_t *m */);
 277 
 278 #else
 279 
 280 typedef char k5_debug_mutex_stats;
 281 #define k5_mutex_init_stats(S)          (*(S) = 's', 0)
 282 #define k5_mutex_finish_init_stats(S)   (0)
 283 #define K5_MUTEX_STATS_INIT             's'
 284 typedef int k5_mutex_stats_tmp;
 285 #define k5_mutex_stats_start()          (0)
 286 #ifdef __GNUC__
 287 static void
 288 k5_mutex_lock_update_stats(k5_debug_mutex_stats *m, k5_mutex_stats_tmp t)
 289 {
 290 }
 291 #else
 292 # define k5_mutex_lock_update_stats(M,S)        (S)
 293 #endif
 294 #define k5_mutex_unlock_update_stats(M) (*(M) = 's')
 295 
/* If statistics tracking isn't enabled, these functions don't actually
   do anything.  Declare them anyway so we can still get type checking, etc.  */
 298 void KRB5_CALLCONV krb5int_mutex_lock_update_stats(k5_debug_mutex_stats *m,
 299                                                    k5_mutex_stats_tmp start);
 300 void KRB5_CALLCONV krb5int_mutex_unlock_update_stats(k5_debug_mutex_stats *m);
 301 void KRB5_CALLCONV krb5int_mutex_report_stats(/* k5_mutex_t *m */);
 302 
 303 #define krb5int_mutex_report_stats(M)   ((M)->stats = 'd')
 304 
 305 #endif
 306 
 307 
 308 
 309 /* Define the OS mutex bit.  */
 310 
 311 /* First, if we're not actually doing multiple threads, do we
 312    want the debug support or not?  */
 313 
 314 #ifdef DEBUG_THREADS
 315 
 316 enum k5_mutex_init_states {
 317     K5_MUTEX_DEBUG_PARTLY_INITIALIZED = 0x12,
 318     K5_MUTEX_DEBUG_INITIALIZED,
 319     K5_MUTEX_DEBUG_DESTROYED
 320 };
 321 enum k5_mutex_flag_states {
 322     K5_MUTEX_DEBUG_UNLOCKED = 0x23,
 323     K5_MUTEX_DEBUG_LOCKED
 324 };
 325 
 326 typedef struct {
 327     enum k5_mutex_init_states initialized;
 328     enum k5_mutex_flag_states locked;
 329 } k5_os_nothread_mutex;
 330 
 331 # define K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER \
 332         { K5_MUTEX_DEBUG_PARTLY_INITIALIZED, K5_MUTEX_DEBUG_UNLOCKED }
 333 
 334 # define k5_os_nothread_mutex_finish_init(M)                            \
 335         (ASSERT((M)->initialized != K5_MUTEX_DEBUG_INITIALIZED),     \
 336          ASSERT((M)->initialized == K5_MUTEX_DEBUG_PARTLY_INITIALIZED),      \
 337          ASSERT((M)->locked == K5_MUTEX_DEBUG_UNLOCKED),             \
 338          (M)->initialized = K5_MUTEX_DEBUG_INITIALIZED, 0)
 339 # define k5_os_nothread_mutex_init(M)                   \
 340         ((M)->initialized = K5_MUTEX_DEBUG_INITIALIZED,      \
 341          (M)->locked = K5_MUTEX_DEBUG_UNLOCKED, 0)
 342 # define k5_os_nothread_mutex_destroy(M)                                \
 343         (ASSERT((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED),     \
 344          (M)->initialized = K5_MUTEX_DEBUG_DESTROYED, 0)
 345 
 346 # define k5_os_nothread_mutex_lock(M)                   \
 347         (k5_os_nothread_mutex_assert_unlocked(M),       \
 348          (M)->locked = K5_MUTEX_DEBUG_LOCKED, 0)
 349 # define k5_os_nothread_mutex_unlock(M)                 \
 350         (k5_os_nothread_mutex_assert_locked(M),         \
 351          (M)->locked = K5_MUTEX_DEBUG_UNLOCKED, 0)
 352 
 353 # define k5_os_nothread_mutex_assert_locked(M)                          \
 354         (ASSERT((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED),     \
 355          ASSERT((M)->locked != K5_MUTEX_DEBUG_UNLOCKED),             \
 356          ASSERT((M)->locked == K5_MUTEX_DEBUG_LOCKED))
 357 # define k5_os_nothread_mutex_assert_unlocked(M)                        \
 358         (ASSERT((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED),     \
 359          ASSERT((M)->locked != K5_MUTEX_DEBUG_LOCKED),                       \
 360          ASSERT((M)->locked == K5_MUTEX_DEBUG_UNLOCKED))
 361 
 362 #else /* threads disabled and not debugging */
 363 typedef char k5_os_nothread_mutex;
 364 # define K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER       0
 365 /* Empty inline functions avoid the "statement with no effect"
 366    warnings, and do better type-checking than functions that don't use
 367    their arguments.  */
 368 /* SUNW 1.4resync, remove "inline" to avoid warning */
 369 /* ARGSUSED */
 370 /* LINTED */
 371 static int k5_os_nothread_mutex_finish_init(k5_os_nothread_mutex *m) {
 372     return 0;
 373 }
 374 /* ARGSUSED */
 375 /* LINTED */
 376 static int k5_os_nothread_mutex_init(k5_os_nothread_mutex *m) {
 377     return 0;
 378 }
 379 /* ARGSUSED */
 380 /* LINTED */
 381 static int k5_os_nothread_mutex_destroy(k5_os_nothread_mutex *m) {
 382     return 0;
 383 }
 384 /* ARGSUSED */
 385 /* LINTED */
 386 static int k5_os_nothread_mutex_lock(k5_os_nothread_mutex *m) {
 387     return 0;
 388 }
 389 /* ARGSUSED */
 390 /* LINTED */
 391 static int k5_os_nothread_mutex_unlock(k5_os_nothread_mutex *m) {
 392     return 0;
 393 }
 394 # define k5_os_nothread_mutex_assert_locked(M)          ((void)0)
 395 # define k5_os_nothread_mutex_assert_unlocked(M)        ((void)0)
 396 
 397 #endif
 398 
 399 /* Values:
 400    2 - function has not been run
 401    3 - function has been run
 402    4 - function is being run -- deadlock detected */
 403 typedef unsigned char k5_os_nothread_once_t;
 404 # define K5_OS_NOTHREAD_ONCE_INIT       2
 405 # define k5_os_nothread_once(O,F)                                       \
 406         (*(O) == 3 ? 0                                                  \
 407          : *(O) == 2 ? (*(O) = 4, (F)(), *(O) = 3, 0)                   \
 408          : (ASSERT(*(O) != 4), ASSERT(*(O) == 2 || *(O) == 3), 0))
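
/* A hedged usage sketch (foo_flag and foo_init are hypothetical names):

     static k5_os_nothread_once_t foo_flag = K5_OS_NOTHREAD_ONCE_INIT;
     static void foo_init(void) { ... }
     ...
     err = k5_os_nothread_once(&foo_flag, foo_init);

   The first call runs foo_init and leaves the flag at 3; later calls
   return 0 without running foo_init again; a recursive call made from
   inside foo_init finds the value 4 and trips the deadlock assertion.  */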
 409 
 410 
 411 
 412 #ifndef ENABLE_THREADS
 413 typedef k5_os_nothread_mutex k5_os_mutex;
 414 # define K5_OS_MUTEX_PARTIAL_INITIALIZER        \
 415                 K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER
 416 # define k5_os_mutex_finish_init        k5_os_nothread_mutex_finish_init
 417 # define k5_os_mutex_init               k5_os_nothread_mutex_init
 418 # define k5_os_mutex_destroy            k5_os_nothread_mutex_destroy
 419 # define k5_os_mutex_lock               k5_os_nothread_mutex_lock
 420 # define k5_os_mutex_unlock             k5_os_nothread_mutex_unlock
 421 # define k5_os_mutex_assert_locked      k5_os_nothread_mutex_assert_locked
 422 # define k5_os_mutex_assert_unlocked    k5_os_nothread_mutex_assert_unlocked
 423 
 424 # define k5_once_t                      k5_os_nothread_once_t
 425 # define K5_ONCE_INIT                   K5_OS_NOTHREAD_ONCE_INIT
 426 # define k5_once                        k5_os_nothread_once
 427 
 428 #elif HAVE_PTHREAD
 429 
 430 # include <pthread.h>
 431 
 432 /* Weak reference support, etc.
 433 
 434    Linux: Stub mutex routines exist, but pthread_once does not.
 435 
 436    Solaris: In libc there's a pthread_once that doesn't seem to do
 437    anything.  Bleah.  But pthread_mutexattr_setrobust_np is defined
 438    only in libpthread.  However, some version of GNU libc (Red Hat's
 439    Fedora Core 5, reportedly) seems to have that function, but no
 440    declaration, so we'd have to declare it in order to test for its
 441    address.  We now have tests to see if pthread_once actually works,
 442    so stick with that for now.
 443 
 444    IRIX 6.5 stub pthread support in libc is really annoying.  The
 445    pthread_mutex_lock function returns ENOSYS for a program not linked
 446    against -lpthread.  No link-time failure, no weak symbols, etc.
 447    The C library doesn't provide pthread_once; we can use weak
 448    reference support for that.
 449 
 450    If weak references are not available, then for now, we assume that
 451    the pthread support routines will always be available -- either the
 452    real thing, or functional stubs that merely prohibit creating
 453    threads.
 454 
 455    If we find a platform with non-functional stubs and no weak
 456    references, we may have to resort to some hack like dlsym on the
 457    symbol tables of the current process.  */
 458 #ifdef HAVE_PRAGMA_WEAK_REF
 459 # pragma weak pthread_once
 460 # pragma weak pthread_mutex_lock
 461 # pragma weak pthread_mutex_unlock
 462 # pragma weak pthread_mutex_destroy
 463 # pragma weak pthread_mutex_init
 464 # pragma weak pthread_self
 465 # pragma weak pthread_equal
 466 # ifdef HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP_IN_THREAD_LIB
 467 #  pragma weak pthread_mutexattr_setrobust_np
 468 # endif
 469 # if !defined HAVE_PTHREAD_ONCE
 470 #  define K5_PTHREADS_LOADED    (&pthread_once != 0)
 471 # elif !defined HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP \
 472         && defined HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP_IN_THREAD_LIB
 473 #  define K5_PTHREADS_LOADED    (&pthread_mutexattr_setrobust_np != 0)
 474 # else
 475 #  define K5_PTHREADS_LOADED    (1)
 476 # endif
 477 #else
 478 /* no pragma weak support */
 479 # define K5_PTHREADS_LOADED     (1)
 480 #endif
 481 
 482 #if defined(__mips) && defined(__sgi) && (defined(_SYSTYPE_SVR4) || defined(__SYSTYPE_SVR4__))
 483 /* IRIX 6.5 stub pthread support in libc is really annoying.  The
 484    pthread_mutex_lock function returns ENOSYS for a program not linked
 485    against -lpthread.  No link-time failure, no weak reference tests,
 486    etc.
 487 
 488    The C library doesn't provide pthread_once; we can use weak
 489    reference support for that.  */
 490 # ifndef HAVE_PRAGMA_WEAK_REF
 491 #  if defined(__GNUC__) && __GNUC__ < 3
 492 #   error "Please update to a newer gcc with weak symbol support, or switch to native cc, reconfigure and recompile."
 493 #  else
 494 #   error "Weak reference support is required"
 495 #  endif
 496 # endif
 497 # define USE_PTHREAD_LOCK_ONLY_IF_LOADED
 498 #endif
 499 
 500 #if !defined(HAVE_PTHREAD_MUTEX_LOCK) && !defined(USE_PTHREAD_LOCK_ONLY_IF_LOADED)
 501 # define USE_PTHREAD_LOCK_ONLY_IF_LOADED
 502 #endif
 503 
 504 #ifdef HAVE_PRAGMA_WEAK_REF
 505 /* Can't rely on useful stubs -- see above regarding Solaris.  */
 506 typedef struct {
 507     pthread_once_t o;
 508     k5_os_nothread_once_t n;
 509 } k5_once_t;
 510 # define K5_ONCE_INIT   { PTHREAD_ONCE_INIT, K5_OS_NOTHREAD_ONCE_INIT }
 511 # define k5_once(O,F)   (K5_PTHREADS_LOADED                     \
 512                          ? pthread_once(&(O)->o,F)               \
 513                          : k5_os_nothread_once(&(O)->n,F))
 514 #else
 515 typedef pthread_once_t k5_once_t;
 516 # define K5_ONCE_INIT   PTHREAD_ONCE_INIT
 517 # define k5_once        pthread_once
 518 #endif
 519 
 520 typedef struct {
 521     pthread_mutex_t p;
 522 #ifdef DEBUG_THREADS
 523     pthread_t owner;
 524 #endif
 525 #ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED
 526     k5_os_nothread_mutex n;
 527 #endif
 528 } k5_os_mutex;
 529 
 530 #ifdef DEBUG_THREADS
 531 # ifdef __GNUC__
 532 #  define k5_pthread_mutex_lock(M)                      \
 533         ({                                              \
 534             k5_os_mutex *_m2 = (M);                     \
 535             int _r2 = pthread_mutex_lock(&_m2->p);       \
 536             if (_r2 == 0) _m2->owner = pthread_self();       \
 537             _r2;                                        \
 538         })
 539 # else
 540 static int
 541 k5_pthread_mutex_lock(k5_os_mutex *m)
 542 {
 543     int r = pthread_mutex_lock(&m->p);
 544     if (r)
 545         return r;
 546     m->owner = pthread_self();
 547     return 0;
 548 }
 549 # endif
 550 # define k5_pthread_assert_locked(M)                            \
 551         (K5_PTHREADS_LOADED                                     \
 552          ? ASSERT(pthread_equal((M)->owner, pthread_self())) \
 553          : (void)0)
 554 # define k5_pthread_mutex_unlock(M)     \
 555         (k5_pthread_assert_locked(M),   \
 556          (M)->owner = (pthread_t) 0, \
 557          pthread_mutex_unlock(&(M)->p))
 558 #else
 559 # define k5_pthread_mutex_lock(M) pthread_mutex_lock(&(M)->p)
 560 /* LINTED */
 561 static void k5_pthread_assert_locked(k5_os_mutex *m) { }
 562 # define k5_pthread_mutex_unlock(M) pthread_mutex_unlock(&(M)->p)
 563 #endif
 564 
 565 /* Define as functions to:
 566    (1) eliminate "statement with no effect" warnings for "0"
 567    (2) encourage type-checking in calling code  */
 568 
 569 /* LINTED */
 570 static void k5_pthread_assert_unlocked(pthread_mutex_t *m) { }
 571 
 572 #if defined(DEBUG_THREADS_SLOW) && HAVE_SCHED_H && (HAVE_SCHED_YIELD || HAVE_PRAGMA_WEAK_REF)
 573 # include <sched.h>
 574 # if !HAVE_SCHED_YIELD
 575 #  pragma weak sched_yield
 576 #  define MAYBE_SCHED_YIELD()   ((void)((&sched_yield != NULL) ? sched_yield() : 0))
 577 # else
 578 #  define MAYBE_SCHED_YIELD()   ((void)sched_yield())
 579 # endif
 580 #else
 581 # define MAYBE_SCHED_YIELD()    ((void)0)
 582 #endif
 583 
 584 /* It may not be obvious why this function is desirable.
 585 
 586    I want to call pthread_mutex_lock, then sched_yield, then look at
 587    the return code from pthread_mutex_lock.  That can't be implemented
 588    in a macro without a temporary variable, or GNU C extensions.
 589 
 590    There used to be an inline function which did it, with both
 591    functions called from the inline function.  But that messes with
 592    the debug information on a lot of configurations, and you can't
 593    tell where the inline function was called from.  (Typically, gdb
 594    gives you the name of the function from which the inline function
 595    was called, and a line number within the inline function itself.)
 596 
 597    With this auxiliary function, pthread_mutex_lock can be called at
 598    the invoking site via a macro; once it returns, the inline function
 599    is called (with messed-up line-number info for gdb hopefully
 600    localized to just that call).  */
 601 #ifdef __GNUC__
 602 #define return_after_yield(R)                   \
 603         __extension__ ({                        \
 604             int _r = (R);                       \
 605             MAYBE_SCHED_YIELD();                \
 606             _r;                                 \
 607         })
 608 #else
 609 static int return_after_yield(int r)
 610 {
 611     MAYBE_SCHED_YIELD();
 612     return r;
 613 }
 614 #endif
 615 
 616 #ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED
 617 
 618 # if defined(PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP) && defined(DEBUG_THREADS)
 619 #  define K5_OS_MUTEX_PARTIAL_INITIALIZER \
 620         { PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0, \
 621           K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
 622 # elif defined(DEBUG_THREADS)
 623 #  define K5_OS_MUTEX_PARTIAL_INITIALIZER \
 624         { PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0, \
 625           K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
 626 # else
 627 #  define K5_OS_MUTEX_PARTIAL_INITIALIZER \
 628         { PTHREAD_MUTEX_INITIALIZER, K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
 629 # endif
 631 # define k5_os_mutex_finish_init(M)             \
 632         k5_os_nothread_mutex_finish_init(&(M)->n)
 633 # define k5_os_mutex_init(M)                    \
 634         (k5_os_nothread_mutex_init(&(M)->n),     \
 635          (K5_PTHREADS_LOADED                    \
 636           ? pthread_mutex_init(&(M)->p, 0)       \
 637           : 0))
 638 # define k5_os_mutex_destroy(M)                 \
 639         (k5_os_nothread_mutex_destroy(&(M)->n),  \
 640          (K5_PTHREADS_LOADED                    \
 641           ? pthread_mutex_destroy(&(M)->p)       \
 642           : 0))
 643 
 644 # define k5_os_mutex_lock(M)                                            \
 645         return_after_yield(K5_PTHREADS_LOADED                           \
 646                            ? k5_pthread_mutex_lock(M)                   \
 647                            : k5_os_nothread_mutex_lock(&(M)->n))
 648 # define k5_os_mutex_unlock(M)                          \
 649         (MAYBE_SCHED_YIELD(),                           \
 650          (K5_PTHREADS_LOADED                            \
 651           ? k5_pthread_mutex_unlock(M)                  \
 652           : k5_os_nothread_mutex_unlock(&(M)->n)))
 653 
 654 # define k5_os_mutex_assert_unlocked(M)                 \
 655         (K5_PTHREADS_LOADED                             \
 656          ? k5_pthread_assert_unlocked(&(M)->p)           \
 657          : k5_os_nothread_mutex_assert_unlocked(&(M)->n))
 658 # define k5_os_mutex_assert_locked(M)                   \
 659         (K5_PTHREADS_LOADED                             \
 660          ? k5_pthread_assert_locked(M)                  \
 661          : k5_os_nothread_mutex_assert_locked(&(M)->n))
 662 
 663 #else
 664 
 665 # ifdef DEBUG_THREADS
 666 #  ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
 667 #   define K5_OS_MUTEX_PARTIAL_INITIALIZER \
 668         { PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0 }
 669 #  else
 670 #   define K5_OS_MUTEX_PARTIAL_INITIALIZER \
 671         { PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0 }
 672 #  endif
 673 # else
 674 #  define K5_OS_MUTEX_PARTIAL_INITIALIZER \
 675         { PTHREAD_MUTEX_INITIALIZER }
 676 # endif
 677 
 678 /* LINTED */
 679 static  int k5_os_mutex_finish_init(k5_os_mutex *m) { return 0; }
 680 # define k5_os_mutex_init(M)            pthread_mutex_init(&(M)->p, 0)
 681 # define k5_os_mutex_destroy(M)         pthread_mutex_destroy(&(M)->p)
 682 # define k5_os_mutex_lock(M)    return_after_yield(k5_pthread_mutex_lock(M))
 683 # define k5_os_mutex_unlock(M)          (MAYBE_SCHED_YIELD(),k5_pthread_mutex_unlock(M))
 684 
 685 # define k5_os_mutex_assert_unlocked(M) k5_pthread_assert_unlocked(&(M)->p)
 686 # define k5_os_mutex_assert_locked(M)   k5_pthread_assert_locked(M)
 687 
 688 #endif /* is pthreads always available? */
 689 
 690 #elif defined _WIN32
 691 
 692 typedef struct {
 693     HANDLE h;
 694     int is_locked;
 695 } k5_os_mutex;
 696 
 697 # define K5_OS_MUTEX_PARTIAL_INITIALIZER { INVALID_HANDLE_VALUE, 0 }
 698 
 699 # define k5_os_mutex_finish_init(M)                                      \
 700         (ASSERT((M)->h == INVALID_HANDLE_VALUE),                      \
 701          ((M)->h = CreateMutex(NULL, FALSE, NULL)) ? 0 : GetLastError())
 702 # define k5_os_mutex_init(M)                                             \
 703         ((M)->is_locked = 0,                                          \
 704          ((M)->h = CreateMutex(NULL, FALSE, NULL)) ? 0 : GetLastError())
 705 # define k5_os_mutex_destroy(M)         \
 706         (CloseHandle((M)->h) ? ((M)->h = 0, 0) : GetLastError())
 707 
 708 static int k5_os_mutex_lock(k5_os_mutex *m)
 709 {
 710     DWORD res;
 711     res = WaitForSingleObject(m->h, INFINITE);
 712     if (res == WAIT_FAILED)
 713         return GetLastError();
 714     /* Eventually these should be turned into some reasonable error
 715        code.  */
 716     ASSERT(res != WAIT_TIMEOUT);
 717     ASSERT(res != WAIT_ABANDONED);
 718     ASSERT(res == WAIT_OBJECT_0);
 719     /* Avoid locking twice.  */
 720     ASSERT(m->is_locked == 0);
 721     m->is_locked = 1;
 722     return 0;
 723 }
 724 
 725 # define k5_os_mutex_unlock(M)                          \
 726         (ASSERT((M)->is_locked == 1),                        \
 727          (M)->is_locked = 0,                         \
 728          ReleaseMutex((M)->h) ? 0 : GetLastError())
 729 
 730 # define k5_os_mutex_assert_unlocked(M) ((void)0)
 731 # define k5_os_mutex_assert_locked(M)   ((void)0)
 732 
 733 #else
 734 
 735 # error "Thread support enabled, but thread system unknown"
 736 
 737 #endif
 738 
 739 
 740 
 741 
 742 typedef struct {
 743     k5_debug_loc loc_last, loc_created;
 744     k5_os_mutex os;
 745     k5_debug_mutex_stats stats;
 746 } k5_mutex_t;
 747 #define K5_MUTEX_PARTIAL_INITIALIZER            \
 748         { K5_DEBUG_LOC_INIT, K5_DEBUG_LOC_INIT, \
 749           K5_OS_MUTEX_PARTIAL_INITIALIZER, K5_MUTEX_STATS_INIT }
 750 /* LINTED */
 751 static int k5_mutex_init_1(k5_mutex_t *m, k5_debug_loc l)
 752 {
 753     int err = k5_os_mutex_init(&m->os);
 754     if (err) return err;
 755     m->loc_created = m->loc_last = l;
 756     err = k5_mutex_init_stats(&m->stats);
 757     ASSERT(err == 0);
 758     return 0;
 759 }
 760 #define k5_mutex_init(M)        k5_mutex_init_1((M), K5_DEBUG_LOC)
 761 /* LINTED */
 762 static  int k5_mutex_finish_init_1(k5_mutex_t *m, k5_debug_loc l)
 763 {
 764     int err = k5_os_mutex_finish_init(&m->os);
 765     if (err) return err;
 766     m->loc_created = m->loc_last = l;
 767     err = k5_mutex_finish_init_stats(&m->stats);
 768     ASSERT(err == 0);
 769     return 0;
 770 }
 771 #define k5_mutex_finish_init(M) k5_mutex_finish_init_1((M), K5_DEBUG_LOC)
 772 #define k5_mutex_destroy(M)                     \
 773         (k5_os_mutex_assert_unlocked(&(M)->os),  \
 774          k5_mutex_lock(M), (M)->loc_last = K5_DEBUG_LOC, k5_mutex_unlock(M), \
 775          k5_os_mutex_destroy(&(M)->os))
 776 #ifdef __GNUC__
 777 #define k5_mutex_lock(M)                                \
 778         __extension__ ({                                \
 779             int _err = 0;                               \
 780             k5_mutex_t *_m = (M);                       \
 781             _err = k5_os_mutex_lock(&_m->os);            \
 782             if (_err == 0) _m->loc_last = K5_DEBUG_LOC;      \
 783             _err;                                       \
 784         })
 785 #else
 786 /* LINTED */
 787 static  int k5_mutex_lock_1(k5_mutex_t *m, k5_debug_loc l)
 788 {
 789     int err = 0;
 790     err = k5_os_mutex_lock(&m->os);
 791     if (err)
 792         return err;
 793     m->loc_last = l;
 794     return err;
 795 }
 796 #define k5_mutex_lock(M)        k5_mutex_lock_1(M, K5_DEBUG_LOC)
 797 #endif
 798 #define k5_mutex_unlock(M)                              \
 799         (k5_mutex_assert_locked(M),                     \
 800          (M)->loc_last = K5_DEBUG_LOC,                       \
 801          k5_os_mutex_unlock(&(M)->os))
 802 
 803 #define k5_mutex_assert_locked(M)       k5_os_mutex_assert_locked(&(M)->os)
 804 #define k5_mutex_assert_unlocked(M)     k5_os_mutex_assert_unlocked(&(M)->os)
 805 
 806 #define k5_assert_locked        k5_mutex_assert_locked
 807 #define k5_assert_unlocked      k5_mutex_assert_unlocked
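
/* A hedged usage sketch of the macros above (bar_lock, bar_do_work,
   and the error handling are illustrative, not taken from any caller):

     static k5_mutex_t bar_lock = K5_MUTEX_PARTIAL_INITIALIZER;

     int bar_do_work(void)
     {
         int err = k5_mutex_lock(&bar_lock);
         if (err)
             return err;
         k5_mutex_assert_locked(&bar_lock);
         ... touch the data protected by bar_lock ...
         return k5_mutex_unlock(&bar_lock);
     }

   Note that bar_lock still needs k5_mutex_finish_init called on it
   from the library's init function before first use.  */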
 808 
 809 
 810 /* Thread-specific data; implemented in a support file, because we'll
 811    need to keep track of some global data for cleanup purposes.
 812 
 813    Note that the callback function type is such that the C library
 814    routine free() is a valid callback.  */
 815 typedef enum {
 816     K5_KEY_COM_ERR,
 817     K5_KEY_GSS_KRB5_SET_CCACHE_OLD_NAME,
 818     K5_KEY_GSS_KRB5_CCACHE_NAME,
 819     K5_KEY_GSS_KRB5_ERROR_MESSAGE,
 820     K5_KEY_GSS_SPNEGO_ERROR_MESSAGE,
 821     K5_KEY_MAX
 822 } k5_key_t;
 823 /* rename shorthand symbols for export */
 824 #define k5_key_register krb5int_key_register
 825 #define k5_getspecific  krb5int_getspecific
 826 #define k5_setspecific  krb5int_setspecific
 827 #define k5_key_delete   krb5int_key_delete
 828 extern int k5_key_register(k5_key_t, void (*)(void *));
 829 extern void *k5_getspecific(k5_key_t);
 830 extern int k5_setspecific(k5_key_t, void *);
 831 extern int k5_key_delete(k5_key_t);
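
/* A hedged usage sketch of the TSD calls (the key choice, buffer size,
   and the use of free() as destructor are illustrative only):

     err = k5_key_register(K5_KEY_COM_ERR, free);     // library init
     ...
     char *buf = k5_getspecific(K5_KEY_COM_ERR);
     if (buf == NULL) {
         buf = malloc(BUFSIZ);
         if (buf != NULL && k5_setspecific(K5_KEY_COM_ERR, buf) != 0) {
             free(buf);
             buf = NULL;
         }
     }
     ...
     (void) k5_key_delete(K5_KEY_COM_ERR);            // library fini
  */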
 832 
 833 extern int  KRB5_CALLCONV krb5int_mutex_alloc  (k5_mutex_t **);
 834 extern void KRB5_CALLCONV krb5int_mutex_free   (k5_mutex_t *);
 835 extern int  KRB5_CALLCONV krb5int_mutex_lock   (k5_mutex_t *);
 836 extern int  KRB5_CALLCONV krb5int_mutex_unlock (k5_mutex_t *);
 837 
 838 /* In time, many of the definitions above should move into the support
 839    library, and this file should be greatly simplified.  For type
 840    definitions, that'll take some work, since other data structures
 841    incorporate mutexes directly, and our mutex type is dependent on
 842    configuration options and system attributes.  For most functions,
 843    though, it should be relatively easy.
 844 
 845    For now, plugins should use the exported functions, and not the
 846    above macros, and use krb5int_mutex_alloc for allocations.  */
 847 #ifdef PLUGIN
 848 #undef k5_mutex_lock
 849 #define k5_mutex_lock krb5int_mutex_lock
 850 #undef k5_mutex_unlock
 851 #define k5_mutex_unlock krb5int_mutex_unlock
 852 #endif
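
/* A hedged sketch of the plugin pattern described above (plugin_lock
   and the error handling are illustrative):

     static k5_mutex_t *plugin_lock;

     int err = krb5int_mutex_alloc(&plugin_lock);
     if (err)
         return err;
     ...
     err = krb5int_mutex_lock(plugin_lock);
     if (err == 0) {
         ... touch the data protected by plugin_lock ...
         krb5int_mutex_unlock(plugin_lock);
     }
     ...
     krb5int_mutex_free(plugin_lock);
  */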
 853 
 854 #endif /* _KERNEL */
 855 
 856 
#endif /* K5_THREAD_H */