195 Need replacement for nfs/lockd+klm
Reviewed by: Gordon Ross <gordon.ross@nexenta.com>
Reviewed by: Jeremy Jones <jeremy@delphix.com>
Reviewed by: Jeff Biseda <jbiseda@delphix.com>

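The diff below is largely mechanical: continuation lines are re-indented to
cstyle's four-space rule and a Nexenta copyright notice is added. The one
functional addition in this file is the new flk_has_remote_locks_for_sysid()
routine, for use by the replacement NFS Lock Manager. As a quick orientation,
here is a minimal sketch of how a caller might use it, assuming its prototype
is exported through <sys/flock.h>; the wrapper name and the hold-release
scenario are illustrative assumptions, not part of the change.

        /*
         * Hypothetical illustration only -- not from this change.
         * A lock-manager cleanup path might use the new routine to
         * check whether the client identified by sysid still holds,
         * or is sleeping on, any lock on vp before dropping a hold.
         */
        #include <sys/types.h>
        #include <sys/vnode.h>
        #include <sys/flock.h>

        static boolean_t
        client_done_with_vnode(vnode_t *vp, int sysid)
        {
                /* Non-zero: active or sleeping locks remain for sysid. */
                if (flk_has_remote_locks_for_sysid(vp, sysid))
                        return (B_FALSE);
                return (B_TRUE);
        }
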
--- old/usr/src/uts/common/os/flock.c
+++ new/usr/src/uts/common/os/flock.c
... 19 lines elided ...
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
  24   24   * Use is subject to license terms.
  25   25   */
  26   26  
  27   27  /*      Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
  28   28  /*      All Rights Reserved */
  29   29  
       30 +/*
       31 + * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
       32 + */
       33 +
  30   34  #include <sys/flock_impl.h>
  31   35  #include <sys/vfs.h>
  32   36  #include <sys/t_lock.h>         /* for <sys/callb.h> */
  33   37  #include <sys/callb.h>
  34   38  #include <sys/clconf.h>
  35   39  #include <sys/cladm.h>
  36   40  #include <sys/nbmlock.h>
  37   41  #include <sys/cred.h>
  38   42  #include <sys/policy.h>
  39   43  
... 213 lines elided ...
 253  257          lock_descriptor_t       stack_lock_request;
 254  258          lock_descriptor_t       *lock_request;
 255  259          int error = 0;
 256  260          graph_t *gp;
 257  261          int                     nlmid;
 258  262  
 259  263          /*
 260  264           * Check access permissions
 261  265           */
 262  266          if ((cmd & SETFLCK) &&
 263      -                ((lckdat->l_type == F_RDLCK && (flag & FREAD) == 0) ||
 264      -                (lckdat->l_type == F_WRLCK && (flag & FWRITE) == 0)))
      267 +            ((lckdat->l_type == F_RDLCK && (flag & FREAD) == 0) ||
      268 +            (lckdat->l_type == F_WRLCK && (flag & FWRITE) == 0)))
 265  269                          return (EBADF);
 266  270  
 267  271          /*
 268  272           * for query and unlock we use the stack_lock_request
 269  273           */
 270  274  
 271  275          if ((lckdat->l_type == F_UNLCK) ||
 272      -                        !((cmd & INOFLCK) || (cmd & SETFLCK))) {
      276 +            !((cmd & INOFLCK) || (cmd & SETFLCK))) {
 273  277                  lock_request = &stack_lock_request;
 274  278                  (void) bzero((caddr_t)lock_request,
 275      -                                sizeof (lock_descriptor_t));
      279 +                    sizeof (lock_descriptor_t));
 276  280  
 277  281                  /*
 278  282                   * following is added to make the assertions in
 279  283                   * flk_execute_request() to pass through
 280  284                   */
 281  285  
 282  286                  lock_request->l_edge.edge_in_next = &lock_request->l_edge;
 283  287                  lock_request->l_edge.edge_in_prev = &lock_request->l_edge;
 284  288                  lock_request->l_edge.edge_adj_next = &lock_request->l_edge;
 285  289                  lock_request->l_edge.edge_adj_prev = &lock_request->l_edge;
... 9 lines elided ...
 295  299           * Convert the request range into the canonical start and end
 296  300           * values.  The NLM protocol supports locking over the entire
 297  301           * 32-bit range, so there's no range checking for remote requests,
 298  302           * but we still need to verify that local requests obey the rules.
 299  303           */
 300  304          /* Clustering */
 301  305          if ((cmd & (RCMDLCK | PCMDLCK)) != 0) {
 302  306                  ASSERT(lckdat->l_whence == 0);
 303  307                  lock_request->l_start = lckdat->l_start;
 304  308                  lock_request->l_end = (lckdat->l_len == 0) ? MAX_U_OFFSET_T :
 305      -                        lckdat->l_start + (lckdat->l_len - 1);
      309 +                    lckdat->l_start + (lckdat->l_len - 1);
 306  310          } else {
 307  311                  /* check the validity of the lock range */
 308  312                  error = flk_convert_lock_data(vp, lckdat,
 309      -                        &lock_request->l_start, &lock_request->l_end,
 310      -                        offset);
      313 +                    &lock_request->l_start, &lock_request->l_end,
      314 +                    offset);
 311  315                  if (error) {
 312  316                          goto done;
 313  317                  }
 314  318                  error = flk_check_lock_data(lock_request->l_start,
 315      -                                            lock_request->l_end, MAXEND);
      319 +                    lock_request->l_end, MAXEND);
 316  320                  if (error) {
 317  321                          goto done;
 318  322                  }
 319  323          }
 320  324  
 321  325          ASSERT(lock_request->l_end >= lock_request->l_start);
 322  326  
 323  327          lock_request->l_type = lckdat->l_type;
 324  328          if (cmd & INOFLCK)
 325  329                  lock_request->l_state |= IO_LOCK;
... 9 lines elided ...
 335  339           * also be of type 'RCMDLCK'.
 336  340           * We do not _only_ check the GETPXFSID() macro because local PXFS
 337  341           * clients use a pxfsid of zero to permit deadlock detection in the LLM.
 338  342           */
 339  343  
 340  344          if ((cmd & PCMDLCK) || (GETPXFSID(lckdat->l_sysid) != 0)) {
 341  345                  lock_request->l_state |= PXFS_LOCK;
 342  346          }
 343  347          if (!((cmd & SETFLCK) || (cmd & INOFLCK))) {
 344  348                  if (lock_request->l_type == F_RDLCK ||
 345      -                        lock_request->l_type == F_WRLCK)
      349 +                    lock_request->l_type == F_WRLCK)
 346  350                          lock_request->l_state |= QUERY_LOCK;
 347  351          }
 348  352          lock_request->l_flock = (*lckdat);
 349  353          lock_request->l_callbacks = flk_cbp;
 350  354  
 351  355          /*
  352  356           * We are ready to process the request
 353  357           */
 354  358          if (IS_LOCKMGR(lock_request)) {
 355  359                  /*
... 15 lines elided ...
 371  375                          nlmid = GETNLMID(lock_request->l_flock.l_sysid);
 372  376                          ASSERT(nlmid <= nlm_status_size && nlmid >= 0);
 373  377  
 374  378                          mutex_enter(&nlm_reg_lock);
 375  379                          /*
 376  380                           * If the NLM registry does not know about this
 377  381                           * NLM server making the request, add its nlmid
 378  382                           * to the registry.
 379  383                           */
 380  384                          if (FLK_REGISTRY_IS_NLM_UNKNOWN(nlm_reg_status,
 381      -                                nlmid)) {
      385 +                            nlmid)) {
 382  386                                  FLK_REGISTRY_ADD_NLMID(nlm_reg_status, nlmid);
 383  387                          } else if (!FLK_REGISTRY_IS_NLM_UP(nlm_reg_status,
 384      -                                nlmid)) {
      388 +                            nlmid)) {
 385  389                                  /*
 386  390                                   * If the NLM server is already known (has made
 387  391                                   * previous lock requests) and its state is
  388  392                                   * not NLM_UP (meaning the NLM server is
 389  393                                   * shutting down), then bail out with an
 390  394                                   * error to deny the lock request.
 391  395                                   */
 392  396                                  mutex_exit(&nlm_reg_lock);
 393  397                                  error = ENOLCK;
 394  398                                  goto done;
... 5 lines elided ...
 400  404          /* Now get the lock graph for a particular vnode */
 401  405          gp = flk_get_lock_graph(vp, FLK_INIT_GRAPH);
 402  406  
 403  407          /*
  404  408           * We drop the rwlock here; otherwise this might end up causing a
  405  409           * deadlock if this IO_LOCK sleeps. (bugid # 1183392).
 406  410           */
 407  411  
 408  412          if (IS_IO_LOCK(lock_request)) {
 409  413                  VOP_RWUNLOCK(vp,
 410      -                        (lock_request->l_type == F_RDLCK) ?
 411      -                                V_WRITELOCK_FALSE : V_WRITELOCK_TRUE, NULL);
      414 +                    (lock_request->l_type == F_RDLCK) ?
      415 +                    V_WRITELOCK_FALSE : V_WRITELOCK_TRUE, NULL);
 412  416          }
 413  417          mutex_enter(&gp->gp_mutex);
 414  418  
 415  419          lock_request->l_state |= REFERENCED_LOCK;
 416  420          lock_request->l_graph = gp;
 417  421  
 418  422          switch (lock_request->l_type) {
 419  423          case F_RDLCK:
 420  424          case F_WRLCK:
 421  425                  if (IS_QUERY_LOCK(lock_request)) {
... 52 lines elided ...
 474  478                  return (error);
 475  479          }
 476  480  
 477  481          /*
 478  482           * Now that we have seen the status of locks in the system for
 479  483           * this vnode we acquire the rwlock if it is an IO_LOCK.
 480  484           */
 481  485  
 482  486          if (IS_IO_LOCK(lock_request)) {
 483  487                  (void) VOP_RWLOCK(vp,
 484      -                        (lock_request->l_type == F_RDLCK) ?
 485      -                                V_WRITELOCK_FALSE : V_WRITELOCK_TRUE, NULL);
      488 +                    (lock_request->l_type == F_RDLCK) ?
      489 +                    V_WRITELOCK_FALSE : V_WRITELOCK_TRUE, NULL);
 486  490                  if (!error) {
 487  491                          lckdat->l_type = F_UNLCK;
 488  492  
 489  493                          /*
  490  494                           * This wakeup is needed; otherwise, if the
  491  495                           * IO_LOCK has slept, the dependents on this
  492  496                           * lock will not be woken up at all. (bugid # 1185482).
 493  497                           */
 494  498  
 495  499                          flk_wakeup(lock_request, 1);
... 115 lines elided ...
 611  615   * Initialize the flk_edge_cache data structure and create the
 612  616   * nlm_reg_status array.
 613  617   */
 614  618  
 615  619  void
 616  620  flk_init(void)
 617  621  {
 618  622          uint_t  i;
 619  623  
 620  624          flk_edge_cache = kmem_cache_create("flk_edges",
 621      -                sizeof (struct edge), 0, NULL, NULL, NULL, NULL, NULL, 0);
      625 +            sizeof (struct edge), 0, NULL, NULL, NULL, NULL, NULL, 0);
 622  626          if (flk_edge_cache == NULL) {
 623  627                  cmn_err(CE_PANIC, "Couldn't create flk_edge_cache\n");
 624  628          }
 625  629          /*
 626  630           * Create the NLM registry object.
 627  631           */
 628  632  
 629  633          if (cluster_bootflags & CLUSTER_BOOTED) {
 630  634                  /*
 631  635                   * This routine tells you the maximum node id that will be used
... 2 lines elided ...
 634  638                   * all entries indexed from 0 to maxnodeid; e.g., from 0
 635  639                   * to 64, for a total of 65 entries.
 636  640                   */
 637  641                  nlm_status_size = clconf_maximum_nodeid() + 1;
 638  642          } else {
 639  643                  nlm_status_size = 0;
 640  644          }
 641  645  
 642  646          if (nlm_status_size != 0) {     /* booted as a cluster */
 643  647                  nlm_reg_status = (flk_nlm_status_t *)
 644      -                        kmem_alloc(sizeof (flk_nlm_status_t) * nlm_status_size,
 645      -                                KM_SLEEP);
      648 +                    kmem_alloc(sizeof (flk_nlm_status_t) * nlm_status_size,
      649 +                    KM_SLEEP);
 646  650  
 647  651                  /* initialize all NLM states in array to NLM_UNKNOWN */
 648  652                  for (i = 0; i < nlm_status_size; i++) {
 649  653                          nlm_reg_status[i] = FLK_NLM_UNKNOWN;
 650  654                  }
 651  655          }
 652  656  }
 653  657  
 654  658  /*
 655  659   * Zone constructor/destructor callbacks to be executed when a zone is
... 144 lines elided ...
 800  804                                          return (EAGAIN);
 801  805                                  request_blocked_by_active = 1;
 802  806                                  break;
 803  807                          }
 804  808                          /*
  805  809                           * Grant the lock if it is for the same owner holding
  806  810                           * an active lock that covers the request.
 807  811                           */
 808  812  
 809  813                          if (SAME_OWNER(lock, request) &&
 810      -                                        COVERS(lock, request) &&
 811      -                                                (request->l_type == F_RDLCK))
      814 +                            COVERS(lock, request) &&
      815 +                            (request->l_type == F_RDLCK))
 812  816                                  return (flk_execute_request(request));
 813  817                          lock = lock->l_next;
 814  818                  } while (lock->l_vnode == vp);
 815  819          }
 816  820  
 817  821          if (!request_blocked_by_active) {
 818  822                          lock_descriptor_t *lk[1];
 819  823                          lock_descriptor_t *first_glock = NULL;
 820  824                  /*
 821  825                   * Shall we grant this?! NO!!
... 84 lines elided ...
 906  910           * there cannot be any of l2, l3, l4, etc., arrived before l5, and if
 907  911           * it has l1 would have produced a deadlock already.
 908  912           */
 909  913  
 910  914          if (lock) {
 911  915                  do {
 912  916                          if (BLOCKS(lock, request)) {
 913  917                                  if (!request_will_wait)
 914  918                                          return (EAGAIN);
 915  919                                  if (COVERS(lock, request) &&
 916      -                                                lock->l_type == F_WRLCK) {
      920 +                                    lock->l_type == F_WRLCK) {
 917  921                                          if (found_covering_lock &&
 918  922                                              !SAME_OWNER(lock, covered_by)) {
 919  923                                                  found_covering_lock++;
 920  924                                                  break;
 921  925                                          }
 922  926                                          found_covering_lock = 1;
 923  927                                          covered_by = lock;
 924  928                                  }
 925  929                                  if (found_covering_lock &&
 926      -                                        !SAME_OWNER(lock, covered_by)) {
      930 +                                    !SAME_OWNER(lock, covered_by)) {
 927  931                                          lock = lock->l_next;
 928  932                                          continue;
 929  933                                  }
 930  934                                  if ((error = flk_add_edge(request, lock,
 931      -                                                !found_covering_lock, 0)))
      935 +                                    !found_covering_lock, 0)))
 932  936                                          return (error);
 933  937                          }
 934  938                          lock = lock->l_next;
 935  939                  } while (lock->l_vnode == vp);
 936  940          }
 937  941  
 938  942  /*
 939  943   * found_covering_lock == 2 iff at this point 'request' has paths
  940  944   * to all locks that block 'request'. found_covering_lock == 1 iff at this
  941  945   * point 'request' has paths to all locks that block 'request' whose owners
  942  946   * are not the same as the one that covers 'request' (covered_by above) and
  943  947   * we can have locks whose owner is the same as covered_by in the active list.
 944  948   */
 945  949  
 946  950          if (request_blocked_by_active && found_covering_lock != 2) {
 947  951                  SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
 948  952                  ASSERT(lock != NULL);
 949  953                  do {
 950  954                          if (BLOCKS(lock, request)) {
 951  955                                  if (found_covering_lock &&
 952      -                                        !SAME_OWNER(lock, covered_by)) {
      956 +                                    !SAME_OWNER(lock, covered_by)) {
 953  957                                          lock = lock->l_next;
 954  958                                          continue;
 955  959                                  }
 956  960                                  if ((error = flk_add_edge(request, lock,
 957      -                                                        CHECK_CYCLE, 0)))
      961 +                                    CHECK_CYCLE, 0)))
 958  962                                          return (error);
 959  963                          }
 960  964                          lock = lock->l_next;
 961  965                  } while (lock->l_vnode == vp);
 962  966          }
 963  967  
 964  968          if (NOT_BLOCKED(request)) {
 965  969                  /*
 966  970                   * request not dependent on any other locks
 967  971                   * so execute this request
... 131 lines elided ...
1099 1103                   * sleep list, or we must check the shutdown status after
1100 1104                   * returning from the callback (and before sleeping).  At
1101 1105                   * least for now, we'll use the first option.  If a
1102 1106                   * shutdown or signal or whatever happened while the graph
1103 1107                   * mutex was dropped, that will be detected by
1104 1108                   * wait_for_lock().
1105 1109                   */
1106 1110                  mutex_exit(&gp->gp_mutex);
1107 1111  
1108 1112                  cprp = flk_invoke_callbacks(request->l_callbacks,
1109      -                                            FLK_BEFORE_SLEEP);
     1113 +                    FLK_BEFORE_SLEEP);
1110 1114  
1111 1115                  mutex_enter(&gp->gp_mutex);
1112 1116  
1113 1117                  if (cprp == NULL) {
1114 1118                          wait_for_lock(request);
1115 1119                  } else {
1116 1120                          mutex_enter(cprp->cc_lockp);
1117 1121                          CALLB_CPR_SAFE_BEGIN(cprp);
1118 1122                          mutex_exit(cprp->cc_lockp);
1119 1123                          wait_for_lock(request);
1120 1124                          mutex_enter(cprp->cc_lockp);
1121 1125                          CALLB_CPR_SAFE_END(cprp, cprp->cc_lockp);
1122 1126                          mutex_exit(cprp->cc_lockp);
1123 1127                  }
1124 1128  
1125 1129                  mutex_exit(&gp->gp_mutex);
1126 1130                  (void) flk_invoke_callbacks(request->l_callbacks,
1127      -                                            FLK_AFTER_SLEEP);
     1131 +                    FLK_AFTER_SLEEP);
1128 1132                  mutex_enter(&gp->gp_mutex);
1129 1133          } else {
1130 1134                  wait_for_lock(request);
1131 1135          }
1132 1136  
1133 1137          if (IS_LOCKMGR(request)) {
1134 1138                  /*
1135 1139                   * If the lock manager is shutting down, return an
1136 1140                   * error that will encourage the client to retransmit.
1137 1141                   */
1138 1142                  if (fg->lockmgr_status[index] != FLK_LOCKMGR_UP &&
1139      -                        !IS_GRANTED(request)) {
     1143 +                    !IS_GRANTED(request)) {
1140 1144                          flk_cancel_sleeping_lock(request, 1);
1141 1145                          return (ENOLCK);
1142 1146                  }
1143 1147          }
1144 1148  
1145 1149          if (IS_INTERRUPTED(request)) {
1146 1150                  /* we got a signal, or act like we did */
1147 1151                  flk_cancel_sleeping_lock(request, 1);
1148 1152                  return (EINTR);
1149 1153          }
... 77 lines elided ...
1227 1231                  return (0);
1228 1232          }
1229 1233  
1230 1234          STACK_PUSH(vertex_stack, from_lock, l_stack);
1231 1235  
1232 1236          while ((vertex = STACK_TOP(vertex_stack)) != NULL) {
1233 1237  
1234 1238                  STACK_POP(vertex_stack, l_stack);
1235 1239  
1236 1240                  for (ep = FIRST_ADJ(vertex);
1237      -                        ep != HEAD(vertex);
1238      -                                ep = NEXT_ADJ(ep)) {
     1241 +                    ep != HEAD(vertex);
     1242 +                    ep = NEXT_ADJ(ep)) {
1239 1243                          if (COLORED(ep->to_vertex))
1240 1244                                  continue;
1241 1245                          COLOR(ep->to_vertex);
1242 1246                          if (SAME_OWNER(ep->to_vertex, from_lock))
1243 1247                                  goto dead_lock;
1244 1248                          STACK_PUSH(vertex_stack, ep->to_vertex, l_stack);
1245 1249                  }
1246 1250          }
1247 1251          return (0);
1248 1252  
... 68 lines elided ...
1317 1321          CHECK_SLEEPING_LOCKS(gp);
1318 1322          CHECK_ACTIVE_LOCKS(gp);
1319 1323  
1320 1324          ASSERT(MUTEX_HELD(&gp->gp_mutex));
1321 1325  
1322 1326          topology[0] = topology[1] = topology[2] = NULL;
1323 1327  
1324 1328          if (request->l_type == F_UNLCK)
1325 1329                  lock_effect = FLK_UNLOCK;
1326 1330          else if (request->l_type == F_RDLCK &&
1327      -                        lock->l_type == F_WRLCK)
     1331 +            lock->l_type == F_WRLCK)
1328 1332                  lock_effect = FLK_DOWNGRADE;
1329 1333          else if (request->l_type == F_WRLCK &&
1330      -                        lock->l_type == F_RDLCK)
     1334 +            lock->l_type == F_RDLCK)
1331 1335                  lock_effect = FLK_UPGRADE;
1332 1336          else
1333 1337                  lock_effect = FLK_STAY_SAME;
1334 1338  
1335 1339          if (lock->l_end < request->l_start) {
1336 1340                  if (lock->l_end == request->l_start - 1 &&
1337      -                                lock_effect == FLK_STAY_SAME) {
     1341 +                    lock_effect == FLK_STAY_SAME) {
1338 1342                          topology[0] = request;
1339 1343                          request->l_start = lock->l_start;
1340 1344                          nvertex = 1;
1341 1345                          goto recompute;
1342 1346                  } else {
1343 1347                          return (0);
1344 1348                  }
1345 1349          }
1346 1350  
1347 1351          if (lock->l_start > request->l_end) {
1348 1352                  if (request->l_end == lock->l_start - 1 &&
1349      -                                        lock_effect == FLK_STAY_SAME) {
     1353 +                    lock_effect == FLK_STAY_SAME) {
1350 1354                          topology[0] = request;
1351 1355                          request->l_end = lock->l_end;
1352 1356                          nvertex = 1;
1353 1357                          goto recompute;
1354 1358                  } else {
1355 1359                          return (1);
1356 1360                  }
1357 1361          }
1358 1362  
1359 1363          if (request->l_end < lock->l_end) {
... 177 lines elided ...
1537 1541          vnode_t *vp = new_lock->l_vnode;
1538 1542          lock_descriptor_t *first_lock, *lock;
1539 1543  
1540 1544          ASSERT(MUTEX_HELD(&gp->gp_mutex));
1541 1545  
1542 1546          SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
1543 1547          first_lock = lock;
1544 1548  
1545 1549          if (first_lock != NULL) {
1546 1550                  for (; (lock->l_vnode == vp &&
1547      -                        lock->l_start < new_lock->l_start); lock = lock->l_next)
     1551 +                    lock->l_start < new_lock->l_start); lock = lock->l_next)
1548 1552                          ;
1549 1553          } else {
1550 1554                  lock = ACTIVE_HEAD(gp);
1551 1555          }
1552 1556  
1553 1557          lock->l_prev->l_next = new_lock;
1554 1558          new_lock->l_next = lock;
1555 1559          new_lock->l_prev = lock->l_prev;
1556 1560          lock->l_prev = new_lock;
1557 1561  
... 22 lines elided ...
1580 1584          ASSERT(MUTEX_HELD(&gp->gp_mutex));
1581 1585          if (free_lock)
1582 1586                  ASSERT(NO_DEPENDENTS(lock));
1583 1587          ASSERT(NOT_BLOCKED(lock));
1584 1588          ASSERT(IS_ACTIVE(lock));
1585 1589  
1586 1590          ASSERT((vp->v_filocks != NULL));
1587 1591  
1588 1592          if (vp->v_filocks == (struct filock *)lock) {
1589 1593                  vp->v_filocks = (struct filock *)
1590      -                                ((lock->l_next->l_vnode == vp) ? lock->l_next :
1591      -                                                                NULL);
     1594 +                    ((lock->l_next->l_vnode == vp) ? lock->l_next :
     1595 +                    NULL);
1592 1596          }
1593 1597          lock->l_next->l_prev = lock->l_prev;
1594 1598          lock->l_prev->l_next = lock->l_next;
1595 1599          lock->l_next = lock->l_prev = NULL;
1596 1600          flk_set_state(lock, FLK_DEAD_STATE);
1597 1601          lock->l_state &= ~ACTIVE_LOCK;
1598 1602  
1599 1603          if (free_lock)
1600 1604                  flk_free_lock(lock);
1601 1605          CHECK_ACTIVE_LOCKS(gp);
... 8 lines elided ...
1610 1614  flk_insert_sleeping_lock(lock_descriptor_t *request)
1611 1615  {
1612 1616          graph_t *gp = request->l_graph;
1613 1617          vnode_t *vp = request->l_vnode;
1614 1618          lock_descriptor_t       *lock;
1615 1619  
1616 1620          ASSERT(MUTEX_HELD(&gp->gp_mutex));
1617 1621          ASSERT(IS_INITIAL(request));
1618 1622  
1619 1623          for (lock = gp->sleeping_locks.l_next; (lock != &gp->sleeping_locks &&
1620      -                lock->l_vnode < vp); lock = lock->l_next)
     1624 +            lock->l_vnode < vp); lock = lock->l_next)
1621 1625                  ;
1622 1626  
1623 1627          lock->l_prev->l_next = request;
1624 1628          request->l_prev = lock->l_prev;
1625 1629          lock->l_prev = request;
1626 1630          request->l_next = lock;
1627 1631          flk_set_state(request, FLK_SLEEPING_STATE);
1628 1632          request->l_state |= SLEEPING_LOCK;
1629 1633  }
1630 1634  
... 22 lines elided ...
1653 1657          /*
 1654 1658           * Count the number of vertex pointers that have to be allocated:
 1655 1659           * all vertices that are reachable from request.
1656 1660           */
1657 1661  
1658 1662          STACK_PUSH(vertex_stack, request, l_stack);
1659 1663  
1660 1664          while ((vertex = STACK_TOP(vertex_stack)) != NULL) {
1661 1665                  STACK_POP(vertex_stack, l_stack);
1662 1666                  for (ep = FIRST_ADJ(vertex); ep != HEAD(vertex);
1663      -                                        ep = NEXT_ADJ(ep)) {
     1667 +                    ep = NEXT_ADJ(ep)) {
1664 1668                          if (IS_RECOMPUTE(ep->to_vertex))
1665 1669                                  continue;
1666 1670                          ep->to_vertex->l_state |= RECOMPUTE_LOCK;
1667 1671                          STACK_PUSH(vertex_stack, ep->to_vertex, l_stack);
1668 1672                          nvertex++;
1669 1673                  }
1670 1674          }
1671 1675  
1672 1676          /*
1673 1677           * allocate memory for holding the vertex pointers
1674 1678           */
1675 1679  
1676 1680          if (nvertex) {
1677 1681                  topology = kmem_zalloc(nvertex * sizeof (lock_descriptor_t *),
1678      -                                                KM_SLEEP);
     1682 +                    KM_SLEEP);
1679 1683          }
1680 1684  
1681 1685          /*
1682 1686           * one more pass to actually store the vertices in the
1683 1687           * allocated array.
1684 1688           * We first check sleeping locks and then active locks
 1685 1689           * so that the topology array will be in topological
1686 1690           * order.
1687 1691           */
1688 1692  
... 58 lines elided ...
1747 1751  
1748 1752          for (i = 0; i < nvertex; i++) {
1749 1753                  topology[i]->l_state &= ~RECOMPUTE_LOCK;
1750 1754          }
1751 1755  
1752 1756          /*
1753 1757           * free the topology
1754 1758           */
1755 1759          if (nvertex)
1756 1760                  kmem_free((void *)topology,
1757      -                        (nvertex * sizeof (lock_descriptor_t *)));
     1761 +                    (nvertex * sizeof (lock_descriptor_t *)));
1758 1762          /*
 1759 1763           * Some locks may be unblocked now
1760 1764           */
1761 1765  
1762 1766          flk_wakeup(request, 0);
1763 1767  
1764 1768          /*
 1765 1769           * we expect to have a correctly recomputed graph now.
1766 1770           */
1767 1771          flk_set_state(request, FLK_DEAD_STATE);
... 10 lines elided ...
1778 1782   */
1779 1783  
1780 1784  static void
1781 1785  flk_graph_uncolor(graph_t *gp)
1782 1786  {
1783 1787          lock_descriptor_t *lock;
1784 1788  
1785 1789          if (gp->mark == UINT_MAX) {
1786 1790                  gp->mark = 1;
1787 1791          for (lock = ACTIVE_HEAD(gp)->l_next; lock != ACTIVE_HEAD(gp);
1788      -                                        lock = lock->l_next)
     1792 +            lock = lock->l_next)
1789 1793                          lock->l_color  = 0;
1790 1794  
1791 1795          for (lock = SLEEPING_HEAD(gp)->l_next; lock != SLEEPING_HEAD(gp);
1792      -                                        lock = lock->l_next)
     1796 +            lock = lock->l_next)
1793 1797                          lock->l_color  = 0;
1794 1798          } else {
1795 1799                  gp->mark++;
1796 1800          }
1797 1801  }
1798 1802  
1799 1803  /*
1800 1804   * Wake up locks that are blocked on the given lock.
1801 1805   */
1802 1806  
... 110 lines elided ...
1913 1917                                      NO_CHECK_CYCLE, update_graph);
1914 1918                                  COLOR(lock);
1915 1919                                  count++;
1916 1920                                  count += flk_color_reachables(lock);
1917 1921                          }
1918 1922  
1919 1923                  }
1920 1924  
1921 1925  next_in_edge:
1922 1926                  if (count == nvertex ||
1923      -                                vertex->l_sedge == HEAD(vertex)) {
     1927 +                    vertex->l_sedge == HEAD(vertex)) {
1924 1928                          /* prune the tree below this */
1925 1929                          STACK_POP(vertex_stack, l_stack);
1926 1930                          vertex->l_state &= ~RECOMPUTE_DONE;
1927 1931                          /* update the barrier locks below this! */
1928 1932                          if (vertex->l_sedge != HEAD(vertex) && barrier_found) {
1929 1933                                  flk_graph_uncolor(gp);
1930 1934                                  flk_update_barriers(vertex);
1931 1935                          }
1932 1936                          continue;
1933 1937                  }
... 25 lines elided ...
1959 1963          lock_descriptor_t *vertex_stack;
1960 1964  
1961 1965          STACK_INIT(vertex_stack);
1962 1966  
1963 1967          STACK_PUSH(vertex_stack, vertex, l_stack1);
1964 1968          count = 0;
1965 1969          while ((ver = STACK_TOP(vertex_stack)) != NULL) {
1966 1970  
1967 1971                  STACK_POP(vertex_stack, l_stack1);
1968 1972                  for (ep = FIRST_ADJ(ver); ep != HEAD(ver);
1969      -                                        ep = NEXT_ADJ(ep)) {
     1973 +                    ep = NEXT_ADJ(ep)) {
1970 1974                          lock = ep->to_vertex;
1971 1975                          if (COLORED(lock))
1972 1976                                  continue;
1973 1977                          COLOR(lock);
1974 1978                          if (IS_RECOMPUTE(lock))
1975 1979                                  count++;
1976 1980                          STACK_PUSH(vertex_stack, lock, l_stack1);
1977 1981                  }
1978 1982  
1979 1983          }
... 12 lines elided ...
1992 1996          edge_t  *ep;
1993 1997          lock_descriptor_t *vertex_stack;
1994 1998  
1995 1999          STACK_INIT(vertex_stack);
1996 2000  
1997 2001          STACK_PUSH(vertex_stack, lock, l_stack1);
1998 2002  
1999 2003          while ((vertex = STACK_TOP(vertex_stack)) != NULL) {
2000 2004                  STACK_POP(vertex_stack, l_stack1);
2001 2005                  for (ep = FIRST_IN(vertex); ep != HEAD(vertex);
2002      -                                                ep = NEXT_IN(ep)) {
     2006 +                    ep = NEXT_IN(ep)) {
2003 2007                          lck = ep->from_vertex;
2004 2008                          if (COLORED(lck)) {
2005 2009                                  if (IS_BARRIER(lck)) {
2006 2010                                          ASSERT(lck->l_index > 0);
2007 2011                                          lck->l_index--;
2008 2012                                          if (lck->l_index == 0)
2009 2013                                                  lck->l_state &= ~BARRIER_LOCK;
2010 2014                                  }
2011 2015                                  continue;
2012 2016                          }
... 24 lines elided ...
2037 2041          edge_t  *ep;
2038 2042          lock_descriptor_t *vertex_stack;
2039 2043  
2040 2044          STACK_INIT(vertex_stack);
2041 2045  
2042 2046          STACK_PUSH(vertex_stack, lock, l_stack1);
2043 2047  
2044 2048          while ((vertex = STACK_TOP(vertex_stack)) != NULL) {
2045 2049                  STACK_POP(vertex_stack, l_stack1);
2046 2050                  for (ep = FIRST_IN(vertex); ep != HEAD(vertex);
2047      -                                                ep = NEXT_IN(ep)) {
     2051 +                    ep = NEXT_IN(ep)) {
2048 2052                          lck = ep->from_vertex;
2049 2053                          if (COLORED(lck)) {
2050 2054                                  /* this is a barrier */
2051 2055                                  lck->l_state |= BARRIER_LOCK;
2052 2056                                  /* index will have barrier count */
2053 2057                                  lck->l_index++;
2054 2058                                  if (!found)
2055 2059                                          found = 1;
2056 2060                                  continue;
2057 2061                          }
... 225 lines elided ...
2283 2287                          lock = lock->l_next;
2284 2288                  }
2285 2289          }
2286 2290  
2287 2291  done:
2288 2292          mutex_exit(&gp->gp_mutex);
2289 2293          return (result);
2290 2294  }
2291 2295  
2292 2296  /*
     2297 + * Determine whether there are any locks for the given vnode with a remote
      2298 + * sysid matching the given sysid.
      2299 + * Used by the new (open source) NFS Lock Manager (NLM).
     2300 + */
     2301 +int
     2302 +flk_has_remote_locks_for_sysid(vnode_t *vp, int sysid)
     2303 +{
     2304 +        lock_descriptor_t *lock;
     2305 +        int result = 0;
     2306 +        graph_t *gp;
     2307 +
     2308 +        if (sysid == 0)
     2309 +                return (0);
     2310 +
     2311 +        gp = flk_get_lock_graph(vp, FLK_USE_GRAPH);
     2312 +        if (gp == NULL) {
     2313 +                return (0);
     2314 +        }
     2315 +
     2316 +        mutex_enter(&gp->gp_mutex);
     2317 +
     2318 +        SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
     2319 +
     2320 +        if (lock) {
     2321 +                while (lock->l_vnode == vp) {
     2322 +                        if (lock->l_flock.l_sysid == sysid) {
     2323 +                                result = 1;
     2324 +                                goto done;
     2325 +                        }
     2326 +                        lock = lock->l_next;
     2327 +                }
     2328 +        }
     2329 +
     2330 +        SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
     2331 +
     2332 +        if (lock) {
     2333 +                while (lock->l_vnode == vp) {
     2334 +                        if (lock->l_flock.l_sysid == sysid) {
     2335 +                                result = 1;
     2336 +                                goto done;
     2337 +                        }
     2338 +                        lock = lock->l_next;
     2339 +                }
     2340 +        }
     2341 +
     2342 +done:
     2343 +        mutex_exit(&gp->gp_mutex);
     2344 +        return (result);
     2345 +}
     2346 +
     2347 +/*
2293 2348   * Determine if there are any locks owned by the given sysid.
2294 2349   * Returns zero if not, non-zero if there are.  Note that this return code
2295 2350   * could be derived from flk_get_{sleeping,active}_locks, but this routine
2296 2351   * avoids all the memory allocations of those routines.
2297 2352   *
2298 2353   * This routine has the same synchronization issues as
2299 2354   * flk_has_remote_locks.
2300 2355   */
2301 2356  
2302 2357  int
... 18 lines elided ...
2321 2376                          for (lock = ACTIVE_HEAD(gp)->l_next;
2322 2377                              lock != ACTIVE_HEAD(gp) && !has_locks;
2323 2378                              lock = lock->l_next) {
2324 2379                                  if (lock->l_flock.l_sysid == sysid)
2325 2380                                          has_locks = 1;
2326 2381                          }
2327 2382                  }
2328 2383  
2329 2384                  if (lck_type & FLK_QUERY_SLEEPING) {
2330 2385                          for (lock = SLEEPING_HEAD(gp)->l_next;
2331      -                                lock != SLEEPING_HEAD(gp) && !has_locks;
2332      -                                lock = lock->l_next) {
     2386 +                            lock != SLEEPING_HEAD(gp) && !has_locks;
     2387 +                            lock = lock->l_next) {
2333 2388                                  if (lock->l_flock.l_sysid == sysid)
2334 2389                                          has_locks = 1;
2335 2390                          }
2336 2391                  }
2337 2392                  mutex_exit(&gp->gp_mutex);
2338 2393          }
2339 2394  
2340 2395          return (has_locks);
2341 2396  }
2342 2397  
... 184 lines elided ...
2527 2582          vnode_t *vp = request->l_vnode;
2528 2583  
2529 2584          ASSERT(MUTEX_HELD(&gp->gp_mutex));
2530 2585          ASSERT(IS_LOCKMGR(request));
2531 2586          SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
2532 2587  
2533 2588          if (lock) {
2534 2589                  while (lock->l_vnode == vp) {
2535 2590                          nlock = lock->l_next;
2536 2591                          if (SAME_OWNER(lock, request) &&
2537      -                                lock->l_start == request->l_start &&
2538      -                                        lock->l_end == request->l_end) {
     2592 +                            lock->l_start == request->l_start &&
     2593 +                            lock->l_end == request->l_end) {
2539 2594                                  INTERRUPT_WAKEUP(lock);
2540 2595                                  return (1);
2541 2596                          }
2542 2597                          lock = nlock;
2543 2598                  }
2544 2599          }
2545 2600          return (0);
2546 2601  }
2547 2602  
2548 2603  /*
... 17 lines elided ...
2566 2621  
2567 2622          CHECK_SLEEPING_LOCKS(gp);
2568 2623          CHECK_ACTIVE_LOCKS(gp);
2569 2624  
2570 2625          SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
2571 2626  
2572 2627          if (lock) {
2573 2628                  do {
2574 2629                          nlock = lock->l_next;
2575 2630                          if ((lock->l_flock.l_pid == pid ||
2576      -                                        pid == IGN_PID) &&
2577      -                                lock->l_flock.l_sysid == sysid) {
     2631 +                            pid == IGN_PID) &&
     2632 +                            lock->l_flock.l_sysid == sysid) {
2578 2633                                  CANCEL_WAKEUP(lock);
2579 2634                          }
2580 2635                          lock = nlock;
2581 2636                  } while (lock->l_vnode == vp);
2582 2637          }
2583 2638  
2584 2639          SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
2585 2640  
2586 2641          if (lock) {
2587 2642                  do {
2588 2643                          nlock = lock->l_next;
2589 2644                          if ((lock->l_flock.l_pid == pid ||
2590      -                                        pid == IGN_PID) &&
2591      -                                lock->l_flock.l_sysid == sysid) {
     2645 +                            pid == IGN_PID) &&
     2646 +                            lock->l_flock.l_sysid == sysid) {
2592 2647                                  flk_delete_active_lock(lock, 0);
2593 2648                                  STACK_PUSH(link_stack, lock, l_stack);
2594 2649                          }
2595 2650                          lock = nlock;
2596 2651                  } while (lock->l_vnode == vp);
2597 2652          }
2598 2653  
2599 2654          while ((lock = STACK_TOP(link_stack)) != NULL) {
2600 2655                  STACK_POP(link_stack, l_stack);
2601 2656                  flk_wakeup(lock, 1);
... 208 lines elided ...
2810 2865          ep = FIRST_ADJ(lock);
2811 2866          while (ep != HEAD(lock)) {
2812 2867                  proc_vertex_t *adj_proc;
2813 2868                  adj_proc = flk_get_proc_vertex(ep->to_vertex);
2814 2869                  nep = NEXT_ADJ(ep);
2815 2870                  IN_LIST_REMOVE(ep);
2816 2871                  ADJ_LIST_REMOVE(ep);
2817 2872                  flk_free_edge(ep);
2818 2873                  ppep = start_vertex->edge;
2819 2874                  for (pep = start_vertex->edge; pep != NULL; ppep = pep,
2820      -                                                pep = ppep->next) {
     2875 +                    pep = ppep->next) {
2821 2876                          if (pep->to_proc == adj_proc) {
2822 2877                                  pep->refcount--;
2823 2878                                  if (pep->refcount == 0) {
2824 2879                                          if (pep == ppep) {
2825 2880                                                  start_vertex->edge = pep->next;
2826 2881                                          } else {
2827 2882                                                  ppep->next = pep->next;
2828 2883                                          }
2829 2884                                          adj_proc->incount--;
2830 2885                                          flk_proc_release(adj_proc);
... 7 lines elided ...
2838 2893          ep = FIRST_IN(lock);
2839 2894          while (ep != HEAD(lock)) {
2840 2895                  proc_vertex_t *in_proc;
2841 2896                  in_proc = flk_get_proc_vertex(ep->from_vertex);
2842 2897                  nep = NEXT_IN(ep);
2843 2898                  IN_LIST_REMOVE(ep);
2844 2899                  ADJ_LIST_REMOVE(ep);
2845 2900                  flk_free_edge(ep);
2846 2901                  ppep = in_proc->edge;
2847 2902                  for (pep = in_proc->edge; pep != NULL; ppep = pep,
2848      -                                                pep = ppep->next) {
     2903 +                    pep = ppep->next) {
2849 2904                          if (pep->to_proc == start_vertex) {
2850 2905                                  pep->refcount--;
2851 2906                                  if (pep->refcount == 0) {
2852 2907                                          if (pep == ppep) {
2853 2908                                                  in_proc->edge = pep->next;
2854 2909                                          } else {
2855 2910                                                  ppep->next = pep->next;
2856 2911                                          }
2857 2912                                          start_vertex->incount--;
2858 2913                                          flk_proc_release(in_proc);
... 45 lines elided ...
2904 2959                  for (i = 0; i < pgraph.gcount; i++) {
2905 2960                          if (pgraph.proc[i] == NULL) {
2906 2961                                  pgraph.proc[i] = pv;
2907 2962                                  lock->pvertex = pv->index = i;
2908 2963                                  pgraph.free--;
2909 2964                                  return (pv);
2910 2965                          }
2911 2966                  }
2912 2967          }
2913 2968          palloc = kmem_zalloc((pgraph.gcount + PROC_CHUNK) *
2914      -                                sizeof (proc_vertex_t *), KM_SLEEP);
     2969 +            sizeof (proc_vertex_t *), KM_SLEEP);
2915 2970  
2916 2971          if (pgraph.proc) {
2917 2972                  bcopy(pgraph.proc, palloc,
2918      -                        pgraph.gcount * sizeof (proc_vertex_t *));
     2973 +                    pgraph.gcount * sizeof (proc_vertex_t *));
2919 2974  
2920 2975                  kmem_free(pgraph.proc,
2921      -                        pgraph.gcount * sizeof (proc_vertex_t *));
     2976 +                    pgraph.gcount * sizeof (proc_vertex_t *));
2922 2977          }
2923 2978          pgraph.proc = palloc;
2924 2979          pgraph.free += (PROC_CHUNK - 1);
2925 2980          pv->index = lock->pvertex = pgraph.gcount;
2926 2981          pgraph.gcount += PROC_CHUNK;
2927 2982          pgraph.proc[pv->index] = pv;
2928 2983          return (pv);
2929 2984  }
2930 2985  
2931 2986  /*
... 183 lines elided ...
3115 3170  
3116 3171          ASSERT(nlmid <= nlm_status_size && nlmid >= 0);
3117 3172          mutex_enter(&nlm_reg_lock);
3118 3173  
3119 3174          if (FLK_REGISTRY_IS_NLM_UNKNOWN(nlm_reg_status, nlmid)) {
3120 3175                  /*
3121 3176                   * If the NLM server "nlmid" is unknown in the NLM registry,
3122 3177                   * add it to the registry in the nlm shutting down state.
3123 3178                   */
3124 3179                  FLK_REGISTRY_CHANGE_NLM_STATE(nlm_reg_status, nlmid,
3125      -                        FLK_NLM_SHUTTING_DOWN);
     3180 +                    FLK_NLM_SHUTTING_DOWN);
3126 3181          } else {
3127 3182                  /*
3128 3183                   * Change the state of the NLM server identified by "nlmid"
3129 3184                   * in the NLM registry to the argument "nlm_state."
3130 3185                   */
3131 3186                  FLK_REGISTRY_CHANGE_NLM_STATE(nlm_reg_status, nlmid,
3132      -                        nlm_state);
     3187 +                    nlm_state);
3133 3188          }
3134 3189  
3135 3190          /*
3136 3191           *  The reason we must register the NLM server that is shutting down
3137 3192           *  with an LLM that doesn't already know about it (never sent a lock
 3138 3193   *  request) is to correctly handle a race between shutdown and a new
3139 3194           *  lock request.  Suppose that a shutdown request from the NLM server
3140 3195           *  invokes this routine at the LLM, and a thread is spawned to
3141 3196           *  service the request. Now suppose a new lock request is in
3142 3197           *  progress and has already passed the first line of defense in
... 164 lines elided ...
3307 3362          for (i = first_index; i < first_index + num_indexes; i++) {
3308 3363                  mutex_enter(&flock_lock);
3309 3364                  gp = lock_graph[i];
3310 3365                  mutex_exit(&flock_lock);
3311 3366                  if (gp == NULL) {
3312 3367                          continue;
3313 3368                  }
3314 3369  
3315 3370                  mutex_enter(&gp->gp_mutex);
3316 3371                  graph_head = (list_type == FLK_ACTIVE_STATE) ?
3317      -                        ACTIVE_HEAD(gp) : SLEEPING_HEAD(gp);
     3372 +                    ACTIVE_HEAD(gp) : SLEEPING_HEAD(gp);
3318 3373                  for (lock = graph_head->l_next;
3319 3374                      lock != graph_head;
3320 3375                      lock = lock->l_next) {
3321 3376                          if (use_sysid && lock->l_flock.l_sysid != sysid)
3322 3377                                  continue;
3323 3378                          if (pid != NOPID && lock->l_flock.l_pid != pid)
3324 3379                                  continue;
3325 3380                          if (vp != NULL && lock->l_vnode != vp)
3326 3381                                  continue;
3327 3382                          if (lock_state && !(lock_state & lock->l_state))
... 27 lines elided ...
3355 3410   *
3356 3411   * In either case we don't particularly care to specify the zone of interest;
3357 3412   * the sysid-space is global across zones, so the sysid will map to exactly one
3358 3413   * zone, and we'll return information for that zone.
3359 3414   */
3360 3415  
3361 3416  locklist_t *
3362 3417  flk_get_sleeping_locks(int sysid, pid_t pid)
3363 3418  {
3364 3419          return (get_lock_list(FLK_SLEEPING_STATE, 0, sysid, B_TRUE, pid, NULL,
3365      -                    ALL_ZONES));
     3420 +            ALL_ZONES));
3366 3421  }
3367 3422  
3368 3423  locklist_t *
3369 3424  flk_get_active_locks(int sysid, pid_t pid)
3370 3425  {
3371 3426          return (get_lock_list(FLK_ACTIVE_STATE, 0, sysid, B_TRUE, pid, NULL,
3372      -                    ALL_ZONES));
     3427 +            ALL_ZONES));
3373 3428  }
3374 3429  
3375 3430  /*
3376 3431   * Another interface to get_lock_list.  This one returns all the active
3377 3432   * locks for a given vnode.  Again, see get_lock_list for details.
3378 3433   *
3379 3434   * We don't need to specify which zone's locks we're interested in.  The matter
3380 3435   * would only be interesting if the vnode belonged to NFS, and NFS vnodes can't
3381 3436   * be used by multiple zones, so the list of locks will all be from the right
3382 3437   * zone.
3383 3438   */
3384 3439  
3385 3440  locklist_t *
3386 3441  flk_active_locks_for_vp(const vnode_t *vp)
3387 3442  {
3388 3443          return (get_lock_list(FLK_ACTIVE_STATE, 0, 0, B_FALSE, NOPID, vp,
3389      -                    ALL_ZONES));
     3444 +            ALL_ZONES));
3390 3445  }
3391 3446  
3392 3447  /*
3393 3448   * Another interface to get_lock_list.  This one returns all the active
3394 3449   * nbmand locks for a given vnode.  Again, see get_lock_list for details.
3395 3450   *
3396 3451   * See the comment for flk_active_locks_for_vp() for why we don't care to
3397 3452   * specify the particular zone of interest.
3398 3453   */
3399 3454  locklist_t *
3400 3455  flk_active_nbmand_locks_for_vp(const vnode_t *vp)
3401 3456  {
3402 3457          return (get_lock_list(FLK_ACTIVE_STATE, NBMAND_LOCK, 0, B_FALSE,
3403      -                                NOPID, vp, ALL_ZONES));
     3458 +            NOPID, vp, ALL_ZONES));
3404 3459  }
3405 3460  
3406 3461  /*
3407 3462   * Another interface to get_lock_list.  This one returns all the active
3408 3463   * nbmand locks for a given pid.  Again, see get_lock_list for details.
3409 3464   *
3410 3465   * The zone doesn't need to be specified here; the locks held by a
 3411 3466   * particular process will either be local (i.e., non-NFS) or from the zone
3412 3467   * the process is executing in.  This is because other parts of the system
3413 3468   * ensure that an NFS vnode can't be used in a zone other than that in
3414 3469   * which it was opened.
3415 3470   */
3416 3471  locklist_t *
3417 3472  flk_active_nbmand_locks(pid_t pid)
3418 3473  {
3419 3474          return (get_lock_list(FLK_ACTIVE_STATE, NBMAND_LOCK, 0, B_FALSE,
3420      -                                pid, NULL, ALL_ZONES));
     3475 +            pid, NULL, ALL_ZONES));
3421 3476  }
3422 3477  
3423 3478  /*
3424 3479   * Free up all entries in the locklist.
3425 3480   */
3426 3481  void
3427 3482  flk_free_locklist(locklist_t *llp)
3428 3483  {
3429 3484          locklist_t *next_llp;
3430 3485  
... 108 lines elided ...
3539 3594                          nlock = lock->l_next;
3540 3595                          /*
3541 3596                           * If NLM server request _and_ nlmid of lock matches
3542 3597                           * nlmid of argument, then set the NLM state of the
3543 3598                           * lock to NLM_SHUTTING_DOWN, and wake up sleeping
3544 3599                           * request.
3545 3600                           */
3546 3601                          if (IS_LOCKMGR(lock)) {
3547 3602                                  /* get NLM id */
3548 3603                                  lock_nlmid =
3549      -                                        GETNLMID(lock->l_flock.l_sysid);
     3604 +                                    GETNLMID(lock->l_flock.l_sysid);
3550 3605                                  if (nlmid == lock_nlmid) {
3551 3606                                          SET_NLM_STATE(lock,
3552      -                                                FLK_NLM_SHUTTING_DOWN);
     3607 +                                            FLK_NLM_SHUTTING_DOWN);
3553 3608                                          INTERRUPT_WAKEUP(lock);
3554 3609                                  }
3555 3610                          }
3556 3611                  }
3557 3612                  mutex_exit(&gp->gp_mutex);
3558 3613          }
3559 3614  }
3560 3615  
3561 3616  /*
3562 3617   * Requires: "nlmid" >= 1 and <= clconf_maximum_nodeid()
... 152 lines elided ...
3715 3770  static void
3716 3771  create_flock(lock_descriptor_t *lp, flock64_t *flp)
3717 3772  {
3718 3773          ASSERT(lp->l_end == MAX_U_OFFSET_T || lp->l_end <= MAXEND);
3719 3774          ASSERT(lp->l_end >= lp->l_start);
3720 3775  
3721 3776          flp->l_type = lp->l_type;
3722 3777          flp->l_whence = 0;
3723 3778          flp->l_start = lp->l_start;
3724 3779          flp->l_len = (lp->l_end == MAX_U_OFFSET_T) ? 0 :
3725      -                (lp->l_end - lp->l_start + 1);
     3780 +            (lp->l_end - lp->l_start + 1);
3726 3781          flp->l_sysid = lp->l_flock.l_sysid;
3727 3782          flp->l_pid = lp->l_flock.l_pid;
3728 3783  }
3729 3784  
3730 3785  /*
3731 3786   * Convert flock_t data describing a lock range into unsigned long starting
3732 3787   * and ending points, which are put into lock_request.  Returns 0 or an
3733 3788   * errno value.
3734 3789   * Large Files: max is passed by the caller and we return EOVERFLOW
3735 3790   * as defined by LFS API.
... 60 lines elided ...
3796 3851          /*
3797 3852           * The end (length) for local locking should never be greater
3798 3853           * than MAXEND. However, the representation for
3799 3854           * the entire file is MAX_U_OFFSET_T.
3800 3855           */
3801 3856          if ((start > max) ||
3802 3857              ((end > max) && (end != MAX_U_OFFSET_T))) {
3803 3858                  return (EINVAL);
3804 3859          }
3805 3860          if (start > end) {
3806      -            return (EINVAL);
     3861 +                return (EINVAL);
3807 3862          }
3808 3863          return (0);
3809 3864  }
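[Editor's note] A few concrete cases make the two checks above easier to read (illustrative values; max is the caller-supplied limit):

        /*
         * start = 0, end = MAX_U_OFFSET_T  -> 0 (entire file, always legal)
         * start = 0, end = max + 1         -> EINVAL (end > max and end is
         *                                    not the whole-file sentinel)
         * start = 5, end = 2               -> EINVAL (start > end)
         */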
3810 3865  
3811 3866  /*
3812 3867   * Fill in request->l_flock with information about the lock blocking the
3813 3868   * request.  The complexity here is that lock manager requests are allowed
3814 3869   * to see into the upper part of the 32-bit address range, whereas local
3815 3870   * requests are only allowed to see signed values.
3816 3871   *
↓ open down ↓ 29 lines elided ↑ open up ↑
3846 3901          } else {
3847 3902                  if (blocker->l_start > MAXEND) {
3848 3903                          flrp->l_start = MAXEND;
3849 3904                          flrp->l_len = 0;
3850 3905                  } else {
3851 3906                          flrp->l_start = blocker->l_start;
3852 3907                          if (blocker->l_end == MAX_U_OFFSET_T)
3853 3908                                  flrp->l_len = 0;
3854 3909                          else
3855 3910                                  flrp->l_len = blocker->l_end -
3856      -                                        blocker->l_start + 1;
     3911 +                                    blocker->l_start + 1;
3857 3912                  }
3858 3913          }
3859 3914  }
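[Editor's note] A worked example of the clamping above for a local (signed-offset) requester, using illustrative values:

        /*
         * blocker [10, 19]             -> l_start = 10, l_len = 10
         * blocker [10, MAX_U_OFFSET_T] -> l_start = 10, l_len = 0
         * blocker starting above MAXEND -> l_start = MAXEND, l_len = 0
         *   (the range sits wholly above what the caller can represent,
         *   so it is reported as "from MAXEND through end of file")
         */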
3860 3915  
3861 3916  /*
3862 3917   * PSARC case 1997/292
3863 3918   */
3864 3919  /*
3865 3920   * This is the public routine exported by flock.h.
3866 3921   */
↓ open down ↓ 55 lines elided ↑ open up ↑
3922 3977                  return (0);
3923 3978  
3924 3979          mutex_enter(&gp->gp_mutex);
3925 3980          SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);
3926 3981  
3927 3982          for (; lock && lock->l_vnode == vp; lock = lock->l_next) {
3928 3983                  if ((svmand || (lock->l_state & NBMAND_LOCK)) &&
3929 3984                      (lock->l_flock.l_sysid != sysid ||
3930 3985                      lock->l_flock.l_pid != pid) &&
3931 3986                      lock_blocks_io(op, offset, length,
3932      -                                lock->l_type, lock->l_start, lock->l_end)) {
     3987 +                    lock->l_type, lock->l_start, lock->l_end)) {
3933 3988                          conflict = 1;
3934 3989                          break;
3935 3990                  }
3936 3991          }
3937 3992          mutex_exit(&gp->gp_mutex);
3938 3993  
3939 3994          return (conflict);
3940 3995  }
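[Editor's note] The loop above defers the range/type test to lock_blocks_io(). A hedged sketch of the predicate it presumably implements (reader/reader never conflicts; anything else conflicts when the I/O range overlaps the lock's inclusive range); the internals and the overlap convention are assumptions, only the helper's name and arguments come from the code above:

        static int
        blocks_io_sketch(nbl_op_t op, u_offset_t off, ssize_t len,
            int lock_type, u_offset_t lstart, u_offset_t lend)
        {
                if (op == NBL_READ && lock_type == F_RDLCK)
                        return (0);     /* shared readers never conflict */
                /* does [off, off + len) overlap the inclusive [lstart, lend]? */
                return (off <= lend && lstart < off + (u_offset_t)len);
        }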
3941 3996  
3942 3997  /*
↓ open down ↓ 19 lines elided ↑ open up ↑
3962 4017  }
3963 4018  
3964 4019  #ifdef DEBUG
3965 4020  static void
3966 4021  check_active_locks(graph_t *gp)
3967 4022  {
3968 4023          lock_descriptor_t *lock, *lock1;
3969 4024          edge_t  *ep;
3970 4025  
3971 4026          for (lock = ACTIVE_HEAD(gp)->l_next; lock != ACTIVE_HEAD(gp);
3972      -                                                lock = lock->l_next) {
     4027 +            lock = lock->l_next) {
3973 4028                  ASSERT(IS_ACTIVE(lock));
3974 4029                  ASSERT(NOT_BLOCKED(lock));
3975 4030                  ASSERT(!IS_BARRIER(lock));
3976 4031  
3977 4032                  ep = FIRST_IN(lock);
3978 4033  
3979 4034                  while (ep != HEAD(lock)) {
3980 4035                          ASSERT(IS_SLEEPING(ep->from_vertex));
3981 4036                          ASSERT(!NOT_BLOCKED(ep->from_vertex));
3982 4037                          ep = NEXT_IN(ep);
3983 4038                  }
3984 4039  
3985 4040                  for (lock1 = lock->l_next; lock1 != ACTIVE_HEAD(gp);
3986      -                                        lock1 = lock1->l_next) {
     4041 +                    lock1 = lock1->l_next) {
3987 4042                          if (lock1->l_vnode == lock->l_vnode) {
3988 4043                          if (BLOCKS(lock1, lock)) {
3989 4044                                  cmn_err(CE_PANIC,
3990 4045                                      "active lock %p blocks %p",
3991 4046                                      (void *)lock1, (void *)lock);
3992 4047                          } else if (BLOCKS(lock, lock1)) {
3993 4048                                  cmn_err(CE_PANIC,
3994 4049                                      "active lock %p blocks %p",
3995 4050                                      (void *)lock, (void *)lock1);
3996 4051                          }
↓ open down ↓ 74 lines elided ↑ open up ↑
4071 4126                  return (1);
4072 4127          }
4073 4128  }
4074 4129  
4075 4130  static void
4076 4131  check_sleeping_locks(graph_t *gp)
4077 4132  {
4078 4133          lock_descriptor_t *lock1, *lock2;
4079 4134          edge_t *ep;
4080 4135          for (lock1 = SLEEPING_HEAD(gp)->l_next; lock1 != SLEEPING_HEAD(gp);
4081      -                                lock1 = lock1->l_next) {
     4136 +            lock1 = lock1->l_next) {
4082 4137                                  ASSERT(!IS_BARRIER(lock1));
4083 4138          for (lock2 = lock1->l_next; lock2 != SLEEPING_HEAD(gp);
4084      -                                lock2 = lock2->l_next) {
     4139 +            lock2 = lock2->l_next) {
4085 4140                  if (lock1->l_vnode == lock2->l_vnode) {
4086 4141                          if (BLOCKS(lock2, lock1)) {
4087 4142                                  ASSERT(!IS_GRANTED(lock1));
4088 4143                                  ASSERT(!NOT_BLOCKED(lock1));
4089 4144                                  path(lock1, lock2);
4090 4145                          }
4091 4146                  }
4092 4147          }
4093 4148  
4094 4149          for (lock2 = ACTIVE_HEAD(gp)->l_next; lock2 != ACTIVE_HEAD(gp);
4095      -                                        lock2 = lock2->l_next) {
     4150 +            lock2 = lock2->l_next) {
4096 4151                                  ASSERT(!IS_BARRIER(lock1));
4097 4152                  if (lock1->l_vnode == lock2->l_vnode) {
4098 4153                          if (BLOCKS(lock2, lock1)) {
4099 4154                                  ASSERT(!IS_GRANTED(lock1));
4100 4155                                  ASSERT(!NOT_BLOCKED(lock1));
4101 4156                                  path(lock1, lock2);
4102 4157                          }
4103 4158                  }
4104 4159          }
4105 4160          ep = FIRST_ADJ(lock1);
↓ open down ↓ 20 lines elided ↑ open up ↑
4126 4181                  if (no_path)
4127 4182                          ASSERT(ep->to_vertex != lock2);
4128 4183                  STACK_PUSH(vertex_stack, ep->to_vertex, l_dstack);
4129 4184                  COLOR(ep->to_vertex);
4130 4185                  ep = NEXT_ADJ(ep);
4131 4186          }
4132 4187  
4133 4188          while ((vertex = STACK_TOP(vertex_stack)) != NULL) {
4134 4189                  STACK_POP(vertex_stack, l_dstack);
4135 4190                  for (ep = FIRST_ADJ(vertex); ep != HEAD(vertex);
4136      -                                                ep = NEXT_ADJ(ep)) {
     4191 +                    ep = NEXT_ADJ(ep)) {
4137 4192                          if (COLORED(ep->to_vertex))
4138 4193                                  continue;
4139 4194                          COLOR(ep->to_vertex);
4140 4195                          if (ep->to_vertex == lock2)
4141 4196                                  return (1);
4142 4197  
4143 4198                          STACK_PUSH(vertex_stack, ep->to_vertex, l_dstack);
4144 4199                  }
4145 4200          }
4146 4201          return (0);
↓ open down ↓ 69 lines elided ↑ open up ↑