6253 F_GETLK doesn't always return lock owner
The F_GETLK fcntl doesn't return the offending lock when a file already holds
a read lock, a write lock is waiting behind it, and another read lock is then
requested. The waiting write lock blocks the request, but without this patch
F_GETLK doesn't report it.
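
The scenario is easy to reproduce from userland. The following is a minimal
repro sketch, not taken from the original report: the temporary file path,
the one-second settle delay, and the fork-based setup are assumptions made
for illustration.

#include <sys/types.h>
#include <sys/wait.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        int fd = open("/tmp/getlk_test", O_RDWR | O_CREAT, 0644);
        struct flock fl;

        if (fd < 0) {
                perror("open");
                return (1);
        }

        /* Parent: take a read lock on the whole file. */
        fl.l_type = F_RDLCK;
        fl.l_whence = SEEK_SET;
        fl.l_start = 0;
        fl.l_len = 0;
        if (fcntl(fd, F_SETLK, &fl) == -1) {
                perror("F_SETLK (read)");
                return (1);
        }

        /* Child 1: block waiting for a write lock behind the read lock. */
        if (fork() == 0) {
                fl.l_type = F_WRLCK;
                (void) fcntl(fd, F_SETLKW, &fl);        /* sleeps here */
                _exit(0);
        }
        (void) sleep(1);        /* crude: give the writer time to queue up */

        /* Child 2: ask whether another read lock would be blocked. */
        if (fork() == 0) {
                fl.l_type = F_RDLCK;
                if (fcntl(fd, F_GETLK, &fl) == -1) {
                        perror("F_GETLK");
                        _exit(1);
                }
                if (fl.l_type == F_UNLCK)
                        (void) printf("no blocker reported (the bug)\n");
                else
                        (void) printf("blocked: type %d, pid %ld\n",
                            (int)fl.l_type, (long)fl.l_pid);
                _exit(0);
        }
        (void) wait(NULL);      /* child 2 exits; child 1 stays blocked */
        return (0);
}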


Before the fix, flk_get_first_blocking_lock() only scanned the active locks
on the vnode:

flk_get_first_blocking_lock(lock_descriptor_t *request)
{
        graph_t *gp = request->l_graph;
        vnode_t *vp = request->l_vnode;
        lock_descriptor_t *lock, *blocker;

        ASSERT(MUTEX_HELD(&gp->gp_mutex));
        blocker = NULL;
        SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);

        if (lock) {
                do {
                        if (BLOCKS(lock, request)) {
                                blocker = lock;
                                break;
                        }
                        lock = lock->l_next;
                } while (lock->l_vnode == vp);
        }

        if (blocker) {
                report_blocker(blocker, request);
        } else
                request->l_flock.l_type = F_UNLCK;
}
With the fix, when no active lock blocks a read-lock request, the sleeping
locks are searched as well, so a waiting writer is reported:

flk_get_first_blocking_lock(lock_descriptor_t *request)
{
        graph_t *gp = request->l_graph;
        vnode_t *vp = request->l_vnode;
        lock_descriptor_t *lock, *blocker;

        ASSERT(MUTEX_HELD(&gp->gp_mutex));
        blocker = NULL;
        SET_LOCK_TO_FIRST_ACTIVE_VP(gp, lock, vp);

        if (lock) {
                do {
                        if (BLOCKS(lock, request)) {
                                blocker = lock;
                                break;
                        }
                        lock = lock->l_next;
                } while (lock->l_vnode == vp);
        }

        if (blocker == NULL && request->l_flock.l_type == F_RDLCK) {
                /*
                 * No active lock is blocking this request, but if a read
                 * lock is requested, it may also get blocked by a waiting
                 * writer. So search all sleeping locks and see if there is
                 * a writer waiting.
                 */
                SET_LOCK_TO_FIRST_SLEEP_VP(gp, lock, vp);
                if (lock) {
                        do {
                                if (BLOCKS(lock, request)) {
                                        blocker = lock;
                                        break;
                                }
                                lock = lock->l_next;
                        } while (lock->l_vnode == vp);
                }
        }

        if (blocker) {
                report_blocker(blocker, request);
        } else
                request->l_flock.l_type = F_UNLCK;
}
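
With the sleeping-lock scan in place, a queued writer now shows up in the
struct flock that F_GETLK hands back, instead of the request falling through
to F_UNLCK. As a rough illustration of what a caller sees, here is a hedged
sketch using only the standard fcntl(2) interface; the helper name and the
whole-file lock range are assumptions, not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Hypothetical helper: ask whether a whole-file read lock on 'fd' would be
 * granted.  With the fix, a writer sleeping behind an existing read lock is
 * reported here as F_WRLCK; before the fix the same situation came back as
 * F_UNLCK.
 */
void
check_read_lock(int fd)
{
        struct flock fl;

        fl.l_type = F_RDLCK;
        fl.l_whence = SEEK_SET;
        fl.l_start = 0;
        fl.l_len = 0;

        if (fcntl(fd, F_GETLK, &fl) == -1) {
                perror("F_GETLK");
                return;
        }

        if (fl.l_type == F_UNLCK) {
                (void) printf("a read lock would be granted\n");
        } else {
                (void) printf("blocked by a %s lock associated with pid %ld\n",
                    fl.l_type == F_WRLCK ? "write" : "read",
                    (long)fl.l_pid);
        }
}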

The remaining lines of the listing are unchanged context, the start of
flk_get_lock_graph():

/*
 * Get the graph_t structure associated with a vnode.
 * If 'initialize' is non-zero, and the graph_t structure for this vnode has
 * not yet been initialized, then a new element is allocated and returned.
 */
graph_t *
flk_get_lock_graph(vnode_t *vp, int initialize)
{
        graph_t *gp;
        graph_t *gp_alloc = NULL;
        int index = HASH_INDEX(vp);

        if (initialize == FLK_USE_GRAPH) {
                mutex_enter(&flock_lock);