Print this page
8368 remove warlock leftovers from usr/src/uts

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/fs/udfs/udf_vnops.c
          +++ new/usr/src/uts/common/fs/udfs/udf_vnops.c
↓ open down ↓ 291 lines elided ↑ open up ↑
 292  292          struct uio *uiop,
 293  293          int32_t ioflag,
 294  294          struct cred *cr,
 295  295          caller_context_t *ct)
 296  296  {
 297  297          struct ud_inode *ip = VTOI(vp);
 298  298          int32_t error;
 299  299  
 300  300          ud_printf("udf_read\n");
 301  301  
 302      -#ifdef  __lock_lint
 303      -        rw_enter(&ip->i_rwlock, RW_READER);
 304      -#endif
 305      -
 306  302          ASSERT(RW_READ_HELD(&ip->i_rwlock));
 307  303  
 308  304          if (MANDLOCK(vp, ip->i_char)) {
 309  305                  /*
 310  306                   * udf_getattr ends up being called by chklock
 311  307                   */
 312  308                  error = chklock(vp, FREAD, uiop->uio_loffset,
 313  309                      uiop->uio_resid, uiop->uio_fmode, ct);
 314  310                  if (error) {
 315  311                          goto end;
 316  312                  }
 317  313          }
 318  314  
 319  315          rw_enter(&ip->i_contents, RW_READER);
 320  316          error = ud_rdip(ip, uiop, ioflag, cr);
 321  317          rw_exit(&ip->i_contents);
 322  318  
 323  319  end:
 324      -#ifdef  __lock_lint
 325      -        rw_exit(&ip->i_rwlock);
 326      -#endif
 327      -
 328  320          return (error);
 329  321  }
 330  322  
 331  323  
/*
 * Write-throttling tunables (patchable globals).
 * NOTE(review): presumably ud_WRITES enables throttling, ud_HW/ud_LW are the
 * high/low water marks in bytes, and ud_throttles counts throttle events —
 * confirm against the elided throttle logic in udf_write()/ud_wrip().
 */
 332  324  int32_t ud_WRITES = 1;
 333  325  int32_t ud_HW = 96 * 1024;
 334  326  int32_t ud_LW = 64 * 1024;
 335  327  int32_t ud_throttles = 0;
 336  328  
 337  329  /* ARGSUSED */
↓ open down ↓ 3 lines elided ↑ open up ↑
 341  333          struct uio *uiop,
 342  334          int32_t ioflag,
 343  335          struct cred *cr,
 344  336          caller_context_t *ct)
 345  337  {
 346  338          struct ud_inode *ip = VTOI(vp);
 347  339          int32_t error = 0;
 348  340  
 349  341          ud_printf("udf_write\n");
 350  342  
 351      -#ifdef  __lock_lint
 352      -        rw_enter(&ip->i_rwlock, RW_WRITER);
 353      -#endif
 354      -
 355  343          ASSERT(RW_WRITE_HELD(&ip->i_rwlock));
 356  344  
 357  345          if (MANDLOCK(vp, ip->i_char)) {
 358  346                  /*
 359  347                   * ud_getattr ends up being called by chklock
 360  348                   */
 361  349                  error = chklock(vp, FWRITE, uiop->uio_loffset,
 362  350                      uiop->uio_resid, uiop->uio_fmode, ct);
 363  351                  if (error) {
 364  352                          goto end;
↓ open down ↓ 18 lines elided ↑ open up ↑
 383  371          if ((ioflag & FAPPEND) != 0 && (ip->i_type == VREG)) {
 384  372                  /*
 385  373                   * In append mode start at end of file.
 386  374                   */
 387  375                  uiop->uio_loffset = ip->i_size;
 388  376          }
 389  377          error = ud_wrip(ip, uiop, ioflag, cr);
 390  378          rw_exit(&ip->i_contents);
 391  379  
 392  380  end:
 393      -#ifdef  __lock_lint
 394      -        rw_exit(&ip->i_rwlock);
 395      -#endif
 396      -
 397  381          return (error);
 398  382  }
 399  383  
 400  384  /* ARGSUSED */
 401  385  static int32_t
 402  386  udf_ioctl(
 403  387          struct vnode *vp,
 404  388          int32_t cmd,
 405  389          intptr_t arg,
 406  390          int32_t flag,
↓ open down ↓ 331 lines elided ↑ open up ↑
 738  722          } else {
 739  723                  xip = NULL;
 740  724                  rw_enter(&ip->i_rwlock, RW_WRITER);
 741  725                  error = ud_direnter(ip, name, DE_CREATE,
 742  726                      (struct ud_inode *)0, (struct ud_inode *)0,
 743  727                      vap, &xip, cr, ct);
 744  728                  rw_exit(&ip->i_rwlock);
 745  729                  ITIMES(ip);
 746  730                  ip = xip;
 747  731          }
 748      -#ifdef  __lock_lint
 749      -        rw_enter(&ip->i_contents, RW_WRITER);
 750      -#else
 751  732          if (ip != NULL) {
 752  733                  rw_enter(&ip->i_contents, RW_WRITER);
 753  734          }
 754      -#endif
 755  735  
 756  736          /*
 757  737           * If the file already exists and this is a non-exclusive create,
 758  738           * check permissions and allow access for non-directories.
 759  739           * Read-only create of an existing directory is also allowed.
 760  740           * We fail an exclusive create of anything which already exists.
 761  741           */
 762  742          if (error == EEXIST) {
 763  743                  if (excl == NONEXCL) {
 764  744                          if ((ip->i_type == VDIR) && (mode & VWRITE)) {
↓ open down ↓ 28 lines elided ↑ open up ↑
 793  773                                  rw_exit(&ip->i_rwlock);
 794  774                          }
 795  775                          vnevent_create(ITOV(ip), ct);
 796  776                  }
 797  777          }
 798  778  
 799  779          if (error == 0) {
 800  780                  *vpp = ITOV(ip);
 801  781                  ITIMES(ip);
 802  782          }
 803      -#ifdef  __lock_lint
 804      -        rw_exit(&ip->i_contents);
 805      -#else
 806  783          if (ip != NULL) {
 807  784                  rw_exit(&ip->i_contents);
 808  785          }
 809      -#endif
 810  786          if (error) {
 811  787                  goto out;
 812  788          }
 813  789  
 814  790          /*
 815  791           * If vnode is a device return special vnode instead.
 816  792           */
 817  793          if (!error && IS_DEVVP(*vpp)) {
 818  794                  struct vnode *newvp;
 819  795  
↓ open down ↓ 743 lines elided ↑ open up ↑
1563 1539  {
1564 1540          struct ud_inode *ip = VTOI(vp);
1565 1541  
1566 1542          ud_printf("udf_rwlock\n");
1567 1543  
1568 1544          if (write_lock) {
1569 1545                  rw_enter(&ip->i_rwlock, RW_WRITER);
1570 1546          } else {
1571 1547                  rw_enter(&ip->i_rwlock, RW_READER);
1572 1548          }
1573      -#ifdef  __lock_lint
1574      -        rw_exit(&ip->i_rwlock);
1575      -#endif
1576 1549          return (write_lock);
1577 1550  }
1578 1551  
1579 1552  /* ARGSUSED */
1580 1553  static void
1581 1554  udf_rwunlock(struct vnode *vp, int32_t write_lock, caller_context_t *ctp)
1582 1555  {
1583 1556          struct ud_inode *ip = VTOI(vp);
1584 1557  
1585 1558          ud_printf("udf_rwunlock\n");
1586 1559  
1587      -#ifdef  __lock_lint
1588      -        rw_enter(&ip->i_rwlock, RW_WRITER);
1589      -#endif
1590      -
1591 1560          rw_exit(&ip->i_rwlock);
1592 1561  
1593 1562  }
1594 1563  
1595 1564  /* ARGSUSED */
1596 1565  static int32_t
1597 1566  udf_seek(struct vnode *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
1598 1567  {
1599 1568          return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
1600 1569  }
↓ open down ↓ 86 lines elided ↑ open up ↑
1687 1656                  *protp = PROT_ALL;
1688 1657          }
1689 1658          if (vp->v_flag & VNOMAP) {
1690 1659                  return (ENOSYS);
1691 1660          }
1692 1661          seqmode = ip->i_nextr == uoff && rw != S_CREATE;
1693 1662  
1694 1663          rwtype = RW_READER;
1695 1664          dolock = (rw_owner(&ip->i_contents) != curthread);
1696 1665  retrylock:
1697      -#ifdef  __lock_lint
1698      -        rw_enter(&ip->i_contents, rwtype);
1699      -#else
1700 1666          if (dolock) {
1701 1667                  rw_enter(&ip->i_contents, rwtype);
1702 1668          }
1703      -#endif
1704 1669  
1705 1670          /*
1706 1671           * We may be getting called as a side effect of a bmap using
1707 1672           * fbread() when the blocks might be being allocated and the
1708 1673           * size has not yet been up'ed.  In this case we want to be
1709 1674           * able to return zero pages if we get back UDF_HOLE from
1710 1675           * calling bmap for a non write case here.  We also might have
1711 1676           * to read some frags from the disk into a page if we are
1712 1677           * extending the number of frags for a given lbn in bmap().
1713 1678           */
1714 1679          beyond_eof = uoff + len > ip->i_size + PAGEOFFSET;
1715 1680          if (beyond_eof && seg != segkmap) {
1716      -#ifdef  __lock_lint
1717      -                rw_exit(&ip->i_contents);
1718      -#else
1719 1681                  if (dolock) {
1720 1682                          rw_exit(&ip->i_contents);
1721 1683                  }
1722      -#endif
1723 1684                  return (EFAULT);
1724 1685          }
1725 1686  
1726 1687          /*
1727 1688           * Must hold i_contents lock throughout the call to pvn_getpages
1728 1689           * since locked pages are returned from each call to ud_getapage.
1729 1690           * Must *not* return locked pages and then try for contents lock
1730 1691           * due to lock ordering requirements (inode > page)
1731 1692           */
1732 1693  
↓ open down ↓ 43 lines elided ↑ open up ↑
1776 1737                          if (error) {
1777 1738                                  goto update_inode;
1778 1739                          }
1779 1740                          offset += count; /* XXX - make this contig */
1780 1741                  }
1781 1742          }
1782 1743  
1783 1744          /*
1784 1745           * Can be a reader from now on.
1785 1746           */
1786      -#ifdef  __lock_lint
1787      -        if (rwtype == RW_WRITER) {
1788      -                rw_downgrade(&ip->i_contents);
1789      -        }
1790      -#else
1791 1747          if (dolock && rwtype == RW_WRITER) {
1792 1748                  rw_downgrade(&ip->i_contents);
1793 1749          }
1794      -#endif
1795 1750  
1796 1751          /*
1797 1752           * We remove PROT_WRITE in cases when the file has UDF holes
1798 1753           * because we don't  want to call bmap_read() to check each
1799 1754           * page if it is backed with a disk block.
1800 1755           */
1801 1756          if (protp && has_holes && rw != S_WRITE && rw != S_CREATE) {
1802 1757                  *protp &= ~PROT_WRITE;
1803 1758          }
1804 1759  
↓ open down ↓ 99 lines elided ↑ open up ↑
1904 1859                  /*
1905 1860                   * Release any pages we have locked.
1906 1861                   */
1907 1862                  while (pl > &plarr[0])
1908 1863                          page_unlock(*--pl);
1909 1864  
1910 1865                  plarr[0] = NULL;
1911 1866          }
1912 1867  
1913 1868  update_inode:
1914      -#ifdef  __lock_lint
1915      -        rw_exit(&ip->i_contents);
1916      -#else
1917 1869          if (dolock) {
1918 1870                  rw_exit(&ip->i_contents);
1919 1871          }
1920      -#endif
1921 1872  
1922 1873          /*
1923 1874           * If the inode is not already marked for IACC (in rwip() for read)
1924 1875           * and the inode is not marked for no access time update (in rwip()
1925 1876           * for write) then update the inode access time and mod time now.
1926 1877           */
1927 1878          mutex_enter(&ip->i_tlock);
1928 1879          if ((ip->i_flag & (IACC | INOACC)) == 0) {
1929 1880                  if ((rw != S_OTHER) && (ip->i_type != VDIR)) {
1930 1881                          ip->i_flag |= IACC;
↓ open down ↓ 19 lines elided ↑ open up ↑
1950 1901          int32_t flags,
1951 1902          struct cred *cr,
1952 1903          caller_context_t *ct)
1953 1904  {
1954 1905          struct ud_inode *ip;
1955 1906          int32_t error = 0;
1956 1907  
1957 1908          ud_printf("udf_putpage\n");
1958 1909  
1959 1910          ip = VTOI(vp);
1960      -#ifdef  __lock_lint
1961      -        rw_enter(&ip->i_contents, RW_WRITER);
1962      -#endif
1963 1911  
1964 1912          if (vp->v_count == 0) {
1965 1913                  cmn_err(CE_WARN, "ud_putpage : bad v_count");
1966 1914                  error = EINVAL;
1967 1915                  goto out;
1968 1916          }
1969 1917  
1970 1918          if (vp->v_flag & VNOMAP) {
1971 1919                  error = ENOSYS;
1972 1920                  goto out;
↓ open down ↓ 43 lines elided ↑ open up ↑
2016 1964                  }
2017 1965  
2018 1966                  /*
2019 1967                   * Must have weird flags or we are not clustering.
2020 1968                   */
2021 1969          }
2022 1970  
2023 1971          error = ud_putpages(vp, off, len, flags, cr);
2024 1972  
2025 1973  out:
2026      -#ifdef  __lock_lint
2027      -        rw_exit(&ip->i_contents);
2028      -#endif
2029 1974          return (error);
2030 1975  }
2031 1976  
2032 1977  /* ARGSUSED */
2033 1978  static int32_t
2034 1979  udf_map(
2035 1980          struct vnode *vp,
2036 1981          offset_t off,
2037 1982          struct as *as,
2038 1983          caddr_t *addrp,
↓ open down ↓ 141 lines elided ↑ open up ↑
2180 2125                  /* nanosecond timestamp resolution */
2181 2126                  *valp = 1L;
2182 2127          } else {
2183 2128                  error = fs_pathconf(vp, cmd, valp, cr, ct);
2184 2129          }
2185 2130  
2186 2131          return (error);
2187 2132  }
2188 2133  
2189 2134  uint32_t ud_pageio_reads = 0, ud_pageio_writes = 0;
2190      -#ifndef __lint
2191      -_NOTE(SCHEME_PROTECTS_DATA("safe sharing", ud_pageio_reads))
2192      -_NOTE(SCHEME_PROTECTS_DATA("safe sharing", ud_pageio_writes))
2193      -#endif
     2135 +
2194 2136  /*
2195 2137   * Assumption is that there will not be a pageio request
 2196 2138   * to an embedded file
2197 2139   */
2198 2140  /* ARGSUSED */
2199 2141  static int32_t
2200 2142  udf_pageio(
2201 2143          struct vnode *vp,
2202 2144          struct page *pp,
2203 2145          u_offset_t io_off,
↓ open down ↓ 18 lines elided ↑ open up ↑
2222 2164          /*
2223 2165           * We need a better check.  Ideally, we would use another
2224 2166           * vnodeops so that hlocked and forcibly unmounted file
2225 2167           * systems would return EIO where appropriate and w/o the
2226 2168           * need for these checks.
2227 2169           */
2228 2170          if (ip->i_udf == NULL) {
2229 2171                  return (EIO);
2230 2172          }
2231 2173  
2232      -#ifdef  __lock_lint
2233      -        rw_enter(&ip->i_contents, RW_READER);
2234      -#else
2235 2174          if (dolock) {
2236 2175                  rw_enter(&ip->i_contents, RW_READER);
2237 2176          }
2238      -#endif
2239 2177  
2240 2178          /*
2241 2179           * Break the io request into chunks, one for each contiguous
2242 2180           * stretch of disk blocks in the target file.
2243 2181           */
2244 2182          while (done_len < io_len) {
2245 2183                  ASSERT(cpp);
2246 2184                  bp = NULL;
2247 2185                  contig = 0;
2248 2186                  if (error = ud_bmap_read(ip, (u_offset_t)(io_off + done_len),
↓ open down ↓ 83 lines elided ↑ open up ↑
2332 2270                          } else {
2333 2271                                  pvn_write_done(cpp, B_ERROR);
2334 2272                          }
2335 2273                  } else {
2336 2274                          /* Re-assemble list and let caller clean up */
2337 2275                          page_list_concat(&opp, &cpp);
2338 2276                          page_list_concat(&opp, &npp);
2339 2277                  }
2340 2278          }
2341 2279  
2342      -#ifdef  __lock_lint
2343      -        rw_exit(&ip->i_contents);
2344      -#else
2345 2280          if (dolock) {
2346 2281                  rw_exit(&ip->i_contents);
2347 2282          }
2348      -#endif
     2283 +
2349 2284          return (error);
2350 2285  }
2351 2286  
2352 2287  
2353 2288  
2354 2289  
2355 2290  /* -------------------- local functions --------------------------- */
2356 2291  
2357 2292  
2358 2293  
↓ open down ↓ 316 lines elided ↑ open up ↑
2675 2610          /*
2676 2611           * Acquire the readers/write inode lock before locking
2677 2612           * any pages in this inode.
2678 2613           * The inode lock is held during i/o.
2679 2614           */
2680 2615          if (len == 0) {
2681 2616                  mutex_enter(&ip->i_tlock);
2682 2617                  ip->i_delayoff = ip->i_delaylen = 0;
2683 2618                  mutex_exit(&ip->i_tlock);
2684 2619          }
2685      -#ifdef  __lock_lint
2686      -        rw_enter(&ip->i_contents, RW_READER);
2687      -#else
2688 2620          dolock = (rw_owner(&ip->i_contents) != curthread);
2689 2621          if (dolock) {
2690 2622                  rw_enter(&ip->i_contents, RW_READER);
2691 2623          }
2692      -#endif
2693 2624  
2694 2625          if (!vn_has_cached_data(vp)) {
2695      -#ifdef  __lock_lint
2696      -                rw_exit(&ip->i_contents);
2697      -#else
2698 2626                  if (dolock) {
2699 2627                          rw_exit(&ip->i_contents);
2700 2628                  }
2701      -#endif
2702 2629                  return (0);
2703 2630          }
2704 2631  
2705 2632          if (len == 0) {
2706 2633                  /*
2707 2634                   * Search the entire vp list for pages >= off.
2708 2635                   */
2709 2636                  err = pvn_vplist_dirty(vp, (u_offset_t)off, ud_putapage,
2710 2637                      flags, cr);
2711 2638          } else {
↓ open down ↓ 44 lines elided ↑ open up ↑
2756 2683          }
2757 2684          if (err == 0 && off == 0 && (len == 0 || len >= ip->i_size)) {
2758 2685                  /*
2759 2686                   * We have just sync'ed back all the pages on
2760 2687                   * the inode, turn off the IMODTIME flag.
2761 2688                   */
2762 2689                  mutex_enter(&ip->i_tlock);
2763 2690                  ip->i_flag &= ~IMODTIME;
2764 2691                  mutex_exit(&ip->i_tlock);
2765 2692          }
2766      -#ifdef  __lock_lint
2767      -        rw_exit(&ip->i_contents);
2768      -#else
2769 2693          if (dolock) {
2770 2694                  rw_exit(&ip->i_contents);
2771 2695          }
2772      -#endif
2773 2696          return (err);
2774 2697  }
2775 2698  
2776 2699  /* ARGSUSED */
2777 2700  int32_t
2778 2701  ud_putapage(struct vnode *vp,
2779 2702          page_t *pp, u_offset_t *offp,
2780 2703          size_t *lenp, int32_t flags, struct cred *cr)
2781 2704  {
2782 2705          daddr_t bn;
↓ open down ↓ 265 lines elided ↑ open up ↑
3048 2971                          error = 0;
3049 2972                          goto out;
3050 2973                  }
3051 2974                  if (diff < (offset_t)n) {
3052 2975                          n = (int)diff;
3053 2976                  }
3054 2977                  dofree = ud_freebehind &&
3055 2978                      ip->i_nextr == (off & PAGEMASK) &&
3056 2979                      off > ud_smallfile;
3057 2980  
3058      -#ifndef __lock_lint
3059 2981                  if (rwtype == RW_READER) {
3060 2982                          rw_exit(&ip->i_contents);
3061 2983                  }
3062      -#endif
3063 2984  
3064 2985                  base = segmap_getmapflt(segkmap, vp, (off + mapon),
3065 2986                      (uint32_t)n, 1, S_READ);
3066 2987                  error = uiomove(base + mapon, (long)n, UIO_READ, uio);
3067 2988  
3068 2989                  flags = 0;
3069 2990                  if (!error) {
3070 2991                          /*
3071 2992                           * If read a whole block, or read to eof,
3072 2993                           * won't need this buffer again soon.
↓ open down ↓ 11 lines elided ↑ open up ↑
3084 3005                           */
3085 3006                          if ((ioflag & FRSYNC) && (ioflag & (FSYNC|FDSYNC))) {
3086 3007                                  flags &= ~SM_ASYNC;
3087 3008                                  flags |= SM_WRITE;
3088 3009                          }
3089 3010                          error = segmap_release(segkmap, base, flags);
3090 3011                  } else    {
3091 3012                          (void) segmap_release(segkmap, base, flags);
3092 3013                  }
3093 3014  
3094      -#ifndef __lock_lint
3095 3015                  if (rwtype == RW_READER) {
3096 3016                          rw_enter(&ip->i_contents, rwtype);
3097 3017                  }
3098      -#endif
3099 3018          } while (error == 0 && uio->uio_resid > 0 && n != 0);
3100 3019  out:
3101 3020          /*
3102 3021           * Inode is updated according to this table if FRSYNC is set.
3103 3022           *
3104 3023           *      FSYNC   FDSYNC(posix.4)
3105 3024           *      --------------------------
3106 3025           *      always  IATTCHG|IBDWRITE
3107 3026           */
3108 3027          if (ioflag & FRSYNC) {
↓ open down ↓ 512 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX