OS-7753 THREAD_KPRI_RELEASE does nothing of the sort
Reviewed by: Bryan Cantrill <bryan@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
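
Context for the hunks below: within the portion of hat_sfmmu.c shown here, the only functional change is the removal of the THREAD_KPRI_REQUEST() call from sfmmu_ismhat_enter() and the matching THREAD_KPRI_RELEASE() from its exit counterpart (whose signature line is elided in this view); the remaining hunks are continuation-line indentation and whitespace cleanup plus the 2019 Joyent copyright notice. The HAT_ISMBUSY hand-off those functions implement is untouched. As a rough user-space analogue of that hand-off — a sketch only, where the pthread primitives and the ism_* names are illustrative stand-ins, not the kernel code:

#include <pthread.h>
#include <stdbool.h>

/*
 * Illustrative stand-ins for the hat lock, sfmmu_tsb_cv, and the
 * HAT_ISMBUSY flag; none of these names exist in the kernel source.
 */
static pthread_mutex_t ism_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ism_cv = PTHREAD_COND_INITIALIZER;
static bool ism_busy = false;

/* Analogue of sfmmu_ismhat_enter(): wait until the flag is clear, then set it. */
static void
ism_enter(void)
{
	pthread_mutex_lock(&ism_lock);
	while (ism_busy)
		pthread_cond_wait(&ism_cv, &ism_lock);
	ism_busy = true;
	pthread_mutex_unlock(&ism_lock);
}

/* Analogue of the exit path: clear the flag and wake all waiters. */
static void
ism_exit(void)
{
	pthread_mutex_lock(&ism_lock);
	ism_busy = false;
	pthread_cond_broadcast(&ism_cv);
	pthread_mutex_unlock(&ism_lock);
}

With the kernel-priority request/release pair gone, enter and exit reduce to exactly this flag-plus-condvar hand-off; the cv_wait() loop and cv_broadcast() visible in the diff are unchanged.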

          --- old/usr/src/uts/sfmmu/vm/hat_sfmmu.c
          +++ new/usr/src/uts/sfmmu/vm/hat_sfmmu.c
↓ open down ↓ 16 lines elided ↑ open up ↑
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
  23   23   */
  24   24  /*
  25   25   * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
  26   26   * Copyright 2016 Gary Mills
       27 + * Copyright 2019 Joyent, Inc.
  27   28   */
  28   29  
  29   30  /*
  30   31   * VM - Hardware Address Translation management for Spitfire MMU.
  31   32   *
  32   33   * This file implements the machine specific hardware translation
  33   34   * needed by the VM system.  The machine independent interface is
  34   35   * described in <vm/hat.h> while the machine dependent interface
  35   36   * and data structures are described in <vm/hat_sfmmu.h>.
  36   37   *
↓ open down ↓ 66 lines elided ↑ open up ↑
 103  104                  ASSERT(_rgnp->rgn_refcnt != 0);                         \
 104  105                  ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE));        \
 105  106                  ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) ==   \
 106  107                      SFMMU_REGION_HME);                                  \
 107  108                  ASSERT((saddr) >= _rgnp->rgn_saddr);                    \
 108  109                  ASSERT((saddr) < _rgnp->rgn_saddr + _rgnp->rgn_size);   \
 109  110                  ASSERT(_eaddr > _rgnp->rgn_saddr);                      \
 110  111                  ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size);   \
 111  112          }
 112  113  
 113      -#define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)              \
 114      -{                                                                        \
 115      -                caddr_t _hsva;                                           \
 116      -                caddr_t _heva;                                           \
 117      -                caddr_t _rsva;                                           \
 118      -                caddr_t _reva;                                           \
 119      -                int     _ttesz = get_hblk_ttesz(hmeblkp);                \
 120      -                int     _flagtte;                                        \
 121      -                ASSERT((srdp)->srd_refcnt != 0);                         \
 122      -                ASSERT((rid) < SFMMU_MAX_HME_REGIONS);                   \
 123      -                ASSERT((rgnp)->rgn_id == rid);                           \
 124      -                ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE));        \
 125      -                ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) ==   \
 126      -                    SFMMU_REGION_HME);                                   \
 127      -                ASSERT(_ttesz <= (rgnp)->rgn_pgszc);                     \
 128      -                _hsva = (caddr_t)get_hblk_base(hmeblkp);                 \
 129      -                _heva = get_hblk_endaddr(hmeblkp);                       \
 130      -                _rsva = (caddr_t)P2ALIGN(                                \
 131      -                    (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES);       \
 132      -                _reva = (caddr_t)P2ROUNDUP(                              \
 133      -                    (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size),   \
 134      -                    HBLK_MIN_BYTES);                                     \
 135      -                ASSERT(_hsva >= _rsva);                                  \
 136      -                ASSERT(_hsva < _reva);                                   \
 137      -                ASSERT(_heva > _rsva);                                   \
 138      -                ASSERT(_heva <= _reva);                                  \
 139      -                _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ :  \
 140      -                        _ttesz;                                          \
 141      -                ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte));          \
      114 +#define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)             \
      115 +{                                                                       \
      116 +                caddr_t _hsva;                                          \
      117 +                caddr_t _heva;                                          \
      118 +                caddr_t _rsva;                                          \
      119 +                caddr_t _reva;                                          \
      120 +                int     _ttesz = get_hblk_ttesz(hmeblkp);               \
      121 +                int     _flagtte;                                       \
      122 +                ASSERT((srdp)->srd_refcnt != 0);                        \
      123 +                ASSERT((rid) < SFMMU_MAX_HME_REGIONS);                  \
      124 +                ASSERT((rgnp)->rgn_id == rid);                          \
      125 +                ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE));       \
      126 +                ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) ==  \
      127 +                    SFMMU_REGION_HME);                                  \
      128 +                ASSERT(_ttesz <= (rgnp)->rgn_pgszc);                    \
      129 +                _hsva = (caddr_t)get_hblk_base(hmeblkp);                \
      130 +                _heva = get_hblk_endaddr(hmeblkp);                      \
      131 +                _rsva = (caddr_t)P2ALIGN(                               \
      132 +                    (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES);      \
      133 +                _reva = (caddr_t)P2ROUNDUP(                             \
      134 +                    (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size),  \
      135 +                    HBLK_MIN_BYTES);                                    \
      136 +                ASSERT(_hsva >= _rsva);                                 \
      137 +                ASSERT(_hsva < _reva);                                  \
      138 +                ASSERT(_heva > _rsva);                                  \
      139 +                ASSERT(_heva <= _reva);                                 \
      140 +                _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \
      141 +                        _ttesz;                                         \
      142 +                ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte));         \
 142  143  }
 143  144  
 144  145  #else /* DEBUG */
 145  146  #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len)
 146  147  #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid)
 147  148  #endif /* DEBUG */
 148  149  
 149  150  #if defined(SF_ERRATA_57)
 150  151  extern caddr_t errata57_limit;
 151  152  #endif
↓ open down ↓ 38 lines elided ↑ open up ↑
 190  191   * to system memory. It is off by default. At the moment this
 191  192   * flag is used by the ecache error injector. The error injector
 192  193   * will turn it on when creating such a translation then shut it
 193  194   * off when it's finished.
 194  195   */
 195  196  
 196  197  int     sfmmu_allow_nc_trans = 0;
 197  198  
 198  199  /*
 199  200   * Flag to disable large page support.
 200      - *      value of 1 => disable all large pages.
      201 + *      value of 1 => disable all large pages.
 201  202   *      bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively.
 202  203   *
 203  204   * For example, use the value 0x4 to disable 512K pages.
 204  205   *
 205  206   */
 206  207  #define LARGE_PAGES_OFF         0x1
 207  208  
 208  209  /*
 209  210   * The disable_large_pages and disable_ism_large_pages variables control
 210  211   * hat_memload_array and the page sizes to be used by ISM and the kernel.
↓ open down ↓ 27 lines elided ↑ open up ↑
 238  239  static vmem_t *kmem_tsb_arena;
 239  240  
 240  241  /*
 241  242   * sfmmu static variables for hmeblk resource management.
 242  243   */
 243  244  static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */
 244  245  static struct kmem_cache *sfmmu8_cache;
 245  246  static struct kmem_cache *sfmmu1_cache;
 246  247  static struct kmem_cache *pa_hment_cache;
 247  248  
 248      -static kmutex_t         ism_mlist_lock; /* mutex for ism mapping list */
      249 +static kmutex_t         ism_mlist_lock; /* mutex for ism mapping list */
 249  250  /*
 250  251   * private data for ism
 251  252   */
 252  253  static struct kmem_cache *ism_blk_cache;
 253  254  static struct kmem_cache *ism_ment_cache;
 254  255  #define ISMID_STARTADDR NULL
 255  256  
 256  257  /*
 257  258   * Region management data structures and function declarations.
 258  259   */
↓ open down ↓ 125 lines elided ↑ open up ↑
 384  385   * Disabled by default.  This is incompatible with some
 385  386   * drivers (error injector, RSM) so if it breaks you get
 386  387   * to keep both pieces.
 387  388   */
 388  389  int hat_check_vtop = 0;
 389  390  
 390  391  /*
 391  392   * Private sfmmu routines (prototypes)
 392  393   */
 393  394  static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t);
 394      -static struct   hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
      395 +static struct   hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t,
 395  396                          struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t,
 396  397                          uint_t);
 397  398  static caddr_t  sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t,
 398  399                          caddr_t, demap_range_t *, uint_t);
 399  400  static caddr_t  sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t,
 400  401                          caddr_t, int);
 401  402  static void     sfmmu_hblk_free(struct hme_blk **);
 402  403  static void     sfmmu_hblks_list_purge(struct hme_blk **, int);
 403  404  static uint_t   sfmmu_get_free_hblk(struct hme_blk **, uint_t);
 404  405  static uint_t   sfmmu_put_free_hblk(struct hme_blk *, uint_t);
↓ open down ↓ 49 lines elided ↑ open up ↑
 454  455      struct hme_blk *, int);
 455  456  static void     sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
 456  457                          pfn_t, int, int, int, int);
 457  458  static void     sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *,
 458  459                          pfn_t, int);
 459  460  static void     sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int);
 460  461  static void     sfmmu_tlb_range_demap(demap_range_t *);
 461  462  static void     sfmmu_invalidate_ctx(sfmmu_t *);
 462  463  static void     sfmmu_sync_mmustate(sfmmu_t *);
 463  464  
 464      -static void     sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
      465 +static void     sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t);
 465  466  static int      sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t,
 466  467                          sfmmu_t *);
 467  468  static void     sfmmu_tsb_free(struct tsb_info *);
 468  469  static void     sfmmu_tsbinfo_free(struct tsb_info *);
 469  470  static int      sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t,
 470  471                          sfmmu_t *);
 471  472  static void     sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *);
 472  473  static void     sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *);
 473  474  static int      sfmmu_select_tsb_szc(pgcnt_t);
 474  475  static void     sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int);
↓ open down ↓ 77 lines elided ↑ open up ↑
 552  553  
 553  554  /*
 554  555   * Semi-private sfmmu data structures.  Some of them are initialize in
 555  556   * startup or in hat_init. Some of them are private but accessed by
 556  557   * assembly code or mach_sfmmu.c
 557  558   */
 558  559  struct hmehash_bucket *uhme_hash;       /* user hmeblk hash table */
 559  560  struct hmehash_bucket *khme_hash;       /* kernel hmeblk hash table */
 560  561  uint64_t        uhme_hash_pa;           /* PA of uhme_hash */
 561  562  uint64_t        khme_hash_pa;           /* PA of khme_hash */
 562      -int             uhmehash_num;           /* # of buckets in user hash table */
 563      -int             khmehash_num;           /* # of buckets in kernel hash table */
      563 +int             uhmehash_num;           /* # of buckets in user hash table */
      564 +int             khmehash_num;           /* # of buckets in kernel hash table */
 564  565  
 565  566  uint_t          max_mmu_ctxdoms = 0;    /* max context domains in the system */
 566  567  mmu_ctx_t       **mmu_ctxs_tbl;         /* global array of context domains */
 567  568  uint64_t        mmu_saved_gnum = 0;     /* to init incoming MMUs' gnums */
 568  569  
 569  570  #define DEFAULT_NUM_CTXS_PER_MMU 8192
 570  571  static uint_t   nctxs = DEFAULT_NUM_CTXS_PER_MMU;
 571  572  
 572  573  int             cache;                  /* describes system cache */
 573  574  
↓ open down ↓ 122 lines elided ↑ open up ↑
 696  697  
 697  698  /*
 698  699   * kstat data
 699  700   */
 700  701  struct sfmmu_global_stat sfmmu_global_stat;
 701  702  struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat;
 702  703  
 703  704  /*
 704  705   * Global data
 705  706   */
 706      -sfmmu_t         *ksfmmup;               /* kernel's hat id */
      707 +sfmmu_t         *ksfmmup;               /* kernel's hat id */
 707  708  
 708  709  #ifdef DEBUG
 709  710  static void     chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *);
 710  711  #endif
 711  712  
 712  713  /* sfmmu locking operations */
 713  714  static kmutex_t *sfmmu_mlspl_enter(struct page *, int);
 714  715  static int      sfmmu_mlspl_held(struct page *, int);
 715  716  
 716  717  kmutex_t *sfmmu_page_enter(page_t *);
↓ open down ↓ 131 lines elided ↑ open up ↑
 848  849   * Macro to use to unload entries from the TSB.
 849  850   * It has knowledge of which page sizes get replicated in the TSB
 850  851   * and will call the appropriate unload routine for the appropriate size.
 851  852   */
 852  853  #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat)         \
 853  854  {                                                                       \
 854  855          int ttesz = get_hblk_ttesz(hmeblkp);                            \
 855  856          if (ttesz == TTE8K || ttesz == TTE4M) {                         \
 856  857                  sfmmu_unload_tsb(sfmmup, addr, ttesz);                  \
 857  858          } else {                                                        \
 858      -                caddr_t sva = ismhat ? addr :                           \
      859 +                caddr_t sva = ismhat ? addr :                           \
 859  860                      (caddr_t)get_hblk_base(hmeblkp);                    \
 860  861                  caddr_t eva = sva + get_hblk_span(hmeblkp);             \
 861  862                  ASSERT(addr >= sva && addr < eva);                      \
 862  863                  sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz);        \
 863  864          }                                                               \
 864  865  }
 865  866  
 866  867  
 867  868  /* Update tsb_alloc_hiwater after memory is configured. */
 868  869  /*ARGSUSED*/
↓ open down ↓ 140 lines elided ↑ open up ↑
1009 1010                                                                          \
1010 1011          (hment) = &(hmeblkp)->hblk_hme[idx];                            \
1011 1012  }
1012 1013  
1013 1014  /*
1014 1015   * Disable any page sizes not supported by the CPU
1015 1016   */
1016 1017  void
1017 1018  hat_init_pagesizes()
1018 1019  {
1019      -        int             i;
     1020 +        int             i;
1020 1021  
1021 1022          mmu_exported_page_sizes = 0;
1022 1023          for (i = TTE8K; i < max_mmu_page_sizes; i++) {
1023 1024  
1024 1025                  szc_2_userszc[i] = (uint_t)-1;
1025 1026                  userszc_2_szc[i] = (uint_t)-1;
1026 1027  
1027 1028                  if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
1028 1029                          disable_large_pages |= (1 << i);
1029 1030                  } else {
↓ open down ↓ 20 lines elided ↑ open up ↑
1050 1051                      mmu_large_pages_disabled(HAT_AUTO_TEXT);
1051 1052          }
1052 1053  }
1053 1054  
1054 1055  /*
1055 1056   * Initialize the hardware address translation structures.
1056 1057   */
1057 1058  void
1058 1059  hat_init(void)
1059 1060  {
1060      -        int             i;
     1061 +        int             i;
1061 1062          uint_t          sz;
1062 1063          size_t          size;
1063 1064  
1064 1065          hat_lock_init();
1065 1066          hat_kstat_init();
1066 1067  
1067 1068          /*
1068 1069           * Hardware-only bits in a TTE
1069 1070           */
1070 1071          MAKE_TTE_MASK(&hw_tte);
↓ open down ↓ 1041 lines elided ↑ open up ↑
2112 2113                  sfmmu_tsb_free(freelist->tsbinfop);
2113 2114          }
2114 2115  }
2115 2116  
2116 2117  /*
2117 2118   * Duplicate the translations of an as into another newas
2118 2119   */
2119 2120  /* ARGSUSED */
2120 2121  int
2121 2122  hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
2122      -        uint_t flag)
     2123 +    uint_t flag)
2123 2124  {
2124 2125          sf_srd_t *srdp;
2125 2126          sf_scd_t *scdp;
2126 2127          int i;
2127 2128          extern uint_t get_color_start(struct as *);
2128 2129  
2129 2130          ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
2130 2131              (flag == HAT_DUP_SRD));
2131 2132          ASSERT(hat != ksfmmup);
2132 2133          ASSERT(newhat != ksfmmup);
↓ open down ↓ 44 lines elided ↑ open up ↑
2177 2178  
2178 2179          if (flag == HAT_DUP_ALL && consistent_coloring == 0 &&
2179 2180              update_proc_pgcolorbase_after_fork != 0) {
2180 2181                  hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as);
2181 2182          }
2182 2183          return (0);
2183 2184  }
2184 2185  
2185 2186  void
2186 2187  hat_memload(struct hat *hat, caddr_t addr, struct page *pp,
2187      -        uint_t attr, uint_t flags)
     2188 +    uint_t attr, uint_t flags)
2188 2189  {
2189 2190          hat_do_memload(hat, addr, pp, attr, flags,
2190 2191              SFMMU_INVALID_SHMERID);
2191 2192  }
2192 2193  
2193 2194  void
2194 2195  hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
2195      -        uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
     2196 +    uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
2196 2197  {
2197 2198          uint_t rid;
2198 2199          if (rcookie == HAT_INVALID_REGION_COOKIE) {
2199 2200                  hat_do_memload(hat, addr, pp, attr, flags,
2200 2201                      SFMMU_INVALID_SHMERID);
2201 2202                  return;
2202 2203          }
2203 2204          rid = (uint_t)((uint64_t)rcookie);
2204 2205          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2205 2206          hat_do_memload(hat, addr, pp, attr, flags, rid);
2206 2207  }
2207 2208  
2208 2209  /*
2209 2210   * Set up addr to map to page pp with protection prot.
2210 2211   * As an optimization we also load the TSB with the
2211 2212   * corresponding tte but it is no big deal if  the tte gets kicked out.
2212 2213   */
2213 2214  static void
2214 2215  hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp,
2215      -        uint_t attr, uint_t flags, uint_t rid)
     2216 +    uint_t attr, uint_t flags, uint_t rid)
2216 2217  {
2217 2218          tte_t tte;
2218 2219  
2219 2220  
2220 2221          ASSERT(hat != NULL);
2221 2222          ASSERT(PAGE_LOCKED(pp));
2222 2223          ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2223 2224          ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2224 2225          ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2225 2226          SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE);
↓ open down ↓ 36 lines elided ↑ open up ↑
2262 2263  /*
2263 2264   * hat_devload can be called to map real memory (e.g.
2264 2265   * /dev/kmem) and even though hat_devload will determine pf is
2265 2266   * for memory, it will be unable to get a shared lock on the
2266 2267   * page (because someone else has it exclusively) and will
2267 2268   * pass dp = NULL.  If tteload doesn't get a non-NULL
2268 2269   * page pointer it can't cache memory.
2269 2270   */
2270 2271  void
2271 2272  hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
2272      -        uint_t attr, int flags)
     2273 +    uint_t attr, int flags)
2273 2274  {
2274 2275          tte_t tte;
2275 2276          struct page *pp = NULL;
2276 2277          int use_lgpg = 0;
2277 2278  
2278 2279          ASSERT(hat != NULL);
2279 2280  
2280 2281          ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2281 2282          ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2282 2283          ASSERT((hat == ksfmmup) || AS_LOCK_HELD(hat->sfmmu_as));
↓ open down ↓ 120 lines elided ↑ open up ↑
2403 2404          /*
2404 2405           * Check TSB and TLB page sizes.
2405 2406           */
2406 2407          if ((flags & HAT_LOAD_SHARE) == 0) {
2407 2408                  sfmmu_check_page_sizes(hat, 1);
2408 2409          }
2409 2410  }
2410 2411  
2411 2412  void
2412 2413  hat_memload_array(struct hat *hat, caddr_t addr, size_t len,
2413      -        struct page **pps, uint_t attr, uint_t flags)
     2414 +    struct page **pps, uint_t attr, uint_t flags)
2414 2415  {
2415 2416          hat_do_memload_array(hat, addr, len, pps, attr, flags,
2416 2417              SFMMU_INVALID_SHMERID);
2417 2418  }
2418 2419  
2419 2420  void
2420 2421  hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
2421      -        struct page **pps, uint_t attr, uint_t flags,
2422      -        hat_region_cookie_t rcookie)
     2422 +    struct page **pps, uint_t attr, uint_t flags,
     2423 +    hat_region_cookie_t rcookie)
2423 2424  {
2424 2425          uint_t rid;
2425 2426          if (rcookie == HAT_INVALID_REGION_COOKIE) {
2426 2427                  hat_do_memload_array(hat, addr, len, pps, attr, flags,
2427 2428                      SFMMU_INVALID_SHMERID);
2428 2429                  return;
2429 2430          }
2430 2431          rid = (uint_t)((uint64_t)rcookie);
2431 2432          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2432 2433          hat_do_memload_array(hat, addr, len, pps, attr, flags, rid);
↓ open down ↓ 5 lines elided ↑ open up ↑
2438 2439   * is specified in the p_szc field.  The p_szc field
2439 2440   * cannot change as long as there any mappings (large or small)
2440 2441   * to any of the pages that make up the large page. (ie. any
2441 2442   * promotion/demotion of page size is not up to the hat but up to
2442 2443   * the page free list manager).  The array
2443 2444   * should consist of properly aligned contigous pages that are
2444 2445   * part of a big page for a large mapping to be created.
2445 2446   */
2446 2447  static void
2447 2448  hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len,
2448      -        struct page **pps, uint_t attr, uint_t flags, uint_t rid)
     2449 +    struct page **pps, uint_t attr, uint_t flags, uint_t rid)
2449 2450  {
2450 2451          int  ttesz;
2451 2452          size_t mapsz;
2452 2453          pgcnt_t numpg, npgs;
2453 2454          tte_t tte;
2454 2455          page_t *pp;
2455 2456          uint_t large_pages_disable;
2456 2457  
2457 2458          ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2458 2459          SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
↓ open down ↓ 89 lines elided ↑ open up ↑
2548 2549          if ((flags & HAT_LOAD_SHARE) == 0) {
2549 2550                  sfmmu_check_page_sizes(hat, 1);
2550 2551          }
2551 2552  }
2552 2553  
2553 2554  /*
2554 2555   * Function tries to batch 8K pages into the same hme blk.
2555 2556   */
2556 2557  static void
2557 2558  sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps,
2558      -                    uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid)
     2559 +    uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid)
2559 2560  {
2560 2561          tte_t   tte;
2561 2562          page_t *pp;
2562 2563          struct hmehash_bucket *hmebp;
2563 2564          struct hme_blk *hmeblkp;
2564 2565          int     index;
2565 2566  
2566 2567          while (npgs) {
2567 2568                  /*
2568 2569                   * Acquire the hash bucket.
↓ open down ↓ 95 lines elided ↑ open up ↑
2664 2665   * hme_blk if one does not exist.
2665 2666   * If a page structure is specified then it will add the
2666 2667   * corresponding hment to the mapping list.
2667 2668   * It will also update the hmenum field for the tte.
2668 2669   *
2669 2670   * Currently this function is only used for kernel mappings.
2670 2671   * So pass invalid region to sfmmu_tteload_array().
2671 2672   */
2672 2673  void
2673 2674  sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp,
2674      -        uint_t flags)
     2675 +    uint_t flags)
2675 2676  {
2676 2677          ASSERT(sfmmup == ksfmmup);
2677 2678          (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags,
2678 2679              SFMMU_INVALID_SHMERID);
2679 2680  }
2680 2681  
2681 2682  /*
2682 2683   * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB.
2683 2684   * Assumes that a particular page size may only be resident in one TSB.
2684 2685   */
↓ open down ↓ 182 lines elided ↑ open up ↑
2867 2868   * hme_blk if one does not exist.
2868 2869   * If a page structure is specified then it will add the
2869 2870   * corresponding hment to the mapping list.
2870 2871   * It will also update the hmenum field for the tte.
2871 2872   * Furthermore, it attempts to create a large page translation
2872 2873   * for <addr,hat> at page array pps.  It assumes addr and first
2873 2874   * pp is correctly aligned.  It returns 0 if successful and 1 otherwise.
2874 2875   */
2875 2876  static int
2876 2877  sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr,
2877      -        page_t **pps, uint_t flags, uint_t rid)
     2878 +    page_t **pps, uint_t flags, uint_t rid)
2878 2879  {
2879 2880          struct hmehash_bucket *hmebp;
2880 2881          struct hme_blk *hmeblkp;
2881      -        int     ret;
     2882 +        int     ret;
2882 2883          uint_t  size;
2883 2884  
2884 2885          /*
2885 2886           * Get mapping size.
2886 2887           */
2887 2888          size = TTE_CSZ(ttep);
2888 2889          ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size)));
2889 2890  
2890 2891          /*
2891 2892           * Acquire the hash bucket.
↓ open down ↓ 44 lines elided ↑ open up ↑
2936 2937          return (hmebp);
2937 2938  }
2938 2939  
2939 2940  /*
2940 2941   * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the
2941 2942   * hmeblk doesn't exists for the [sfmmup, vaddr & size] signature, a hmeblk is
2942 2943   * allocated.
2943 2944   */
2944 2945  static struct hme_blk *
2945 2946  sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp,
2946      -        caddr_t vaddr, uint_t size, uint_t flags, uint_t rid)
     2947 +    caddr_t vaddr, uint_t size, uint_t flags, uint_t rid)
2947 2948  {
2948 2949          hmeblk_tag hblktag;
2949 2950          int hmeshift;
2950 2951          struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
2951 2952  
2952 2953          SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size));
2953 2954  
2954 2955          hblktag.htag_id = sfmmutohtagid(sfmmup, rid);
2955 2956          ASSERT(hblktag.htag_id != NULL);
2956 2957          hmeshift = HME_HASH_SHIFT(size);
↓ open down ↓ 72 lines elided ↑ open up ↑
3029 3030  
3030 3031          return (hmeblkp);
3031 3032  }
3032 3033  
3033 3034  /*
3034 3035   * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1
3035 3036   * otherwise.
3036 3037   */
3037 3038  static int
3038 3039  sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep,
3039      -        caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid)
     3040 +    caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid)
3040 3041  {
3041 3042          page_t *pp = *pps;
3042 3043          int hmenum, size, remap;
3043 3044          tte_t tteold, flush_tte;
3044 3045  #ifdef DEBUG
3045 3046          tte_t orig_old;
3046 3047  #endif /* DEBUG */
3047 3048          struct sf_hment *sfhme;
3048 3049          kmutex_t *pml, *pmtx;
3049 3050          hatlock_t *hatlockp;
↓ open down ↓ 325 lines elided ↑ open up ↑
3375 3376  /*
3376 3377   * function which checks and sets up page array for a large
3377 3378   * translation.  Will set p_vcolor, p_index, p_ro fields.
3378 3379   * Assumes addr and pfnum of first page are properly aligned.
3379 3380   * Will check for physical contiguity. If check fails it return
3380 3381   * non null.
3381 3382   */
3382 3383  static int
3383 3384  sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap)
3384 3385  {
3385      -        int     i, index, ttesz;
     3386 +        int     i, index, ttesz;
3386 3387          pfn_t   pfnum;
3387 3388          pgcnt_t npgs;
3388 3389          page_t *pp, *pp1;
3389 3390          kmutex_t *pmtx;
3390 3391  #ifdef VAC
3391 3392          int osz;
3392 3393          int cflags = 0;
3393 3394          int vac_err = 0;
3394 3395  #endif
3395 3396          int newidx = 0;
↓ open down ↓ 263 lines elided ↑ open up ↑
3659 3660   * This routine cleanup a previous shadow hmeblk and changes it to
3660 3661   * a regular hblk.  This happens rarely but it is possible
3661 3662   * when a process wants to use large pages and there are hblks still
3662 3663   * lying around from the previous as that used these hmeblks.
3663 3664   * The alternative was to cleanup the shadow hblks at unload time
3664 3665   * but since so few user processes actually use large pages, it is
3665 3666   * better to be lazy and cleanup at this time.
3666 3667   */
3667 3668  static void
3668 3669  sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
3669      -        struct hmehash_bucket *hmebp)
     3670 +    struct hmehash_bucket *hmebp)
3670 3671  {
3671 3672          caddr_t addr, endaddr;
3672 3673          int hashno, size;
3673 3674  
3674 3675          ASSERT(hmeblkp->hblk_shw_bit);
3675 3676          ASSERT(!hmeblkp->hblk_shared);
3676 3677  
3677 3678          ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
3678 3679  
3679 3680          if (!hmeblkp->hblk_shw_mask) {
↓ open down ↓ 7 lines elided ↑ open up ↑
3687 3688          ASSERT(hashno > 0);
3688 3689          SFMMU_HASH_UNLOCK(hmebp);
3689 3690  
3690 3691          sfmmu_free_hblks(sfmmup, addr, endaddr, hashno);
3691 3692  
3692 3693          SFMMU_HASH_LOCK(hmebp);
3693 3694  }
3694 3695  
3695 3696  static void
3696 3697  sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr,
3697      -        int hashno)
     3698 +    int hashno)
3698 3699  {
3699 3700          int hmeshift, shadow = 0;
3700 3701          hmeblk_tag hblktag;
3701 3702          struct hmehash_bucket *hmebp;
3702 3703          struct hme_blk *hmeblkp;
3703 3704          struct hme_blk *nx_hblk, *pr_hblk, *list = NULL;
3704 3705  
3705 3706          ASSERT(hashno > 0);
3706 3707          hblktag.htag_id = sfmmup;
3707 3708          hblktag.htag_rehash = hashno;
↓ open down ↓ 493 lines elided ↑ open up ↑
4201 4202  /*
4202 4203   * Register a callback class.  Each subsystem should do this once and
4203 4204   * cache the id_t returned for use in setting up and tearing down callbacks.
4204 4205   *
4205 4206   * There is no facility for removing callback IDs once they are created;
4206 4207   * the "key" should be unique for each module, so in case a module is unloaded
4207 4208   * and subsequently re-loaded, we can recycle the module's previous entry.
4208 4209   */
4209 4210  id_t
4210 4211  hat_register_callback(int key,
4211      -        int (*prehandler)(caddr_t, uint_t, uint_t, void *),
4212      -        int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
4213      -        int (*errhandler)(caddr_t, uint_t, uint_t, void *),
4214      -        int capture_cpus)
     4212 +    int (*prehandler)(caddr_t, uint_t, uint_t, void *),
     4213 +    int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t),
     4214 +    int (*errhandler)(caddr_t, uint_t, uint_t, void *),
     4215 +    int capture_cpus)
4215 4216  {
4216 4217          id_t id;
4217 4218  
4218 4219          /*
4219 4220           * Search the table for a pre-existing callback associated with
4220 4221           * the identifier "key".  If one exists, we re-use that entry in
4221 4222           * the table for this instance, otherwise we assign the next
4222 4223           * available table slot.
4223 4224           */
4224 4225          for (id = 0; id < sfmmu_max_cb_id; id++) {
↓ open down ↓ 52 lines elided ↑ open up ↑
4277 4278   * Returns values:
4278 4279   *    0:      success
4279 4280   *    ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP)
4280 4281   *    EINVAL: callback ID is not valid
4281 4282   *    ENXIO:  ["vaddr", "vaddr" + len) is not mapped in the kernel's address
4282 4283   *            space
4283 4284   *    ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary
4284 4285   */
4285 4286  int
4286 4287  hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags,
4287      -        void *pvt, pfn_t *rpfn, void **cookiep)
     4288 +    void *pvt, pfn_t *rpfn, void **cookiep)
4288 4289  {
4289      -        struct          hmehash_bucket *hmebp;
4290      -        hmeblk_tag      hblktag;
     4290 +        struct          hmehash_bucket *hmebp;
     4291 +        hmeblk_tag      hblktag;
4291 4292          struct hme_blk  *hmeblkp;
4292      -        int             hmeshift, hashno;
4293      -        caddr_t         saddr, eaddr, baseaddr;
     4293 +        int             hmeshift, hashno;
     4294 +        caddr_t         saddr, eaddr, baseaddr;
4294 4295          struct pa_hment *pahmep;
4295 4296          struct sf_hment *sfhmep, *osfhmep;
4296 4297          kmutex_t        *pml;
4297      -        tte_t           tte;
     4298 +        tte_t           tte;
4298 4299          page_t          *pp;
4299 4300          vnode_t         *vp;
4300 4301          u_offset_t      off;
4301 4302          pfn_t           pfn;
4302 4303          int             kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP;
4303 4304          int             locked = 0;
4304 4305  
4305 4306          /*
4306 4307           * For KPM mappings, just return the physical address since we
4307 4308           * don't need to register any callbacks.
↓ open down ↓ 198 lines elided ↑ open up ↑
4506 4507                  *cookiep = (void *)pahmep;
4507 4508  
4508 4509          return (0);
4509 4510  }
4510 4511  
4511 4512  /*
4512 4513   * Remove the relocation callbacks from the specified addr/len.
4513 4514   */
4514 4515  void
4515 4516  hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags,
4516      -        void *cookie)
     4517 +    void *cookie)
4517 4518  {
4518 4519          struct          hmehash_bucket *hmebp;
4519 4520          hmeblk_tag      hblktag;
4520 4521          struct hme_blk  *hmeblkp;
4521 4522          int             hmeshift, hashno;
4522 4523          caddr_t         saddr;
4523 4524          struct pa_hment *pahmep;
4524 4525          struct sf_hment *sfhmep, *osfhmep;
4525 4526          kmutex_t        *pml;
4526 4527          tte_t           tte;
↓ open down ↓ 297 lines elided ↑ open up ↑
4824 4825          ASSERT(hat->sfmmu_as != NULL);
4825 4826  
4826 4827          sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
4827 4828  }
4828 4829  
4829 4830  /*
4830 4831   * Change attributes on an address range to that specified by attr and mode.
4831 4832   */
4832 4833  static void
4833 4834  sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
4834      -        int mode)
     4835 +    int mode)
4835 4836  {
4836 4837          struct hmehash_bucket *hmebp;
4837 4838          hmeblk_tag hblktag;
4838 4839          int hmeshift, hashno = 1;
4839 4840          struct hme_blk *hmeblkp, *list = NULL;
4840 4841          caddr_t endaddr;
4841 4842          cpuset_t cpuset;
4842 4843          demap_range_t dmr;
4843 4844  
4844 4845          CPUSET_ZERO(cpuset);
↓ open down ↓ 75 lines elided ↑ open up ↑
4920 4921   * This function chgattr on a range of addresses in an hmeblk.  It returns the
4921 4922   * next addres that needs to be chgattr.
4922 4923   * It should be called with the hash lock held.
4923 4924   * XXX It should be possible to optimize chgattr by not flushing every time but
4924 4925   * on the other hand:
4925 4926   * 1. do one flush crosscall.
4926 4927   * 2. only flush if we are increasing permissions (make sure this will work)
4927 4928   */
4928 4929  static caddr_t
4929 4930  sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
4930      -        caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode)
     4931 +    caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode)
4931 4932  {
4932 4933          tte_t tte, tteattr, tteflags, ttemod;
4933 4934          struct sf_hment *sfhmep;
4934 4935          int ttesz;
4935 4936          struct page *pp = NULL;
4936 4937          kmutex_t *pml, *pmtx;
4937 4938          int ret;
4938 4939          int use_demap_range;
4939 4940  #if defined(SF_ERRATA_57)
4940 4941          int check_exec;
↓ open down ↓ 307 lines elided ↑ open up ↑
5248 5249   * This function chgprots a range of addresses in an hmeblk.  It returns the
5249 5250   * next addres that needs to be chgprot.
5250 5251   * It should be called with the hash lock held.
5251 5252   * XXX It shold be possible to optimize chgprot by not flushing every time but
5252 5253   * on the other hand:
5253 5254   * 1. do one flush crosscall.
5254 5255   * 2. only flush if we are increasing permissions (make sure this will work)
5255 5256   */
5256 5257  static caddr_t
5257 5258  sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5258      -        caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
     5259 +    caddr_t endaddr, demap_range_t *dmrp, uint_t vprot)
5259 5260  {
5260 5261          uint_t pprot;
5261 5262          tte_t tte, ttemod;
5262 5263          struct sf_hment *sfhmep;
5263 5264          uint_t tteflags;
5264 5265          int ttesz;
5265 5266          struct page *pp = NULL;
5266 5267          kmutex_t *pml, *pmtx;
5267 5268          int ret;
5268 5269          int use_demap_range;
↓ open down ↓ 138 lines elided ↑ open up ↑
5407 5408          if ((vprot == 0) || (vprot == PROT_USER) ||
5408 5409              ((vprot & PROT_ALL) != vprot)) {
5409 5410                  panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5410 5411          }
5411 5412  
5412 5413          switch (vprot) {
5413 5414          case (PROT_READ):
5414 5415          case (PROT_EXEC):
5415 5416          case (PROT_EXEC | PROT_READ):
5416 5417                  *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5417      -                return (TTE_PRIV_INT);          /* set prv and clr wrt */
     5418 +                return (TTE_PRIV_INT);          /* set prv and clr wrt */
5418 5419          case (PROT_WRITE):
5419 5420          case (PROT_WRITE | PROT_READ):
5420 5421          case (PROT_EXEC | PROT_WRITE):
5421 5422          case (PROT_EXEC | PROT_WRITE | PROT_READ):
5422 5423                  *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5423      -                return (TTE_PRIV_INT | TTE_WRPRM_INT);  /* set prv and wrt */
     5424 +                return (TTE_PRIV_INT | TTE_WRPRM_INT);  /* set prv and wrt */
5424 5425          case (PROT_USER | PROT_READ):
5425 5426          case (PROT_USER | PROT_EXEC):
5426 5427          case (PROT_USER | PROT_EXEC | PROT_READ):
5427 5428                  *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT;
5428      -                return (0);                     /* clr prv and wrt */
     5429 +                return (0);                     /* clr prv and wrt */
5429 5430          case (PROT_USER | PROT_WRITE):
5430 5431          case (PROT_USER | PROT_WRITE | PROT_READ):
5431 5432          case (PROT_USER | PROT_EXEC | PROT_WRITE):
5432 5433          case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ):
5433 5434                  *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT;
5434      -                return (TTE_WRPRM_INT);         /* clr prv and set wrt */
     5435 +                return (TTE_WRPRM_INT);         /* clr prv and set wrt */
5435 5436          default:
5436 5437                  panic("sfmmu_vtop_prot -- bad prot %x", vprot);
5437 5438          }
5438 5439          return (0);
5439 5440  }
5440 5441  
5441 5442  /*
5442 5443   * Alternate unload for very large virtual ranges. With a true 64 bit VA,
5443 5444   * the normal algorithm would take too long for a very large VA range with
5444 5445   * few real mappings. This routine just walks thru all HMEs in the global
5445 5446   * hash table to find and remove mappings.
5446 5447   */
5447 5448  static void
5448      -hat_unload_large_virtual(
5449      -        struct hat              *sfmmup,
5450      -        caddr_t                 startaddr,
5451      -        size_t                  len,
5452      -        uint_t                  flags,
5453      -        hat_callback_t          *callback)
     5449 +hat_unload_large_virtual(struct hat *sfmmup, caddr_t startaddr, size_t len,
     5450 +    uint_t flags, hat_callback_t *callback)
5454 5451  {
5455 5452          struct hmehash_bucket *hmebp;
5456 5453          struct hme_blk *hmeblkp;
5457 5454          struct hme_blk *pr_hblk = NULL;
5458 5455          struct hme_blk *nx_hblk;
5459 5456          struct hme_blk *list = NULL;
5460 5457          int i;
5461 5458          demap_range_t dmr, *dmrp;
5462 5459          cpuset_t cpuset;
5463 5460          caddr_t endaddr = startaddr + len;
↓ open down ↓ 117 lines elided ↑ open up ↑
5581 5578   * Unload all the mappings in the range [addr..addr+len). addr and len must
5582 5579   * be MMU_PAGESIZE aligned.
5583 5580   */
5584 5581  
5585 5582  extern struct seg *segkmap;
5586 5583  #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \
5587 5584  segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size))
5588 5585  
5589 5586  
5590 5587  void
5591      -hat_unload_callback(
5592      -        struct hat *sfmmup,
5593      -        caddr_t addr,
5594      -        size_t len,
5595      -        uint_t flags,
5596      -        hat_callback_t *callback)
     5588 +hat_unload_callback(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags,
     5589 +    hat_callback_t *callback)
5597 5590  {
5598 5591          struct hmehash_bucket *hmebp;
5599 5592          hmeblk_tag hblktag;
5600 5593          int hmeshift, hashno, iskernel;
5601 5594          struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
5602 5595          caddr_t endaddr;
5603 5596          cpuset_t cpuset;
5604 5597          int addr_count = 0;
5605 5598          int a;
5606 5599          caddr_t cb_start_addr[MAX_CB_ADDR];
↓ open down ↓ 302 lines elided ↑ open up ↑
5909 5902          return (sz);
5910 5903  }
5911 5904  
5912 5905  /*
5913 5906   * This function unloads a range of addresses for an hmeblk.
5914 5907   * It returns the next address to be unloaded.
5915 5908   * It should be called with the hash lock held.
5916 5909   */
5917 5910  static caddr_t
5918 5911  sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
5919      -        caddr_t endaddr, demap_range_t *dmrp, uint_t flags)
     5912 +    caddr_t endaddr, demap_range_t *dmrp, uint_t flags)
5920 5913  {
5921 5914          tte_t   tte, ttemod;
5922 5915          struct  sf_hment *sfhmep;
5923 5916          int     ttesz;
5924 5917          long    ttecnt;
5925 5918          page_t *pp;
5926 5919          kmutex_t *pml;
5927 5920          int ret;
5928 5921          int use_demap_range;
5929 5922  
↓ open down ↓ 363 lines elided ↑ open up ↑
6293 6286                          hashno++;
6294 6287                  }
6295 6288          }
6296 6289          sfmmu_hblks_list_purge(&list, 0);
6297 6290          cpuset = sfmmup->sfmmu_cpusran;
6298 6291          xt_sync(cpuset);
6299 6292  }
6300 6293  
6301 6294  static caddr_t
6302 6295  sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr,
6303      -        caddr_t endaddr, int clearflag)
     6296 +    caddr_t endaddr, int clearflag)
6304 6297  {
6305 6298          tte_t   tte, ttemod;
6306 6299          struct sf_hment *sfhmep;
6307 6300          int ttesz;
6308 6301          struct page *pp;
6309 6302          kmutex_t *pml;
6310 6303          int ret;
6311 6304  
6312 6305          ASSERT(hmeblkp->hblk_shw_bit == 0);
6313 6306          ASSERT(!hmeblkp->hblk_shared);
↓ open down ↓ 54 lines elided ↑ open up ↑
6368 6361  /*
6369 6362   * This function will sync a tte to the page struct and it will
6370 6363   * update the hat stats. Currently it allows us to pass a NULL pp
6371 6364   * and we will simply update the stats.  We may want to change this
6372 6365   * so we only keep stats for pages backed by pp's.
6373 6366   */
6374 6367  static void
6375 6368  sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp)
6376 6369  {
6377 6370          uint_t rm = 0;
6378      -        int     sz;
     6371 +        int     sz;
6379 6372          pgcnt_t npgs;
6380 6373  
6381 6374          ASSERT(TTE_IS_VALID(ttep));
6382 6375  
6383 6376          if (TTE_IS_NOSYNC(ttep)) {
6384 6377                  return;
6385 6378          }
6386 6379  
6387 6380          if (TTE_IS_REF(ttep))  {
6388 6381                  rm = P_REF;
↓ open down ↓ 344 lines elided ↑ open up ↑
6733 6726  
6734 6727  #else   /* !DEBUG */
6735 6728  #define PAGE_RELOCATE_LOG(t, r, s, p)
6736 6729  #endif
6737 6730  
6738 6731  /*
6739 6732   * Core Kernel Page Relocation Algorithm
6740 6733   *
6741 6734   * Input:
6742 6735   *
6743      - * target :     constituent pages are SE_EXCL locked.
     6736 + * target :     constituent pages are SE_EXCL locked.
6744 6737   * replacement: constituent pages are SE_EXCL locked.
6745 6738   *
6746 6739   * Output:
6747 6740   *
6748 6741   * nrelocp:     number of pages relocated
6749 6742   */
6750 6743  int
6751 6744  hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp)
6752 6745  {
6753 6746          page_t          *targ, *repl;
↓ open down ↓ 725 lines elided ↑ open up ↑
7479 7472          xt_sync(cpuset);
7480 7473          sfmmu_mlist_exit(pml);
7481 7474          return (PP_GENERIC_ATTR(save_pp));
7482 7475  }
7483 7476  
7484 7477  /*
7485 7478   * Get all the hardware dependent attributes for a page struct
7486 7479   */
7487 7480  static cpuset_t
7488 7481  sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme,
7489      -        uint_t clearflag)
     7482 +    uint_t clearflag)
7490 7483  {
7491 7484          caddr_t addr;
7492 7485          tte_t tte, ttemod;
7493 7486          struct hme_blk *hmeblkp;
7494 7487          int ret;
7495 7488          sfmmu_t *sfmmup;
7496 7489          cpuset_t cpuset;
7497 7490  
7498 7491          ASSERT(pp != NULL);
7499 7492          ASSERT(sfmmu_mlist_held(pp));
↓ open down ↓ 846 lines elided ↑ open up ↑
8346 8339   * translations to a frame buffer with page structs.
8347 8340   * Also, it does not take sharing into account.
8348 8341   *
8349 8342   * Note that we don't acquire locks here since this function is most often
8350 8343   * called from the clock thread.
8351 8344   */
8352 8345  size_t
8353 8346  hat_get_mapped_size(struct hat *hat)
8354 8347  {
8355 8348          size_t          assize = 0;
8356      -        int             i;
     8349 +        int             i;
8357 8350  
8358 8351          if (hat == NULL)
8359 8352                  return (0);
8360 8353  
8361 8354          for (i = 0; i < mmu_page_sizes; i++)
8362 8355                  assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
8363 8356                      (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);
8364 8357  
8365 8358          if (hat->sfmmu_iblk == NULL)
8366 8359                  return (assize);
↓ open down ↓ 79 lines elided ↑ open up ↑
8446 8439   * when saddr and daddr are not properly aligned.
8447 8440   *
8448 8441   * The top level mapping element determines the alignment
8449 8442   * requirement for saddr and daddr, depending on different
8450 8443   * architectures.
8451 8444   *
8452 8445   * When hat_share()/unshare() are not supported,
8453 8446   * HATOP_SHARE()/UNSHARE() return 0
8454 8447   */
8455 8448  int
8456      -hat_share(struct hat *sfmmup, caddr_t addr,
8457      -        struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc)
     8449 +hat_share(struct hat *sfmmup, caddr_t addr, struct hat *ism_hatid,
     8450 +    caddr_t sptaddr, size_t len, uint_t ismszc)
8458 8451  {
8459 8452          ism_blk_t       *ism_blkp;
8460 8453          ism_blk_t       *new_iblk;
8461      -        ism_map_t       *ism_map;
     8454 +        ism_map_t       *ism_map;
8462 8455          ism_ment_t      *ism_ment;
8463 8456          int             i, added;
8464 8457          hatlock_t       *hatlockp;
8465 8458          int             reload_mmu = 0;
8466 8459          uint_t          ismshift = page_get_shift(ismszc);
8467 8460          size_t          ismpgsz = page_get_pagesize(ismszc);
8468 8461          uint_t          ismmask = (uint_t)ismpgsz - 1;
8469 8462          size_t          sh_size = ISM_SHIFT(ismshift, len);
8470 8463          ushort_t        ismhatflag;
8471 8464          hat_region_cookie_t rcookie;
↓ open down ↓ 204 lines elided ↑ open up ↑
8676 8669  }
8677 8670  
8678 8671  /*
8679 8672   * hat_unshare removes exactly one ism_map from
8680 8673   * this process's as.  It expects multiple calls
8681 8674   * to hat_unshare for multiple shm segments.
8682 8675   */
8683 8676  void
8684 8677  hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc)
8685 8678  {
8686      -        ism_map_t       *ism_map;
     8679 +        ism_map_t       *ism_map;
8687 8680          ism_ment_t      *free_ment = NULL;
8688 8681          ism_blk_t       *ism_blkp;
8689 8682          struct hat      *ism_hatid;
8690      -        int             found, i;
     8683 +        int             found, i;
8691 8684          hatlock_t       *hatlockp;
8692 8685          struct tsb_info *tsbinfo;
8693 8686          uint_t          ismshift = page_get_shift(ismszc);
8694 8687          size_t          sh_size = ISM_SHIFT(ismshift, len);
8695 8688          uchar_t         ism_rid;
8696 8689          sf_scd_t        *old_scdp;
8697 8690  
8698 8691          ASSERT(ISM_ALIGNED(ismshift, addr));
8699 8692          ASSERT(ISM_ALIGNED(ismshift, len));
8700 8693          ASSERT(sfmmup != NULL);
↓ open down ↓ 585 lines elided ↑ open up ↑
9286 9279   * 8k pages.
9287 9280   */
9288 9281  int
9289 9282  tst_tnc(page_t *pp, pgcnt_t npages)
9290 9283  {
9291 9284          struct  sf_hment *sfhme;
9292 9285          struct  hme_blk *hmeblkp;
9293 9286          tte_t   tte;
9294 9287          caddr_t vaddr;
9295 9288          int     clr_valid = 0;
9296      -        int     color, color1, bcolor;
     9289 +        int     color, color1, bcolor;
9297 9290          int     i, ncolors;
9298 9291  
9299 9292          ASSERT(pp != NULL);
9300 9293          ASSERT(!(cache & CACHE_WRITEBACK));
9301 9294  
9302 9295          if (npages > 1) {
9303 9296                  ncolors = CACHE_NUM_COLOR;
9304 9297          }
9305 9298  
9306 9299          for (i = 0; i < npages; i++) {
↓ open down ↓ 49 lines elided ↑ open up ↑
9356 9349                  }
9357 9350  
9358 9351                  pp = PP_PAGENEXT(pp);
9359 9352          }
9360 9353  
9361 9354          return (1);
9362 9355  }
9363 9356  
9364 9357  void
9365 9358  sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag,
9366      -        pgcnt_t npages)
     9359 +    pgcnt_t npages)
9367 9360  {
9368 9361          kmutex_t *pmtx;
9369 9362          int i, ncolors, bcolor;
9370 9363          kpm_hlk_t *kpmp;
9371 9364          cpuset_t cpuset;
9372 9365  
9373 9366          ASSERT(pp != NULL);
9374 9367          ASSERT(!(cache & CACHE_WRITEBACK));
9375 9368  
9376 9369          kpmp = sfmmu_kpm_kpmp_enter(pp, npages);
↓ open down ↓ 778 lines elided ↑ open up ↑
10155 10148           * finished at this point.
10156 10149           */
10157 10150          if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) {
10158 10151                  return;
10159 10152          }
10160 10153          sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh);
10161 10154  }
10162 10155  
10163 10156  static void
10164 10157  sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt,
10165      -        uint64_t tte4m_cnt, int sectsb_thresh)
     10158 +    uint64_t tte4m_cnt, int sectsb_thresh)
10166 10159  {
10167 10160          int tsb_bits;
10168 10161          uint_t tsb_szc;
10169 10162          struct tsb_info *tsbinfop;
10170 10163          hatlock_t *hatlockp = NULL;
10171 10164  
10172 10165          hatlockp = sfmmu_hat_enter(sfmmup);
10173 10166          ASSERT(hatlockp != NULL);
10174 10167          tsbinfop = sfmmup->sfmmu_tsb;
10175 10168          ASSERT(tsbinfop != NULL);
↓ open down ↓ 120 lines elided ↑ open up ↑
10296 10289   * Free up a sfmmu
10297 10290   * Since the sfmmu is currently embedded in the hat struct we simply zero
10298 10291   * out our fields and free up the ism map blk list if any.
10299 10292   */
10300 10293  static void
10301 10294  sfmmu_free_sfmmu(sfmmu_t *sfmmup)
10302 10295  {
10303 10296          ism_blk_t       *blkp, *nx_blkp;
10304 10297  #ifdef  DEBUG
10305 10298          ism_map_t       *map;
10306      -        int             i;
     10299 +        int             i;
10307 10300  #endif
10308 10301  
10309 10302          ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
10310 10303          ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
10311 10304          ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
10312 10305          ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
10313 10306          ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
10314 10307          ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
10315 10308          ASSERT(SF_RGNMAP_ISNULL(sfmmup));
10316 10309  
↓ open down ↓ 552 lines elided ↑ open up ↑
10869 10862   * inversion from occurring, so we must request kernel priority in
10870 10863   * case we have to sleep to keep from getting buried while holding
10871 10864   * the HAT_ISMBUSY flag set, which in turn could block other kernel
10872 10865   * threads from running (for example, in sfmmu_uvatopfn()).
10873 10866   */
10874 10867  static void
10875 10868  sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held)
10876 10869  {
10877 10870          hatlock_t *hatlockp;
10878 10871  
10879      -        THREAD_KPRI_REQUEST();
10880 10872          if (!hatlock_held)
10881 10873                  hatlockp = sfmmu_hat_enter(sfmmup);
10882 10874          while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY))
10883 10875                  cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp));
10884 10876          SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY);
10885 10877          if (!hatlock_held)
10886 10878                  sfmmu_hat_exit(hatlockp);
10887 10879  }
10888 10880  
10889 10881  static void
↓ open down ↓ 1 lines elided ↑ open up ↑
10891 10883  {
10892 10884          hatlock_t *hatlockp;
10893 10885  
10894 10886          if (!hatlock_held)
10895 10887                  hatlockp = sfmmu_hat_enter(sfmmup);
10896 10888          ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY));
10897 10889          SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY);
10898 10890          cv_broadcast(&sfmmup->sfmmu_tsb_cv);
10899 10891          if (!hatlock_held)
10900 10892                  sfmmu_hat_exit(hatlockp);
10901      -        THREAD_KPRI_RELEASE();
10902 10893  }
10903 10894  
10904 10895  /*
10905 10896   *
10906 10897   * Algorithm:
10907 10898   *
10908 10899   * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed
10909 10900   *      hblks.
10910 10901   *
10911 10902   * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache,
10912 10903   *
10913      - *              (a) try to return an hblk from reserve pool of free hblks;
     10904 + *              (a) try to return an hblk from reserve pool of free hblks;
10914 10905   *              (b) if the reserve pool is empty, acquire hblk_reserve_lock
10915 10906   *                  and return hblk_reserve.
10916 10907   *
10917 10908   * (3) call kmem_cache_alloc() to allocate hblk;
10918 10909   *
10919 10910   *              (a) if hblk_reserve_lock is held by the current thread,
10920 10911   *                  atomically replace hblk_reserve by the hblk that is
10921 10912   *                  returned by kmem_cache_alloc; release hblk_reserve_lock
10922 10913   *                  and call kmem_cache_alloc() again.
10923 10914   *              (b) if reserve pool is not full, add the hblk that is
10924 10915   *                  returned by kmem_cache_alloc to reserve pool and
10925 10916   *                  call kmem_cache_alloc again.
10926 10917   *
10927 10918   */
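
                [Illustrative sketch, not part of the diff] The steps above amount to a
                reserve-pool allocator: take from a small freelist when possible,
                otherwise fall back to the general-purpose allocator, and top the
                freelist back up on free. A minimal user-level sketch of that shape,
                with hypothetical names (blk_t, res_head, res_cnt, RES_MAX) and the
                hblk_reserve recursion handling omitted:

                #include <stdlib.h>
                #include <pthread.h>

                typedef struct blk { struct blk *next; } blk_t;

                static blk_t *res_head;         /* reserve list of free blocks */
                static int res_cnt;
                static pthread_mutex_t res_lock = PTHREAD_MUTEX_INITIALIZER;
                #define RES_MAX 128             /* assumed pool size, for illustration */

                static blk_t *
                blk_alloc_sketch(void)
                {
                        blk_t *bp;

                        pthread_mutex_lock(&res_lock);
                        if ((bp = res_head) != NULL) {  /* (2a) take from the reserve pool */
                                res_head = bp->next;
                                res_cnt--;
                                pthread_mutex_unlock(&res_lock);
                                return (bp);
                        }
                        pthread_mutex_unlock(&res_lock);
                        return (malloc(sizeof (blk_t))); /* (3) fall back to the allocator */
                }

                static void
                blk_free_sketch(blk_t *bp)
                {
                        pthread_mutex_lock(&res_lock);
                        if (res_cnt < RES_MAX) {        /* (3b) top up the reserve pool */
                                bp->next = res_head;
                                res_head = bp;
                                res_cnt++;
                                pthread_mutex_unlock(&res_lock);
                                return;
                        }
                        pthread_mutex_unlock(&res_lock);
                        free(bp);
                }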
10928 10919  static struct hme_blk *
10929 10920  sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr,
10930      -        struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag,
10931      -        uint_t flags, uint_t rid)
     10921 +    struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag,
     10922 +    uint_t flags, uint_t rid)
10932 10923  {
10933 10924          struct hme_blk *hmeblkp = NULL;
10934 10925          struct hme_blk *newhblkp;
10935 10926          struct hme_blk *shw_hblkp = NULL;
10936 10927          struct kmem_cache *sfmmu_cache = NULL;
10937 10928          uint64_t hblkpa;
10938 10929          ulong_t index;
10939 10930          uint_t owner;           /* set to 1 if using hblk_reserve */
10940 10931          uint_t forcefree;
10941 10932          int sleep;
↓ open down ↓ 486 lines elided ↑ open up ↑
11428 11419  
11429 11420  /*
11430 11421   * This routine does real work to prepare a hblk to be "stolen" by
11431 11422   * unloading the mappings, updating shadow counts ....
11432 11423   * It returns 1 if the block is ready to be reused (stolen), or 0
11433 11424   * It returns 1 if the block is ready to be reused (stolen), or 0
11434 11425   * if the block cannot be stolen yet because pageunload is still
11435 11426   * working on this hblk.
11436 11427  static int
11437 11428  sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
11438      -        uint64_t hblkpa, struct hme_blk *pr_hblk)
     11429 +    uint64_t hblkpa, struct hme_blk *pr_hblk)
11439 11430  {
11440 11431          int shw_size, vshift;
11441 11432          struct hme_blk *shw_hblkp;
11442 11433          caddr_t vaddr;
11443 11434          uint_t shw_mask, newshw_mask;
11444 11435          struct hme_blk *list = NULL;
11445 11436  
11446 11437          ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
11447 11438  
11448 11439          /*
↓ open down ↓ 604 lines elided ↑ open up ↑
12053 12044  
12054 12045  /*
12055 12046   * Special routine to flush out ism mappings- TSBs, TLBs and D-caches.
12056 12047   * This routine may be called with all cpu's captured. Therefore, the
12057 12048   * caller is responsible for holding all locks and disabling kernel
12058 12049   * preemption.
12059 12050   */
12060 12051  /* ARGSUSED */
12061 12052  static void
12062 12053  sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
12063      -        struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
     12054 +    struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
12064 12055  {
12065      -        cpuset_t        cpuset;
12066      -        caddr_t         va;
     12056 +        cpuset_t        cpuset;
     12057 +        caddr_t         va;
12067 12058          ism_ment_t      *ment;
12068 12059          sfmmu_t         *sfmmup;
12069 12060  #ifdef VAC
12070      -        int             vcolor;
     12061 +        int             vcolor;
12071 12062  #endif
12072 12063  
12073 12064          sf_scd_t        *scdp;
12074 12065          uint_t          ism_rid;
12075 12066  
12076 12067          ASSERT(!hmeblkp->hblk_shared);
12077 12068          /*
12078 12069           * Walk the ism_hat's mapping list and flush the page
12079 12070           * from every hat sharing this ism_hat. This routine
12080 12071           * may be called while all cpu's have been captured.
↓ open down ↓ 69 lines elided ↑ open up ↑
12150 12141  }
12151 12142  
12152 12143  /*
12153 12144   * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of
12154 12145   * a particular virtual address and ctx.  If noflush is set we do not
12155 12146   * flush the TLB/TSB.  This function may or may not be called with the
12156 12147   * HAT lock held.
12157 12148   */
12158 12149  static void
12159 12150  sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12160      -        pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
12161      -        int hat_lock_held)
     12151 +    pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag,
     12152 +    int hat_lock_held)
12162 12153  {
12163 12154  #ifdef VAC
12164 12155          int vcolor;
12165 12156  #endif
12166 12157          cpuset_t cpuset;
12167 12158          hatlock_t *hatlockp;
12168 12159  
12169 12160          ASSERT(!hmeblkp->hblk_shared);
12170 12161  
12171 12162  #if defined(lint) && !defined(VAC)
↓ open down ↓ 67 lines elided ↑ open up ↑
12239 12230          kpreempt_enable();
12240 12231  }
12241 12232  
12242 12233  /*
12243 12234   * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual
12244 12235   * address and ctx.  If noflush is set we do not currently do anything.
12245 12236   * This function may or may not be called with the HAT lock held.
12246 12237   */
12247 12238  static void
12248 12239  sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp,
12249      -        int tlb_noflush, int hat_lock_held)
     12240 +    int tlb_noflush, int hat_lock_held)
12250 12241  {
12251 12242          cpuset_t cpuset;
12252 12243          hatlock_t *hatlockp;
12253 12244  
12254 12245          ASSERT(!hmeblkp->hblk_shared);
12255 12246  
12256 12247          /*
12257 12248           * If the process is exiting we have nothing to do.
12258 12249           */
12259 12250          if (tlb_noflush)
↓ open down ↓ 176 lines elided ↑ open up ↑
12436 12427          ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
12437 12428  
12438 12429          currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
12439 12430  
12440 12431          pstate_save = sfmmu_disable_intrs();
12441 12432  
12442 12433          lock_set(&sfmmup->sfmmu_ctx_lock);      /* acquire PP lock */
12443 12434          /* set HAT cnum invalid across all context domains. */
12444 12435          for (i = 0; i < max_mmu_ctxdoms; i++) {
12445 12436  
12446      -                cnum =  sfmmup->sfmmu_ctxs[i].cnum;
     12437 +                cnum = sfmmup->sfmmu_ctxs[i].cnum;
12447 12438                  if (cnum == INVALID_CONTEXT) {
12448 12439                          continue;
12449 12440                  }
12450 12441  
12451 12442                  sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
12452 12443          }
12453 12444          membar_enter(); /* make sure globally visible to all CPUs */
12454 12445          lock_clear(&sfmmup->sfmmu_ctx_lock);    /* release PP lock */
12455 12446  
12456 12447          sfmmu_enable_intrs(pstate_save);
↓ open down ↓ 168 lines elided ↑ open up ↑
12625 12616                          membar_consumer();
12626 12617                  }
12627 12618          }
12628 12619  
12629 12620          sfmmu_invalidate_ctx(sfmmup);
12630 12621  }
12631 12622  
12632 12623  /* ARGSUSED */
12633 12624  static int
12634 12625  sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags,
12635      -        void *tsbinfo, pfn_t newpfn)
     12626 +    void *tsbinfo, pfn_t newpfn)
12636 12627  {
12637 12628          hatlock_t *hatlockp;
12638 12629          struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo;
12639 12630          sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu;
12640 12631  
12641 12632          if (flags != HAT_POSTUNSUSPEND)
12642 12633                  return (0);
12643 12634  
12644 12635          hatlockp = sfmmu_hat_enter(sfmmup);
12645 12636  
↓ open down ↓ 23 lines elided ↑ open up ↑
12669 12660  
12670 12661          return (0);
12671 12662  }
12672 12663  
12673 12664  /*
12674 12665   * Allocate and initialize a tsb_info structure.  Note that we may or may not
12675 12666   * allocate a TSB here, depending on the flags passed in.
12676 12667   */
12677 12668  static int
12678 12669  sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask,
12679      -        uint_t flags, sfmmu_t *sfmmup)
     12670 +    uint_t flags, sfmmu_t *sfmmup)
12680 12671  {
12681 12672          int err;
12682 12673  
12683 12674          *tsbinfopp = (struct tsb_info *)kmem_cache_alloc(
12684 12675              sfmmu_tsbinfo_cache, KM_SLEEP);
12685 12676  
12686 12677          if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask,
12687 12678              tsb_szc, flags, sfmmup)) != 0) {
12688 12679                  kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp);
12689 12680                  SFMMU_STAT(sf_tsb_allocfail);
↓ open down ↓ 1109 lines elided ↑ open up ↑
13799 13790                                          srd_rgn_hashmask)
13800 13791  /*
13801 13792   * This routine implements the shared context functionality required when
13802 13793   * attaching a segment to an address space. It must be called from
13803 13794   * hat_share() for D(ISM) segments and from segvn_create() for segments
13804 13795   * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie
13805 13796   * which is saved in the private segment data for hme segments and
13806 13797   * the ism_map structure for ism segments.
13807 13798   */
13808 13799  hat_region_cookie_t
13809      -hat_join_region(struct hat *sfmmup,
13810      -        caddr_t r_saddr,
13811      -        size_t r_size,
13812      -        void *r_obj,
13813      -        u_offset_t r_objoff,
13814      -        uchar_t r_perm,
13815      -        uchar_t r_pgszc,
13816      -        hat_rgn_cb_func_t r_cb_function,
13817      -        uint_t flags)
     13800 +hat_join_region(struct hat *sfmmup, caddr_t r_saddr, size_t r_size,
     13801 +    void *r_obj, u_offset_t r_objoff, uchar_t r_perm, uchar_t r_pgszc,
     13802 +    hat_rgn_cb_func_t r_cb_function, uint_t flags)
13818 13803  {
13819 13804          sf_srd_t *srdp = sfmmup->sfmmu_srdp;
13820 13805          uint_t rhash;
13821 13806          uint_t rid;
13822 13807          hatlock_t *hatlockp;
13823 13808          sf_region_t *rgnp;
13824 13809          sf_region_t *new_rgnp = NULL;
13825 13810          int i;
13826 13811          uint16_t *nextidp;
13827 13812          sf_region_t **freelistp;
↓ open down ↓ 1543 lines elided ↑ open up ↑
15371 15356   * freeing process is to cross-call all cpus to ensure that there are no
15372 15357   * remaining cached references.
15373 15358   *
15374 15359   * If the local generation number is less than the global then we can free
15375 15360   * hmeblks which are already on the pending queue as another cpu has completed
15376 15361   * the cross-call.
15377 15362   *
15378 15363   * We cross-call to make sure that there are no threads on other cpus accessing
15379 15364   * these hmblks and then complete the process of freeing them under the
15380 15365   * following conditions:
15381      - *      The total number of pending hmeblks is greater than the threshold
     15366 + *      The total number of pending hmeblks is greater than the threshold
15382 15367   *      The reserve list has fewer than HBLK_RESERVE_CNT hmeblks
15383 15368   *      It is at least 1 second since the last time we cross-called
15384 15369   *
15385 15370   * Otherwise, we add the hmeblks to the per-cpu pending queue.
15386 15371   */
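
                [Illustrative sketch, not part of the diff] A minimal sketch of that
                three-way test, with hypothetical parameter names standing in for the
                counters and tunables this file actually uses:

                #include <time.h>

                static int
                should_cross_call_sketch(int pending_cnt, int pending_threshold,
                    int reserve_cnt, int reserve_min, time_t last_xcall)
                {
                        if (pending_cnt > pending_threshold)    /* too many pending hmeblks */
                                return (1);
                        if (reserve_cnt < reserve_min)          /* reserve list running low */
                                return (1);
                        if (time(NULL) - last_xcall >= 1)       /* >= 1s since last cross-call */
                                return (1);
                        return (0);     /* otherwise queue on the per-cpu pending list */
                }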
15387 15372  static void
15388 15373  sfmmu_hblks_list_purge(struct hme_blk **listp, int dontfree)
15389 15374  {
15390 15375          struct hme_blk *hblkp, *pr_hblkp = NULL;
15391 15376          int             count = 0;
↓ open down ↓ 57 lines elided ↑ open up ↑
15449 15434          } else {
15450 15435                  mutex_exit(&cpuhp->chp_mutex);
15451 15436          }
15452 15437  }
15453 15438  
15454 15439  /*
15455 15440   * Add an hmeblk to the hash list.
15456 15441   */
15457 15442  void
15458 15443  sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15459      -        uint64_t hblkpa)
     15444 +    uint64_t hblkpa)
15460 15445  {
15461 15446          ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15462 15447  #ifdef  DEBUG
15463 15448          if (hmebp->hmeblkp == NULL) {
15464 15449                  ASSERT(hmebp->hmeh_nextpa == HMEBLK_ENDPA);
15465 15450          }
15466 15451  #endif /* DEBUG */
15467 15452  
15468 15453          hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa;
15469 15454          /*
↓ open down ↓ 35 lines elided ↑ open up ↑
15505 15490   *       listp - pointer to list of hmeblks linked by virtual address
15506 15491   *       free_now flag - indicates that a complete removal from the hash chains
15507 15492   *                       is necessary.
15508 15493   *
15509 15494   * It is inefficient to use the free_now flag as a cross-call is required to
15510 15495   * remove a single hmeblk from the hash chain but is necessary when hmeblks are
15511 15496   * in short supply.
15512 15497   */
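
                [Illustrative sketch, not part of the diff] The pr_hblk argument is the
                usual predecessor-pointer idiom for unlinking a node from a singly
                linked bucket chain. A minimal sketch of that shape, with hypothetical
                names (node_t, bucketp), not the hblk code itself:

                typedef struct node { struct node *next; } node_t;

                static void
                chain_remove_sketch(node_t **bucketp, node_t *np, node_t *prev)
                {
                        if (prev == NULL)       /* np is at the head of the bucket */
                                *bucketp = np->next;
                        else                    /* splice np out behind its predecessor */
                                prev->next = np->next;
                        np->next = NULL;
                }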
15513 15498  void
15514 15499  sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp,
15515      -    struct hme_blk *pr_hblk, struct hme_blk **listp,
15516      -    int free_now)
     15500 +    struct hme_blk *pr_hblk, struct hme_blk **listp, int free_now)
15517 15501  {
15518 15502          int shw_size, vshift;
15519 15503          struct hme_blk *shw_hblkp;
15520 15504          uint_t          shw_mask, newshw_mask;
15521 15505          caddr_t         vaddr;
15522 15506          int             size;
15523 15507          cpuset_t cpuset = cpu_ready_set;
15524 15508  
15525 15509          ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));
15526 15510  
↓ open down ↓ 128 lines elided ↑ open up ↑