8956 Implement KPTI
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>

          --- old/usr/src/uts/i86pc/vm/htable.c
          +++ new/usr/src/uts/i86pc/vm/htable.c
... 14 lines elided ...
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22   22  /*
  23   23   * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  24   24   * Copyright (c) 2014 by Delphix. All rights reserved.
  25      - * Copyright 2015 Joyent, Inc.
       25 + * Copyright 2018 Joyent, Inc.
  26   26   */
  27   27  
  28   28  #include <sys/types.h>
  29   29  #include <sys/sysmacros.h>
  30   30  #include <sys/kmem.h>
  31   31  #include <sys/atomic.h>
  32   32  #include <sys/bitmap.h>
  33   33  #include <sys/machparam.h>
  34   34  #include <sys/machsystm.h>
  35   35  #include <sys/mman.h>
... 94 lines elided ...
 130  130  /*
 131  131   * Deal with hypervisor complications.
 132  132   */
 133  133  void
 134  134  xen_flush_va(caddr_t va)
 135  135  {
 136  136          struct mmuext_op t;
 137  137          uint_t count;
 138  138  
 139  139          if (IN_XPV_PANIC()) {
 140      -                mmu_tlbflush_entry((caddr_t)va);
      140 +                mmu_flush_tlb_page((uintptr_t)va);
 141  141          } else {
 142  142                  t.cmd = MMUEXT_INVLPG_LOCAL;
 143  143                  t.arg1.linear_addr = (uintptr_t)va;
 144  144                  if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
 145  145                          panic("HYPERVISOR_mmuext_op() failed");
 146  146                  ASSERT(count == 1);
 147  147          }
 148  148  }
 149  149  
 150  150  void
 151  151  xen_gflush_va(caddr_t va, cpuset_t cpus)
 152  152  {
 153  153          struct mmuext_op t;
 154  154          uint_t count;
 155  155  
 156  156          if (IN_XPV_PANIC()) {
 157      -                mmu_tlbflush_entry((caddr_t)va);
      157 +                mmu_flush_tlb_page((uintptr_t)va);
 158  158                  return;
 159  159          }
 160  160  
 161  161          t.cmd = MMUEXT_INVLPG_MULTI;
 162  162          t.arg1.linear_addr = (uintptr_t)va;
 163  163          /*LINTED: constant in conditional context*/
 164  164          set_xen_guest_handle(t.arg2.vcpumask, &cpus);
 165  165          if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
 166  166                  panic("HYPERVISOR_mmuext_op() failed");
 167  167          ASSERT(count == 1);
... 446 lines elided ...
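
The hunks above replace the old single-entry TLB flush, mmu_tlbflush_entry(caddr_t), with the page-granular interfaces used by the KPTI work: mmu_flush_tlb_page() for an arbitrary virtual address, and mmu_flush_tlb_kpage() (seen further down in this file) for the per-CPU pagetable-window mappings. The following is only a minimal sketch of how a caller changes; the prototype is assumed from the call sites in this diff, and demap_one() is a hypothetical helper, not part of the change.

    extern void mmu_flush_tlb_page(uintptr_t va);   /* assumed prototype: flush one page mapping */

    static void
    demap_one(caddr_t va)
    {
            /* old interface: mmu_tlbflush_entry(va); */
            mmu_flush_tlb_page((uintptr_t)va);
    }
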
 614  614                           * Skip any hat that is already being stolen from.
 615  615                           *
 616  616                           * We skip SHARED hats, as these are dummy
 617  617                           * hats that host ISM shared page tables.
 618  618                           *
 619  619                           * We also skip if HAT_FREEING because hat_pte_unmap()
 620  620                           * won't zero out the PTE's. That would lead to hitting
 621  621                           * stale PTEs either here or under hat_unload() when we
 622  622                           * steal and unload the same page table in competing
 623  623                           * threads.
      624 +                         *
      625 +                         * We skip HATs that belong to CPUs, to make our lives
      626 +                         * simpler.
 624  627                           */
 625      -                        while (hat != NULL &&
 626      -                            (hat->hat_flags &
 627      -                            (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0)
      628 +                        while (hat != NULL && (hat->hat_flags &
      629 +                            (HAT_VICTIM | HAT_SHARED | HAT_FREEING |
      630 +                            HAT_PCP)) != 0) {
 628  631                                  hat = hat->hat_next;
      632 +                        }
 629  633  
 630  634                          if (hat == NULL)
 631  635                                  break;
 632  636  
 633  637                          /*
 634  638                           * Mark the HAT as a stealing victim so that it is
 635  639                           * not freed from under us, e.g. in as_free()
 636  640                           */
 637  641                          hat->hat_flags |= HAT_VICTIM;
 638  642                          mutex_exit(&hat_list_lock);
... 22 lines elided ...
 661  665                           * do synchronous teardown for the reap case so that
 662  666                           * we can forget hat; at this time, hat is
 663  667                           * guaranteed to be around because HAT_VICTIM is set
 664  668                           * (see htable_free() for similar code)
 665  669                           */
 666  670                          for (ht = list; (ht) && (reap); ht = ht->ht_next) {
 667  671                                  if (ht->ht_hat == NULL)
 668  672                                          continue;
 669  673                                  ASSERT(ht->ht_hat == hat);
 670  674  #if defined(__xpv) && defined(__amd64)
 671      -                                if (!(ht->ht_flags & HTABLE_VLP) &&
 672      -                                    ht->ht_level == mmu.max_level) {
      675 +                                ASSERT(!(ht->ht_flags & HTABLE_COPIED));
      676 +                                if (ht->ht_level == mmu.max_level) {
 673  677                                          ptable_free(hat->hat_user_ptable);
 674  678                                          hat->hat_user_ptable = PFN_INVALID;
 675  679                                  }
 676  680  #endif
 677  681                                  /*
 678  682                                   * forget the hat
 679  683                                   */
 680  684                                  ht->ht_hat = NULL;
 681  685                          }
 682  686  
... 89 lines elided ...
 772  776   * Allocate an htable, stealing one or using the reserve if necessary
 773  777   */
 774  778  static htable_t *
 775  779  htable_alloc(
 776  780          hat_t           *hat,
 777  781          uintptr_t       vaddr,
 778  782          level_t         level,
 779  783          htable_t        *shared)
 780  784  {
 781  785          htable_t        *ht = NULL;
 782      -        uint_t          is_vlp;
      786 +        uint_t          is_copied;
 783  787          uint_t          is_bare = 0;
 784  788          uint_t          need_to_zero = 1;
 785  789          int             kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP);
 786  790  
 787  791          if (level < 0 || level > TOP_LEVEL(hat))
 788  792                  panic("htable_alloc(): level %d out of range\n", level);
 789  793  
 790      -        is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL;
 791      -        if (is_vlp || shared != NULL)
      794 +        is_copied = (hat->hat_flags & HAT_COPIED) &&
      795 +            level == hat->hat_max_level;
      796 +        if (is_copied || shared != NULL)
 792  797                  is_bare = 1;
 793  798  
 794  799          /*
 795  800           * First reuse a cached htable from the hat_ht_cached field, this
 796  801           * avoids unnecessary trips through kmem/page allocators.
 797  802           */
 798  803          if (hat->hat_ht_cached != NULL && !is_bare) {
 799  804                  hat_enter(hat);
 800  805                  ht = hat->hat_ht_cached;
 801  806                  if (ht != NULL) {
... 121 lines elided ...
 923  928                  ht->ht_valid_cnt = 0;           /* updated in hat_share() */
 924  929                  ht->ht_shares = shared;
 925  930                  need_to_zero = 0;
 926  931          } else {
 927  932                  ht->ht_shares = NULL;
 928  933                  ht->ht_lock_cnt = 0;
 929  934                  ht->ht_valid_cnt = 0;
 930  935          }
 931  936  
 932  937          /*
 933      -         * setup flags, etc. for VLP htables
      938 +         * setup flags, etc. for copied page tables.
 934  939           */
 935      -        if (is_vlp) {
 936      -                ht->ht_flags |= HTABLE_VLP;
      940 +        if (is_copied) {
      941 +                ht->ht_flags |= HTABLE_COPIED;
 937  942                  ASSERT(ht->ht_pfn == PFN_INVALID);
 938  943                  need_to_zero = 0;
 939  944          }
 940  945  
 941  946          /*
 942  947           * fill in the htable
 943  948           */
 944  949          ht->ht_hat = hat;
 945  950          ht->ht_parent = NULL;
 946  951          ht->ht_vaddr = vaddr;
... 30 lines elided ...
 977  982  
 978  983          /*
 979  984           * If the process isn't exiting, cache the free htable in the hat
 980  985           * structure. We always do this for the boot time reserve. We don't
 981  986           * do this if the hat is exiting or we are stealing/reaping htables.
 982  987           */
 983  988          if (hat != NULL &&
 984  989              !(ht->ht_flags & HTABLE_SHARED_PFN) &&
 985  990              (use_boot_reserve ||
 986  991              (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) {
 987      -                ASSERT((ht->ht_flags & HTABLE_VLP) == 0);
      992 +                ASSERT((ht->ht_flags & HTABLE_COPIED) == 0);
 988  993                  ASSERT(ht->ht_pfn != PFN_INVALID);
 989  994                  hat_enter(hat);
 990  995                  ht->ht_next = hat->hat_ht_cached;
 991  996                  hat->hat_ht_cached = ht;
 992  997                  hat_exit(hat);
 993  998                  return;
 994  999          }
 995 1000  
 996 1001          /*
 997 1002           * If we have a hardware page table, free it.
 998 1003           * We don't free page tables that are accessed by sharing.
 999 1004           */
1000 1005          if (ht->ht_flags & HTABLE_SHARED_PFN) {
1001 1006                  ASSERT(ht->ht_pfn != PFN_INVALID);
1002      -        } else if (!(ht->ht_flags & HTABLE_VLP)) {
     1007 +        } else if (!(ht->ht_flags & HTABLE_COPIED)) {
1003 1008                  ptable_free(ht->ht_pfn);
1004 1009  #if defined(__amd64) && defined(__xpv)
1005 1010                  if (ht->ht_level == mmu.max_level && hat != NULL) {
1006 1011                          ptable_free(hat->hat_user_ptable);
1007 1012                          hat->hat_user_ptable = PFN_INVALID;
1008 1013                  }
1009 1014  #endif
1010 1015          }
1011 1016          ht->ht_pfn = PFN_INVALID;
1012 1017  
... 91 lines elided ...
1104 1109           * pagetables from the upper page table. So allow PTP to be 0 already.
1105 1110           */
1106 1111          if (found != expect && found != 0)
1107 1112  #else
1108 1113          if (found != expect)
1109 1114  #endif
1110 1115                  panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
1111 1116                      found, expect);
1112 1117  
1113 1118          /*
1114      -         * When a top level VLP page table entry changes, we must issue
1115      -         * a reload of cr3 on all processors.
     1119 +         * When a top level PTE changes for a copied htable, we must trigger a
     1120 +         * hat_pcp_update() on all HAT CPUs.
1116 1121           *
1117      -         * If we don't need do do that, then we still have to INVLPG against
1118      -         * an address covered by the inner page table, as the latest processors
     1122 +         * If we don't need to do that, then we still have to INVLPG against an
     1123 +         * address covered by the inner page table, as the latest processors
1119 1124           * have TLB-like caches for non-leaf page table entries.
1120 1125           */
1121 1126          if (!(hat->hat_flags & HAT_FREEING)) {
1122      -                hat_tlb_inval(hat, (higher->ht_flags & HTABLE_VLP) ?
     1127 +                hat_tlb_inval(hat, (higher->ht_flags & HTABLE_COPIED) ?
1123 1128                      DEMAP_ALL_ADDR : old->ht_vaddr);
1124 1129          }
1125 1130  
1126 1131          HTABLE_DEC(higher->ht_valid_cnt);
1127 1132  }
1128 1133  
1129 1134  /*
1130 1135   * Link an entry for a new table at vaddr and level into the existing table
1131 1136   * one level higher. We are always holding the HASH_ENTER() when doing this.
1132 1137   */
... 8 lines elided ...
1141 1146  
1142 1147          ASSERT(new->ht_level != mmu.max_level);
1143 1148  
1144 1149          HTABLE_INC(higher->ht_valid_cnt);
1145 1150  
1146 1151          found = x86pte_cas(higher, entry, 0, newptp);
1147 1152          if ((found & ~PT_REF) != 0)
1148 1153                  panic("HAT: ptp not 0, found=" FMT_PTE, found);
1149 1154  
1150 1155          /*
1151      -         * When any top level VLP page table entry changes, we must issue
1152      -         * a reload of cr3 on all processors using it.
     1156 +         * When a top level PTE changes for a copied htable, we must trigger a
     1157 +         * hat_pcp_update() on all HAT CPUs.
     1158 +         *
1153 1159           * We also need to do this for the kernel hat on PAE 32 bit kernel.
1154 1160           */
1155 1161          if (
1156 1162  #ifdef __i386
1157      -            (higher->ht_hat == kas.a_hat && higher->ht_level == VLP_LEVEL) ||
     1163 +            (higher->ht_hat == kas.a_hat &&
     1164 +            higher->ht_level == higher->ht_hat->hat_max_level) ||
1158 1165  #endif
1159      -            (higher->ht_flags & HTABLE_VLP))
     1166 +            (higher->ht_flags & HTABLE_COPIED))
1160 1167                  hat_tlb_inval(higher->ht_hat, DEMAP_ALL_ADDR);
1161 1168  }
1162 1169  
1163 1170  /*
1164 1171   * Release of hold on an htable. If this is the last use and the pagetable
1165 1172   * is empty we may want to free it, then recursively look at the pagetable
1166 1173   * above it. The recursion is handled by the outer while() loop.
1167 1174   *
1168 1175   * On the metal, during process exit, we don't bother unlinking the tables from
1169 1176   * upper level pagetables. They are instead handled in bulk by hat_free_end().
... 118 lines elided ...
1288 1295  
1289 1296          ASSERT(level >= 0);
1290 1297          ASSERT(level <= TOP_LEVEL(hat));
1291 1298  
1292 1299          if (level == TOP_LEVEL(hat)) {
1293 1300  #if defined(__amd64)
1294 1301                  /*
1295 1302                   * 32 bit address spaces on 64 bit kernels need to check
1296 1303                   * for overflow of the 32 bit address space
1297 1304                   */
1298      -                if ((hat->hat_flags & HAT_VLP) && vaddr >= ((uint64_t)1 << 32))
     1305 +                if ((hat->hat_flags & HAT_COPIED_32) &&
     1306 +                    vaddr >= ((uint64_t)1 << 32))
1299 1307                          return (NULL);
1300 1308  #endif
1301 1309                  base = 0;
1302 1310          } else {
1303 1311                  base = vaddr & LEVEL_MASK(level + 1);
1304 1312          }
1305 1313  
1306 1314          hashval = HTABLE_HASH(hat, base, level);
1307 1315          HTABLE_ENTER(hashval);
1308 1316          for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) {
... 627 lines elided ...
1936 1944  
1937 1945  /*
1938 1946   * Disable preemption and establish a mapping to the pagetable with the
 1939 1947   * given pfn. This is optimized for the case where it's the same
 1940 1948   * pfn as we last referenced from this CPU.
1941 1949   */
1942 1950  static x86pte_t *
1943 1951  x86pte_access_pagetable(htable_t *ht, uint_t index)
1944 1952  {
1945 1953          /*
1946      -         * VLP pagetables are contained in the hat_t
     1954 +         * HTABLE_COPIED pagetables are contained in the hat_t
1947 1955           */
1948      -        if (ht->ht_flags & HTABLE_VLP)
1949      -                return (PT_INDEX_PTR(ht->ht_hat->hat_vlp_ptes, index));
     1956 +        if (ht->ht_flags & HTABLE_COPIED) {
     1957 +                ASSERT3U(index, <, ht->ht_hat->hat_num_copied);
     1958 +                return (PT_INDEX_PTR(ht->ht_hat->hat_copied_ptes, index));
     1959 +        }
1950 1960          return (x86pte_mapin(ht->ht_pfn, index, ht));
1951 1961  }
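
In x86pte_access_pagetable() above, an HTABLE_COPIED htable (the renamed HTABLE_VLP) has no pagetable page of its own: its PTEs are kept inside the hat_t, so "mapping" the pagetable is just pointer arithmetic into hat_copied_ptes, bounded by hat_num_copied. Below is a minimal sketch of that lookup using only names visible in the hunk above; copied_pte_ptr() is a hypothetical helper, not part of the change.

    static x86pte_t *
    copied_pte_ptr(htable_t *ht, uint_t index)
    {
            hat_t *hat = ht->ht_hat;

            ASSERT(ht->ht_flags & HTABLE_COPIED);
            ASSERT3U(index, <, hat->hat_num_copied);
            return (PT_INDEX_PTR(hat->hat_copied_ptes, index));
    }
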
1952 1962  
1953 1963  /*
1954 1964   * map the given pfn into the page table window.
1955 1965   */
1956 1966  /*ARGSUSED*/
1957 1967  x86pte_t *
1958 1968  x86pte_mapin(pfn_t pfn, uint_t index, htable_t *ht)
1959 1969  {
... 12 lines elided ...
1972 1982          /*
1973 1983           * If kpm is available, use it.
1974 1984           */
1975 1985          if (kpm_vbase)
1976 1986                  return (PT_INDEX_PTR(hat_kpm_pfn2va(pfn), index));
1977 1987  
1978 1988          /*
1979 1989           * Disable preemption and grab the CPU's hci_mutex
1980 1990           */
1981 1991          kpreempt_disable();
     1992 +
1982 1993          ASSERT(CPU->cpu_hat_info != NULL);
     1994 +        ASSERT(!(getcr4() & CR4_PCIDE));
     1995 +
1983 1996          mutex_enter(&CPU->cpu_hat_info->hci_mutex);
1984 1997          x = PWIN_TABLE(CPU->cpu_id);
1985 1998          pteptr = (x86pte_t *)PWIN_PTE_VA(x);
1986 1999  #ifndef __xpv
1987 2000          if (mmu.pae_hat)
1988 2001                  pte = *pteptr;
1989 2002          else
1990 2003                  pte = *(x86pte32_t *)pteptr;
1991 2004  #endif
1992 2005  
... 14 lines elided ...
2007 2020                          xen_map(newpte, PWIN_VA(x));
2008 2021                  } else
2009 2022  #endif
2010 2023                  {
2011 2024                          XPV_ALLOW_PAGETABLE_UPDATES();
2012 2025                          if (mmu.pae_hat)
2013 2026                                  *pteptr = newpte;
2014 2027                          else
2015 2028                                  *(x86pte32_t *)pteptr = newpte;
2016 2029                          XPV_DISALLOW_PAGETABLE_UPDATES();
2017      -                        mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
     2030 +                        mmu_flush_tlb_kpage((uintptr_t)PWIN_VA(x));
2018 2031                  }
2019 2032          }
2020 2033          return (PT_INDEX_PTR(PWIN_VA(x), index));
2021 2034  }
2022 2035  
2023 2036  /*
2024 2037   * Release access to a page table.
2025 2038   */
2026 2039  static void
2027 2040  x86pte_release_pagetable(htable_t *ht)
2028 2041  {
2029      -        /*
2030      -         * nothing to do for VLP htables
2031      -         */
2032      -        if (ht->ht_flags & HTABLE_VLP)
     2042 +        if (ht->ht_flags & HTABLE_COPIED)
2033 2043                  return;
2034 2044  
2035 2045          x86pte_mapout();
2036 2046  }
2037 2047  
2038 2048  void
2039 2049  x86pte_mapout(void)
2040 2050  {
2041 2051          if (kpm_vbase != NULL || !khat_running)
2042 2052                  return;
... 80 lines elided ...
2123 2133                   * Another thread may have installed this mapping already,
2124 2134                   * flush the local TLB and be done.
2125 2135                   */
2126 2136                  if (prev == n) {
2127 2137                          old = new;
2128 2138  #ifdef __xpv
2129 2139                          if (!IN_XPV_PANIC())
2130 2140                                  xen_flush_va((caddr_t)addr);
2131 2141                          else
2132 2142  #endif
2133      -                                mmu_tlbflush_entry((caddr_t)addr);
     2143 +                                mmu_flush_tlb_page(addr);
2134 2144                          goto done;
2135 2145                  }
2136 2146  
2137 2147                  /*
2138 2148                   * Detect if we have a collision of installing a large
2139 2149                   * page mapping where there already is a lower page table.
2140 2150                   */
2141 2151                  if (l > 0 && (prev & PT_VALID) && !(prev & PT_PAGESIZE)) {
2142 2152                          old = LPAGE_ERROR;
2143 2153                          goto done;
... 38 lines elided ...
2182 2192  #ifdef __xpv
2183 2193          /*
2184 2194           * We can't use writable pagetables for upper level tables, so fake it.
2185 2195           */
2186 2196          mmu_update_t t[2];
2187 2197          int cnt = 1;
2188 2198          int count;
2189 2199          maddr_t ma;
2190 2200  
2191 2201          if (!IN_XPV_PANIC()) {
2192      -                ASSERT(!(ht->ht_flags & HTABLE_VLP));   /* no VLP yet */
     2202 +                ASSERT(!(ht->ht_flags & HTABLE_COPIED));
2193 2203                  ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry));
2194 2204                  t[0].ptr = ma | MMU_NORMAL_PT_UPDATE;
2195 2205                  t[0].val = new;
2196 2206  
2197 2207  #if defined(__amd64)
2198 2208                  /*
2199 2209                   * On the 64-bit hypervisor we need to maintain the user mode
2200 2210                   * top page table too.
2201 2211                   */
2202 2212                  if (ht->ht_level == mmu.max_level && ht->ht_hat != kas.a_hat) {
... 136 lines elided ...
2339 2349                  }
2340 2350          }
2341 2351          x86pte_release_pagetable(ht);
2342 2352          return (found);
2343 2353  }
2344 2354  
2345 2355  #ifndef __xpv
2346 2356  /*
2347 2357   * Copy page tables - this is just a little more complicated than the
2348 2358   * previous routines. Note that it's also not atomic! It also is never
2349      - * used for VLP pagetables.
     2359 + * used for HTABLE_COPIED pagetables.
2350 2360   */
2351 2361  void
2352 2362  x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
2353 2363  {
2354 2364          caddr_t src_va;
2355 2365          caddr_t dst_va;
2356 2366          size_t size;
2357 2367          x86pte_t *pteptr;
2358 2368          x86pte_t pte;
2359 2369  
2360 2370          ASSERT(khat_running);
2361      -        ASSERT(!(dest->ht_flags & HTABLE_VLP));
2362      -        ASSERT(!(src->ht_flags & HTABLE_VLP));
     2371 +        ASSERT(!(dest->ht_flags & HTABLE_COPIED));
     2372 +        ASSERT(!(src->ht_flags & HTABLE_COPIED));
2363 2373          ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN));
2364 2374          ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));
2365 2375  
2366 2376          /*
2367 2377           * Acquire access to the CPU pagetable windows for the dest and source.
2368 2378           */
2369 2379          dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);
2370 2380          if (kpm_vbase) {
2371 2381                  src_va = (caddr_t)
2372 2382                      PT_INDEX_PTR(hat_kpm_pfn2va(src->ht_pfn), entry);
2373 2383          } else {
2374 2384                  uint_t x = PWIN_SRC(CPU->cpu_id);
2375 2385  
     2386 +                ASSERT(!(getcr4() & CR4_PCIDE));
     2387 +
2376 2388                  /*
2377 2389                   * Finish defining the src pagetable mapping
2378 2390                   */
2379 2391                  src_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
2380 2392                  pte = MAKEPTE(src->ht_pfn, 0) | mmu.pt_global | mmu.pt_nx;
2381 2393                  pteptr = (x86pte_t *)PWIN_PTE_VA(x);
2382 2394                  if (mmu.pae_hat)
2383 2395                          *pteptr = pte;
2384 2396                  else
2385 2397                          *(x86pte32_t *)pteptr = pte;
2386      -                mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
     2398 +                mmu_flush_tlb_kpage((uintptr_t)PWIN_VA(x));
2387 2399          }
2388 2400  
2389 2401          /*
2390 2402           * now do the copy
2391 2403           */
2392 2404          size = count << mmu.pte_size_shift;
2393 2405          bcopy(src_va, dst_va, size);
2394 2406  
2395 2407          x86pte_release_pagetable(dest);
2396 2408  }
... 46 lines elided ...
2443 2455          size_t size;
2444 2456  #ifdef __xpv
2445 2457          int x;
2446 2458          x86pte_t newpte;
2447 2459  #endif
2448 2460  
2449 2461          /*
2450 2462           * Map in the page table to be zeroed.
2451 2463           */
2452 2464          ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));
2453      -        ASSERT(!(dest->ht_flags & HTABLE_VLP));
     2465 +        ASSERT(!(dest->ht_flags & HTABLE_COPIED));
2454 2466  
2455 2467          /*
2456 2468           * On the hypervisor we don't use x86pte_access_pagetable() since
2457 2469           * in this case the page is not pinned yet.
2458 2470           */
2459 2471  #ifdef __xpv
2460 2472          if (kpm_vbase == NULL) {
2461 2473                  kpreempt_disable();
2462 2474                  ASSERT(CPU->cpu_hat_info != NULL);
2463 2475                  mutex_enter(&CPU->cpu_hat_info->hci_mutex);
... 33 lines elided ...
2497 2509          hat_t *hat;
2498 2510          uint_t h;
2499 2511          htable_t *ht;
2500 2512  
2501 2513          /*
2502 2514           * Dump all page tables
2503 2515           */
2504 2516          for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) {
2505 2517                  for (h = 0; h < hat->hat_num_hash; ++h) {
2506 2518                          for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
2507      -                                if ((ht->ht_flags & HTABLE_VLP) == 0)
     2519 +                                if ((ht->ht_flags & HTABLE_COPIED) == 0)
2508 2520                                          dump_page(ht->ht_pfn);
2509 2521                          }
2510 2522                  }
2511 2523          }
2512 2524  }
    