8956 Implement KPTI
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>

          --- old/usr/src/uts/i86pc/vm/vm_machdep.c
          +++ new/usr/src/uts/i86pc/vm/vm_machdep.c
[ 16 lines elided ]
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
  23   23   */
  24   24  /*
  25   25   * Copyright (c) 2010, Intel Corporation.
  26   26   * All rights reserved.
  27      - * Copyright 2016 Joyent, Inc.
       27 + * Copyright 2018 Joyent, Inc.
  28   28   */
  29   29  
  30   30  /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
  31   31  /*      All Rights Reserved   */
  32   32  
  33   33  /*
  34   34   * Portions of this source code were derived from Berkeley 4.3 BSD
  35   35   * under license from the Regents of the University of California.
  36   36   */
  37   37  
[ 319 lines elided ]
 357  357  /*
 358  358   * Only let one thread at a time try to coalesce large pages, to
 359  359   * prevent them from working against each other.
 360  360   */
 361  361  static kmutex_t contig_lock;
 362  362  #define CONTIG_LOCK()   mutex_enter(&contig_lock);
 363  363  #define CONTIG_UNLOCK() mutex_exit(&contig_lock);
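These macros serialize all large-page coalescing in this file, as the comment above says. A minimal sketch of how a coalescing path might bracket its work with them; contig_coalesce_attempt() is a hypothetical stand-in for the real relocation logic, not a function in this change:

/*
 * Hypothetical caller: only one thread may try to build a large
 * page out of smaller ones at a time, so hold contig_lock around
 * the whole attempt.
 */
static page_t *
contig_alloc_large(pgcnt_t npages)
{
        page_t *pp;

        CONTIG_LOCK();
        pp = contig_coalesce_attempt(npages);   /* hypothetical helper */
        CONTIG_UNLOCK();

        return (pp);
}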
 364  364  
 365  365  #define PFN_16M         (mmu_btop((uint64_t)0x1000000))
 366  366  
      367 +caddr_t
      368 +i86devmap(pfn_t pf, pgcnt_t pgcnt, uint_t prot)
      369 +{
      370 +        caddr_t addr;
      371 +        caddr_t addr1;
      372 +        page_t *pp;
      373 +
      374 +        addr1 = addr = vmem_alloc(heap_arena, mmu_ptob(pgcnt), VM_SLEEP);
      375 +
      376 +        for (; pgcnt != 0; addr += MMU_PAGESIZE, ++pf, --pgcnt) {
      377 +                pp = page_numtopp_nolock(pf);
      378 +                if (pp == NULL) {
      379 +                        hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pf,
      380 +                            prot | HAT_NOSYNC, HAT_LOAD_LOCK);
      381 +                } else {
      382 +                        hat_memload(kas.a_hat, addr, pp,
      383 +                            prot | HAT_NOSYNC, HAT_LOAD_LOCK);
      384 +                }
      385 +        }
      386 +
      387 +        return (addr1);
      388 +}
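i86devmap() returns a kernel virtual address covering pgcnt pages starting at pfn pf, using hat_devload() for frames that have no page_t (device memory) and hat_memload() otherwise, with the translations locked. A hedged sketch of how a caller might pair it with a matching teardown; the function names, the two-page length, and the pfn source are illustrative assumptions, not part of this change:

/*
 * Illustrative only: map two pages of device register space
 * read/write into the kernel, and later undo the mapping by
 * unloading the locked translations and freeing the VA range.
 */
static caddr_t
example_devregs_map(pfn_t base_pf)
{
        return (i86devmap(base_pf, 2, PROT_READ | PROT_WRITE));
}

static void
example_devregs_unmap(caddr_t regs)
{
        hat_unload(kas.a_hat, regs, mmu_ptob(2), HAT_UNLOAD_UNLOCK);
        vmem_free(heap_arena, regs, mmu_ptob(2));
}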
      389 +
 367  390  /*
      391 + * This routine is like page_numtopp, but accepts only free pages, which
      392 + * it allocates (unfrees) and returns with the exclusive lock held.
      393 + * It is used by machdep.c/dma_init() to find contiguous free pages.
      394 + */
      395 +page_t *
      396 +page_numtopp_alloc(pfn_t pfnum)
      397 +{
      398 +        page_t *pp;
      399 +
      400 +retry:
      401 +        pp = page_numtopp_nolock(pfnum);
      402 +        if (pp == NULL) {
      403 +                return (NULL);
      404 +        }
      405 +
      406 +        if (!page_trylock(pp, SE_EXCL)) {
      407 +                return (NULL);
      408 +        }
      409 +
      410 +        if (page_pptonum(pp) != pfnum) {
      411 +                page_unlock(pp);
      412 +                goto retry;
      413 +        }
      414 +
      415 +        if (!PP_ISFREE(pp)) {
      416 +                page_unlock(pp);
      417 +                return (NULL);
      418 +        }
      419 +        if (pp->p_szc) {
      420 +                page_demote_free_pages(pp);
      421 +                page_unlock(pp);
      422 +                goto retry;
      423 +        }
      424 +
      425 +        /* If associated with a vnode, destroy mappings */
      426 +
      427 +        if (pp->p_vnode) {
      428 +
      429 +                page_destroy_free(pp);
      430 +
      431 +                if (!page_lock(pp, SE_EXCL, (kmutex_t *)NULL, P_NO_RECLAIM)) {
      432 +                        return (NULL);
      433 +                }
      434 +
      435 +                if (page_pptonum(pp) != pfnum) {
      436 +                        page_unlock(pp);
      437 +                        goto retry;
      438 +                }
      439 +        }
      440 +
      441 +        if (!PP_ISFREE(pp)) {
      442 +                page_unlock(pp);
      443 +                return (NULL);
      444 +        }
      445 +
      446 +        if (!page_reclaim(pp, (kmutex_t *)NULL))
      447 +                return (NULL);
      448 +
      449 +        return (pp);
      450 +}
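page_numtopp_alloc() only succeeds when the page is free and can be claimed with the exclusive lock held, so a caller hunting for a physically contiguous run (the dma_init() use the comment above refers to) must be ready to give back whatever it has already claimed. A rough sketch under that assumption; claim_contig_range() and its arguments are hypothetical:

/*
 * Hypothetical: try to claim 'npages' physically contiguous free
 * pages starting at pfn 'base'.  On any miss, return the pages
 * already claimed to the free list and report failure so the
 * caller can try another range.
 */
static int
claim_contig_range(pfn_t base, pgcnt_t npages, page_t **pplist)
{
        pgcnt_t i;

        for (i = 0; i < npages; i++) {
                pplist[i] = page_numtopp_alloc(base + i);
                if (pplist[i] == NULL) {
                        while (i-- > 0)
                                page_free(pplist[i], 1);
                        return (-1);
                }
        }
        return (0);
}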
      451 +
      452 +/*
 368  453   * Return the optimum page size for a given mapping
 369  454   */
 370  455  /*ARGSUSED*/
 371  456  size_t
 372  457  map_pgsz(int maptype, struct proc *p, caddr_t addr, size_t len, int memcntl)
 373  458  {
 374  459          level_t l = 0;
 375  460          size_t pgsz = MMU_PAGESIZE;
 376  461          size_t max_lpsize;
 377  462          uint_t mszc;
[ 3642 lines elided ]