Print this page
de-linting of .s files

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/sun4u/vm/mach_sfmmu_asm.s
          +++ new/usr/src/uts/sun4u/vm/mach_sfmmu_asm.s
↓ open down ↓ 15 lines elided ↑ open up ↑
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
  24   24   */
  25   25  
  26      -#pragma ident   "%Z%%M% %I%     %E% SMI"
  27      -
  28   26  /*
  29   27   * SFMMU primitives.  These primitives should only be used by sfmmu
  30   28   * routines.
  31   29   */
  32   30  
  33      -#if defined(lint)
  34      -#include <sys/types.h>
  35      -#else   /* lint */
  36   31  #include "assym.h"
  37      -#endif  /* lint */
  38   32  
  39   33  #include <sys/asm_linkage.h>
  40   34  #include <sys/machtrap.h>
  41   35  #include <sys/machasi.h>
  42   36  #include <sys/sun4asi.h>
  43   37  #include <sys/pte.h>
  44   38  #include <sys/mmu.h>
  45   39  #include <vm/hat_sfmmu.h>
  46   40  #include <vm/seg_spt.h>
  47   41  #include <sys/machparam.h>
↓ open down ↓ 1 lines elided ↑ open up ↑
  49   43  #include <sys/scb.h>
  50   44  #include <sys/intreg.h>
  51   45  #include <sys/machthread.h>
  52   46  #include <sys/clock.h>
  53   47  #include <sys/trapstat.h>
  54   48  
  55   49  /*
  56   50   * sfmmu related subroutines
  57   51   */
  58   52  
  59      -#if defined (lint)
  60      -
  61   53  /*
  62      - * sfmmu related subroutines
  63      - */
  64      -/* ARGSUSED */
  65      -void
  66      -sfmmu_raise_tsb_exception(uint64_t sfmmup, uint64_t rctx)
  67      -{}
  68      -
  69      -/* ARGSUSED */
  70      -void
  71      -sfmmu_itlb_ld_kva(caddr_t vaddr, tte_t *tte)
  72      -{}
  73      -
  74      -/* ARGSUSED */
  75      -void
  76      -sfmmu_dtlb_ld_kva(caddr_t vaddr, tte_t *tte)
  77      -{}
  78      -
  79      -int
  80      -sfmmu_getctx_pri()
  81      -{ return(0); }
  82      -
  83      -int
  84      -sfmmu_getctx_sec()
  85      -{ return(0); }
  86      -
  87      -/* ARGSUSED */
  88      -void
  89      -sfmmu_setctx_sec(uint_t ctx)
  90      -{}
  91      -
  92      -/* ARGSUSED */
  93      -void
  94      -sfmmu_load_mmustate(sfmmu_t *sfmmup)
  95      -{
  96      -}
  97      -
  98      -#else   /* lint */
  99      -
 100      -/*
 101   54   * Invalidate either the context of a specific victim or any process
 102   55   * currently running on this CPU. 
 103   56   *
 104   57   * %g1 = sfmmup whose ctx is being invalidated
 105   58   *       when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT
 106   59   * Note %g1 is the only input argument used by this xcall handler.
 107   60   */
 108   61          ENTRY(sfmmu_raise_tsb_exception)
 109   62          !
 110   63          ! if (victim == INVALID_CONTEXT ||
↓ open down ↓ 491 lines elided ↑ open up ↑
 602  555  7:
 603  556          add     %o4, SCD_HMERMAP, %o1
 604  557          SET_REGION_MAP(%o1, %o2, %o3, %o4, load_scd_mmustate)
 605  558  #endif /* UTSB_PHYS */
 606  559  
 607  560  8:
 608  561          retl
 609  562            nop
 610  563          SET_SIZE(sfmmu_load_mmustate)
 611  564  
 612      -#endif /* lint */
 613      -
 614      -#if defined (lint)
 615      -/*
 616      - * Invalidate all of the entries within the tsb, by setting the inv bit
 617      - * in the tte_tag field of each tsbe.
 618      - *
 619      - * We take advantage of the fact TSBs are page aligned and a multiple of
 620      - * PAGESIZE to use block stores.
 621      - *
 622      - * See TSB_LOCK_ENTRY and the miss handlers for how this works in practice
 623      - * (in short, we set all bits in the upper word of the tag, and we give the
 624      - * invalid bit precedence over other tag bits in both places).
 625      - */
 626      -/* ARGSUSED */
 627      -void
 628      -sfmmu_inv_tsb_fast(caddr_t tsb_base, uint_t tsb_bytes)
 629      -{}
 630      -
 631      -#else /* lint */
 632      -
 633  565  #define VIS_BLOCKSIZE   64
 634  566  
 635  567          ENTRY(sfmmu_inv_tsb_fast)
 636  568  
 637  569          ! Get space for aligned block of saved fp regs.
 638  570          save    %sp, -SA(MINFRAME + 2*VIS_BLOCKSIZE), %sp
 639  571  
 640  572          ! kpreempt_disable();
 641  573          ldsb    [THREAD_REG + T_PREEMPT], %l3
 642  574          inc     %l3
↓ open down ↓ 57 lines elided ↑ open up ↑
 700  632  
 701  633  .sfmmu_inv_finished:
 702  634          ! kpreempt_enable();
 703  635          ldsb    [THREAD_REG + T_PREEMPT], %l3
 704  636          dec     %l3
 705  637          stb     %l3, [THREAD_REG + T_PREEMPT]
 706  638          ret
 707  639            restore
 708  640          SET_SIZE(sfmmu_inv_tsb_fast)
 709  641  
 710      -#endif /* lint */
 711      -
 712      -#if defined(lint)
 713      -
 714      -/*
 715      - * Prefetch "struct tsbe" while walking TSBs.
 716      - * prefetch 7 cache lines ahead of where we are at now.
 717      - * #n_reads is being used since #one_read only applies to
 718      - * floating point reads, and we are not doing floating point
 719      - * reads.  However, this has the negative side effect of polluting
 720      - * the ecache.
 721      - * The 448 comes from (7 * 64) which is how far ahead of our current
 722      - * address, we want to prefetch.
 723      - */
 724      -/*ARGSUSED*/
 725      -void
 726      -prefetch_tsbe_read(struct tsbe *tsbep)
 727      -{}
 728      -
 729      -/* Prefetch the tsbe that we are about to write */
 730      -/*ARGSUSED*/
 731      -void
 732      -prefetch_tsbe_write(struct tsbe *tsbep)
 733      -{}
 734      -
 735      -#else /* lint */
 736      -
 737  642          ENTRY(prefetch_tsbe_read)
 738  643          retl
 739  644            prefetch      [%o0+448], #n_reads
 740  645          SET_SIZE(prefetch_tsbe_read)
 741  646  
 742  647          ENTRY(prefetch_tsbe_write)
 743  648          retl
 744  649            prefetch      [%o0], #n_writes
 745  650          SET_SIZE(prefetch_tsbe_write)
 746      -#endif /* lint */
 747  651  
    