restore sparc comments
de-linting of .s files

          --- old/usr/src/uts/sun4u/vm/mach_sfmmu_asm.s
          +++ new/usr/src/uts/sun4u/vm/mach_sfmmu_asm.s
(15 lines elided)
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
  23   23   * Use is subject to license terms.
  24   24   */
  25   25  
  26      -#pragma ident   "%Z%%M% %I%     %E% SMI"
  27      -
  28   26  /*
  29   27   * SFMMU primitives.  These primitives should only be used by sfmmu
  30   28   * routines.
  31   29   */
  32   30  
  33      -#if defined(lint)
  34      -#include <sys/types.h>
  35      -#else   /* lint */
  36   31  #include "assym.h"
  37      -#endif  /* lint */
  38   32  
  39   33  #include <sys/asm_linkage.h>
  40   34  #include <sys/machtrap.h>
  41   35  #include <sys/machasi.h>
  42   36  #include <sys/sun4asi.h>
  43   37  #include <sys/pte.h>
  44   38  #include <sys/mmu.h>
  45   39  #include <vm/hat_sfmmu.h>
  46   40  #include <vm/seg_spt.h>
  47   41  #include <sys/machparam.h>
(1 line elided)
  49   43  #include <sys/scb.h>
  50   44  #include <sys/intreg.h>
  51   45  #include <sys/machthread.h>
  52   46  #include <sys/clock.h>
  53   47  #include <sys/trapstat.h>
  54   48  
  55   49  /*
  56   50   * sfmmu related subroutines
  57   51   */
  58   52  
  59      -#if defined (lint)
  60      -
  61   53  /*
  62      - * sfmmu related subroutines
  63      - */
  64      -/* ARGSUSED */
  65      -void
  66      -sfmmu_raise_tsb_exception(uint64_t sfmmup, uint64_t rctx)
  67      -{}
  68      -
  69      -/* ARGSUSED */
  70      -void
  71      -sfmmu_itlb_ld_kva(caddr_t vaddr, tte_t *tte)
  72      -{}
  73      -
  74      -/* ARGSUSED */
  75      -void
  76      -sfmmu_dtlb_ld_kva(caddr_t vaddr, tte_t *tte)
  77      -{}
  78      -
  79      -int
  80      -sfmmu_getctx_pri()
  81      -{ return(0); }
  82      -
  83      -int
  84      -sfmmu_getctx_sec()
  85      -{ return(0); }
  86      -
  87      -/* ARGSUSED */
  88      -void
  89      -sfmmu_setctx_sec(uint_t ctx)
  90      -{}
  91      -
  92      -/* ARGSUSED */
  93      -void
  94      -sfmmu_load_mmustate(sfmmu_t *sfmmup)
  95      -{
  96      -}
  97      -
  98      -#else   /* lint */
  99      -
 100      -/*
 101   54   * Invalidate either the context of a specific victim or any process
 102   55   * currently running on this CPU. 
 103   56   *
 104   57   * %g1 = sfmmup whose ctx is being invalidated
 105   58   *       when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT
 106   59   * Note %g1 is the only input argument used by this xcall handler.
 107   60   */
 108   61          ENTRY(sfmmu_raise_tsb_exception)
 109   62          !
 110   63          ! if (victim == INVALID_CONTEXT ||
(491 lines elided)
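Aside (not part of this webrev): sfmmu_raise_tsb_exception is a sun4 cross-trap (xcall) target, so it runs at trap level on each CPU it is sent to and receives its arguments in %g1/%g2 rather than through the normal C calling convention. A minimal sketch of how a caller might dispatch it, modeled on the sun4 cross-trap interface in <sys/x_call.h>; the wrapper function, its name, and the cpuset handling are illustrative, not the actual call site in hat_sfmmu.c:

    #include <sys/types.h>
    #include <sys/cpuvar.h>
    #include <sys/x_call.h>
    #include <vm/hat_sfmmu.h>

    /*
     * Hypothetical helper: cross-call every CPU in cpuset so it
     * re-examines its MMU context.  Only the first argument is
     * examined by the handler (it arrives in %g1); the second is
     * ignored, per the comment above.
     */
    static void
    raise_tsb_exception_on(cpuset_t cpuset, sfmmu_t *sfmmup)
    {
            xt_some(cpuset, (xcfunc_t *)sfmmu_raise_tsb_exception,
                (uint64_t)sfmmup, 0);
    }

Passing INVALID_CONTEXT in place of an sfmmup (as sfmmu_wrap_around does) makes every CPU in the set invalidate whatever context it is currently running.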
 602  555  7:
 603  556          add     %o4, SCD_HMERMAP, %o1
 604  557          SET_REGION_MAP(%o1, %o2, %o3, %o4, load_scd_mmustate)
 605  558  #endif /* UTSB_PHYS */
 606  559  
 607  560  8:
 608  561          retl
 609  562            nop
 610  563          SET_SIZE(sfmmu_load_mmustate)
 611  564  
 612      -#endif /* lint */
 613      -
 614      -#if defined (lint)
 615  565  /*
 616      - * Invalidate all of the entries within the tsb, by setting the inv bit
      566 + * Invalidate all of the entries within the TSB, by setting the inv bit
 617  567   * in the tte_tag field of each tsbe.
 618  568   *
 619      - * We take advantage of the fact TSBs are page aligned and a multiple of
 620      - * PAGESIZE to use block stores.
      569 + * We take advantage of the fact that the TSBs are page aligned and a
      570 + * multiple of PAGESIZE to use ASI_BLK_INIT_xxx ASI.
 621  571   *
 622  572   * See TSB_LOCK_ENTRY and the miss handlers for how this works in practice
 623  573   * (in short, we set all bits in the upper word of the tag, and we give the
 624  574   * invalid bit precedence over other tag bits in both places).
 625  575   */
 626      -/* ARGSUSED */
 627      -void
 628      -sfmmu_inv_tsb_fast(caddr_t tsb_base, uint_t tsb_bytes)
 629      -{}
 630  576  
 631      -#else /* lint */
 632      -
 633  577  #define VIS_BLOCKSIZE   64
 634  578  
 635  579          ENTRY(sfmmu_inv_tsb_fast)
 636  580  
 637  581          ! Get space for aligned block of saved fp regs.
 638  582          save    %sp, -SA(MINFRAME + 2*VIS_BLOCKSIZE), %sp
 639  583  
 640  584          ! kpreempt_disable();
 641  585          ldsb    [THREAD_REG + T_PREEMPT], %l3
 642  586          inc     %l3
(57 lines elided)
 700  644  
 701  645  .sfmmu_inv_finished:
 702  646          ! kpreempt_enable();
 703  647          ldsb    [THREAD_REG + T_PREEMPT], %l3
 704  648          dec     %l3
 705  649          stb     %l3, [THREAD_REG + T_PREEMPT]
 706  650          ret
 707  651            restore
 708  652          SET_SIZE(sfmmu_inv_tsb_fast)
 709  653  
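Aside (not part of this webrev): sfmmu_inv_tsb_fast is the block-store version of a simple tag-invalidation loop. A C-level sketch, modeled on the slow-path TSB invalidation in hat_sfmmu.c (the function name here is assumed), writes TSBTAG_INVALID into the upper word of every entry's tag:

    #include <sys/types.h>
    #include <vm/hat_sfmmu.h>

    /*
     * Sketch: invalidate every tsbe in [tsb_base, tsb_base + tsb_bytes)
     * by storing the invalid pattern into the high word of its tag.
     * The assembly above does the same work VIS_BLOCKSIZE (64) bytes
     * at a time with block-initializing stores, which is why it must
     * save the fp registers and hold off preemption (its manual
     * T_PREEMPT increment/decrement mirrors kpreempt_disable() /
     * kpreempt_enable()).
     */
    static void
    inv_tsb_slow(caddr_t tsb_base, uint_t tsb_bytes)
    {
            struct tsbe *tsbaddr;

            for (tsbaddr = (struct tsbe *)tsb_base;
                (uintptr_t)tsbaddr < (uintptr_t)(tsb_base + tsb_bytes);
                tsbaddr++)
                    tsbaddr->tte_tag.tag_inthi = TSBTAG_INVALID;
    }

Because the invalid bit takes precedence over every other tag bit, the miss handlers treat such an entry as empty regardless of what the rest of the tag contains.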
 710      -#endif /* lint */
 711      -
 712      -#if defined(lint)
 713      -
 714  654  /*
 715  655   * Prefetch "struct tsbe" while walking TSBs.
 716  656   * prefetch 7 cache lines ahead of where we are at now.
 717  657   * #n_reads is being used since #one_read only applies to
 718  658   * floating point reads, and we are not doing floating point
 719  659   * reads.  However, this has the negative side effect of polluting
 720  660   * the ecache.
 721  661   * The 448 comes from (7 * 64) which is how far ahead of our current
 722  662   * address, we want to prefetch.
 723  663   */
 724      -/*ARGSUSED*/
 725      -void
 726      -prefetch_tsbe_read(struct tsbe *tsbep)
 727      -{}
 728      -
 729      -/* Prefetch the tsbe that we are about to write */
 730      -/*ARGSUSED*/
 731      -void
 732      -prefetch_tsbe_write(struct tsbe *tsbep)
 733      -{}
 734      -
 735      -#else /* lint */
 736      -
 737  664          ENTRY(prefetch_tsbe_read)
 738  665          retl
 739  666            prefetch      [%o0+448], #n_reads
 740  667          SET_SIZE(prefetch_tsbe_read)
 741  668  
      669 +/* Prefetch the tsbe that we are about to write */
 742  670          ENTRY(prefetch_tsbe_write)
 743  671          retl
 744  672            prefetch      [%o0], #n_writes
 745  673          SET_SIZE(prefetch_tsbe_write)
 746      -#endif /* lint */
 747  674  
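Aside (not part of this webrev): a sketch of how the two prefetch primitives might be used from C. The walker below is hypothetical; only prefetch_tsbe_read() and prefetch_tsbe_write() come from this file:

    #include <sys/types.h>
    #include <vm/hat_sfmmu.h>

    extern void prefetch_tsbe_read(struct tsbe *);
    extern void prefetch_tsbe_write(struct tsbe *);

    /*
     * Hypothetical TSB scan.  prefetch_tsbe_read(&tsb[i]) issues a
     * #n_reads prefetch for the line 448 bytes (7 x 64-byte lines)
     * past the current entry, so by the time the loop reaches that
     * entry it is already in cache.
     */
    static void
    tsb_scan_example(struct tsbe *tsb, size_t nentries)
    {
            size_t i;

            for (i = 0; i < nentries; i++) {
                    prefetch_tsbe_read(&tsb[i]);
                    /* ... read and examine tsb[i] here ... */
            }
    }

A writer does the analogous thing with prefetch_tsbe_write(tsbep) immediately before storing a new entry at tsbep, using #n_writes so the line is fetched ready for modification.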
    