6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 #pragma ident "%Z%%M% %I% %E% SMI"
27
28 /*
29 * SFMMU primitives. These primitives should only be used by sfmmu
30 * routines.
31 */
32
33 #if defined(lint)
34 #include <sys/types.h>
35 #else /* lint */
36 #include "assym.h"
37 #endif /* lint */
38
39 #include <sys/asm_linkage.h>
40 #include <sys/machtrap.h>
41 #include <sys/machasi.h>
42 #include <sys/sun4asi.h>
43 #include <sys/pte.h>
44 #include <sys/mmu.h>
45 #include <vm/hat_sfmmu.h>
46 #include <vm/seg_spt.h>
47 #include <sys/machparam.h>
48 #include <sys/privregs.h>
49 #include <sys/scb.h>
50 #include <sys/intreg.h>
51 #include <sys/machthread.h>
52 #include <sys/clock.h>
53 #include <sys/trapstat.h>
54
55 /*
56 * sfmmu related subroutines
57 */
58
59 #if defined (lint)
60
61 /*
62 * sfmmu related subroutines
63 */
/*
 * Lint stubs: empty C bodies compiled only under -Dlint so that lint
 * has prototypes to check callers against.  The real implementations
 * are the assembly ENTRY() routines in the !lint half of this file;
 * none of these stubs ever runs in the kernel.
 */
64 /* ARGSUSED */
65 void
66 sfmmu_raise_tsb_exception(uint64_t sfmmup, uint64_t rctx)
67 {}
68
69 /* ARGSUSED */
70 void
71 sfmmu_itlb_ld_kva(caddr_t vaddr, tte_t *tte)
72 {}
73
74 /* ARGSUSED */
75 void
76 sfmmu_dtlb_ld_kva(caddr_t vaddr, tte_t *tte)
77 {}
78
79 int
80 sfmmu_getctx_pri()
81 { return(0); }
82
83 int
84 sfmmu_getctx_sec()
85 { return(0); }
86
87 /* ARGSUSED */
88 void
89 sfmmu_setctx_sec(uint_t ctx)
90 {}
91
92 /* ARGSUSED */
93 void
94 sfmmu_load_mmustate(sfmmu_t *sfmmup)
95 {
96 }
97
98 #else /* lint */
99
100 /*
101 * Invalidate either the context of a specific victim or any process
102 * currently running on this CPU.
103 *
104 * %g1 = sfmmup whose ctx is being invalidated
105 * when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT
106 * Note %g1 is the only input argument used by this xcall handler.
107 */
108 ENTRY(sfmmu_raise_tsb_exception)
109 !
110 ! if (victim == INVALID_CONTEXT ||
111 ! current CPU tsbmiss->usfmmup == victim sfmmup) {
112 ! if (shctx_on) {
113 ! shctx = INVALID;
114 ! }
115 ! if (sec-ctx > INVALID_CONTEXT) {
116 ! write INVALID_CONTEXT to sec-ctx
117 ! }
118 ! if (pri-ctx > INVALID_CONTEXT) {
119 ! write INVALID_CONTEXT to pri-ctx
120 ! }
!
! NOTE(review): the original source between here and the line below is
! not visible in this chunk (line numbering jumps 120 -> 592).  The
! remainder of this span is the tail of sfmmu_load_mmustate (per the
! SET_SIZE below), not part of sfmmu_raise_tsb_exception.
!
592 SET_REGION_MAP(%o1, %o2, %o3, %o4, load_shme_mmustate)
593
! Set up the shared-context-domain (SCD) region map in the per-CPU
! tsbmiss area: if sfmmup->sfmmu_scdp is non-NULL, copy its hmeregion
! map into tsbmiss scd_shmermap; otherwise zero those words.
594 ldx [%o0 + SFMMU_SCDP], %o4 ! %o4 = sfmmu_scd
595 CPU_TSBMISS_AREA(%o2, %o3) ! %o2 = tsbmiss area
596 mov SFMMU_HMERGNMAP_WORDS, %o3
597 brnz,pt %o4, 7f ! check for sfmmu_scdp else
598 add %o2, TSBMISS_SCDSHMERMAP, %o2 ! zero tsbmiss scd_shmermap
599 ZERO_REGION_MAP(%o2, %o3, zero_scd_mmustate)
600 ba 8f
601 nop
602 7:
! SCD present: copy the SCD's hmeregion map into the tsbmiss area.
603 add %o4, SCD_HMERMAP, %o1
604 SET_REGION_MAP(%o1, %o2, %o3, %o4, load_scd_mmustate)
605 #endif /* UTSB_PHYS */
606
607 8:
608 retl
609 nop
610 SET_SIZE(sfmmu_load_mmustate)
611
612 #endif /* lint */
613
614 #if defined (lint)
615 /*
616 * Invalidate all of the entries within the tsb, by setting the inv bit
617 * in the tte_tag field of each tsbe.
618 *
619 * We take advantage of the fact TSBs are page aligned and a multiple of
620 * PAGESIZE to use block stores.
621 *
622 * See TSB_LOCK_ENTRY and the miss handlers for how this works in practice
623 * (in short, we set all bits in the upper word of the tag, and we give the
624 * invalid bit precedence over other tag bits in both places).
625 */
626 /* ARGSUSED */
627 void
628 sfmmu_inv_tsb_fast(caddr_t tsb_base, uint_t tsb_bytes)
629 {}
630
631 #else /* lint */
632
633 #define VIS_BLOCKSIZE 64
634
! Invalidate every tsbe in the TSB using block (VIS) stores; must save
! and restore the caller's floating-point state around the block ops,
! with preemption disabled so the FP context cannot migrate.
635 ENTRY(sfmmu_inv_tsb_fast)
636
637 ! Get space for aligned block of saved fp regs.
638 save %sp, -SA(MINFRAME + 2*VIS_BLOCKSIZE), %sp
639
640 ! kpreempt_disable();
641 ldsb [THREAD_REG + T_PREEMPT], %l3
642 inc %l3
643 stb %l3, [THREAD_REG + T_PREEMPT]
644
645 ! See if fpu was in use. If it was, we need to save off the
646 ! floating point registers to the stack.
647 rd %fprs, %l0 ! %l0 = cached copy of fprs
648 btst FPRS_FEF, %l0
649 bz,pt %icc, 4f
650 nop
651
652 ! save in-use fpregs on stack
!
! NOTE(review): the FP-save code and the invalidation loop body are
! not visible in this chunk (line numbering jumps 652 -> 690).
!
690 .sfmmu_inv_finish:
691 membar #Sync
692 btst FPRS_FEF, %l0 ! saved from above
! Annulled branch: the wr in the delay slot executes only when the
! branch is taken (FPU was NOT in use) -- fprs is restored directly.
! Otherwise fall through to reload the saved fpregs first.
693 bz,a .sfmmu_inv_finished
694 wr %l0, 0, %fprs ! restore fprs
695
696 ! restore fpregs from stack
697 ldda [%l1]ASI_BLK_P, %d0
698 membar #Sync
699 wr %l0, 0, %fprs ! restore fprs
700
701 .sfmmu_inv_finished:
702 ! kpreempt_enable();
703 ldsb [THREAD_REG + T_PREEMPT], %l3
704 dec %l3
705 stb %l3, [THREAD_REG + T_PREEMPT]
706 ret
707 restore
708 SET_SIZE(sfmmu_inv_tsb_fast)
709
710 #endif /* lint */
711
712 #if defined(lint)
713
714 /*
715 * Prefetch "struct tsbe" while walking TSBs.
716 * prefetch 7 cache lines ahead of where we are at now.
717 * #n_reads is being used since #one_read only applies to
718 * floating point reads, and we are not doing floating point
719 * reads. However, this has the negative side effect of polluting
720 * the ecache.
721 * The 448 comes from (7 * 64) which is how far ahead of our current
722 * address, we want to prefetch.
723 */
724 /*ARGSUSED*/
725 void
726 prefetch_tsbe_read(struct tsbe *tsbep)
727 {}
728
729 /* Prefetch the tsbe that we are about to write */
730 /*ARGSUSED*/
731 void
732 prefetch_tsbe_write(struct tsbe *tsbep)
733 {}
734
735 #else /* lint */
736
737 ENTRY(prefetch_tsbe_read)
738 retl
! Delay slot: prefetch 448 bytes (7 x 64-byte cache lines) ahead of
! the current tsbe pointer, for multiple reads (#n_reads); see the
! block comment above for why #one_read is not used.
739 prefetch [%o0+448], #n_reads
740 SET_SIZE(prefetch_tsbe_read)
741
742 ENTRY(prefetch_tsbe_write)
743 retl
! Delay slot: prefetch for writing the tsbe we are about to store to.
744 prefetch [%o0], #n_writes
745 SET_SIZE(prefetch_tsbe_write)
746 #endif /* lint */
747
|
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /*
27 * SFMMU primitives. These primitives should only be used by sfmmu
28 * routines.
29 */
30
31 #include "assym.h"
32
33 #include <sys/asm_linkage.h>
34 #include <sys/machtrap.h>
35 #include <sys/machasi.h>
36 #include <sys/sun4asi.h>
37 #include <sys/pte.h>
38 #include <sys/mmu.h>
39 #include <vm/hat_sfmmu.h>
40 #include <vm/seg_spt.h>
41 #include <sys/machparam.h>
42 #include <sys/privregs.h>
43 #include <sys/scb.h>
44 #include <sys/intreg.h>
45 #include <sys/machthread.h>
46 #include <sys/clock.h>
47 #include <sys/trapstat.h>
48
49 /*
50 * sfmmu related subroutines
51 */
52
53 /*
54 * Invalidate either the context of a specific victim or any process
55 * currently running on this CPU.
56 *
57 * %g1 = sfmmup whose ctx is being invalidated
58 * when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT
59 * Note %g1 is the only input argument used by this xcall handler.
60 */
61 ENTRY(sfmmu_raise_tsb_exception)
62 !
63 ! if (victim == INVALID_CONTEXT ||
64 ! current CPU tsbmiss->usfmmup == victim sfmmup) {
65 ! if (shctx_on) {
66 ! shctx = INVALID;
67 ! }
68 ! if (sec-ctx > INVALID_CONTEXT) {
69 ! write INVALID_CONTEXT to sec-ctx
70 ! }
71 ! if (pri-ctx > INVALID_CONTEXT) {
72 ! write INVALID_CONTEXT to pri-ctx
73 ! }
!
! NOTE(review): the original source between here and the line below is
! not visible in this chunk (line numbering jumps 73 -> 545).  The
! remainder of this span is the tail of sfmmu_load_mmustate (per the
! SET_SIZE below), not part of sfmmu_raise_tsb_exception.
!
545 SET_REGION_MAP(%o1, %o2, %o3, %o4, load_shme_mmustate)
546
! Set up the shared-context-domain (SCD) region map in the per-CPU
! tsbmiss area: if sfmmup->sfmmu_scdp is non-NULL, copy its hmeregion
! map into tsbmiss scd_shmermap; otherwise zero those words.
547 ldx [%o0 + SFMMU_SCDP], %o4 ! %o4 = sfmmu_scd
548 CPU_TSBMISS_AREA(%o2, %o3) ! %o2 = tsbmiss area
549 mov SFMMU_HMERGNMAP_WORDS, %o3
550 brnz,pt %o4, 7f ! check for sfmmu_scdp else
551 add %o2, TSBMISS_SCDSHMERMAP, %o2 ! zero tsbmiss scd_shmermap
552 ZERO_REGION_MAP(%o2, %o3, zero_scd_mmustate)
553 ba 8f
554 nop
555 7:
! SCD present: copy the SCD's hmeregion map into the tsbmiss area.
556 add %o4, SCD_HMERMAP, %o1
557 SET_REGION_MAP(%o1, %o2, %o3, %o4, load_scd_mmustate)
558 #endif /* UTSB_PHYS */
559
560 8:
561 retl
562 nop
563 SET_SIZE(sfmmu_load_mmustate)
564
565 /*
566 * Invalidate all of the entries within the TSB, by setting the inv bit
567 * in the tte_tag field of each tsbe.
568 *
569 * We take advantage of the fact that the TSBs are page aligned and a
570 * multiple of PAGESIZE to use ASI_BLK_INIT_xxx ASI.
571 *
572 * See TSB_LOCK_ENTRY and the miss handlers for how this works in practice
573 * (in short, we set all bits in the upper word of the tag, and we give the
574 * invalid bit precedence over other tag bits in both places).
575 */
576
577 #define VIS_BLOCKSIZE 64
578
! Invalidate every tsbe in the TSB using block-initializing stores;
! must save and restore the caller's floating-point state around the
! block ops, with preemption disabled so the FP context cannot migrate.
579 ENTRY(sfmmu_inv_tsb_fast)
580
581 ! Get space for aligned block of saved fp regs.
582 save %sp, -SA(MINFRAME + 2*VIS_BLOCKSIZE), %sp
583
584 ! kpreempt_disable();
585 ldsb [THREAD_REG + T_PREEMPT], %l3
586 inc %l3
587 stb %l3, [THREAD_REG + T_PREEMPT]
588
589 ! See if fpu was in use. If it was, we need to save off the
590 ! floating point registers to the stack.
591 rd %fprs, %l0 ! %l0 = cached copy of fprs
592 btst FPRS_FEF, %l0
593 bz,pt %icc, 4f
594 nop
595
596 ! save in-use fpregs on stack
!
! NOTE(review): the FP-save code and the invalidation loop body are
! not visible in this chunk (line numbering jumps 596 -> 634).
!
634 .sfmmu_inv_finish:
635 membar #Sync
636 btst FPRS_FEF, %l0 ! saved from above
! Annulled branch: the wr in the delay slot executes only when the
! branch is taken (FPU was NOT in use) -- fprs is restored directly.
! Otherwise fall through to reload the saved fpregs first.
637 bz,a .sfmmu_inv_finished
638 wr %l0, 0, %fprs ! restore fprs
639
640 ! restore fpregs from stack
641 ldda [%l1]ASI_BLK_P, %d0
642 membar #Sync
643 wr %l0, 0, %fprs ! restore fprs
644
645 .sfmmu_inv_finished:
646 ! kpreempt_enable();
647 ldsb [THREAD_REG + T_PREEMPT], %l3
648 dec %l3
649 stb %l3, [THREAD_REG + T_PREEMPT]
650 ret
651 restore
652 SET_SIZE(sfmmu_inv_tsb_fast)
653
654 /*
655 * Prefetch "struct tsbe" while walking TSBs.
656 * prefetch 7 cache lines ahead of where we are at now.
657 * #n_reads is being used since #one_read only applies to
658 * floating point reads, and we are not doing floating point
659 * reads. However, this has the negative side effect of polluting
660 * the ecache.
661 * The 448 comes from (7 * 64) which is how far ahead of our current
662 * address, we want to prefetch.
663 */
664 ENTRY(prefetch_tsbe_read)
665 retl
! Delay slot: prefetch 448 bytes (7 x 64-byte cache lines) ahead of
! the current tsbe pointer, for multiple reads (#n_reads); see the
! block comment above for why #one_read is not used.
666 prefetch [%o0+448], #n_reads
667 SET_SIZE(prefetch_tsbe_read)
668
669 /* Prefetch the tsbe that we are about to write */
670 ENTRY(prefetch_tsbe_write)
671 retl
! Delay slot: prefetch for writing the tsbe we are about to store to.
672 prefetch [%o0], #n_writes
673 SET_SIZE(prefetch_tsbe_write)
674
|