/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2019 Joyent, Inc.
 */

        .file   "retpoline.s"

/*
 * This file implements the various hooks that are needed for retpolines and
 * return stack buffer (RSB) stuffing. For more information, please see the
 * 'Speculative Execution CPU Side Channel Security' section of the
 * uts/i86pc/os/cpuid.c big theory statement.
 */
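
/*
 * Note for readers: when the kernel is built with a retpoline-aware
 * compiler (for example, gcc's -mindirect-branch=thunk-extern option),
 * indirect branches are rewritten to go through the thunks defined in
 * this file rather than being issued directly. As an illustrative
 * sketch, an indirect call such as:
 *
 *	call	*%rax
 *
 * is instead emitted as:
 *
 *	call	__x86_indirect_thunk_rax
 *
 * and the thunk performs the transfer to the address in %rax in a way
 * that constrains speculative execution.
 */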

#include <sys/asm_linkage.h>
#include <sys/x86_archext.h>

#if defined(__amd64)

/*
 * This macro generates the default retpoline entry point that the compiler
 * expects. It implements the expected retpoline form.
 */
#define RETPOLINE_MKTHUNK(reg) \
        ENTRY(__x86_indirect_thunk_/**/reg)     \
        call    2f;                             \
1:                                              \
        pause;                                  \
        lfence;                                 \
        jmp     1b;                             \
2:                                              \
        movq    %/**/reg, (%rsp);               \
        ret;                                    \
        SET_SIZE(__x86_indirect_thunk_/**/reg)
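
/*
 * A brief sketch of how the thunk above defeats indirect branch
 * speculation, using the %rax variant as an example:
 *
 *	__x86_indirect_thunk_rax:
 *		call	2f		- pushes the address of 1: onto the
 *					  stack and into the RSB, so returns
 *					  are predicted into the loop below
 *	1:	pause			- speculation that consumes that RSB
 *		lfence			  entry spins harmlessly here
 *		jmp	1b
 *	2:	movq	%rax, (%rsp)	- overwrite the saved return address
 *					  with the real branch target
 *		ret			- transfers control to the value in
 *					  %rax once its target is resolved
 *					  from the stack
 */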

/*
 * This macro generates the default retpoline form. It exists in addition to
 * the thunk so that, if we need to restore the default retpoline behavior to
 * the thunk, we can.
 */
#define RETPOLINE_MKGENERIC(reg) \
        ENTRY(__x86_indirect_thunk_gen_/**/reg) \
        call    2f;                             \
1:                                              \
        pause;                                  \
        lfence;                                 \
        jmp     1b;                             \
2:                                              \
        movq    %/**/reg, (%rsp);               \
        ret;                                    \
        SET_SIZE(__x86_indirect_thunk_gen_/**/reg)

/*
 * This macro generates the AMD optimized form of a retpoline which will be
 * used on systems where the lfence dispatch serializing behavior has been
 * changed.
 */
#define RETPOLINE_MKLFENCE(reg) \
        ENTRY(__x86_indirect_thunk_amd_/**/reg) \
        lfence;                                 \
        jmp     *%/**/reg;                      \
        SET_SIZE(__x86_indirect_thunk_amd_/**/reg)
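
/*
 * A note on the lfence form above: it relies on lfence being dispatch
 * serializing on these systems, which constrains how far the CPU can
 * speculate through the indirect jmp that follows it. The cpuid.c big
 * theory statement describes the conditions under which this form is
 * chosen.
 */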


/*
 * This macro generates the no-op form of the retpoline which will be used if
 * we need to disable retpolines, either because we have enhanced IBRS or
 * because we have been asked to disable mitigations.
 */
#define RETPOLINE_MKJUMP(reg) \
        ENTRY(__x86_indirect_thunk_jmp_/**/reg) \
        jmp     *%/**/reg;                      \
        SET_SIZE(__x86_indirect_thunk_jmp_/**/reg)
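
/*
 * The _gen_, _amd_, and _jmp_ variants above are not referenced directly
 * by compiled code; the compiler only ever emits references to the plain
 * __x86_indirect_thunk_<reg> entry points. At boot, the kernel decides,
 * based on the CPU's capabilities and the administrator's settings, which
 * body the default thunks should have and patches them accordingly (see
 * the cpuid.c big theory statement referenced above).
 */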

        RETPOLINE_MKTHUNK(rax)
        RETPOLINE_MKTHUNK(rbx)
        RETPOLINE_MKTHUNK(rcx)
        RETPOLINE_MKTHUNK(rdx)
        RETPOLINE_MKTHUNK(rdi)
        RETPOLINE_MKTHUNK(rsi)
        RETPOLINE_MKTHUNK(rbp)
        RETPOLINE_MKTHUNK(r8)
        RETPOLINE_MKTHUNK(r9)
        RETPOLINE_MKTHUNK(r10)
        RETPOLINE_MKTHUNK(r11)
        RETPOLINE_MKTHUNK(r12)
        RETPOLINE_MKTHUNK(r13)
        RETPOLINE_MKTHUNK(r14)
        RETPOLINE_MKTHUNK(r15)

        RETPOLINE_MKGENERIC(rax)
        RETPOLINE_MKGENERIC(rbx)
        RETPOLINE_MKGENERIC(rcx)
        RETPOLINE_MKGENERIC(rdx)
        RETPOLINE_MKGENERIC(rdi)
        RETPOLINE_MKGENERIC(rsi)
        RETPOLINE_MKGENERIC(rbp)
        RETPOLINE_MKGENERIC(r8)
        RETPOLINE_MKGENERIC(r9)
        RETPOLINE_MKGENERIC(r10)
        RETPOLINE_MKGENERIC(r11)
        RETPOLINE_MKGENERIC(r12)
        RETPOLINE_MKGENERIC(r13)
        RETPOLINE_MKGENERIC(r14)
        RETPOLINE_MKGENERIC(r15)

        RETPOLINE_MKLFENCE(rax)
        RETPOLINE_MKLFENCE(rbx)
        RETPOLINE_MKLFENCE(rcx)
        RETPOLINE_MKLFENCE(rdx)
        RETPOLINE_MKLFENCE(rdi)
        RETPOLINE_MKLFENCE(rsi)
        RETPOLINE_MKLFENCE(rbp)
        RETPOLINE_MKLFENCE(r8)
        RETPOLINE_MKLFENCE(r9)
        RETPOLINE_MKLFENCE(r10)
        RETPOLINE_MKLFENCE(r11)
        RETPOLINE_MKLFENCE(r12)
        RETPOLINE_MKLFENCE(r13)
        RETPOLINE_MKLFENCE(r14)
        RETPOLINE_MKLFENCE(r15)

        RETPOLINE_MKJUMP(rax)
        RETPOLINE_MKJUMP(rbx)
        RETPOLINE_MKJUMP(rcx)
        RETPOLINE_MKJUMP(rdx)
        RETPOLINE_MKJUMP(rdi)
        RETPOLINE_MKJUMP(rsi)
        RETPOLINE_MKJUMP(rbp)
        RETPOLINE_MKJUMP(r8)
        RETPOLINE_MKJUMP(r9)
        RETPOLINE_MKJUMP(r10)
        RETPOLINE_MKJUMP(r11)
        RETPOLINE_MKJUMP(r12)
        RETPOLINE_MKJUMP(r13)
        RETPOLINE_MKJUMP(r14)
        RETPOLINE_MKJUMP(r15)

/*
 * The x86_rsb_stuff function is called from pretty arbitrary
 * contexts. It's much easier for us to save and restore all the
 * registers we touch than to clobber them for callers. You must
 * preserve this property or the system will panic at best.
 */
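/*
 * A sketch of what the loop below is doing: each iteration of rsb_loop
 * executes two forward calls. Each call pushes a return address both
 * onto the stack and into the return stack buffer, and each of those
 * RSB entries points at one of the pause/call capture sequences, so a
 * return that is mispredicted from a stale RSB entry lands somewhere
 * harmless. Sixteen iterations push 32 entries, enough to overwrite the
 * RSB depth of the processors this targets. Because none of the pushed
 * return addresses are ever consumed by a ret, %rsp is saved in %rax
 * before the loop and restored afterward.
 */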
        ENTRY(x86_rsb_stuff)
        /*
         * These nops are present so we can patch a ret instruction if we need
         * to disable RSB stuffing because enhanced IBRS is present or we're
         * disabling mitigations.
         */
        nop
        nop
        pushq   %rdi
        pushq   %rax
        movl    $16, %edi
        movq    %rsp, %rax
rsb_loop:
        call    2f
1:
        pause
        call    1b
2:
        call    2f
1:
        pause
        call    1b
2:
        subl    $1, %edi
        jnz     rsb_loop
        movq    %rax, %rsp
        popq    %rax
        popq    %rdi
        ret
        SET_SIZE(x86_rsb_stuff)

#elif defined(__i386)

/*
 * While the kernel is 64-bit only, dboot is still 32-bit, so there are a
 * limited number of variants that are used for 32-bit. However, as dboot is
 * short-lived and uses them sparingly, we only do the full variant and do not
 * have an AMD specific version.
 */

#define RETPOLINE_MKTHUNK(reg) \
        ENTRY(__x86_indirect_thunk_/**/reg)     \
        call    2f;                             \
1:                                              \
        pause;                                  \
        lfence;                                 \
        jmp     1b;                             \
2:                                              \
        movl    %/**/reg, (%esp);               \
        ret;                                    \
        SET_SIZE(__x86_indirect_thunk_/**/reg)

        RETPOLINE_MKTHUNK(edi)
        RETPOLINE_MKTHUNK(eax)

#else
#error "Your architecture is in another castle."
#endif