Print this page
11787 Kernel needs to be built with retpolines
11788 Kernel needs to generally use RSB stuffing
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: John Levon <john.levon@joyent.com>

*** 34,44 ****
  
  /*	Copyright (c) 1987, 1988 Microsoft Corporation	*/
  /*	  All Rights Reserved	*/
  
  /*
!  * Copyright (c) 2018 Joyent, Inc.
   */
  
  #include <sys/errno.h>
  #include <sys/asm_linkage.h>
  
--- 34,44 ----
  
  /*	Copyright (c) 1987, 1988 Microsoft Corporation	*/
  /*	  All Rights Reserved	*/
  
  /*
!  * Copyright 2019 Joyent, Inc.
   */
  
  #include <sys/errno.h>
  #include <sys/asm_linkage.h>
  
*** 480,490 ****
  	leaq	L(fwdPxQx)(%rip), %r10
  	addq	%rdx, %rdi
  	addq	%rdx, %rsi
  	movslq	(%r10,%rdx,4), %rcx
  	leaq	(%rcx,%r10,1), %r10
! 	jmpq	*%r10
  
  	.p2align 4
  L(fwdPxQx):
  	.int       L(P0Q0)-L(fwdPxQx)	/* 0 */
  	.int       L(P1Q0)-L(fwdPxQx)
--- 480,490 ----
  	leaq	L(fwdPxQx)(%rip), %r10
  	addq	%rdx, %rdi
  	addq	%rdx, %rsi
  	movslq	(%r10,%rdx,4), %rcx
  	leaq	(%rcx,%r10,1), %r10
! 	INDIRECT_JMP_REG(r10)
  
  	.p2align 4
  L(fwdPxQx):
  	.int       L(P0Q0)-L(fwdPxQx)	/* 0 */
  	.int       L(P1Q0)-L(fwdPxQx)
*** 936,946 ****
  	leaq	L(fwdPxQx)(%rip), %r10
  	addq	%rdx, %rdi
  	addq	%rdx, %rsi
  	movslq	(%r10,%rdx,4), %rcx
  	leaq	(%rcx,%r10,1), %r10
! 	jmpq	*%r10
  
  	/*
  	 * Use rep smovq. Clear remainder via unrolled code
  	 */
  	.p2align 4
--- 936,946 ----
  	leaq	L(fwdPxQx)(%rip), %r10
  	addq	%rdx, %rdi
  	addq	%rdx, %rsi
  	movslq	(%r10,%rdx,4), %rcx
  	leaq	(%rcx,%r10,1), %r10
! 	INDIRECT_JMP_REG(r10)
  
  	/*
  	 * Use rep smovq. Clear remainder via unrolled code
  	 */
  	.p2align 4
*** 1168,1178 ****
  	 */
  	leaq	L(setPxQx)(%rip), %r10
  	addq	%rsi, %rdi
  	movslq	(%r10,%rsi,4), %rcx
  	leaq	(%rcx,%r10,1), %r10
! 	jmpq	*%r10
  
  	.p2align 4
  L(setPxQx):
  	.int       L(P0Q0)-L(setPxQx)	/* 0 */
  	.int       L(P1Q0)-L(setPxQx)
--- 1168,1178 ----
  	 */
  	leaq	L(setPxQx)(%rip), %r10
  	addq	%rsi, %rdi
  	movslq	(%r10,%rsi,4), %rcx
  	leaq	(%rcx,%r10,1), %r10
! 	INDIRECT_JMP_REG(r10)
  
  	.p2align 4
  L(setPxQx):
  	.int       L(P0Q0)-L(setPxQx)	/* 0 */
  	.int       L(P1Q0)-L(setPxQx)
*** 1439,1449 ****
  9:
  	leaq	L(setPxQx)(%rip), %r10
  	addq	%rsi, %rdi
  	movslq	(%r10,%rsi,4), %rcx
  	leaq	(%rcx,%r10,1), %r10
! 	jmpq	*%r10
  
  	/*
  	 * Use rep sstoq. Clear any remainder via unrolled code
  	 */
  	.p2align 4
--- 1439,1449 ----
  9:
  	leaq	L(setPxQx)(%rip), %r10
  	addq	%rsi, %rdi
  	movslq	(%r10,%rsi,4), %rcx
  	leaq	(%rcx,%r10,1), %r10
! 	INDIRECT_JMP_REG(r10)
  
  	/*
  	 * Use rep sstoq. Clear any remainder via unrolled code
  	 */
  	.p2align 4
*** 1575,1585 ****
  	 */
  	movq	(%rsp), %rdi
  	movq	0x8(%rsp), %rsi
  	movq	0x10(%rsp), %rdx
  	leave
! 	jmp	*CP_COPYIN(%rax)
  
  2:	movl	$-1, %eax
  	leave
  	ret
  	SET_SIZE(copyin)
--- 1575,1586 ----
  	 */
  	movq	(%rsp), %rdi
  	movq	0x8(%rsp), %rsi
  	movq	0x10(%rsp), %rdx
  	leave
! 	movq	CP_COPYIN(%rax), %rax
! 	INDIRECT_JMP_REG(rax)
  
  2:	movl	$-1, %eax
  	leave
  	ret
  	SET_SIZE(copyin)
*** 1720,1730 ****
  	 */
  	movq	(%rsp), %rdi
  	movq	0x8(%rsp), %rsi
  	movq	0x10(%rsp), %rdx
  	leave
! 	jmp	*CP_XCOPYIN(%r8)
  
  2:	leave
  	ret
  	SET_SIZE(xcopyin_nta)
  
--- 1721,1732 ----
  	 */
  	movq	(%rsp), %rdi
  	movq	0x8(%rsp), %rsi
  	movq	0x10(%rsp), %rdx
  	leave
! 	movq	CP_XCOPYIN(%r8), %r8
! 	INDIRECT_JMP_REG(r8)
  
  2:	leave
  	ret
  	SET_SIZE(xcopyin_nta)
  
*** 1863,1873 ****
  	 */
  	movq	(%rsp), %rdi
  	movq	0x8(%rsp), %rsi
  	movq	0x10(%rsp), %rdx
  	leave
! 	jmp	*CP_COPYOUT(%rax)
  
  2:	movl	$-1, %eax
  	leave
  	ret
  	SET_SIZE(copyout)
--- 1865,1876 ----
  	 */
  	movq	(%rsp), %rdi
  	movq	0x8(%rsp), %rsi
  	movq	0x10(%rsp), %rdx
  	leave
! 	movq	CP_COPYOUT(%rax), %rax
! 	INDIRECT_JMP_REG(rax)
  
  2:	movl	$-1, %eax
  	leave
  	ret
  	SET_SIZE(copyout)
*** 2008,2018 ****
  	 */
  	movq	(%rsp), %rdi
  	movq	0x8(%rsp), %rsi
  	movq	0x10(%rsp), %rdx
  	leave
! 	jmp	*CP_XCOPYOUT(%r8)
  
  2:	leave
  	ret
  	SET_SIZE(xcopyout_nta)
  
--- 2011,2022 ----
  	 */
  	movq	(%rsp), %rdi
  	movq	0x8(%rsp), %rsi
  	movq	0x10(%rsp), %rdx
  	leave
! 	movq	CP_XCOPYOUT(%r8), %r8
! 	INDIRECT_JMP_REG(r8)
  
  2:	leave
  	ret
  	SET_SIZE(xcopyout_nta)
  
*** 2322,2332 ****
  	movq	(%rsp), %rdi
  	movq	0x8(%rsp), %rsi
  	movq	0x10(%rsp), %rdx
  	movq	0x18(%rsp), %rcx
  	leave
! 	jmp	*CP_COPYINSTR(%rax)
  
  2:	movl	$EFAULT, %eax		/* return EFAULT */
  	leave
  	ret
  	SET_SIZE(copyinstr)
--- 2326,2337 ----
  	movq	(%rsp), %rdi
  	movq	0x8(%rsp), %rsi
  	movq	0x10(%rsp), %rdx
  	movq	0x18(%rsp), %rcx
  	leave
! 	movq	CP_COPYINSTR(%rax), %rax
! 	INDIRECT_JMP_REG(rax)
  
  2:	movl	$EFAULT, %eax		/* return EFAULT */
  	leave
  	ret
  	SET_SIZE(copyinstr)
*** 2444,2454 ****
  	movq	(%rsp), %rdi
  	movq	0x8(%rsp), %rsi
  	movq	0x10(%rsp), %rdx
  	movq	0x18(%rsp), %rcx
  	leave
! 	jmp	*CP_COPYOUTSTR(%rax)
  
  2:	movl	$EFAULT, %eax		/* return EFAULT */
  	leave
  	ret
  	SET_SIZE(copyoutstr)
--- 2449,2460 ----
  	movq	(%rsp), %rdi
  	movq	0x8(%rsp), %rsi
  	movq	0x10(%rsp), %rdx
  	movq	0x18(%rsp), %rcx
  	leave
! 	movq	CP_COPYOUTSTR(%rax), %rax
! 	INDIRECT_JMP_REG(rax)
  
  2:	movl	$EFAULT, %eax		/* return EFAULT */
  	leave
  	ret
  	SET_SIZE(copyoutstr)
*** 2560,2570 ****
  	movq	$0, T_LOFAULT(%r9);		\
  1:						\
  	movq	T_COPYOPS(%r9), %rax;		\
  	cmpq	$0, %rax;			\
  	jz	2f;				\
! 	jmp	*COPYOP(%rax);			\
  2:						\
  	movl	$-1, %eax;			\
  	ret;					\
  	SET_SIZE(NAME)
  
--- 2566,2577 ----
  	movq	$0, T_LOFAULT(%r9);		\
  1:						\
  	movq	T_COPYOPS(%r9), %rax;		\
  	cmpq	$0, %rax;			\
  	jz	2f;				\
! 	movq	COPYOP(%rax), %rax;		\
! 	INDIRECT_JMP_REG(rax);			\
  2:						\
  	movl	$-1, %eax;			\
  	ret;					\
  	SET_SIZE(NAME)
  
*** 2669,2679 ****
  	movq	$0, T_LOFAULT(%r9);		\
  1:						\
  	movq	T_COPYOPS(%r9), %rax;		\
  	cmpq	$0, %rax;			\
  	jz	3f;				\
! 	jmp	*COPYOP(%rax);			\
  3:						\
  	movl	$-1, %eax;			\
  	ret;					\
  	SET_SIZE(NAME)
  
--- 2676,2687 ----
  	movq	$0, T_LOFAULT(%r9);		\
  1:						\
  	movq	T_COPYOPS(%r9), %rax;		\
  	cmpq	$0, %rax;			\
  	jz	3f;				\
! 	movq	COPYOP(%rax), %rax;		\
! 	INDIRECT_JMP_REG(rax);			\
  3:						\
  	movl	$-1, %eax;			\
  	ret;					\
  	SET_SIZE(NAME)
  