11787 Kernel needs to be built with retpolines
11788 Kernel needs to generally use RSB stuffing
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: John Levon <john.levon@joyent.com>
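
The second item in the synopsis, RSB stuffing (11788), does not appear in the hunks below, which cover only the retpoline conversion of indirect calls in this file. For reference, a minimal sketch of the conventional RSB-stuffing sequence from the published Spectre v2 mitigations follows; the unrolling factor and labels are the standard construction, not necessarily the exact illumos macro.

	/*
	 * Sketch only: fill the 32-entry Return Stack Buffer with benign
	 * entries so that a mispredicted RET cannot consume an
	 * attacker-primed one. Each CALL pushes an RSB entry whose
	 * "return" point is a speculation trap (pause; lfence; jmp)
	 * that is never reached architecturally.
	 */
	movl	$16, %ecx
1:	call	2f			/* push RSB entry */
3:	pause
	lfence
	jmp	3b			/* speculation trap */
2:	call	4f			/* push second RSB entry */
5:	pause
	lfence
	jmp	5b			/* speculation trap */
4:	decl	%ecx
	jnz	1b
	addq	$(32 * 8), %rsp		/* drop the 32 return addresses */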

*** 197,207 ****
  	movq	_CONST(_MUL(callback_id, CPTRSIZE))(%r15), %r15 ;\
  	cmpq	$0, %r15 ;\
  	je	1f ;\
  	movq	%r15, 16(%rsp)	/* save the callback pointer */ ;\
  	push_userland_ret	/* push the return address */ ;\
! 	call	*24(%rsp)	/* call callback */ ;\
  1:	movq	%gs:CPU_RTMP_R15, %r15	/* restore %r15 */ ;\
  	movq	%gs:CPU_RTMP_RSP, %rsp	/* restore the stack pointer */
  
  #define	MSTATE_TRANSITION(from, to) \
  	movl	$from, %edi; \
--- 197,208 ----
  	movq	_CONST(_MUL(callback_id, CPTRSIZE))(%r15), %r15 ;\
  	cmpq	$0, %r15 ;\
  	je	1f ;\
  	movq	%r15, 16(%rsp)	/* save the callback pointer */ ;\
  	push_userland_ret	/* push the return address */ ;\
! 	movq	24(%rsp), %r15	/* load callback pointer */ ;\
! 	INDIRECT_CALL_REG(r15)	/* call callback */ ;\
  1:	movq	%gs:CPU_RTMP_R15, %r15	/* restore %r15 */ ;\
  	movq	%gs:CPU_RTMP_RSP, %rsp	/* restore the stack pointer */
  
  #define	MSTATE_TRANSITION(from, to) \
  	movl	$from, %edi; \
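Both patterns above repeat throughout the file: a register-indirect call becomes INDIRECT_CALL_REG(reg), and a memory-indirect call (as here) must first load its target into a scratch register, since a retpoline thunk dispatches through a register. As a sketch of what such a thunk looks like, assuming the usual __x86_indirect_thunk_<reg> construction (the illumos implementation may differ in detail):

	/* Hypothetical retpoline thunk for %r15, the standard construction. */
__x86_indirect_thunk_r15:
	call	2f		/* push the trap address; RSB records it */
1:	pause			/* speculative execution spins here ... */
	lfence			/* ... instead of at an attacker-chosen target */
	jmp	1b
2:	movq	%r15, (%rsp)	/* replace return address with real target */
	ret			/* architecturally jumps to the target;
				   the RSB prediction lands in the trap */

In the syscall-dispatch hunks below, loading the target through %rax is a natural choice, as %rax is about to be overwritten by the handler's return value anyway.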
*** 573,583 ****
  	cmpl	$NSYSCALL, %eax
  	jae	_syscall_ill
  	shll	$SYSENT_SIZE_SHIFT, %eax
  	leaq	sysent(%rax), %rbx
  
! 	call	*SY_CALLC(%rbx)
  
  	movq	%rax, %r12
  	movq	%rdx, %r13
  
  	/*
--- 574,585 ----
  	cmpl	$NSYSCALL, %eax
  	jae	_syscall_ill
  	shll	$SYSENT_SIZE_SHIFT, %eax
  	leaq	sysent(%rax), %rbx
  
! 	movq	SY_CALLC(%rbx), %rax
! 	INDIRECT_CALL_REG(rax)
  
  	movq	%rax, %r12
  	movq	%rdx, %r13
  
  	/*
*** 649,659 ****
  	 * getting here. This should be safe because it means that the only
  	 * values on the bus after this are based on the user's registers and
  	 * potentially the addresses where we stored them. Given the constraints
  	 * of sysret, that's how it has to be.
  	 */
! 	call	*x86_md_clear
  
  	/*
  	 * To get back to userland, we need the return %rip in %rcx and
  	 * the return %rfl in %r11d. The sysretq instruction also arranges
  	 * to fix up %cs and %ss; everything else is our responsibility.
--- 651,661 ----
  	 * getting here. This should be safe because it means that the only
  	 * values on the bus after this are based on the user's registers and
  	 * potentially the addresses where we stored them. Given the constraints
  	 * of sysret, that's how it has to be.
  	 */
! 	call	x86_md_clear
  
  	/*
  	 * To get back to userland, we need the return %rip in %rcx and
  	 * the return %rfl in %r11d. The sysretq instruction also arranges
  	 * to fix up %cs and %ss; everything else is our responsibility.
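Note that the x86_md_clear hunks (here and below) go the other way: the indirect call through a function pointer becomes a direct call, presumably because the symbol is now a plain text routine whose body is selected at startup, removing the indirect branch entirely. For context, a sketch of the documented VERW-based MD_CLEAR flushing idiom such a routine would contain; the entry name is a placeholder, not necessarily the illumos one.

	/* Sketch of a VERW-based microarchitectural buffer flush. */
	ENTRY_NP(md_clear_sketch)	/* hypothetical name */
	subq	$8, %rsp
	movw	%ds, (%rsp)		/* any valid selector will do */
	verw	(%rsp)			/* MD_CLEAR: flush uarch buffers */
	addq	$8, %rsp
	ret
	SET_SIZE(md_clear_sketch)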
*** 900,910 ****
  	movl	0x10(%rsp), %edx
  	movl	0x18(%rsp), %ecx
  	movl	0x20(%rsp), %r8d
  	movl	0x28(%rsp), %r9d
  
! 	call	*SY_CALLC(%rbx)
  
  	movq	%rbp, %rsp	/* pop the args */
  
  	/*
  	 * amd64 syscall handlers -always- return a 64-bit value in %rax.
--- 902,913 ----
  	movl	0x10(%rsp), %edx
  	movl	0x18(%rsp), %ecx
  	movl	0x20(%rsp), %r8d
  	movl	0x28(%rsp), %r9d
  
! 	movq	SY_CALLC(%rbx), %rax
! 	INDIRECT_CALL_REG(rax)
  
  	movq	%rbp, %rsp	/* pop the args */
  
  	/*
  	 * amd64 syscall handlers -always- return a 64-bit value in %rax.
*** 947,957 ****
  	 * getting here. This should be safe because it means that the only
  	 * values on the bus after this are based on the user's registers and
  	 * potentially the addresses where we stored them. Given the constraints
  	 * of sysret, that's how it has to be.
  	 */
! 	call	*x86_md_clear
  
  	/*
  	 * To get back to userland, we need to put the return %rip in %rcx and
  	 * the return %rfl in %r11d. The sysret instruction also arranges
  	 * to fix up %cs and %ss; everything else is our responsibility.
--- 950,960 ----
  	 * getting here. This should be safe because it means that the only
  	 * values on the bus after this are based on the user's registers and
  	 * potentially the addresses where we stored them. Given the constraints
  	 * of sysret, that's how it has to be.
  	 */
! 	call	x86_md_clear
  
  	/*
  	 * To get back to userland, we need to put the return %rip in %rcx and
  	 * the return %rfl in %r11d. The sysret instruction also arranges
  	 * to fix up %cs and %ss; everything else is our responsibility.
*** 1187,1197 ****
  	movl	0x10(%rsp), %edx
  	movl	0x18(%rsp), %ecx
  	movl	0x20(%rsp), %r8d
  	movl	0x28(%rsp), %r9d
  
! 	call	*SY_CALLC(%rbx)
  
  	movq	%rbp, %rsp	/* pop the args */
  
  	/*
  	 * amd64 syscall handlers -always- return a 64-bit value in %rax.
--- 1190,1201 ----
  	movl	0x10(%rsp), %edx
  	movl	0x18(%rsp), %ecx
  	movl	0x20(%rsp), %r8d
  	movl	0x28(%rsp), %r9d
  
! 	movq	SY_CALLC(%rbx), %rax
! 	INDIRECT_CALL_REG(rax)
  
  	movq	%rbp, %rsp	/* pop the args */
  
  	/*
  	 * amd64 syscall handlers -always- return a 64-bit value in %rax.
*** 1255,1265 ****
  	movl	REGOFF_RIP(%rsp), %edx	/* sysexit: %edx -> %eip */
  	pushq	REGOFF_RFL(%rsp)
  	popfq
  	movl	REGOFF_RSP(%rsp), %ecx	/* sysexit: %ecx -> %esp */
  	ALTENTRY(sys_sysenter_swapgs_sysexit)
! 	call	*x86_md_clear
  	jmp	tr_sysexit
  	SET_SIZE(sys_sysenter_swapgs_sysexit)
  	SET_SIZE(sys_sysenter)
  	SET_SIZE(_sys_sysenter_post_swapgs)
  	SET_SIZE(brand_sys_sysenter)
--- 1259,1269 ----
  	movl	REGOFF_RIP(%rsp), %edx	/* sysexit: %edx -> %eip */
  	pushq	REGOFF_RFL(%rsp)
  	popfq
  	movl	REGOFF_RSP(%rsp), %ecx	/* sysexit: %ecx -> %esp */
  	ALTENTRY(sys_sysenter_swapgs_sysexit)
! 	call	x86_md_clear
  	jmp	tr_sysexit
  	SET_SIZE(sys_sysenter_swapgs_sysexit)
  	SET_SIZE(sys_sysenter)
  	SET_SIZE(_sys_sysenter_post_swapgs)
  	SET_SIZE(brand_sys_sysenter)
*** 1312,1322 ****
  	 *
  	 * We want to swapgs to maintain the invariant that all entries into
  	 * tr_iret_user are done on the user gsbase.
  	 */
  	ALTENTRY(sys_sysint_swapgs_iret)
! 	call	*x86_md_clear
  	SWAPGS
  	jmp	tr_iret_user
  	/*NOTREACHED*/
  	SET_SIZE(sys_sysint_swapgs_iret)
  	SET_SIZE(sys_syscall_int)
--- 1316,1326 ----
  	 *
  	 * We want to swapgs to maintain the invariant that all entries into
  	 * tr_iret_user are done on the user gsbase.
  	 */
  	ALTENTRY(sys_sysint_swapgs_iret)
! 	call	x86_md_clear
  	SWAPGS
  	jmp	tr_iret_user
  	/*NOTREACHED*/
  	SET_SIZE(sys_sysint_swapgs_iret)
  	SET_SIZE(sys_syscall_int)