Skip to content

Commit

Permalink
Merge tag 'v4.14.297' of git://git.kernel.org/pub/scm/linux/kernel/gi…
Browse files Browse the repository at this point in the history
…t/stable/linux into fod_onpress

This is the 4.14.297 stable release

Signed-off-by: Harish <[email protected]>
  • Loading branch information
Codecity001 committed Nov 2, 2022
1 parent 0d7aa44 commit 6586e5f
Show file tree
Hide file tree
Showing 27 changed files with 898 additions and 162 deletions.
8 changes: 8 additions & 0 deletions Documentation/admin-guide/hw-vuln/spectre.rst
Original file line number Diff line number Diff line change
Expand Up @@ -422,6 +422,14 @@ The possible values in this file are:
'RSB filling' Protection of RSB on context switch enabled
============= ===========================================

- EIBRS Post-barrier Return Stack Buffer (PBRSB) protection status:

=========================== =======================================================
'PBRSB-eIBRS: SW sequence' CPU is affected and protection of RSB on VMEXIT enabled
'PBRSB-eIBRS: Vulnerable' CPU is vulnerable
'PBRSB-eIBRS: Not affected' CPU is not affected by PBRSB
=========================== =======================================================

Full mitigation might require a microcode update from the CPU
vendor. When the necessary microcode is not available, the kernel will
report vulnerability.
Expand Down
13 changes: 13 additions & 0 deletions Documentation/admin-guide/kernel-parameters.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4007,6 +4007,18 @@

retain_initrd [RAM] Keep initrd memory after extraction

retbleed= [X86] Control mitigation of RETBleed (Arbitrary
Speculative Code Execution with Return Instructions)
vulnerability.

off - unconditionally disable
auto - automatically select a mitigation

Selecting 'auto' will choose a mitigation method at run
time according to the CPU.

Not specifying this option is equivalent to retbleed=auto.

rfkill.default_state=
0 "airplane mode". All wifi, bluetooth, wimax, gps, fm,
etc. communication is blocked by default.
Expand Down Expand Up @@ -4246,6 +4258,7 @@
eibrs - enhanced IBRS
eibrs,retpoline - enhanced IBRS + Retpolines
eibrs,lfence - enhanced IBRS + LFENCE
ibrs - use IBRS to protect kernel

Not specifying this option is equivalent to
spectre_v2=auto.
Expand Down
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 296
SUBLEVEL = 297
EXTRAVERSION =
NAME = Petit Gorille

Expand Down
68 changes: 59 additions & 9 deletions arch/x86/entry/calling.h
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>

/*
Expand Down Expand Up @@ -146,27 +148,19 @@ For 32-bit we have the following conventions - kernel is built with

.endm

.macro POP_REGS pop_rdi=1 skip_r11rcx=0
.macro POP_REGS pop_rdi=1
popq %r15
popq %r14
popq %r13
popq %r12
popq %rbp
popq %rbx
.if \skip_r11rcx
popq %rsi
.else
popq %r11
.endif
popq %r10
popq %r9
popq %r8
popq %rax
.if \skip_r11rcx
popq %rsi
.else
popq %rcx
.endif
popq %rdx
popq %rsi
.if \pop_rdi
Expand Down Expand Up @@ -336,6 +330,62 @@ For 32-bit we have the following conventions - kernel is built with

#endif

/*
* IBRS kernel mitigation for Spectre_v2.
*
* Assumes full context is established (PUSH_REGS, CR3 and GS) and it clobbers
* the regs it uses (AX, CX, DX). Must be called before the first RET
* instruction (NOTE! UNTRAIN_RET includes a RET instruction)
*
* The optional argument is used to save/restore the current value,
* which is used on the paranoid paths.
*
* Assumes x86_spec_ctrl_{base,current} to have SPEC_CTRL_IBRS set.
*/
.macro IBRS_ENTER save_reg
/* NOP'd out (jmp .Lend) unless the CPU has X86_FEATURE_KERNEL_IBRS set. */
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
movl $MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
/* Read current SPEC_CTRL (edx:eax) and fold it into one 64-bit value. */
rdmsr
shl $32, %rdx
or %rdx, %rax
/* Stash the old value so the paranoid exit path can restore it. */
mov %rax, \save_reg
/*
 * If IBRS is already enabled there is nothing to write; an LFENCE is
 * used instead of WRMSR as the speculation barrier in that case.
 */
test $SPEC_CTRL_IBRS, %eax
jz .Ldo_wrmsr_\@
lfence
jmp .Lend_\@
.Ldo_wrmsr_\@:
.endif

/* Write the per-CPU kernel SPEC_CTRL value (edx:eax split for wrmsr). */
movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx
movl %edx, %eax
shr $32, %rdx
wrmsr
.Lend_\@:
.endm

/*
* Similar to IBRS_ENTER, requires KERNEL GS,CR3 and clobbers (AX, CX, DX)
* regs. Must be called after the last RET.
*/
.macro IBRS_EXIT save_reg
/* NOP'd out (jmp .Lend) unless the CPU has X86_FEATURE_KERNEL_IBRS set. */
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_KERNEL_IBRS
movl $MSR_IA32_SPEC_CTRL, %ecx

.ifnb \save_reg
/* Paranoid path: restore the exact value stashed by IBRS_ENTER. */
mov \save_reg, %rdx
.else
/* Normal return-to-user path: current value with the IBRS bit cleared. */
movq PER_CPU_VAR(x86_spec_ctrl_current), %rdx
andl $(~SPEC_CTRL_IBRS), %edx
.endif

/* wrmsr takes the value split across edx:eax. */
movl %edx, %eax
shr $32, %rdx
wrmsr
.Lend_\@:
.endm

/*
* Mitigate Spectre v1 for conditional swapgs code paths.
*
Expand Down
2 changes: 0 additions & 2 deletions arch/x86/entry/entry_32.S
Original file line number Diff line number Diff line change
Expand Up @@ -245,7 +245,6 @@ ENTRY(__switch_to_asm)
movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
/*
* When switching from a shallower to a deeper call stack
* the RSB may either underflow or use entries populated
Expand All @@ -254,7 +253,6 @@ ENTRY(__switch_to_asm)
* speculative execution to prevent attack.
*/
FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

/* restore callee-saved registers */
popfl
Expand Down
38 changes: 34 additions & 4 deletions arch/x86/entry/entry_64.S
Original file line number Diff line number Diff line change
Expand Up @@ -230,6 +230,10 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)

/* IRQs are off. */
movq %rsp, %rdi

/* clobbers %rax, make sure it is after saving the syscall nr */
IBRS_ENTER

call do_syscall_64 /* returns with IRQs disabled */

TRACE_IRQS_IRETQ /* we're about to change IF */
Expand Down Expand Up @@ -301,8 +305,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
* perf profiles. Nothing jumps here.
*/
syscall_return_via_sysret:
/* rcx and r11 are already restored (see code above) */
POP_REGS pop_rdi=0 skip_r11rcx=1
IBRS_EXIT
POP_REGS pop_rdi=0

/*
* Now all regs are restored except RSP and RDI.
Expand Down Expand Up @@ -353,7 +357,6 @@ ENTRY(__switch_to_asm)
movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
/*
* When switching from a shallower to a deeper call stack
* the RSB may either underflow or use entries populated
Expand All @@ -362,7 +365,6 @@ ENTRY(__switch_to_asm)
* speculative execution to prevent attack.
*/
FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

/* restore callee-saved registers */
popfq
Expand Down Expand Up @@ -591,6 +593,7 @@ GLOBAL(retint_user)
TRACE_IRQS_IRETQ

GLOBAL(swapgs_restore_regs_and_return_to_usermode)
IBRS_EXIT
#ifdef CONFIG_DEBUG_ENTRY
/* Assert that pt_regs indicates user mode. */
testb $3, CS(%rsp)
Expand Down Expand Up @@ -1134,6 +1137,9 @@ idtentry machine_check do_mce has_error_code=0 paranoid=1
* Save all registers in pt_regs, and switch gs if needed.
* Use slow, but surefire "are we in kernel?" check.
* Return: ebx=0: need swapgs on exit, ebx=1: otherwise
*
* R14 - old CR3
* R15 - old SPEC_CTRL
*/
ENTRY(paranoid_entry)
UNWIND_HINT_FUNC
Expand All @@ -1157,6 +1163,12 @@ ENTRY(paranoid_entry)
*/
FENCE_SWAPGS_KERNEL_ENTRY

/*
* Once we have CR3 and %GS setup save and set SPEC_CTRL. Just like
* CR3 above, keep the old value in a callee saved register.
*/
IBRS_ENTER save_reg=%r15

ret
END(paranoid_entry)

Expand All @@ -1171,9 +1183,19 @@ END(paranoid_entry)
* to try to handle preemption here.
*
* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
*
* R14 - old CR3
* R15 - old SPEC_CTRL
*/
ENTRY(paranoid_exit)
UNWIND_HINT_REGS

/*
* Must restore IBRS state before both CR3 and %GS since we need access
* to the per-CPU x86_spec_ctrl_shadow variable.
*/
IBRS_EXIT save_reg=%r15

DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF_DEBUG
testl %ebx, %ebx /* swapgs needed? */
Expand Down Expand Up @@ -1208,8 +1230,10 @@ ENTRY(error_entry)
FENCE_SWAPGS_USER_ENTRY
/* We have user CR3. Change to kernel CR3. */
SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
IBRS_ENTER

.Lerror_entry_from_usermode_after_swapgs:

/* Put us onto the real thread stack. */
popq %r12 /* save return addr in %12 */
movq %rsp, %rdi /* arg0 = pt_regs pointer */
Expand Down Expand Up @@ -1272,6 +1296,7 @@ ENTRY(error_entry)
SWAPGS
FENCE_SWAPGS_USER_ENTRY
SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
IBRS_ENTER

/*
* Pretend that the exception came from user mode: set up pt_regs
Expand Down Expand Up @@ -1377,6 +1402,8 @@ ENTRY(nmi)
PUSH_AND_CLEAR_REGS rdx=(%rdx)
ENCODE_FRAME_POINTER

IBRS_ENTER

/*
* At this point we no longer need to worry about stack damage
* due to nesting -- we're on the normal thread stack and we're
Expand Down Expand Up @@ -1600,6 +1627,9 @@ end_repeat_nmi:
movq $-1, %rsi
call do_nmi

/* Always restore stashed SPEC_CTRL value (see paranoid_entry) */
IBRS_EXIT save_reg=%r15

RESTORE_CR3 scratch_reg=%r15 save_reg=%r14

testl %ebx, %ebx /* swapgs needed? */
Expand Down
12 changes: 11 additions & 1 deletion arch/x86/entry/entry_64_compat.S
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
*
* Copyright 2000-2002 Andi Kleen, SuSE Labs.
*/
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/errno.h>
Expand All @@ -17,6 +16,8 @@
#include <linux/linkage.h>
#include <linux/err.h>

#include "calling.h"

.section .entry.text, "ax"

/*
Expand Down Expand Up @@ -106,6 +107,8 @@ ENTRY(entry_SYSENTER_compat)
xorl %r15d, %r15d /* nospec r15 */
cld

IBRS_ENTER

/*
* SYSENTER doesn't filter flags, so we need to clear NT and AC
* ourselves. To save a few cycles, we can check whether
Expand Down Expand Up @@ -250,6 +253,8 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
*/
TRACE_IRQS_OFF

IBRS_ENTER

movq %rsp, %rdi
call do_fast_syscall_32
/* XEN PV guests always use IRET path */
Expand All @@ -259,6 +264,9 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
/* Opportunistic SYSRET */
sysret32_from_system_call:
TRACE_IRQS_ON /* User mode traces as IRQs on. */

IBRS_EXIT

movq RBX(%rsp), %rbx /* pt_regs->rbx */
movq RBP(%rsp), %rbp /* pt_regs->rbp */
movq EFLAGS(%rsp), %r11 /* pt_regs->flags (in r11) */
Expand Down Expand Up @@ -385,6 +393,8 @@ ENTRY(entry_INT80_compat)
*/
TRACE_IRQS_OFF

IBRS_ENTER

movq %rsp, %rdi
call do_int80_syscall_32
.Lsyscall_32_done:
Expand Down
Loading

0 comments on commit 6586e5f

Please sign in to comment.