forked from torvalds/linux

Commit 5515114

Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 pti fixes from Thomas Gleixner:
 "A small set of fixes for the meltdown/spectre mitigations:

   - Make kprobes aware of retpolines to prevent probes in the
     retpoline thunks.

   - Make the machine check exception speculation protected. MCE used
     to issue an indirect call directly from the ASM entry code. Convert
     that to a direct call into a C-function and issue the indirect call
     from there so the compiler can add the retpoline protection.

   - Make the vmexit_fill_RSB() assembly less stupid.

   - Fix a typo in the PTI documentation"

* 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/retpoline: Optimize inline assembler for vmexit_fill_RSB
  x86/pti: Document fix wrong index
  kprobes/x86: Disable optimizing on the function jumps to indirect thunk
  kprobes/x86: Blacklist indirect thunk functions for kprobes
  retpoline: Introduce start/end markers of indirect thunk
  x86/mce: Make machine check speculation protected

2 parents 319f1e0 + 3f7d875  commit 5515114
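
The machine-check item above is the heart of the series: the IDT entry used to issue "call *machine_check_vector(%rip)" straight from assembly, where the compiler cannot apply a retpoline. The entry now makes a direct call to a C function, and the indirect call is issued from C. Below is a condensed sketch of that shape; the types are simplified placeholders, and the authoritative code is in the entry_64.S and mce.c hunks further down.

struct pt_regs;

/* Indirect call target; in the kernel it defaults to unexpected_machine_check(). */
void (*machine_check_vector)(struct pt_regs *, long error_code);

/* The ASM entry stub now does a direct call to this C function ... */
void do_mce(struct pt_regs *regs, long error_code)
{
	/*
	 * ... and the indirect call happens here, so a CONFIG_RETPOLINE
	 * build emits it through a __x86_indirect_thunk_* thunk instead
	 * of a bare indirect call that speculation can hijack.
	 */
	machine_check_vector(regs, error_code);
}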

File tree

8 files changed  +46 -8 lines changed

‎Documentation/x86/pti.txt‎

Lines changed: 1 addition & 1 deletion

@@ -78,7 +78,7 @@ this protection comes at a cost:
      non-PTI SYSCALL entry code, so requires mapping fewer
      things into the userspace page tables. The downside is
      that stacks must be switched at entry time.
-  d. Global pages are disabled for all kernel structures not
+  c. Global pages are disabled for all kernel structures not
      mapped into both kernel and userspace page tables. This
      feature of the MMU allows different processes to share TLB
      entries mapping the kernel. Losing the feature means more

‎arch/x86/entry/entry_64.S‎

Lines changed: 1 addition & 1 deletion

@@ -1264,7 +1264,7 @@ idtentry async_page_fault	do_async_page_fault	has_error_code=1
 #endif
 
 #ifdef CONFIG_X86_MCE
-idtentry machine_check		has_error_code=0	paranoid=1	do_sym=*machine_check_vector(%rip)
+idtentry machine_check		do_mce			has_error_code=0	paranoid=1
 #endif
 
 /*

‎arch/x86/include/asm/nospec-branch.h‎

Lines changed: 7 additions & 3 deletions

@@ -194,6 +194,9 @@ enum spectre_v2_mitigation {
 	SPECTRE_V2_IBRS,
 };
 
+extern char __indirect_thunk_start[];
+extern char __indirect_thunk_end[];
+
 /*
  * On VMEXIT we must ensure that no RSB predictions learned in the guest
  * can be followed in the host, by overwriting the RSB completely. Both
@@ -203,16 +206,17 @@ enum spectre_v2_mitigation {
 static inline void vmexit_fill_RSB(void)
 {
 #ifdef CONFIG_RETPOLINE
-	unsigned long loops = RSB_CLEAR_LOOPS / 2;
+	unsigned long loops;
 
 	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
 		      ALTERNATIVE("jmp 910f",
 				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
 				  X86_FEATURE_RETPOLINE)
 		      "910:"
-		      : "=&r" (loops), ASM_CALL_CONSTRAINT
-		      : "r" (loops) : "memory" );
+		      : "=r" (loops), ASM_CALL_CONSTRAINT
+		      : : "memory" );
 #endif
 }
+
 #endif /* __ASSEMBLY__ */
 #endif /* __NOSPEC_BRANCH_H__ */

‎arch/x86/include/asm/traps.h‎

Lines changed: 1 addition & 0 deletions

@@ -88,6 +88,7 @@ dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
 #ifdef CONFIG_X86_32
 dotraplinkage void do_iret_error(struct pt_regs *, long);
 #endif
+dotraplinkage void do_mce(struct pt_regs *, long);
 
 static inline int get_si_code(unsigned long condition)
 {

‎arch/x86/kernel/cpu/mcheck/mce.c‎

Lines changed: 5 additions & 0 deletions

@@ -1785,6 +1785,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
 void (*machine_check_vector)(struct pt_regs *, long error_code) =
						unexpected_machine_check;
 
+dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
+{
+	machine_check_vector(regs, error_code);
+}
+
 /*
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off:

‎arch/x86/kernel/kprobes/opt.c‎

Lines changed: 22 additions & 1 deletion

@@ -40,6 +40,7 @@
 #include <asm/debugreg.h>
 #include <asm/set_memory.h>
 #include <asm/sections.h>
+#include <asm/nospec-branch.h>
 
 #include "common.h"
 
@@ -203,7 +204,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
 }
 
 /* Check whether insn is indirect jump */
-static int insn_is_indirect_jump(struct insn *insn)
+static int __insn_is_indirect_jump(struct insn *insn)
 {
 	return ((insn->opcode.bytes[0] == 0xff &&
 		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -237,6 +238,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
 	return (start <= target && target <= start + len);
 }
 
+static int insn_is_indirect_jump(struct insn *insn)
+{
+	int ret = __insn_is_indirect_jump(insn);
+
+#ifdef CONFIG_RETPOLINE
+	/*
+	 * Jump to x86_indirect_thunk_* is treated as an indirect jump.
+	 * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
+	 * older gcc may use indirect jump. So we add this check instead of
+	 * replace indirect-jump check.
+	 */
+	if (!ret)
+		ret = insn_jump_into_range(insn,
+				(unsigned long)__indirect_thunk_start,
+				(unsigned long)__indirect_thunk_end -
+				(unsigned long)__indirect_thunk_start);
+#endif
+	return ret;
+}
+
 /* Decode whole function to ensure any instructions don't jump into target */
 static int can_optimize(unsigned long paddr)
 {
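
The wrapper added above extends the indirect-jump test: even when __insn_is_indirect_jump() says no, a jump whose target lands inside the thunk text delimited by __indirect_thunk_start/__indirect_thunk_end (the markers introduced in the vmlinux.lds.S hunk below) disqualifies the function from kprobe optimization. A simplified, hypothetical standalone helper showing just the address test, assuming the caller has already decoded the jump target:

extern char __indirect_thunk_start[];	/* linker symbols from vmlinux.lds.S */
extern char __indirect_thunk_end[];

static int target_is_in_thunks(unsigned long target)
{
	unsigned long start = (unsigned long)__indirect_thunk_start;
	unsigned long len   = (unsigned long)__indirect_thunk_end - start;

	/* same inclusive range test that insn_jump_into_range() applies */
	return start <= target && target <= start + len;
}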

‎arch/x86/kernel/vmlinux.lds.S‎

Lines changed: 6 additions & 0 deletions

@@ -124,6 +124,12 @@ SECTIONS
 		ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
 #endif
 
+#ifdef CONFIG_RETPOLINE
+		__indirect_thunk_start = .;
+		*(.text.__x86.indirect_thunk)
+		__indirect_thunk_end = .;
+#endif
+
 		/* End of text section */
 		_etext = .;
 	} :text = 0x9090

‎arch/x86/lib/retpoline.S‎

Lines changed: 3 additions & 2 deletions

@@ -9,7 +9,7 @@
 #include <asm/nospec-branch.h>
 
 .macro THUNK reg
-	.section .text.__x86.indirect_thunk.\reg
+	.section .text.__x86.indirect_thunk
 
 ENTRY(__x86_indirect_thunk_\reg)
 	CFI_STARTPROC
@@ -25,7 +25,8 @@ ENDPROC(__x86_indirect_thunk_\reg)
  * than one per register with the correct names. So we do it
  * the simple and nasty way...
  */
-#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg)
+#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
+#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
 #define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
 
 GENERATE_THUNK(_ASM_AX)
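
The new __EXPORT_THUNK() wrapper tags every __x86_indirect_thunk_* symbol with _ASM_NOKPROBE(), which records it in the kprobe blacklist so a probe can never be placed inside a thunk. For reference only, the C-side counterpart of that assembly macro is NOKPROBE_SYMBOL() from <linux/kprobes.h>; a hypothetical example, not part of this patch:

#include <linux/kprobes.h>

static void sensitive_helper(void)
{
	/* code that must never be hit by a kprobe breakpoint */
}
NOKPROBE_SYMBOL(sensitive_helper);	/* adds the symbol to the kprobe blacklist */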
