Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings
forked from torvalds/linux

Commit 9e1909b

Browse files
committed
Merge branch 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86/pti updates from Thomas Gleixner:
 "Another set of melted spectrum updates:

  - Iron out the last late microcode loading issues by actually checking
    whether new microcode is present and preventing the CPU synchronization
    to run into a timeout induced hang.

  - Remove Skylake C2 from the microcode blacklist according to the latest
    Intel documentation

  - Fix the VM86 POPF emulation which traps if VIP is set, but VIF is not.
    Enhance the selftests to catch that kind of issue

  - Annotate indirect calls/jumps for objtool on 32bit. This is not a
    functional issue, but for consistency sake its the right thing to do.

  - Fix a jump label build warning observed on SPARC64 which uses 32bit
    storage for the code location which is casted to 64 bit pointer w/o
    extending it to 64bit first.

  - Add two new cpufeature bits. Not really an urgent issue, but provides
    them for both x86 and x86/kvm work. No impact on the current kernel"

* 'x86-pti-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/microcode: Fix CPU synchronization routine
  x86/microcode: Attempt late loading only when new microcode is present
  x86/speculation: Remove Skylake C2 from Speculation Control microcode blacklist
  jump_label: Fix sparc64 warning
  x86/speculation, objtool: Annotate indirect calls/jumps for objtool on 32-bit kernels
  x86/vm86/32: Fix POPF emulation
  selftests/x86/entry_from_vm86: Add test cases for POPF
  selftests/x86/entry_from_vm86: Exit with 1 if we fail
  x86/cpufeatures: Add Intel PCONFIG cpufeature
  x86/cpufeatures: Add Intel Total Memory Encryption cpufeature
2 parents df4fe17 + bb8c13d; commit 9e1909b

File tree

10 files changed

+108
-55
lines changed

10 files changed

+108
-55
lines changed

‎arch/x86/include/asm/cpufeatures.h‎

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -316,6 +316,7 @@
316316
#defineX86_FEATURE_VPCLMULQDQ(16*32+10)/* Carry-Less Multiplication Double Quadword */
317317
#defineX86_FEATURE_AVX512_VNNI(16*32+11)/* Vector Neural Network Instructions */
318318
#defineX86_FEATURE_AVX512_BITALG(16*32+12)/* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
319+
#defineX86_FEATURE_TME(16*32+13)/* Intel Total Memory Encryption */
319320
#defineX86_FEATURE_AVX512_VPOPCNTDQ(16*32+14)/* POPCNT for vectors of DW/QW */
320321
#defineX86_FEATURE_LA57(16*32+16)/* 5-level page tables */
321322
#defineX86_FEATURE_RDPID(16*32+22)/* RDPID instruction */
@@ -328,6 +329,7 @@
328329
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
329330
#defineX86_FEATURE_AVX512_4VNNIW(18*32+ 2)/* AVX-512 Neural Network Instructions */
330331
#defineX86_FEATURE_AVX512_4FMAPS(18*32+ 3)/* AVX-512 Multiply Accumulation Single precision */
332+
#defineX86_FEATURE_PCONFIG(18*32+18)/* Intel PCONFIG */
331333
#defineX86_FEATURE_SPEC_CTRL(18*32+26)/* "" Speculation Control (IBRS + IBPB) */
332334
#defineX86_FEATURE_INTEL_STIBP(18*32+27)/* "" Single Thread Indirect Branch Predictors */
333335
#defineX86_FEATURE_ARCH_CAPABILITIES(18*32+29)/* IA32_ARCH_CAPABILITIES MSR (Intel) */

‎arch/x86/include/asm/microcode.h‎

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -39,6 +39,7 @@ struct device;
3939

4040
enumucode_state {
4141
UCODE_OK=0,
42+
UCODE_NEW,
4243
UCODE_UPDATED,
4344
UCODE_NFOUND,
4445
UCODE_ERROR,

‎arch/x86/include/asm/nospec-branch.h‎

Lines changed: 4 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -183,7 +183,10 @@
183183
* otherwise we'll run out of registers. We don't care about CET
184184
* here, anyway.
185185
*/
186-
# defineCALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n",\
186+
# defineCALL_NOSPEC\
187+
ALTERNATIVE(\
188+
ANNOTATE_RETPOLINE_SAFE\
189+
"call *%[thunk_target]\n",\
187190
" jmp 904f;\n"\
188191
" .align 16\n"\
189192
"901:call 903f;\n"\

‎arch/x86/kernel/cpu/intel.c‎

Lines changed: 1 addition & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -105,7 +105,7 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
105105
/*
106106
* Early microcode releases for the Spectre v2 mitigation were broken.
107107
* Information taken from;
108-
* - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
108+
* - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
109109
* - https://kb.vmware.com/s/article/52345
110110
* - Microcode revisions observed in the wild
111111
* - Release note from 20180108 microcode release
@@ -123,7 +123,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
123123
{INTEL_FAM6_KABYLAKE_MOBILE,0x09,0x80 },
124124
{INTEL_FAM6_SKYLAKE_X,0x03,0x0100013e },
125125
{INTEL_FAM6_SKYLAKE_X,0x04,0x0200003c },
126-
{INTEL_FAM6_SKYLAKE_DESKTOP,0x03,0xc2 },
127126
{INTEL_FAM6_BROADWELL_CORE,0x04,0x28 },
128127
{INTEL_FAM6_BROADWELL_GT3E,0x01,0x1b },
129128
{INTEL_FAM6_BROADWELL_XEON_D,0x02,0x14 },

‎arch/x86/kernel/cpu/microcode/amd.c‎

Lines changed: 21 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
339339
return-EINVAL;
340340

341341
ret=load_microcode_amd(true,x86_family(cpuid_1_eax),desc.data,desc.size);
342-
if (ret!=UCODE_OK)
342+
if (ret>UCODE_UPDATED)
343343
return-EINVAL;
344344

345345
return0;
@@ -683,27 +683,35 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
683683
staticenumucode_state
684684
load_microcode_amd(boolsave,u8family,constu8*data,size_tsize)
685685
{
686+
structucode_patch*p;
686687
enumucode_stateret;
687688

688689
/* free old equiv table */
689690
free_equiv_cpu_table();
690691

691692
ret=__load_microcode_amd(family,data,size);
692-
693-
if (ret!=UCODE_OK)
693+
if (ret!=UCODE_OK) {
694694
cleanup();
695+
returnret;
696+
}
695697

696-
#ifdefCONFIG_X86_32
697-
/* save BSP's matching patch for early load */
698-
if (save) {
699-
structucode_patch*p=find_patch(0);
700-
if (p) {
701-
memset(amd_ucode_patch,0,PATCH_MAX_SIZE);
702-
memcpy(amd_ucode_patch,p->data,min_t(u32,ksize(p->data),
703-
PATCH_MAX_SIZE));
704-
}
698+
p=find_patch(0);
699+
if (!p) {
700+
returnret;
701+
}else {
702+
if (boot_cpu_data.microcode==p->patch_id)
703+
returnret;
704+
705+
ret=UCODE_NEW;
705706
}
706-
#endif
707+
708+
/* save BSP's matching patch for early load */
709+
if (!save)
710+
returnret;
711+
712+
memset(amd_ucode_patch,0,PATCH_MAX_SIZE);
713+
memcpy(amd_ucode_patch,p->data,min_t(u32,ksize(p->data),PATCH_MAX_SIZE));
714+
707715
returnret;
708716
}
709717

‎arch/x86/kernel/cpu/microcode/core.c‎

Lines changed: 44 additions & 32 deletions
Original file line number | Diff line number | Diff line change
@@ -517,7 +517,29 @@ static int check_online_cpus(void)
517517
return-EINVAL;
518518
}
519519

520-
staticatomic_tlate_cpus;
520+
staticatomic_tlate_cpus_in;
521+
staticatomic_tlate_cpus_out;
522+
523+
staticint__wait_for_cpus(atomic_t*t,long longtimeout)
524+
{
525+
intall_cpus=num_online_cpus();
526+
527+
atomic_inc(t);
528+
529+
while (atomic_read(t)<all_cpus) {
530+
if (timeout<SPINUNIT) {
531+
pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
532+
all_cpus-atomic_read(t));
533+
return1;
534+
}
535+
536+
ndelay(SPINUNIT);
537+
timeout-=SPINUNIT;
538+
539+
touch_nmi_watchdog();
540+
}
541+
return0;
542+
}
521543

522544
/*
523545
* Returns:
@@ -527,46 +549,39 @@ static atomic_t late_cpus;
527549
*/
528550
staticint__reload_late(void*info)
529551
{
530-
unsignedinttimeout=NSEC_PER_SEC;
531-
intall_cpus=num_online_cpus();
532552
intcpu=smp_processor_id();
533553
enumucode_stateerr;
534554
intret=0;
535555

536-
atomic_dec(&late_cpus);
537-
538556
/*
539557
* Wait for all CPUs to arrive. A load will not be attempted unless all
540558
* CPUs show up.
541559
* */
542-
while (atomic_read(&late_cpus)) {
543-
if (timeout<SPINUNIT) {
544-
pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
545-
atomic_read(&late_cpus));
546-
return-1;
547-
}
548-
549-
ndelay(SPINUNIT);
550-
timeout-=SPINUNIT;
551-
552-
touch_nmi_watchdog();
553-
}
560+
if (__wait_for_cpus(&late_cpus_in,NSEC_PER_SEC))
561+
return-1;
554562

555563
spin_lock(&update_lock);
556564
apply_microcode_local(&err);
557565
spin_unlock(&update_lock);
558566

559567
if (err>UCODE_NFOUND) {
560568
pr_warn("Error reloading microcode on CPU %d\n",cpu);
561-
ret=-1;
562-
}elseif (err==UCODE_UPDATED) {
569+
return-1;
570+
/* siblings return UCODE_OK because their engine got updated already */
571+
}elseif (err==UCODE_UPDATED||err==UCODE_OK) {
563572
ret=1;
573+
}else {
574+
returnret;
564575
}
565576

566-
atomic_inc(&late_cpus);
567-
568-
while (atomic_read(&late_cpus)!=all_cpus)
569-
cpu_relax();
577+
/*
578+
* Increase the wait timeout to a safe value here since we're
579+
* serializing the microcode update and that could take a while on a
580+
* large number of CPUs. And that is fine as the *actual* timeout will
581+
* be determined by the last CPU finished updating and thus cut short.
582+
*/
583+
if (__wait_for_cpus(&late_cpus_out,NSEC_PER_SEC*num_online_cpus()))
584+
panic("Timeout during microcode update!\n");
570585

571586
returnret;
572587
}
@@ -579,12 +594,11 @@ static int microcode_reload_late(void)
579594
{
580595
intret;
581596

582-
atomic_set(&late_cpus,num_online_cpus());
597+
atomic_set(&late_cpus_in,0);
598+
atomic_set(&late_cpus_out,0);
583599

584600
ret=stop_machine_cpuslocked(__reload_late,NULL,cpu_online_mask);
585-
if (ret<0)
586-
returnret;
587-
elseif (ret>0)
601+
if (ret>0)
588602
microcode_check();
589603

590604
returnret;
@@ -607,7 +621,7 @@ static ssize_t reload_store(struct device *dev,
607621
returnsize;
608622

609623
tmp_ret=microcode_ops->request_microcode_fw(bsp,&microcode_pdev->dev, true);
610-
if (tmp_ret!=UCODE_OK)
624+
if (tmp_ret!=UCODE_NEW)
611625
returnsize;
612626

613627
get_online_cpus();
@@ -691,10 +705,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
691705
if (system_state!=SYSTEM_RUNNING)
692706
returnUCODE_NFOUND;
693707

694-
ustate=microcode_ops->request_microcode_fw(cpu,&microcode_pdev->dev,
695-
refresh_fw);
696-
697-
if (ustate==UCODE_OK) {
708+
ustate=microcode_ops->request_microcode_fw(cpu,&microcode_pdev->dev,refresh_fw);
709+
if (ustate==UCODE_NEW) {
698710
pr_debug("CPU%d updated upon init\n",cpu);
699711
apply_microcode_on_target(cpu);
700712
}

‎arch/x86/kernel/cpu/microcode/intel.c‎

Lines changed: 3 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -862,6 +862,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
862862
unsignedintleftover=size;
863863
unsignedintcurr_mc_size=0,new_mc_size=0;
864864
unsignedintcsig,cpf;
865+
enumucode_stateret=UCODE_OK;
865866

866867
while (leftover) {
867868
structmicrocode_header_intelmc_header;
@@ -903,6 +904,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
903904
new_mc=mc;
904905
new_mc_size=mc_size;
905906
mc=NULL;/* trigger new vmalloc */
907+
ret=UCODE_NEW;
906908
}
907909

908910
ucode_ptr+=mc_size;
@@ -932,7 +934,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
932934
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
933935
cpu,new_rev,uci->cpu_sig.rev);
934936

935-
returnUCODE_OK;
937+
returnret;
936938
}
937939

938940
staticintget_ucode_fw(void*to,constvoid*from,size_tn)

‎arch/x86/kernel/vm86_32.c‎

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -727,7 +727,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
727727
return;
728728

729729
check_vip:
730-
if (VEFLAGS&X86_EFLAGS_VIP) {
730+
if ((VEFLAGS& (X86_EFLAGS_VIP |X86_EFLAGS_VIF))==
731+
(X86_EFLAGS_VIP |X86_EFLAGS_VIF)) {
731732
save_v86_state(regs,VM86_STI);
732733
return;
733734
}

‎kernel/jump_label.c‎

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -373,7 +373,8 @@ static void __jump_label_update(struct static_key *key,
373373
if (kernel_text_address(entry->code))
374374
arch_jump_label_transform(entry,jump_label_type(entry));
375375
else
376-
WARN_ONCE(1,"can't patch jump_label at %pS", (void*)entry->code);
376+
WARN_ONCE(1,"can't patch jump_label at %pS",
377+
(void*)(unsigned long)entry->code);
377378
}
378379
}
379380
}

‎tools/testing/selftests/x86/entry_from_vm86.c‎

Lines changed: 28 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -95,6 +95,10 @@ asm (
9595
"int3\n\t"
9696
"vmcode_int80:\n\t"
9797
"int $0x80\n\t"
98+
"vmcode_popf_hlt:\n\t"
99+
"push %ax\n\t"
100+
"popf\n\t"
101+
"hlt\n\t"
98102
"vmcode_umip:\n\t"
99103
/* addressing via displacements */
100104
"smsw (2052)\n\t"
@@ -124,8 +128,8 @@ asm (
124128

125129
externunsignedcharvmcode[],end_vmcode[];
126130
externunsignedcharvmcode_bound[],vmcode_sysenter[],vmcode_syscall[],
127-
vmcode_sti[],vmcode_int3[],vmcode_int80[],vmcode_umip[],
128-
vmcode_umip_str[],vmcode_umip_sldt[];
131+
vmcode_sti[],vmcode_int3[],vmcode_int80[],vmcode_popf_hlt[],
132+
vmcode_umip[],vmcode_umip_str[],vmcode_umip_sldt[];
129133

130134
/* Returns false if the test was skipped. */
131135
staticbooldo_test(structvm86plus_struct*v86,unsigned longeip,
@@ -175,7 +179,7 @@ static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
175179
(VM86_TYPE(ret)==rettype&&VM86_ARG(ret)==retarg)) {
176180
printf("[OK]\tReturned correctly\n");
177181
}else {
178-
printf("[FAIL]\tIncorrect return reason\n");
182+
printf("[FAIL]\tIncorrect return reason (started at eip = 0x%lx, ended at eip = 0x%lx)\n",eip,v86->regs.eip);
179183
nerrs++;
180184
}
181185

@@ -264,6 +268,9 @@ int main(void)
264268
v86.regs.ds=load_addr /16;
265269
v86.regs.es=load_addr /16;
266270

271+
/* Use the end of the page as our stack. */
272+
v86.regs.esp=4096;
273+
267274
assert((v86.regs.cs&3)==0);/* Looks like RPL = 0 */
268275

269276
/* #BR -- should deliver SIG??? */
@@ -295,6 +302,23 @@ int main(void)
295302
v86.regs.eflags &= ~X86_EFLAGS_IF;
296303
do_test(&v86,vmcode_sti-vmcode,VM86_STI,0,"STI with VIP set");
297304

305+
/* POPF with VIP set but IF clear: should not trap */
306+
v86.regs.eflags=X86_EFLAGS_VIP;
307+
v86.regs.eax=0;
308+
do_test(&v86,vmcode_popf_hlt-vmcode,VM86_UNKNOWN,0,"POPF with VIP set and IF clear");
309+
310+
/* POPF with VIP set and IF set: should trap */
311+
v86.regs.eflags=X86_EFLAGS_VIP;
312+
v86.regs.eax=X86_EFLAGS_IF;
313+
do_test(&v86,vmcode_popf_hlt-vmcode,VM86_STI,0,"POPF with VIP and IF set");
314+
315+
/* POPF with VIP clear and IF set: should not trap */
316+
v86.regs.eflags=0;
317+
v86.regs.eax=X86_EFLAGS_IF;
318+
do_test(&v86,vmcode_popf_hlt-vmcode,VM86_UNKNOWN,0,"POPF with VIP clear and IF set");
319+
320+
v86.regs.eflags=0;
321+
298322
/* INT3 -- should cause #BP */
299323
do_test(&v86,vmcode_int3-vmcode,VM86_TRAP,3,"INT3");
300324

@@ -318,7 +342,7 @@ int main(void)
318342
clearhandler(SIGSEGV);
319343

320344
/* Make sure nothing explodes if we fork. */
321-
if (fork()>0)
345+
if (fork()==0)
322346
return0;
323347

324348
return (nerrs==0 ?0 :1);

0 commit comments

Comments (0)

[8]ページ先頭

©2009-2025 Movatter.jp