Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings
forked from torvalds/linux

Commit 32c6cdf

Browse files
committed
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner: "A set of small fixes for 4.15: - Fix vmapped stack synchronization on systems with 4-level paging and a large amount of memory caused by a missing 5-level folding which made the pgd synchronization logic to fail and causing double faults. - Add a missing sanity check in the vmalloc_fault() logic on 5-level paging systems. - Bring back protection against accessing a freed initrd in the microcode loader which was lost by a wrong merge conflict resolution. - Extend the Broadwell micro code loading sanity check. - Add a missing ENDPROC annotation in ftrace assembly code which makes ORC unhappy. - Prevent loading the AMD power module on !AMD platforms. The load itself is uncritical, but an unload attempt results in a kernel crash. - Update Peter Anvins role in the MAINTAINERS file"* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/ftrace: Add one more ENDPROC annotation x86: Mark hpa as a "Designated Reviewer" for the time being x86/mm/64: Tighten up vmalloc_fault() sanity checks on 5-level kernels x86/mm/64: Fix vmapped stack syncing on very-large-memory 4-level systems x86/microcode: Fix again accessing initrd after having been freed x86/microcode/intel: Extend BDW late-loading further with LLC size check perf/x86/amd/power: Do not load AMD power module on !AMD platforms
2 parents 07b0137 + dd08516, commit 32c6cdf

File tree

7 files changed

+60
-34
lines changed

7 files changed

+60
-34
lines changed

‎MAINTAINERS‎

Lines changed: 1 addition & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -6617,16 +6617,6 @@ L:linux-i2c@vger.kernel.org
66176617
S:Maintained
66186618
F:drivers/i2c/i2c-stub.c
66196619

6620-
i386 BOOT CODE
6621-
M:"H. Peter Anvin" <hpa@zytor.com>
6622-
S:Maintained
6623-
F:arch/x86/boot/
6624-
6625-
i386 SETUP CODE / CPU ERRATA WORKAROUNDS
6626-
M:"H. Peter Anvin" <hpa@zytor.com>
6627-
T:git git://git.kernel.org/pub/scm/linux/kernel/git/hpa/linux-2.6-x86setup.git
6628-
S:Maintained
6629-
66306620
IA64 (Itanium) PLATFORM
66316621
M:Tony Luck <tony.luck@intel.com>
66326622
M:Fenghua Yu <fenghua.yu@intel.com>
@@ -14866,7 +14856,7 @@ F:net/x25/
1486614856
X86 ARCHITECTURE (32-BIT AND 64-BIT)
1486714857
M:Thomas Gleixner <tglx@linutronix.de>
1486814858
M:Ingo Molnar <mingo@redhat.com>
14869-
M:"H. Peter Anvin" <hpa@zytor.com>
14859+
R:"H. Peter Anvin" <hpa@zytor.com>
1487014860
M:x86@kernel.org
1487114861
L:linux-kernel@vger.kernel.org
1487214862
T:git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core

‎arch/x86/events/amd/power.c‎

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -277,7 +277,7 @@ static int __init amd_power_pmu_init(void)
277277
intret;
278278

279279
if (!x86_match_cpu(cpu_match))
280-
return0;
280+
return-ENODEV;
281281

282282
if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
283283
return-ENODEV;

‎arch/x86/kernel/cpu/microcode/core.c‎

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -239,7 +239,7 @@ static int __init save_microcode_in_initrd(void)
239239
break;
240240
caseX86_VENDOR_AMD:
241241
if (c->x86 >=0x10)
242-
returnsave_microcode_in_initrd_amd(cpuid_eax(1));
242+
ret=save_microcode_in_initrd_amd(cpuid_eax(1));
243243
break;
244244
default:
245245
break;

‎arch/x86/kernel/cpu/microcode/intel.c‎

Lines changed: 18 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,9 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
4545
/* Current microcode patch used in early patching on the APs. */
4646
staticstructmicrocode_intel*intel_ucode_patch;
4747

48+
/* last level cache size per core */
49+
staticintllc_size_per_core;
50+
4851
staticinlineboolcpu_signatures_match(unsignedints1,unsignedintp1,
4952
unsignedints2,unsignedintp2)
5053
{
@@ -912,12 +915,14 @@ static bool is_blacklisted(unsigned int cpu)
912915

913916
/*
914917
* Late loading on model 79 with microcode revision less than 0x0b000021
915-
* may result in a system hang. This behavior is documented in item
916-
* BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
918+
* and LLC size per core bigger than 2.5MB may result in a system hang.
919+
* This behavior is documented in item BDF90, #334165 (Intel Xeon
920+
* Processor E7-8800/4800 v4 Product Family).
917921
*/
918922
if (c->x86==6&&
919923
c->x86_model==INTEL_FAM6_BROADWELL_X&&
920924
c->x86_mask==0x01&&
925+
llc_size_per_core>2621440&&
921926
c->microcode<0x0b000021) {
922927
pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n",c->microcode);
923928
pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
@@ -975,6 +980,15 @@ static struct microcode_ops microcode_intel_ops = {
975980
.apply_microcode=apply_microcode_intel,
976981
};
977982

983+
staticint__initcalc_llc_size_per_core(structcpuinfo_x86*c)
984+
{
985+
u64llc_size=c->x86_cache_size*1024;
986+
987+
do_div(llc_size,c->x86_max_cores);
988+
989+
return (int)llc_size;
990+
}
991+
978992
structmicrocode_ops*__initinit_intel_microcode(void)
979993
{
980994
structcpuinfo_x86*c=&boot_cpu_data;
@@ -985,5 +999,7 @@ struct microcode_ops * __init init_intel_microcode(void)
985999
returnNULL;
9861000
}
9871001

1002+
llc_size_per_core=calc_llc_size_per_core(c);
1003+
9881004
return&microcode_intel_ops;
9891005
}

‎arch/x86/kernel/ftrace_64.S‎

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -295,7 +295,7 @@ trace:
295295
restore_mcount_regs
296296

297297
jmp fgraph_trace
298-
END(function_hook)
298+
ENDPROC(function_hook)
299299
#endif /* CONFIG_DYNAMIC_FTRACE */
300300

301301
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

‎arch/x86/mm/fault.c‎

Lines changed: 9 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -439,18 +439,13 @@ static noinline int vmalloc_fault(unsigned long address)
439439
if (pgd_none(*pgd_ref))
440440
return-1;
441441

442-
if (pgd_none(*pgd)) {
443-
set_pgd(pgd,*pgd_ref);
444-
arch_flush_lazy_mmu_mode();
445-
}elseif (CONFIG_PGTABLE_LEVELS>4) {
446-
/*
447-
* With folded p4d, pgd_none() is always false, so the pgd may
448-
* point to an empty page table entry and pgd_page_vaddr()
449-
* will return garbage.
450-
*
451-
* We will do the correct sanity check on the p4d level.
452-
*/
453-
BUG_ON(pgd_page_vaddr(*pgd)!=pgd_page_vaddr(*pgd_ref));
442+
if (CONFIG_PGTABLE_LEVELS>4) {
443+
if (pgd_none(*pgd)) {
444+
set_pgd(pgd,*pgd_ref);
445+
arch_flush_lazy_mmu_mode();
446+
}else {
447+
BUG_ON(pgd_page_vaddr(*pgd)!=pgd_page_vaddr(*pgd_ref));
448+
}
454449
}
455450

456451
/* With 4-level paging, copying happens on the p4d level. */
@@ -459,7 +454,7 @@ static noinline int vmalloc_fault(unsigned long address)
459454
if (p4d_none(*p4d_ref))
460455
return-1;
461456

462-
if (p4d_none(*p4d)) {
457+
if (p4d_none(*p4d)&&CONFIG_PGTABLE_LEVELS==4) {
463458
set_p4d(p4d,*p4d_ref);
464459
arch_flush_lazy_mmu_mode();
465460
}else {
@@ -470,6 +465,7 @@ static noinline int vmalloc_fault(unsigned long address)
470465
* Below here mismatches are bugs because these lower tables
471466
* are shared:
472467
*/
468+
BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS<4);
473469

474470
pud=pud_offset(p4d,address);
475471
pud_ref=pud_offset(p4d_ref,address);

‎arch/x86/mm/tlb.c‎

Lines changed: 29 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -151,6 +151,34 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
151151
local_irq_restore(flags);
152152
}
153153

154+
staticvoidsync_current_stack_to_mm(structmm_struct*mm)
155+
{
156+
unsigned longsp=current_stack_pointer;
157+
pgd_t*pgd=pgd_offset(mm,sp);
158+
159+
if (CONFIG_PGTABLE_LEVELS>4) {
160+
if (unlikely(pgd_none(*pgd))) {
161+
pgd_t*pgd_ref=pgd_offset_k(sp);
162+
163+
set_pgd(pgd,*pgd_ref);
164+
}
165+
}else {
166+
/*
167+
* "pgd" is faked. The top level entries are "p4d"s, so sync
168+
* the p4d. This compiles to approximately the same code as
169+
* the 5-level case.
170+
*/
171+
p4d_t*p4d=p4d_offset(pgd,sp);
172+
173+
if (unlikely(p4d_none(*p4d))) {
174+
pgd_t*pgd_ref=pgd_offset_k(sp);
175+
p4d_t*p4d_ref=p4d_offset(pgd_ref,sp);
176+
177+
set_p4d(p4d,*p4d_ref);
178+
}
179+
}
180+
}
181+
154182
voidswitch_mm_irqs_off(structmm_struct*prev,structmm_struct*next,
155183
structtask_struct*tsk)
156184
{
@@ -226,11 +254,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
226254
* mapped in the new pgd, we'll double-fault. Forcibly
227255
* map it.
228256
*/
229-
unsignedintindex=pgd_index(current_stack_pointer);
230-
pgd_t*pgd=next->pgd+index;
231-
232-
if (unlikely(pgd_none(*pgd)))
233-
set_pgd(pgd,init_mm.pgd[index]);
257+
sync_current_stack_to_mm(next);
234258
}
235259

236260
/* Stop remote flushes for the previous mm */

0 commit comments

Comments (0)

[8]ページ先頭

©2009-2025 Movatter.jp