Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit c50cb04

Browse files
David BrazdilMarc Zyngier
David Brazdil
authored and
Marc Zyngier
committed
KVM: arm64: Remove __hyp_text macro, use build rules instead
With nVHE code now fully separated from the rest of the kernel, the effects of the __hyp_text macro (which had to be applied on all nVHE code) can be achieved with build rules instead. The macro used to: (a) move code to the .hyp.text ELF section, now done by renaming .text using `objcopy`, and (b) `notrace` and `__noscs` would negate effects of CC_FLAGS_FTRACE and CC_FLAGS_SCS, respectively; now those flags are erased from KBUILD_CFLAGS (same way as in the EFI stub). Note that by removing __hyp_text from code shared with VHE, all VHE code is now compiled into .text and without `notrace` and `__noscs`. Use of '.pushsection .hyp.text' was removed from assembly files as this is now also covered by the build rules. For MAINTAINERS: if needed to re-run, uses of the macro were removed with the following command. Formatting was fixed up manually. find arch/arm64/kvm/hyp -type f -name '*.c' -o -name '*.h' \ -exec sed -i 's/ __hyp_text//g' {} + Signed-off-by: David Brazdil <dbrazdil@google.com> Signed-off-by: Marc Zyngier <maz@kernel.org> Link: https://lore.kernel.org/r/20200625131420.71444-15-dbrazdil@google.com
1 parent c04dd45 commit c50cb04

File tree

17 files changed

+132
-147
lines changed

17 files changed

+132
-147
lines changed

‎arch/arm64/include/asm/kvm_emulate.h‎

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -516,7 +516,7 @@ static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_i
516516
* Skip an instruction which has been emulated at hyp while most guest sysregs
517517
* are live.
518518
*/
519-
static__always_inlinevoid__hyp_text__kvm_skip_instr(structkvm_vcpu*vcpu)
519+
static__always_inlinevoid__kvm_skip_instr(structkvm_vcpu*vcpu)
520520
{
521521
*vcpu_pc(vcpu)=read_sysreg_el2(SYS_ELR);
522522
vcpu->arch.ctxt.gp_regs.regs.pstate=read_sysreg_el2(SYS_SPSR);

‎arch/arm64/include/asm/kvm_hyp.h‎

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,6 @@
1212
#include<asm/alternative.h>
1313
#include<asm/sysreg.h>
1414

15-
#define__hyp_text __section(.hyp.text) notrace __noscs
16-
1715
#defineread_sysreg_elx(r,nvh,vh)\
1816
({\
1917
u64 reg;\

‎arch/arm64/kvm/hyp/aarch32.c‎

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ static const unsigned short cc_map[16] = {
4444
/*
4545
* Check if a trapped instruction should have been executed or not.
4646
*/
47-
bool__hyp_textkvm_condition_valid32(conststructkvm_vcpu*vcpu)
47+
boolkvm_condition_valid32(conststructkvm_vcpu*vcpu)
4848
{
4949
unsigned longcpsr;
5050
u32cpsr_cond;
@@ -93,7 +93,7 @@ bool __hyp_text kvm_condition_valid32(const struct kvm_vcpu *vcpu)
9393
*
9494
* IT[7:0] -> CPSR[26:25],CPSR[15:10]
9595
*/
96-
staticvoid__hyp_textkvm_adjust_itstate(structkvm_vcpu*vcpu)
96+
staticvoidkvm_adjust_itstate(structkvm_vcpu*vcpu)
9797
{
9898
unsigned longitbits,cond;
9999
unsigned longcpsr=*vcpu_cpsr(vcpu);
@@ -123,7 +123,7 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
123123
* kvm_skip_instr - skip a trapped instruction and proceed to the next
124124
* @vcpu: The vcpu pointer
125125
*/
126-
void__hyp_textkvm_skip_instr32(structkvm_vcpu*vcpu,boolis_wide_instr)
126+
voidkvm_skip_instr32(structkvm_vcpu*vcpu,boolis_wide_instr)
127127
{
128128
u32pc=*vcpu_pc(vcpu);
129129
boolis_thumb;

‎arch/arm64/kvm/hyp/entry.S‎

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,6 @@
2121
#define CPU_SP_EL0_OFFSET(CPU_XREG_OFFSET(30)+8)
2222

2323
.text
24-
.pushsection.hyp.text,"ax"
2524

2625
/*
2726
* We treatx18 as callee-saved as the host may use it as a platform

‎arch/arm64/kvm/hyp/fpsimd.S‎

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,6 @@
99
#include<asm/fpsimdmacros.h>
1010

1111
.text
12-
.pushsection.hyp.text,"ax"
1312

1413
SYM_FUNC_START(__fpsimd_save_state)
1514
fpsimd_savex0,1

‎arch/arm64/kvm/hyp/hyp-entry.S‎

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@
1616
#include <asm/mmu.h>
1717

1818
.text
19-
.pushsection.hyp.text,"ax"
2019

2120
.macro do_el2_call
2221
/*

‎arch/arm64/kvm/hyp/include/hyp/debug-sr.h‎

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -88,9 +88,9 @@
8888
default:write_debug(ptr[0],reg,0);\
8989
}
9090

91-
staticinlinevoid__hyp_text__debug_save_state(structkvm_vcpu*vcpu,
92-
structkvm_guest_debug_arch*dbg,
93-
structkvm_cpu_context*ctxt)
91+
staticinlinevoid__debug_save_state(structkvm_vcpu*vcpu,
92+
structkvm_guest_debug_arch*dbg,
93+
structkvm_cpu_context*ctxt)
9494
{
9595
u64aa64dfr0;
9696
intbrps,wrps;
@@ -107,9 +107,9 @@ static inline void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
107107
ctxt->sys_regs[MDCCINT_EL1]=read_sysreg(mdccint_el1);
108108
}
109109

110-
staticinlinevoid__hyp_text__debug_restore_state(structkvm_vcpu*vcpu,
111-
structkvm_guest_debug_arch*dbg,
112-
structkvm_cpu_context*ctxt)
110+
staticinlinevoid__debug_restore_state(structkvm_vcpu*vcpu,
111+
structkvm_guest_debug_arch*dbg,
112+
structkvm_cpu_context*ctxt)
113113
{
114114
u64aa64dfr0;
115115
intbrps,wrps;
@@ -127,7 +127,7 @@ static inline void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
127127
write_sysreg(ctxt->sys_regs[MDCCINT_EL1],mdccint_el1);
128128
}
129129

130-
staticinlinevoid__hyp_text__debug_switch_to_guest_common(structkvm_vcpu*vcpu)
130+
staticinlinevoid__debug_switch_to_guest_common(structkvm_vcpu*vcpu)
131131
{
132132
structkvm_cpu_context*host_ctxt;
133133
structkvm_cpu_context*guest_ctxt;
@@ -146,7 +146,7 @@ static inline void __hyp_text __debug_switch_to_guest_common(struct kvm_vcpu *vc
146146
__debug_restore_state(vcpu,guest_dbg,guest_ctxt);
147147
}
148148

149-
staticinlinevoid__hyp_text__debug_switch_to_host_common(structkvm_vcpu*vcpu)
149+
staticinlinevoid__debug_switch_to_host_common(structkvm_vcpu*vcpu)
150150
{
151151
structkvm_cpu_context*host_ctxt;
152152
structkvm_cpu_context*guest_ctxt;

‎arch/arm64/kvm/hyp/include/hyp/switch.h‎

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@
3030
externconstchar__hyp_panic_string[];
3131

3232
/* Check whether the FP regs were dirtied while in the host-side run loop: */
33-
staticinlinebool__hyp_textupdate_fp_enabled(structkvm_vcpu*vcpu)
33+
staticinlineboolupdate_fp_enabled(structkvm_vcpu*vcpu)
3434
{
3535
/*
3636
* When the system doesn't support FP/SIMD, we cannot rely on
@@ -48,15 +48,15 @@ static inline bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
4848
}
4949

5050
/* Save the 32-bit only FPSIMD system register state */
51-
staticinlinevoid__hyp_text__fpsimd_save_fpexc32(structkvm_vcpu*vcpu)
51+
staticinlinevoid__fpsimd_save_fpexc32(structkvm_vcpu*vcpu)
5252
{
5353
if (!vcpu_el1_is_32bit(vcpu))
5454
return;
5555

5656
vcpu->arch.ctxt.sys_regs[FPEXC32_EL2]=read_sysreg(fpexc32_el2);
5757
}
5858

59-
staticinlinevoid__hyp_text__activate_traps_fpsimd32(structkvm_vcpu*vcpu)
59+
staticinlinevoid__activate_traps_fpsimd32(structkvm_vcpu*vcpu)
6060
{
6161
/*
6262
* We are about to set CPTR_EL2.TFP to trap all floating point
@@ -73,7 +73,7 @@ static inline void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
7373
}
7474
}
7575

76-
staticinlinevoid__hyp_text__activate_traps_common(structkvm_vcpu*vcpu)
76+
staticinlinevoid__activate_traps_common(structkvm_vcpu*vcpu)
7777
{
7878
/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
7979
write_sysreg(1 <<15,hstr_el2);
@@ -89,13 +89,13 @@ static inline void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
8989
write_sysreg(vcpu->arch.mdcr_el2,mdcr_el2);
9090
}
9191

92-
staticinlinevoid__hyp_text__deactivate_traps_common(void)
92+
staticinlinevoid__deactivate_traps_common(void)
9393
{
9494
write_sysreg(0,hstr_el2);
9595
write_sysreg(0,pmuserenr_el0);
9696
}
9797

98-
staticinlinevoid__hyp_text___activate_traps(structkvm_vcpu*vcpu)
98+
staticinlinevoid___activate_traps(structkvm_vcpu*vcpu)
9999
{
100100
u64hcr=vcpu->arch.hcr_el2;
101101

@@ -108,7 +108,7 @@ static inline void __hyp_text ___activate_traps(struct kvm_vcpu *vcpu)
108108
write_sysreg_s(vcpu->arch.vsesr_el2,SYS_VSESR_EL2);
109109
}
110110

111-
staticinlinevoid__hyp_text___deactivate_traps(structkvm_vcpu*vcpu)
111+
staticinlinevoid___deactivate_traps(structkvm_vcpu*vcpu)
112112
{
113113
/*
114114
* If we pended a virtual abort, preserve it until it gets
@@ -122,12 +122,12 @@ static inline void __hyp_text ___deactivate_traps(struct kvm_vcpu *vcpu)
122122
}
123123
}
124124

125-
staticinlinevoid__hyp_text__activate_vm(structkvm*kvm)
125+
staticinlinevoid__activate_vm(structkvm*kvm)
126126
{
127127
__load_guest_stage2(kvm);
128128
}
129129

130-
staticinlinebool__hyp_text__translate_far_to_hpfar(u64far,u64*hpfar)
130+
staticinlinebool__translate_far_to_hpfar(u64far,u64*hpfar)
131131
{
132132
u64par,tmp;
133133

@@ -156,7 +156,7 @@ static inline bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
156156
return true;
157157
}
158158

159-
staticinlinebool__hyp_text__populate_fault_info(structkvm_vcpu*vcpu)
159+
staticinlinebool__populate_fault_info(structkvm_vcpu*vcpu)
160160
{
161161
u8ec;
162162
u64esr;
@@ -196,7 +196,7 @@ static inline bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
196196
}
197197

198198
/* Check for an FPSIMD/SVE trap and handle as appropriate */
199-
staticinlinebool__hyp_text__hyp_handle_fpsimd(structkvm_vcpu*vcpu)
199+
staticinlinebool__hyp_handle_fpsimd(structkvm_vcpu*vcpu)
200200
{
201201
boolvhe,sve_guest,sve_host;
202202
u8hsr_ec;
@@ -283,7 +283,7 @@ static inline bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
283283
return true;
284284
}
285285

286-
staticinlinebool__hyp_texthandle_tx2_tvm(structkvm_vcpu*vcpu)
286+
staticinlineboolhandle_tx2_tvm(structkvm_vcpu*vcpu)
287287
{
288288
u32sysreg=esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
289289
intrt=kvm_vcpu_sys_get_rt(vcpu);
@@ -338,7 +338,7 @@ static inline bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
338338
return true;
339339
}
340340

341-
staticinlinebool__hyp_textesr_is_ptrauth_trap(u32esr)
341+
staticinlineboolesr_is_ptrauth_trap(u32esr)
342342
{
343343
u32ec=ESR_ELx_EC(esr);
344344

@@ -371,7 +371,7 @@ static inline bool __hyp_text esr_is_ptrauth_trap(u32 esr)
371371
regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);\
372372
})
373373

374-
staticinlinebool__hyp_text__hyp_handle_ptrauth(structkvm_vcpu*vcpu)
374+
staticinlinebool__hyp_handle_ptrauth(structkvm_vcpu*vcpu)
375375
{
376376
structkvm_cpu_context*ctxt;
377377
u64val;
@@ -401,7 +401,7 @@ static inline bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
401401
* the guest, false when we should restore the host state and return to the
402402
* main run loop.
403403
*/
404-
staticinlinebool__hyp_textfixup_guest_exit(structkvm_vcpu*vcpu,u64*exit_code)
404+
staticinlineboolfixup_guest_exit(structkvm_vcpu*vcpu,u64*exit_code)
405405
{
406406
if (ARM_EXCEPTION_CODE(*exit_code)!=ARM_EXCEPTION_IRQ)
407407
vcpu->arch.fault.esr_el2=read_sysreg_el2(SYS_ESR);
@@ -473,15 +473,15 @@ static inline bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_
473473
return false;
474474
}
475475

476-
staticinlinebool__hyp_text__needs_ssbd_off(structkvm_vcpu*vcpu)
476+
staticinlinebool__needs_ssbd_off(structkvm_vcpu*vcpu)
477477
{
478478
if (!cpus_have_final_cap(ARM64_SSBD))
479479
return false;
480480

481481
return !(vcpu->arch.workaround_flags&VCPU_WORKAROUND_2_FLAG);
482482
}
483483

484-
staticinlinevoid__hyp_text__set_guest_arch_workaround_state(structkvm_vcpu*vcpu)
484+
staticinlinevoid__set_guest_arch_workaround_state(structkvm_vcpu*vcpu)
485485
{
486486
#ifdefCONFIG_ARM64_SSBD
487487
/*
@@ -494,7 +494,7 @@ static inline void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu
494494
#endif
495495
}
496496

497-
staticinlinevoid__hyp_text__set_host_arch_workaround_state(structkvm_vcpu*vcpu)
497+
staticinlinevoid__set_host_arch_workaround_state(structkvm_vcpu*vcpu)
498498
{
499499
#ifdefCONFIG_ARM64_SSBD
500500
/*

‎arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h‎

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -15,18 +15,18 @@
1515
#include<asm/kvm_emulate.h>
1616
#include<asm/kvm_hyp.h>
1717

18-
staticinlinevoid__hyp_text__sysreg_save_common_state(structkvm_cpu_context*ctxt)
18+
staticinlinevoid__sysreg_save_common_state(structkvm_cpu_context*ctxt)
1919
{
2020
ctxt->sys_regs[MDSCR_EL1]=read_sysreg(mdscr_el1);
2121
}
2222

23-
staticinlinevoid__hyp_text__sysreg_save_user_state(structkvm_cpu_context*ctxt)
23+
staticinlinevoid__sysreg_save_user_state(structkvm_cpu_context*ctxt)
2424
{
2525
ctxt->sys_regs[TPIDR_EL0]=read_sysreg(tpidr_el0);
2626
ctxt->sys_regs[TPIDRRO_EL0]=read_sysreg(tpidrro_el0);
2727
}
2828

29-
staticinlinevoid__hyp_text__sysreg_save_el1_state(structkvm_cpu_context*ctxt)
29+
staticinlinevoid__sysreg_save_el1_state(structkvm_cpu_context*ctxt)
3030
{
3131
ctxt->sys_regs[CSSELR_EL1]=read_sysreg(csselr_el1);
3232
ctxt->sys_regs[SCTLR_EL1]=read_sysreg_el1(SYS_SCTLR);
@@ -51,7 +51,7 @@ static inline void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ct
5151
ctxt->gp_regs.spsr[KVM_SPSR_EL1]=read_sysreg_el1(SYS_SPSR);
5252
}
5353

54-
staticinlinevoid__hyp_text__sysreg_save_el2_return_state(structkvm_cpu_context*ctxt)
54+
staticinlinevoid__sysreg_save_el2_return_state(structkvm_cpu_context*ctxt)
5555
{
5656
ctxt->gp_regs.regs.pc=read_sysreg_el2(SYS_ELR);
5757
ctxt->gp_regs.regs.pstate=read_sysreg_el2(SYS_SPSR);
@@ -60,18 +60,18 @@ static inline void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_cont
6060
ctxt->sys_regs[DISR_EL1]=read_sysreg_s(SYS_VDISR_EL2);
6161
}
6262

63-
staticinlinevoid__hyp_text__sysreg_restore_common_state(structkvm_cpu_context*ctxt)
63+
staticinlinevoid__sysreg_restore_common_state(structkvm_cpu_context*ctxt)
6464
{
6565
write_sysreg(ctxt->sys_regs[MDSCR_EL1],mdscr_el1);
6666
}
6767

68-
staticinlinevoid__hyp_text__sysreg_restore_user_state(structkvm_cpu_context*ctxt)
68+
staticinlinevoid__sysreg_restore_user_state(structkvm_cpu_context*ctxt)
6969
{
7070
write_sysreg(ctxt->sys_regs[TPIDR_EL0],tpidr_el0);
7171
write_sysreg(ctxt->sys_regs[TPIDRRO_EL0],tpidrro_el0);
7272
}
7373

74-
staticinlinevoid__hyp_text__sysreg_restore_el1_state(structkvm_cpu_context*ctxt)
74+
staticinlinevoid__sysreg_restore_el1_state(structkvm_cpu_context*ctxt)
7575
{
7676
write_sysreg(ctxt->sys_regs[MPIDR_EL1],vmpidr_el2);
7777
write_sysreg(ctxt->sys_regs[CSSELR_EL1],csselr_el1);
@@ -130,7 +130,7 @@ static inline void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context
130130
write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],SYS_SPSR);
131131
}
132132

133-
staticinlinevoid__hyp_text__sysreg_restore_el2_return_state(structkvm_cpu_context*ctxt)
133+
staticinlinevoid__sysreg_restore_el2_return_state(structkvm_cpu_context*ctxt)
134134
{
135135
u64pstate=ctxt->gp_regs.regs.pstate;
136136
u64mode=pstate&PSR_AA32_MODE_MASK;
@@ -156,7 +156,7 @@ static inline void __hyp_text __sysreg_restore_el2_return_state(struct kvm_cpu_c
156156
write_sysreg_s(ctxt->sys_regs[DISR_EL1],SYS_VDISR_EL2);
157157
}
158158

159-
staticinlinevoid__hyp_text__sysreg32_save_state(structkvm_vcpu*vcpu)
159+
staticinlinevoid__sysreg32_save_state(structkvm_vcpu*vcpu)
160160
{
161161
u64*spsr,*sysreg;
162162

@@ -178,7 +178,7 @@ static inline void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
178178
sysreg[DBGVCR32_EL2]=read_sysreg(dbgvcr32_el2);
179179
}
180180

181-
staticinlinevoid__hyp_text__sysreg32_restore_state(structkvm_vcpu*vcpu)
181+
staticinlinevoid__sysreg32_restore_state(structkvm_vcpu*vcpu)
182182
{
183183
u64*spsr,*sysreg;
184184

‎arch/arm64/kvm/hyp/nvhe/Makefile‎

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,13 @@ $(obj)/%.hyp.o: $(obj)/%.hyp.tmp.o FORCE
2121
$(call if_changed,hypcopy)
2222

2323
quiet_cmd_hypcopy = HYPCOPY$@
24-
cmd_hypcopy =$(OBJCOPY) --prefix-symbols=__kvm_nvhe_$<$@
24+
cmd_hypcopy =$(OBJCOPY)--prefix-symbols=__kvm_nvhe_\
25+
--rename-section=.text=.hyp.text\
26+
$<$@
27+
28+
# Remove ftrace and Shadow Call Stack CFLAGS.
29+
# This is equivalent to the 'notrace' and '__noscs' annotations.
30+
KBUILD_CFLAGS :=$(filter-out$(CC_FLAGS_FTRACE)$(CC_FLAGS_SCS),$(KBUILD_CFLAGS))
2531

2632
# KVM nVHE code is run at a different exception code with a different map, so
2733
# compiler instrumentation that inserts callbacks or checks into the code may

0 commit comments

Comments
 (0)

[8]ページ先頭

©2009-2025 Movatter.jp