Commit a0e50aa

christofferdall-arm authored and Marc Zyngier committed

KVM: arm64: Factor out stage 2 page table data from struct kvm

As we are about to reuse our stage 2 page table manipulation code for
shadow stage 2 page tables in the context of nested virtualization, we
are going to manage multiple stage 2 page tables for a single VM.

This requires some pretty invasive changes to our data structures,
which moves the vmid and pgd pointers into a separate structure and
changes pretty much all of our mmu code to operate on this structure
instead.

The new structure is called struct kvm_s2_mmu.

There is no intended functional change by this patch alone.

Reviewed-by: James Morse <james.morse@arm.com>
Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
[Designed data structure layout in collaboration]
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Co-developed-by: Marc Zyngier <maz@kernel.org>
[maz: Moved the last_vcpu_ran down to the S2 MMU structure as well]
Signed-off-by: Marc Zyngier <maz@kernel.org>

1 parent ae4bffb commit a0e50aa
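
For orientation before the per-file diffs: this is the data-structure split the patch introduces, condensed from the arch/arm64/include/asm/kvm_host.h hunks below (comments abridged; a condensed view, not the verbatim header):

	/* Condensed from the kvm_host.h diff in this commit: the vmid, pgd
	 * and last_vcpu_ran state moves out of struct kvm_arch into the
	 * new per-stage-2 structure. */
	struct kvm_s2_mmu {
		struct kvm_vmid	vmid;

		/* stage2 entry level table; two MMUs of one VM may share it */
		pgd_t		*pgd;
		phys_addr_t	pgd_phys;

		/* The last vcpu id that ran on each physical CPU */
		int __percpu	*last_vcpu_ran;

		struct kvm	*kvm;	/* back-pointer to the owning VM */
	};

	struct kvm_arch {
		struct kvm_s2_mmu mmu;	/* the VM's canonical stage 2 MMU */

		/* VTCR_EL2 value for this VM; stays per VM, not per MMU */
		u64 vtcr;
		/* ... remaining fields unchanged ... */
	};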

File tree

10 files changed, +240 -198 lines changed


arch/arm64/include/asm/kvm_asm.h

Lines changed: 4 additions & 3 deletions

@@ -77,6 +77,7 @@

 struct kvm;
 struct kvm_vcpu;
+struct kvm_s2_mmu;

 DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
 DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
@@ -90,9 +91,9 @@ DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #endif

 extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa);
+extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);

 extern void __kvm_timer_set_cntvoff(u64 cntvoff);

arch/arm64/include/asm/kvm_host.h

Lines changed: 25 additions & 7 deletions

@@ -66,19 +66,34 @@ struct kvm_vmid {
 	u32    vmid;
 };

-struct kvm_arch {
+struct kvm_s2_mmu {
 	struct kvm_vmid vmid;

-	/* stage2 entry level table */
-	pgd_t *pgd;
-	phys_addr_t pgd_phys;
-
-	/* VTCR_EL2 value for this VM */
-	u64    vtcr;
+	/*
+	 * stage2 entry level table
+	 *
+	 * Two kvm_s2_mmu structures in the same VM can point to the same
+	 * pgd here. This happens when running a guest using a
+	 * translation regime that isn't affected by its own stage-2
+	 * translation, such as a non-VHE hypervisor running at vEL2, or
+	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
+	 * canonical stage-2 page tables.
+	 */
+	pgd_t		*pgd;
+	phys_addr_t	pgd_phys;

 	/* The last vcpu id that ran on each physical CPU */
 	int __percpu *last_vcpu_ran;

+	struct kvm *kvm;
+};
+
+struct kvm_arch {
+	struct kvm_s2_mmu mmu;
+
+	/* VTCR_EL2 value for this VM */
+	u64    vtcr;
+
 	/* The maximum number of vCPUs depends on the used GIC model */
 	int max_vcpus;
@@ -254,6 +269,9 @@ struct kvm_vcpu_arch {
 	void *sve_state;
 	unsigned int sve_max_vl;

+	/* Stage 2 paging state used by the hardware on next switch */
+	struct kvm_s2_mmu *hw_mmu;
+
 	/* HYP configuration */
 	u64 hcr_el2;
 	u32 mdcr_el2;
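
The key indirection above is hw_mmu: each vCPU now names the stage 2 context the hardware should use on the next world switch instead of reaching into vcpu->kvm->arch directly. In this patch it can only ever point at the VM's canonical MMU, but it is the hook that lets the nested-virtualization follow-up substitute a shadow stage 2. A minimal sketch of the pattern, using identifiers from the hunks in this commit:

	/* Sketch: how the per-vCPU stage 2 context is selected and consumed.
	 * In this patch the assignment below is the only possible target. */
	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;	/* kvm_arch_vcpu_create() */

	/* World switch: consumers take whatever MMU they are handed. */
	__activate_vm(vcpu->arch.hw_mmu);		/* VHE: kernel VA is usable */
	__activate_vm(kern_hyp_va(vcpu->arch.hw_mmu));	/* nVHE: needs the hyp VA */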

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 8 additions & 8 deletions

@@ -134,8 +134,8 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 void free_hyp_pgds(void);

 void stage2_unmap_vm(struct kvm *kvm);
-int kvm_alloc_stage2_pgd(struct kvm *kvm);
-void kvm_free_stage2_pgd(struct kvm *kvm);
+int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
+void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 			  phys_addr_t pa, unsigned long size, bool writable);
@@ -577,13 +577,13 @@ static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
 	return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
 }

-static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
+static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
 {
-	struct kvm_vmid *vmid = &kvm->arch.vmid;
+	struct kvm_vmid *vmid = &mmu->vmid;
 	u64 vmid_field, baddr;
 	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

-	baddr = kvm->arch.pgd_phys;
+	baddr = mmu->pgd_phys;
 	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
 	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
 }
@@ -592,10 +592,10 @@ static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
  * Must be called from hyp code running at EL2 with an updated VTTBR
  * and interrupts disabled.
  */
-static __always_inline void __load_guest_stage2(struct kvm *kvm)
+static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
 {
-	write_sysreg(kvm->arch.vtcr, vtcr_el2);
-	write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
+	write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2);
+	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

 	/*
 	 * ARM errata 1165522 and 1530923 require the actual execution of the
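
As a worked example of what kvm_get_vttbr() now computes from per-MMU state, assume a VMID of 5, a stage 2 pgd at physical address 0x40001000, no CnP support, and a PA size for which kvm_phys_to_vttbr() passes the address through unchanged. The concrete values are illustrative, not from the patch; VTTBR_VMID_SHIFT is 48 on arm64:

	u64 baddr      = 0x40001000;			/* mmu->pgd_phys */
	u64 vmid_field = 5ULL << VTTBR_VMID_SHIFT;	/* 0x0005000000000000 */
	u64 cnp        = 0;				/* system_supports_cnp() is false */
	u64 vttbr      = baddr | vmid_field | cnp;	/* 0x0005000040001000 */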

arch/arm64/kvm/arm.c

Lines changed: 12 additions & 24 deletions

@@ -106,41 +106,28 @@ static int kvm_arm_default_max_vcpus(void)
  */
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-	int ret, cpu;
+	int ret;

 	ret = kvm_arm_setup_stage2(kvm, type);
 	if (ret)
 		return ret;

-	kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
-	if (!kvm->arch.last_vcpu_ran)
-		return -ENOMEM;
-
-	for_each_possible_cpu(cpu)
-		*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
-
-	ret = kvm_alloc_stage2_pgd(kvm);
+	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
 	if (ret)
-		goto out_fail_alloc;
+		return ret;

 	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
 	if (ret)
 		goto out_free_stage2_pgd;

 	kvm_vgic_early_init(kvm);

-	/* Mark the initial VMID generation invalid */
-	kvm->arch.vmid.vmid_gen = 0;
-
 	/* The maximum number of VCPUs is limited by the host's GIC model */
 	kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();

 	return ret;
 out_free_stage2_pgd:
-	kvm_free_stage2_pgd(kvm);
-out_fail_alloc:
-	free_percpu(kvm->arch.last_vcpu_ran);
-	kvm->arch.last_vcpu_ran = NULL;
+	kvm_free_stage2_pgd(&kvm->arch.mmu);
 	return ret;
 }
@@ -160,9 +147,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)

 	kvm_vgic_destroy(kvm);

-	free_percpu(kvm->arch.last_vcpu_ran);
-	kvm->arch.last_vcpu_ran = NULL;
-
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		if (kvm->vcpus[i]) {
 			kvm_vcpu_destroy(kvm->vcpus[i]);
@@ -279,6 +263,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)

 	kvm_arm_pvtime_vcpu_init(&vcpu->arch);

+	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
+
 	err = kvm_vgic_vcpu_init(vcpu);
 	if (err)
 		return err;
@@ -334,16 +320,18 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)

 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	struct kvm_s2_mmu *mmu;
 	int *last_ran;

-	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+	mmu = vcpu->arch.hw_mmu;
+	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

 	/*
 	 * We might get preempted before the vCPU actually runs, but
 	 * over-invalidation doesn't affect correctness.
 	 */
 	if (*last_ran != vcpu->vcpu_id) {
-		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+		kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
 		*last_ran = vcpu->vcpu_id;
 	}
@@ -680,7 +668,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		 */
 		cond_resched();

-		update_vmid(&vcpu->kvm->arch.vmid);
+		update_vmid(&vcpu->arch.hw_mmu->vmid);

 		check_vcpu_requests(vcpu);
@@ -729,7 +717,7 @@
 		 */
 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

-		if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
+		if (ret <= 0 || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
 		    kvm_request_pending(vcpu)) {
 			vcpu->mode = OUTSIDE_GUEST_MODE;
 			isb(); /* Ensure work in x_flush_hwstate is committed */
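
The per-cpu last_vcpu_ran allocation and the VMID-generation reset deleted from kvm_arch_init_vm() above do not disappear: since the commit message promises no functional change, they move into kvm_init_stage2_mmu() in arch/arm64/kvm/mmu.c, one of the 10 changed files not shown in this excerpt. A plausible sketch of its shape, assuming it simply absorbs the deleted logic plus the pgd work that kvm_alloc_stage2_pgd() used to do; this is not the verbatim kernel code:

	/* Sketch only; the real body lives in mmu.c (not shown on this page). */
	int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
	{
		int cpu;

		mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran));
		if (!mmu->last_vcpu_ran)
			return -ENOMEM;

		for_each_possible_cpu(cpu)
			*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;

		/* Mark the initial VMID generation invalid */
		mmu->vmid.vmid_gen = 0;

		mmu->kvm = kvm;

		/* ...stage 2 pgd allocation, as kvm_alloc_stage2_pgd() did... */
		return 0;
	}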

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 2 additions & 2 deletions

@@ -122,9 +122,9 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
 	}
 }

-static inline void __activate_vm(struct kvm *kvm)
+static inline void __activate_vm(struct kvm_s2_mmu *mmu)
 {
-	__load_guest_stage2(kvm);
+	__load_guest_stage2(mmu);
 }

 static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 1 addition & 1 deletion

@@ -194,7 +194,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__sysreg32_restore_state(vcpu);
 	__sysreg_restore_state_nvhe(guest_ctxt);

-	__activate_vm(kern_hyp_va(vcpu->kvm));
+	__activate_vm(kern_hyp_va(vcpu->arch.hw_mmu));
 	__activate_traps(vcpu);

 	__hyp_vgic_restore_state(vcpu);

arch/arm64/kvm/hyp/nvhe/tlb.c

Lines changed: 16 additions & 17 deletions

@@ -12,7 +12,8 @@ struct tlb_inv_context {
 	u64 tcr;
 };

-static void __tlb_switch_to_guest(struct kvm *kvm, struct tlb_inv_context *cxt)
+static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
+				  struct tlb_inv_context *cxt)
 {
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		u64 val;
@@ -30,12 +31,10 @@ static void __tlb_switch_to_guest(struct kvm *kvm, struct tlb_inv_context *cxt)
 		isb();
 	}

-	/* __load_guest_stage2() includes an ISB for the workaround. */
-	__load_guest_stage2(kvm);
-	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
+	__load_guest_stage2(mmu);
 }

-static void __tlb_switch_to_host(struct kvm *kvm, struct tlb_inv_context *cxt)
+static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
 {
 	write_sysreg(0, vttbr_el2);

@@ -47,15 +46,15 @@ static void __tlb_switch_to_host(struct kvm *kvm, struct tlb_inv_context *cxt)
 	}
 }

-void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
 {
 	struct tlb_inv_context cxt;

 	dsb(ishst);

 	/* Switch to requested VMID */
-	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest(kvm, &cxt);
+	mmu = kern_hyp_va(mmu);
+	__tlb_switch_to_guest(mmu, &cxt);

 	/*
 	 * We could do so much better if we had the VA as well.
@@ -98,39 +97,39 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	if (icache_is_vpipt())
 		__flush_icache_all();

-	__tlb_switch_to_host(kvm, &cxt);
+	__tlb_switch_to_host(&cxt);
 }

-void __kvm_tlb_flush_vmid(struct kvm *kvm)
+void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 {
 	struct tlb_inv_context cxt;

 	dsb(ishst);

 	/* Switch to requested VMID */
-	kvm = kern_hyp_va(kvm);
-	__tlb_switch_to_guest(kvm, &cxt);
+	mmu = kern_hyp_va(mmu);
+	__tlb_switch_to_guest(mmu, &cxt);

 	__tlbi(vmalls12e1is);
 	dsb(ish);
 	isb();

-	__tlb_switch_to_host(kvm, &cxt);
+	__tlb_switch_to_host(&cxt);
 }

-void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
 {
-	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
 	struct tlb_inv_context cxt;

 	/* Switch to requested VMID */
-	__tlb_switch_to_guest(kvm, &cxt);
+	mmu = kern_hyp_va(mmu);
+	__tlb_switch_to_guest(mmu, &cxt);

 	__tlbi(vmalle1);
 	dsb(nsh);
 	isb();

-	__tlb_switch_to_host(kvm, &cxt);
+	__tlb_switch_to_host(&cxt);
 }

 void __kvm_flush_vm_context(void)
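
One detail worth calling out in the hunks above: __kvm_tlb_flush_local_vmid() used to receive a vCPU kernel pointer and had to translate twice to reach the VM, while __tlb_switch_to_host() carried a kvm argument it never used. Passing the kvm_s2_mmu pointer straight through reduces this to a single kern_hyp_va() and drops the dead parameter:

	/* Before: two VA translations to find the stage 2 state from a vcpu. */
	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);

	/* After: the caller hands over the stage 2 state itself. */
	mmu = kern_hyp_va(mmu);
	__tlb_switch_to_guest(mmu, &cxt);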

arch/arm64/kvm/hyp/vhe/switch.c

Lines changed: 1 addition & 1 deletion

@@ -125,7 +125,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	 * stage 2 translation, and __activate_traps clear HCR_EL2.TGE
 	 * (among other things).
 	 */
-	__activate_vm(vcpu->kvm);
+	__activate_vm(vcpu->arch.hw_mmu);
 	__activate_traps(vcpu);

 	sysreg_restore_guest_state_vhe(guest_ctxt);

