Skip to content

Commit 27ea98a

Browse files
Andre-ARM authored and gregkh committed
KVM: arm/arm64: VGIC/ITS: protect kvm_read_guest() calls with SRCU lock
commit bf30824 upstream. kvm_read_guest() will eventually look up in kvm_memslots(), which requires either to hold the kvm->slots_lock or to be inside a kvm->srcu critical section. In contrast to x86 and s390 we don't take the SRCU lock on every guest exit, so we have to do it individually for each kvm_read_guest() call. Provide a wrapper which does that and use that everywhere. Note that ending the SRCU critical section before returning from the kvm_read_guest() wrapper is safe, because the data has been *copied*, so we don't need to rely on valid references to the memslot anymore. Cc: Stable <[email protected]> # 4.8+ Reported-by: Jan Glauber <[email protected]> Signed-off-by: Andre Przywara <[email protected]> Acked-by: Christoffer Dall <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent b6f6d8b commit 27ea98a

File tree

3 files changed

+40
-7
lines changed

3 files changed

+40
-7
lines changed

arch/arm/include/asm/kvm_mmu.h

+16
Original file line numberDiff line numberDiff line change
@@ -221,6 +221,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
221221
return 8;
222222
}
223223

224+
/*
225+
* We are not in the kvm->srcu critical section most of the time, so we take
226+
* the SRCU read lock here. Since we copy the data from the user page, we
227+
* can immediately drop the lock again.
228+
*/
229+
static inline int kvm_read_guest_lock(struct kvm *kvm,
230+
gpa_t gpa, void *data, unsigned long len)
231+
{
232+
int srcu_idx = srcu_read_lock(&kvm->srcu);
233+
int ret = kvm_read_guest(kvm, gpa, data, len);
234+
235+
srcu_read_unlock(&kvm->srcu, srcu_idx);
236+
237+
return ret;
238+
}
239+
224240
static inline void *kvm_get_hyp_vector(void)
225241
{
226242
return kvm_ksym_ref(__kvm_hyp_vector);

arch/arm64/include/asm/kvm_mmu.h

+16
Original file line numberDiff line numberDiff line change
@@ -309,6 +309,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
309309
return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
310310
}
311311

312+
/*
313+
* We are not in the kvm->srcu critical section most of the time, so we take
314+
* the SRCU read lock here. Since we copy the data from the user page, we
315+
* can immediately drop the lock again.
316+
*/
317+
static inline int kvm_read_guest_lock(struct kvm *kvm,
318+
gpa_t gpa, void *data, unsigned long len)
319+
{
320+
int srcu_idx = srcu_read_lock(&kvm->srcu);
321+
int ret = kvm_read_guest(kvm, gpa, data, len);
322+
323+
srcu_read_unlock(&kvm->srcu, srcu_idx);
324+
325+
return ret;
326+
}
327+
312328
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
313329
#include <asm/mmu.h>
314330

virt/kvm/arm/vgic/vgic-its.c

+8-7
Original file line numberDiff line numberDiff line change
@@ -279,8 +279,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
279279
u8 prop;
280280
int ret;
281281

282-
ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
283-
&prop, 1);
282+
ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
283+
&prop, 1);
284284

285285
if (ret)
286286
return ret;
@@ -413,8 +413,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
413413
* this very same byte in the last iteration. Reuse that.
414414
*/
415415
if (byte_offset != last_byte_offset) {
416-
ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
417-
&pendmask, 1);
416+
ret = kvm_read_guest_lock(vcpu->kvm,
417+
pendbase + byte_offset,
418+
&pendmask, 1);
418419
if (ret) {
419420
kfree(intids);
420421
return ret;
@@ -740,7 +741,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
740741
return false;
741742

742743
/* Each 1st level entry is represented by a 64-bit value. */
743-
if (kvm_read_guest(its->dev->kvm,
744+
if (kvm_read_guest_lock(its->dev->kvm,
744745
BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
745746
&indirect_ptr, sizeof(indirect_ptr)))
746747
return false;
@@ -1297,8 +1298,8 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
12971298
cbaser = CBASER_ADDRESS(its->cbaser);
12981299

12991300
while (its->cwriter != its->creadr) {
1300-
int ret = kvm_read_guest(kvm, cbaser + its->creadr,
1301-
cmd_buf, ITS_CMD_SIZE);
1301+
int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
1302+
cmd_buf, ITS_CMD_SIZE);
13021303
/*
13031304
* If kvm_read_guest() fails, this could be due to the guest
13041305
* programming a bogus value in CBASER or something else going

0 commit comments

Comments
 (0)