path: root/virt/kvm/kvm_main.c
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--  virt/kvm/kvm_main.c  |  45
1 file changed, 25 insertions(+), 20 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index af2730858ebd..615d69bcde2c 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1515,12 +1515,6 @@ static size_t kvm_memslots_size(int slots)
(sizeof(struct kvm_memory_slot) * slots);
}
-static void kvm_copy_memslots(struct kvm_memslots *to,
- struct kvm_memslots *from)
-{
- memcpy(to, from, kvm_memslots_size(from->used_slots));
-}
-
/*
* Note, at a minimum, the current number of used slots must be allocated, even
* when deleting a memslot, as we need a complete duplicate of the memslots for
@@ -1539,11 +1533,22 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
slots = kvzalloc(new_size, GFP_KERNEL_ACCOUNT);
if (likely(slots))
- kvm_copy_memslots(slots, old);
+ memcpy(slots, old, kvm_memslots_size(old->used_slots));
return slots;
}
+static void kvm_copy_memslots_arch(struct kvm_memslots *to,
+ struct kvm_memslots *from)
+{
+ int i;
+
+ WARN_ON_ONCE(to->used_slots != from->used_slots);
+
+ for (i = 0; i < from->used_slots; i++)
+ to->memslots[i].arch = from->memslots[i].arch;
+}
+
static int kvm_set_memslot(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *new, int as_id,
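To make the size arithmetic above concrete, here is a small userspace sketch of the pattern kvm_dup_memslots() relies on: a header followed by a flexible array of slots, where copying kvm_memslots_size(old->used_slots) bytes duplicates the header plus every slot currently in use. The demo_* structures and helpers below are invented for illustration and are heavily trimmed compared to the kernel's kvm_memslots; treat this as a sketch of the layout idea, not the real definitions.

#include <stdlib.h>
#include <string.h>

/* Trimmed stand-ins for the kernel structures (illustration only). */
struct demo_memory_slot {
	unsigned long base_gfn;
	unsigned long npages;
	unsigned int flags;
};

struct demo_memslots {
	int used_slots;
	struct demo_memory_slot memslots[];	/* flexible array member */
};

static size_t demo_memslots_size(int slots)
{
	return sizeof(struct demo_memslots) +
	       sizeof(struct demo_memory_slot) * slots;
}

/*
 * Mirrors the shape of kvm_dup_memslots(): allocate room for at least the
 * current number of used slots, then copy only the bytes that are in use.
 * Assumes new_slots >= old->used_slots, as the kernel comment requires.
 */
static struct demo_memslots *demo_dup_memslots(struct demo_memslots *old,
					       int new_slots)
{
	struct demo_memslots *slots = calloc(1, demo_memslots_size(new_slots));

	if (slots)
		memcpy(slots, old, demo_memslots_size(old->used_slots));
	return slots;
}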
@@ -1584,9 +1589,10 @@ static int kvm_set_memslot(struct kvm *kvm,
slot->flags |= KVM_MEMSLOT_INVALID;
/*
- * We can re-use the memory from the old memslots.
- * It will be overwritten with a copy of the new memslots
- * after reacquiring the slots_arch_lock below.
+ * We can re-use the old memslots, the only difference from the
+ * newly installed memslots is the invalid flag, which will get
+ * dropped by update_memslots anyway. We'll also revert to the
+ * old memslots if preparing the new memory region fails.
*/
slots = install_new_memslots(kvm, as_id, slots);
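As a rough model of why the returned set can simply be re-used: install_new_memslots() publishes one set of memslots as active and hands back the set that was active before, so for DELETE/MOVE the caller installs an invalidated duplicate and keeps the pristine original as its working copy. The sketch below reuses the demo_memslots type from the previous sketch and is an assumption-laden simplification; the real function also updates the memslots generation, publishes via RCU, and drops slots_arch_lock, none of which is modeled here.

/*
 * Illustrative stand-in for install_new_memslots(): publish @new as the
 * active set for one address space and return the previously active set.
 * Locking, RCU publication and generation bookkeeping are omitted.
 */
static struct demo_memslots *
demo_install_new_memslots(struct demo_memslots **active,
			  struct demo_memslots *new)
{
	struct demo_memslots *old = *active;

	*active = new;		/* the kernel uses rcu_assign_pointer() here */
	return old;
}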
@@ -1603,12 +1609,14 @@ static int kvm_set_memslot(struct kvm *kvm,
mutex_lock(&kvm->slots_arch_lock);
/*
- * The arch-specific fields of the memslots could have changed
- * between releasing the slots_arch_lock in
- * install_new_memslots and here, so get a fresh copy of the
- * slots.
+ * The arch-specific fields of the now-active memslots could
+ * have been modified between releasing slots_arch_lock in
+ * install_new_memslots and re-acquiring slots_arch_lock above.
+ * Copy them to the inactive memslots. Arch code is required
+ * to retrieve memslots *after* acquiring slots_arch_lock, thus
+ * the active memslots are guaranteed to be fresh.
*/
- kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id));
+ kvm_copy_memslots_arch(slots, __kvm_memslots(kvm, as_id));
}
/*
@@ -1657,13 +1665,10 @@ static int kvm_set_memslot(struct kvm *kvm,
return 0;
out_slots:
- if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
- slot = id_to_memslot(slots, new->id);
- slot->flags &= ~KVM_MEMSLOT_INVALID;
+ if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
slots = install_new_memslots(kvm, as_id, slots);
- } else {
+ else
mutex_unlock(&kvm->slots_arch_lock);
- }
kvfree(slots);
return r;
}
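Putting the pieces together, the error-path simplification above rests on one invariant: for DELETE/MOVE the working copy handed back by the first install is the pristine, pre-invalidation set, so rolling back is a single reinstall and there is no KVM_MEMSLOT_INVALID flag to clear. The demo below, built on the demo_* helpers from the earlier sketches, exercises exactly that invariant; DEMO_SLOT_INVALID is an invented stand-in for KVM_MEMSLOT_INVALID, and the locking and arch-field resync (which the diff's kvm_copy_memslots_arch() handles) are deliberately left out.

#include <assert.h>

#define DEMO_SLOT_INVALID 0x1u

int main(void)
{
	struct demo_memslots *active, *slots;

	active = calloc(1, demo_memslots_size(1));
	assert(active);
	active->used_slots = 1;

	/* DELETE/MOVE: duplicate, invalidate the duplicate, publish it. */
	slots = demo_dup_memslots(active, 1);
	assert(slots);
	slots->memslots[0].flags |= DEMO_SLOT_INVALID;
	slots = demo_install_new_memslots(&active, slots);

	/* The working copy is the original set: no invalid flag to clear. */
	assert(!(slots->memslots[0].flags & DEMO_SLOT_INVALID));

	/* Preparation failed: rolling back is just one more install. */
	slots = demo_install_new_memslots(&active, slots);
	free(slots);		/* the invalidated duplicate */
	free(active);		/* the restored original */
	return 0;
}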