| author | Igor Mammedov <imammedo@redhat.com> | 2014-11-14 00:00:13 +0100 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2014-11-14 10:49:04 +0100 |
| commit | 063584d44377ebde5ebc6e99cedc1bc6561939d7 (patch) | |
| tree | c7ce69f00f8b1edfd89ca621d51a590709b5f14f /virt | |
| parent | kvm: x86: increase user memory slots to 509 (diff) | |
kvm: memslots: replace heap sort with an insertion sort pass
memslots is a sorted array. When a slot is changed, heapsort (lib/sort.c)
would take O(n log n) time to update it; an optimized insertion sort will
only cost O(n) on an array with just one item out of order.
Replace sort() with a custom sort that takes advantage of the memslots usage
pattern and the known position of the changed slot.
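For illustration, here is a minimal userspace sketch of the same idea (not the kernel code; `struct slot` and `restore_order()` are invented names, but the loop mirrors the bubbling pass this patch introduces): the array is kept sorted by size in descending order, and since only one entry can be out of place, a single pass moves it to its final position in O(n).

```c
#include <stddef.h>

/* Simplified stand-in for a memslot: only the fields the sort cares about. */
struct slot {
	int id;
	unsigned long npages;
};

/*
 * The array a[0..n-1] is sorted by npages in descending order except for
 * the entry at index i, whose size has just changed.  One pass shifts
 * neighbours over until the changed entry lands in its correct position.
 */
static void restore_order(struct slot *a, size_t n, size_t i)
{
	struct slot moved = a[i];

	while (1) {
		if (i + 1 < n && moved.npages < a[i + 1].npages) {
			a[i] = a[i + 1];	/* slot shrank: move it toward the tail */
			i++;
		} else if (i > 0 && moved.npages > a[i - 1].npages) {
			a[i] = a[i - 1];	/* slot grew: move it toward the head */
			i--;
		} else {
			a[i] = moved;		/* final position found */
			break;
		}
	}
}
```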
Performance change for 128 memslot insertions with gradually increasing
size (the worst case):

          heap sort   custom sort
    max:  249747      2500 cycles

with the custom sort algorithm taking ~98% less time than the original
update.
Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'virt')
-rw-r--r-- | virt/kvm/kvm_main.c | 56 |
1 file changed, 28 insertions, 28 deletions
```diff
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3a31ec6e396b..c0c2202e6c4f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -668,31 +668,37 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 	return 0;
 }
 
-static int cmp_memslot(const void *slot1, const void *slot2)
-{
-	struct kvm_memory_slot *s1, *s2;
-
-	s1 = (struct kvm_memory_slot *)slot1;
-	s2 = (struct kvm_memory_slot *)slot2;
-
-	if (s1->npages < s2->npages)
-		return 1;
-	if (s1->npages > s2->npages)
-		return -1;
-
-	return 0;
-}
-
 /*
- * Sort the memslots base on its size, so the larger slots
- * will get better fit.
+ * Insert memslot and re-sort memslots based on their size,
+ * so the larger slots will get better fit. Sorting algorithm
+ * takes advantage of having initially sorted array and
+ * known changed memslot position.
  */
-static void sort_memslots(struct kvm_memslots *slots)
+static void insert_memslot(struct kvm_memslots *slots,
+			   struct kvm_memory_slot *new)
 {
-	int i;
+	int i = slots->id_to_index[new->id];
+	struct kvm_memory_slot *old = id_to_memslot(slots, new->id);
+	struct kvm_memory_slot *mslots = slots->memslots;
 
-	sort(slots->memslots, KVM_MEM_SLOTS_NUM,
-	      sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
+	if (new->npages == old->npages) {
+		*old = *new;
+		return;
+	}
+
+	while (1) {
+		if (i < (KVM_MEM_SLOTS_NUM - 1) &&
+		    new->npages < mslots[i + 1].npages) {
+			mslots[i] = mslots[i + 1];
+			i++;
+		} else if (i > 0 && new->npages > mslots[i - 1].npages) {
+			mslots[i] = mslots[i - 1];
+			i--;
+		} else {
+			mslots[i] = *new;
+			break;
+		}
+	}
 
 	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
 		slots->id_to_index[slots->memslots[i].id] = i;
@@ -702,13 +708,7 @@ static void update_memslots(struct kvm_memslots *slots,
 			    struct kvm_memory_slot *new)
 {
 	if (new) {
-		int id = new->id;
-		struct kvm_memory_slot *old = id_to_memslot(slots, id);
-		unsigned long npages = old->npages;
-
-		*old = *new;
-		if (new->npages != npages)
-			sort_memslots(slots);
+		insert_memslot(slots, new);
 	}
 }
```
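As a rough usage sketch (hypothetical: it reuses `struct slot` and `restore_order()` from the sketch above, and `NSLOTS` and the slot sizes are invented), the driver below mimics the worst case quoted in the commit message: every new slot is larger than all existing ones, so each insertion has to travel from the tail of the array to the head, and the array stays sorted after every pass.

```c
#include <assert.h>
#include <stdio.h>

#define NSLOTS 128

int main(void)
{
	struct slot slots[NSLOTS] = { 0 };	/* empty slots have npages == 0 */
	size_t n, j;

	for (n = 0; n < NSLOTS; n++) {
		/* "register" a new slot in the last (empty) position... */
		slots[NSLOTS - 1].id = (int)n;
		slots[NSLOTS - 1].npages = (n + 1) * 16;	/* gradually increasing size */
		/* ...and restore descending size order with one O(n) pass */
		restore_order(slots, NSLOTS, NSLOTS - 1);
	}

	/* the array must still be sorted largest-first */
	for (j = 1; j < NSLOTS; j++)
		assert(slots[j - 1].npages >= slots[j].npages);

	printf("largest slot: id=%d npages=%lu\n", slots[0].id, slots[0].npages);
	return 0;
}
```

Each of the 128 insertions costs a single linear pass here, which corresponds to the O(n) behaviour the commit message's cycle counts are meant to capture.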