| author | Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp> | 2011-05-01 07:33:07 +0200 |
|---|---|---|
| committer | Avi Kivity <avi@redhat.com> | 2011-05-22 14:48:14 +0200 |
| commit | c8cfbb555eb3632bf3dcbe1a591c1f4d0c28681c (patch) | |
| tree | 4d7b9e26a8f1eee7c598bb132a6cd24e3aa4b211 /arch/x86/kvm/paging_tmpl.h | |
| parent | KVM: Fix kvm mmu_notifier initialization order (diff) | |
KVM: MMU: Use ptep_user for cmpxchg_gpte()
The address of the gpte was already calculated and stored in ptep_user
before entering cmpxchg_gpte().
This patch makes cmpxchg_gpte() use that address, which makes it clear that
we are operating on the same gpte address throughout walk_addr_generic().
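For orientation, the updated helper reduces to the pattern below. This is a minimal sketch assembled from the first hunk of the diff further down; the kernel types and macros (pt_element_t, CMPXCHG, FNAME) come from paging_tmpl.h, and the final page release and return statement are the unchanged tail of the function, recalled from the surrounding code rather than quoted from this patch.

```c
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
{
	int npages;
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	/*
	 * Pin the page backing ptep_user, the host virtual address of the
	 * gpte that walk_addr_generic() already computed.
	 */
	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
	if (unlikely(npages != 1))
		return -EFAULT;

	/*
	 * Update the gpte atomically through a temporary kernel mapping;
	 * CMPXCHG is the width-appropriate cmpxchg from paging_tmpl.h.
	 */
	table = kmap_atomic(page, KM_USER0);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);

	/*
	 * Unchanged tail (outside the quoted hunk): drop the pinned page
	 * and report whether the gpte changed under us.
	 */
	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}
```

Because get_user_pages_fast() pins the page for the host virtual address directly, the separate translate_gpa()/gfn_to_page() round trip of the old code is no longer needed.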
Note that the unlikely() annotations are used to document that these
conditions are unusual; they are not meant as performance hints.
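For example, the accessed-bit update in walk_addr_generic() now reads as follows (lifted from the second hunk below; the dirty-bit update has the same shape with PT_DIRTY_MASK and `goto error`, and the final `goto walk` is surrounding unchanged code rather than part of the quoted hunk):

```c
	ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
				  pte, pte|PT_ACCESSED_MASK);
	if (unlikely(ret < 0)) {
		/* unusual: the page backing the gpte could not be pinned */
		present = false;
		break;
	} else if (ret)
		/* the gpte changed under us, so the walk is restarted */
		goto walk;
```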
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r-- | arch/x86/kvm/paging_tmpl.h | 26 |
1 file changed, 12 insertions, 14 deletions
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index e3f81418797e..6c4dc010c4cb 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -79,21 +79,19 @@ static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
 }
 
 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			       gfn_t table_gfn, unsigned index,
-			       pt_element_t orig_pte, pt_element_t new_pte)
+			       pt_element_t __user *ptep_user, unsigned index,
+			       pt_element_t orig_pte, pt_element_t new_pte)
 {
+	int npages;
 	pt_element_t ret;
 	pt_element_t *table;
 	struct page *page;
-	gpa_t gpa;
 
-	gpa = mmu->translate_gpa(vcpu, table_gfn << PAGE_SHIFT,
-				 PFERR_USER_MASK|PFERR_WRITE_MASK);
-	if (gpa == UNMAPPED_GVA)
+	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
+	/* Check if the user is doing something meaningless. */
+	if (unlikely(npages != 1))
 		return -EFAULT;
 
-	page = gfn_to_page(vcpu->kvm, gpa_to_gfn(gpa));
-
 	table = kmap_atomic(page, KM_USER0);
 	ret = CMPXCHG(&table[index], orig_pte, new_pte);
 	kunmap_atomic(table, KM_USER0);
@@ -220,9 +218,9 @@ walk:
 			int ret;
 			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
 						       sizeof(pte));
-			ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn,
-					index, pte, pte|PT_ACCESSED_MASK);
-			if (ret < 0) {
+			ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
+						  pte, pte|PT_ACCESSED_MASK);
+			if (unlikely(ret < 0)) {
 				present = false;
 				break;
 			} else if (ret)
@@ -279,9 +277,9 @@ walk:
 		int ret;
 
 		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
-		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn, index, pte,
-				pte|PT_DIRTY_MASK);
-		if (ret < 0) {
+		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
+					  pte, pte|PT_DIRTY_MASK);
+		if (unlikely(ret < 0)) {
 			present = false;
 			goto error;
 		} else if (ret)