path: root/arch/ia64/kvm
author     Avi Kivity <avi@redhat.com>            2012-04-18 18:22:47 +0200
committer  Marcelo Tosatti <mtosatti@redhat.com>  2012-04-20 01:35:07 +0200
commit     f78146b0f9230765c6315b2e14f56112513389ad (patch)
tree       2e6780f2989c73c1bf214a5728514c1dc8e15f09 /arch/ia64/kvm
parent     Merge branch 'linus' into queue (diff)
KVM: Fix page-crossing MMIO
MMIO requests that are split across a page boundary are currently broken - the code does not expect to be aborted by the exit to userspace for the first MMIO fragment.

This patch fixes the problem by generalizing the existing code for handling 16-byte MMIOs to handle a number of "fragments", and changes the MMIO code to create those fragments.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
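For illustration, a minimal stand-alone model of the splitting idea described above - not the kernel implementation. Only the gpa and len field names come from the diff below; struct mmio_fragment, split_mmio, and the fixed 4 KiB page size are assumptions made for this sketch.

/*
 * Stand-alone model (userspace C, not kernel code): an MMIO access that
 * crosses a page boundary is carved into pieces, each of which fits
 * within a single page and can be completed by a separate exit to
 * userspace.  Field names gpa/len mirror the diff below; everything
 * else is illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct mmio_fragment {
	uint64_t gpa;    /* guest physical address of this piece */
	unsigned len;    /* length of this piece, never crossing a page */
};

/* Split [gpa, gpa+len) at page boundaries; returns the fragment count. */
static unsigned split_mmio(uint64_t gpa, unsigned len,
			   struct mmio_fragment *frags, unsigned max)
{
	unsigned n = 0;

	while (len && n < max) {
		unsigned in_page = PAGE_SIZE - (gpa & (PAGE_SIZE - 1));
		unsigned now = len < in_page ? len : in_page;

		frags[n].gpa = gpa;
		frags[n].len = now;
		n++;
		gpa += now;
		len -= now;
	}
	return n;
}

int main(void)
{
	struct mmio_fragment frags[2];
	/* 8-byte access starting 4 bytes before a page boundary. */
	unsigned n = split_mmio(0x1000 - 4, 8, frags, 2);

	for (unsigned i = 0; i < n; i++)
		printf("fragment %u: gpa=0x%llx len=%u\n", i,
		       (unsigned long long)frags[i].gpa, frags[i].len);
	return 0;
}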
Diffstat (limited to 'arch/ia64/kvm')
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 9d80ff8d9eff..882ab21a8dcd 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -232,12 +232,12 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
 		goto mmio;
 	vcpu->mmio_needed = 1;
-	vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
-	vcpu->mmio_size = kvm_run->mmio.len = p->size;
+	vcpu->mmio_fragments[0].gpa = kvm_run->mmio.phys_addr = p->addr;
+	vcpu->mmio_fragments[0].len = kvm_run->mmio.len = p->size;
 	vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
 
 	if (vcpu->mmio_is_write)
-		memcpy(vcpu->mmio_data, &p->data, p->size);
+		memcpy(vcpu->arch.mmio_data, &p->data, p->size);
 	memcpy(kvm_run->mmio.data, &p->data, p->size);
 	kvm_run->exit_reason = KVM_EXIT_MMIO;
 	return 0;
@@ -719,7 +719,7 @@ static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
 	struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
 
 	if (!vcpu->mmio_is_write)
-		memcpy(&p->data, vcpu->mmio_data, 8);
+		memcpy(&p->data, vcpu->arch.mmio_data, 8);
 	p->state = STATE_IORESP_READY;
 }
 
@@ -739,7 +739,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	if (vcpu->mmio_needed) {
-		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
+		memcpy(vcpu->arch.mmio_data, kvm_run->mmio.data, 8);
 		kvm_set_mmio_data(vcpu);
 		vcpu->mmio_read_completed = 1;
 		vcpu->mmio_needed = 0;
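To round out the picture, a stand-alone sketch of the completion side under the same illustrative assumptions (the real run loop lives in the generic and x86 KVM code, not in this ia64 hunk): each fragment is handed to a "userspace" handler as one MMIO exit, and the loop advances to the next fragment only after that exit completes - exactly the resumption the description above says the old code did not expect.

/*
 * Illustrative completion loop over fragments like those produced by
 * the split_mmio() sketch earlier; the struct is repeated here so the
 * example compiles on its own.  None of these names are the kernel's.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct mmio_fragment {
	uint64_t gpa;
	unsigned len;
};

/* Pretend "userspace": fill the read data for one fragment. */
static void userspace_handle_read(const struct mmio_fragment *f, uint8_t *dst)
{
	memset(dst, 0xab, f->len);   /* dummy device contents */
}

/* Complete a read that was split into n fragments, one exit per piece. */
static void complete_fragmented_read(const struct mmio_fragment *frags,
				     unsigned n, uint8_t *buf)
{
	unsigned off = 0;

	for (unsigned cur = 0; cur < n; cur++) {
		/* One exit to userspace per fragment ... */
		userspace_handle_read(&frags[cur], buf + off);
		/* ... and only then move on to the next piece. */
		off += frags[cur].len;
	}
}

int main(void)
{
	struct mmio_fragment frags[2] = {
		{ 0x0ffc, 4 },   /* last 4 bytes of one page  */
		{ 0x1000, 4 },   /* first 4 bytes of the next */
	};
	uint8_t buf[8];

	complete_fragmented_read(frags, 2, buf);
	printf("read %zu bytes across a page boundary\n", sizeof(buf));
	return 0;
}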