Diffstat (limited to 'virt')
 virt/kvm/kvm_main.c |  13 ++++++++++++-
 virt/kvm/vfio.c     | 161 ++++++++++++++++++++----------------------
 2 files changed, 89 insertions(+), 85 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index d63cf1c4f5a7..486800a7024b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2543,7 +2543,18 @@ static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
bool interruptible, bool *writable, kvm_pfn_t *pfn)
{
- unsigned int flags = FOLL_HWPOISON;
+ /*
+ * When a VCPU accesses a page that is not mapped into the secondary
+ * MMU, we lookup the page using GUP to map it, so the guest VCPU can
+ * make progress. We always want to honor NUMA hinting faults in that
+ * case, because GUP usage corresponds to memory accesses from the VCPU.
+ * Otherwise, we'd not trigger NUMA hinting faults once a page is
+ * mapped into the secondary MMU and gets accessed by a VCPU.
+ *
+ * Note that get_user_page_fast_only() and FOLL_WRITE for now
+ * implicitly honor NUMA hinting faults and don't need this flag.
+ */
+ unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT;
struct page *page;
int npages;
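
For context, the new flag is consumed by the GUP call a few lines further down in hva_to_pfn_slow(). Below is a minimal sketch of that slow path, assuming the kernel-internal GUP API; the async handling, interruptible mode, and writable fixup of the real function are omitted:

/*
 * Illustrative sketch only, not the body of hva_to_pfn_slow(): look
 * the page up via GUP, honoring NUMA hinting faults so that VCPU
 * accesses keep driving NUMA page migration.
 */
static long gup_slow_sketch(unsigned long addr, bool write_fault,
			    struct page **page)
{
	unsigned int flags = FOLL_HWPOISON | FOLL_HONOR_NUMA_FAULT;

	if (write_fault)
		flags |= FOLL_WRITE;

	/*
	 * get_user_pages_unlocked() takes and drops mmap_lock itself
	 * and returns the number of pages pinned.
	 */
	return get_user_pages_unlocked(addr, 1, page, flags);
}
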
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 9584eb57e0ed..ca24ce120906 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -21,7 +21,7 @@
#include <asm/kvm_ppc.h>
#endif
-struct kvm_vfio_group {
+struct kvm_vfio_file {
struct list_head node;
struct file *file;
#ifdef CONFIG_SPAPR_TCE_IOMMU
@@ -30,7 +30,7 @@ struct kvm_vfio_group {
};
struct kvm_vfio {
- struct list_head group_list;
+ struct list_head file_list;
struct mutex lock;
bool noncoherent;
};
@@ -64,18 +64,18 @@ static bool kvm_vfio_file_enforced_coherent(struct file *file)
return ret;
}
-static bool kvm_vfio_file_is_group(struct file *file)
+static bool kvm_vfio_file_is_valid(struct file *file)
{
bool (*fn)(struct file *file);
bool ret;
- fn = symbol_get(vfio_file_is_group);
+ fn = symbol_get(vfio_file_is_valid);
if (!fn)
return false;
ret = fn(file);
- symbol_put(vfio_file_is_group);
+ symbol_put(vfio_file_is_valid);
return ret;
}
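
Aside: the symbol_get()/symbol_put() pairing above is what lets kvm.ko probe VFIO without a hard module dependency; the lookup only succeeds while vfio is loaded, and the module is pinned for the duration of the call. A generic sketch of the pattern, with my_vfio_like_fn standing in for any EXPORT_SYMBOL'd predicate (the helper name is hypothetical):

/*
 * Weak module-linkage sketch: resolve an exported symbol at run time
 * and hold a reference on its module while calling it.
 */
static bool call_if_loaded(struct file *file)
{
	bool (*fn)(struct file *file);
	bool ret;

	fn = symbol_get(my_vfio_like_fn);	/* NULL if module not loaded */
	if (!fn)
		return false;

	ret = fn(file);
	symbol_put(my_vfio_like_fn);		/* drop the module reference */
	return ret;
}
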
@@ -98,34 +98,33 @@ static struct iommu_group *kvm_vfio_file_iommu_group(struct file *file)
}
static void kvm_spapr_tce_release_vfio_group(struct kvm *kvm,
- struct kvm_vfio_group *kvg)
+ struct kvm_vfio_file *kvf)
{
- if (WARN_ON_ONCE(!kvg->iommu_group))
+ if (WARN_ON_ONCE(!kvf->iommu_group))
return;
- kvm_spapr_tce_release_iommu_group(kvm, kvg->iommu_group);
- iommu_group_put(kvg->iommu_group);
- kvg->iommu_group = NULL;
+ kvm_spapr_tce_release_iommu_group(kvm, kvf->iommu_group);
+ iommu_group_put(kvf->iommu_group);
+ kvf->iommu_group = NULL;
}
#endif
/*
- * Groups can use the same or different IOMMU domains. If the same then
- * adding a new group may change the coherency of groups we've previously
- * been told about. We don't want to care about any of that so we retest
- * each group and bail as soon as we find one that's noncoherent. This
- * means we only ever [un]register_noncoherent_dma once for the whole device.
+ * Groups/devices can use the same or different IOMMU domains. If the same
+ * then adding a new group/device may change the coherency of groups/devices
+ * we've previously been told about. We don't want to care about any of
+ * that so we retest each group/device and bail as soon as we find one that's
+ * noncoherent. This means we only ever [un]register_noncoherent_dma once
+ * for the whole device.
*/
static void kvm_vfio_update_coherency(struct kvm_device *dev)
{
struct kvm_vfio *kv = dev->private;
bool noncoherent = false;
- struct kvm_vfio_group *kvg;
+ struct kvm_vfio_file *kvf;
- mutex_lock(&kv->lock);
-
- list_for_each_entry(kvg, &kv->group_list, node) {
- if (!kvm_vfio_file_enforced_coherent(kvg->file)) {
+ list_for_each_entry(kvf, &kv->file_list, node) {
+ if (!kvm_vfio_file_enforced_coherent(kvf->file)) {
noncoherent = true;
break;
}
@@ -139,64 +138,58 @@ static void kvm_vfio_update_coherency(struct kvm_device *dev)
else
kvm_arch_unregister_noncoherent_dma(dev->kvm);
}
-
- mutex_unlock(&kv->lock);
}
-static int kvm_vfio_group_add(struct kvm_device *dev, unsigned int fd)
+static int kvm_vfio_file_add(struct kvm_device *dev, unsigned int fd)
{
struct kvm_vfio *kv = dev->private;
- struct kvm_vfio_group *kvg;
+ struct kvm_vfio_file *kvf;
struct file *filp;
- int ret;
+ int ret = 0;
filp = fget(fd);
if (!filp)
return -EBADF;
- /* Ensure the FD is a vfio group FD.*/
- if (!kvm_vfio_file_is_group(filp)) {
+ /* Ensure the FD is a vfio FD. */
+ if (!kvm_vfio_file_is_valid(filp)) {
ret = -EINVAL;
- goto err_fput;
+ goto out_fput;
}
mutex_lock(&kv->lock);
- list_for_each_entry(kvg, &kv->group_list, node) {
- if (kvg->file == filp) {
+ list_for_each_entry(kvf, &kv->file_list, node) {
+ if (kvf->file == filp) {
ret = -EEXIST;
- goto err_unlock;
+ goto out_unlock;
}
}
- kvg = kzalloc(sizeof(*kvg), GFP_KERNEL_ACCOUNT);
- if (!kvg) {
+ kvf = kzalloc(sizeof(*kvf), GFP_KERNEL_ACCOUNT);
+ if (!kvf) {
ret = -ENOMEM;
- goto err_unlock;
+ goto out_unlock;
}
- kvg->file = filp;
- list_add_tail(&kvg->node, &kv->group_list);
+ kvf->file = get_file(filp);
+ list_add_tail(&kvf->node, &kv->file_list);
kvm_arch_start_assignment(dev->kvm);
-
- mutex_unlock(&kv->lock);
-
- kvm_vfio_file_set_kvm(kvg->file, dev->kvm);
+ kvm_vfio_file_set_kvm(kvf->file, dev->kvm);
kvm_vfio_update_coherency(dev);
- return 0;
-err_unlock:
+out_unlock:
mutex_unlock(&kv->lock);
-err_fput:
+out_fput:
fput(filp);
return ret;
}
-static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
+static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd)
{
struct kvm_vfio *kv = dev->private;
- struct kvm_vfio_group *kvg;
+ struct kvm_vfio_file *kvf;
struct fd f;
int ret;
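
Note the ownership change in kvm_vfio_file_add() above: the list entry now takes its own reference via get_file(), so the success and error paths converge on a single fput() of the fget() reference. A minimal sketch of that pattern, with hypothetical names (my_obj, my_attach, is_acceptable), showing only the refcounting:

/*
 * Take-your-own-reference sketch: the object keeps a reference it
 * owns, and the fget() reference is always dropped on exit.
 */
static int my_attach(struct my_obj *obj, unsigned int fd)
{
	struct file *filp = fget(fd);		/* +1 reference */
	int ret = 0;

	if (!filp)
		return -EBADF;

	if (!is_acceptable(filp)) {
		ret = -EINVAL;
		goto out_fput;
	}

	obj->file = get_file(filp);		/* +1, owned by obj */
out_fput:
	fput(filp);				/* drop the fget() reference */
	return ret;
}
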
@@ -208,38 +201,38 @@ static int kvm_vfio_group_del(struct kvm_device *dev, unsigned int fd)
mutex_lock(&kv->lock);
- list_for_each_entry(kvg, &kv->group_list, node) {
- if (kvg->file != f.file)
+ list_for_each_entry(kvf, &kv->file_list, node) {
+ if (kvf->file != f.file)
continue;
- list_del(&kvg->node);
+ list_del(&kvf->node);
kvm_arch_end_assignment(dev->kvm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
- kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
+ kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
#endif
- kvm_vfio_file_set_kvm(kvg->file, NULL);
- fput(kvg->file);
- kfree(kvg);
+ kvm_vfio_file_set_kvm(kvf->file, NULL);
+ fput(kvf->file);
+ kfree(kvf);
ret = 0;
break;
}
+ kvm_vfio_update_coherency(dev);
+
mutex_unlock(&kv->lock);
fdput(f);
- kvm_vfio_update_coherency(dev);
-
return ret;
}
#ifdef CONFIG_SPAPR_TCE_IOMMU
-static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
- void __user *arg)
+static int kvm_vfio_file_set_spapr_tce(struct kvm_device *dev,
+ void __user *arg)
{
struct kvm_vfio_spapr_tce param;
struct kvm_vfio *kv = dev->private;
- struct kvm_vfio_group *kvg;
+ struct kvm_vfio_file *kvf;
struct fd f;
int ret;
@@ -254,20 +247,20 @@ static int kvm_vfio_group_set_spapr_tce(struct kvm_device *dev,
mutex_lock(&kv->lock);
- list_for_each_entry(kvg, &kv->group_list, node) {
- if (kvg->file != f.file)
+ list_for_each_entry(kvf, &kv->file_list, node) {
+ if (kvf->file != f.file)
continue;
- if (!kvg->iommu_group) {
- kvg->iommu_group = kvm_vfio_file_iommu_group(kvg->file);
- if (WARN_ON_ONCE(!kvg->iommu_group)) {
+ if (!kvf->iommu_group) {
+ kvf->iommu_group = kvm_vfio_file_iommu_group(kvf->file);
+ if (WARN_ON_ONCE(!kvf->iommu_group)) {
ret = -EIO;
goto err_fdput;
}
}
ret = kvm_spapr_tce_attach_iommu_group(dev->kvm, param.tablefd,
- kvg->iommu_group);
+ kvf->iommu_group);
break;
}
@@ -278,26 +271,26 @@ err_fdput:
}
#endif
-static int kvm_vfio_set_group(struct kvm_device *dev, long attr,
- void __user *arg)
+static int kvm_vfio_set_file(struct kvm_device *dev, long attr,
+ void __user *arg)
{
int32_t __user *argp = arg;
int32_t fd;
switch (attr) {
- case KVM_DEV_VFIO_GROUP_ADD:
+ case KVM_DEV_VFIO_FILE_ADD:
if (get_user(fd, argp))
return -EFAULT;
- return kvm_vfio_group_add(dev, fd);
+ return kvm_vfio_file_add(dev, fd);
- case KVM_DEV_VFIO_GROUP_DEL:
+ case KVM_DEV_VFIO_FILE_DEL:
if (get_user(fd, argp))
return -EFAULT;
- return kvm_vfio_group_del(dev, fd);
+ return kvm_vfio_file_del(dev, fd);
#ifdef CONFIG_SPAPR_TCE_IOMMU
case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
- return kvm_vfio_group_set_spapr_tce(dev, arg);
+ return kvm_vfio_file_set_spapr_tce(dev, arg);
#endif
}
@@ -308,9 +301,9 @@ static int kvm_vfio_set_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
switch (attr->group) {
- case KVM_DEV_VFIO_GROUP:
- return kvm_vfio_set_group(dev, attr->attr,
- u64_to_user_ptr(attr->addr));
+ case KVM_DEV_VFIO_FILE:
+ return kvm_vfio_set_file(dev, attr->attr,
+ u64_to_user_ptr(attr->addr));
}
return -ENXIO;
@@ -320,10 +313,10 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
struct kvm_device_attr *attr)
{
switch (attr->group) {
- case KVM_DEV_VFIO_GROUP:
+ case KVM_DEV_VFIO_FILE:
switch (attr->attr) {
- case KVM_DEV_VFIO_GROUP_ADD:
- case KVM_DEV_VFIO_GROUP_DEL:
+ case KVM_DEV_VFIO_FILE_ADD:
+ case KVM_DEV_VFIO_FILE_DEL:
#ifdef CONFIG_SPAPR_TCE_IOMMU
case KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE:
#endif
@@ -339,16 +332,16 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
static void kvm_vfio_release(struct kvm_device *dev)
{
struct kvm_vfio *kv = dev->private;
- struct kvm_vfio_group *kvg, *tmp;
+ struct kvm_vfio_file *kvf, *tmp;
- list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
+ list_for_each_entry_safe(kvf, tmp, &kv->file_list, node) {
#ifdef CONFIG_SPAPR_TCE_IOMMU
- kvm_spapr_tce_release_vfio_group(dev->kvm, kvg);
+ kvm_spapr_tce_release_vfio_group(dev->kvm, kvf);
#endif
- kvm_vfio_file_set_kvm(kvg->file, NULL);
- fput(kvg->file);
- list_del(&kvg->node);
- kfree(kvg);
+ kvm_vfio_file_set_kvm(kvf->file, NULL);
+ fput(kvf->file);
+ list_del(&kvf->node);
+ kfree(kvf);
kvm_arch_end_assignment(dev->kvm);
}
@@ -382,7 +375,7 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type)
if (!kv)
return -ENOMEM;
- INIT_LIST_HEAD(&kv->group_list);
+ INIT_LIST_HEAD(&kv->file_list);
mutex_init(&kv->lock);
dev->private = kv;
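
From userspace, the renamed attribute is reached through the usual KVM device-attr ioctls. A hedged sketch of a VMM registering a VFIO fd with KVM, assuming the KVM_DEV_VFIO_FILE/KVM_DEV_VFIO_FILE_ADD uapi names introduced alongside this rename (the old KVM_DEV_VFIO_GROUP names remain as aliases):

/*
 * Userspace sketch: create the KVM VFIO pseudo-device on a VM fd and
 * register a VFIO file descriptor (a group fd, or a device fd once
 * cdev support lands) with it.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int kvm_vfio_register(int vm_fd, int32_t vfio_fd)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
	struct kvm_device_attr attr = {
		.group = KVM_DEV_VFIO_FILE,	/* alias: KVM_DEV_VFIO_GROUP */
		.attr  = KVM_DEV_VFIO_FILE_ADD,	/* alias: ..._GROUP_ADD */
		.addr  = (__u64)(unsigned long)&vfio_fd,
	};

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;

	/* The kernel side reads a single int32_t fd from attr.addr. */
	return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
}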