author     Christoph Hellwig <hch@lst.de>    2019-08-18 11:05:56 +0200
committer  Jason Gunthorpe <jgg@mellanox.com>    2019-08-20 14:41:35 +0200
commit     6f42193fd86eb5eb518984032c85e9b5582b7625
tree       0087427aa44d658bfd44079acfb14052426fd732 /kernel
parent     memremap: remove the dev field in struct dev_pagemap
memremap: don't use a separate devm action for devmap_managed_enable_get
Just clean up directly on early failures, and piggyback on
devm_memremap_pages_release otherwise. This helps with a pending
non-device-managed version of devm_memremap_pages.

Link: https://lore.kernel.org/r/20190818090557.17853-4-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Bharata B Rao <bharata@linux.ibm.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
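For readers outside the kernel tree, here is a minimal userspace sketch of the pattern the patch settles on. All names (managed_enable, managed_put, managed_active, map_pages) are hypothetical; C11 atomics and a plain bool stand in for the kernel's atomic_t and the devmap_managed_key static branch. The first enable flips the key on, the last put flips it off, and an early setup failure now drops its reference by hand instead of registering a devm action:

#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int managed_count;
static bool managed_active;	/* stand-in for the static branch */

static int managed_enable(void)
{
	/* mirrors atomic_inc_return(...) == 1: first user enables the key */
	if (atomic_fetch_add(&managed_count, 1) == 0)
		managed_active = true;
	return 0;
}

static void managed_put(void)
{
	/* mirrors atomic_dec_and_test(...): last user disables the key */
	if (atomic_fetch_sub(&managed_count, 1) == 1)
		managed_active = false;
}

/* Shape of the new error unwind: no devm action, just an explicit put. */
static int map_pages(bool fail_setup)
{
	int error = managed_enable();

	if (error)
		return error;
	if (fail_setup) {
		error = -ENOMEM;
		goto err;
	}
	return 0;	/* success: a later release path calls managed_put() */
err:
	managed_put();	/* early failure: clean up by hand */
	return error;
}

int main(void)
{
	map_pages(true);	/* failed setup drops its own reference */
	printf("active after failed setup: %d\n", managed_active);
	map_pages(false);	/* successful setup holds a reference */
	printf("active after good setup: %d\n", managed_active);
	managed_put();		/* what the release callback would do */
	printf("active after release: %d\n", managed_active);
	return 0;
}

The point of dropping the struct device argument, per the commit message, is that once the enable/put pair no longer depends on devm, a non-device-managed variant of devm_memremap_pages can reuse it unchanged.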
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/memremap.c	|  15 ++++++++++-----
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 600a14cbe663..09a087ca30ff 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -21,13 +21,13 @@ DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
 EXPORT_SYMBOL(devmap_managed_key);
 static atomic_t devmap_managed_enable;
 
-static void devmap_managed_enable_put(void *data)
+static void devmap_managed_enable_put(void)
 {
 	if (atomic_dec_and_test(&devmap_managed_enable))
 		static_branch_disable(&devmap_managed_key);
 }
 
-static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
+static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
 {
 	if (!pgmap->ops || !pgmap->ops->page_free) {
 		WARN(1, "Missing page_free method\n");
@@ -36,13 +36,16 @@ static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
 
 	if (atomic_inc_return(&devmap_managed_enable) == 1)
 		static_branch_enable(&devmap_managed_key);
-	return devm_add_action_or_reset(dev, devmap_managed_enable_put, NULL);
+	return 0;
 }
 #else
-static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
+static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
 {
 	return -EINVAL;
 }
+static void devmap_managed_enable_put(void)
+{
+}
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
 static void pgmap_array_delete(struct resource *res)
@@ -123,6 +126,7 @@ static void devm_memremap_pages_release(void *data)
 	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
 	pgmap_array_delete(res);
 	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
+	devmap_managed_enable_put();
 }
 
 static void dev_pagemap_percpu_release(struct percpu_ref *ref)
@@ -212,7 +216,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 	}
 
 	if (need_devmap_managed) {
-		error = devmap_managed_enable_get(dev, pgmap);
+		error = devmap_managed_enable_get(pgmap);
 		if (error)
 			return ERR_PTR(error);
 	}
@@ -321,6 +325,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
  err_array:
 	dev_pagemap_kill(pgmap);
 	dev_pagemap_cleanup(pgmap);
+	devmap_managed_enable_put();
 	return ERR_PTR(error);
 }
 EXPORT_SYMBOL_GPL(devm_memremap_pages);