author     Linus Torvalds <torvalds@linux-foundation.org>  2017-05-06 03:49:20 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-05-06 03:49:20 +0200
commit     53ef7d0e208fa38c3f63d287e0c3ab174f1e1235 (patch)
tree       7d437edf73ef6deb0d77ce291aa25f041837d056 /drivers/nvdimm
parent     Merge tag 'staging-4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git... (diff)
parent     Merge branch 'for-4.12/dax' into libnvdimm-for-next (diff)
Merge tag 'libnvdimm-for-4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm updates from Dan Williams:
 "The bulk of this has been in multiple -next releases. There were a
  few late breaking fixes and small features that got added in the
  last couple days, but the whole set has received a build success
  notification from the kbuild robot.

  Change summary:

   - Region media error reporting: A libnvdimm region device is the
     parent to one or more namespaces. To date, media errors have been
     reported via the "badblocks" attribute attached to pmem block
     devices for namespaces in "raw" or "memory" mode. Given that
     namespaces can be in "device-dax" or "btt-sector" mode this new
     interface reports media errors generically, i.e. independent of
     namespace modes or state. This subsequently allows userspace
     tooling to craft "ACPI 6.1 Section 9.20.7.6 Function Index 4 -
     Clear Uncorrectable Error" requests and submit them via the ioctl
     path for NVDIMM root bus devices.

   - Introduce 'struct dax_device' and 'struct dax_operations':
     Prompted by a request from Linus and feedback from Christoph this
     allows for dax capable drivers to publish their own custom dax
     operations. This fixes the broken assumption that all dax
     operations are related to a persistent memory device, and makes
     it easier for other architectures and platforms to add customized
     persistent memory support.

   - 'libnvdimm' core updates: A new "deep_flush" sysfs attribute is
     available for storage appliance applications to manually trigger
     memory controllers to drain write-pending buffers that would
     otherwise be flushed automatically by the platform ADR
     (asynchronous-DRAM-refresh) mechanism at a power loss event.
     Support for "locked" DIMMs is included to prevent namespaces from
     surfacing when the namespace label data area is locked. Finally,
     fixes for various reported deadlocks and crashes, also tagged for
     -stable.

   - ACPI / nfit driver updates: General updates of the nfit driver to
     add DSM command overrides, ACPI 6.1 health state flags support,
     DSM payload debug available by default, and various fixes.

  Acknowledgements that came after the branch was pushed:

   - commit 565851c972b5 "device-dax: fix sysfs attribute deadlock":
     Tested-by: Yi Zhang <yizhan@redhat.com>

   - commit 23f498448362 "libnvdimm: rework region badblocks clearing"
     Tested-by: Toshi Kani <toshi.kani@hpe.com>"

* tag 'libnvdimm-for-4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (52 commits)
  libnvdimm, pfn: fix 'npfns' vs section alignment
  libnvdimm: handle locked label storage areas
  libnvdimm: convert NDD_ flags to use bitops, introduce NDD_LOCKED
  brd: fix uninitialized use of brd->dax_dev
  block, dax: use correct format string in bdev_dax_supported
  device-dax: fix sysfs attribute deadlock
  libnvdimm: restore "libnvdimm: band aid btt vs clear poison locking"
  libnvdimm: fix nvdimm_bus_lock() vs device_lock() ordering
  libnvdimm: rework region badblocks clearing
  acpi, nfit: kill ACPI_NFIT_DEBUG
  libnvdimm: fix clear length of nvdimm_forget_poison()
  libnvdimm, pmem: fix a NULL pointer BUG in nd_pmem_notify
  libnvdimm, region: sysfs trigger for nvdimm_flush()
  libnvdimm: fix phys_addr for nvdimm_clear_poison
  x86, dax, pmem: remove indirection around memcpy_from_pmem()
  block: remove block_device_operations ->direct_access()
  block, dax: convert bdev_dax_supported() to dax_direct_access()
  filesystem-dax: convert to dax_direct_access()
  Revert "block: use DAX for partition table reads"
  ext2, ext4, xfs: retrieve dax_device for iomap operations
  ...
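[Editor's note] The 'struct dax_operations' conversion is visible in the pmem.c hunks below: ->direct_access() moves off block_device_operations and onto a dax_device allocated with alloc_dax() and torn down with kill_dax()/put_dax(). A minimal sketch of that pattern follows; the foo_* names are hypothetical stand-ins for a driver's own types, while the dax_* calls mirror what this diff does in drivers/nvdimm/pmem.c.

/*
 * Sketch only: illustrates the dax_operations registration pattern
 * introduced by this merge. foo_* identifiers are invented for the
 * example; the dax_* API calls are the ones added in this series.
 */
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/types.h>

struct foo_device {
	void *virt_addr;		/* kernel mapping of the media */
	phys_addr_t phys_addr;		/* base physical address */
	struct dax_device *dax_dev;
};

static long foo_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	/* dax_get_private() returns what the driver passed to alloc_dax() */
	struct foo_device *foo = dax_get_private(dax_dev);
	resource_size_t offset = PFN_PHYS(pgoff);

	*kaddr = foo->virt_addr + offset;
	*pfn = phys_to_pfn_t(foo->phys_addr + offset, PFN_DEV | PFN_MAP);
	/* report that the full requested range is directly mappable */
	return nr_pages;
}

static const struct dax_operations foo_dax_ops = {
	.direct_access = foo_dax_direct_access,
};

static int foo_attach_dax(struct foo_device *foo, const char *host_name)
{
	foo->dax_dev = alloc_dax(foo, host_name, &foo_dax_ops);
	if (!foo->dax_dev)
		return -ENOMEM;
	return 0;
}

static void foo_detach_dax(struct foo_device *foo)
{
	kill_dax(foo->dax_dev);		/* invalidate outstanding mappings */
	put_dax(foo->dax_dev);		/* drop the driver's reference */
}

The design point, per the pull request text, is that the dax operations no longer assume a pmem-backed block device: whatever private data the driver handed to alloc_dax() comes back via dax_get_private().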
Diffstat (limited to 'drivers/nvdimm')
-rw-r--r--  drivers/nvdimm/Kconfig           |   1
-rw-r--r--  drivers/nvdimm/btt_devs.c        |   2
-rw-r--r--  drivers/nvdimm/bus.c             | 122
-rw-r--r--  drivers/nvdimm/claim.c           |  37
-rw-r--r--  drivers/nvdimm/core.c            |  51
-rw-r--r--  drivers/nvdimm/dax_devs.c        |   2
-rw-r--r--  drivers/nvdimm/dimm.c            |   2
-rw-r--r--  drivers/nvdimm/dimm_devs.c       |  19
-rw-r--r--  drivers/nvdimm/namespace_devs.c  |  17
-rw-r--r--  drivers/nvdimm/nd-core.h         |   1
-rw-r--r--  drivers/nvdimm/nd.h              |   2
-rw-r--r--  drivers/nvdimm/pfn_devs.c        |   8
-rw-r--r--  drivers/nvdimm/pmem.c            |  90
-rw-r--r--  drivers/nvdimm/pmem.h            |   7
-rw-r--r--  drivers/nvdimm/region.c          |  24
-rw-r--r--  drivers/nvdimm/region_devs.c     |  83
16 files changed, 379 insertions, 89 deletions
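[Editor's note] The region.c and region_devs.c hunks below surface per-region "badblocks", "resource" and "deep_flush" sysfs attributes. As a rough userspace sketch of consuming them: the /sys/bus/nd/devices/region0 path is an assumption (pick the region backing your namespace), and per the region_visible() hunk deep_flush is only writable when the region advertises flush hints.

/* Hedged userspace sketch, not part of the kernel tree. */
#include <stdio.h>

static const char *region = "/sys/bus/nd/devices/region0"; /* assumed path */

int main(void)
{
	char path[256], line[128];
	FILE *f;

	/* badblocks: one "<start-sector> <num-sectors>" pair per line (512B units) */
	snprintf(path, sizeof(path), "%s/badblocks", region);
	f = fopen(path, "r");
	if (f) {
		while (fgets(line, sizeof(line), f))
			printf("media error range: %s", line);
		fclose(f);
	}

	/* writing "1" triggers nvdimm_flush() to drain write-pending queues */
	snprintf(path, sizeof(path), "%s/deep_flush", region);
	f = fopen(path, "w");
	if (!f) {
		perror("deep_flush");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);
	return 0;
}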
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 59e750183b7f..5bdd499b5f4f 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -20,6 +20,7 @@ if LIBNVDIMM
config BLK_DEV_PMEM
tristate "PMEM: Persistent memory block device support"
default LIBNVDIMM
+ select DAX
select ND_BTT if BTT
select ND_PFN if NVDIMM_PFN
help
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 97dd2925ed6e..4b76af2b8715 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -314,7 +314,7 @@ int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns)
if (rc < 0) {
struct nd_btt *nd_btt = to_nd_btt(btt_dev);
- __nd_detach_ndns(btt_dev, &nd_btt->ndns);
+ nd_detach_ndns(btt_dev, &nd_btt->ndns);
put_device(btt_dev);
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 351bac8f6503..e9361bffe5ee 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -27,6 +27,7 @@
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
+#include "pfn.h"
int nvdimm_major;
static int nvdimm_bus_major;
@@ -171,6 +172,57 @@ void nvdimm_region_notify(struct nd_region *nd_region, enum nvdimm_event event)
}
EXPORT_SYMBOL_GPL(nvdimm_region_notify);
+struct clear_badblocks_context {
+ resource_size_t phys, cleared;
+};
+
+static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
+{
+ struct clear_badblocks_context *ctx = data;
+ struct nd_region *nd_region;
+ resource_size_t ndr_end;
+ sector_t sector;
+
+ /* make sure device is a region */
+ if (!is_nd_pmem(dev))
+ return 0;
+
+ nd_region = to_nd_region(dev);
+ ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;
+
+ /* make sure we are in the region */
+ if (ctx->phys < nd_region->ndr_start
+ || (ctx->phys + ctx->cleared) > ndr_end)
+ return 0;
+
+ sector = (ctx->phys - nd_region->ndr_start) / 512;
+ badblocks_clear(&nd_region->bb, sector, ctx->cleared / 512);
+
+ return 0;
+}
+
+static void nvdimm_clear_badblocks_regions(struct nvdimm_bus *nvdimm_bus,
+ phys_addr_t phys, u64 cleared)
+{
+ struct clear_badblocks_context ctx = {
+ .phys = phys,
+ .cleared = cleared,
+ };
+
+ device_for_each_child(&nvdimm_bus->dev, &ctx,
+ nvdimm_clear_badblocks_region);
+}
+
+static void nvdimm_account_cleared_poison(struct nvdimm_bus *nvdimm_bus,
+ phys_addr_t phys, u64 cleared)
+{
+ if (cleared > 0)
+ nvdimm_forget_poison(nvdimm_bus, phys, cleared);
+
+ if (cleared > 0 && cleared / 512)
+ nvdimm_clear_badblocks_regions(nvdimm_bus, phys, cleared);
+}
+
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
unsigned int len)
{
@@ -218,7 +270,8 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
if (cmd_rc < 0)
return cmd_rc;
- nvdimm_clear_from_poison_list(nvdimm_bus, phys, len);
+ nvdimm_account_cleared_poison(nvdimm_bus, phys, clear_err.cleared);
+
return clear_err.cleared;
}
EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
@@ -286,6 +339,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
init_waitqueue_head(&nvdimm_bus->probe_wait);
nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
mutex_init(&nvdimm_bus->reconfig_mutex);
+ spin_lock_init(&nvdimm_bus->poison_lock);
if (nvdimm_bus->id < 0) {
kfree(nvdimm_bus);
return NULL;
@@ -354,9 +408,9 @@ static int nd_bus_remove(struct device *dev)
nd_synchronize();
device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ spin_lock(&nvdimm_bus->poison_lock);
free_poison_list(&nvdimm_bus->poison_list);
- nvdimm_bus_unlock(&nvdimm_bus->dev);
+ spin_unlock(&nvdimm_bus->poison_lock);
nvdimm_bus_destroy_ndctl(nvdimm_bus);
@@ -769,16 +823,55 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
} while (true);
}
-static int pmem_active(struct device *dev, void *data)
+static int nd_pmem_forget_poison_check(struct device *dev, void *data)
{
- if (is_nd_pmem(dev) && dev->driver)
+ struct nd_cmd_clear_error *clear_err =
+ (struct nd_cmd_clear_error *)data;
+ struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
+ struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
+ struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
+ struct nd_namespace_common *ndns = NULL;
+ struct nd_namespace_io *nsio;
+ resource_size_t offset = 0, end_trunc = 0, start, end, pstart, pend;
+
+ if (nd_dax || !dev->driver)
+ return 0;
+
+ start = clear_err->address;
+ end = clear_err->address + clear_err->cleared - 1;
+
+ if (nd_btt || nd_pfn || nd_dax) {
+ if (nd_btt)
+ ndns = nd_btt->ndns;
+ else if (nd_pfn)
+ ndns = nd_pfn->ndns;
+ else if (nd_dax)
+ ndns = nd_dax->nd_pfn.ndns;
+
+ if (!ndns)
+ return 0;
+ } else
+ ndns = to_ndns(dev);
+
+ nsio = to_nd_namespace_io(&ndns->dev);
+ pstart = nsio->res.start + offset;
+ pend = nsio->res.end - end_trunc;
+
+ if ((pstart >= start) && (pend <= end))
return -EBUSY;
+
return 0;
+
+}
+
+static int nd_ns_forget_poison_check(struct device *dev, void *data)
+{
+ return device_for_each_child(dev, data, nd_pmem_forget_poison_check);
}
/* set_config requires an idle interleave set */
static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
- struct nvdimm *nvdimm, unsigned int cmd)
+ struct nvdimm *nvdimm, unsigned int cmd, void *data)
{
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
@@ -792,8 +885,8 @@ static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
/* require clear error to go through the pmem driver */
if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR)
- return device_for_each_child(&nvdimm_bus->dev, NULL,
- pmem_active);
+ return device_for_each_child(&nvdimm_bus->dev, data,
+ nd_ns_forget_poison_check);
if (!nvdimm || cmd != ND_CMD_SET_CONFIG_DATA)
return 0;
@@ -820,7 +913,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
const char *cmd_name, *dimm_name;
unsigned long cmd_mask;
void *buf;
- int rc, i;
+ int rc, i, cmd_rc;
if (nvdimm) {
desc = nd_cmd_dimm_desc(cmd);
@@ -927,13 +1020,20 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
nvdimm_bus_lock(&nvdimm_bus->dev);
- rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, cmd);
+ rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, cmd, buf);
if (rc)
goto out_unlock;
- rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
+ rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, &cmd_rc);
if (rc < 0)
goto out_unlock;
+
+ if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR && cmd_rc >= 0) {
+ struct nd_cmd_clear_error *clear_err = buf;
+
+ nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
+ clear_err->cleared);
+ }
nvdimm_bus_unlock(&nvdimm_bus->dev);
if (copy_to_user(p, buf, buf_len))
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index ca6d572c48fc..93d128da1c92 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -21,8 +21,13 @@
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
{
struct nd_namespace_common *ndns = *_ndns;
+ struct nvdimm_bus *nvdimm_bus;
- lockdep_assert_held(&ndns->dev.mutex);
+ if (!ndns)
+ return;
+
+ nvdimm_bus = walk_to_nvdimm_bus(&ndns->dev);
+ lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
dev_WARN_ONCE(dev, ndns->claim != dev, "%s: invalid claim\n", __func__);
ndns->claim = NULL;
*_ndns = NULL;
@@ -37,18 +42,20 @@ void nd_detach_ndns(struct device *dev,
if (!ndns)
return;
get_device(&ndns->dev);
- device_lock(&ndns->dev);
+ nvdimm_bus_lock(&ndns->dev);
__nd_detach_ndns(dev, _ndns);
- device_unlock(&ndns->dev);
+ nvdimm_bus_unlock(&ndns->dev);
put_device(&ndns->dev);
}
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
struct nd_namespace_common **_ndns)
{
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&attach->dev);
+
if (attach->claim)
return false;
- lockdep_assert_held(&attach->dev.mutex);
+ lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
dev_WARN_ONCE(dev, *_ndns, "%s: invalid claim\n", __func__);
attach->claim = dev;
*_ndns = attach;
@@ -61,9 +68,9 @@ bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
{
bool claimed;
- device_lock(&attach->dev);
+ nvdimm_bus_lock(&attach->dev);
claimed = __nd_attach_ndns(dev, attach, _ndns);
- device_unlock(&attach->dev);
+ nvdimm_bus_unlock(&attach->dev);
return claimed;
}
@@ -114,7 +121,7 @@ static void nd_detach_and_reset(struct device *dev,
struct nd_namespace_common **_ndns)
{
/* detach the namespace and destroy / reset the device */
- nd_detach_ndns(dev, _ndns);
+ __nd_detach_ndns(dev, _ndns);
if (is_idle(dev, *_ndns)) {
nd_device_unregister(dev, ND_ASYNC);
} else if (is_nd_btt(dev)) {
@@ -184,7 +191,7 @@ ssize_t nd_namespace_store(struct device *dev,
}
WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
- if (!nd_attach_ndns(dev, ndns, _ndns)) {
+ if (!__nd_attach_ndns(dev, ndns, _ndns)) {
dev_dbg(dev, "%s already claimed\n",
dev_name(&ndns->dev));
len = -EBUSY;
@@ -239,22 +246,24 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
if (rw == READ) {
if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align)))
return -EIO;
- return memcpy_from_pmem(buf, nsio->addr + offset, size);
+ return memcpy_mcsafe(buf, nsio->addr + offset, size);
}
if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
/*
* FIXME: nsio_rw_bytes() may be called from atomic
- * context in the btt case and nvdimm_clear_poison()
- * takes a sleeping lock. Until the locking can be
- * reworked this capability requires that the namespace
- * is not claimed by btt.
+ * context in the btt case and the ACPI DSM path for
+ * clearing the error takes sleeping locks and allocates
+ * memory. An explicit error clearing path, and support
+ * for tracking badblocks in BTT metadata is needed to
+ * work around this collision.
*/
if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
&& (!ndns->claim || !is_nd_btt(ndns->claim))) {
long cleared;
- cleared = nvdimm_clear_poison(&ndns->dev, offset, size);
+ cleared = nvdimm_clear_poison(&ndns->dev,
+ nsio->res.start + offset, size);
if (cleared < size)
rc = -EIO;
if (cleared > 0 && cleared / 512) {
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 9303cfeb8bee..2dee908e4bae 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -518,6 +518,15 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
+static void append_poison_entry(struct nvdimm_bus *nvdimm_bus,
+ struct nd_poison *pl, u64 addr, u64 length)
+{
+ lockdep_assert_held(&nvdimm_bus->poison_lock);
+ pl->start = addr;
+ pl->length = length;
+ list_add_tail(&pl->list, &nvdimm_bus->poison_list);
+}
+
static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length,
gfp_t flags)
{
@@ -527,19 +536,24 @@ static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length,
if (!pl)
return -ENOMEM;
- pl->start = addr;
- pl->length = length;
- list_add_tail(&pl->list, &nvdimm_bus->poison_list);
-
+ append_poison_entry(nvdimm_bus, pl, addr, length);
return 0;
}
static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
- struct nd_poison *pl;
+ struct nd_poison *pl, *pl_new;
- if (list_empty(&nvdimm_bus->poison_list))
- return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
+ spin_unlock(&nvdimm_bus->poison_lock);
+ pl_new = kzalloc(sizeof(*pl_new), GFP_KERNEL);
+ spin_lock(&nvdimm_bus->poison_lock);
+
+ if (list_empty(&nvdimm_bus->poison_list)) {
+ if (!pl_new)
+ return -ENOMEM;
+ append_poison_entry(nvdimm_bus, pl_new, addr, length);
+ return 0;
+ }
/*
* There is a chance this is a duplicate, check for those first.
@@ -551,6 +565,7 @@ static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
/* If length has changed, update this list entry */
if (pl->length != length)
pl->length = length;
+ kfree(pl_new);
return 0;
}
@@ -559,29 +574,33 @@ static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
* as any overlapping ranges will get resolved when the list is consumed
* and converted to badblocks
*/
- return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
+ if (!pl_new)
+ return -ENOMEM;
+ append_poison_entry(nvdimm_bus, pl_new, addr, length);
+
+ return 0;
}
int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
int rc;
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ spin_lock(&nvdimm_bus->poison_lock);
rc = bus_add_poison(nvdimm_bus, addr, length);
- nvdimm_bus_unlock(&nvdimm_bus->dev);
+ spin_unlock(&nvdimm_bus->poison_lock);
return rc;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);
-void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
- phys_addr_t start, unsigned int len)
+void nvdimm_forget_poison(struct nvdimm_bus *nvdimm_bus, phys_addr_t start,
+ unsigned int len)
{
struct list_head *poison_list = &nvdimm_bus->poison_list;
u64 clr_end = start + len - 1;
struct nd_poison *pl, *next;
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ spin_lock(&nvdimm_bus->poison_lock);
WARN_ON_ONCE(list_empty(poison_list));
/*
@@ -628,15 +647,15 @@ void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
u64 new_len = pl_end - new_start + 1;
/* Add new entry covering the right half */
- add_poison(nvdimm_bus, new_start, new_len, GFP_NOIO);
+ add_poison(nvdimm_bus, new_start, new_len, GFP_NOWAIT);
/* Adjust this entry to cover the left half */
pl->length = start - pl->start;
continue;
}
}
- nvdimm_bus_unlock(&nvdimm_bus->dev);
+ spin_unlock(&nvdimm_bus->poison_lock);
}
-EXPORT_SYMBOL_GPL(nvdimm_clear_from_poison_list);
+EXPORT_SYMBOL_GPL(nvdimm_forget_poison);
#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index 45fa82cae87c..c1b6556aea6e 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -124,7 +124,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
dev_dbg(dev, "%s: dax: %s\n", __func__,
rc == 0 ? dev_name(dax_dev) : "<none>");
if (rc < 0) {
- __nd_detach_ndns(dax_dev, &nd_pfn->ndns);
+ nd_detach_ndns(dax_dev, &nd_pfn->ndns);
put_device(dax_dev);
} else
__nd_device_register(dax_dev);
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index ee0b412827bf..e0f0e3ce1a32 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -49,6 +49,8 @@ static int nvdimm_probe(struct device *dev)
kref_init(&ndd->kref);
rc = nvdimm_init_nsarea(ndd);
+ if (rc == -EACCES)
+ nvdimm_set_locked(dev);
if (rc)
goto err;
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 8b721321be5b..fac1e9fbd11d 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -34,7 +34,7 @@ int nvdimm_check_config_data(struct device *dev)
if (!nvdimm->cmd_mask ||
!test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
- if (nvdimm->flags & NDD_ALIASING)
+ if (test_bit(NDD_ALIASING, &nvdimm->flags))
return -ENXIO;
else
return -ENOTTY;
@@ -67,6 +67,7 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
struct nvdimm_bus_descriptor *nd_desc;
int rc = validate_dimm(ndd);
+ int cmd_rc = 0;
if (rc)
return rc;
@@ -76,8 +77,11 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
memset(cmd, 0, sizeof(*cmd));
nd_desc = nvdimm_bus->nd_desc;
- return nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
- ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), NULL);
+ rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
+ ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
+ if (rc < 0)
+ return rc;
+ return cmd_rc;
}
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
@@ -188,7 +192,14 @@ void nvdimm_set_aliasing(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
- nvdimm->flags |= NDD_ALIASING;
+ set_bit(NDD_ALIASING, &nvdimm->flags);
+}
+
+void nvdimm_set_locked(struct device *dev)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+
+ set_bit(NDD_LOCKED, &nvdimm->flags);
}
static void nvdimm_release(struct device *dev)
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 1b481a5fb966..2f9dfbd2dbec 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -2236,14 +2236,21 @@ static int init_active_labels(struct nd_region *nd_region)
int count, j;
/*
- * If the dimm is disabled then prevent the region from
- * being activated if it aliases DPA.
+ * If the dimm is disabled then we may need to prevent
+ * the region from being activated.
*/
if (!ndd) {
- if ((nvdimm->flags & NDD_ALIASING) == 0)
+ if (test_bit(NDD_LOCKED, &nvdimm->flags))
+ /* fail, label data may be unreadable */;
+ else if (test_bit(NDD_ALIASING, &nvdimm->flags))
+ /* fail, labels needed to disambiguate dpa */;
+ else
return 0;
- dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
- dev_name(&nd_mapping->nvdimm->dev));
+
+ dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
+ dev_name(&nd_mapping->nvdimm->dev),
+ test_bit(NDD_LOCKED, &nvdimm->flags)
+ ? "locked" : "disabled");
return -ENXIO;
}
nd_mapping->ndd = ndd;
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 8623e57c2ce3..4c4bd209e725 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -32,6 +32,7 @@ struct nvdimm_bus {
struct list_head poison_list;
struct list_head mapping_list;
struct mutex reconfig_mutex;
+ spinlock_t poison_lock;
};
struct nvdimm {
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 2a99c83aa19f..77d032192bf7 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -154,6 +154,7 @@ struct nd_region {
u64 ndr_start;
int id, num_lanes, ro, numa_node;
void *provider_data;
+ struct badblocks bb;
struct nd_interleave_set *nd_set;
struct nd_percpu_lane __percpu *lane;
struct nd_mapping mapping[0];
@@ -239,6 +240,7 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
unsigned int len);
void nvdimm_set_aliasing(struct device *dev);
+void nvdimm_set_locked(struct device *dev);
struct nd_btt *to_nd_btt(struct device *dev);
struct nd_gen_sb {
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 6c033c9a2f06..335c8175410b 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -484,7 +484,7 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
dev_dbg(dev, "%s: pfn: %s\n", __func__,
rc == 0 ? dev_name(pfn_dev) : "<none>");
if (rc < 0) {
- __nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
+ nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
put_device(pfn_dev);
} else
__nd_device_register(pfn_dev);
@@ -538,7 +538,8 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
altmap = NULL;
} else if (nd_pfn->mode == PFN_MODE_PMEM) {
- nd_pfn->npfns = (resource_size(res) - offset) / PAGE_SIZE;
+ nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res)
+ - offset) / PAGE_SIZE);
if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
dev_info(&nd_pfn->dev,
"number of pfns truncated from %lld to %ld\n",
@@ -625,7 +626,8 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
*/
start += start_pad;
size = resource_size(&nsio->res);
- npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
+ npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
+ / PAGE_SIZE);
if (nd_pfn->mode == PFN_MODE_PMEM) {
/*
* vmemmap_populate_hugepages() allocates the memmap array in
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index fbc640bf06b0..c544d466ea51 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -29,6 +29,7 @@
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
+#include <linux/dax.h>
#include <linux/nd.h>
#include "pmem.h"
#include "pfn.h"
@@ -89,7 +90,7 @@ static int read_pmem(struct page *page, unsigned int off,
int rc;
void *mem = kmap_atomic(page);
- rc = memcpy_from_pmem(mem + off, pmem_addr, len);
+ rc = memcpy_mcsafe(mem + off, pmem_addr, len);
kunmap_atomic(mem);
if (rc)
return -EIO;
@@ -200,13 +201,13 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
}
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
-__weak long pmem_direct_access(struct block_device *bdev, sector_t sector,
- void **kaddr, pfn_t *pfn, long size)
+__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
- struct pmem_device *pmem = bdev->bd_queue->queuedata;
- resource_size_t offset = sector * 512 + pmem->data_offset;
+ resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
- if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
+ if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
+ PFN_PHYS(nr_pages))))
return -EIO;
*kaddr = pmem->virt_addr + offset;
*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
@@ -216,17 +217,28 @@ __weak long pmem_direct_access(struct block_device *bdev, sector_t sector,
* requested range.
*/
if (unlikely(pmem->bb.count))
- return size;
- return pmem->size - pmem->pfn_pad - offset;
+ return nr_pages;
+ return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}
static const struct block_device_operations pmem_fops = {
.owner = THIS_MODULE,
.rw_page = pmem_rw_page,
- .direct_access = pmem_direct_access,
.revalidate_disk = nvdimm_revalidate_disk,
};
+static long pmem_dax_direct_access(struct dax_device *dax_dev,
+ pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
+{
+ struct pmem_device *pmem = dax_get_private(dax_dev);
+
+ return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
+}
+
+static const struct dax_operations pmem_dax_ops = {
+ .direct_access = pmem_dax_direct_access,
+};
+
static void pmem_release_queue(void *q)
{
blk_cleanup_queue(q);
@@ -237,10 +249,14 @@ static void pmem_freeze_queue(void *q)
blk_freeze_queue_start(q);
}
-static void pmem_release_disk(void *disk)
+static void pmem_release_disk(void *__pmem)
{
- del_gendisk(disk);
- put_disk(disk);
+ struct pmem_device *pmem = __pmem;
+
+ kill_dax(pmem->dax_dev);
+ put_dax(pmem->dax_dev);
+ del_gendisk(pmem->disk);
+ put_disk(pmem->disk);
}
static int pmem_attach_disk(struct device *dev,
@@ -251,6 +267,7 @@ static int pmem_attach_disk(struct device *dev,
struct vmem_altmap __altmap, *altmap = NULL;
struct resource *res = &nsio->res;
struct nd_pfn *nd_pfn = NULL;
+ struct dax_device *dax_dev;
int nid = dev_to_node(dev);
struct nd_pfn_sb *pfn_sb;
struct pmem_device *pmem;
@@ -334,6 +351,7 @@ static int pmem_attach_disk(struct device *dev,
disk = alloc_disk_node(0, nid);
if (!disk)
return -ENOMEM;
+ pmem->disk = disk;
disk->fops = &pmem_fops;
disk->queue = q;
@@ -345,9 +363,16 @@ static int pmem_attach_disk(struct device *dev,
return -ENOMEM;
nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
disk->bb = &pmem->bb;
- device_add_disk(dev, disk);
- if (devm_add_action_or_reset(dev, pmem_release_disk, disk))
+ dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
+ if (!dax_dev) {
+ put_disk(disk);
+ return -ENOMEM;
+ }
+ pmem->dax_dev = dax_dev;
+
+ device_add_disk(dev, disk);
+ if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
return -ENOMEM;
revalidate_disk(disk);
@@ -397,12 +422,12 @@ static void nd_pmem_shutdown(struct device *dev)
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
- struct pmem_device *pmem = dev_get_drvdata(dev);
- struct nd_region *nd_region = to_region(pmem);
+ struct nd_region *nd_region;
resource_size_t offset = 0, end_trunc = 0;
struct nd_namespace_common *ndns;
struct nd_namespace_io *nsio;
struct resource res;
+ struct badblocks *bb;
if (event != NVDIMM_REVALIDATE_POISON)
return;
@@ -411,20 +436,33 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
struct nd_btt *nd_btt = to_nd_btt(dev);
ndns = nd_btt->ndns;
- } else if (is_nd_pfn(dev)) {
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
- struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+ nd_region = to_nd_region(ndns->dev.parent);
+ nsio = to_nd_namespace_io(&ndns->dev);
+ bb = &nsio->bb;
+ } else {
+ struct pmem_device *pmem = dev_get_drvdata(dev);
- ndns = nd_pfn->ndns;
- offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
- end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
- } else
- ndns = to_ndns(dev);
+ nd_region = to_region(pmem);
+ bb = &pmem->bb;
+
+ if (is_nd_pfn(dev)) {
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+
+ ndns = nd_pfn->ndns;
+ offset = pmem->data_offset +
+ __le32_to_cpu(pfn_sb->start_pad);
+ end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+ } else {
+ ndns = to_ndns(dev);
+ }
+
+ nsio = to_nd_namespace_io(&ndns->dev);
+ }
- nsio = to_nd_namespace_io(&ndns->dev);
res.start = nsio->res.start + offset;
res.end = nsio->res.end - end_trunc;
- nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
+ nvdimm_badblocks_populate(nd_region, bb, &res);
}
MODULE_ALIAS("pmem");
diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h
index b4ee4f71b4a1..7f4dbd72a90a 100644
--- a/drivers/nvdimm/pmem.h
+++ b/drivers/nvdimm/pmem.h
@@ -5,8 +5,6 @@
#include <linux/pfn_t.h>
#include <linux/fs.h>
-long pmem_direct_access(struct block_device *bdev, sector_t sector,
- void **kaddr, pfn_t *pfn, long size);
/* this definition is in it's own header for tools/testing/nvdimm to consume */
struct pmem_device {
/* One contiguous memory region per device */
@@ -20,5 +18,10 @@ struct pmem_device {
/* trim size when namespace capacity has been section aligned */
u32 pfn_pad;
struct badblocks bb;
+ struct dax_device *dax_dev;
+ struct gendisk *disk;
};
+
+long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn);
#endif /* __NVDIMM_PMEM_H__ */
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 8f241772ec0b..869a886c292e 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/nd.h>
+#include "nd-core.h"
#include "nd.h"
static int nd_region_probe(struct device *dev)
@@ -52,6 +53,17 @@ static int nd_region_probe(struct device *dev)
if (rc && err && rc == err)
return -ENODEV;
+ if (is_nd_pmem(&nd_region->dev)) {
+ struct resource ndr_res;
+
+ if (devm_init_badblocks(dev, &nd_region->bb))
+ return -ENODEV;
+ ndr_res.start = nd_region->ndr_start;
+ ndr_res.end = nd_region->ndr_start + nd_region->ndr_size - 1;
+ nvdimm_badblocks_populate(nd_region,
+ &nd_region->bb, &ndr_res);
+ }
+
nd_region->btt_seed = nd_btt_create(nd_region);
nd_region->pfn_seed = nd_pfn_create(nd_region);
nd_region->dax_seed = nd_dax_create(nd_region);
@@ -104,6 +116,18 @@ static int child_notify(struct device *dev, void *data)
static void nd_region_notify(struct device *dev, enum nvdimm_event event)
{
+ if (event == NVDIMM_REVALIDATE_POISON) {
+ struct nd_region *nd_region = to_nd_region(dev);
+ struct resource res;
+
+ if (is_nd_pmem(&nd_region->dev)) {
+ res.start = nd_region->ndr_start;
+ res.end = nd_region->ndr_start +
+ nd_region->ndr_size - 1;
+ nvdimm_badblocks_populate(nd_region,
+ &nd_region->bb, &res);
+ }
+ }
device_for_each_child(dev, &event, child_notify);
}
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index b7cb5066d961..b550edf2571f 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -222,7 +222,7 @@ int nd_region_to_nstype(struct nd_region *nd_region)
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
- if (nvdimm->flags & NDD_ALIASING)
+ if (test_bit(NDD_ALIASING, &nvdimm->flags))
alias++;
}
if (alias)
@@ -255,6 +255,35 @@ static ssize_t size_show(struct device *dev,
}
static DEVICE_ATTR_RO(size);
+static ssize_t deep_flush_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+
+ /*
+ * NOTE: in the nvdimm_has_flush() error case this attribute is
+ * not visible.
+ */
+ return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
+}
+
+static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ bool flush;
+ int rc = strtobool(buf, &flush);
+ struct nd_region *nd_region = to_nd_region(dev);
+
+ if (rc)
+ return rc;
+ if (!flush)
+ return -EINVAL;
+ nvdimm_flush(nd_region);
+
+ return len;
+}
+static DEVICE_ATTR_RW(deep_flush);
+
static ssize_t mappings_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -448,6 +477,25 @@ static ssize_t read_only_store(struct device *dev,
}
static DEVICE_ATTR_RW(read_only);
+static ssize_t region_badblocks_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+
+ return badblocks_show(&nd_region->bb, buf, 0);
+}
+
+static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);
+
+static ssize_t resource_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+
+ return sprintf(buf, "%#llx\n", nd_region->ndr_start);
+}
+static DEVICE_ATTR_RO(resource);
+
static struct attribute *nd_region_attributes[] = {
&dev_attr_size.attr,
&dev_attr_nstype.attr,
@@ -455,11 +503,14 @@ static struct attribute *nd_region_attributes[] = {
&dev_attr_btt_seed.attr,
&dev_attr_pfn_seed.attr,
&dev_attr_dax_seed.attr,
+ &dev_attr_deep_flush.attr,
&dev_attr_read_only.attr,
&dev_attr_set_cookie.attr,
&dev_attr_available_size.attr,
&dev_attr_namespace_seed.attr,
&dev_attr_init_namespaces.attr,
+ &dev_attr_badblocks.attr,
+ &dev_attr_resource.attr,
NULL,
};
@@ -476,6 +527,23 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
return 0;
+ if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
+ return 0;
+
+ if (!is_nd_pmem(dev) && a == &dev_attr_resource.attr)
+ return 0;
+
+ if (a == &dev_attr_deep_flush.attr) {
+ int has_flush = nvdimm_has_flush(nd_region);
+
+ if (has_flush == 1)
+ return a->mode;
+ else if (has_flush == 0)
+ return 0444;
+ else
+ return 0;
+ }
+
if (a != &dev_attr_set_cookie.attr
&& a != &dev_attr_available_size.attr)
return a->mode;
@@ -813,7 +881,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
return NULL;
}
- if (nvdimm->flags & NDD_UNARMED)
+ if (test_bit(NDD_UNARMED, &nvdimm->flags))
ro = 1;
}
@@ -968,17 +1036,20 @@ EXPORT_SYMBOL_GPL(nvdimm_flush);
*/
int nvdimm_has_flush(struct nd_region *nd_region)
{
- struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
int i;
/* no nvdimm == flushing capability unknown */
if (nd_region->ndr_mappings == 0)
return -ENXIO;
- for (i = 0; i < nd_region->ndr_mappings; i++)
- /* flush hints present, flushing required */
- if (ndrd_get_flush_wpq(ndrd, i, 0))
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+ struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+ /* flush hints present / available */
+ if (nvdimm->num_flush)
return 1;
+ }
/*
* The platform defines dimm devices without hints, assume