author | Ross Zwisler <ross.zwisler@linux.intel.com> | 2015-08-18 21:55:39 +0200
committer | Dan Williams <dan.j.williams@intel.com> | 2015-08-20 20:07:23 +0200
commit | 5de490daec8b6354b90d5c9d3e2415b195f5adb6 (patch)
tree | 0f9091d0827916e8a810cff47315445bed81d726 /arch/x86/include/asm/pmem.h
parent | pmem, x86: clean up conditional pmem includes (diff)
download | linux-5de490daec8b6354b90d5c9d3e2415b195f5adb6.tar.xz, linux-5de490daec8b6354b90d5c9d3e2415b195f5adb6.zip
pmem: add copy_from_iter_pmem() and clear_pmem()
Add support for two new PMEM APIs, copy_from_iter_pmem() and
clear_pmem(). copy_from_iter_pmem() is used to copy data from an
iterator into a PMEM buffer. clear_pmem() zeros a PMEM memory range.
Both of these new APIs must be explicitly ordered using a wmb_pmem()
function call and are implemented in such a way that the wmb_pmem()
will make the stores to PMEM durable. Because both APIs are unordered
they can be called as needed without introducing any unwanted memory
barriers.
Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
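The generic, arch-independent wrappers copy_from_iter_pmem(), clear_pmem() and wmb_pmem() live in include/linux/pmem.h (the diffstat on this page is limited to the x86 header). A minimal sketch of the intended calling pattern follows; pmem_write_example() and its arguments are hypothetical, invented only to illustrate the ordering rule described above: any number of unordered PMEM stores, then one wmb_pmem() to make them all durable.

```c
/*
 * Hypothetical caller -- not part of this patch.  It shows the pattern
 * the commit message describes: copy_from_iter_pmem() and clear_pmem()
 * issue unordered stores to persistent memory, and a single trailing
 * wmb_pmem() makes all of them durable.
 */
#include <linux/pmem.h>
#include <linux/uio.h>

static size_t pmem_write_example(void __pmem *dst, size_t bytes,
		struct iov_iter *src, void __pmem *tail, size_t tail_len)
{
	size_t copied;

	copied = copy_from_iter_pmem(dst, bytes, src);	/* unordered store */
	clear_pmem(tail, tail_len);			/* unordered store */
	wmb_pmem();		/* order + flush: both stores now durable */

	return copied;
}
```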
Diffstat (limited to 'arch/x86/include/asm/pmem.h')
-rw-r--r-- | arch/x86/include/asm/pmem.h | 75
1 file changed, 75 insertions, 0 deletions
```diff
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index 7f3413fce46c..a3a0df6545ee 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -66,6 +66,81 @@ static inline void arch_wmb_pmem(void)
 	pcommit_sfence();
 }
 
+/**
+ * __arch_wb_cache_pmem - write back a cache range with CLWB
+ * @vaddr:	virtual start address
+ * @size:	number of bytes to write back
+ *
+ * Write back a cache range using the CLWB (cache line write back)
+ * instruction. This function requires explicit ordering with an
+ * arch_wmb_pmem() call. This API is internal to the x86 PMEM implementation.
+ */
+static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
+{
+	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
+	unsigned long clflush_mask = x86_clflush_size - 1;
+	void *vend = vaddr + size;
+	void *p;
+
+	for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
+	     p < vend; p += x86_clflush_size)
+		clwb(p);
+}
+
+/*
+ * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
+ * iterators, so for other types (bvec & kvec) we must do a cache write-back.
+ */
+static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
+{
+	return iter_is_iovec(i) == false;
+}
+
+/**
+ * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
+ * @addr:	PMEM destination address
+ * @bytes:	number of bytes to copy
+ * @i:		iterator with source data
+ *
+ * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
+ * This function requires explicit ordering with an arch_wmb_pmem() call.
+ */
+static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
+		struct iov_iter *i)
+{
+	void *vaddr = (void __force *)addr;
+	size_t len;
+
+	/* TODO: skip the write-back by always using non-temporal stores */
+	len = copy_from_iter_nocache(vaddr, bytes, i);
+
+	if (__iter_needs_pmem_wb(i))
+		__arch_wb_cache_pmem(vaddr, bytes);
+
+	return len;
+}
+
+/**
+ * arch_clear_pmem - zero a PMEM memory range
+ * @addr:	virtual start address
+ * @size:	number of bytes to zero
+ *
+ * Write zeros into the memory range starting at 'addr' for 'size' bytes.
+ * This function requires explicit ordering with an arch_wmb_pmem() call.
+ */
+static inline void arch_clear_pmem(void __pmem *addr, size_t size)
+{
+	void *vaddr = (void __force *)addr;
+
+	/* TODO: implement the zeroing via non-temporal writes */
+	if (size == PAGE_SIZE && ((unsigned long)vaddr & ~PAGE_MASK) == 0)
+		clear_page(vaddr);
+	else
+		memset(vaddr, 0, size);
+
+	__arch_wb_cache_pmem(vaddr, size);
+}
+
 static inline bool arch_has_wmb_pmem(void)
 {
 #ifdef CONFIG_X86_64
```
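One detail worth spelling out is the flush-loop arithmetic in __arch_wb_cache_pmem(): because the CPU's cache-line size is a power of two, clflush_mask = x86_clflush_size - 1 lets (addr & ~clflush_mask) round the start address down to a line boundary, and the loop then steps one line at a time until it passes vaddr + size, so every line touched by the range gets written back. arch_clear_pmem() plays the same trick at page granularity with PAGE_MASK to choose the clear_page() fast path. The stand-alone sketch below is ordinary user-space C, with a hard-coded 64-byte line size and a printf() standing in for clwb() (both assumptions for illustration only), not kernel code:

```c
/*
 * Stand-alone illustration of the round-down-and-step loop used by
 * __arch_wb_cache_pmem().  CACHE_LINE is an assumed stand-in for
 * boot_cpu_data.x86_clflush_size, and printf() stands in for clwb().
 */
#include <stdio.h>
#include <stdint.h>

#define CACHE_LINE 64UL		/* assumed line size; must be a power of two */

static void wb_cache_range(void *vaddr, size_t size)
{
	uintptr_t mask = CACHE_LINE - 1;
	char *vend = (char *)vaddr + size;
	char *p;

	/*
	 * Round the start address down to a cache-line boundary, then
	 * visit every line that overlaps [vaddr, vaddr + size).
	 */
	for (p = (char *)((uintptr_t)vaddr & ~mask); p < vend; p += CACHE_LINE)
		printf("clwb line at %p\n", (void *)p);
}

int main(void)
{
	static char buf[256];

	/* A range that starts mid-line still gets its first line written back. */
	wb_cache_range(buf + 10, 100);
	return 0;
}
```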