drivers/nvdimm: Use local kmaps
author Davidlohr Bueso <dave@stgolabs.net>
Fri, 28 Nov 2025 21:23:03 +0000 (13:23 -0800)
committer Ira Weiny <ira.weiny@intel.com>
Mon, 26 Jan 2026 18:04:55 +0000 (12:04 -0600)
Replace the now deprecated kmap_atomic() with kmap_local_page().

Optimizing nvdimm/pmem for highmem makes no sense as these drivers are
64-bit only, and the mapped regions for both btt and pmem do not require
disabling preemption or pagefaults. Specifically, kmap_local_page() does
not care about the caller's atomic context (such as reads holding the
btt arena spinlock) or about the NVDIMM_IO_ATOMIC semantics used to
avoid error handling when accessing the btt arena. The same applies to
the memcpy cases. Local kmap temporary mappings remain valid across any
context switch.
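
For reference, a minimal sketch (not part of this patch; copy_into_page()
is a hypothetical helper) of the conversion pattern both drivers now follow:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Hypothetical helper illustrating the kmap_local_page() pattern. */
    static void copy_into_page(struct page *page, unsigned int off,
                               const void *src, size_t len)
    {
            /*
             * Thread-local mapping: unlike kmap_atomic(), this does not
             * disable preemption or pagefaults, and the mapping stays
             * valid if the task is scheduled out.
             */
            void *mem = kmap_local_page(page);

            memcpy(mem + off, src, len);
            /* Local mappings must be released in reverse (stack) order. */
            kunmap_local(mem);
    }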

Signed-off-by: Davidlohr Bueso <dave@stgolabs.net>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Link: https://patch.msgid.link/20251128212303.2170933-1-dave@stgolabs.net
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
drivers/nvdimm/btt.c
drivers/nvdimm/pmem.c

diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index a933db961ed701df2c30aa106676587e021b9a93..237edfa1c624b8e2de5bfd90d9d23c4870ba5945 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1104,10 +1104,10 @@ static int btt_data_read(struct arena_info *arena, struct page *page,
 {
        int ret;
        u64 nsoff = to_namespace_offset(arena, lba);
-       void *mem = kmap_atomic(page);
+       void *mem = kmap_local_page(page);
 
        ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
-       kunmap_atomic(mem);
+       kunmap_local(mem);
 
        return ret;
 }
@@ -1117,20 +1117,20 @@ static int btt_data_write(struct arena_info *arena, u32 lba,
 {
        int ret;
        u64 nsoff = to_namespace_offset(arena, lba);
-       void *mem = kmap_atomic(page);
+       void *mem = kmap_local_page(page);
 
        ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
-       kunmap_atomic(mem);
+       kunmap_local(mem);
 
        return ret;
 }
 
 static void zero_fill_data(struct page *page, unsigned int off, u32 len)
 {
-       void *mem = kmap_atomic(page);
+       void *mem = kmap_local_page(page);
 
        memset(mem + off, 0, len);
-       kunmap_atomic(mem);
+       kunmap_local(mem);
 }
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 05785ff21a8bd40d06daa4d5d97083bf0bc10595..92c67fbbc1c85dedec326d4370cc87e9f12fb8f7 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -128,10 +128,10 @@ static void write_pmem(void *pmem_addr, struct page *page,
        void *mem;
 
        while (len) {
-               mem = kmap_atomic(page);
+               mem = kmap_local_page(page);
                chunk = min_t(unsigned int, len, PAGE_SIZE - off);
                memcpy_flushcache(pmem_addr, mem + off, chunk);
-               kunmap_atomic(mem);
+               kunmap_local(mem);
                len -= chunk;
                off = 0;
                page++;
@@ -147,10 +147,10 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
        void *mem;
 
        while (len) {
-               mem = kmap_atomic(page);
+               mem = kmap_local_page(page);
                chunk = min_t(unsigned int, len, PAGE_SIZE - off);
                rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
-               kunmap_atomic(mem);
+               kunmap_local(mem);
                if (rem)
                        return BLK_STS_IOERR;
                len -= chunk;