// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include "dax-private.h"
#include "bus.h"

static struct dev_dax *ref_to_dev_dax(struct percpu_ref *ref)
{
	return container_of(ref, struct dev_dax, ref);
}

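/*
 * Percpu-ref lifecycle for the pgmap: ->release completes dev_dax->cmp
 * once the last reference is dropped, ->kill starts the teardown, and
 * ->exit waits for the release before freeing the ref.
 */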
static void dev_dax_percpu_release(struct percpu_ref *ref)
{
	struct dev_dax *dev_dax = ref_to_dev_dax(ref);

	dev_dbg(&dev_dax->dev, "%s\n", __func__);
	complete(&dev_dax->cmp);
}

static void dev_dax_percpu_exit(struct percpu_ref *ref)
{
	struct dev_dax *dev_dax = ref_to_dev_dax(ref);

	dev_dbg(&dev_dax->dev, "%s\n", __func__);
	wait_for_completion(&dev_dax->cmp);
	percpu_ref_exit(ref);
}

static void dev_dax_percpu_kill(struct percpu_ref *data)
{
	struct percpu_ref *ref = data;
	struct dev_dax *dev_dax = ref_to_dev_dax(ref);

	dev_dbg(&dev_dax->dev, "%s\n", __func__);
	percpu_ref_kill(ref);
}

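/*
 * Reject VMAs that device-dax cannot service: dead device, private
 * (copy-on-write) mappings, and VMAs unaligned to the region alignment.
 */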
static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
		const char *func)
{
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	unsigned long mask;

	if (!dax_alive(dev_dax->dax_dev))
		return -ENXIO;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dax_region->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start, vma->vm_end,
				mask);
		return -EINVAL;
	}

	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, dax range requires MADV_DONTFORK\n",
				current->comm, func);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}

/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
		unsigned long size)
{
	struct resource *res = &dev_dax->region->res;
	phys_addr_t phys;

	phys = pgoff * PAGE_SIZE + res->start;
	if (phys >= res->start && phys <= res->end) {
		if (phys + size - 1 <= res->end)
			return phys;
	}

	return -1;
}

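/*
 * One fault handler per page-table entry size: each validates the VMA,
 * translates the faulting file offset to a region physical address, and
 * installs the PTE/PMD/PUD directly.
 */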
static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	unsigned int fault_size = PAGE_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PAGE_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size != dax_region->align)
		return VM_FAULT_SIGBUS;

	phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
}

static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PMD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PMD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pmd mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pmd_addr < vmf->vma->vm_start ||
			(pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pmd_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pud_addr = vmf->address & PUD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PUD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PUD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pud mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pud_addr < vmf->vma->vm_start ||
			(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pud_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

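/*
 * Top-level fault dispatch. On VM_FAULT_NOPAGE the backing struct pages
 * are associated with the file mapping so a subsequent memory_failure()
 * can reverse-map the affected processes.
 */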
static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct file *filp = vmf->vma->vm_file;
	unsigned long fault_size;
	vm_fault_t rc = VM_FAULT_SIGBUS;
	int id;
	pfn_t pfn;
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
			(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);

	id = dax_read_lock();
	switch (pe_size) {
	case PE_SIZE_PTE:
		fault_size = PAGE_SIZE;
		rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PMD:
		fault_size = PMD_SIZE;
		rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PUD:
		fault_size = PUD_SIZE;
		rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
		break;
	default:
		rc = VM_FAULT_SIGBUS;
	}

	if (rc == VM_FAULT_NOPAGE) {
		unsigned long i;
		pgoff_t pgoff;

		/*
		 * In the device-dax case the only possibility for a
		 * VM_FAULT_NOPAGE result is when device-dax capacity is
		 * mapped. No need to consider the zero page, or racing
		 * conflicting mappings.
		 */
		pgoff = linear_page_index(vmf->vma, vmf->address
				& ~(fault_size - 1));
		for (i = 0; i < fault_size / PAGE_SIZE; i++) {
			struct page *page;

			page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
			if (page->mapping)
				continue;
			page->mapping = filp->f_mapping;
			page->index = pgoff + i;
		}
	}
	dax_read_unlock(id);

	return rc;
}

static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
{
	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	if (!IS_ALIGNED(addr, dax_region->align))
		return -EINVAL;
	return 0;
}

static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	return dax_region->align;
}

static const struct vm_operations_struct dax_vm_ops = {
	.fault = dev_dax_fault,
	.huge_fault = dev_dax_huge_fault,
	.split = dev_dax_split,
	.pagesize = dev_dax_pagesize,
};

static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dev_dax *dev_dax = filp->private_data;
	int rc, id;

	dev_dbg(&dev_dax->dev, "trace\n");

	/*
	 * We lock to check dax_dev liveness and will re-check at
	 * fault time.
	 */
	id = dax_read_lock();
	rc = check_vma(dev_dax, vma, __func__);
	dax_read_unlock(id);
	if (rc)
		return rc;

	vma->vm_ops = &dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

/* return an unmapped area aligned to the dax region specified alignment */
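/*
 * Example: for a region with a 2MiB alignment, over-allocating by one
 * alignment unit and then advancing the result by
 * (off - addr_align) & (align - 1) leaves the returned address congruent
 * to the file offset modulo the alignment, so PMD-sized mappings remain
 * possible.
 */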
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dev_dax *dev_dax = filp ? filp->private_data : NULL;
	struct dax_region *dax_region;

	if (!dev_dax || addr)
		goto out;

	dax_region = dev_dax->region;
	align = dax_region->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

static const struct address_space_operations dev_dax_aops = {
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
};

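/*
 * Point the file and inode at the dax_device's shared address_space so
 * that all mappings of this device instance are tracked in one mapping.
 */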
static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_device *dax_dev = inode_dax(inode);
	struct inode *__dax_inode = dax_inode(dax_dev);
	struct dev_dax *dev_dax = dax_get_private(dax_dev);

	dev_dbg(&dev_dax->dev, "trace\n");
	inode->i_mapping = __dax_inode->i_mapping;
	inode->i_mapping->host = __dax_inode;
	inode->i_mapping->a_ops = &dev_dax_aops;
	filp->f_mapping = inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	filp->private_data = dev_dax;
	inode->i_flags = S_DAX;

	return 0;
}

static int dax_release(struct inode *inode, struct file *filp)
{
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "trace\n");
	return 0;
}

static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
	.mmap_supported_flags = MAP_SYNC,
};

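/*
 * Illustrative userspace usage (a sketch, error handling elided; the
 * device node name depends on the instance):
 *
 *	int fd = open("/dev/dax0.0", O_RDWR);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			MAP_SHARED | MAP_SYNC, fd, 0);
 *
 * The mapping must be MAP_SHARED (enforced by check_vma()) and the
 * length and offset must honor the region alignment.
 */
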
static void dev_dax_cdev_del(void *cdev)
{
	cdev_del(cdev);
}

static void dev_dax_kill(void *dev_dax)
{
	kill_dev_dax(dev_dax);
}

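/*
 * Probe order: reserve the region resource, arm the percpu_ref, map the
 * region with devm_memremap_pages(), then publish the cdev. Each step is
 * devm-managed, so ->remove() has nothing to unwind explicitly.
 */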
int dev_dax_probe(struct device *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct resource *res = &dev_dax->region->res;
	struct inode *inode;
	struct cdev *cdev;
	void *addr;
	int rc;

	/* 1:1 map region resource range to device-dax instance range */
	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	init_completion(&dev_dax->cmp);
	rc = percpu_ref_init(&dev_dax->ref, dev_dax_percpu_release, 0,
			GFP_KERNEL);
	if (rc)
		return rc;

	dev_dax->pgmap.ref = &dev_dax->ref;
	dev_dax->pgmap.kill = dev_dax_percpu_kill;
	dev_dax->pgmap.cleanup = dev_dax_percpu_exit;
	addr = devm_memremap_pages(dev, &dev_dax->pgmap);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	inode = dax_inode(dax_dev);
	cdev = inode->i_cdev;
	cdev_init(cdev, &dax_fops);
	if (dev->class) {
		/* for the CONFIG_DEV_DAX_PMEM_COMPAT case */
		cdev->owner = dev->parent->driver->owner;
	} else
		cdev->owner = dev->driver->owner;
	cdev_set_parent(cdev, &dev->kobj);
	rc = cdev_add(cdev, dev->devt, 1);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, dev_dax_cdev_del, cdev);
	if (rc)
		return rc;

	run_dax(dax_dev);
	return devm_add_action_or_reset(dev, dev_dax_kill, dev_dax);
}
EXPORT_SYMBOL_GPL(dev_dax_probe);

static int dev_dax_remove(struct device *dev)
{
	/* all probe actions are unwound by devm */
	return 0;
}

static struct dax_device_driver device_dax_driver = {
	.drv = {
		.probe = dev_dax_probe,
		.remove = dev_dax_remove,
	},
	.match_always = 1,
};

static int __init dax_init(void)
{
	return dax_driver_register(&device_dax_driver);
}

static void __exit dax_exit(void)
{
	dax_driver_unregister(&device_dax_driver);
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
module_init(dax_init);
module_exit(dax_exit);
MODULE_ALIAS_DAX_DEVICE(0);