// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
 */
#include <linux/memremap.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "pfn.h"
#include "nd.h"
static void nd_pfn_release(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_pfn *nd_pfn = to_nd_pfn(dev);

        dev_dbg(dev, "trace\n");
        nd_detach_ndns(&nd_pfn->dev, &nd_pfn->ndns);
        ida_simple_remove(&nd_region->pfn_ida, nd_pfn->id);
        kfree(nd_pfn->uuid);
        kfree(nd_pfn);
}
static struct device_type nd_pfn_device_type = {
        .name = "nd_pfn",
        .release = nd_pfn_release,
};
bool is_nd_pfn(struct device *dev)
{
        return dev ? dev->type == &nd_pfn_device_type : false;
}
EXPORT_SYMBOL(is_nd_pfn);
struct nd_pfn *to_nd_pfn(struct device *dev)
{
        struct nd_pfn *nd_pfn = container_of(dev, struct nd_pfn, dev);

        WARN_ON(!is_nd_pfn(dev));
        return nd_pfn;
}
EXPORT_SYMBOL(to_nd_pfn);
static ssize_t mode_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

        switch (nd_pfn->mode) {
        case PFN_MODE_RAM:
                return sprintf(buf, "ram\n");
        case PFN_MODE_PMEM:
                return sprintf(buf, "pmem\n");
        default:
                return sprintf(buf, "none\n");
        }
}
static ssize_t mode_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc = 0;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        if (dev->driver)
                rc = -EBUSY;
        else {
                size_t n = len - 1;

                if (strncmp(buf, "pmem\n", n) == 0
                                || strncmp(buf, "pmem", n) == 0) {
                        nd_pfn->mode = PFN_MODE_PMEM;
                } else if (strncmp(buf, "ram\n", n) == 0
                                || strncmp(buf, "ram", n) == 0)
                        nd_pfn->mode = PFN_MODE_RAM;
                else if (strncmp(buf, "none\n", n) == 0
                                || strncmp(buf, "none", n) == 0)
                        nd_pfn->mode = PFN_MODE_NONE;
                else
                        rc = -EINVAL;
        }
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(mode);
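
/*
 * Illustrative usage (sketch): from userspace the mode is typically
 * selected through sysfs before the device is enabled; the device name
 * below is hypothetical, and ndctl is the usual front end:
 *
 *   echo pmem > /sys/bus/nd/devices/pfn0.1/mode
 */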
static ssize_t align_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

        return sprintf(buf, "%ld\n", nd_pfn->align);
}
static const unsigned long *nd_pfn_supported_alignments(void)
{
        /*
         * This needs to be a non-static variable because the *_SIZE
         * macros aren't always constants.
         */
        const unsigned long supported_alignments[] = {
                PAGE_SIZE,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                HPAGE_PMD_SIZE,
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
                HPAGE_PUD_SIZE,
#endif
#endif
                0,
        };
        static unsigned long data[ARRAY_SIZE(supported_alignments)];

        memcpy(data, supported_alignments, sizeof(data));

        return data;
}
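
/*
 * For example, on x86_64 with transparent hugepages enabled the table
 * above resolves to { 4K, 2M, 1G, 0 }: PAGE_SIZE, HPAGE_PMD_SIZE, and
 * HPAGE_PUD_SIZE, with 0 terminating the list.
 */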
static ssize_t align_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        rc = nd_size_select_store(dev, buf, &nd_pfn->align,
                        nd_pfn_supported_alignments());
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(align);
static ssize_t uuid_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);

        if (nd_pfn->uuid)
                return sprintf(buf, "%pUb\n", nd_pfn->uuid);
        return sprintf(buf, "\n");
}
static ssize_t uuid_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        device_lock(dev);
        rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(uuid);
static ssize_t namespace_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        rc = sprintf(buf, "%s\n", nd_pfn->ndns
                        ? dev_name(&nd_pfn->ndns->dev) : "");
        nvdimm_bus_unlock(dev);

        return rc;
}
static ssize_t namespace_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
        dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RW(namespace);
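
/*
 * Illustrative usage (sketch, hypothetical device names): a pfn
 * instance claims its backing namespace by writing the namespace
 * device name:
 *
 *   echo namespace0.0 > /sys/bus/nd/devices/pfn0.1/namespace
 */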
static ssize_t resource_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        device_lock(dev);
        if (dev->driver) {
                struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
                u64 offset = __le64_to_cpu(pfn_sb->dataoff);
                struct nd_namespace_common *ndns = nd_pfn->ndns;
                u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
                struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

                rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start
                                + start_pad + offset);
        } else {
                /* no address to convey if the pfn instance is disabled */
                rc = -ENXIO;
        }
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(resource);
static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
        ssize_t rc;

        device_lock(dev);
        if (dev->driver) {
                struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
                u64 offset = __le64_to_cpu(pfn_sb->dataoff);
                struct nd_namespace_common *ndns = nd_pfn->ndns;
                u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
                u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
                struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

                rc = sprintf(buf, "%llu\n", (unsigned long long)
                                resource_size(&nsio->res) - start_pad
                                - end_trunc - offset);
        } else {
                /* no size to convey if the pfn instance is disabled */
                rc = -ENXIO;
        }
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(size);
static ssize_t supported_alignments_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return nd_size_select_show(0, nd_pfn_supported_alignments(), buf);
}
static DEVICE_ATTR_RO(supported_alignments);
static struct attribute *nd_pfn_attributes[] = {
        &dev_attr_mode.attr,
        &dev_attr_namespace.attr,
        &dev_attr_uuid.attr,
        &dev_attr_align.attr,
        &dev_attr_resource.attr,
        &dev_attr_size.attr,
        &dev_attr_supported_alignments.attr,
        NULL,
};
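
/*
 * The resource attribute exposes a raw physical address, so
 * pfn_visible() below restricts it to root-only reads (0400) rather
 * than hiding it outright.
 */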
static umode_t pfn_visible(struct kobject *kobj, struct attribute *a, int n)
{
        if (a == &dev_attr_resource.attr)
                return 0400;
        return a->mode;
}
struct attribute_group nd_pfn_attribute_group = {
        .attrs = nd_pfn_attributes,
        .is_visible = pfn_visible,
};
static const struct attribute_group *nd_pfn_attribute_groups[] = {
        &nd_pfn_attribute_group,
        &nd_device_attribute_group,
        &nd_numa_attribute_group,
        NULL,
};
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
                struct nd_namespace_common *ndns)
{
        struct device *dev;

        if (!nd_pfn)
                return NULL;

        nd_pfn->mode = PFN_MODE_NONE;
        nd_pfn->align = PFN_DEFAULT_ALIGNMENT;
        dev = &nd_pfn->dev;
        device_initialize(&nd_pfn->dev);
        if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
                dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
                                dev_name(ndns->claim));
                put_device(dev);
                return NULL;
        }
        return dev;
}
static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
{
        struct nd_pfn *nd_pfn;
        struct device *dev;

        nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
        if (!nd_pfn)
                return NULL;

        nd_pfn->id = ida_simple_get(&nd_region->pfn_ida, 0, 0, GFP_KERNEL);
        if (nd_pfn->id < 0) {
                kfree(nd_pfn);
                return NULL;
        }

        dev = &nd_pfn->dev;
        dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
        dev->groups = nd_pfn_attribute_groups;
        dev->type = &nd_pfn_device_type;
        dev->parent = &nd_region->dev;

        return nd_pfn;
}
struct device *nd_pfn_create(struct nd_region *nd_region)
{
        struct nd_pfn *nd_pfn;
        struct device *dev;

        if (!is_memory(&nd_region->dev))
                return NULL;

        nd_pfn = nd_pfn_alloc(nd_region);
        dev = nd_pfn_devinit(nd_pfn, NULL);

        __nd_device_register(dev);

        return dev;
}
/*
 * nd_pfn_clear_memmap_errors() clears any errors in the volatile memmap
 * space associated with the namespace. If the memmap is set to DRAM, then
 * this is a no-op. Since the memmap area is freshly initialized during
 * probe, we have an opportunity to clear any badblocks in this area.
 */
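/*
 * Worked example (illustrative numbers): with the info block at byte
 * offset 4K and ->dataoff at 2M, the metadata range checked below spans
 * sectors [(4K + sizeof(*pfn_sb)) >> 9, 2M >> 9). Any badblocks found
 * there are cleared by rewriting the affected span with zeroes, one
 * PAGE_SIZE chunk at a time.
 */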
static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
{
        struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        void *zero_page = page_address(ZERO_PAGE(0));
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        int num_bad, meta_num, rc, bb_present;
        sector_t first_bad, meta_start;
        struct nd_namespace_io *nsio;

        if (nd_pfn->mode != PFN_MODE_PMEM)
                return 0;

        nsio = to_nd_namespace_io(&ndns->dev);
        meta_start = (SZ_4K + sizeof(*pfn_sb)) >> 9;
        meta_num = (le64_to_cpu(pfn_sb->dataoff) >> 9) - meta_start;

        do {
                unsigned long zero_len;
                u64 nsoff;

                bb_present = badblocks_check(&nd_region->bb, meta_start,
                                meta_num, &first_bad, &num_bad);
                if (bb_present) {
                        dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n",
                                        num_bad, first_bad);
                        nsoff = ALIGN_DOWN((nd_region->ndr_start
                                        + (first_bad << 9)) - nsio->res.start,
                                        PAGE_SIZE);
                        zero_len = ALIGN(num_bad << 9, PAGE_SIZE);
                        while (zero_len) {
                                unsigned long chunk = min(zero_len, PAGE_SIZE);

                                rc = nvdimm_write_bytes(ndns, nsoff, zero_page,
                                                chunk, 0);
                                if (rc)
                                        break;

                                zero_len -= chunk;
                                nsoff += chunk;
                        }
                        if (rc) {
                                dev_err(&nd_pfn->dev,
                                        "error clearing %x badblocks at %llx\n",
                                        num_bad, first_bad);
                                return rc;
                        }
                }
        } while (bb_present);

        return 0;
}
/**
 * nd_pfn_validate - read and validate info-block
 * @nd_pfn: fsdax namespace runtime state / properties
 * @sig: 'devdax' or 'fsdax' signature
 *
 * Upon return the info-block buffer contents (->pfn_sb) are
 * indeterminate when validation fails, and a coherent info-block
 * otherwise.
 */
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
        u64 checksum, offset;
        enum nd_pfn_mode mode;
        struct nd_namespace_io *nsio;
        unsigned long align, start_pad;
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev);

        if (!pfn_sb || !ndns)
                return -ENODEV;

        if (!is_memory(nd_pfn->dev.parent))
                return -ENODEV;

        if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0))
                return -ENXIO;

        if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0)
                return -ENODEV;

        checksum = le64_to_cpu(pfn_sb->checksum);
        pfn_sb->checksum = 0;
        if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb))
                return -ENODEV;
        pfn_sb->checksum = cpu_to_le64(checksum);

        if (memcmp(pfn_sb->parent_uuid, parent_uuid, 16) != 0)
                return -ENODEV;

        if (__le16_to_cpu(pfn_sb->version_minor) < 1) {
                pfn_sb->start_pad = 0;
                pfn_sb->end_trunc = 0;
        }

        if (__le16_to_cpu(pfn_sb->version_minor) < 2)
                pfn_sb->align = 0;

        switch (le32_to_cpu(pfn_sb->mode)) {
        case PFN_MODE_RAM:
        case PFN_MODE_PMEM:
                break;
        default:
                return -ENXIO;
        }

        align = le32_to_cpu(pfn_sb->align);
        offset = le64_to_cpu(pfn_sb->dataoff);
        start_pad = le32_to_cpu(pfn_sb->start_pad);
        if (align == 0)
                align = 1UL << ilog2(offset);
        mode = le32_to_cpu(pfn_sb->mode);

        if (!nd_pfn->uuid) {
                /*
                 * When probing a namespace via nd_pfn_probe() the uuid
                 * is NULL (see: nd_pfn_devinit()), so we initialize
                 * settings from the pfn_sb.
                 */
                nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
                if (!nd_pfn->uuid)
                        return -ENOMEM;
                nd_pfn->align = align;
                nd_pfn->mode = mode;
        } else {
                /*
                 * When probing a pfn / dax instance we validate the
                 * live settings against the pfn_sb.
                 */
                if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
                        return -ENODEV;

                /*
                 * If the uuid validates, but other settings mismatch,
                 * return EINVAL because userspace has managed to change
                 * the configuration without specifying new
                 * identification (uuid).
                 */
                if (nd_pfn->align != align || nd_pfn->mode != mode) {
                        dev_err(&nd_pfn->dev,
                                        "init failed, settings mismatch\n");
                        dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
                                        nd_pfn->align, align, nd_pfn->mode,
                                        mode);
                        return -EINVAL;
                }
        }

        if (align > nvdimm_namespace_capacity(ndns)) {
                dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
                                align, nvdimm_namespace_capacity(ndns));
                return -EINVAL;
        }

        /*
         * These warnings are verbose because they can only trigger in
         * the case where the physical address alignment of the
         * namespace has changed since the pfn superblock was
         * established.
         */
        nsio = to_nd_namespace_io(&ndns->dev);
        if (offset >= resource_size(&nsio->res)) {
                dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
                                dev_name(&ndns->dev));
                return -EBUSY;
        }

        if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align))
                        || !IS_ALIGNED(offset, PAGE_SIZE)) {
                dev_err(&nd_pfn->dev,
                                "bad offset: %#llx dax disabled align: %#lx\n",
                                offset, align);
                return -ENXIO;
        }

        return nd_pfn_clear_memmap_errors(nd_pfn);
}
EXPORT_SYMBOL(nd_pfn_validate);
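
/*
 * Sketch of the on-media layout implied by the info-block fields for a
 * PFN_MODE_PMEM namespace (offsets illustrative, not to scale):
 *
 *   ns start    4K          8K (reserve)          dataoff       ns end
 *   |-----------|-----------|---------------------|-------------|
 *   |  padding  | info blk  |  struct page array  |  user data  |
 *
 * The info block itself is read and written at namespace offset SZ_4K,
 * and in pmem mode the memmap occupies the span between the reserved
 * area and ->dataoff.
 */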
int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
{
        int rc;
        struct nd_pfn *nd_pfn;
        struct device *pfn_dev;
        struct nd_pfn_sb *pfn_sb;
        struct nd_region *nd_region = to_nd_region(ndns->dev.parent);

        if (ndns->force_raw)
                return -ENODEV;

        switch (ndns->claim_class) {
        case NVDIMM_CCLASS_NONE:
        case NVDIMM_CCLASS_PFN:
                break;
        default:
                return -ENODEV;
        }

        nvdimm_bus_lock(&ndns->dev);
        nd_pfn = nd_pfn_alloc(nd_region);
        pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
        nvdimm_bus_unlock(&ndns->dev);
        if (!pfn_dev)
                return -ENOMEM;
        pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
        nd_pfn = to_nd_pfn(pfn_dev);
        nd_pfn->pfn_sb = pfn_sb;
        rc = nd_pfn_validate(nd_pfn, PFN_SIG);
        dev_dbg(dev, "pfn: %s\n", rc == 0 ? dev_name(pfn_dev) : "<none>");
        if (rc < 0) {
                nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
                put_device(pfn_dev);
        } else
                __nd_device_register(pfn_dev);

        return rc;
}
EXPORT_SYMBOL(nd_pfn_probe);
static u32 info_block_reserve(void)
{
        return ALIGN(SZ_8K, PAGE_SIZE);
}
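
/*
 * The reserve covers the 4K pad at the start of the namespace plus the
 * 4K info block that follows it, rounded up for architectures whose
 * base pages are larger than 8K.
 */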
/*
 * We hotplug memory at section granularity, so pad the reserved area
 * from the previous section base to the namespace base address.
 */
static unsigned long init_altmap_base(resource_size_t base)
{
        unsigned long base_pfn = PHYS_PFN(base);

        return PFN_SECTION_ALIGN_DOWN(base_pfn);
}
static unsigned long init_altmap_reserve(resource_size_t base)
{
        unsigned long reserve = info_block_reserve() >> PAGE_SHIFT;
        unsigned long base_pfn = PHYS_PFN(base);

        reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
        return reserve;
}
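
/*
 * Worked example (x86_64: 128M sections, 4K pages): for
 * base = 264M (0x10800000), base_pfn = 0x10800 and the section-aligned
 * base is 0x10000 (256M), so the altmap reserve is
 * (0x10800 - 0x10000) = 0x800 pfns of section padding plus 2 pfns for
 * the 8K info-block reserve.
 */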
static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
        struct resource *res = &pgmap->res;
        struct vmem_altmap *altmap = &pgmap->altmap;
        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
        u64 offset = le64_to_cpu(pfn_sb->dataoff);
        u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
        u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
        u32 reserve = info_block_reserve();
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        resource_size_t base = nsio->res.start + start_pad;
        struct vmem_altmap __altmap = {
                .base_pfn = init_altmap_base(base),
                .reserve = init_altmap_reserve(base),
        };

        memcpy(res, &nsio->res, sizeof(*res));
        res->start += start_pad;
        res->end -= end_trunc;

        if (nd_pfn->mode == PFN_MODE_RAM) {
                if (offset < reserve)
                        return -EINVAL;
                nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
        } else if (nd_pfn->mode == PFN_MODE_PMEM) {
                nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res)
                                - offset) / PAGE_SIZE);
                if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
                        dev_info(&nd_pfn->dev,
                                        "number of pfns truncated from %lld to %ld\n",
                                        le64_to_cpu(nd_pfn->pfn_sb->npfns),
                                        nd_pfn->npfns);
                memcpy(altmap, &__altmap, sizeof(*altmap));
                altmap->free = PHYS_PFN(offset - reserve);
                altmap->alloc = 0;
                pgmap->flags |= PGMAP_ALTMAP_VALID;
        } else
                return -ENXIO;

        return 0;
}
static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
{
        return min_t(u64, PHYS_SECTION_ALIGN_DOWN(phys),
                        ALIGN_DOWN(phys, nd_pfn->align));
}
/*
 * Check if pmem collides with 'System RAM', or other regions when
 * section aligned. Trim it accordingly.
 */
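/*
 * Illustrative scenario: if a namespace starts part-way into a memory
 * section that also hosts 'System RAM', the start is padded up to the
 * next section boundary (start_pad); likewise an unaligned or
 * conflicting tail is truncated down (end_trunc), so the hot-added
 * memmap never overlaps a foreign region.
 */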
static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc)
{
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
        const resource_size_t start = nsio->res.start;
        const resource_size_t end = start + resource_size(&nsio->res);
        resource_size_t adjust, size;

        *start_pad = 0;
        *end_trunc = 0;

        adjust = start - PHYS_SECTION_ALIGN_DOWN(start);
        size = resource_size(&nsio->res) + adjust;
        if (region_intersects(start - adjust, size, IORESOURCE_SYSTEM_RAM,
                                IORES_DESC_NONE) == REGION_MIXED
                        || nd_region_conflict(nd_region, start - adjust, size))
                *start_pad = PHYS_SECTION_ALIGN_UP(start) - start;

        /* Now check that end of the range does not collide. */
        adjust = PHYS_SECTION_ALIGN_UP(end) - end;
        size = resource_size(&nsio->res) + adjust;
        if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
                                IORES_DESC_NONE) == REGION_MIXED
                        || !IS_ALIGNED(end, nd_pfn->align)
                        || nd_region_conflict(nd_region, start, size))
                *end_trunc = end - phys_pmem_align_down(nd_pfn, end);
}
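
/*
 * Worked example for the sizing below (illustrative): a 100 GiB
 * PFN_MODE_PMEM namespace holds 100 GiB / 4K = 26,214,400 pages; at 64
 * bytes of struct page each, the memmap consumes roughly 1.6 GiB of
 * namespace capacity before user data begins at ->dataoff.
 */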
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
        struct nd_namespace_common *ndns = nd_pfn->ndns;
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        u32 start_pad, end_trunc, reserve = info_block_reserve();
        resource_size_t start, size;
        struct nd_region *nd_region;
        struct nd_pfn_sb *pfn_sb;
        unsigned long npfns;
        phys_addr_t offset;
        const char *sig;
        u64 checksum;
        int rc;

        pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
        if (!pfn_sb)
                return -ENOMEM;

        nd_pfn->pfn_sb = pfn_sb;
        if (is_nd_dax(&nd_pfn->dev))
                sig = DAX_SIG;
        else
                sig = PFN_SIG;

        rc = nd_pfn_validate(nd_pfn, sig);
        if (rc != -ENODEV)
                return rc;

        /* no info block, do init */
        memset(pfn_sb, 0, sizeof(*pfn_sb));

        nd_region = to_nd_region(nd_pfn->dev.parent);
        if (nd_region->ro) {
                dev_info(&nd_pfn->dev,
                                "%s is read-only, unable to init metadata\n",
                                dev_name(&nd_region->dev));
                return -ENXIO;
        }

        memset(pfn_sb, 0, sizeof(*pfn_sb));

        trim_pfn_device(nd_pfn, &start_pad, &end_trunc);
        if (start_pad + end_trunc)
                dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
                                dev_name(&ndns->dev), start_pad + end_trunc);

        /*
         * Note, we use 64 here for the standard size of struct page,
         * debugging options may cause it to be larger in which case the
         * implementation will limit the pfns advertised through
         * ->direct_access() to those that are included in the memmap.
         */
        start = nsio->res.start + start_pad;
        size = resource_size(&nsio->res);
        npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - reserve)
                        / PAGE_SIZE);
        if (nd_pfn->mode == PFN_MODE_PMEM) {
                /*
                 * The altmap should be padded out to the block size used
                 * when populating the vmemmap. This *should* be equal to
                 * PMD_SIZE for most architectures.
                 */
                offset = ALIGN(start + reserve + 64 * npfns,
                                max(nd_pfn->align, PMD_SIZE)) - start;
        } else if (nd_pfn->mode == PFN_MODE_RAM)
                offset = ALIGN(start + reserve, nd_pfn->align) - start;
        else
                return -ENXIO;

        if (offset + start_pad + end_trunc >= size) {
                dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
                                dev_name(&ndns->dev));
                return -ENXIO;
        }

        npfns = (size - offset - start_pad - end_trunc) / SZ_4K;
        pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
        pfn_sb->dataoff = cpu_to_le64(offset);
        pfn_sb->npfns = cpu_to_le64(npfns);
        memcpy(pfn_sb->signature, sig, PFN_SIG_LEN);
        memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
        memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
        pfn_sb->version_major = cpu_to_le16(1);
        pfn_sb->version_minor = cpu_to_le16(3);
        pfn_sb->start_pad = cpu_to_le32(start_pad);
        pfn_sb->end_trunc = cpu_to_le32(end_trunc);
        pfn_sb->align = cpu_to_le32(nd_pfn->align);
        checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
        pfn_sb->checksum = cpu_to_le64(checksum);

        return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0);
}
/*
 * Determine the effective resource range and vmem_altmap from an nd_pfn
 * instance.
 */
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
{
        int rc;

        if (!nd_pfn->uuid || !nd_pfn->ndns)
                return -ENODEV;

        rc = nd_pfn_init(nd_pfn);
        if (rc)
                return rc;

        /* we need a valid pfn_sb before we can init a dev_pagemap */
        return __nvdimm_setup_pfn(nd_pfn, pgmap);
}
EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);
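
/*
 * Illustrative call site (sketch, not verbatim from the pmem driver):
 * a consumer populates a struct dev_pagemap via nvdimm_setup_pfn()
 * before handing it to devm_memremap_pages():
 *
 *   rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
 *   if (rc)
 *           return rc;
 *   addr = devm_memremap_pages(dev, &pmem->pgmap);
 *
 * where pmem->pgmap is a driver-owned dev_pagemap instance.
 */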