// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>
static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}
int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
			nvdimm_bus_unlock(&nd_region->dev);
			return -EBUSY;
		}

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out entries that are duplicates. This should prevent the
	 * same flush hint address from being written more than once.
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}
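/*
 * Illustrative sketch (an assumption for documentation purposes, not the
 * in-tree helper): the flush hint table built above is conceptually a 2D
 * array with (1 << hints_shift) mapped hint slots per DIMM, so the
 * ndrd_get_flush_wpq()/ndrd_set_flush_wpq() accessors index it roughly
 * like this:
 */
#if 0
static void __iomem *example_get_flush_wpq(struct nd_region_data *ndrd,
		int dimm, int hint)
{
	unsigned int num = 1 << ndrd->hints_shift;

	/* row 'dimm', column 'hint' wrapped to the per-dimm slot count */
	return ndrd->flush_wpq[dimm * num + (hint & (num - 1))];
}
#endif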
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}
static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};
bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
	return dev ? dev->type == &nd_volatile_device_type : false;
}
struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);
struct device *nd_region_dev(struct nd_region *nd_region)
{
	if (!nd_region)
		return NULL;
	return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);
struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);
void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region, an input to the MODALIAS for
 * namespace devices, and the bit number for an nvdimm_bus to match namespace
 * devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (test_bit(NDD_ALIASING, &nvdimm->flags))
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_memory(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);
static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/*
	 * NOTE: in the nvdimm_has_flush() error case this attribute is
	 * not visible.
	 */
	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	bool flush;
	int rc = strtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	nvdimm_flush(nd_region);

	return len;
}
static DEVICE_ATTR_RW(deep_flush);
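/*
 * Example usage from userspace (illustrative; the region number and the
 * reported value depend on the platform):
 *
 *   # cat /sys/bus/nd/devices/region0/deep_flush
 *   1
 *   # echo 1 > /sys/bus/nd/devices/region0/deep_flush
 */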
static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);
static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	ssize_t rc = 0;

	if (is_memory(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	/*
	 * The cookie to show depends on which specification of the
	 * labels we are using. If there are no labels then default to
	 * the v1.1 namespace label cookie definition. To read all this
	 * data we need to wait for probing to settle.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (nd_region->ndr_mappings) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		if (ndd) {
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			rc = sprintf(buf, "%#llx\n",
					nd_region_interleave_set_cookie(nd_region,
						nsindex));
		}
	}
	nvdimm_bus_unlock(dev);

	if (rc)
		return rc;

	return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_memory(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}
resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
	resource_size_t available = 0;
	int i;

	if (is_memory(&nd_region->dev))
		available = PHYS_ADDR_MAX;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		if (is_memory(&nd_region->dev))
			available = min(available,
					nd_pmem_max_contiguous_dpa(nd_region,
							nd_mapping));
		else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}
	if (is_memory(&nd_region->dev))
		return available * nd_region->ndr_mappings;
	return available;
}
static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);
static ssize_t max_available_extent_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_allocatable_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(max_available_extent);
static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);
static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);
static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);
static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	device_lock(dev);
	if (dev->driver)
		rc = badblocks_show(&nd_region->bb, buf, 0);
	else
		rc = -ENXIO;
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);
static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);
static ssize_t persistence_domain_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
		return sprintf(buf, "cpu_cache\n");
	else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
		return sprintf(buf, "memory_controller\n");
	else
		return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);
static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_deep_flush.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_max_available_extent.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_resource.attr,
	&dev_attr_persistence_domain.attr,
	NULL,
};
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
		return 0;

	if (a == &dev_attr_resource.attr) {
		if (is_memory(dev))
			return 0400;
		else
			return 0;
	}

	if (a == &dev_attr_deep_flush.attr) {
		int has_flush = nvdimm_has_flush(nd_region);

		if (has_flush == 1)
			return a->mode;
		else if (has_flush == 0)
			return 0444;
		else
			return 0;
	}

	if (a == &dev_attr_persistence_domain.attr) {
		if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
					| BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
			return 0;
		return a->mode;
	}

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_memory(dev) && nd_set)
		return a->mode;

	return 0;
}
struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (!nd_set)
		return 0;

	if (nsindex && __le16_to_cpu(nsindex->major) == 1
			&& __le16_to_cpu(nsindex->minor) == 1)
		return nd_set->cookie1;
	return nd_set->cookie2;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->altcookie;
	return 0;
}
void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}
/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds. Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && is_nd_region(dev)) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			mutex_lock(&nd_mapping->lock);
			nd_mapping_free_labels(nd_mapping);
			mutex_unlock(&nd_mapping->lock);

			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}
	}
	if (dev->parent && is_nd_region(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}
void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size,
			nd_mapping->position);
}
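/*
 * Illustrative sysfs output (hypothetical values): reading mappingN of a
 * region yields "<dimm-name>,<start>,<size>,<position>", e.g.
 *
 *   # cat /sys/bus/nd/devices/region0/mapping0
 *   nmem0,0,268435456,0
 */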
#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
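/*
 * The driver goes on to instantiate REGION_MAPPING(0) through
 * REGION_MAPPING(31) (elided here) to back the mapping_attributes[] table
 * below. As a sketch, REGION_MAPPING(0) expands to roughly the following:
 */
#if 0
static ssize_t mapping0_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return mappingN(dev, buf, 0);
}
static DEVICE_ATTR_RO(mapping0);
#endif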
static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;

	return 0;
}
static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);
int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_dbg(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}
/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu. For larger systems we need to lock to share lanes. For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively. We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
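/*
 * Minimal usage sketch (illustrative only): a BLK or BTT I/O path holds a
 * lane for the duration of one request, bracketing its per-lane work with
 * the acquire/release pair documented above.
 */
#if 0
static void example_lane_io(struct nd_region *nd_region)
{
	unsigned int lane = nd_region_acquire_lane(nd_region);

	/* ... perform the per-lane data-window or BTT log update here ... */

	nd_region_release_lane(nd_region, lane);
}
#endif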
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}

		if (test_bit(NDD_UNARMED, &nvdimm->flags))
			ro = 1;

		if (test_bit(NDD_NOBLK, &nvdimm->flags)
				&& dev_type == &nd_blk_device_type) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		nd_region->mapping[i].position = mapping->position;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	nd_region->target_node = ndr_desc->target_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	dev->of_node = ndr_desc->of_node;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
err_id:
	kfree(region_buf);
	return NULL;
}
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush. Note that we've already arranged for pmem
	 * writes to avoid the cache via memcpy_flushcache(). The final
	 * wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
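/*
 * Usage sketch (illustrative, not taken from this file): a pmem write path
 * that bypasses the CPU cache would pair memcpy_flushcache() with
 * nvdimm_flush() roughly as follows.
 */
#if 0
static void example_pmem_write(struct nd_region *nd_region,
		void *pmem_dst, const void *src, size_t len)
{
	/* data reaches the DIMM write queues without lingering in cache */
	memcpy_flushcache(pmem_dst, src, len);

	/* drain the posted-write queues so the data is power-fail safe */
	nvdimm_flush(nd_region);
}
#endif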
/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability cannot be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	int i;

	/* no nvdimm or pmem api == flushing capability unknown */
	if (nd_region->ndr_mappings == 0
			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* flush hints present / available */
		if (nvdimm->num_flush)
			return 1;
	}

	/*
	 * The platform defines dimm devices without hints, assume
	 * platform persistence mechanism like ADR
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
int nvdimm_has_cache(struct nd_region *nd_region)
{
	return is_nd_pmem(&nd_region->dev) &&
		!test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);
struct conflict_context {
	struct nd_region *nd_region;
	resource_size_t start, size;
};

static int region_conflict(struct device *dev, void *data)
{
	struct nd_region *nd_region;
	struct conflict_context *ctx = data;
	resource_size_t res_end, region_end, region_start;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region == ctx->nd_region)
		return 0;

	res_end = ctx->start + ctx->size;
	region_start = nd_region->ndr_start;
	region_end = region_start + nd_region->ndr_size;
	if (ctx->start >= region_start && ctx->start < region_end)
		return -EBUSY;
	if (res_end > region_start && res_end <= region_end)
		return -EBUSY;
	return 0;
}

int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
		resource_size_t size)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct conflict_context ctx = {
		.nd_region = nd_region,
		.start = start,
		.size = size,
	};

	return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
}
void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}