// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/memregion.h>
#include <linux/genalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/sort.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
/**
 * DOC: cxl core region
 *
 * CXL Regions represent mapped memory capacity in system physical address
 * space. Whereas the CXL Root Decoders identify the bounds of potential CXL
 * Memory ranges, Regions represent the active mapped capacity by the HDM
 * Decoder Capability structures throughout the Host Bridges, Switches, and
 * Endpoints in the topology.
 *
 * Region configuration has ordering constraints. UUID may be set at any time
 * but is only visible for persistent regions.
 * 1. Interleave granularity
 * 2. Interleave size
 * 3. Decoder targets
 */

/*
 * All changes to the interleave configuration occur with this lock held
 * for write.
 */
static DECLARE_RWSEM(cxl_region_rwsem);
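/*
 * Illustrative sysfs flow honoring the ordering constraints above. The
 * decoder and region names below are hypothetical examples; actual names
 * depend on the platform topology:
 *
 *	cd /sys/bus/cxl/devices/decoder0.0
 *	echo region0 > create_pmem_region
 *	cd region0
 *	echo 256 > interleave_granularity
 *	echo 2 > interleave_ways
 *	uuidgen > uuid
 *	echo $((512 << 20)) > size
 *	echo decoder2.0 > target0
 *	echo decoder3.0 > target1
 *	echo 1 > commit
 */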
static struct cxl_region *to_cxl_region(struct device *dev);
static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (cxlr->mode != CXL_DECODER_PMEM)
		rc = sysfs_emit(buf, "\n");
	else
		rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
	up_read(&cxl_region_rwsem);

	return rc;
}
static int is_dup(struct device *match, void *data)
{
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	uuid_t *uuid = data;

	if (!is_cxl_region(match))
		return 0;

	lockdep_assert_held(&cxl_region_rwsem);
	cxlr = to_cxl_region(match);
	p = &cxlr->params;

	if (uuid_equal(&p->uuid, uuid)) {
		dev_dbg(match, "already has uuid: %pUb\n", uuid);
		return -EBUSY;
	}

	return 0;
}
static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	uuid_t temp;
	ssize_t rc;

	if (len != UUID_STRING_LEN + 1)
		return -EINVAL;

	rc = uuid_parse(buf, &temp);
	if (rc)
		return rc;

	if (uuid_is_null(&temp))
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (uuid_equal(&p->uuid, &temp))
		goto out;

	rc = -EBUSY;
	if (p->state >= CXL_CONFIG_ACTIVE)
		goto out;

	rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
	if (rc < 0)
		goto out;

	uuid_copy(&p->uuid, &temp);
out:
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(uuid);
static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
					  struct cxl_region *cxlr)
{
	return xa_load(&port->regions, (unsigned long)cxlr);
}
static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
{
	if (!cpu_cache_has_invalidate_memregion()) {
		if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
			dev_warn_once(
				&cxlr->dev,
				"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
			return 0;
		} else {
			dev_err(&cxlr->dev,
				"Failed to synchronize CPU cache state\n");
			return -ENXIO;
		}
	}

	cpu_cache_invalidate_memregion(IORES_DESC_CXL);
	return 0;
}
static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	/*
	 * Before region teardown attempt to flush, and if the flush
	 * fails cancel the region teardown for data consistency
	 * concerns
	 */
	rc = cxl_region_invalidate_memregion(cxlr);
	if (rc)
		return rc;

	for (i = count - 1; i >= 0; i--) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_port *iter = cxled_to_port(cxled);
		struct cxl_dev_state *cxlds = cxlmd->cxlds;
		struct cxl_ep *ep;

		if (cxlds->rcd)
			goto endpoint_reset;

		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
			struct cxl_region_ref *cxl_rr;
			struct cxl_decoder *cxld;

			cxl_rr = cxl_rr_load(iter, cxlr);
			cxld = cxl_rr->decoder;
			if (cxld->reset)
				rc = cxld->reset(cxld);
			if (rc)
				return rc;
			set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
		}

endpoint_reset:
		rc = cxled->cxld.reset(&cxled->cxld);
		if (rc)
			return rc;
		set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
	}

	/* all decoders associated with this region have been torn down */
	clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);

	return 0;
}
static int commit_decoder(struct cxl_decoder *cxld)
{
	struct cxl_switch_decoder *cxlsd = NULL;

	if (cxld->commit)
		return cxld->commit(cxld);

	if (is_switch_decoder(&cxld->dev))
		cxlsd = to_cxl_switch_decoder(&cxld->dev);

	if (dev_WARN_ONCE(&cxld->dev, !cxlsd || cxlsd->nr_targets > 1,
			  "->commit() is required\n"))
		return -ENXIO;
	return 0;
}
static int cxl_region_decode_commit(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_region_ref *cxl_rr;
		struct cxl_decoder *cxld;
		struct cxl_port *iter;
		struct cxl_ep *ep;

		/* commit bottom up */
		for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
		     iter = to_cxl_port(iter->dev.parent)) {
			cxl_rr = cxl_rr_load(iter, cxlr);
			cxld = cxl_rr->decoder;
			rc = commit_decoder(cxld);
			if (rc)
				break;
		}

		if (rc) {
			/* programming @iter failed, teardown */
			for (ep = cxl_ep_load(iter, cxlmd); ep && iter;
			     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
				cxl_rr = cxl_rr_load(iter, cxlr);
				cxld = cxl_rr->decoder;
				if (cxld->reset)
					cxld->reset(cxld);
			}

			cxled->cxld.reset(&cxled->cxld);
			goto err;
		}
	}

	return 0;

err:
	/* undo the targets that were successfully committed */
	cxl_region_decode_reset(cxlr, i);
	return rc;
}
static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	bool commit;
	ssize_t rc;

	rc = kstrtobool(buf, &commit);
	if (rc)
		return rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	/* Already in the requested state? */
	if (commit && p->state >= CXL_CONFIG_COMMIT)
		goto out;
	if (!commit && p->state < CXL_CONFIG_COMMIT)
		goto out;

	/* Not ready to commit? */
	if (commit && p->state < CXL_CONFIG_ACTIVE) {
		rc = -ENXIO;
		goto out;
	}

	/*
	 * Invalidate caches before region setup to drop any speculative
	 * consumption of this address space
	 */
	rc = cxl_region_invalidate_memregion(cxlr);
	if (rc)
		goto out;

	if (commit) {
		rc = cxl_region_decode_commit(cxlr);
		if (rc == 0)
			p->state = CXL_CONFIG_COMMIT;
	} else {
		p->state = CXL_CONFIG_RESET_PENDING;
		up_write(&cxl_region_rwsem);
		device_release_driver(&cxlr->dev);
		down_write(&cxl_region_rwsem);

		/*
		 * The lock was dropped, so need to revalidate that the reset is
		 * still pending.
		 */
		if (p->state == CXL_CONFIG_RESET_PENDING) {
			rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
			/*
			 * Revert to committed since there may still be active
			 * decoders associated with this region, or move forward
			 * to active to mark the reset successful
			 */
			if (rc)
				p->state = CXL_CONFIG_COMMIT;
			else
				p->state = CXL_CONFIG_ACTIVE;
		}
	}

out:
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}
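/*
 * Usage note: "echo 1 > commit" programs every decoder in the region's
 * decode chain bottom up; "echo 0 > commit" parks the region in
 * CXL_CONFIG_RESET_PENDING, releases the region driver, and then resets
 * the decoders, unless something re-committed the region while the lock
 * was dropped.
 */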
static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RW(commit);
static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
				  int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_region *cxlr = to_cxl_region(dev);

	/*
	 * Support tooling that expects to find a 'uuid' attribute for all
	 * regions regardless of mode.
	 */
	if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
		return 0444;
	return a->mode;
}
static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
	up_read(&cxl_region_rwsem);

	return rc;
}
static const struct attribute_group *get_cxl_region_target_group(void);
static ssize_t interleave_ways_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	unsigned int val, save;
	int rc;
	u8 iw;

	rc = kstrtouint(buf, 0, &val);
	if (rc)
		return rc;

	rc = ways_to_eiw(val, &iw);
	if (rc)
		return rc;

	/*
	 * Even for x3, x9, and x12 interleaves the region interleave must be a
	 * power of 2 multiple of the host bridge interleave.
	 */
	if (!is_power_of_2(val / cxld->interleave_ways) ||
	    (val % cxld->interleave_ways)) {
		dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
		return -EINVAL;
	}

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	save = p->interleave_ways;
	p->interleave_ways = val;
	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
	if (rc)
		p->interleave_ways = save;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_ways);
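/*
 * Worked example of the power-of-2 multiple rule above: with a x3
 * host-bridge interleave (cxld->interleave_ways == 3), region ways of 3,
 * 6, or 12 are accepted (val / 3 yields 1, 2, 4), while 9 is rejected
 * because 9 / 3 == 3 is not a power of 2.
 */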
static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
	up_read(&cxl_region_rwsem);

	return rc;
}
static ssize_t interleave_granularity_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int rc, val;
	u16 ig;

	rc = kstrtoint(buf, 0, &val);
	if (rc)
		return rc;

	rc = granularity_to_eig(val, &ig);
	if (rc)
		return rc;

	/*
	 * When the host-bridge is interleaved, disallow region granularity !=
	 * root granularity. Regions with a granularity less than the root
	 * interleave result in needing multiple endpoints to support a single
	 * slot in the interleave (possible to support in the future). Regions
	 * with a granularity greater than the root interleave result in invalid
	 * DPA translations (invalid to support).
	 */
	if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	p->interleave_granularity = val;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_granularity);
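/*
 * Example: if the root decoder interleaves x2 across host bridges at
 * 256-byte granularity, the region granularity must also be 256. Any
 * smaller value would need multiple endpoints per interleave slot; any
 * larger value would produce invalid DPA translations, per the comment
 * above.
 */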
static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	u64 resource = -1ULL;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->res)
		resource = p->res->start;
	rc = sysfs_emit(buf, "%#llx\n", resource);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RO(resource);
static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);

	return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode));
}
static DEVICE_ATTR_RO(mode);
static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	struct cxl_region_params *p = &cxlr->params;
	struct resource *res;
	u32 remainder = 0;

	lockdep_assert_held_write(&cxl_region_rwsem);

	/* Nothing to do... */
	if (p->res && resource_size(p->res) == size)
		return 0;

	/* To change size the old size must be freed first */
	if (p->res)
		return -EBUSY;

	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
		return -EBUSY;

	/* ways, granularity and uuid (if PMEM) need to be set before HPA */
	if (!p->interleave_ways || !p->interleave_granularity ||
	    (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
		return -ENXIO;

	div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder);
	if (remainder)
		return -EINVAL;

	res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
				    dev_name(&cxlr->dev));
	if (IS_ERR(res)) {
		dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n",
			PTR_ERR(res));
		return PTR_ERR(res);
	}

	p->res = res;
	p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;

	return 0;
}
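/*
 * Example of the size constraint above: a x4 region only accepts sizes
 * that are multiples of SZ_256M * 4 = 1GB, so that each of the 4 endpoint
 * decoders maps an equal, multiple-of-256MB share of the HPA range.
 */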
static void cxl_region_iomem_release(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;

	if (device_is_registered(&cxlr->dev))
		lockdep_assert_held_write(&cxl_region_rwsem);
	if (p->res) {
		/*
		 * Autodiscovered regions may not have been able to insert their
		 * resource.
		 */
		if (p->res->parent)
			remove_resource(p->res);
		kfree(p->res);
		p->res = NULL;
	}
}
static int free_hpa(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;

	lockdep_assert_held_write(&cxl_region_rwsem);

	if (!p->res)
		return 0;

	if (p->state >= CXL_CONFIG_ACTIVE)
		return -EBUSY;

	cxl_region_iomem_release(cxlr);
	p->state = CXL_CONFIG_IDLE;
	return 0;
}
static ssize_t size_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	u64 val;
	int rc;

	rc = kstrtou64(buf, 0, &val);
	if (rc)
		return rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (val)
		rc = alloc_hpa(cxlr, val);
	else
		rc = free_hpa(cxlr);
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;

	return len;
}
static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	u64 size = 0;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->res)
		size = resource_size(p->res);
	rc = sysfs_emit(buf, "%#llx\n", size);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RW(size);
static struct attribute *cxl_region_attrs[] = {
	&dev_attr_uuid.attr,
	&dev_attr_commit.attr,
	&dev_attr_interleave_ways.attr,
	&dev_attr_interleave_granularity.attr,
	&dev_attr_resource.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	NULL,
};
static const struct attribute_group cxl_region_group = {
	.attrs = cxl_region_attrs,
	.is_visible = cxl_region_visible,
};
static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	int rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		rc = -ENXIO;
		goto out;
	}

	cxled = p->targets[pos];
	if (!cxled)
		rc = sysfs_emit(buf, "\n");
	else
		rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
out:
	up_read(&cxl_region_rwsem);

	return rc;
}
static int match_free_decoder(struct device *dev, void *data)
{
	struct cxl_decoder *cxld;
	int *id = data;

	if (!is_switch_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);

	/* enforce ordered allocation */
	if (cxld->id != *id)
		return 0;

	if (!cxld->region)
		return 1;

	(*id)++;

	return 0;
}
static int match_auto_decoder(struct device *dev, void *data)
{
	struct cxl_region_params *p = data;
	struct cxl_decoder *cxld;
	struct range *r;

	if (!is_switch_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);
	r = &cxld->hpa_range;

	if (p->res && p->res->start == r->start && p->res->end == r->end)
		return 1;

	return 0;
}
static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
						   struct cxl_region *cxlr)
{
	struct device *dev;
	int id = 0;

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
		dev = device_find_child(&port->dev, &cxlr->params,
					match_auto_decoder);
	else
		dev = device_find_child(&port->dev, &id, match_free_decoder);
	if (!dev)
		return NULL;
	/*
	 * This decoder is pinned registered as long as the endpoint decoder is
	 * registered, and endpoint decoder unregistration holds the
	 * cxl_region_rwsem over unregister events, so no need to hold on to
	 * this extra reference.
	 */
	put_device(dev);
	return to_cxl_decoder(dev);
}
static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
					       struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_region_ref *cxl_rr, *iter;
	unsigned long index;
	int rc;

	xa_for_each(&port->regions, index, iter) {
		struct cxl_region_params *ip = &iter->region->params;

		if (!ip->res)
			continue;

		if (ip->res->start > p->res->start) {
			dev_dbg(&cxlr->dev,
				"%s: HPA order violation %s:%pr vs %pr\n",
				dev_name(&port->dev),
				dev_name(&iter->region->dev), ip->res, p->res);
			return ERR_PTR(-EBUSY);
		}
	}

	cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
	if (!cxl_rr)
		return ERR_PTR(-ENOMEM);
	cxl_rr->port = port;
	cxl_rr->region = cxlr;
	cxl_rr->nr_targets = 1;
	xa_init(&cxl_rr->endpoints);

	rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: failed to track region reference: %d\n",
			dev_name(&port->dev), rc);
		kfree(cxl_rr);
		return ERR_PTR(rc);
	}

	return cxl_rr;
}
static void cxl_rr_free_decoder(struct cxl_region_ref *cxl_rr)
{
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_decoder *cxld = cxl_rr->decoder;

	if (!cxld)
		return;

	dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
	if (cxld->region == cxlr) {
		cxld->region = NULL;
		put_device(&cxlr->dev);
	}
}
static void free_region_ref(struct cxl_region_ref *cxl_rr)
{
	struct cxl_port *port = cxl_rr->port;
	struct cxl_region *cxlr = cxl_rr->region;

	cxl_rr_free_decoder(cxl_rr);
	xa_erase(&port->regions, (unsigned long)cxlr);
	xa_destroy(&cxl_rr->endpoints);
	kfree(cxl_rr);
}
static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
			 struct cxl_endpoint_decoder *cxled)
{
	int rc;
	struct cxl_port *port = cxl_rr->port;
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_decoder *cxld = cxl_rr->decoder;
	struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));

	if (ep) {
		rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
			       GFP_KERNEL);
		if (rc)
			return rc;
	}
	cxl_rr->nr_eps++;

	if (!cxld->region) {
		cxld->region = cxlr;
		get_device(&cxlr->dev);
	}

	return 0;
}
static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
				struct cxl_endpoint_decoder *cxled,
				struct cxl_region_ref *cxl_rr)
{
	struct cxl_decoder *cxld;

	if (port == cxled_to_port(cxled))
		cxld = &cxled->cxld;
	else
		cxld = cxl_region_find_decoder(port, cxlr);
	if (!cxld) {
		dev_dbg(&cxlr->dev, "%s: no decoder available\n",
			dev_name(&port->dev));
		return -EBUSY;
	}

	if (cxld->region) {
		dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
			dev_name(&port->dev), dev_name(&cxld->dev),
			dev_name(&cxld->region->dev));
		return -EBUSY;
	}

	/*
	 * Endpoints should already match the region type, but backstop that
	 * assumption with an assertion. Switch-decoders change mapping-type
	 * based on what is mapped when they are assigned to a region.
	 */
	dev_WARN_ONCE(&cxlr->dev,
		      port == cxled_to_port(cxled) &&
			      cxld->target_type != cxlr->type,
		      "%s:%s mismatch decoder type %d -> %d\n",
		      dev_name(&cxled_to_memdev(cxled)->dev),
		      dev_name(&cxld->dev), cxld->target_type, cxlr->type);
	cxld->target_type = cxlr->type;
	cxl_rr->decoder = cxld;
	return 0;
}
/**
 * cxl_port_attach_region() - track a region's interest in a port by endpoint
 * @port: port to add a new region reference 'struct cxl_region_ref'
 * @cxlr: region to attach to @port
 * @cxled: endpoint decoder used to create or further pin a region reference
 * @pos: interleave position of @cxled in @cxlr
 *
 * The attach event is an opportunity to validate CXL decode setup
 * constraints and record metadata needed for programming HDM decoders,
 * in particular decoder target lists.
 *
 * The steps are:
 *
 * - validate that there are no other regions with a higher HPA already
 *   associated with @port
 * - establish a region reference if one is not already present
 *
 *   - additionally allocate a decoder instance that will host @cxlr on @port
 *
 * - pin the region reference by the endpoint
 * - account for how many entries in @port's target list are needed to
 *   cover all of the added endpoints.
 */
static int cxl_port_attach_region(struct cxl_port *port,
				  struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
	struct cxl_region_ref *cxl_rr;
	bool nr_targets_inc = false;
	struct cxl_decoder *cxld;
	unsigned long index;
	int rc = -EBUSY;

	lockdep_assert_held_write(&cxl_region_rwsem);

	cxl_rr = cxl_rr_load(port, cxlr);
	if (cxl_rr) {
		struct cxl_ep *ep_iter;
		int found = 0;

		/*
		 * Walk the existing endpoints that have been attached to
		 * @cxlr at @port and see if they share the same 'next' port
		 * in the downstream direction. I.e. endpoints that share common
		 * upstream switch.
		 */
		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
			if (ep_iter == ep)
				continue;
			if (ep_iter->next == ep->next) {
				found++;
				break;
			}
		}

		/*
		 * New target port, or @port is an endpoint port that always
		 * accounts its own local decode as a target.
		 */
		if (!found || !ep->next) {
			cxl_rr->nr_targets++;
			nr_targets_inc = true;
		}
	} else {
		cxl_rr = alloc_region_ref(port, cxlr);
		if (IS_ERR(cxl_rr)) {
			dev_dbg(&cxlr->dev,
				"%s: failed to allocate region reference\n",
				dev_name(&port->dev));
			return PTR_ERR(cxl_rr);
		}
		nr_targets_inc = true;

		rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr);
		if (rc)
			goto out_erase;
	}
	cxld = cxl_rr->decoder;

	rc = cxl_rr_ep_add(cxl_rr, cxled);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: failed to track endpoint %s:%s reference\n",
			dev_name(&port->dev), dev_name(&cxlmd->dev),
			dev_name(&cxld->dev));
		goto out_erase;
	}

	dev_dbg(&cxlr->dev,
		"%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
		dev_name(port->uport_dev), dev_name(&port->dev),
		dev_name(&cxld->dev), dev_name(&cxlmd->dev),
		dev_name(&cxled->cxld.dev), pos,
		ep ? ep->next ? dev_name(ep->next->uport_dev) :
				dev_name(&cxlmd->dev) :
		     "none",
		cxl_rr->nr_eps, cxl_rr->nr_targets);

	return 0;
out_erase:
	if (nr_targets_inc)
		cxl_rr->nr_targets--;
	if (cxl_rr->nr_eps == 0)
		free_region_ref(cxl_rr);
	return rc;
}
static void cxl_port_detach_region(struct cxl_port *port,
				   struct cxl_region *cxlr,
				   struct cxl_endpoint_decoder *cxled)
{
	struct cxl_region_ref *cxl_rr;
	struct cxl_ep *ep = NULL;

	lockdep_assert_held_write(&cxl_region_rwsem);

	cxl_rr = cxl_rr_load(port, cxlr);
	if (!cxl_rr)
		return;

	/*
	 * Endpoint ports do not carry cxl_ep references, and they
	 * never target more than one endpoint by definition
	 */
	if (cxl_rr->decoder == &cxled->cxld)
		cxl_rr->nr_eps--;
	else
		ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
	if (ep) {
		struct cxl_ep *ep_iter;
		unsigned long index;
		int found = 0;

		cxl_rr->nr_eps--;
		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
			if (ep_iter->next == ep->next) {
				found++;
				break;
			}
		}
		if (!found)
			cxl_rr->nr_targets--;
	}

	if (cxl_rr->nr_eps == 0)
		free_region_ref(cxl_rr);
}
static int check_last_peer(struct cxl_endpoint_decoder *cxled,
			   struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
			   int distance)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled_peer;
	struct cxl_port *port = cxl_rr->port;
	struct cxl_memdev *cxlmd_peer;
	struct cxl_ep *ep_peer;
	int pos = cxled->pos;

	/*
	 * If this position wants to share a dport with the last endpoint mapped
	 * then that endpoint, at index 'position - distance', must also be
	 * mapped by this dport.
	 */
	if (pos < distance) {
		dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
		return -ENXIO;
	}
	cxled_peer = p->targets[pos - distance];
	cxlmd_peer = cxled_to_memdev(cxled_peer);
	ep_peer = cxl_ep_load(port, cxlmd_peer);
	if (ep->dport != ep_peer->dport) {
		dev_dbg(&cxlr->dev,
			"%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
			dev_name(&cxlmd_peer->dev),
			dev_name(&cxled_peer->cxld.dev));
		return -ENXIO;
	}

	return 0;
}
static int cxl_port_setup_targets(struct cxl_port *port,
				  struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_decoder *cxld = cxl_rr->decoder;
	struct cxl_switch_decoder *cxlsd;
	u16 eig, peig;
	u8 eiw, peiw;

	/*
	 * While root level decoders support x3, x6, x12, switch level
	 * decoders only support powers of 2 up to x16.
	 */
	if (!is_power_of_2(cxl_rr->nr_targets)) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			cxl_rr->nr_targets);
		return -EINVAL;
	}

	cxlsd = to_cxl_switch_decoder(&cxld->dev);
	if (cxl_rr->nr_targets_set) {
		int i, distance;

		/*
		 * Passthrough decoders impose no distance requirements between
		 * peers
		 */
		if (cxl_rr->nr_targets == 1)
			distance = 0;
		else
			distance = p->nr_targets / cxl_rr->nr_targets;
		for (i = 0; i < cxl_rr->nr_targets_set; i++)
			if (ep->dport == cxlsd->target[i]) {
				rc = check_last_peer(cxled, ep, cxl_rr,
						     distance);
				if (rc)
					return rc;
				goto out_target_set;
			}
		goto add_target;
	}

	if (is_cxl_root(parent_port)) {
		parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
		parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
		/*
		 * For purposes of address bit routing, use power-of-2 math for
		 * switch ports.
		 */
		if (!is_power_of_2(parent_iw))
			parent_iw /= 3;
	} else {
		struct cxl_region_ref *parent_rr;
		struct cxl_decoder *parent_cxld;

		parent_rr = cxl_rr_load(parent_port, cxlr);
		parent_cxld = parent_rr->decoder;
		parent_ig = parent_cxld->interleave_granularity;
		parent_iw = parent_cxld->interleave_ways;
	}

	rc = granularity_to_eig(parent_ig, &peig);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
			dev_name(parent_port->uport_dev),
			dev_name(&parent_port->dev), parent_ig);
		return rc;
	}

	rc = ways_to_eiw(parent_iw, &peiw);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
			dev_name(parent_port->uport_dev),
			dev_name(&parent_port->dev), parent_iw);
		return rc;
	}

	iw = cxl_rr->nr_targets;
	rc = ways_to_eiw(iw, &eiw);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev), iw);
		return rc;
	}

	/*
	 * Interleave granularity is a multiple of @parent_port granularity.
	 * Multiplier is the parent port interleave ways.
	 */
	rc = granularity_to_eig(parent_ig * parent_iw, &eig);
	if (rc) {
		dev_dbg(&parent_port->dev,
			"%s: invalid granularity calculation (%d * %d)\n",
			dev_name(&parent_port->dev), parent_ig, parent_iw);
		return rc;
	}

	rc = eig_to_granularity(eig, &ig);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			256 << eig);
		return rc;
	}

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		if (cxld->interleave_ways != iw ||
		    cxld->interleave_granularity != ig ||
		    cxld->hpa_range.start != p->res->start ||
		    cxld->hpa_range.end != p->res->end ||
		    ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
			dev_err(&cxlr->dev,
				"%s:%s %s expected iw: %d ig: %d %pr\n",
				dev_name(port->uport_dev), dev_name(&port->dev),
				__func__, iw, ig, p->res);
			dev_err(&cxlr->dev,
				"%s:%s %s got iw: %d ig: %d state: %s %#llx:%#llx\n",
				dev_name(port->uport_dev), dev_name(&port->dev),
				__func__, cxld->interleave_ways,
				cxld->interleave_granularity,
				(cxld->flags & CXL_DECODER_F_ENABLE) ?
					"enabled" :
					"disabled",
				cxld->hpa_range.start, cxld->hpa_range.end);
			return -ENXIO;
		}
	} else {
		cxld->interleave_ways = iw;
		cxld->interleave_granularity = ig;
		cxld->hpa_range = (struct range) {
			.start = p->res->start,
			.end = p->res->end,
		};
	}
	dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport_dev),
		dev_name(&port->dev), iw, ig);
add_target:
	if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
		dev_dbg(&cxlr->dev,
			"%s:%s: targets full trying to add %s:%s at %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
		return -ENXIO;
	}
	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		if (cxlsd->target[cxl_rr->nr_targets_set] != ep->dport) {
			dev_dbg(&cxlr->dev, "%s:%s: %s expected %s at %d\n",
				dev_name(port->uport_dev), dev_name(&port->dev),
				dev_name(&cxlsd->cxld.dev),
				dev_name(ep->dport->dport_dev),
				cxl_rr->nr_targets_set);
			return -ENXIO;
		}
	} else
		cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
	inc = 1;
out_target_set:
	cxl_rr->nr_targets_set += inc;
	dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
		dev_name(port->uport_dev), dev_name(&port->dev),
		cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport_dev),
		dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);

	return 0;
}
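/*
 * Worked example of the interleave math above: for a x4 region at 256-byte
 * granularity whose root decoder interleaves x2 across two host bridges,
 * each host-bridge port hosts 2 targets, so iw = 2 and
 * ig = parent_ig * parent_iw = 256 * 2 = 512. Each successive level of the
 * decode hierarchy stripes at a granularity that is the parent's
 * granularity scaled by the parent's ways.
 */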
static void cxl_port_reset_targets(struct cxl_port *port,
				   struct cxl_region *cxlr)
{
	struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
	struct cxl_decoder *cxld;

	/*
	 * After the last endpoint has been detached the entire cxl_rr may now
	 * be gone.
	 */
	if (!cxl_rr)
		return;
	cxl_rr->nr_targets_set = 0;

	cxld = cxl_rr->decoder;
	cxld->hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};
}
static void cxl_region_teardown_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_dev_state *cxlds;
	struct cxl_memdev *cxlmd;
	struct cxl_port *iter;
	struct cxl_ep *ep;
	int i;

	/*
	 * In the auto-discovery case skip automatic teardown since the
	 * address space is already active
	 */
	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
		return;

	for (i = 0; i < p->nr_targets; i++) {
		cxled = p->targets[i];
		cxlmd = cxled_to_memdev(cxled);
		cxlds = cxlmd->cxlds;

		if (cxlds->rcd)
			continue;

		iter = cxled_to_port(cxled);
		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
			cxl_port_reset_targets(iter, cxlr);
	}
}
static int cxl_region_setup_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_dev_state *cxlds;
	int i, rc, rch = 0, vh = 0;
	struct cxl_memdev *cxlmd;
	struct cxl_port *iter;
	struct cxl_ep *ep;

	for (i = 0; i < p->nr_targets; i++) {
		cxled = p->targets[i];
		cxlmd = cxled_to_memdev(cxled);
		cxlds = cxlmd->cxlds;

		/* validate that all targets agree on topology */
		if (!cxlds->rcd) {
			vh++;
		} else {
			rch++;
			continue;
		}

		iter = cxled_to_port(cxled);
		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		/*
		 * Descend the topology tree programming / validating
		 * targets while looking for conflicts.
		 */
		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
			rc = cxl_port_setup_targets(iter, cxlr, cxled);
			if (rc) {
				cxl_region_teardown_targets(cxlr);
				return rc;
			}
		}
	}

	if (rch && vh) {
		dev_err(&cxlr->dev, "mismatched CXL topologies detected\n");
		cxl_region_teardown_targets(cxlr);
		return -ENXIO;
	}

	return 0;
}
static int cxl_region_validate_position(struct cxl_region *cxlr,
					struct cxl_endpoint_decoder *cxled,
					int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region_params *p = &cxlr->params;
	int i;

	if (pos < 0 || pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		return -ENXIO;
	}

	if (p->targets[pos] == cxled)
		return 0;

	if (p->targets[pos]) {
		struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
		struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);

		dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
			pos, dev_name(&cxlmd_target->dev),
			dev_name(&cxled_target->cxld.dev));
		return -EBUSY;
	}

	for (i = 0; i < p->interleave_ways; i++) {
		struct cxl_endpoint_decoder *cxled_target;
		struct cxl_memdev *cxlmd_target;

		cxled_target = p->targets[i];
		if (!cxled_target)
			continue;

		cxlmd_target = cxled_to_memdev(cxled_target);
		if (cxlmd_target == cxlmd) {
			dev_dbg(&cxlr->dev,
				"%s already specified at position %d via: %s\n",
				dev_name(&cxlmd->dev), pos,
				dev_name(&cxled_target->cxld.dev));
			return -EBUSY;
		}
	}

	return 0;
}
static int cxl_region_attach_position(struct cxl_region *cxlr,
				      struct cxl_root_decoder *cxlrd,
				      struct cxl_endpoint_decoder *cxled,
				      const struct cxl_dport *dport, int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *iter;
	int rc;

	if (cxlrd->calc_hb(cxlrd, pos) != dport) {
		dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			dev_name(&cxlrd->cxlsd.cxld.dev));
		return -ENXIO;
	}

	for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent)) {
		rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
		if (rc)
			goto err;
	}

	return 0;

err:
	for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent))
		cxl_port_detach_region(iter, cxlr, cxled);
	return rc;
}
static int cxl_region_attach_auto(struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_region_params *p = &cxlr->params;

	if (cxled->state != CXL_DECODER_STATE_AUTO) {
		dev_err(&cxlr->dev,
			"%s: unable to add decoder to autodetected region\n",
			dev_name(&cxled->cxld.dev));
		return -EINVAL;
	}

	if (pos >= 0) {
		dev_dbg(&cxlr->dev, "%s: expected auto position, not %d\n",
			dev_name(&cxled->cxld.dev), pos);
		return -EINVAL;
	}

	if (p->nr_targets >= p->interleave_ways) {
		dev_err(&cxlr->dev, "%s: no more target slots available\n",
			dev_name(&cxled->cxld.dev));
		return -ENXIO;
	}

	/*
	 * Temporarily record the endpoint decoder into the target array. Yes,
	 * this means that userspace can view devices in the wrong position
	 * before the region activates, and must be careful to understand when
	 * it might be racing region autodiscovery.
	 */
	pos = p->nr_targets;
	p->targets[pos] = cxled;
	cxled->pos = pos;
	p->nr_targets++;

	return 0;
}
static struct cxl_port *next_port(struct cxl_port *port)
{
	if (!port->parent_dport)
		return NULL;
	return port->parent_dport->port;
}
static int decoder_match_range(struct device *dev, void *data)
{
	struct cxl_endpoint_decoder *cxled = data;
	struct cxl_switch_decoder *cxlsd;

	if (!is_switch_decoder(dev))
		return 0;

	cxlsd = to_cxl_switch_decoder(dev);
	return range_contains(&cxlsd->cxld.hpa_range, &cxled->cxld.hpa_range);
}
static void find_positions(const struct cxl_switch_decoder *cxlsd,
			   const struct cxl_port *iter_a,
			   const struct cxl_port *iter_b, int *a_pos,
			   int *b_pos)
{
	int i;

	for (i = 0, *a_pos = -1, *b_pos = -1; i < cxlsd->nr_targets; i++) {
		if (cxlsd->target[i] == iter_a->parent_dport)
			*a_pos = i;
		else if (cxlsd->target[i] == iter_b->parent_dport)
			*b_pos = i;
		if (*a_pos >= 0 && *b_pos >= 0)
			break;
	}
}
static int cmp_decode_pos(const void *a, const void *b)
{
	struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
	struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
	struct cxl_memdev *cxlmd_a = cxled_to_memdev(cxled_a);
	struct cxl_memdev *cxlmd_b = cxled_to_memdev(cxled_b);
	struct cxl_port *port_a = cxled_to_port(cxled_a);
	struct cxl_port *port_b = cxled_to_port(cxled_b);
	struct cxl_port *iter_a, *iter_b, *port = NULL;
	struct cxl_switch_decoder *cxlsd;
	struct device *dev;
	int a_pos, b_pos;
	unsigned int seq;

	/* Exit early if any prior sorting failed */
	if (cxled_a->pos < 0 || cxled_b->pos < 0)
		return 0;

	/*
	 * Walk up the hierarchy to find a shared port, find the decoder that
	 * maps the range, compare the relative position of those dport
	 * mappings.
	 */
	for (iter_a = port_a; iter_a; iter_a = next_port(iter_a)) {
		struct cxl_port *next_a, *next_b;

		next_a = next_port(iter_a);
		if (!next_a)
			break;

		for (iter_b = port_b; iter_b; iter_b = next_port(iter_b)) {
			next_b = next_port(iter_b);
			if (next_a != next_b)
				continue;
			port = next_a;
			break;
		}

		if (port)
			break;
	}

	if (!port) {
		dev_err(cxlmd_a->dev.parent,
			"failed to find shared port with %s\n",
			dev_name(cxlmd_b->dev.parent));
		goto err;
	}

	dev = device_find_child(&port->dev, cxled_a, decoder_match_range);
	if (!dev) {
		struct range *range = &cxled_a->cxld.hpa_range;

		dev_err(port->uport_dev,
			"failed to find decoder that maps %#llx-%#llx\n",
			range->start, range->end);
		goto err;
	}

	cxlsd = to_cxl_switch_decoder(dev);
	do {
		seq = read_seqbegin(&cxlsd->target_lock);
		find_positions(cxlsd, iter_a, iter_b, &a_pos, &b_pos);
	} while (read_seqretry(&cxlsd->target_lock, seq));

	put_device(dev);

	if (a_pos < 0 || b_pos < 0) {
		dev_err(port->uport_dev,
			"failed to find shared decoder for %s and %s\n",
			dev_name(cxlmd_a->dev.parent),
			dev_name(cxlmd_b->dev.parent));
		goto err;
	}

	dev_dbg(port->uport_dev, "%s comes %s %s\n",
		dev_name(cxlmd_a->dev.parent),
		a_pos - b_pos < 0 ? "before" : "after",
		dev_name(cxlmd_b->dev.parent));

	return a_pos - b_pos;
err:
	cxled_a->pos = -1;
	return 0;
}
static int cxl_region_sort_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	sort(p->targets, p->nr_targets, sizeof(p->targets[0]), cmp_decode_pos,
	     NULL);

	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];

		/*
		 * Record that sorting failed, but still continue to restore
		 * cxled->pos with its ->targets[] position so that follow-on
		 * code paths can reliably do p->targets[cxled->pos] to
		 * self-reference their entry.
		 */
		if (cxled->pos < 0)
			rc = -ENXIO;
		cxled->pos = i;
	}

	dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful");
	return rc;
}
static int cxl_region_attach(struct cxl_region *cxlr,
			     struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_port *ep_port, *root_port;
	struct cxl_dport *dport;
	int rc = -ENXIO;

	if (cxled->mode != cxlr->mode) {
		dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
			dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
		return -EINVAL;
	}

	if (cxled->mode == CXL_DECODER_DEAD) {
		dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
		return -ENODEV;
	}

	/* all full of members, or interleave config not established? */
	if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_dbg(&cxlr->dev, "region already active\n");
		return -EBUSY;
	} else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_dbg(&cxlr->dev, "interleave config missing\n");
		return -ENXIO;
	}

	ep_port = cxled_to_port(cxled);
	root_port = cxlrd_to_port(cxlrd);
	dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
	if (!dport) {
		dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			dev_name(cxlr->dev.parent));
		return -ENXIO;
	}

	if (cxled->cxld.target_type != cxlr->type) {
		dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			cxled->cxld.target_type, cxlr->type);
		return -ENXIO;
	}

	if (!cxled->dpa_res) {
		dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev));
		return -ENXIO;
	}

	if (resource_size(cxled->dpa_res) * p->interleave_ways !=
	    resource_size(p->res)) {
		dev_dbg(&cxlr->dev,
			"%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			(u64)resource_size(cxled->dpa_res), p->interleave_ways,
			(u64)resource_size(p->res));
		return -EINVAL;
	}

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		int i;

		rc = cxl_region_attach_auto(cxlr, cxled, pos);
		if (rc)
			return rc;

		/* await more targets to arrive... */
		if (p->nr_targets < p->interleave_ways)
			return 0;

		/*
		 * All targets are here, which implies all PCI enumeration that
		 * affects this region has been completed. Walk the topology to
		 * sort the devices into their relative region decode position.
		 */
		rc = cxl_region_sort_targets(cxlr);
		if (rc)
			return rc;

		for (i = 0; i < p->nr_targets; i++) {
			cxled = p->targets[i];
			ep_port = cxled_to_port(cxled);
			dport = cxl_find_dport_by_dev(root_port,
						      ep_port->host_bridge);
			rc = cxl_region_attach_position(cxlr, cxlrd, cxled,
							dport, i);
			if (rc)
				return rc;
		}

		rc = cxl_region_setup_targets(cxlr);
		if (rc)
			return rc;

		/*
		 * If target setup succeeds in the autodiscovery case
		 * then the region is already committed.
		 */
		p->state = CXL_CONFIG_COMMIT;

		return 0;
	}

	rc = cxl_region_validate_position(cxlr, cxled, pos);
	if (rc)
		return rc;

	rc = cxl_region_attach_position(cxlr, cxlrd, cxled, dport, pos);
	if (rc)
		return rc;

	p->targets[pos] = cxled;
	cxled->pos = pos;
	p->nr_targets++;

	if (p->nr_targets == p->interleave_ways) {
		rc = cxl_region_setup_targets(cxlr);
		if (rc)
			goto err_decrement;
		p->state = CXL_CONFIG_ACTIVE;
	}

	cxled->cxld.interleave_ways = p->interleave_ways;
	cxled->cxld.interleave_granularity = p->interleave_granularity;
	cxled->cxld.hpa_range = (struct range) {
		.start = p->res->start,
		.end = p->res->end,
	};

	return 0;

err_decrement:
	p->nr_targets--;
	cxled->pos = -1;
	p->targets[pos] = NULL;
	return rc;
}
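/*
 * Note: two attach flows converge above. For user-driven attach, the
 * position is validated and targets are programmed once all
 * interleave_ways slots are filled. For platform-established
 * (CXL_REGION_F_AUTO) regions, targets are collected in arrival order,
 * sorted by decode position, and the region moves straight to
 * CXL_CONFIG_COMMIT since the decoders are already active.
 */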
static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
	struct cxl_region *cxlr = cxled->cxld.region;
	struct cxl_region_params *p;
	int rc = 0;

	lockdep_assert_held_write(&cxl_region_rwsem);

	if (!cxlr)
		return 0;

	p = &cxlr->params;
	get_device(&cxlr->dev);

	if (p->state > CXL_CONFIG_ACTIVE) {
		/*
		 * TODO: tear down all impacted regions if a device is
		 * removed out of order
		 */
		rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
		if (rc)
			goto out;
		p->state = CXL_CONFIG_ACTIVE;
	}

	for (iter = ep_port; !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent))
		cxl_port_detach_region(iter, cxlr, cxled);

	if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
	    p->targets[cxled->pos] != cxled) {
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);

		dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
			      dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			      cxled->pos);
		goto out;
	}

	if (p->state == CXL_CONFIG_ACTIVE) {
		p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
		cxl_region_teardown_targets(cxlr);
	}
	p->targets[cxled->pos] = NULL;
	p->nr_targets--;
	cxled->cxld.hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};

	/* notify the region driver that one of its targets has departed */
	up_write(&cxl_region_rwsem);
	device_release_driver(&cxlr->dev);
	down_write(&cxl_region_rwsem);
out:
	put_device(&cxlr->dev);
	return rc;
}
void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
{
	down_write(&cxl_region_rwsem);
	cxled->mode = CXL_DECODER_DEAD;
	cxl_region_detach(cxled);
	up_write(&cxl_region_rwsem);
}
static int attach_target(struct cxl_region *cxlr,
			 struct cxl_endpoint_decoder *cxled, int pos,
			 unsigned int state)
{
	int rc = 0;

	if (state == TASK_INTERRUPTIBLE)
		rc = down_write_killable(&cxl_region_rwsem);
	else
		down_write(&cxl_region_rwsem);
	if (rc)
		return rc;

	down_read(&cxl_dpa_rwsem);
	rc = cxl_region_attach(cxlr, cxled, pos);
	up_read(&cxl_dpa_rwsem);
	up_write(&cxl_region_rwsem);
	return rc;
}
static int detach_target(struct cxl_region *cxlr, int pos)
{
	struct cxl_region_params *p = &cxlr->params;
	int rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		rc = -ENXIO;
		goto out;
	}

	if (!p->targets[pos]) {
		rc = 0;
		goto out;
	}

	rc = cxl_region_detach(p->targets[pos]);
out:
	up_write(&cxl_region_rwsem);
	return rc;
}
static size_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
			    size_t len)
{
	struct device *dev;
	int rc;

	if (sysfs_streq(buf, "\n"))
		rc = detach_target(cxlr, pos);
	else {
		dev = bus_find_device_by_name(&cxl_bus_type, NULL, buf);
		if (!dev)
			return -ENODEV;

		if (!is_endpoint_decoder(dev)) {
			rc = -EINVAL;
			goto out;
		}

		rc = attach_target(cxlr, to_cxl_endpoint_decoder(dev), pos,
				   TASK_INTERRUPTIBLE);
out:
		put_device(dev);
	}

	if (rc < 0)
		return rc;
	return len;
}
#define TARGET_ATTR_RW(n)						\
static ssize_t target##n##_show(					\
	struct device *dev, struct device_attribute *attr, char *buf)	\
{									\
	return show_targetN(to_cxl_region(dev), buf, (n));		\
}									\
static ssize_t target##n##_store(struct device *dev,			\
				 struct device_attribute *attr,		\
				 const char *buf, size_t len)		\
{									\
	return store_targetN(to_cxl_region(dev), buf, (n), len);	\
}									\
static DEVICE_ATTR_RW(target##n)

TARGET_ATTR_RW(0);
TARGET_ATTR_RW(1);
TARGET_ATTR_RW(2);
TARGET_ATTR_RW(3);
TARGET_ATTR_RW(4);
TARGET_ATTR_RW(5);
TARGET_ATTR_RW(6);
TARGET_ATTR_RW(7);
TARGET_ATTR_RW(8);
TARGET_ATTR_RW(9);
TARGET_ATTR_RW(10);
TARGET_ATTR_RW(11);
TARGET_ATTR_RW(12);
TARGET_ATTR_RW(13);
TARGET_ATTR_RW(14);
TARGET_ATTR_RW(15);
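/*
 * TARGET_ATTR_RW(n) expands to a targetN sysfs pair backed by
 * show_targetN()/store_targetN(). Example usage (the decoder name is
 * hypothetical):
 *
 *	echo decoder5.0 > target0	# attach an endpoint decoder at position 0
 *	echo "" > target0		# detach the current occupant
 */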
static struct attribute *target_attrs[] = {
	&dev_attr_target0.attr,
	&dev_attr_target1.attr,
	&dev_attr_target2.attr,
	&dev_attr_target3.attr,
	&dev_attr_target4.attr,
	&dev_attr_target5.attr,
	&dev_attr_target6.attr,
	&dev_attr_target7.attr,
	&dev_attr_target8.attr,
	&dev_attr_target9.attr,
	&dev_attr_target10.attr,
	&dev_attr_target11.attr,
	&dev_attr_target12.attr,
	&dev_attr_target13.attr,
	&dev_attr_target14.attr,
	&dev_attr_target15.attr,
	NULL,
};
static umode_t cxl_region_target_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;

	if (n < p->interleave_ways)
		return a->mode;
	return 0;
}
static const struct attribute_group cxl_region_target_group = {
	.attrs = target_attrs,
	.is_visible = cxl_region_target_visible,
};

static const struct attribute_group *get_cxl_region_target_group(void)
{
	return &cxl_region_target_group;
}
static const struct attribute_group *region_groups[] = {
	&cxl_base_attribute_group,
	&cxl_region_group,
	&cxl_region_target_group,
	NULL,
};
static void cxl_region_release(struct device *dev)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_region *cxlr = to_cxl_region(dev);
	int id = atomic_read(&cxlrd->region_id);

	/*
	 * Try to reuse the recently idled id rather than the cached
	 * next id to prevent the region id space from increasing
	 * unnecessarily.
	 */
	if (cxlr->id < id)
		if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) {
			memregion_free(id);
			goto out;
		}

	memregion_free(cxlr->id);
out:
	put_device(dev->parent);
	kfree(cxlr);
}
const struct device_type cxl_region_type = {
	.name = "cxl_region",
	.release = cxl_region_release,
	.groups = region_groups
};

bool is_cxl_region(struct device *dev)
{
	return dev->type == &cxl_region_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);
static struct cxl_region *to_cxl_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type,
			  "not a cxl_region device\n"))
		return NULL;

	return container_of(dev, struct cxl_region, dev);
}
static void unregister_region(void *dev)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int i;

	device_del(dev);

	/*
	 * Now that region sysfs is shutdown, the parameter block is
	 * read-only, so no need to hold the region rwsem to access the
	 * region parameters.
	 */
	for (i = 0; i < p->interleave_ways; i++)
		detach_target(cxlr, i);

	cxl_region_iomem_release(cxlr);
	put_device(dev);
}
static struct lock_class_key cxl_region_key;

static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id)
{
	struct cxl_region *cxlr;
	struct device *dev;

	cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL);
	if (!cxlr) {
		memregion_free(id);
		return ERR_PTR(-ENOMEM);
	}

	dev = &cxlr->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_region_key);
	dev->parent = &cxlrd->cxlsd.cxld.dev;
	/*
	 * Keep root decoder pinned through cxl_region_release to fixup
	 * region id allocations
	 */
	get_device(dev->parent);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_region_type;
	cxlr->id = id;

	return cxlr;
}
/**
 * devm_cxl_add_region - Adds a region to a decoder
 * @cxlrd: root decoder
 * @id: memregion id to create, or memregion_free() on failure
 * @mode: mode for the endpoint decoders of this region
 * @type: select whether this is an expander or accelerator (type-2 or type-3)
 *
 * This is the second step of region initialization. Regions exist within an
 * address space which is mapped by a @cxlrd.
 *
 * Return: 0 if the region was added to the @cxlrd, else returns negative error
 * code. The region will be named "regionZ" where Z is the unique region number.
 */
static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
					      int id,
					      enum cxl_decoder_mode mode,
					      enum cxl_decoder_type type)
{
	struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
	struct cxl_region *cxlr;
	struct device *dev;
	int rc;

	switch (mode) {
	case CXL_DECODER_RAM:
	case CXL_DECODER_PMEM:
		break;
	default:
		dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
		return ERR_PTR(-EINVAL);
	}

	cxlr = cxl_region_alloc(cxlrd, id);
	if (IS_ERR(cxlr))
		return cxlr;
	cxlr->mode = mode;
	cxlr->type = type;

	dev = &cxlr->dev;
	rc = dev_set_name(dev, "region%d", id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr);
	if (rc)
		return ERR_PTR(rc);

	dev_dbg(port->uport_dev, "%s: created %s\n",
		dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));

	return cxlr;

err:
	put_device(dev);
	return ERR_PTR(rc);
}
static ssize_t __create_region_show(struct cxl_root_decoder *cxlrd, char *buf)
{
	return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
}

static ssize_t create_pmem_region_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return __create_region_show(to_cxl_root_decoder(dev), buf);
}

static ssize_t create_ram_region_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	return __create_region_show(to_cxl_root_decoder(dev), buf);
}
static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
					  enum cxl_decoder_mode mode, int id)
{
	int rc;

	rc = memregion_alloc(GFP_KERNEL);
	if (rc < 0)
		return ERR_PTR(rc);

	if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
		memregion_free(rc);
		return ERR_PTR(-EBUSY);
	}

	return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
}
static ssize_t create_pmem_region_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_region *cxlr;
	int rc, id;

	rc = sscanf(buf, "region%d\n", &id);
	if (rc != 1)
		return -EINVAL;

	cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	return len;
}
DEVICE_ATTR_RW(create_pmem_region);
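/*
 * Usage note: reading create_pmem_region returns the next available region
 * name; writing that same name back claims the id. The atomic_cmpxchg() in
 * __create_region() makes racing writers of a stale name fail with -EBUSY:
 *
 *	cat create_pmem_region		# -> region0
 *	echo region0 > create_pmem_region
 */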
static ssize_t create_ram_region_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_region *cxlr;
	int rc, id;

	rc = sscanf(buf, "region%d\n", &id);
	if (rc != 1)
		return -EINVAL;

	cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	return len;
}
DEVICE_ATTR_RW(create_ram_region);
static ssize_t region_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (cxld->region)
		rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
	else
		rc = sysfs_emit(buf, "\n");
	up_read(&cxl_region_rwsem);

	return rc;
}
DEVICE_ATTR_RO(region);
static struct cxl_region *
cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
{
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct device *region_dev;

	region_dev = device_find_child_by_name(&cxld->dev, name);
	if (!region_dev)
		return ERR_PTR(-ENODEV);

	return to_cxl_region(region_dev);
}
static ssize_t delete_region_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_port *port = to_cxl_port(dev->parent);
	struct cxl_region *cxlr;

	cxlr = cxl_find_region_by_name(cxlrd, buf);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	devm_release_action(port->uport_dev, unregister_region, cxlr);
	put_device(&cxlr->dev);

	return len;
}
DEVICE_ATTR_WO(delete_region);
static void cxl_pmem_region_release(struct device *dev)
{
	struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
	int i;

	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
		struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;

		put_device(&cxlmd->dev);
	}

	kfree(cxlr_pmem);
}
static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

const struct device_type cxl_pmem_region_type = {
	.name = "cxl_pmem_region",
	.release = cxl_pmem_region_release,
	.groups = cxl_pmem_region_attribute_groups,
};

bool is_cxl_pmem_region(struct device *dev)
{
	return dev->type == &cxl_pmem_region_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);
struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
			  "not a cxl_pmem_region device\n"))
		return NULL;
	return container_of(dev, struct cxl_pmem_region, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);
struct cxl_poison_context {
	struct cxl_port *port;
	enum cxl_decoder_mode mode;
	u64 offset;
};
static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd,
				   struct cxl_poison_context *ctx)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	u64 offset, length;
	int rc = 0;

	/*
	 * Collect poison for the remaining unmapped resources
	 * after poison is collected by committed endpoints.
	 *
	 * Knowing that PMEM must always follow RAM, get poison
	 * for unmapped resources based on the last decoder's mode:
	 *	ram: scan remains of ram range, then any pmem range
	 *	pmem: scan remains of pmem range
	 */

	if (ctx->mode == CXL_DECODER_RAM) {
		offset = ctx->offset;
		length = resource_size(&cxlds->ram_res) - offset;
		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
		if (rc == -EFAULT)
			rc = 0;
		if (rc)
			return rc;
	}
	if (ctx->mode == CXL_DECODER_PMEM) {
		offset = ctx->offset;
		length = resource_size(&cxlds->dpa_res) - offset;
		if (!length)
			return 0;
	} else if (resource_size(&cxlds->pmem_res)) {
		offset = cxlds->pmem_res.start;
		length = resource_size(&cxlds->pmem_res);
	} else {
		return 0;
	}

	return cxl_mem_get_poison(cxlmd, offset, length, NULL);
}
static int poison_by_decoder(struct device *dev, void *arg)
{
	struct cxl_poison_context *ctx = arg;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_memdev *cxlmd;
	u64 offset, length;
	int rc = 0;

	if (!is_endpoint_decoder(dev))
		return rc;

	cxled = to_cxl_endpoint_decoder(dev);
	if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
		return rc;

	/*
	 * Regions are only created with single mode decoders: pmem or ram.
	 * Linux does not support mixed mode decoders. This means that
	 * reading poison per endpoint decoder adheres to the requirement
	 * that poison reads of pmem and ram must be separated.
	 * CXL 3.0 Spec 8.2.9.8.4.1
	 */
	if (cxled->mode == CXL_DECODER_MIXED) {
		dev_dbg(dev, "poison list read unsupported in mixed mode\n");
		return rc;
	}

	cxlmd = cxled_to_memdev(cxled);
	if (cxled->skip) {
		offset = cxled->dpa_res->start - cxled->skip;
		length = cxled->skip;
		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
		if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
			rc = 0;
		if (rc)
			return rc;
	}

	offset = cxled->dpa_res->start;
	length = cxled->dpa_res->end - offset + 1;
	rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region);
	if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
		rc = 0;
	if (rc)
		return rc;

	/* Iterate until commit_end is reached */
	if (cxled->cxld.id == ctx->port->commit_end) {
		ctx->offset = cxled->dpa_res->end + 1;
		ctx->mode = cxled->mode;
		return 1;
	}

	return 0;
}
*port
)
2423 struct cxl_poison_context ctx
;
2426 rc
= down_read_interruptible(&cxl_region_rwsem
);
2430 ctx
= (struct cxl_poison_context
) {
2434 rc
= device_for_each_child(&port
->dev
, &ctx
, poison_by_decoder
);
2436 rc
= cxl_get_poison_unmapped(to_cxl_memdev(port
->uport_dev
),
2439 up_read(&cxl_region_rwsem
);
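/*
 * Traversal contract: poison_by_decoder() returns 1 once the decoder
 * matching the port's commit_end has been processed, which stops
 * device_for_each_child() and records in @ctx where the mapped capacity
 * ended; cxl_get_poison_unmapped() then scans the unmapped remainder.
 */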
static struct lock_class_key cxl_pmem_region_key;

static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct cxl_pmem_region *cxlr_pmem;
	struct device *dev;
	int i;

	down_read(&cxl_region_rwsem);
	if (p->state != CXL_CONFIG_COMMIT) {
		cxlr_pmem = ERR_PTR(-ENXIO);
		goto out;
	}

	cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
			    GFP_KERNEL);
	if (!cxlr_pmem) {
		cxlr_pmem = ERR_PTR(-ENOMEM);
		goto out;
	}

	cxlr_pmem->hpa_range.start = p->res->start;
	cxlr_pmem->hpa_range.end = p->res->end;

	/* Snapshot the region configuration underneath the cxl_region_rwsem */
	cxlr_pmem->nr_mappings = p->nr_targets;
	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];

		/*
		 * Regions never span CXL root devices, so by definition the
		 * bridge for one device is the same for all.
		 */
		if (i == 0) {
			cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
			if (!cxl_nvb) {
				cxlr_pmem = ERR_PTR(-ENODEV);
				goto out;
			}
			cxlr->cxl_nvb = cxl_nvb;
		}
		m->cxlmd = cxlmd;
		get_device(&cxlmd->dev);
		m->start = cxled->dpa_res->start;
		m->size = resource_size(cxled->dpa_res);
		m->position = i;
	}

	dev = &cxlr_pmem->dev;
	cxlr_pmem->cxlr = cxlr;
	cxlr->cxlr_pmem = cxlr_pmem;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlr->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_pmem_region_type;
out:
	up_read(&cxl_region_rwsem);

	return cxlr_pmem;
}
static void cxl_dax_region_release(struct device *dev)
{
	struct cxl_dax_region *cxlr_dax = to_cxl_dax_region(dev);

	kfree(cxlr_dax);
}

static const struct attribute_group *cxl_dax_region_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

const struct device_type cxl_dax_region_type = {
	.name = "cxl_dax_region",
	.release = cxl_dax_region_release,
	.groups = cxl_dax_region_attribute_groups,
};

static bool is_cxl_dax_region(struct device *dev)
{
	return dev->type == &cxl_dax_region_type;
}

struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_cxl_dax_region(dev),
			  "not a cxl_dax_region device\n"))
		return NULL;
	return container_of(dev, struct cxl_dax_region, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, CXL);
2542 static struct lock_class_key cxl_dax_region_key
;
2544 static struct cxl_dax_region
*cxl_dax_region_alloc(struct cxl_region
*cxlr
)
2546 struct cxl_region_params
*p
= &cxlr
->params
;
2547 struct cxl_dax_region
*cxlr_dax
;
2550 down_read(&cxl_region_rwsem
);
2551 if (p
->state
!= CXL_CONFIG_COMMIT
) {
2552 cxlr_dax
= ERR_PTR(-ENXIO
);
2556 cxlr_dax
= kzalloc(sizeof(*cxlr_dax
), GFP_KERNEL
);
2558 cxlr_dax
= ERR_PTR(-ENOMEM
);
2562 cxlr_dax
->hpa_range
.start
= p
->res
->start
;
2563 cxlr_dax
->hpa_range
.end
= p
->res
->end
;
2565 dev
= &cxlr_dax
->dev
;
2566 cxlr_dax
->cxlr
= cxlr
;
2567 device_initialize(dev
);
2568 lockdep_set_class(&dev
->mutex
, &cxl_dax_region_key
);
2569 device_set_pm_not_required(dev
);
2570 dev
->parent
= &cxlr
->dev
;
2571 dev
->bus
= &cxl_bus_type
;
2572 dev
->type
= &cxl_dax_region_type
;
2574 up_read(&cxl_region_rwsem
);
2579 static void cxlr_pmem_unregister(void *_cxlr_pmem
)
2581 struct cxl_pmem_region
*cxlr_pmem
= _cxlr_pmem
;
2582 struct cxl_region
*cxlr
= cxlr_pmem
->cxlr
;
2583 struct cxl_nvdimm_bridge
*cxl_nvb
= cxlr
->cxl_nvb
;
2586 * Either the bridge is in ->remove() context under the device_lock(),
2587 * or cxlr_release_nvdimm() is cancelling the bridge's release action
2588 * for @cxlr_pmem and doing it itself (while manually holding the bridge
2591 device_lock_assert(&cxl_nvb
->dev
);
2592 cxlr
->cxlr_pmem
= NULL
;
2593 cxlr_pmem
->cxlr
= NULL
;
2594 device_unregister(&cxlr_pmem
->dev
);
2597 static void cxlr_release_nvdimm(void *_cxlr
)
2599 struct cxl_region
*cxlr
= _cxlr
;
2600 struct cxl_nvdimm_bridge
*cxl_nvb
= cxlr
->cxl_nvb
;
2602 device_lock(&cxl_nvb
->dev
);
2603 if (cxlr
->cxlr_pmem
)
2604 devm_release_action(&cxl_nvb
->dev
, cxlr_pmem_unregister
,
2606 device_unlock(&cxl_nvb
->dev
);
2607 cxlr
->cxl_nvb
= NULL
;
2608 put_device(&cxl_nvb
->dev
);
2612 * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
2613 * @cxlr: parent CXL region for this pmem region bridge device
2615 * Return: 0 on success negative error code on failure.
2617 static int devm_cxl_add_pmem_region(struct cxl_region
*cxlr
)
2619 struct cxl_pmem_region
*cxlr_pmem
;
2620 struct cxl_nvdimm_bridge
*cxl_nvb
;
2624 cxlr_pmem
= cxl_pmem_region_alloc(cxlr
);
2625 if (IS_ERR(cxlr_pmem
))
2626 return PTR_ERR(cxlr_pmem
);
2627 cxl_nvb
= cxlr
->cxl_nvb
;
2629 dev
= &cxlr_pmem
->dev
;
2630 rc
= dev_set_name(dev
, "pmem_region%d", cxlr
->id
);
2634 rc
= device_add(dev
);
2638 dev_dbg(&cxlr
->dev
, "%s: register %s\n", dev_name(dev
->parent
),
2641 device_lock(&cxl_nvb
->dev
);
2642 if (cxl_nvb
->dev
.driver
)
2643 rc
= devm_add_action_or_reset(&cxl_nvb
->dev
,
2644 cxlr_pmem_unregister
, cxlr_pmem
);
2647 device_unlock(&cxl_nvb
->dev
);
2652 /* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */
2653 return devm_add_action_or_reset(&cxlr
->dev
, cxlr_release_nvdimm
, cxlr
);
2658 put_device(&cxl_nvb
->dev
);
2659 cxlr
->cxl_nvb
= NULL
;
2663 static void cxlr_dax_unregister(void *_cxlr_dax
)
2665 struct cxl_dax_region
*cxlr_dax
= _cxlr_dax
;
2667 device_unregister(&cxlr_dax
->dev
);
2670 static int devm_cxl_add_dax_region(struct cxl_region
*cxlr
)
2672 struct cxl_dax_region
*cxlr_dax
;
2676 cxlr_dax
= cxl_dax_region_alloc(cxlr
);
2677 if (IS_ERR(cxlr_dax
))
2678 return PTR_ERR(cxlr_dax
);
2680 dev
= &cxlr_dax
->dev
;
2681 rc
= dev_set_name(dev
, "dax_region%d", cxlr
->id
);
2685 rc
= device_add(dev
);
2689 dev_dbg(&cxlr
->dev
, "%s: register %s\n", dev_name(dev
->parent
),
2692 return devm_add_action_or_reset(&cxlr
->dev
, cxlr_dax_unregister
,
2699 static int match_decoder_by_range(struct device
*dev
, void *data
)
2701 struct range
*r1
, *r2
= data
;
2702 struct cxl_root_decoder
*cxlrd
;
2704 if (!is_root_decoder(dev
))
2707 cxlrd
= to_cxl_root_decoder(dev
);
2708 r1
= &cxlrd
->cxlsd
.cxld
.hpa_range
;
2709 return range_contains(r1
, r2
);
2712 static int match_region_by_range(struct device
*dev
, void *data
)
2714 struct cxl_region_params
*p
;
2715 struct cxl_region
*cxlr
;
2716 struct range
*r
= data
;
2719 if (!is_cxl_region(dev
))
2722 cxlr
= to_cxl_region(dev
);
2725 down_read(&cxl_region_rwsem
);
2726 if (p
->res
&& p
->res
->start
== r
->start
&& p
->res
->end
== r
->end
)
2728 up_read(&cxl_region_rwsem
);
2733 /* Establish an empty region covering the given HPA range */
2734 static struct cxl_region
*construct_region(struct cxl_root_decoder
*cxlrd
,
2735 struct cxl_endpoint_decoder
*cxled
)
2737 struct cxl_memdev
*cxlmd
= cxled_to_memdev(cxled
);
2738 struct cxl_port
*port
= cxlrd_to_port(cxlrd
);
2739 struct range
*hpa
= &cxled
->cxld
.hpa_range
;
2740 struct cxl_region_params
*p
;
2741 struct cxl_region
*cxlr
;
2742 struct resource
*res
;
2746 cxlr
= __create_region(cxlrd
, cxled
->mode
,
2747 atomic_read(&cxlrd
->region_id
));
2748 } while (IS_ERR(cxlr
) && PTR_ERR(cxlr
) == -EBUSY
);
2751 dev_err(cxlmd
->dev
.parent
,
2752 "%s:%s: %s failed assign region: %ld\n",
2753 dev_name(&cxlmd
->dev
), dev_name(&cxled
->cxld
.dev
),
2754 __func__
, PTR_ERR(cxlr
));
2758 down_write(&cxl_region_rwsem
);
2760 if (p
->state
>= CXL_CONFIG_INTERLEAVE_ACTIVE
) {
2761 dev_err(cxlmd
->dev
.parent
,
2762 "%s:%s: %s autodiscovery interrupted\n",
2763 dev_name(&cxlmd
->dev
), dev_name(&cxled
->cxld
.dev
),
2769 set_bit(CXL_REGION_F_AUTO
, &cxlr
->flags
);
2771 res
= kmalloc(sizeof(*res
), GFP_KERNEL
);
2777 *res
= DEFINE_RES_MEM_NAMED(hpa
->start
, range_len(hpa
),
2778 dev_name(&cxlr
->dev
));
2779 rc
= insert_resource(cxlrd
->res
, res
);
2782 * Platform-firmware may not have split resources like "System
2783 * RAM" on CXL window boundaries see cxl_region_iomem_release()
2785 dev_warn(cxlmd
->dev
.parent
,
2786 "%s:%s: %s %s cannot insert resource\n",
2787 dev_name(&cxlmd
->dev
), dev_name(&cxled
->cxld
.dev
),
2788 __func__
, dev_name(&cxlr
->dev
));
2792 p
->interleave_ways
= cxled
->cxld
.interleave_ways
;
2793 p
->interleave_granularity
= cxled
->cxld
.interleave_granularity
;
2794 p
->state
= CXL_CONFIG_INTERLEAVE_ACTIVE
;
2796 rc
= sysfs_update_group(&cxlr
->dev
.kobj
, get_cxl_region_target_group());
2800 dev_dbg(cxlmd
->dev
.parent
, "%s:%s: %s %s res: %pr iw: %d ig: %d\n",
2801 dev_name(&cxlmd
->dev
), dev_name(&cxled
->cxld
.dev
), __func__
,
2802 dev_name(&cxlr
->dev
), p
->res
, p
->interleave_ways
,
2803 p
->interleave_granularity
);
2805 /* ...to match put_device() in cxl_add_to_region() */
2806 get_device(&cxlr
->dev
);
2807 up_write(&cxl_region_rwsem
);
2812 up_write(&cxl_region_rwsem
);
2813 devm_release_action(port
->uport_dev
, unregister_region
, cxlr
);
2817 int cxl_add_to_region(struct cxl_port
*root
, struct cxl_endpoint_decoder
*cxled
)
2819 struct cxl_memdev
*cxlmd
= cxled_to_memdev(cxled
);
2820 struct range
*hpa
= &cxled
->cxld
.hpa_range
;
2821 struct cxl_decoder
*cxld
= &cxled
->cxld
;
2822 struct device
*cxlrd_dev
, *region_dev
;
2823 struct cxl_root_decoder
*cxlrd
;
2824 struct cxl_region_params
*p
;
2825 struct cxl_region
*cxlr
;
2826 bool attach
= false;
2829 cxlrd_dev
= device_find_child(&root
->dev
, &cxld
->hpa_range
,
2830 match_decoder_by_range
);
2832 dev_err(cxlmd
->dev
.parent
,
2833 "%s:%s no CXL window for range %#llx:%#llx\n",
2834 dev_name(&cxlmd
->dev
), dev_name(&cxld
->dev
),
2835 cxld
->hpa_range
.start
, cxld
->hpa_range
.end
);
2839 cxlrd
= to_cxl_root_decoder(cxlrd_dev
);
2842 * Ensure that if multiple threads race to construct_region() for @hpa
2843 * one does the construction and the others add to that.
2845 mutex_lock(&cxlrd
->range_lock
);
2846 region_dev
= device_find_child(&cxlrd
->cxlsd
.cxld
.dev
, hpa
,
2847 match_region_by_range
);
2849 cxlr
= construct_region(cxlrd
, cxled
);
2850 region_dev
= &cxlr
->dev
;
2852 cxlr
= to_cxl_region(region_dev
);
2853 mutex_unlock(&cxlrd
->range_lock
);
2855 rc
= PTR_ERR_OR_ZERO(cxlr
);
2859 attach_target(cxlr
, cxled
, -1, TASK_UNINTERRUPTIBLE
);
2861 down_read(&cxl_region_rwsem
);
2863 attach
= p
->state
== CXL_CONFIG_COMMIT
;
2864 up_read(&cxl_region_rwsem
);
2868 * If device_attach() fails the range may still be active via
2869 * the platform-firmware memory map, otherwise the driver for
2870 * regions is local to this file, so driver matching can't fail.
2872 if (device_attach(&cxlr
->dev
) < 0)
2873 dev_err(&cxlr
->dev
, "failed to enable, range: %pr\n",
2877 put_device(region_dev
);
2879 put_device(cxlrd_dev
);
2882 EXPORT_SYMBOL_NS_GPL(cxl_add_to_region
, CXL
);
2884 static int is_system_ram(struct resource
*res
, void *arg
)
2886 struct cxl_region
*cxlr
= arg
;
2887 struct cxl_region_params
*p
= &cxlr
->params
;
2889 dev_dbg(&cxlr
->dev
, "%pr has System RAM: %pr\n", p
->res
, res
);
2893 static int cxl_region_probe(struct device
*dev
)
2895 struct cxl_region
*cxlr
= to_cxl_region(dev
);
2896 struct cxl_region_params
*p
= &cxlr
->params
;
2899 rc
= down_read_interruptible(&cxl_region_rwsem
);
2901 dev_dbg(&cxlr
->dev
, "probe interrupted\n");
2905 if (p
->state
< CXL_CONFIG_COMMIT
) {
2906 dev_dbg(&cxlr
->dev
, "config state: %d\n", p
->state
);
2911 if (test_bit(CXL_REGION_F_NEEDS_RESET
, &cxlr
->flags
)) {
2913 "failed to activate, re-commit region and retry\n");
2919 * From this point on any path that changes the region's state away from
2920 * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
2923 up_read(&cxl_region_rwsem
);
2928 switch (cxlr
->mode
) {
2929 case CXL_DECODER_PMEM
:
2930 return devm_cxl_add_pmem_region(cxlr
);
2931 case CXL_DECODER_RAM
:
2933 * The region can not be manged by CXL if any portion of
2934 * it is already online as 'System RAM'
2936 if (walk_iomem_res_desc(IORES_DESC_NONE
,
2937 IORESOURCE_SYSTEM_RAM
| IORESOURCE_BUSY
,
2938 p
->res
->start
, p
->res
->end
, cxlr
,
2941 return devm_cxl_add_dax_region(cxlr
);
2943 dev_dbg(&cxlr
->dev
, "unsupported region mode: %d\n",
2949 static struct cxl_driver cxl_region_driver
= {
2950 .name
= "cxl_region",
2951 .probe
= cxl_region_probe
,
2952 .id
= CXL_DEVICE_REGION
,
2955 int cxl_region_init(void)
2957 return cxl_driver_register(&cxl_region_driver
);
2960 void cxl_region_exit(void)
2962 cxl_driver_unregister(&cxl_region_driver
);
2965 MODULE_IMPORT_NS(CXL
);
2966 MODULE_IMPORT_NS(DEVMEM
);
2967 MODULE_ALIAS_CXL(CXL_DEVICE_REGION
);