#define MAX_RESOURCE_SIZE ((resource_size_t)-1)
void __init e820__reserve_resources_late(void)
{
-	int i;
-	struct resource *res;
-
-	res = e820_res;
-	for (i = 0; i < e820_table->nr_entries; i++) {
-		if (!res->parent && res->end)
-			insert_resource_expand_to_fit(&iomem_resource, res);
-		res++;
+	/*
+	 * Register device address regions listed in the E820 map,
+	 * these can be claimed by device drivers later on:
+	 */
+	for (u32 idx = 0; idx < e820_table->nr_entries; idx++) {
+		struct resource *res = e820_res + idx;
+
+		/* Skip already-inserted or uninitialized resources: */
+		if (res->parent || !res->end)
+			continue;
+
+		/* Set aside soft-reserved resources for driver consideration: */
+		if (res->desc == IORES_DESC_SOFT_RESERVED) {
+			insert_resource_expand_to_fit(&soft_reserve_resource, res);
+		} else {
+			/* Publish the rest immediately: */
+			insert_resource_expand_to_fit(&iomem_resource, res);
+		}
 	}
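
The effect of the hunk above is that soft-reserved ranges no longer land in
iomem_resource at boot; they are parked in a separate tree until a driver
decides whether to claim them. As a rough illustration (not part of the
patch), a consumer could enumerate that tree through the ordinary
struct resource child/sibling links; the helper below and its lock-free
walk are sketch-only assumptions:

#include <linux/ioport.h>
#include <linux/printk.h>

/* Assumed to be exported by this patch, analogous to iomem_resource: */
extern struct resource soft_reserve_resource;

/*
 * Sketch: print every set-aside soft-reserved range. A real walker
 * would need to take the resource lock; omitted here for brevity.
 */
static void __init dump_soft_reserved(void)
{
	struct resource *res;

	for (res = soft_reserve_resource.child; res; res = res->sibling)
		pr_info("soft-reserved: %pr\n", res);
}
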
/*
- * Try to bump up RAM regions to reasonable boundaries, to
- * avoid stolen RAM:
+ * Create additional 'gaps' at the end of RAM regions,
+ * rounding them up to 64k/1MB/64MB boundaries, should
+ * they be weirdly sized, and register extra, locked
+ * resource regions for them, to make sure drivers
+ * won't claim those addresses.
+ *
+ * These are basically blind guesses and heuristics to
+ * avoid resource conflicts with broken firmware that
+ * doesn't properly list 'stolen RAM' as a system region
+ * in the E820 map.
*/
-	for (i = 0; i < e820_table->nr_entries; i++) {
-		struct e820_entry *entry = &e820_table->entries[i];
+	for (u32 idx = 0; idx < e820_table->nr_entries; idx++) {
+		struct e820_entry *entry = &e820_table->entries[idx];
u64 start, end;
if (entry->type != E820_TYPE_RAM)
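
For reference, the 64k/1MB/64MB boundaries named in the new comment are not
introduced by this patch: they come from the existing ram_alignment() helper
in arch/x86/kernel/e820.c, quoted here as it stands upstream:

static unsigned long __init ram_alignment(resource_size_t pos)
{
	unsigned long mb = pos >> 20;

	/* To 64kB in the first megabyte */
	if (!mb)
		return 64*1024;

	/* To 1MB in the first 16MB */
	if (mb < 16)
		return 1024*1024;

	/* To 64MB for anything above that */
	return 64*1024*1024;
}

The loop body (trimmed from the quote above) rounds each RAM entry's end up
with round_up(start, ram_alignment(start)), clamps it to MAX_RESOURCE_SIZE
(the define quoted at the top), and registers the resulting gap as a locked
"RAM buffer" resource so drivers cannot claim it.

The second diff in this section is CXL region code: translating between
device physical addresses (DPA) and host physical addresses (HPA/SPA), with
new handling for MOD3 interleave regions that are not aligned at
hbiw * 256MB.
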
u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
u64 dpa)
{
- struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_root_decoder *cxlrd = cxlr->cxlrd;
+ struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
struct cxl_region_params *p = &cxlr->params;
struct cxl_endpoint_decoder *cxled = NULL;
- u64 dpa_offset, hpa_offset, hpa;
+ u64 base, dpa_offset, hpa_offset, hpa;
+ bool unaligned = false;
u16 eig = 0;
u8 eiw = 0;
int pos;
if (!cxled)
return ULLONG_MAX;
- pos = cxled->pos;
- ways_to_eiw(p->interleave_ways, &eiw);
- granularity_to_eig(p->interleave_granularity, &eig);
-
- dpa_offset = dpa - cxl_dpa_resource_start(cxled);
+ base = cxl_dpa_resource_start(cxled);
+ if (base == RESOURCE_SIZE_MAX)
+ return ULLONG_MAX;
+
+ dpa_offset = dpa - base;
+
+ /* Unaligned calc for MOD3 interleaves not hbiw * 256MB aligned */
+ unaligned = region_is_unaligned_mod3(cxlr);
+ if (unaligned) {
+ hpa = unaligned_dpa_to_hpa(cxld, p, cxled->pos, dpa_offset);
+ if (hpa == ULLONG_MAX)
+ return ULLONG_MAX;
+
+ goto skip_aligned;
+ }
+ /*
+ * Aligned calc for all power-of-2 interleaves and for MOD3
+ * interleaves that are aligned at hbiw * 256MB
+ */
+ pos = cxled->pos;
+ ways_to_eiw(p->interleave_ways, &eiw);
+ granularity_to_eig(p->interleave_granularity, &eig);
+
hpa_offset = cxl_calculate_hpa_offset(dpa_offset, pos, eiw, eig);
+ if (hpa_offset == ULLONG_MAX)
+ return ULLONG_MAX;
/* Apply the hpa_offset to the region base address */
- hpa = hpa_offset + p->res->start + p->cache_size;
+ hpa = hpa_offset + p->res->start;
+
+skip_aligned:
+ hpa += p->cache_size;
/* Root decoder translation overrides typical modulo decode */
if (cxlrd->ops.hpa_to_spa)
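
To make the 'aligned calc' branch concrete, here is a standalone sketch of
the power-of-2 modulo decode that cxl_calculate_hpa_offset() is presumed to
wrap (the helper name pow2_dpa_to_hpa_offset and the demo values are
illustrations, not the patch's code; the bit layout follows the CXL
specification's interleave math):

#include <stdint.h>
#include <stdio.h>

/*
 * Power-of-2 interleave: the HPA offset is the DPA offset with the
 * position bits re-inserted just above the granularity bits.
 *   eig: encoded granularity (granularity = 256 << eig)
 *   eiw: encoded ways (ways = 1 << eiw, valid for eiw < 8)
 */
static uint64_t pow2_dpa_to_hpa_offset(uint64_t dpa_offset, int pos,
				       uint8_t eiw, uint16_t eig)
{
	uint64_t lower = dpa_offset & ((1ULL << (eig + 8)) - 1);
	uint64_t upper = dpa_offset >> (eig + 8);

	return (((upper << eiw) | (uint64_t)pos) << (eig + 8)) | lower;
}

int main(void)
{
	/*
	 * 2-way interleave (eiw = 1) at 256B granularity (eig = 0):
	 * DPA chunk 1 on the device at position 1 maps to HPA chunk
	 * 1 * 2 + 1 = 3, i.e. HPA offset 0x300.
	 */
	printf("%#llx\n", (unsigned long long)
	       pow2_dpa_to_hpa_offset(0x100, 1, 1, 0));
	return 0;
}

The eiw >= 8 encodings (3-, 6- and 12-way) instead multiply the upper bits
by 3 before re-inserting the position, which is why regions that are not
aligned at hbiw * 256MB need the separate unaligned_dpa_to_hpa() path taken
above.
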
struct dpa_result *result)
{
struct cxl_region_params *p = &cxlr->params;
- struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_root_decoder *cxlrd = cxlr->cxlrd;
struct cxl_endpoint_decoder *cxled;
- u64 hpa, hpa_offset, dpa_offset;
+ u64 hpa_offset = offset;
+ u64 dpa, dpa_offset;
u16 eig = 0;
u8 eiw = 0;
int pos;
* CXL HPA is assumed to equal SPA.
*/
if (cxlrd->ops.spa_to_hpa) {
- hpa = cxlrd->ops.spa_to_hpa(cxlrd, p->res->start + offset);
- hpa_offset = hpa - p->res->start;
- } else {
- hpa_offset = offset;
+ hpa_offset = cxlrd->ops.spa_to_hpa(cxlrd, p->res->start + offset);
+ if (hpa_offset == ULLONG_MAX) {
+ dev_dbg(&cxlr->dev, "HPA not found for %pr offset %#llx\n",
+ p->res, offset);
+ return -ENXIO;
+ }
+ hpa_offset -= p->res->start;
}
+ if (region_is_unaligned_mod3(cxlr))
+ return unaligned_region_offset_to_dpa_result(cxlr, offset,
+ result);
+
pos = cxl_calculate_position(hpa_offset, eiw, eig);
if (pos < 0 || pos >= p->nr_targets) {
dev_dbg(&cxlr->dev, "Invalid position %d for %d targets\n",
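
For completeness, in the power-of-2 case the position recovery that
cxl_calculate_position() is expected to perform is the exact inverse of the
forward math sketched earlier; the helper name here is again an assumption:

#include <stdint.h>

/*
 * Sketch (not the patch's code): for eiw < 8 the target position is
 * the eiw bits sitting directly above the granularity bits of the
 * HPA offset. MOD3 interleaves need a divide-by-3 step instead, and
 * the unaligned MOD3 variants are diverted to
 * unaligned_region_offset_to_dpa_result() before this point.
 */
static int pow2_position(uint64_t hpa_offset, uint8_t eiw, uint16_t eig)
{
	return (int)((hpa_offset >> (eig + 8)) & ((1u << eiw) - 1));
}

With the forward example's values (HPA offset 0x300, eiw 1, eig 0) this
returns position 1, matching the device the chunk came from, after which
the remaining bits are folded back into a DPA offset for the endpoint
decoder at that position.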