acpi: numa: Create enum for memory_target access coordinates indexing
author Dave Jiang <dave.jiang@intel.com>
Thu, 21 Dec 2023 22:02:43 +0000 (15:02 -0700)
committer Dan Williams <dan.j.williams@intel.com>
Fri, 22 Dec 2023 22:23:13 +0000 (14:23 -0800)
Create an enum to provide named indexing for the access coordinate array.
This is in preparation for adding generic port support, which will add a
third index to the array to keep the generic port attributes separate from
the memory attributes.

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://lore.kernel.org/r/170319616332.2212653.3872789279950567889.stgit@djiang5-mobl3
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
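
The message anticipates a third access class for generic ports. As a rough
sketch of where the enum is headed (the NODE_ACCESS_CLASS_GENPORT_LOCAL name
below is illustrative only, not part of this commit), the named terminator
lets the coord[] array grow without touching its declaration:

	/* hypothetical follow-up: a separate class for generic port attributes */
	enum {
		NODE_ACCESS_CLASS_0 = 0,
		NODE_ACCESS_CLASS_1,
		NODE_ACCESS_CLASS_GENPORT_LOCAL,	/* illustrative name, not in this commit */
		NODE_ACCESS_CLASS_MAX,
	};

Because struct memory_target sizes coord[] with NODE_ACCESS_CLASS_MAX, adding
the new enumerator is enough to reserve the extra slot.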
drivers/acpi/numa/hmat.c

index 83bc2b69401bfb8ef0b72123d80ba2282abe240f..ca7aedfbb5f2d5035a4ae51bd04d6419c57bd53b 100644
@@ -58,12 +58,18 @@ struct target_cache {
        struct node_cache_attrs cache_attrs;
 };
 
+enum {
+       NODE_ACCESS_CLASS_0 = 0,
+       NODE_ACCESS_CLASS_1,
+       NODE_ACCESS_CLASS_MAX,
+};
+
 struct memory_target {
        struct list_head node;
        unsigned int memory_pxm;
        unsigned int processor_pxm;
        struct resource memregions;
-       struct access_coordinate coord[2];
+       struct access_coordinate coord[NODE_ACCESS_CLASS_MAX];
        struct list_head caches;
        struct node_cache_attrs cache_attrs;
        bool registered;
@@ -339,10 +345,12 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
                        if (mem_hier == ACPI_HMAT_MEMORY) {
                                target = find_mem_target(targs[targ]);
                                if (target && target->processor_pxm == inits[init]) {
-                                       hmat_update_target_access(target, type, value, 0);
+                                       hmat_update_target_access(target, type, value,
+                                                                 NODE_ACCESS_CLASS_0);
                                        /* If the node has a CPU, update access 1 */
                                        if (node_state(pxm_to_node(inits[init]), N_CPU))
-                                               hmat_update_target_access(target, type, value, 1);
+                                               hmat_update_target_access(target, type, value,
+                                                                         NODE_ACCESS_CLASS_1);
                                }
                        }
                }
@@ -726,8 +734,8 @@ static void hmat_register_target(struct memory_target *target)
        if (!target->registered) {
                hmat_register_target_initiators(target);
                hmat_register_target_cache(target);
-               hmat_register_target_perf(target, 0);
-               hmat_register_target_perf(target, 1);
+               hmat_register_target_perf(target, NODE_ACCESS_CLASS_0);
+               hmat_register_target_perf(target, NODE_ACCESS_CLASS_1);
                target->registered = true;
        }
        mutex_unlock(&target_lock);
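
With NODE_ACCESS_CLASS_MAX naming the bound, a site that must touch every
class could also iterate rather than spell out each call; a minimal sketch
under that assumption (not code from this commit):

	int i;

	for (i = 0; i < NODE_ACCESS_CLASS_MAX; i++)
		hmat_register_target_perf(target, i);

The explicit per-class calls above keep behavior identical to the pre-enum
code, which is why this commit stops at renaming the indices.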