git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
x86/intel_rdt: Prevent pseudo-locking from using stale pointers
author: Jithu Joseph <jithu.joseph@intel.com>
Fri, 12 Oct 2018 22:51:01 +0000 (15:51 -0700)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 1 Dec 2019 08:16:43 +0000 (09:16 +0100)
[ Upstream commit b61b8bba18fe2b63d38fdaf9b83de25e2d787dfe ]

When the last CPU in an rdt_domain goes offline, its rdt_domain struct gets
freed. Current pseudo-locking code is unaware of this scenario and tries to
dereference the freed structure in a few places.

Add checks to prevent pseudo-locking code from doing this.

While further work is needed to seamlessly restore resource groups (not
just pseudo-locking) to their configuration when the domain is brought back
online, the immediate issue of invalid pointers is addressed here.

Fixes: f4e80d67a5274 ("x86/intel_rdt: Resctrl files reflect pseudo-locked information")
Fixes: 443810fe61605 ("x86/intel_rdt: Create debugfs files for pseudo-locking testing")
Fixes: 746e08590b864 ("x86/intel_rdt: Create character device exposing pseudo-locked region")
Fixes: 33dc3e410a0d9 ("x86/intel_rdt: Make CPU information accessible for pseudo-locked regions")
Signed-off-by: Jithu Joseph <jithu.joseph@intel.com>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: fenghua.yu@intel.com
Cc: tony.luck@intel.com
Cc: gavin.hindman@intel.com
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/231f742dbb7b00a31cc104416860e27dba6b072d.1539384145.git.reinette.chatre@intel.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/x86/kernel/cpu/intel_rdt.c
arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c

index cc43c5abd187bbbcc0fce4d98c1faeb4b8f895df..b99a04da70f618d1668d66390a9bb749e364c798 100644 (file)
@@ -610,6 +610,13 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
                        cancel_delayed_work(&d->cqm_limbo);
                }
 
+               /*
+                * rdt_domain "d" is going to be freed below, so clear
+                * its pointer from pseudo_lock_region struct.
+                */
+               if (d->plr)
+                       d->plr->d = NULL;
+
                kfree(d->ctrl_val);
                kfree(d->mbps_val);
                kfree(d->rmid_busy_llc);
index 968ace3c6d730a2d83f494a7bcf58be3d7173356..c8b72aff55e00b9431104af2883ef6a71cfd8200 100644 (file)
@@ -408,8 +408,16 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of,
                        for_each_alloc_enabled_rdt_resource(r)
                                seq_printf(s, "%s:uninitialized\n", r->name);
                } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
-                       seq_printf(s, "%s:%d=%x\n", rdtgrp->plr->r->name,
-                                  rdtgrp->plr->d->id, rdtgrp->plr->cbm);
+                       if (!rdtgrp->plr->d) {
+                               rdt_last_cmd_clear();
+                               rdt_last_cmd_puts("Cache domain offline\n");
+                               ret = -ENODEV;
+                       } else {
+                               seq_printf(s, "%s:%d=%x\n",
+                                          rdtgrp->plr->r->name,
+                                          rdtgrp->plr->d->id,
+                                          rdtgrp->plr->cbm);
+                       }
                } else {
                        closid = rdtgrp->closid;
                        for_each_alloc_enabled_rdt_resource(r) {
index 912d53939f4f4ef6160196cf38718eeb5f0c10ae..a999a58ca33180ea3374b6ee570cd2426aea67f9 100644 (file)
@@ -1116,6 +1116,11 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
                goto out;
        }
 
+       if (!plr->d) {
+               ret = -ENODEV;
+               goto out;
+       }
+
        plr->thread_done = 0;
        cpu = cpumask_first(&plr->d->cpu_mask);
        if (!cpu_online(cpu)) {
@@ -1429,6 +1434,11 @@ static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma)
 
        plr = rdtgrp->plr;
 
+       if (!plr->d) {
+               mutex_unlock(&rdtgroup_mutex);
+               return -ENODEV;
+       }
+
        /*
         * Task is required to run with affinity to the cpus associated
         * with the pseudo-locked region. If this is not the case the task
index ad64031e82dcd47b59a4220fb917960fde379ead..a2d7e6646cce8034a632a78eb1095f53edcd42c3 100644 (file)
@@ -268,17 +268,27 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
                              struct seq_file *s, void *v)
 {
        struct rdtgroup *rdtgrp;
+       struct cpumask *mask;
        int ret = 0;
 
        rdtgrp = rdtgroup_kn_lock_live(of->kn);
 
        if (rdtgrp) {
-               if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
-                       seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
-                                  cpumask_pr_args(&rdtgrp->plr->d->cpu_mask));
-               else
+               if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
+                       if (!rdtgrp->plr->d) {
+                               rdt_last_cmd_clear();
+                               rdt_last_cmd_puts("Cache domain offline\n");
+                               ret = -ENODEV;
+                       } else {
+                               mask = &rdtgrp->plr->d->cpu_mask;
+                               seq_printf(s, is_cpu_list(of) ?
+                                          "%*pbl\n" : "%*pb\n",
+                                          cpumask_pr_args(mask));
+                       }
+               } else {
                        seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
                                   cpumask_pr_args(&rdtgrp->cpu_mask));
+               }
        } else {
                ret = -ENOENT;
        }
@@ -1286,6 +1296,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
        struct rdt_resource *r;
        struct rdt_domain *d;
        unsigned int size;
+       int ret = 0;
        bool sep;
        u32 ctrl;
 
@@ -1296,11 +1307,18 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
        }
 
        if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
-               seq_printf(s, "%*s:", max_name_width, rdtgrp->plr->r->name);
-               size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
-                                           rdtgrp->plr->d,
-                                           rdtgrp->plr->cbm);
-               seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
+               if (!rdtgrp->plr->d) {
+                       rdt_last_cmd_clear();
+                       rdt_last_cmd_puts("Cache domain offline\n");
+                       ret = -ENODEV;
+               } else {
+                       seq_printf(s, "%*s:", max_name_width,
+                                  rdtgrp->plr->r->name);
+                       size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
+                                                   rdtgrp->plr->d,
+                                                   rdtgrp->plr->cbm);
+                       seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
+               }
                goto out;
        }
 
@@ -1330,7 +1348,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
 out:
        rdtgroup_kn_unlock(of->kn);
 
-       return 0;
+       return ret;
 }
 
 /* rdtgroup information files for one cache resource. */