kvm-x86-host-feature-ssbd-doesn-t-imply-guest-featur.patch
scsi-target-remove-boilerplate-code.patch
scsi-target-fix-hang-when-multiple-threads-try-to-de.patch
+x86-microcode-amd-increase-microcode-patch_max_size.patch
+x86-intel_rdt-enumerate-l2-code-and-data-prioritization-cdp-feature.patch
+x86-intel_rdt-add-two-new-resources-for-l2-code-and-data-prioritization-cdp.patch
+x86-intel_rdt-enable-l2-cdp-in-msr-ia32_l2_qos_cfg.patch
+x86-resctrl-preserve-cdp-enable-over-cpu-hotplug.patch
+x86-resctrl-fix-invalid-attempt-at-removing-the-default-resource-group.patch
--- /dev/null
+From def10853930a82456ab862a3a8292a3a16c386e7 Mon Sep 17 00:00:00 2001
+From: Fenghua Yu <fenghua.yu@intel.com>
+Date: Wed, 20 Dec 2017 14:57:22 -0800
+Subject: x86/intel_rdt: Add two new resources for L2 Code and Data Prioritization (CDP)
+
+From: Fenghua Yu <fenghua.yu@intel.com>
+
+commit def10853930a82456ab862a3a8292a3a16c386e7 upstream.
+
+L2 data and L2 code are added as new resources in rdt_resources_all[]
+and data in the resources are configured.
+
+When L2 CDP is enabled, the schemata will have the two resources in
+this format:
+L2DATA:l2id0=xxxx;l2id1=xxxx;....
+L2CODE:l2id0=xxxx;l2id1=xxxx;....
+
+xxxx represent CBM (Cache Bit Mask) values in the schemata, similar to all
+others (L2 CAT/L3 CAT/L3 CDP).
+
+Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: "Ravi V Shankar" <ravi.v.shankar@intel.com>
+Cc: "Tony Luck" <tony.luck@intel.com>
+Cc: "Vikas" <vikas.shivappa@intel.com>
+Cc: "Sai Praneeth" <sai.praneeth.prakhya@intel.com>
+Cc: "Reinette" <reinette.chatre@intel.com>
+Link: https://lkml.kernel.org/r/1513810644-78015-5-git-send-email-fenghua.yu@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/intel_rdt.c | 66 +++++++++++++++++++++++++++++++++-------
+ arch/x86/kernel/cpu/intel_rdt.h | 2 +
+ 2 files changed, 58 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel_rdt.c
++++ b/arch/x86/kernel/cpu/intel_rdt.c
+@@ -135,6 +135,40 @@ struct rdt_resource rdt_resources_all[]
+ .format_str = "%d=%0*x",
+ .fflags = RFTYPE_RES_CACHE,
+ },
++ [RDT_RESOURCE_L2DATA] =
++ {
++ .rid = RDT_RESOURCE_L2DATA,
++ .name = "L2DATA",
++ .domains = domain_init(RDT_RESOURCE_L2DATA),
++ .msr_base = IA32_L2_CBM_BASE,
++ .msr_update = cat_wrmsr,
++ .cache_level = 2,
++ .cache = {
++ .min_cbm_bits = 1,
++ .cbm_idx_mult = 2,
++ .cbm_idx_offset = 0,
++ },
++ .parse_ctrlval = parse_cbm,
++ .format_str = "%d=%0*x",
++ .fflags = RFTYPE_RES_CACHE,
++ },
++ [RDT_RESOURCE_L2CODE] =
++ {
++ .rid = RDT_RESOURCE_L2CODE,
++ .name = "L2CODE",
++ .domains = domain_init(RDT_RESOURCE_L2CODE),
++ .msr_base = IA32_L2_CBM_BASE,
++ .msr_update = cat_wrmsr,
++ .cache_level = 2,
++ .cache = {
++ .min_cbm_bits = 1,
++ .cbm_idx_mult = 2,
++ .cbm_idx_offset = 1,
++ },
++ .parse_ctrlval = parse_cbm,
++ .format_str = "%d=%0*x",
++ .fflags = RFTYPE_RES_CACHE,
++ },
+ [RDT_RESOURCE_MBA] =
+ {
+ .rid = RDT_RESOURCE_MBA,
+@@ -259,15 +293,15 @@ static void rdt_get_cache_alloc_cfg(int
+ r->alloc_enabled = true;
+ }
+
+-static void rdt_get_cdp_l3_config(int type)
++static void rdt_get_cdp_config(int level, int type)
+ {
+- struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
++ struct rdt_resource *r_l = &rdt_resources_all[level];
+ struct rdt_resource *r = &rdt_resources_all[type];
+
+- r->num_closid = r_l3->num_closid / 2;
+- r->cache.cbm_len = r_l3->cache.cbm_len;
+- r->default_ctrl = r_l3->default_ctrl;
+- r->cache.shareable_bits = r_l3->cache.shareable_bits;
++ r->num_closid = r_l->num_closid / 2;
++ r->cache.cbm_len = r_l->cache.cbm_len;
++ r->default_ctrl = r_l->default_ctrl;
++ r->cache.shareable_bits = r_l->cache.shareable_bits;
+ r->data_width = (r->cache.cbm_len + 3) / 4;
+ r->alloc_capable = true;
+ /*
+@@ -277,6 +311,18 @@ static void rdt_get_cdp_l3_config(int ty
+ r->alloc_enabled = false;
+ }
+
++static void rdt_get_cdp_l3_config(void)
++{
++ rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA);
++ rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE);
++}
++
++static void rdt_get_cdp_l2_config(void)
++{
++ rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA);
++ rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
++}
++
+ static int get_cache_id(int cpu, int level)
+ {
+ struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
+@@ -729,15 +775,15 @@ static __init bool get_rdt_alloc_resourc
+
+ if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
+ rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
+- if (rdt_cpu_has(X86_FEATURE_CDP_L3)) {
+- rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA);
+- rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE);
+- }
++ if (rdt_cpu_has(X86_FEATURE_CDP_L3))
++ rdt_get_cdp_l3_config();
+ ret = true;
+ }
+ if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
+ /* CPUID 0x10.2 fields are same format at 0x10.1 */
+ rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
++ if (rdt_cpu_has(X86_FEATURE_CDP_L2))
++ rdt_get_cdp_l2_config();
+ ret = true;
+ }
+
+--- a/arch/x86/kernel/cpu/intel_rdt.h
++++ b/arch/x86/kernel/cpu/intel_rdt.h
+@@ -354,6 +354,8 @@ enum {
+ RDT_RESOURCE_L3DATA,
+ RDT_RESOURCE_L3CODE,
+ RDT_RESOURCE_L2,
++ RDT_RESOURCE_L2DATA,
++ RDT_RESOURCE_L2CODE,
+ RDT_RESOURCE_MBA,
+
+ /* Must be the last */
--- /dev/null
+From 99adde9b370de8e07ef76630c6f60dbf586cdf0e Mon Sep 17 00:00:00 2001
+From: Fenghua Yu <fenghua.yu@intel.com>
+Date: Wed, 20 Dec 2017 14:57:23 -0800
+Subject: x86/intel_rdt: Enable L2 CDP in MSR IA32_L2_QOS_CFG
+
+From: Fenghua Yu <fenghua.yu@intel.com>
+
+commit 99adde9b370de8e07ef76630c6f60dbf586cdf0e upstream.
+
+Bit 0 in MSR IA32_L2_QOS_CFG (0xc82) is L2 CDP enable bit. By default,
+the bit is zero, i.e. L2 CAT is enabled, and L2 CDP is disabled. When
+the resctrl mount parameter "cdpl2" is given, the bit is set to 1 and L2
+CDP is enabled.
+
+In L2 CDP mode, the L2 CAT mask MSRs are re-mapped into interleaved pairs
+of mask MSRs for code (referenced by an odd CLOSID) and data (referenced by
+an even CLOSID).
+
+Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: "Ravi V Shankar" <ravi.v.shankar@intel.com>
+Cc: "Tony Luck" <tony.luck@intel.com>
+Cc: "Vikas" <vikas.shivappa@intel.com>
+Cc: "Sai Praneeth" <sai.praneeth.prakhya@intel.com>
+Cc: "Reinette" <reinette.chatre@intel.com>
+Link: https://lkml.kernel.org/r/1513810644-78015-6-git-send-email-fenghua.yu@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/intel_rdt.h | 3
+ arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 117 ++++++++++++++++++++++++-------
+ 2 files changed, 94 insertions(+), 26 deletions(-)
+
+--- a/arch/x86/kernel/cpu/intel_rdt.h
++++ b/arch/x86/kernel/cpu/intel_rdt.h
+@@ -7,12 +7,15 @@
+ #include <linux/jump_label.h>
+
+ #define IA32_L3_QOS_CFG 0xc81
++#define IA32_L2_QOS_CFG 0xc82
+ #define IA32_L3_CBM_BASE 0xc90
+ #define IA32_L2_CBM_BASE 0xd10
+ #define IA32_MBA_THRTL_BASE 0xd50
+
+ #define L3_QOS_CDP_ENABLE 0x01ULL
+
++#define L2_QOS_CDP_ENABLE 0x01ULL
++
+ /*
+ * Event IDs are used to program IA32_QM_EVTSEL before reading event
+ * counter from IA32_QM_CTR
+--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
++++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+@@ -922,6 +922,7 @@ out_destroy:
+ kernfs_remove(kn);
+ return ret;
+ }
++
+ static void l3_qos_cfg_update(void *arg)
+ {
+ bool *enable = arg;
+@@ -929,8 +930,17 @@ static void l3_qos_cfg_update(void *arg)
+ wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
+ }
+
+-static int set_l3_qos_cfg(struct rdt_resource *r, bool enable)
++static void l2_qos_cfg_update(void *arg)
++{
++ bool *enable = arg;
++
++ wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
++}
++
++static int set_cache_qos_cfg(int level, bool enable)
+ {
++ void (*update)(void *arg);
++ struct rdt_resource *r_l;
+ cpumask_var_t cpu_mask;
+ struct rdt_domain *d;
+ int cpu;
+@@ -938,16 +948,24 @@ static int set_l3_qos_cfg(struct rdt_res
+ if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+- list_for_each_entry(d, &r->domains, list) {
++ if (level == RDT_RESOURCE_L3)
++ update = l3_qos_cfg_update;
++ else if (level == RDT_RESOURCE_L2)
++ update = l2_qos_cfg_update;
++ else
++ return -EINVAL;
++
++ r_l = &rdt_resources_all[level];
++ list_for_each_entry(d, &r_l->domains, list) {
+ /* Pick one CPU from each domain instance to update MSR */
+ cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+ }
+ cpu = get_cpu();
+ /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
+ if (cpumask_test_cpu(cpu, cpu_mask))
+- l3_qos_cfg_update(&enable);
++ update(&enable);
+ /* Update QOS_CFG MSR on all other cpus in cpu_mask. */
+- smp_call_function_many(cpu_mask, l3_qos_cfg_update, &enable, 1);
++ smp_call_function_many(cpu_mask, update, &enable, 1);
+ put_cpu();
+
+ free_cpumask_var(cpu_mask);
+@@ -955,52 +973,99 @@ static int set_l3_qos_cfg(struct rdt_res
+ return 0;
+ }
+
+-static int cdp_enable(void)
++static int cdp_enable(int level, int data_type, int code_type)
+ {
+- struct rdt_resource *r_l3data = &rdt_resources_all[RDT_RESOURCE_L3DATA];
+- struct rdt_resource *r_l3code = &rdt_resources_all[RDT_RESOURCE_L3CODE];
+- struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
++ struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
++ struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
++ struct rdt_resource *r_l = &rdt_resources_all[level];
+ int ret;
+
+- if (!r_l3->alloc_capable || !r_l3data->alloc_capable ||
+- !r_l3code->alloc_capable)
++ if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
++ !r_lcode->alloc_capable)
+ return -EINVAL;
+
+- ret = set_l3_qos_cfg(r_l3, true);
++ ret = set_cache_qos_cfg(level, true);
+ if (!ret) {
+- r_l3->alloc_enabled = false;
+- r_l3data->alloc_enabled = true;
+- r_l3code->alloc_enabled = true;
++ r_l->alloc_enabled = false;
++ r_ldata->alloc_enabled = true;
++ r_lcode->alloc_enabled = true;
+ }
+ return ret;
+ }
+
+-static void cdp_disable(void)
++static int cdpl3_enable(void)
++{
++ return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
++ RDT_RESOURCE_L3CODE);
++}
++
++static int cdpl2_enable(void)
++{
++ return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
++ RDT_RESOURCE_L2CODE);
++}
++
++static void cdp_disable(int level, int data_type, int code_type)
+ {
+- struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
++ struct rdt_resource *r = &rdt_resources_all[level];
+
+ r->alloc_enabled = r->alloc_capable;
+
+- if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled) {
+- rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled = false;
+- rdt_resources_all[RDT_RESOURCE_L3CODE].alloc_enabled = false;
+- set_l3_qos_cfg(r, false);
++ if (rdt_resources_all[data_type].alloc_enabled) {
++ rdt_resources_all[data_type].alloc_enabled = false;
++ rdt_resources_all[code_type].alloc_enabled = false;
++ set_cache_qos_cfg(level, false);
+ }
+ }
+
++static void cdpl3_disable(void)
++{
++ cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
++}
++
++static void cdpl2_disable(void)
++{
++ cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
++}
++
++static void cdp_disable_all(void)
++{
++ if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
++ cdpl3_disable();
++ if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
++ cdpl2_disable();
++}
++
+ static int parse_rdtgroupfs_options(char *data)
+ {
+ char *token, *o = data;
+ int ret = 0;
+
+ while ((token = strsep(&o, ",")) != NULL) {
+- if (!*token)
+- return -EINVAL;
++ if (!*token) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+- if (!strcmp(token, "cdp"))
+- ret = cdp_enable();
++ if (!strcmp(token, "cdp")) {
++ ret = cdpl3_enable();
++ if (ret)
++ goto out;
++ } else if (!strcmp(token, "cdpl2")) {
++ ret = cdpl2_enable();
++ if (ret)
++ goto out;
++ } else {
++ ret = -EINVAL;
++ goto out;
++ }
+ }
+
++ return 0;
++
++out:
++ pr_err("Invalid mount option \"%s\"\n", token);
++
+ return ret;
+ }
+
+@@ -1155,7 +1220,7 @@ out_mongrp:
+ out_info:
+ kernfs_remove(kn_info);
+ out_cdp:
+- cdp_disable();
++ cdp_disable_all();
+ out:
+ mutex_unlock(&rdtgroup_mutex);
+ cpus_read_unlock();
+@@ -1322,7 +1387,7 @@ static void rdt_kill_sb(struct super_blo
+ /*Put everything back to default values. */
+ for_each_alloc_enabled_rdt_resource(r)
+ reset_all_ctrls(r);
+- cdp_disable();
++ cdp_disable_all();
+ rmdir_all_sub();
+ static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
+ static_branch_disable_cpuslocked(&rdt_mon_enable_key);
--- /dev/null
+From a511e7935378ef1f321456a90beae2a2632d3d83 Mon Sep 17 00:00:00 2001
+From: Fenghua Yu <fenghua.yu@intel.com>
+Date: Wed, 20 Dec 2017 14:57:21 -0800
+Subject: x86/intel_rdt: Enumerate L2 Code and Data Prioritization (CDP) feature
+
+From: Fenghua Yu <fenghua.yu@intel.com>
+
+commit a511e7935378ef1f321456a90beae2a2632d3d83 upstream.
+
+L2 Code and Data Prioritization (CDP) is enumerated in
+CPUID(EAX=0x10, ECX=0x2):ECX.bit2
+
+Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: "Ravi V Shankar" <ravi.v.shankar@intel.com>
+Cc: "Tony Luck" <tony.luck@intel.com>
+Cc: "Vikas" <vikas.shivappa@intel.com>
+Cc: "Sai Praneeth" <sai.praneeth.prakhya@intel.com>
+Cc: "Reinette" <reinette.chatre@intel.com>
+Link: https://lkml.kernel.org/r/1513810644-78015-4-git-send-email-fenghua.yu@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/cpufeatures.h | 2 +-
+ arch/x86/kernel/cpu/scattered.c | 1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -205,7 +205,7 @@
+ #define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+ #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
+-
++#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
+ #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
+ #define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
+ #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -28,6 +28,7 @@ static const struct cpuid_bit cpuid_bits
+ { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 },
+ { X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 },
+ { X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 },
++ { X86_FEATURE_CDP_L2, CPUID_ECX, 2, 0x00000010, 2 },
+ { X86_FEATURE_MBA, CPUID_EBX, 3, 0x00000010, 0 },
+ { X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
+ { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
--- /dev/null
+From bdf89df3c54518eed879d8fac7577fcfb220c67e Mon Sep 17 00:00:00 2001
+From: John Allen <john.allen@amd.com>
+Date: Thu, 9 Apr 2020 10:34:29 -0500
+Subject: x86/microcode/AMD: Increase microcode PATCH_MAX_SIZE
+
+From: John Allen <john.allen@amd.com>
+
+commit bdf89df3c54518eed879d8fac7577fcfb220c67e upstream.
+
+Future AMD CPUs will have microcode patches that exceed the default 4K
+patch size. Raise our limit.
+
+Signed-off-by: John Allen <john.allen@amd.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: stable@vger.kernel.org # v4.14..
+Link: https://lkml.kernel.org/r/20200409152931.GA685273@mojo.amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/microcode_amd.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/microcode_amd.h
++++ b/arch/x86/include/asm/microcode_amd.h
+@@ -41,7 +41,7 @@ struct microcode_amd {
+ unsigned int mpb[0];
+ };
+
+-#define PATCH_MAX_SIZE PAGE_SIZE
++#define PATCH_MAX_SIZE (3 * PAGE_SIZE)
+
+ #ifdef CONFIG_MICROCODE_AMD
+ extern void __init load_ucode_amd_bsp(unsigned int family);
--- /dev/null
+From b0151da52a6d4f3951ea24c083e7a95977621436 Mon Sep 17 00:00:00 2001
+From: Reinette Chatre <reinette.chatre@intel.com>
+Date: Tue, 17 Mar 2020 09:26:45 -0700
+Subject: x86/resctrl: Fix invalid attempt at removing the default resource group
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+commit b0151da52a6d4f3951ea24c083e7a95977621436 upstream.
+
+The default resource group ("rdtgroup_default") is associated with the
+root of the resctrl filesystem and should never be removed. New resource
+groups can be created as subdirectories of the resctrl filesystem and
+they can be removed from user space.
+
+There exists a safeguard in the directory removal code
+(rdtgroup_rmdir()) that ensures that only subdirectories can be removed
+by testing that the directory to be removed has to be a child of the
+root directory.
+
+A possible deadlock was recently fixed with
+
+ 334b0f4e9b1b ("x86/resctrl: Fix a deadlock due to inaccurate reference").
+
+This fix involved associating the private data of the "mon_groups"
+and "mon_data" directories to the resource group to which they belong
+instead of NULL as before. A consequence of this change was that
+the original safeguard code preventing removal of "mon_groups" and
+"mon_data" found in the root directory failed resulting in attempts to
+remove the default resource group that ends in a BUG:
+
+ kernel BUG at mm/slub.c:3969!
+ invalid opcode: 0000 [#1] SMP PTI
+
+ Call Trace:
+ rdtgroup_rmdir+0x16b/0x2c0
+ kernfs_iop_rmdir+0x5c/0x90
+ vfs_rmdir+0x7a/0x160
+ do_rmdir+0x17d/0x1e0
+ do_syscall_64+0x55/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Fix this by improving the directory removal safeguard to ensure that
+subdirectories of the resctrl root directory can only be removed if they
+are a child of the resctrl filesystem's root _and_ not associated with
+the default resource group.
+
+Fixes: 334b0f4e9b1b ("x86/resctrl: Fix a deadlock due to inaccurate reference")
+Reported-by: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Tested-by: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/884cbe1773496b5dbec1b6bd11bb50cffa83603d.1584461853.git.reinette.chatre@intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
++++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+@@ -1918,7 +1918,8 @@ static int rdtgroup_rmdir(struct kernfs_
+ * If the rdtgroup is a mon group and parent directory
+ * is a valid "mon_groups" directory, remove the mon group.
+ */
+- if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn)
++ if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
++ rdtgrp != &rdtgroup_default)
+ ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
+ else if (rdtgrp->type == RDTMON_GROUP &&
+ is_mon_groups(parent_kn, kn->name))
--- /dev/null
+From 9fe0450785abbc04b0ed5d3cf61fcdb8ab656b4b Mon Sep 17 00:00:00 2001
+From: James Morse <james.morse@arm.com>
+Date: Fri, 21 Feb 2020 16:21:05 +0000
+Subject: x86/resctrl: Preserve CDP enable over CPU hotplug
+
+From: James Morse <james.morse@arm.com>
+
+commit 9fe0450785abbc04b0ed5d3cf61fcdb8ab656b4b upstream.
+
+Resctrl assumes that all CPUs are online when the filesystem is mounted,
+and that CPUs remember their CDP-enabled state over CPU hotplug.
+
+This goes wrong when resctrl's CDP-enabled state changes while all the
+CPUs in a domain are offline.
+
+When a domain comes online, enable (or disable!) CDP to match resctrl's
+current setting.
+
+Fixes: 5ff193fbde20 ("x86/intel_rdt: Add basic resctrl filesystem support")
+Suggested-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/20200221162105.154163-1-james.morse@arm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/intel_rdt.c | 2 ++
+ arch/x86/kernel/cpu/intel_rdt.h | 1 +
+ arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 13 +++++++++++++
+ 3 files changed, 16 insertions(+)
+
+--- a/arch/x86/kernel/cpu/intel_rdt.c
++++ b/arch/x86/kernel/cpu/intel_rdt.c
+@@ -532,6 +532,8 @@ static void domain_add_cpu(int cpu, stru
+ d->id = id;
+ cpumask_set_cpu(cpu, &d->cpu_mask);
+
++ rdt_domain_reconfigure_cdp(r);
++
+ if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
+ kfree(d);
+ return;
+--- a/arch/x86/kernel/cpu/intel_rdt.h
++++ b/arch/x86/kernel/cpu/intel_rdt.h
+@@ -442,5 +442,6 @@ void cqm_setup_limbo_handler(struct rdt_
+ void cqm_handle_limbo(struct work_struct *work);
+ bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
+ void __check_limbo(struct rdt_domain *d, bool force_free);
++void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
+
+ #endif /* _ASM_X86_INTEL_RDT_H */
+--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
++++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+@@ -1757,6 +1757,19 @@ out_unlock:
+ return ret;
+ }
+
++/* Restore the qos cfg state when a domain comes online */
++void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
++{
++ if (!r->alloc_capable)
++ return;
++
++ if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA])
++ l2_qos_cfg_update(&r->alloc_enabled);
++
++ if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA])
++ l3_qos_cfg_update(&r->alloc_enabled);
++}
++
+ /*
+ * We allow creating mon groups only with in a directory called "mon_groups"
+ * which is present in every ctrl_mon group. Check if this is a valid