--- /dev/null
+From dbbc93576e03fbe24b365fab0e901eb442237a8a Mon Sep 17 00:00:00 2001
+From: Bixuan Cui <cuibixuan@huawei.com>
+Date: Tue, 18 May 2021 11:31:17 +0800
+Subject: genirq/msi: Ensure deactivation on teardown
+
+From: Bixuan Cui <cuibixuan@huawei.com>
+
+commit dbbc93576e03fbe24b365fab0e901eb442237a8a upstream.
+
+msi_domain_alloc_irqs() invokes irq_domain_activate_irq(), but
+msi_domain_free_irqs() does not enforce deactivation before tearing down
+the interrupts.
+
+This happens when PCI/MSI interrupts are set up and never used before being
+torn down again, e.g. in error handling paths. The only place which cleans
+that up is the error handling path in msi_domain_alloc_irqs().
+
+Move the cleanup from msi_domain_alloc_irqs() into msi_domain_free_irqs()
+to cure that.
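+
+To illustrate the imbalance, consider a driver error path of roughly the
+following shape (hypothetical driver code; only the msi_domain_* calls are
+real, hypothetical_setup() is made up for illustration):
+
+	ret = msi_domain_alloc_irqs(domain, dev, nvec);
+	if (ret)
+		return ret;
+	/* The vectors may now be activated, but nothing has requested them */
+
+	ret = hypothetical_setup(dev);
+	if (ret) {
+		/* Used to free the still-activated vectors without deactivation */
+		msi_domain_free_irqs(domain, dev);
+		return ret;
+	}
+
+With the cleanup moved into msi_domain_free_irqs(), such paths deactivate
+any still-activated vector before the interrupts are torn down.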
+
+Fixes: f3b0946d629c ("genirq/msi: Make sure PCI MSIs are activated early")
+Signed-off-by: Bixuan Cui <cuibixuan@huawei.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20210518033117.78104-1-cuibixuan@huawei.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/irq/msi.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/kernel/irq/msi.c
++++ b/kernel/irq/msi.c
+@@ -477,11 +477,6 @@ skip_activate:
+ return 0;
+
+ cleanup:
+- for_each_msi_vector(desc, i, dev) {
+- irq_data = irq_domain_get_irq_data(domain, i);
+- if (irqd_is_activated(irq_data))
+- irq_domain_deactivate_irq(irq_data);
+- }
+ msi_domain_free_irqs(domain, dev);
+ return ret;
+ }
+@@ -494,7 +489,15 @@ cleanup:
+ */
+ void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
+ {
++ struct irq_data *irq_data;
+ struct msi_desc *desc;
++ int i;
++
++ for_each_msi_vector(desc, i, dev) {
++ irq_data = irq_domain_get_irq_data(domain, i);
++ if (irqd_is_activated(irq_data))
++ irq_domain_deactivate_irq(irq_data);
++ }
+
+ for_each_msi_entry(desc, dev) {
+ /*
--- /dev/null
+From 826da771291fc25a428e871f9e7fb465e390f852 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 29 Jul 2021 23:51:48 +0200
+Subject: genirq: Provide IRQCHIP_AFFINITY_PRE_STARTUP
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 826da771291fc25a428e871f9e7fb465e390f852 upstream.
+
+X86 IO/APIC and MSI interrupts (when used without interrupt remapping)
+require that the affinity setup on startup is done before the interrupt is
+enabled for the first time as the non-remapped operation mode cannot safely
+migrate enabled interrupts from arbitrary contexts. Provide a new irq chip
+flag which allows affected hardware to request this.
+
+This has to be opt-in because there have been reports in the past that some
+interrupt chips cannot handle affinity setting before startup.
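+
+An affected irq chip opts in by OR-ing the new flag into its flags field,
+for example (hypothetical chip shown for illustration; the real users are
+the x86 MSI and IO/APIC chips converted later in this series):
+
+	static struct irq_chip foo_msi_controller = {
+		.name	= "FOO-MSI",
+		/* ... callbacks elided ... */
+		.flags	= IRQCHIP_SKIP_SET_WAKE |
+			  IRQCHIP_AFFINITY_PRE_STARTUP,
+	};
+
+With the flag set, irq_startup() performs irq_setup_affinity() before
+__irq_startup() instead of after it, as the kernel/irq/chip.c hunk below
+shows.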
+
+Fixes: 18404756765c ("genirq: Expose default irq affinity mask (take 3)")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Marc Zyngier <maz@kernel.org>
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20210729222542.779791738@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/irq.h | 2 ++
+ kernel/irq/chip.c | 5 ++++-
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -542,6 +542,7 @@ struct irq_chip {
+ * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs
+ * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips
++ * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup
+ */
+ enum {
+ IRQCHIP_SET_TYPE_MASKED = (1 << 0),
+@@ -553,6 +554,7 @@ enum {
+ IRQCHIP_EOI_THREADED = (1 << 6),
+ IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7),
+ IRQCHIP_SUPPORTS_NMI = (1 << 8),
++ IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10),
+ };
+
+ #include <linux/irqdesc.h>
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -265,8 +265,11 @@ int irq_startup(struct irq_desc *desc, b
+ } else {
+ switch (__irq_startup_managed(desc, aff, force)) {
+ case IRQ_STARTUP_NORMAL:
++ if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
++ irq_setup_affinity(desc);
+ ret = __irq_startup(desc);
+- irq_setup_affinity(desc);
++ if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
++ irq_setup_affinity(desc);
+ break;
+ case IRQ_STARTUP_MANAGED:
+ irq_do_set_affinity(d, aff, false);
--- /dev/null
+From b9cc7d8a4656a6e815852c27ab50365009cb69c1 Mon Sep 17 00:00:00 2001
+From: Ben Dai <ben.dai@unisoc.com>
+Date: Sun, 25 Apr 2021 23:09:03 +0800
+Subject: genirq/timings: Prevent potential array overflow in __irq_timings_store()
+
+From: Ben Dai <ben.dai@unisoc.com>
+
+commit b9cc7d8a4656a6e815852c27ab50365009cb69c1 upstream.
+
+When the interrupt interval is greater than 2^PREDICTION_BUFFER_SIZE *
+PREDICTION_FACTOR us and less than 1s, the calculated index runs past the
+end of irqs->ema_time[]. Check the calculated index before using it to
+prevent an array overflow.
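+
+Worked example, assuming the in-tree constants (PREDICTION_BUFFER_SIZE is 16
+and PREDICTION_FACTOR is 4): the cut-off from the expression above is
+
+	2^16 * 4 us = 262144 us ~= 262 ms
+
+so an interval between roughly 262 ms and 1 s (longer intervals are already
+discarded earlier) yields an index of 16 or more, while the last valid slot
+of the circular buffer is irqs->ema_time[15].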
+
+Fixes: 23aa3b9a6b7d ("genirq/timings: Encapsulate storing function")
+Signed-off-by: Ben Dai <ben.dai@unisoc.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20210425150903.25456-1-ben.dai9703@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/irq/timings.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/kernel/irq/timings.c
++++ b/kernel/irq/timings.c
+@@ -453,6 +453,11 @@ static __always_inline void __irq_timing
+ */
+ index = irq_timings_interval_index(interval);
+
++ if (index > PREDICTION_BUFFER_SIZE - 1) {
++ irqs->count = 0;
++ return;
++ }
++
+ /*
+ * Store the index as an element of the pattern in another
+ * circular array.
--- /dev/null
+From 438553958ba19296663c6d6583d208dfb6792830 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 29 Jul 2021 23:51:40 +0200
+Subject: PCI/MSI: Enable and mask MSI-X early
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 438553958ba19296663c6d6583d208dfb6792830 upstream.
+
+The ordering of MSI-X enable in hardware is dysfunctional:
+
+ 1) MSI-X is disabled in the control register
+ 2) Various setup functions
+ 3) pci_msi_setup_msi_irqs() is invoked which ends up accessing
+ the MSI-X table entries
+ 4) MSI-X is enabled and masked in the control register with the
+ comment that enabling is required for some hardware to access
+ the MSI-X table
+
+Step #4 obviously contradicts #3. The history of this is an issue with the
+NIU hardware. When #4 was introduced the table access actually happened in
+msix_program_entries() which was invoked after enabling and masking MSI-X.
+
+This was changed in commit d71d6432e105 ("PCI/MSI: Kill redundant call of
+irq_set_msi_desc() for MSI-X interrupts") which removed the table write
+from msix_program_entries().
+
+Interestingly enough, nobody noticed; either NIU still works or it never
+got any testing with kernel 3.19 or later.
+
+Nevertheless this is inconsistent and there is no reason why MSI-X can't be
+enabled and masked in the control register early on, i.e. move step #4
+above to step #1. This preserves the NIU workaround and has no side effects
+on other hardware.
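+
+For reference, the control register helper used for this is a plain
+read-modify-write of the MSI-X Message Control word, roughly:
+
+	static void pci_msix_clear_and_set_ctrl(struct pci_dev *dev,
+						u16 clear, u16 set)
+	{
+		u16 ctrl;
+
+		pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+		ctrl &= ~clear;
+		ctrl |= set;
+		pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+	}
+
+So the new first step sets PCI_MSIX_FLAGS_ENABLE and PCI_MSIX_FLAGS_MASKALL
+before the table is touched, and the new out_disable error path clears
+PCI_MSIX_FLAGS_ENABLE again.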
+
+Fixes: d71d6432e105 ("PCI/MSI: Kill redundant call of irq_set_msi_desc() for MSI-X interrupts")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Marc Zyngier <maz@kernel.org>
+Reviewed-by: Ashok Raj <ashok.raj@intel.com>
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Acked-by: Bjorn Helgaas <bhelgaas@google.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20210729222542.344136412@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/msi.c | 28 +++++++++++++++-------------
+ 1 file changed, 15 insertions(+), 13 deletions(-)
+
+--- a/drivers/pci/msi.c
++++ b/drivers/pci/msi.c
+@@ -778,18 +778,25 @@ static int msix_capability_init(struct p
+ u16 control;
+ void __iomem *base;
+
+- /* Ensure MSI-X is disabled while it is set up */
+- pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
++ /*
++ * Some devices require MSI-X to be enabled before the MSI-X
++ * registers can be accessed. Mask all the vectors to prevent
++ * interrupts coming in before they're fully set up.
++ */
++ pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
++ PCI_MSIX_FLAGS_ENABLE);
+
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
+ /* Request & Map MSI-X table region */
+ base = msix_map_region(dev, msix_table_size(control));
+- if (!base)
+- return -ENOMEM;
++ if (!base) {
++ ret = -ENOMEM;
++ goto out_disable;
++ }
+
+ ret = msix_setup_entries(dev, base, entries, nvec, affd);
+ if (ret)
+- return ret;
++ goto out_disable;
+
+ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
+ if (ret)
+@@ -800,14 +807,6 @@ static int msix_capability_init(struct p
+ if (ret)
+ goto out_free;
+
+- /*
+- * Some devices require MSI-X to be enabled before we can touch the
+- * MSI-X registers. We need to mask all the vectors to prevent
+- * interrupts coming in before they're fully set up.
+- */
+- pci_msix_clear_and_set_ctrl(dev, 0,
+- PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
+-
+ msix_program_entries(dev, entries);
+
+ ret = populate_msi_sysfs(dev);
+@@ -842,6 +841,9 @@ out_avail:
+ out_free:
+ free_msi_irqs(dev);
+
++out_disable:
++ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
++
+ return ret;
+ }
+
nbd-aovid-double-completion-of-a-request.patch
powerpc-kprobes-fix-kprobe-oops-happens-in-booke.patch
x86-tools-fix-objdump-version-check-again.patch
+genirq-provide-irqchip_affinity_pre_startup.patch
+x86-msi-force-affinity-setup-before-startup.patch
+x86-ioapic-force-affinity-setup-before-startup.patch
+x86-resctrl-fix-default-monitoring-groups-reporting.patch
+genirq-msi-ensure-deactivation-on-teardown.patch
+genirq-timings-prevent-potential-array-overflow-in-__irq_timings_store.patch
+pci-msi-enable-and-mask-msi-x-early.patch
--- /dev/null
+From 0c0e37dc11671384e53ba6ede53a4d91162a2cc5 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 29 Jul 2021 23:51:49 +0200
+Subject: x86/ioapic: Force affinity setup before startup
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 0c0e37dc11671384e53ba6ede53a4d91162a2cc5 upstream.
+
+The IO/APIC cannot handle interrupt affinity changes safely after startup
+other than from an interrupt handler. The startup sequence in the generic
+interrupt code violates that assumption.
+
+Mark the irq chip with the new IRQCHIP_AFFINITY_PRE_STARTUP flag so that
+the default interrupt setting happens before the interrupt is started up
+for the first time.
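+
+With the flag set, the generic irq_startup() path (changed by the preceding
+"genirq: Provide IRQCHIP_AFFINITY_PRE_STARTUP" patch in this series) roughly
+becomes:
+
+	if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
+		irq_setup_affinity(desc);	/* while still shut down */
+	ret = __irq_startup(desc);
+
+so the affinity is programmed before the interrupt is started.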
+
+Fixes: 18404756765c ("genirq: Expose default irq affinity mask (take 3)")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Marc Zyngier <maz@kernel.org>
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20210729222542.832143400@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/apic/io_apic.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -1961,7 +1961,8 @@ static struct irq_chip ioapic_chip __rea
+ .irq_set_affinity = ioapic_set_affinity,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_get_irqchip_state = ioapic_irq_get_chip_state,
+- .flags = IRQCHIP_SKIP_SET_WAKE,
++ .flags = IRQCHIP_SKIP_SET_WAKE |
++ IRQCHIP_AFFINITY_PRE_STARTUP,
+ };
+
+ static struct irq_chip ioapic_ir_chip __read_mostly = {
+@@ -1974,7 +1975,8 @@ static struct irq_chip ioapic_ir_chip __
+ .irq_set_affinity = ioapic_set_affinity,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_get_irqchip_state = ioapic_irq_get_chip_state,
+- .flags = IRQCHIP_SKIP_SET_WAKE,
++ .flags = IRQCHIP_SKIP_SET_WAKE |
++ IRQCHIP_AFFINITY_PRE_STARTUP,
+ };
+
+ static inline void init_IO_APIC_traps(void)
--- /dev/null
+From ff363f480e5997051dd1de949121ffda3b753741 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 29 Jul 2021 23:51:50 +0200
+Subject: x86/msi: Force affinity setup before startup
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit ff363f480e5997051dd1de949121ffda3b753741 upstream.
+
+The X86 MSI mechanism cannot handle interrupt affinity changes safely after
+startup other than from an interrupt handler, unless interrupt remapping is
+enabled. The startup sequence in the generic interrupt code violates that
+assumption.
+
+Mark the irq chips with the new IRQCHIP_AFFINITY_PRE_STARTUP flag so that
+the default interrupt setting happens before the interrupt is started up
+for the first time.
+
+While the interrupt remapping MSI chip does not require this, there is no
+point in treating it differently as this might spare an interrupt to a CPU
+which is not in the default affinity mask.
+
+For the non-remapping case go to the direct write path when the interrupt
+is not yet started similar to the not yet activated case.
+
+Fixes: 18404756765c ("genirq: Expose default irq affinity mask (take 3)")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Marc Zyngier <maz@kernel.org>
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20210729222542.886722080@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/apic/msi.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/apic/msi.c
++++ b/arch/x86/kernel/apic/msi.c
+@@ -86,11 +86,13 @@ msi_set_affinity(struct irq_data *irqd,
+ * The quirk bit is not set in this case.
+ * - The new vector is the same as the old vector
+ * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up)
++ * - The interrupt is not yet started up
+ * - The new destination CPU is the same as the old destination CPU
+ */
+ if (!irqd_msi_nomask_quirk(irqd) ||
+ cfg->vector == old_cfg.vector ||
+ old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR ||
++ !irqd_is_started(irqd) ||
+ cfg->dest_apicid == old_cfg.dest_apicid) {
+ irq_msi_update_msg(irqd, cfg);
+ return ret;
+@@ -178,7 +180,8 @@ static struct irq_chip pci_msi_controlle
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_compose_msi_msg = irq_msi_compose_msg,
+ .irq_set_affinity = msi_set_affinity,
+- .flags = IRQCHIP_SKIP_SET_WAKE,
++ .flags = IRQCHIP_SKIP_SET_WAKE |
++ IRQCHIP_AFFINITY_PRE_STARTUP,
+ };
+
+ int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+@@ -279,7 +282,8 @@ static struct irq_chip pci_msi_ir_contro
+ .irq_ack = irq_chip_ack_parent,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent,
+- .flags = IRQCHIP_SKIP_SET_WAKE,
++ .flags = IRQCHIP_SKIP_SET_WAKE |
++ IRQCHIP_AFFINITY_PRE_STARTUP,
+ };
+
+ static struct msi_domain_info pci_msi_ir_domain_info = {
+@@ -322,7 +326,8 @@ static struct irq_chip dmar_msi_controll
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_compose_msi_msg = irq_msi_compose_msg,
+ .irq_write_msi_msg = dmar_msi_write_msg,
+- .flags = IRQCHIP_SKIP_SET_WAKE,
++ .flags = IRQCHIP_SKIP_SET_WAKE |
++ IRQCHIP_AFFINITY_PRE_STARTUP,
+ };
+
+ static irq_hw_number_t dmar_msi_get_hwirq(struct msi_domain_info *info,
+@@ -420,7 +425,7 @@ static struct irq_chip hpet_msi_controll
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_compose_msi_msg = irq_msi_compose_msg,
+ .irq_write_msi_msg = hpet_msi_write_msg,
+- .flags = IRQCHIP_SKIP_SET_WAKE,
++ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP,
+ };
+
+ static irq_hw_number_t hpet_msi_get_hwirq(struct msi_domain_info *info,
--- /dev/null
+From 064855a69003c24bd6b473b367d364e418c57625 Mon Sep 17 00:00:00 2001
+From: Babu Moger <Babu.Moger@amd.com>
+Date: Mon, 2 Aug 2021 14:38:58 -0500
+Subject: x86/resctrl: Fix default monitoring groups reporting
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Babu Moger <Babu.Moger@amd.com>
+
+commit 064855a69003c24bd6b473b367d364e418c57625 upstream.
+
+Creating a new sub monitoring group in the root /sys/fs/resctrl causes
+mbm_total_bytes and mbm_local_bytes to report "Unavailable" across the
+entire filesystem.
+
+Steps to reproduce:
+
+ 1. mount -t resctrl resctrl /sys/fs/resctrl/
+
+ 2. cd /sys/fs/resctrl/
+
+ 3. cat mon_data/mon_L3_00/mbm_total_bytes
+ 23189832
+
+ 4. Create sub monitor group:
+ mkdir mon_groups/test1
+
+ 5. cat mon_data/mon_L3_00/mbm_total_bytes
+ Unavailable
+
+When a new monitoring group is created, a new RMID is assigned to the
+new group. But the RMID is not active yet. When the events are read on
+the new RMID, it is expected to report the status as "Unavailable".
+
+When the user reads the events on the default monitoring group with
+multiple subgroups, the events on all subgroups are consolidated
+together. Currently, if any of the RMID reads report as "Unavailable",
+then everything will be reported as "Unavailable".
+
+Fix the issue by discarding the "Unavailable" reads and reporting the sum
+of the successful RMID reads. This is not a problem on Intel systems, as
+Intel hardware reports 0 for inactive RMIDs.
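+
+Concretely, with the reproducer above: the default group's own RMID still
+reads valid data (the 23189832 bytes from step 3), while the freshly created
+subgroup's RMID is not active yet and reads "Unavailable". Previously the
+subgroup's error overwrote the consolidated result, so the whole read showed
+"Unavailable"; with this fix the error read is discarded, the successful
+reads are summed, and "Unavailable" is reported only if every RMID read
+fails.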
+
+Fixes: d89b7379015f ("x86/intel_rdt/cqm: Add mon_data")
+Reported-by: Paweł Szulik <pawel.szulik@intel.com>
+Signed-off-by: Babu Moger <Babu.Moger@amd.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Reinette Chatre <reinette.chatre@intel.com>
+Cc: stable@vger.kernel.org
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=213311
+Link: https://lkml.kernel.org/r/162793309296.9224.15871659871696482080.stgit@bmoger-ubuntu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/resctrl/monitor.c | 27 +++++++++++++--------------
+ 1 file changed, 13 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/kernel/cpu/resctrl/monitor.c
++++ b/arch/x86/kernel/cpu/resctrl/monitor.c
+@@ -223,15 +223,14 @@ static u64 mbm_overflow_count(u64 prev_m
+ return chunks >>= shift;
+ }
+
+-static int __mon_event_count(u32 rmid, struct rmid_read *rr)
++static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
+ {
+ struct mbm_state *m;
+ u64 chunks, tval;
+
+ tval = __rmid_read(rmid, rr->evtid);
+ if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) {
+- rr->val = tval;
+- return -EINVAL;
++ return tval;
+ }
+ switch (rr->evtid) {
+ case QOS_L3_OCCUP_EVENT_ID:
+@@ -243,12 +242,6 @@ static int __mon_event_count(u32 rmid, s
+ case QOS_L3_MBM_LOCAL_EVENT_ID:
+ m = &rr->d->mbm_local[rmid];
+ break;
+- default:
+- /*
+- * Code would never reach here because
+- * an invalid event id would fail the __rmid_read.
+- */
+- return -EINVAL;
+ }
+
+ if (rr->first) {
+@@ -298,23 +291,29 @@ void mon_event_count(void *info)
+ struct rdtgroup *rdtgrp, *entry;
+ struct rmid_read *rr = info;
+ struct list_head *head;
++ u64 ret_val;
+
+ rdtgrp = rr->rgrp;
+
+- if (__mon_event_count(rdtgrp->mon.rmid, rr))
+- return;
++ ret_val = __mon_event_count(rdtgrp->mon.rmid, rr);
+
+ /*
+- * For Ctrl groups read data from child monitor groups.
++ * For Ctrl groups read data from child monitor groups and
++ * add them together. Count events which are read successfully.
++ * Discard the rmid_read's reporting errors.
+ */
+ head = &rdtgrp->mon.crdtgrp_list;
+
+ if (rdtgrp->type == RDTCTRL_GROUP) {
+ list_for_each_entry(entry, head, mon.crdtgrp_list) {
+- if (__mon_event_count(entry->mon.rmid, rr))
+- return;
++ if (__mon_event_count(entry->mon.rmid, rr) == 0)
++ ret_val = 0;
+ }
+ }
++
++ /* Report error if none of rmid_reads are successful */
++ if (ret_val)
++ rr->val = ret_val;
+ }
+
+ /*