6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 18 Jun 2024 13:29:15 +0000 (15:29 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 18 Jun 2024 13:29:15 +0000 (15:29 +0200)
added patches:
irqchip-gic-v3-its-fix-potential-race-condition-in-its_vlpi_prop_update.patch
null_blk-print-correct-max-open-zones-limit-in-null_init_zoned_dev.patch
perf-core-fix-missing-wakeup-when-waiting-for-context-reference.patch
riscv-fix-overlap-of-allocated-page-and-ptr_err.patch
tracing-selftests-fix-kprobe-event-name-test-for-.isra.-functions.patch
x86-amd_nb-check-for-invalid-smn-reads.patch

queue-6.1/irqchip-gic-v3-its-fix-potential-race-condition-in-its_vlpi_prop_update.patch [new file with mode: 0644]
queue-6.1/null_blk-print-correct-max-open-zones-limit-in-null_init_zoned_dev.patch [new file with mode: 0644]
queue-6.1/perf-core-fix-missing-wakeup-when-waiting-for-context-reference.patch [new file with mode: 0644]
queue-6.1/riscv-fix-overlap-of-allocated-page-and-ptr_err.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/tracing-selftests-fix-kprobe-event-name-test-for-.isra.-functions.patch [new file with mode: 0644]
queue-6.1/x86-amd_nb-check-for-invalid-smn-reads.patch [new file with mode: 0644]

diff --git a/queue-6.1/irqchip-gic-v3-its-fix-potential-race-condition-in-its_vlpi_prop_update.patch b/queue-6.1/irqchip-gic-v3-its-fix-potential-race-condition-in-its_vlpi_prop_update.patch
new file mode 100644
index 0000000..164d317
--- /dev/null
@@ -0,0 +1,141 @@
+From b97e8a2f7130a4b30d1502003095833d16c028b3 Mon Sep 17 00:00:00 2001
+From: Hagar Hemdan <hagarhem@amazon.com>
+Date: Fri, 31 May 2024 16:21:44 +0000
+Subject: irqchip/gic-v3-its: Fix potential race condition in its_vlpi_prop_update()
+
+From: Hagar Hemdan <hagarhem@amazon.com>
+
+commit b97e8a2f7130a4b30d1502003095833d16c028b3 upstream.
+
+its_vlpi_prop_update() calls lpi_write_config(), which obtains the
+mapping information for a VLPI without the lock held, so it can race
+with its_vlpi_unmap().
+
+Since all calls from its_irq_set_vcpu_affinity() require the same
+lock to be held, hoist the locking there instead of sprinkling the
+locking all over the place.
+
+This bug was discovered using Coverity Static Analysis Security Testing
+(SAST) by Synopsys, Inc.
+
+[ tglx: Use guard() instead of goto ]
+
+Fixes: 015ec0386ab6 ("irqchip/gic-v3-its: Add VLPI configuration handling")
+Suggested-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Hagar Hemdan <hagarhem@amazon.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Reviewed-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20240531162144.28650-1-hagarhem@amazon.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/irqchip/irq-gic-v3-its.c |   44 ++++++++++-----------------------------
+ 1 file changed, 12 insertions(+), 32 deletions(-)
+
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -1837,28 +1837,22 @@ static int its_vlpi_map(struct irq_data
+ {
+       struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+       u32 event = its_get_event_id(d);
+-      int ret = 0;
+       if (!info->map)
+               return -EINVAL;
+-      raw_spin_lock(&its_dev->event_map.vlpi_lock);
+-
+       if (!its_dev->event_map.vm) {
+               struct its_vlpi_map *maps;
+               maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
+                              GFP_ATOMIC);
+-              if (!maps) {
+-                      ret = -ENOMEM;
+-                      goto out;
+-              }
++              if (!maps)
++                      return -ENOMEM;
+               its_dev->event_map.vm = info->map->vm;
+               its_dev->event_map.vlpi_maps = maps;
+       } else if (its_dev->event_map.vm != info->map->vm) {
+-              ret = -EINVAL;
+-              goto out;
++              return -EINVAL;
+       }
+       /* Get our private copy of the mapping information */
+@@ -1890,46 +1884,32 @@ static int its_vlpi_map(struct irq_data
+               its_dev->event_map.nr_vlpis++;
+       }
+-out:
+-      raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+-      return ret;
++      return 0;
+ }
+ static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
+ {
+       struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+       struct its_vlpi_map *map;
+-      int ret = 0;
+-
+-      raw_spin_lock(&its_dev->event_map.vlpi_lock);
+       map = get_vlpi_map(d);
+-      if (!its_dev->event_map.vm || !map) {
+-              ret = -EINVAL;
+-              goto out;
+-      }
++      if (!its_dev->event_map.vm || !map)
++              return -EINVAL;
+       /* Copy our mapping information to the incoming request */
+       *info->map = *map;
+-out:
+-      raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+-      return ret;
++      return 0;
+ }
+ static int its_vlpi_unmap(struct irq_data *d)
+ {
+       struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+       u32 event = its_get_event_id(d);
+-      int ret = 0;
+-      raw_spin_lock(&its_dev->event_map.vlpi_lock);
+-
+-      if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
+-              ret = -EINVAL;
+-              goto out;
+-      }
++      if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
++              return -EINVAL;
+       /* Drop the virtual mapping */
+       its_send_discard(its_dev, event);
+@@ -1953,9 +1933,7 @@ static int its_vlpi_unmap(struct irq_dat
+               kfree(its_dev->event_map.vlpi_maps);
+       }
+-out:
+-      raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+-      return ret;
++      return 0;
+ }
+ static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
+@@ -1983,6 +1961,8 @@ static int its_irq_set_vcpu_affinity(str
+       if (!is_v4(its_dev->its))
+               return -EINVAL;
++      guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock);
++
+       /* Unmap request? */
+       if (!info)
+               return its_vlpi_unmap(d);
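
For context on the tglx note above: guard(raw_spinlock_irq) comes from the
kernel's scope-based cleanup infrastructure (<linux/cleanup.h>); it takes the
lock on declaration and releases it automatically when the scope exits, which
is what lets the helpers use plain returns instead of goto-based unlock paths.
A minimal sketch of the pattern, using stand-in types rather than the real
ITS structures:

    /* Sketch only: simplified stand-ins, not the actual ITS code. */
    #include <linux/cleanup.h>
    #include <linux/spinlock.h>
    #include <linux/errno.h>

    struct demo_dev {
            raw_spinlock_t vlpi_lock;
            void *vm;
    };

    /* Called with vlpi_lock held by the caller; plain returns are safe. */
    static int demo_helper(struct demo_dev *d)
    {
            if (!d->vm)
                    return -EINVAL;
            return 0;
    }

    static int demo_set_vcpu_affinity(struct demo_dev *d)
    {
            /* Acquired once here, dropped automatically at scope exit. */
            guard(raw_spinlock_irq)(&d->vlpi_lock);

            return demo_helper(d);
    }
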
diff --git a/queue-6.1/null_blk-print-correct-max-open-zones-limit-in-null_init_zoned_dev.patch b/queue-6.1/null_blk-print-correct-max-open-zones-limit-in-null_init_zoned_dev.patch
new file mode 100644
index 0000000..96e4819
--- /dev/null
@@ -0,0 +1,34 @@
+From 233e27b4d21c3e44eb863f03e566d3a22e81a7ae Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <dlemoal@kernel.org>
+Date: Tue, 28 May 2024 15:28:52 +0900
+Subject: null_blk: Print correct max open zones limit in null_init_zoned_dev()
+
+From: Damien Le Moal <dlemoal@kernel.org>
+
+commit 233e27b4d21c3e44eb863f03e566d3a22e81a7ae upstream.
+
+When changing the maximum number of open zones, print that number
+instead of the total number of zones.
+
+Fixes: dc4d137ee3b7 ("null_blk: add support for max open/active zone limit for zoned devices")
+Cc: stable@vger.kernel.org
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Reviewed-by: Niklas Cassel <cassel@kernel.org>
+Link: https://lore.kernel.org/r/20240528062852.437599-1-dlemoal@kernel.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/null_blk/zoned.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/block/null_blk/zoned.c
++++ b/drivers/block/null_blk/zoned.c
+@@ -112,7 +112,7 @@ int null_init_zoned_dev(struct nullb_dev
+       if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
+               dev->zone_max_open = dev->zone_max_active;
+               pr_info("changed the maximum number of open zones to %u\n",
+-                      dev->nr_zones);
++                      dev->zone_max_open);
+       } else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
+               dev->zone_max_open = 0;
+               pr_info("zone_max_open limit disabled, limit >= zone count\n");
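
A quick way to observe the corrected message (a hedged sketch; zoned,
zone_max_active and zone_max_open are null_blk module parameters, and the
values are arbitrary examples):

    # Load null_blk with max_open above max_active to trigger the clamp.
    modprobe null_blk zoned=1 zone_max_active=4 zone_max_open=8
    # Before the fix this message printed the total zone count; with the
    # fix it prints the clamped limit (4 here).
    dmesg | grep 'maximum number of open zones'
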
diff --git a/queue-6.1/perf-core-fix-missing-wakeup-when-waiting-for-context-reference.patch b/queue-6.1/perf-core-fix-missing-wakeup-when-waiting-for-context-reference.patch
new file mode 100644
index 0000000..87a12c5
--- /dev/null
@@ -0,0 +1,106 @@
+From 74751ef5c1912ebd3e65c3b65f45587e05ce5d36 Mon Sep 17 00:00:00 2001
+From: Haifeng Xu <haifeng.xu@shopee.com>
+Date: Mon, 13 May 2024 10:39:48 +0000
+Subject: perf/core: Fix missing wakeup when waiting for context reference
+
+From: Haifeng Xu <haifeng.xu@shopee.com>
+
+commit 74751ef5c1912ebd3e65c3b65f45587e05ce5d36 upstream.
+
+In our production environment, we found many hung tasks which are
+blocked for more than 18 hours. Their call traces are like this:
+
+[346278.191038] __schedule+0x2d8/0x890
+[346278.191046] schedule+0x4e/0xb0
+[346278.191049] perf_event_free_task+0x220/0x270
+[346278.191056] ? init_wait_var_entry+0x50/0x50
+[346278.191060] copy_process+0x663/0x18d0
+[346278.191068] kernel_clone+0x9d/0x3d0
+[346278.191072] __do_sys_clone+0x5d/0x80
+[346278.191076] __x64_sys_clone+0x25/0x30
+[346278.191079] do_syscall_64+0x5c/0xc0
+[346278.191083] ? syscall_exit_to_user_mode+0x27/0x50
+[346278.191086] ? do_syscall_64+0x69/0xc0
+[346278.191088] ? irqentry_exit_to_user_mode+0x9/0x20
+[346278.191092] ? irqentry_exit+0x19/0x30
+[346278.191095] ? exc_page_fault+0x89/0x160
+[346278.191097] ? asm_exc_page_fault+0x8/0x30
+[346278.191102] entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+The task was waiting for the refcount to become 1, but from the vmcore
+we found that the refcount was already 1. It seems that the task never
+got woken up by perf_event_release_kernel() and got stuck forever. The
+scenario below can cause the problem.
+
+Thread A                                       Thread B
+...                                            ...
+perf_event_free_task                           perf_event_release_kernel
+                                                  ...
+                                                  acquire event->child_mutex
+                                                  ...
+                                                  get_ctx
+   ...                                            release event->child_mutex
+   acquire ctx->mutex
+   ...
+   perf_free_event (acquire/release event->child_mutex)
+   ...
+   release ctx->mutex
+   wait_var_event
+                                                  acquire ctx->mutex
+                                                  acquire event->child_mutex
+                                                  # move existing events to free_list
+                                                  release event->child_mutex
+                                                  release ctx->mutex
+                                                  put_ctx
+...                                            ...
+
+In this case, all events of the ctx have been freed, so the ctx cannot
+be found in free_list and Thread A misses the wakeup. It is thus
+necessary to add a wakeup after dropping the reference.
+
+Fixes: 1cf8dfe8a661 ("perf/core: Fix race between close() and fork()")
+Signed-off-by: Haifeng Xu <haifeng.xu@shopee.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20240513103948.33570-1-haifeng.xu@shopee.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/events/core.c |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5182,6 +5182,7 @@ int perf_event_release_kernel(struct per
+ again:
+       mutex_lock(&event->child_mutex);
+       list_for_each_entry(child, &event->child_list, child_list) {
++              void *var = NULL;
+               /*
+                * Cannot change, child events are not migrated, see the
+@@ -5222,11 +5223,23 @@ again:
+                        * this can't be the last reference.
+                        */
+                       put_event(event);
++              } else {
++                      var = &ctx->refcount;
+               }
+               mutex_unlock(&event->child_mutex);
+               mutex_unlock(&ctx->mutex);
+               put_ctx(ctx);
++
++              if (var) {
++                      /*
++                       * If perf_event_free_task() has deleted all events from the
++                       * ctx while the child_mutex got released above, make sure to
++                       * notify about the preceding put_ctx().
++                       */
++                      smp_mb(); /* pairs with wait_var_event() */
++                      wake_up_var(var);
++              }
+               goto again;
+       }
+       mutex_unlock(&event->child_mutex);
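
The underlying contract here is the wait_var_event()/wake_up_var() pairing
from <linux/wait_bit.h>: a task sleeping on a variable's address is only
woken by a wake_up_var() on that same address, and the waker needs a full
barrier so its update is visible before the waiter re-evaluates the
condition. A minimal sketch of that contract (simplified, not the actual
perf code):

    /* Sketch only: demonstrates the pairing, not the real perf logic. */
    #include <linux/wait_bit.h>
    #include <linux/refcount.h>

    static void waiter(refcount_t *r)
    {
            /* Sleeps until the condition holds; rechecked on every wakeup. */
            wait_var_event(r, refcount_read(r) == 1);
    }

    static void releaser(refcount_t *r)
    {
            refcount_dec(r);
            smp_mb();       /* pairs with the barrier in wait_var_event() */
            wake_up_var(r); /* without this, waiter() can sleep forever   */
    }
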
diff --git a/queue-6.1/riscv-fix-overlap-of-allocated-page-and-ptr_err.patch b/queue-6.1/riscv-fix-overlap-of-allocated-page-and-ptr_err.patch
new file mode 100644
index 0000000..dc7af5d
--- /dev/null
@@ -0,0 +1,69 @@
+From 994af1825a2aa286f4903ff64a1c7378b52defe6 Mon Sep 17 00:00:00 2001
+From: Nam Cao <namcao@linutronix.de>
+Date: Thu, 25 Apr 2024 13:52:01 +0200
+Subject: riscv: fix overlap of allocated page and PTR_ERR
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Nam Cao <namcao@linutronix.de>
+
+commit 994af1825a2aa286f4903ff64a1c7378b52defe6 upstream.
+
+On riscv32, it is possible for the last page in virtual address space
+(0xfffff000) to be allocated. This page overlaps with PTR_ERR, so that
+shouldn't happen.
+
+There is already some code to ensure memblock won't allocate the last page.
+However, the buddy allocator is left unchecked.
+
+Fix this by reserving physical memory that would be mapped at virtual
+addresses greater than 0xfffff000.
+
+Reported-by: Björn Töpel <bjorn@kernel.org>
+Closes: https://lore.kernel.org/linux-riscv/878r1ibpdn.fsf@all.your.base.are.belong.to.us
+Fixes: 76d2a0493a17 ("RISC-V: Init and Halt Code")
+Signed-off-by: Nam Cao <namcao@linutronix.de>
+Cc: <stable@vger.kernel.org>
+Tested-by: Björn Töpel <bjorn@rivosinc.com>
+Reviewed-by: Björn Töpel <bjorn@rivosinc.com>
+Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
+Link: https://lore.kernel.org/r/20240425115201.3044202-1-namcao@linutronix.de
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/riscv/mm/init.c |   21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+--- a/arch/riscv/mm/init.c
++++ b/arch/riscv/mm/init.c
+@@ -213,18 +213,19 @@ static void __init setup_bootmem(void)
+       if (!IS_ENABLED(CONFIG_XIP_KERNEL))
+               phys_ram_base = memblock_start_of_DRAM();
+       /*
+-       * memblock allocator is not aware of the fact that last 4K bytes of
+-       * the addressable memory can not be mapped because of IS_ERR_VALUE
+-       * macro. Make sure that last 4k bytes are not usable by memblock
+-       * if end of dram is equal to maximum addressable memory.  For 64-bit
+-       * kernel, this problem can't happen here as the end of the virtual
+-       * address space is occupied by the kernel mapping then this check must
+-       * be done as soon as the kernel mapping base address is determined.
++       * Reserve physical address space that would be mapped to virtual
++       * addresses greater than (void *)(-PAGE_SIZE) because:
++       *  - This memory would overlap with ERR_PTR
++       *  - This memory belongs to high memory, which is not supported
++       *
++       * This is not applicable to 64-bit kernel, because virtual addresses
++       * after (void *)(-PAGE_SIZE) are not linearly mapped: they are
++       * occupied by kernel mapping. Also it is unrealistic for high memory
++       * to exist on 64-bit platforms.
+        */
+       if (!IS_ENABLED(CONFIG_64BIT)) {
+-              max_mapped_addr = __pa(~(ulong)0);
+-              if (max_mapped_addr == (phys_ram_end - 1))
+-                      memblock_set_current_limit(max_mapped_addr - 4096);
++              max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE);
++              memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
+       }
+       min_low_pfn = PFN_UP(phys_ram_base);
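
The overlap being avoided stems from the error-pointer encoding: the kernel
reserves the top MAX_ERRNO (4095) addresses for PTR_ERR() values, so a page
mapped at 0xfffff000 in a 32-bit address space contains addresses that
IS_ERR() misreads as errnos. A small userspace demonstration of the
arithmetic (the macro below is a 32-bit rendition of the kernel's
IS_ERR_VALUE(), not the kernel code itself):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095U
    /* 32-bit rendition of IS_ERR_VALUE(), evaluated as riscv32 would. */
    #define IS_ERR_VALUE32(x) ((uint32_t)(x) >= (uint32_t)-MAX_ERRNO)

    int main(void)
    {
            uint32_t page = 0xfffff000U; /* last 4K page of a 32-bit VA space */

            /* The page base itself sits just below the error range... */
            printf("base:    %d\n", IS_ERR_VALUE32(page));      /* 0 */
            /* ...but pointers into the page collide with encoded errnos. */
            printf("base+16: %d\n", IS_ERR_VALUE32(page + 16)); /* 1 */
            return 0;
    }
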
diff --git a/queue-6.1/series b/queue-6.1/series
index 4c9b9c8262d4afc0da05faf160d69b7ff64e8f4f..a914aacaede5de9d9baf0a774ad5b8a80b601aa9 100644
@@ -171,3 +171,9 @@ drm-exynos-vidi-fix-memory-leak-in-.get_modes.patch
 drm-exynos-hdmi-report-safe-640x480-mode-as-a-fallback-when-no-edid-found.patch
 mptcp-ensure-snd_una-is-properly-initialized-on-connect.patch
 mptcp-pm-inc-rmaddr-mib-counter-once-per-rm_addr-id.patch
+irqchip-gic-v3-its-fix-potential-race-condition-in-its_vlpi_prop_update.patch
+x86-amd_nb-check-for-invalid-smn-reads.patch
+perf-core-fix-missing-wakeup-when-waiting-for-context-reference.patch
+riscv-fix-overlap-of-allocated-page-and-ptr_err.patch
+tracing-selftests-fix-kprobe-event-name-test-for-.isra.-functions.patch
+null_blk-print-correct-max-open-zones-limit-in-null_init_zoned_dev.patch
diff --git a/queue-6.1/tracing-selftests-fix-kprobe-event-name-test-for-.isra.-functions.patch b/queue-6.1/tracing-selftests-fix-kprobe-event-name-test-for-.isra.-functions.patch
new file mode 100644
index 0000000..18c3db0
--- /dev/null
@@ -0,0 +1,50 @@
+From 23a4b108accc29a6125ed14de4a044689ffeda78 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
+Date: Mon, 20 May 2024 20:57:37 -0400
+Subject: tracing/selftests: Fix kprobe event name test for .isra. functions
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+commit 23a4b108accc29a6125ed14de4a044689ffeda78 upstream.
+
+The kprobe_eventname.tc test checks whether a function with .isra. in
+its name can have a kprobe attached to it. It loops through the kallsyms
+file for all functions whose names contain .isra., checks whether each
+one exists in the available_filter_functions file, and if it does, uses
+it to attach a kprobe.
+
+The issue is that kprobes cannot attach to functions that are listed more
+than once in available_filter_functions. With the latest kernel, the
+function that is found is: rapl_event_update.isra.0
+
+  # grep rapl_event_update.isra.0 /sys/kernel/tracing/available_filter_functions
+  rapl_event_update.isra.0
+  rapl_event_update.isra.0
+
+It is listed twice. This causes the kprobe attachment to fail, which in
+turn fails the test. Instead of just picking the first function that is
+found in available_filter_functions, pick the first one that is listed
+only once in available_filter_functions.
+
+Cc: stable@vger.kernel.org
+Fixes: 604e3548236d ("selftests/ftrace: Select an existing function in kprobe_eventname test")
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
+@@ -30,7 +30,8 @@ find_dot_func() {
+       fi
+       grep " [tT] .*\.isra\..*" /proc/kallsyms | cut -f 3 -d " " | while read f; do
+-              if grep -s $f available_filter_functions; then
++              cnt=`grep -s $f available_filter_functions | wc -l`;
++              if [ $cnt -eq 1 ]; then
+                       echo $f
+                       break
+               fi
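
The essence of the fix is easy to reproduce by hand (a sketch assuming
tracefs is mounted at /sys/kernel/tracing; the symbol is the example from
the changelog and myprobe is an arbitrary event name):

    cd /sys/kernel/tracing
    f=rapl_event_update.isra.0
    # Only symbols listed exactly once are safe kprobe targets.
    cnt=$(grep -c "^$f\$" available_filter_functions)
    if [ "$cnt" -eq 1 ]; then
            echo "p:myprobe $f" >> kprobe_events
    else
            echo "$f is listed $cnt times; skipping"
    fi
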
diff --git a/queue-6.1/x86-amd_nb-check-for-invalid-smn-reads.patch b/queue-6.1/x86-amd_nb-check-for-invalid-smn-reads.patch
new file mode 100644
index 0000000..319265b
--- /dev/null
@@ -0,0 +1,59 @@
+From c625dabbf1c4a8e77e4734014f2fde7aa9071a1f Mon Sep 17 00:00:00 2001
+From: Yazen Ghannam <yazen.ghannam@amd.com>
+Date: Mon, 3 Apr 2023 16:42:44 +0000
+Subject: x86/amd_nb: Check for invalid SMN reads
+
+From: Yazen Ghannam <yazen.ghannam@amd.com>
+
+commit c625dabbf1c4a8e77e4734014f2fde7aa9071a1f upstream.
+
+AMD Zen-based systems use a System Management Network (SMN) that
+provides access to implementation-specific registers.
+
+SMN accesses are done indirectly through an index/data pair in PCI
+config space. The PCI config access may fail and return an error code.
+This would prevent the "read" value from being updated.
+
+However, the PCI config access may succeed, but the returned value may
+be invalid, in a similar fashion to bad PCI reads, i.e. all bits set.
+
+Most systems will return 0 for SMN addresses that are not accessible.
+This is in line with AMD convention that unavailable registers are
+Read-as-Zero/Writes-Ignored.
+
+However, some systems will return a "PCI Error Response" instead. This
+value, along with an error code of 0 from the PCI config access, will
+confuse callers of the amd_smn_read() function.
+
+Check for this condition, clear the return value, and set a proper error
+code.
+
+Fixes: ddfe43cdc0da ("x86/amd_nb: Add SMN and Indirect Data Fabric access for AMD Fam17h")
+Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20230403164244.471141-1-yazen.ghannam@amd.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/amd_nb.c |    9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -195,7 +195,14 @@ out:
+ int amd_smn_read(u16 node, u32 address, u32 *value)
+ {
+-      return __amd_smn_rw(node, address, value, false);
++      int err = __amd_smn_rw(node, address, value, false);
++
++      if (PCI_POSSIBLE_ERROR(*value)) {
++              err = -ENODEV;
++              *value = 0;
++      }
++
++      return err;
+ }
+ EXPORT_SYMBOL_GPL(amd_smn_read);
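
PCI_POSSIBLE_ERROR() (from <linux/pci.h>) simply compares a read value
against the all-ones PCI_ERROR_RESPONSE pattern. A userspace sketch of the
validation amd_smn_read() now performs (stand-in definitions, including a
local ENODEV; not the kernel code itself):

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-ins for the <linux/pci.h> definitions. */
    #define PCI_ERROR_RESPONSE      (~0ULL)
    #define PCI_POSSIBLE_ERROR(val) ((val) == ((typeof(val))PCI_ERROR_RESPONSE))
    #define ENODEV 19

    /* pci_err is the (possibly misleading) status of the config access. */
    static int smn_read_checked(uint32_t raw, uint32_t *value, int pci_err)
    {
            *value = raw;
            if (PCI_POSSIBLE_ERROR(*value)) { /* 0xffffffff: error response */
                    *value = 0;               /* don't hand back the pattern */
                    return -ENODEV;
            }
            return pci_err;
    }

    int main(void)
    {
            uint32_t v;

            printf("%d\n", smn_read_checked(0xffffffffU, &v, 0)); /* -19 */
            printf("%d\n", smn_read_checked(0x1234U, &v, 0));     /* 0   */
            return 0;
    }
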