git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.3-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 25 Nov 2019 17:50:51 +0000 (18:50 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 25 Nov 2019 17:50:51 +0000 (18:50 +0100)
added patches:
drm-amd-powerplay-issue-no-ppsmc_msg_getcurrpkgpwr-on-unsupported-asics.patch
drm-amdgpu-disable-gfxoff-on-original-raven.patch
drm-amdgpu-disable-gfxoff-when-using-register-read-interface.patch
drm-i915-don-t-oops-in-dumb_create-ioctl-if-we-have-no-crtcs.patch
drm-i915-pmu-frequency-is-reported-as-accumulated-cycles.patch
drm-i915-userptr-try-to-acquire-the-page-lock-around-set_page_dirty.patch
fork-fix-pidfd_poll-s-return-type.patch
gpio-bd70528-use-correct-unit-for-debounce-times.patch
gpio-max77620-fixup-debounce-delays.patch
mm-ksm.c-don-t-warn-if-page-is-still-mapped-in-remove_stable_node.patch
mm-memory_hotplug-don-t-access-uninitialized-memmaps-in-shrink_zone_span.patch
nbd-fix-memory-leak-in-nbd_get_socket.patch
revert-fs-ocfs2-fix-possible-null-pointer-dereferences-in-ocfs2_xa_prepare_entry.patch
tools-gpio-correctly-add-make-dependencies-for-gpio_utils.patch
vhost-vsock-split-packets-to-send-using-multiple-buffers.patch
virtio_balloon-fix-shrinker-count.patch
virtio_console-allocate-inbufs-in-add_port-only-if-it-is-needed.patch
virtio_ring-fix-return-code-on-dma-mapping-fails.patch

19 files changed:
queue-5.3/drm-amd-powerplay-issue-no-ppsmc_msg_getcurrpkgpwr-on-unsupported-asics.patch [new file with mode: 0644]
queue-5.3/drm-amdgpu-disable-gfxoff-on-original-raven.patch [new file with mode: 0644]
queue-5.3/drm-amdgpu-disable-gfxoff-when-using-register-read-interface.patch [new file with mode: 0644]
queue-5.3/drm-i915-don-t-oops-in-dumb_create-ioctl-if-we-have-no-crtcs.patch [new file with mode: 0644]
queue-5.3/drm-i915-pmu-frequency-is-reported-as-accumulated-cycles.patch [new file with mode: 0644]
queue-5.3/drm-i915-userptr-try-to-acquire-the-page-lock-around-set_page_dirty.patch [new file with mode: 0644]
queue-5.3/fork-fix-pidfd_poll-s-return-type.patch [new file with mode: 0644]
queue-5.3/gpio-bd70528-use-correct-unit-for-debounce-times.patch [new file with mode: 0644]
queue-5.3/gpio-max77620-fixup-debounce-delays.patch [new file with mode: 0644]
queue-5.3/mm-ksm.c-don-t-warn-if-page-is-still-mapped-in-remove_stable_node.patch [new file with mode: 0644]
queue-5.3/mm-memory_hotplug-don-t-access-uninitialized-memmaps-in-shrink_zone_span.patch [new file with mode: 0644]
queue-5.3/nbd-fix-memory-leak-in-nbd_get_socket.patch [new file with mode: 0644]
queue-5.3/revert-fs-ocfs2-fix-possible-null-pointer-dereferences-in-ocfs2_xa_prepare_entry.patch [new file with mode: 0644]
queue-5.3/series
queue-5.3/tools-gpio-correctly-add-make-dependencies-for-gpio_utils.patch [new file with mode: 0644]
queue-5.3/vhost-vsock-split-packets-to-send-using-multiple-buffers.patch [new file with mode: 0644]
queue-5.3/virtio_balloon-fix-shrinker-count.patch [new file with mode: 0644]
queue-5.3/virtio_console-allocate-inbufs-in-add_port-only-if-it-is-needed.patch [new file with mode: 0644]
queue-5.3/virtio_ring-fix-return-code-on-dma-mapping-fails.patch [new file with mode: 0644]

diff --git a/queue-5.3/drm-amd-powerplay-issue-no-ppsmc_msg_getcurrpkgpwr-on-unsupported-asics.patch b/queue-5.3/drm-amd-powerplay-issue-no-ppsmc_msg_getcurrpkgpwr-on-unsupported-asics.patch
new file mode 100644 (file)
index 0000000..03ea4f0
--- /dev/null
@@ -0,0 +1,60 @@
+From 355d991cb6ff6ae76b5e28b8edae144124c730e4 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Thu, 14 Nov 2019 15:30:39 +0800
+Subject: drm/amd/powerplay: issue no PPSMC_MSG_GetCurrPkgPwr on unsupported ASICs
+
+From: Evan Quan <evan.quan@amd.com>
+
+commit 355d991cb6ff6ae76b5e28b8edae144124c730e4 upstream.
+
+Otherwise, the error message prompted will confuse the user.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c |   23 ++++++++++++++++++-----
+ 1 file changed, 18 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -3477,18 +3477,31 @@ static int smu7_get_pp_table_entry(struc
+ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
+ {
++      struct amdgpu_device *adev = hwmgr->adev;
+       int i;
+       u32 tmp = 0;
+       if (!query)
+               return -EINVAL;
+-      smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
+-      tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+-      *query = tmp;
++      /*
++       * PPSMC_MSG_GetCurrPkgPwr is not supported on:
++       *  - Hawaii
++       *  - Bonaire
++       *  - Fiji
++       *  - Tonga
++       */
++      if ((adev->asic_type != CHIP_HAWAII) &&
++          (adev->asic_type != CHIP_BONAIRE) &&
++          (adev->asic_type != CHIP_FIJI) &&
++          (adev->asic_type != CHIP_TONGA)) {
++              smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
++              tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
++              *query = tmp;
+-      if (tmp != 0)
+-              return 0;
++              if (tmp != 0)
++                      return 0;
++      }
+       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
+       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
diff --git a/queue-5.3/drm-amdgpu-disable-gfxoff-on-original-raven.patch b/queue-5.3/drm-amdgpu-disable-gfxoff-on-original-raven.patch
new file mode 100644 (file)
index 0000000..7461e42
--- /dev/null
@@ -0,0 +1,43 @@
+From 941a0a7945c39f36a16634bc65c2649a1b94eee1 Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Fri, 15 Nov 2019 10:21:23 -0500
+Subject: drm/amdgpu: disable gfxoff on original raven
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 941a0a7945c39f36a16634bc65c2649a1b94eee1 upstream.
+
+There are still combinations of sbios and firmware that
+are not stable.
+
+Bug: https://bugzilla.kernel.org/show_bug.cgi?id=204689
+Acked-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -596,8 +596,13 @@ static void gfx_v9_0_check_if_need_gfxof
+       case CHIP_VEGA20:
+               break;
+       case CHIP_RAVEN:
+-              if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+-                      &&((adev->gfx.rlc_fw_version != 106 &&
++              /* Disable GFXOFF on original raven.  There are combinations
++               * of sbios and platforms that are not stable.
++               */
++              if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8))
++                      adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
++              else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
++                       &&((adev->gfx.rlc_fw_version != 106 &&
+                            adev->gfx.rlc_fw_version < 531) ||
+                           (adev->gfx.rlc_fw_version == 53815) ||
+                           (adev->gfx.rlc_feature_version < 1) ||
diff --git a/queue-5.3/drm-amdgpu-disable-gfxoff-when-using-register-read-interface.patch b/queue-5.3/drm-amdgpu-disable-gfxoff-when-using-register-read-interface.patch
new file mode 100644 (file)
index 0000000..2948d55
--- /dev/null
@@ -0,0 +1,46 @@
+From c57040d333c6729ce99c2cb95061045ff84c89ea Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 14 Nov 2019 11:39:05 -0500
+Subject: drm/amdgpu: disable gfxoff when using register read interface
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit c57040d333c6729ce99c2cb95061045ff84c89ea upstream.
+
+When gfxoff is enabled, accessing gfx registers via MMIO
+can lead to a hang.
+
+Bug: https://bugzilla.kernel.org/show_bug.cgi?id=205497
+Acked-by: Xiaojie Yuan <xiaojie.yuan@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -635,15 +635,19 @@ static int amdgpu_info_ioctl(struct drm_
+                       return -ENOMEM;
+               alloc_size = info->read_mmr_reg.count * sizeof(*regs);
+-              for (i = 0; i < info->read_mmr_reg.count; i++)
++              amdgpu_gfx_off_ctrl(adev, false);
++              for (i = 0; i < info->read_mmr_reg.count; i++) {
+                       if (amdgpu_asic_read_register(adev, se_num, sh_num,
+                                                     info->read_mmr_reg.dword_offset + i,
+                                                     &regs[i])) {
+                               DRM_DEBUG_KMS("unallowed offset %#x\n",
+                                             info->read_mmr_reg.dword_offset + i);
+                               kfree(regs);
++                              amdgpu_gfx_off_ctrl(adev, true);
+                               return -EFAULT;
+                       }
++              }
++              amdgpu_gfx_off_ctrl(adev, true);
+               n = copy_to_user(out, regs, min(size, alloc_size));
+               kfree(regs);
+               return n ? -EFAULT : 0;
diff --git a/queue-5.3/drm-i915-don-t-oops-in-dumb_create-ioctl-if-we-have-no-crtcs.patch b/queue-5.3/drm-i915-don-t-oops-in-dumb_create-ioctl-if-we-have-no-crtcs.patch
new file mode 100644 (file)
index 0000000..640b41f
--- /dev/null
@@ -0,0 +1,49 @@
+From 8ac495f624a42809000255955be406f6a8a74b55 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= <ville.syrjala@linux.intel.com>
+Date: Wed, 6 Nov 2019 19:23:49 +0200
+Subject: drm/i915: Don't oops in dumb_create ioctl if we have no crtcs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ville Syrjälä <ville.syrjala@linux.intel.com>
+
+commit 8ac495f624a42809000255955be406f6a8a74b55 upstream.
+
+Make sure we have a crtc before probing its primary plane's
+max stride. Initially I thought we can't get this far without
+crtcs, but looks like we can via the dumb_create ioctl.
+
+Not sure if we shouldn't disable dumb buffer support entirely
+when we have no crtcs, but that would require some amount of work
+as the only thing currently being checked is dev->driver->dumb_create
+which we'd have to convert to some device specific dynamic thing.
+
+Cc: stable@vger.kernel.org
+Reported-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Fixes: aa5ca8b7421c ("drm/i915: Align dumb buffer stride to 4k to allow for gtt remapping")
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191106172349.11987-1-ville.syrjala@linux.intel.com
+Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
+(cherry picked from commit baea9ffe64200033499a4955f431e315bb807899)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+(cherry picked from commit aeec766133f99d45aad60d650de50fb382104d95)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/display/intel_display.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -2519,6 +2519,9 @@ u32 intel_plane_fb_max_stride(struct drm
+        * the highest stride limits of them all.
+        */
+       crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
++      if (!crtc)
++              return 0;
++
+       plane = to_intel_plane(crtc->base.primary);
+       return plane->max_stride(plane, pixel_format, modifier,
diff --git a/queue-5.3/drm-i915-pmu-frequency-is-reported-as-accumulated-cycles.patch b/queue-5.3/drm-i915-pmu-frequency-is-reported-as-accumulated-cycles.patch
new file mode 100644 (file)
index 0000000..9fc278e
--- /dev/null
@@ -0,0 +1,42 @@
+From add3eeed3683e2636ef524db48e1a678757c8e96 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Sat, 9 Nov 2019 10:53:56 +0000
+Subject: drm/i915/pmu: "Frequency" is reported as accumulated cycles
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit add3eeed3683e2636ef524db48e1a678757c8e96 upstream.
+
+We report "frequencies" (actual-frequency, requested-frequency) as the
+number of accumulated cycles so that the average frequency over that
+period may be determined by the user. This means the units we report to
+the user are Mcycles (or just M), not MHz.
+
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191109105356.5273-1-chris@chris-wilson.co.uk
+(cherry picked from commit e88866ef02851c88fe95a4bb97820b94b4d46f36)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+(cherry picked from commit a7d87b70d6da96c6772e50728c8b4e78e4cbfd55)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_pmu.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_pmu.c
++++ b/drivers/gpu/drm/i915/i915_pmu.c
+@@ -833,8 +833,8 @@ create_event_attributes(struct drm_i915_
+               const char *name;
+               const char *unit;
+       } events[] = {
+-              __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
+-              __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
++              __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "M"),
++              __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"),
+               __event(I915_PMU_INTERRUPTS, "interrupts", NULL),
+               __event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
+       };
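Since these counters now accumulate Mcycles rather than report MHz directly, the average frequency over a sampling interval has to be derived by userspace. Below is a minimal, self-contained sketch of that arithmetic; the sample values are invented for illustration and this is not code from the patch:

    #include <stdio.h>

    int main(void)
    {
        /* Two hypothetical samples of the i915 "actual-frequency" event,
         * now reported as accumulated Mcycles, plus their timestamps.
         */
        double mcycles_start = 120000.0, mcycles_end = 126000.0;
        double t_start_s = 10.0, t_end_s = 15.0;

        /* Mcycles per second is MHz, so the average frequency over the
         * interval is delta(counter) / delta(time).
         */
        double avg_mhz = (mcycles_end - mcycles_start) / (t_end_s - t_start_s);

        printf("average frequency: %.1f MHz\n", avg_mhz); /* prints 1200.0 */
        return 0;
    }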
diff --git a/queue-5.3/drm-i915-userptr-try-to-acquire-the-page-lock-around-set_page_dirty.patch b/queue-5.3/drm-i915-userptr-try-to-acquire-the-page-lock-around-set_page_dirty.patch
new file mode 100644 (file)
index 0000000..265e35f
--- /dev/null
@@ -0,0 +1,83 @@
+From 2d691aeca4aecbb8d0414a777a46981a8e142b05 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Mon, 11 Nov 2019 13:32:03 +0000
+Subject: drm/i915/userptr: Try to acquire the page lock around set_page_dirty()
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 2d691aeca4aecbb8d0414a777a46981a8e142b05 upstream.
+
+set_page_dirty says:
+
+       For pages with a mapping this should be done under the page lock
+       for the benefit of asynchronous memory errors who prefer a
+       consistent dirty state. This rule can be broken in some special
+       cases, but should be better not to.
+
+Under those rules, it is only safe for us to use the plain set_page_dirty
+calls for shmemfs/anonymous memory. Userptr may be used with real
+mappings and so needs to use the locked version (set_page_dirty_lock).
+
+However, following a try_to_unmap() we may want to remove the userptr and
+so call put_pages(). However, try_to_unmap() acquires the page lock and
+so we must avoid recursively locking the pages ourselves -- which means
+that we cannot safely acquire the lock around set_page_dirty(). Since we
+can't be sure of the lock, we have to risk skipping the dirtying of the
+page, or else risk calling set_page_dirty() without a lock and so risk
+fs corruption.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=203317
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=112012
+Fixes: 5cc9ed4b9a7a ("drm/i915: Introduce mapping of user pages into video memory (userptr) ioctl")
+References: cb6d7c7dc7ff ("drm/i915/userptr: Acquire the page lock around set_page_dirty()")
+References: 505a8ec7e11a ("Revert "drm/i915/userptr: Acquire the page lock around set_page_dirty()"")
+References: 6dcc693bc57f ("ext4: warn when page is dirtied without buffers")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191111133205.11590-1-chris@chris-wilson.co.uk
+(cherry picked from commit 0d4bbe3d407f79438dc4f87943db21f7134cfc65)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+(cherry picked from commit cee7fb437edcdb2f9f8affa959e274997f5dca4d)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/gem/i915_gem_userptr.c |   22 +++++++++++++++++++++-
+ 1 file changed, 21 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+@@ -663,8 +663,28 @@ i915_gem_userptr_put_pages(struct drm_i9
+       i915_gem_gtt_finish_pages(obj, pages);
+       for_each_sgt_page(page, sgt_iter, pages) {
+-              if (obj->mm.dirty)
++              if (obj->mm.dirty && trylock_page(page)) {
++                      /*
++                       * As this may not be anonymous memory (e.g. shmem)
++                       * but exist on a real mapping, we have to lock
++                       * the page in order to dirty it -- holding
++                       * the page reference is not sufficient to
++                       * prevent the inode from being truncated.
++                       * Play safe and take the lock.
++                       *
++                       * However...!
++                       *
++                       * The mmu-notifier can be invalidated for a
++                       * migrate_page, that is alreadying holding the lock
++                       * on the page. Such a try_to_unmap() will result
++                       * in us calling put_pages() and so recursively try
++                       * to lock the page. We avoid that deadlock with
++                       * a trylock_page() and in exchange we risk missing
++                       * some page dirtying.
++                       */
+                       set_page_dirty(page);
++                      unlock_page(page);
++              }
+               mark_page_accessed(page);
+               put_page(page);
diff --git a/queue-5.3/fork-fix-pidfd_poll-s-return-type.patch b/queue-5.3/fork-fix-pidfd_poll-s-return-type.patch
new file mode 100644 (file)
index 0000000..fe08ae6
--- /dev/null
@@ -0,0 +1,53 @@
+From 9e77716a75bc6cf54965e5ec069ba7c02b32251c Mon Sep 17 00:00:00 2001
+From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
+Date: Wed, 20 Nov 2019 01:33:20 +0100
+Subject: fork: fix pidfd_poll()'s return type
+
+From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
+
+commit 9e77716a75bc6cf54965e5ec069ba7c02b32251c upstream.
+
+pidfd_poll() is defined as returning 'unsigned int' but the
+.poll method is declared as returning '__poll_t', a bitwise type.
+
+Fix this by using the proper return type and using the EPOLL
+constants instead of the POLL ones, as required for __poll_t.
+
+Fixes: b53b0b9d9a61 ("pidfd: add polling support")
+Cc: Joel Fernandes (Google) <joel@joelfernandes.org>
+Cc: stable@vger.kernel.org # 5.3
+Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
+Reviewed-by: Christian Brauner <christian.brauner@ubuntu.com>
+Link: https://lore.kernel.org/r/20191120003320.31138-1-luc.vanoostenryck@gmail.com
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/fork.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1713,11 +1713,11 @@ static void pidfd_show_fdinfo(struct seq
+ /*
+  * Poll support for process exit notification.
+  */
+-static unsigned int pidfd_poll(struct file *file, struct poll_table_struct *pts)
++static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts)
+ {
+       struct task_struct *task;
+       struct pid *pid = file->private_data;
+-      int poll_flags = 0;
++      __poll_t poll_flags = 0;
+       poll_wait(file, &pid->wait_pidfd, pts);
+@@ -1729,7 +1729,7 @@ static unsigned int pidfd_poll(struct fi
+        * group, then poll(2) should block, similar to the wait(2) family.
+        */
+       if (!task || (task->exit_state && thread_group_empty(task)))
+-              poll_flags = POLLIN | POLLRDNORM;
++              poll_flags = EPOLLIN | EPOLLRDNORM;
+       rcu_read_unlock();
+       return poll_flags;
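For context, pidfd_poll() backs the userspace pattern of waiting for process exit by polling a pidfd. The following is a small sketch of that usage, not part of the patch; it assumes a 5.3+ kernel and defines __NR_pidfd_open as 434 only when the libc headers do not already provide it:

    #define _GNU_SOURCE
    #include <poll.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    #ifndef __NR_pidfd_open
    #define __NR_pidfd_open 434   /* assumption: not defined by older libc headers */
    #endif

    int main(void)
    {
        pid_t child = fork();

        if (child == 0) {         /* child: exit after a short sleep */
            sleep(1);
            _exit(0);
        }

        int pidfd = syscall(__NR_pidfd_open, child, 0);
        if (pidfd < 0) {
            perror("pidfd_open");
            return 1;
        }

        /* pidfd_poll() reports (E)POLLIN once the process has exited. */
        struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
        if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
            printf("child %d exited\n", (int)child);

        waitpid(child, NULL, 0);  /* reap the child */
        return 0;
    }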
diff --git a/queue-5.3/gpio-bd70528-use-correct-unit-for-debounce-times.patch b/queue-5.3/gpio-bd70528-use-correct-unit-for-debounce-times.patch
new file mode 100644 (file)
index 0000000..090df1b
--- /dev/null
@@ -0,0 +1,43 @@
+From f88c117b6d6d7e96557b6ee143b26b550fc51076 Mon Sep 17 00:00:00 2001
+From: Thierry Reding <treding@nvidia.com>
+Date: Fri, 8 Nov 2019 17:07:47 +0100
+Subject: gpio: bd70528: Use correct unit for debounce times
+
+From: Thierry Reding <treding@nvidia.com>
+
+commit f88c117b6d6d7e96557b6ee143b26b550fc51076 upstream.
+
+The debounce time passed to gpiod_set_debounce() is specified in
+microseconds, so make sure to use the correct unit when computing the
+register values, which denote delays in milliseconds.
+
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Fixes: 18bc64b3aebf ("gpio: Initial support for ROHM bd70528 GPIO block")
+[Bartosz: fixed a typo in commit message]
+Signed-off-by: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpio-bd70528.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpio/gpio-bd70528.c
++++ b/drivers/gpio/gpio-bd70528.c
+@@ -25,13 +25,13 @@ static int bd70528_set_debounce(struct b
+       case 0:
+               val = BD70528_DEBOUNCE_DISABLE;
+               break;
+-      case 1 ... 15:
++      case 1 ... 15000:
+               val = BD70528_DEBOUNCE_15MS;
+               break;
+-      case 16 ... 30:
++      case 15001 ... 30000:
+               val = BD70528_DEBOUNCE_30MS;
+               break;
+-      case 31 ... 50:
++      case 30001 ... 50000:
+               val = BD70528_DEBOUNCE_50MS;
+               break;
+       default:
diff --git a/queue-5.3/gpio-max77620-fixup-debounce-delays.patch b/queue-5.3/gpio-max77620-fixup-debounce-delays.patch
new file mode 100644 (file)
index 0000000..c407dec
--- /dev/null
@@ -0,0 +1,56 @@
+From b0391479ae04dfcbd208b9571c375064caad9a57 Mon Sep 17 00:00:00 2001
+From: Thierry Reding <treding@nvidia.com>
+Date: Fri, 8 Nov 2019 17:07:46 +0100
+Subject: gpio: max77620: Fixup debounce delays
+
+From: Thierry Reding <treding@nvidia.com>
+
+commit b0391479ae04dfcbd208b9571c375064caad9a57 upstream.
+
+When converting milliseconds to microseconds in commit fffa6af94894
+("gpio: max77620: Use correct unit for debounce times") some ~1 ms gaps
+were introduced between the various ranges supported by the controller.
+Fix this by changing the start of each range to the value immediately
+following the end of the previous range. This way a debounce time of,
+say 8250 us will translate into 16 ms instead of returning an -EINVAL
+error.
+
+Typically the debounce delay is only ever set through device tree and
+specified in milliseconds, so we can never really hit this issue because
+debounce times are always a multiple of 1000 us.
+
+The only notable exception for this is drivers/mmc/host/mmc-spi.c where
+the CD GPIO is requested, which passes a 1 us debounce time. According
+to a comment preceding that code this should actually be 1 ms (i.e.
+1000 us).
+
+Reported-by: Pavel Machek <pavel@denx.de>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Acked-by: Pavel Machek <pavel@denx.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpio-max77620.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpio/gpio-max77620.c
++++ b/drivers/gpio/gpio-max77620.c
+@@ -192,13 +192,13 @@ static int max77620_gpio_set_debounce(st
+       case 0:
+               val = MAX77620_CNFG_GPIO_DBNC_None;
+               break;
+-      case 1000 ... 8000:
++      case 1 ... 8000:
+               val = MAX77620_CNFG_GPIO_DBNC_8ms;
+               break;
+-      case 9000 ... 16000:
++      case 8001 ... 16000:
+               val = MAX77620_CNFG_GPIO_DBNC_16ms;
+               break;
+-      case 17000 ... 32000:
++      case 16001 ... 32000:
+               val = MAX77620_CNFG_GPIO_DBNC_32ms;
+               break;
+       default:
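For reference, a small userspace sketch of the corrected mapping logic: the request is in microseconds and the ranges are now contiguous, so a value such as 8250 us rounds up to the 16 ms step instead of falling into a gap and failing. The constants below are illustrative stand-ins for the driver's MAX77620_CNFG_GPIO_DBNC_* register values, not the driver code itself:

    #include <stdio.h>

    /* Map a requested debounce time (microseconds) to the nearest
     * supported step (milliseconds), mirroring the fixed switch ranges.
     */
    static int debounce_ms(unsigned int usec)
    {
        if (usec == 0)
            return 0;             /* debounce disabled */
        if (usec <= 8000)
            return 8;
        if (usec <= 16000)
            return 16;
        if (usec <= 32000)
            return 32;
        return -1;                /* out of range: -EINVAL in the driver */
    }

    int main(void)
    {
        unsigned int requests[] = { 0, 1, 8000, 8250, 16000, 17000, 32000, 40000 };

        for (unsigned int i = 0; i < sizeof(requests) / sizeof(requests[0]); i++)
            printf("%6u us -> %d ms\n", requests[i], debounce_ms(requests[i]));
        return 0;
    }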
diff --git a/queue-5.3/mm-ksm.c-don-t-warn-if-page-is-still-mapped-in-remove_stable_node.patch b/queue-5.3/mm-ksm.c-don-t-warn-if-page-is-still-mapped-in-remove_stable_node.patch
new file mode 100644 (file)
index 0000000..96fa0e5
--- /dev/null
@@ -0,0 +1,63 @@
+From 9a63236f1ad82d71a98aa80320b6cb618fb32f44 Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Thu, 21 Nov 2019 17:54:01 -0800
+Subject: mm/ksm.c: don't WARN if page is still mapped in remove_stable_node()
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit 9a63236f1ad82d71a98aa80320b6cb618fb32f44 upstream.
+
+It's possible to hit the WARN_ON_ONCE(page_mapped(page)) in
+remove_stable_node() when it races with __mmput() and squeezes in
+between ksm_exit() and exit_mmap().
+
+  WARNING: CPU: 0 PID: 3295 at mm/ksm.c:888 remove_stable_node+0x10c/0x150
+
+  Call Trace:
+   remove_all_stable_nodes+0x12b/0x330
+   run_store+0x4ef/0x7b0
+   kernfs_fop_write+0x200/0x420
+   vfs_write+0x154/0x450
+   ksys_write+0xf9/0x1d0
+   do_syscall_64+0x99/0x510
+   entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+Remove the warning as there is nothing scary going on.
+
+Link: http://lkml.kernel.org/r/20191119131850.5675-1-aryabinin@virtuozzo.com
+Fixes: cbf86cfe04a6 ("ksm: remove old stable nodes more thoroughly")
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Acked-by: Hugh Dickins <hughd@google.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/ksm.c |   14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -885,13 +885,13 @@ static int remove_stable_node(struct sta
+               return 0;
+       }
+-      if (WARN_ON_ONCE(page_mapped(page))) {
+-              /*
+-               * This should not happen: but if it does, just refuse to let
+-               * merge_across_nodes be switched - there is no need to panic.
+-               */
+-              err = -EBUSY;
+-      } else {
++      /*
++       * Page could be still mapped if this races with __mmput() running in
++       * between ksm_exit() and exit_mmap(). Just refuse to let
++       * merge_across_nodes/max_page_sharing be switched.
++       */
++      err = -EBUSY;
++      if (!page_mapped(page)) {
+               /*
+                * The stable node did not yet appear stale to get_ksm_page(),
+                * since that allows for an unmapped ksm page to be recognized
diff --git a/queue-5.3/mm-memory_hotplug-don-t-access-uninitialized-memmaps-in-shrink_zone_span.patch b/queue-5.3/mm-memory_hotplug-don-t-access-uninitialized-memmaps-in-shrink_zone_span.patch
new file mode 100644 (file)
index 0000000..712d40b
--- /dev/null
@@ -0,0 +1,178 @@
+From 7ce700bf11b5e2cb84e4352bbdf2123a7a239c84 Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Thu, 21 Nov 2019 17:53:56 -0800
+Subject: mm/memory_hotplug: don't access uninitialized memmaps in shrink_zone_span()
+
+From: David Hildenbrand <david@redhat.com>
+
+commit 7ce700bf11b5e2cb84e4352bbdf2123a7a239c84 upstream.
+
+Let's limit shrinking to !ZONE_DEVICE so we can fix the current code.
+We should never try to touch the memmap of offline sections where we
+could have uninitialized memmaps and could trigger BUGs when calling
+page_to_nid() on poisoned pages.
+
+There is no reliable way to distinguish an uninitialized memmap from an
+initialized memmap that belongs to ZONE_DEVICE, as we don't have
+anything like SECTION_IS_ONLINE we can use similar to
+pfn_to_online_section() for !ZONE_DEVICE memory.
+
+E.g., set_zone_contiguous() similarly relies on pfn_to_online_section()
+and will therefore never set a ZONE_DEVICE zone consecutive.  Stopping
+to shrink the ZONE_DEVICE therefore results in no observable changes,
+besides /proc/zoneinfo indicating different boundaries - something we
+can totally live with.
+
+Before commit d0dc12e86b31 ("mm/memory_hotplug: optimize memory
+hotplug"), the memmap was initialized with 0 and the node with the right
+value.  So the zone might be wrong but not garbage.  After that commit,
+both the zone and the node will be garbage when touching uninitialized
+memmaps.
+
+Toshiki reported a BUG (race between delayed initialization of
+ZONE_DEVICE memmaps without holding the memory hotplug lock and
+concurrent zone shrinking).
+
+  https://lkml.org/lkml/2019/11/14/1040
+
+"Iteration of create and destroy namespace causes the panic as below:
+
+      kernel BUG at mm/page_alloc.c:535!
+      CPU: 7 PID: 2766 Comm: ndctl Not tainted 5.4.0-rc4 #6
+      Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.11.0-0-g63451fca13-prebuilt.qemu-project.org 04/01/2014
+      RIP: 0010:set_pfnblock_flags_mask+0x95/0xf0
+      Call Trace:
+       memmap_init_zone_device+0x165/0x17c
+       memremap_pages+0x4c1/0x540
+       devm_memremap_pages+0x1d/0x60
+       pmem_attach_disk+0x16b/0x600 [nd_pmem]
+       nvdimm_bus_probe+0x69/0x1c0
+       really_probe+0x1c2/0x3e0
+       driver_probe_device+0xb4/0x100
+       device_driver_attach+0x4f/0x60
+       bind_store+0xc9/0x110
+       kernfs_fop_write+0x116/0x190
+       vfs_write+0xa5/0x1a0
+       ksys_write+0x59/0xd0
+       do_syscall_64+0x5b/0x180
+       entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+  While creating a namespace and initializing memmap, if you destroy the
+  namespace and shrink the zone, it will initialize the memmap outside
+  the zone and trigger VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page),
+  pfn), page) in set_pfnblock_flags_mask()."
+
+This BUG is also mitigated by this commit, where we for now stop to
+shrink the ZONE_DEVICE zone until we can do it in a safe and clean way.
+
+Link: http://lkml.kernel.org/r/20191006085646.5768-5-david@redhat.com
+Fixes: f1dd2cd13c4b ("mm, memory_hotplug: do not associate hotadded memory to zones until online")     [visible after d0dc12e86b319]
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reported-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Reported-by: Toshiki Fukasawa <t-fukasawa@vx.jp.nec.com>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Anshuman Khandual <anshuman.khandual@arm.com>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Christophe Leroy <christophe.leroy@c-s.fr>
+Cc: Damian Tometzki <damian.tometzki@gmail.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Halil Pasic <pasic@linux.ibm.com>
+Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Ira Weiny <ira.weiny@intel.com>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: Jun Yao <yaojun8558363@gmail.com>
+Cc: Logan Gunthorpe <logang@deltatee.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
+Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Mike Rapoport <rppt@linux.ibm.com>
+Cc: Pankaj Gupta <pagupta@redhat.com>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: Pavel Tatashin <pavel.tatashin@microsoft.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Qian Cai <cai@lca.pw>
+Cc: Rich Felker <dalias@libc.org>
+Cc: Robin Murphy <robin.murphy@arm.com>
+Cc: Steve Capper <steve.capper@arm.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Wei Yang <richard.weiyang@gmail.com>
+Cc: Wei Yang <richardw.yang@linux.intel.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
+Cc: Yu Zhao <yuzhao@google.com>
+Cc: <stable@vger.kernel.org>   [4.13+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memory_hotplug.c |   16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -331,7 +331,7 @@ static unsigned long find_smallest_secti
+                                    unsigned long end_pfn)
+ {
+       for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
+-              if (unlikely(!pfn_valid(start_pfn)))
++              if (unlikely(!pfn_to_online_page(start_pfn)))
+                       continue;
+               if (unlikely(pfn_to_nid(start_pfn) != nid))
+@@ -356,7 +356,7 @@ static unsigned long find_biggest_sectio
+       /* pfn is the end pfn of a memory section. */
+       pfn = end_pfn - 1;
+       for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
+-              if (unlikely(!pfn_valid(pfn)))
++              if (unlikely(!pfn_to_online_page(pfn)))
+                       continue;
+               if (unlikely(pfn_to_nid(pfn) != nid))
+@@ -415,7 +415,7 @@ static void shrink_zone_span(struct zone
+        */
+       pfn = zone_start_pfn;
+       for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
+-              if (unlikely(!pfn_valid(pfn)))
++              if (unlikely(!pfn_to_online_page(pfn)))
+                       continue;
+               if (page_zone(pfn_to_page(pfn)) != zone)
+@@ -471,6 +471,16 @@ static void __remove_zone(struct zone *z
+       struct pglist_data *pgdat = zone->zone_pgdat;
+       unsigned long flags;
++#ifdef CONFIG_ZONE_DEVICE
++      /*
++       * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
++       * we will not try to shrink the zones - which is okay as
++       * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
++       */
++      if (zone_idx(zone) == ZONE_DEVICE)
++              return;
++#endif
++
+       pgdat_resize_lock(zone->zone_pgdat, &flags);
+       shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
+       update_pgdat_span(pgdat);
diff --git a/queue-5.3/nbd-fix-memory-leak-in-nbd_get_socket.patch b/queue-5.3/nbd-fix-memory-leak-in-nbd_get_socket.patch
new file mode 100644 (file)
index 0000000..dd65985
--- /dev/null
@@ -0,0 +1,33 @@
+From dff10bbea4be47bdb615b036c834a275b7c68133 Mon Sep 17 00:00:00 2001
+From: Sun Ke <sunke32@huawei.com>
+Date: Tue, 19 Nov 2019 14:09:11 +0800
+Subject: nbd: fix memory leak in nbd_get_socket()
+
+From: Sun Ke <sunke32@huawei.com>
+
+commit dff10bbea4be47bdb615b036c834a275b7c68133 upstream.
+
+Before returning NULL, put the sock first.
+
+Cc: stable@vger.kernel.org
+Fixes: cf1b2326b734 ("nbd: verify socket is supported during setup")
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: Mike Christie <mchristi@redhat.com>
+Signed-off-by: Sun Ke <sunke32@huawei.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/nbd.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -956,6 +956,7 @@ static struct socket *nbd_get_socket(str
+       if (sock->ops->shutdown == sock_no_shutdown) {
+               dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
+               *err = -EINVAL;
++              sockfd_put(sock);
+               return NULL;
+       }
diff --git a/queue-5.3/revert-fs-ocfs2-fix-possible-null-pointer-dereferences-in-ocfs2_xa_prepare_entry.patch b/queue-5.3/revert-fs-ocfs2-fix-possible-null-pointer-dereferences-in-ocfs2_xa_prepare_entry.patch
new file mode 100644 (file)
index 0000000..5cca375
--- /dev/null
@@ -0,0 +1,111 @@
+From 94b07b6f9e2e996afff7395de6b35f34f4cb10bf Mon Sep 17 00:00:00 2001
+From: Joseph Qi <joseph.qi@linux.alibaba.com>
+Date: Thu, 21 Nov 2019 17:53:52 -0800
+Subject: Revert "fs: ocfs2: fix possible null-pointer dereferences in ocfs2_xa_prepare_entry()"
+
+From: Joseph Qi <joseph.qi@linux.alibaba.com>
+
+commit 94b07b6f9e2e996afff7395de6b35f34f4cb10bf upstream.
+
+This reverts commit 56e94ea132bb5c2c1d0b60a6aeb34dcb7d71a53d.
+
+Commit 56e94ea132bb ("fs: ocfs2: fix possible null-pointer dereferences
+in ocfs2_xa_prepare_entry()") introduces a regression that fail to
+create directory with mount option user_xattr and acl.  Actually the
+reported NULL pointer dereference case can be correctly handled by
+loc->xl_ops->xlo_add_entry(), so revert it.
+
+Link: http://lkml.kernel.org/r/1573624916-83825-1-git-send-email-joseph.qi@linux.alibaba.com
+Fixes: 56e94ea132bb ("fs: ocfs2: fix possible null-pointer dereferences in ocfs2_xa_prepare_entry()")
+Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Reported-by: Thomas Voegtle <tv@lio96.de>
+Acked-by: Changwei Ge <gechangwei@live.cn>
+Cc: Jia-Ju Bai <baijiaju1990@gmail.com>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Gang He <ghe@suse.com>
+Cc: Jun Piao <piaojun@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ocfs2/xattr.c |   56 ++++++++++++++++++++++++++++++++-----------------------
+ 1 file changed, 33 insertions(+), 23 deletions(-)
+
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -1490,6 +1490,18 @@ static int ocfs2_xa_check_space(struct o
+       return loc->xl_ops->xlo_check_space(loc, xi);
+ }
++static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
++{
++      loc->xl_ops->xlo_add_entry(loc, name_hash);
++      loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
++      /*
++       * We can't leave the new entry's xe_name_offset at zero or
++       * add_namevalue() will go nuts.  We set it to the size of our
++       * storage so that it can never be less than any other entry.
++       */
++      loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
++}
++
+ static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
+                                  struct ocfs2_xattr_info *xi)
+ {
+@@ -2121,31 +2133,29 @@ static int ocfs2_xa_prepare_entry(struct
+       if (rc)
+               goto out;
+-      if (!loc->xl_entry) {
+-              rc = -EINVAL;
+-              goto out;
+-      }
+-
+-      if (ocfs2_xa_can_reuse_entry(loc, xi)) {
+-              orig_value_size = loc->xl_entry->xe_value_size;
+-              rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
+-              if (rc)
+-                      goto out;
+-              goto alloc_value;
+-      }
++      if (loc->xl_entry) {
++              if (ocfs2_xa_can_reuse_entry(loc, xi)) {
++                      orig_value_size = loc->xl_entry->xe_value_size;
++                      rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
++                      if (rc)
++                              goto out;
++                      goto alloc_value;
++              }
+-      if (!ocfs2_xattr_is_local(loc->xl_entry)) {
+-              orig_clusters = ocfs2_xa_value_clusters(loc);
+-              rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+-              if (rc) {
+-                      mlog_errno(rc);
+-                      ocfs2_xa_cleanup_value_truncate(loc,
+-                                                      "overwriting",
+-                                                      orig_clusters);
+-                      goto out;
++              if (!ocfs2_xattr_is_local(loc->xl_entry)) {
++                      orig_clusters = ocfs2_xa_value_clusters(loc);
++                      rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
++                      if (rc) {
++                              mlog_errno(rc);
++                              ocfs2_xa_cleanup_value_truncate(loc,
++                                                              "overwriting",
++                                                              orig_clusters);
++                              goto out;
++                      }
+               }
+-      }
+-      ocfs2_xa_wipe_namevalue(loc);
++              ocfs2_xa_wipe_namevalue(loc);
++      } else
++              ocfs2_xa_add_entry(loc, name_hash);
+       /*
+        * If we get here, we have a blank entry.  Fill it.  We grow our
diff --git a/queue-5.3/series b/queue-5.3/series
index 8dcc1974b4a6a63a895faa1893274f0c29b06d72..0a559fbb40b0e01013b9bfb8b31e691d8ff0679f 100644 (file)
@@ -16,3 +16,21 @@ net-ipv4-fix-sysctl-max-for-fib_multipath_hash_policy.patch
 net-mlx5e-fix-error-flow-cleanup-in-mlx5e_tc_tun_create_header_ipv4-6.patch
 net-mlx5e-do-not-use-non-ext-link-modes-in-ext-mode.patch
 net-mlx5-update-the-list-of-the-pci-supported-devices.patch
+vhost-vsock-split-packets-to-send-using-multiple-buffers.patch
+gpio-max77620-fixup-debounce-delays.patch
+gpio-bd70528-use-correct-unit-for-debounce-times.patch
+tools-gpio-correctly-add-make-dependencies-for-gpio_utils.patch
+fork-fix-pidfd_poll-s-return-type.patch
+nbd-fix-memory-leak-in-nbd_get_socket.patch
+virtio_console-allocate-inbufs-in-add_port-only-if-it-is-needed.patch
+virtio_ring-fix-return-code-on-dma-mapping-fails.patch
+virtio_balloon-fix-shrinker-count.patch
+revert-fs-ocfs2-fix-possible-null-pointer-dereferences-in-ocfs2_xa_prepare_entry.patch
+mm-memory_hotplug-don-t-access-uninitialized-memmaps-in-shrink_zone_span.patch
+mm-ksm.c-don-t-warn-if-page-is-still-mapped-in-remove_stable_node.patch
+drm-amdgpu-disable-gfxoff-when-using-register-read-interface.patch
+drm-amdgpu-disable-gfxoff-on-original-raven.patch
+drm-amd-powerplay-issue-no-ppsmc_msg_getcurrpkgpwr-on-unsupported-asics.patch
+drm-i915-don-t-oops-in-dumb_create-ioctl-if-we-have-no-crtcs.patch
+drm-i915-pmu-frequency-is-reported-as-accumulated-cycles.patch
+drm-i915-userptr-try-to-acquire-the-page-lock-around-set_page_dirty.patch
diff --git a/queue-5.3/tools-gpio-correctly-add-make-dependencies-for-gpio_utils.patch b/queue-5.3/tools-gpio-correctly-add-make-dependencies-for-gpio_utils.patch
new file mode 100644 (file)
index 0000000..e5c58b0
--- /dev/null
@@ -0,0 +1,74 @@
+From 0161a94e2d1c713bd34d72bc0239d87c31747bf7 Mon Sep 17 00:00:00 2001
+From: Laura Abbott <labbott@redhat.com>
+Date: Tue, 12 Nov 2019 17:10:26 -0500
+Subject: tools: gpio: Correctly add make dependencies for gpio_utils
+
+From: Laura Abbott <labbott@redhat.com>
+
+commit 0161a94e2d1c713bd34d72bc0239d87c31747bf7 upstream.
+
+gpio tools fail to build correctly with make parallelization:
+
+$ make -s -j24
+ld: gpio-utils.o: file not recognized: file truncated
+make[1]: *** [/home/labbott/linux_upstream/tools/build/Makefile.build:145: lsgpio-in.o] Error 1
+make: *** [Makefile:43: lsgpio-in.o] Error 2
+make: *** Waiting for unfinished jobs....
+
+This is because gpio-utils.o is used across multiple targets.
+Fix this by making gpio-utils.o a proper dependency.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Laura Abbott <labbott@redhat.com>
+Signed-off-by: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/gpio/Build    |    1 +
+ tools/gpio/Makefile |   10 +++++++---
+ 2 files changed, 8 insertions(+), 3 deletions(-)
+
+--- a/tools/gpio/Build
++++ b/tools/gpio/Build
+@@ -1,3 +1,4 @@
++gpio-utils-y += gpio-utils.o
+ lsgpio-y += lsgpio.o gpio-utils.o
+ gpio-hammer-y += gpio-hammer.o gpio-utils.o
+ gpio-event-mon-y += gpio-event-mon.o gpio-utils.o
+--- a/tools/gpio/Makefile
++++ b/tools/gpio/Makefile
+@@ -35,11 +35,15 @@ $(OUTPUT)include/linux/gpio.h: ../../inc
+ prepare: $(OUTPUT)include/linux/gpio.h
++GPIO_UTILS_IN := $(output)gpio-utils-in.o
++$(GPIO_UTILS_IN): prepare FORCE
++      $(Q)$(MAKE) $(build)=gpio-utils
++
+ #
+ # lsgpio
+ #
+ LSGPIO_IN := $(OUTPUT)lsgpio-in.o
+-$(LSGPIO_IN): prepare FORCE
++$(LSGPIO_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
+       $(Q)$(MAKE) $(build)=lsgpio
+ $(OUTPUT)lsgpio: $(LSGPIO_IN)
+       $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+@@ -48,7 +52,7 @@ $(OUTPUT)lsgpio: $(LSGPIO_IN)
+ # gpio-hammer
+ #
+ GPIO_HAMMER_IN := $(OUTPUT)gpio-hammer-in.o
+-$(GPIO_HAMMER_IN): prepare FORCE
++$(GPIO_HAMMER_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
+       $(Q)$(MAKE) $(build)=gpio-hammer
+ $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN)
+       $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+@@ -57,7 +61,7 @@ $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN)
+ # gpio-event-mon
+ #
+ GPIO_EVENT_MON_IN := $(OUTPUT)gpio-event-mon-in.o
+-$(GPIO_EVENT_MON_IN): prepare FORCE
++$(GPIO_EVENT_MON_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
+       $(Q)$(MAKE) $(build)=gpio-event-mon
+ $(OUTPUT)gpio-event-mon: $(GPIO_EVENT_MON_IN)
+       $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
diff --git a/queue-5.3/vhost-vsock-split-packets-to-send-using-multiple-buffers.patch b/queue-5.3/vhost-vsock-split-packets-to-send-using-multiple-buffers.patch
new file mode 100644 (file)
index 0000000..c8f2a1b
--- /dev/null
@@ -0,0 +1,158 @@
+From 6dbd3e66e7785a2f055bf84d98de9b8fd31ff3f5 Mon Sep 17 00:00:00 2001
+From: Stefano Garzarella <sgarzare@redhat.com>
+Date: Tue, 30 Jul 2019 17:43:33 +0200
+Subject: vhost/vsock: split packets to send using multiple buffers
+
+From: Stefano Garzarella <sgarzare@redhat.com>
+
+commit 6dbd3e66e7785a2f055bf84d98de9b8fd31ff3f5 upstream.
+
+If the packets to send to the guest are bigger than the buffer
+available, we can split them, using multiple buffers and fixing
+the length in the packet header.
+This is safe since virtio-vsock supports only stream sockets.
+
+Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
+Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/vhost/vsock.c                   |   66 +++++++++++++++++++++++---------
+ net/vmw_vsock/virtio_transport_common.c |   15 +++++--
+ 2 files changed, 60 insertions(+), 21 deletions(-)
+
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -102,7 +102,7 @@ vhost_transport_do_send_pkt(struct vhost
+               struct iov_iter iov_iter;
+               unsigned out, in;
+               size_t nbytes;
+-              size_t len;
++              size_t iov_len, payload_len;
+               int head;
+               spin_lock_bh(&vsock->send_pkt_list_lock);
+@@ -147,8 +147,24 @@ vhost_transport_do_send_pkt(struct vhost
+                       break;
+               }
+-              len = iov_length(&vq->iov[out], in);
+-              iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);
++              iov_len = iov_length(&vq->iov[out], in);
++              if (iov_len < sizeof(pkt->hdr)) {
++                      virtio_transport_free_pkt(pkt);
++                      vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
++                      break;
++              }
++
++              iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
++              payload_len = pkt->len - pkt->off;
++
++              /* If the packet is greater than the space available in the
++               * buffer, we split it using multiple buffers.
++               */
++              if (payload_len > iov_len - sizeof(pkt->hdr))
++                      payload_len = iov_len - sizeof(pkt->hdr);
++
++              /* Set the correct length in the header */
++              pkt->hdr.len = cpu_to_le32(payload_len);
+               nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
+               if (nbytes != sizeof(pkt->hdr)) {
+@@ -157,33 +173,47 @@ vhost_transport_do_send_pkt(struct vhost
+                       break;
+               }
+-              nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
+-              if (nbytes != pkt->len) {
++              nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
++                                    &iov_iter);
++              if (nbytes != payload_len) {
+                       virtio_transport_free_pkt(pkt);
+                       vq_err(vq, "Faulted on copying pkt buf\n");
+                       break;
+               }
+-              vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
++              vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
+               added = true;
+-              if (pkt->reply) {
+-                      int val;
+-
+-                      val = atomic_dec_return(&vsock->queued_replies);
+-
+-                      /* Do we have resources to resume tx processing? */
+-                      if (val + 1 == tx_vq->num)
+-                              restart_tx = true;
+-              }
+-
+               /* Deliver to monitoring devices all correctly transmitted
+                * packets.
+                */
+               virtio_transport_deliver_tap_pkt(pkt);
+-              total_len += pkt->len;
+-              virtio_transport_free_pkt(pkt);
++              pkt->off += payload_len;
++              total_len += payload_len;
++
++              /* If we didn't send all the payload we can requeue the packet
++               * to send it with the next available buffer.
++               */
++              if (pkt->off < pkt->len) {
++                      spin_lock_bh(&vsock->send_pkt_list_lock);
++                      list_add(&pkt->list, &vsock->send_pkt_list);
++                      spin_unlock_bh(&vsock->send_pkt_list_lock);
++              } else {
++                      if (pkt->reply) {
++                              int val;
++
++                              val = atomic_dec_return(&vsock->queued_replies);
++
++                              /* Do we have resources to resume tx
++                               * processing?
++                               */
++                              if (val + 1 == tx_vq->num)
++                                      restart_tx = true;
++                      }
++
++                      virtio_transport_free_pkt(pkt);
++              }
+       } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
+       if (added)
+               vhost_signal(&vsock->dev, vq);
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -91,8 +91,17 @@ static struct sk_buff *virtio_transport_
+       struct virtio_vsock_pkt *pkt = opaque;
+       struct af_vsockmon_hdr *hdr;
+       struct sk_buff *skb;
++      size_t payload_len;
++      void *payload_buf;
+-      skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + pkt->len,
++      /* A packet could be split to fit the RX buffer, so we can retrieve
++       * the payload length from the header and the buffer pointer taking
++       * care of the offset in the original packet.
++       */
++      payload_len = le32_to_cpu(pkt->hdr.len);
++      payload_buf = pkt->buf + pkt->off;
++
++      skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + payload_len,
+                       GFP_ATOMIC);
+       if (!skb)
+               return NULL;
+@@ -132,8 +141,8 @@ static struct sk_buff *virtio_transport_
+       skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr));
+-      if (pkt->len) {
+-              skb_put_data(skb, pkt->buf, pkt->len);
++      if (payload_len) {
++              skb_put_data(skb, payload_buf, payload_len);
+       }
+       return skb;
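The core of the change is the splitting arithmetic: cap each transmission to what fits in one guest buffer after the header, set the header length accordingly, and requeue the remainder. A standalone sketch of that loop follows; the 44-byte header size and 4 KiB buffer size are assumptions chosen for illustration, not values taken from the patch:

    #include <stdio.h>

    #define HDR_SIZE 44u          /* assumed virtio_vsock packet header size */
    #define BUF_SIZE 4096u        /* assumed size of one guest RX buffer */

    int main(void)
    {
        unsigned int pkt_len = 10000;   /* payload bytes queued for the guest */
        unsigned int off = 0;

        /* Each pass consumes one buffer: cap the payload to what fits after
         * the header, rewrite hdr.len, and requeue the remainder.
         */
        while (off < pkt_len) {
            unsigned int payload = pkt_len - off;

            if (payload > BUF_SIZE - HDR_SIZE)
                payload = BUF_SIZE - HDR_SIZE;

            printf("buffer: hdr.len=%u, payload bytes [%u, %u)\n",
                   payload, off, off + payload);
            off += payload;
        }
        return 0;
    }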
diff --git a/queue-5.3/virtio_balloon-fix-shrinker-count.patch b/queue-5.3/virtio_balloon-fix-shrinker-count.patch
new file mode 100644 (file)
index 0000000..3ceb4ff
--- /dev/null
@@ -0,0 +1,35 @@
+From c9a6820fc0da2603be3054ee7590eb9f350508a7 Mon Sep 17 00:00:00 2001
+From: Wei Wang <wei.w.wang@intel.com>
+Date: Tue, 19 Nov 2019 05:02:33 -0500
+Subject: virtio_balloon: fix shrinker count
+
+From: Wei Wang <wei.w.wang@intel.com>
+
+commit c9a6820fc0da2603be3054ee7590eb9f350508a7 upstream.
+
+Instead of multiplying by page order, virtio balloon divided by page
+order. The result is that it can return 0 if there are a bit less
+than MAX_ORDER - 1 pages in use, and then shrinker scan won't be called.
+
+Cc: stable@vger.kernel.org
+Fixes: 71994620bb25 ("virtio_balloon: replace oom notifier with shrinker")
+Signed-off-by: Wei Wang <wei.w.wang@intel.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/virtio/virtio_balloon.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -820,7 +820,7 @@ static unsigned long virtio_balloon_shri
+       unsigned long count;
+       count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
+-      count += vb->num_free_page_blocks >> VIRTIO_BALLOON_FREE_PAGE_ORDER;
++      count += vb->num_free_page_blocks << VIRTIO_BALLOON_FREE_PAGE_ORDER;
+       return count;
+ }
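The arithmetic behind the fix, as a standalone sketch: each free-page block holds 2^order pages, so the count must shift left by the order rather than right. The order value of 10 below is an assumption (MAX_ORDER - 1 on a default x86 configuration):

    #include <stdio.h>

    int main(void)
    {
        /* Assumed block order: VIRTIO_BALLOON_FREE_PAGE_ORDER is
         * MAX_ORDER - 1, i.e. 10 with the default x86 configuration.
         */
        const unsigned int order = 10;
        unsigned long num_free_page_blocks = 4;   /* example value */

        /* Buggy count: shifting right reports 0 pages whenever fewer than
         * 1 << order blocks are in use, so shrinker scan is never called.
         */
        unsigned long buggy = num_free_page_blocks >> order;

        /* Fixed count: each block contains 2^order pages. */
        unsigned long fixed = num_free_page_blocks << order;

        printf("buggy: %lu pages, fixed: %lu pages\n", buggy, fixed);
        return 0;
    }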
diff --git a/queue-5.3/virtio_console-allocate-inbufs-in-add_port-only-if-it-is-needed.patch b/queue-5.3/virtio_console-allocate-inbufs-in-add_port-only-if-it-is-needed.patch
new file mode 100644 (file)
index 0000000..251577c
--- /dev/null
@@ -0,0 +1,130 @@
+From d791cfcbf98191122af70b053a21075cb450d119 Mon Sep 17 00:00:00 2001
+From: Laurent Vivier <lvivier@redhat.com>
+Date: Thu, 14 Nov 2019 13:25:48 +0100
+Subject: virtio_console: allocate inbufs in add_port() only if it is needed
+
+From: Laurent Vivier <lvivier@redhat.com>
+
+commit d791cfcbf98191122af70b053a21075cb450d119 upstream.
+
+When we hot unplug a virtserialport and then try to hot plug again,
+it fails:
+
+(qemu) chardev-add socket,id=serial0,path=/tmp/serial0,server,nowait
+(qemu) device_add virtserialport,bus=virtio-serial0.0,nr=2,\
+                  chardev=serial0,id=serial0,name=serial0
+(qemu) device_del serial0
+(qemu) device_add virtserialport,bus=virtio-serial0.0,nr=2,\
+                  chardev=serial0,id=serial0,name=serial0
+kernel error:
+  virtio-ports vport2p2: Error allocating inbufs
+qemu error:
+  virtio-serial-bus: Guest failure in adding port 2 for device \
+                     virtio-serial0.0
+
+This happens because buffers for the in_vq are allocated when the port is
+added but are not released when the port is unplugged.
+
+They are only released when virtconsole is removed (see a7a69ec0d8e4)
+
+To avoid the problem and to be symmetric, we could allocate all the buffers
+in init_vqs() as they are released in remove_vqs(), but it sounds like
+a waste of memory.
+
+Rather than that, this patch changes add_port() logic to ignore ENOSPC
+error in fill_queue(), which means the queue has already been filled.
+
+Fixes: a7a69ec0d8e4 ("virtio_console: free buffers after reset")
+Cc: mst@redhat.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Laurent Vivier <lvivier@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/virtio_console.c |   28 +++++++++++++---------------
+ 1 file changed, 13 insertions(+), 15 deletions(-)
+
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -1325,24 +1325,24 @@ static void set_console_size(struct port
+       port->cons.ws.ws_col = cols;
+ }
+-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
++static int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+ {
+       struct port_buffer *buf;
+-      unsigned int nr_added_bufs;
++      int nr_added_bufs;
+       int ret;
+       nr_added_bufs = 0;
+       do {
+               buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
+               if (!buf)
+-                      break;
++                      return -ENOMEM;
+               spin_lock_irq(lock);
+               ret = add_inbuf(vq, buf);
+               if (ret < 0) {
+                       spin_unlock_irq(lock);
+                       free_buf(buf, true);
+-                      break;
++                      return ret;
+               }
+               nr_added_bufs++;
+               spin_unlock_irq(lock);
+@@ -1362,7 +1362,6 @@ static int add_port(struct ports_device
+       char debugfs_name[16];
+       struct port *port;
+       dev_t devt;
+-      unsigned int nr_added_bufs;
+       int err;
+       port = kmalloc(sizeof(*port), GFP_KERNEL);
+@@ -1421,11 +1420,13 @@ static int add_port(struct ports_device
+       spin_lock_init(&port->outvq_lock);
+       init_waitqueue_head(&port->waitqueue);
+-      /* Fill the in_vq with buffers so the host can send us data. */
+-      nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
+-      if (!nr_added_bufs) {
++      /* We can safely ignore ENOSPC because it means
++       * the queue already has buffers. Buffers are removed
++       * only by virtcons_remove(), not by unplug_port()
++       */
++      err = fill_queue(port->in_vq, &port->inbuf_lock);
++      if (err < 0 && err != -ENOSPC) {
+               dev_err(port->dev, "Error allocating inbufs\n");
+-              err = -ENOMEM;
+               goto free_device;
+       }
+@@ -2059,14 +2060,11 @@ static int virtcons_probe(struct virtio_
+       INIT_WORK(&portdev->control_work, &control_work_handler);
+       if (multiport) {
+-              unsigned int nr_added_bufs;
+-
+               spin_lock_init(&portdev->c_ivq_lock);
+               spin_lock_init(&portdev->c_ovq_lock);
+-              nr_added_bufs = fill_queue(portdev->c_ivq,
+-                                         &portdev->c_ivq_lock);
+-              if (!nr_added_bufs) {
++              err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
++              if (err < 0) {
+                       dev_err(&vdev->dev,
+                               "Error allocating buffers for control queue\n");
+                       /*
+@@ -2077,7 +2075,7 @@ static int virtcons_probe(struct virtio_
+                                          VIRTIO_CONSOLE_DEVICE_READY, 0);
+                       /* Device was functional: we need full cleanup. */
+                       virtcons_remove(vdev);
+-                      return -ENOMEM;
++                      return err;
+               }
+       } else {
+               /*
diff --git a/queue-5.3/virtio_ring-fix-return-code-on-dma-mapping-fails.patch b/queue-5.3/virtio_ring-fix-return-code-on-dma-mapping-fails.patch
new file mode 100644 (file)
index 0000000..a262253
--- /dev/null
@@ -0,0 +1,53 @@
+From f7728002c1c7bfa787b276a31c3ef458739b8e7c Mon Sep 17 00:00:00 2001
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Thu, 14 Nov 2019 13:46:46 +0100
+Subject: virtio_ring: fix return code on DMA mapping fails
+
+From: Halil Pasic <pasic@linux.ibm.com>
+
+commit f7728002c1c7bfa787b276a31c3ef458739b8e7c upstream.
+
+Commit 780bc7903a32 ("virtio_ring: Support DMA APIs")  makes
+virtqueue_add() return -EIO when we fail to map our I/O buffers. This is
+a very realistic scenario for guests with encrypted memory, as swiotlb
+may run out of space, depending on its size and the I/O load.
+
+The virtio-blk driver interprets -EIO from virtqueue_add() as an I/O
+error, despite the fact that a full swiotlb is, in the absence of bugs,
+a recoverable condition.
+
+Let us change the return code to -ENOMEM, and make the block layer
+recover from these failures when virtio-blk encounters the condition
+described above.
+
+Cc: stable@vger.kernel.org
+Fixes: 780bc7903a32 ("virtio_ring: Support DMA APIs")
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Tested-by: Michael Mueller <mimu@linux.ibm.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/virtio/virtio_ring.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -583,7 +583,7 @@ unmap_release:
+               kfree(desc);
+       END_USE(vq);
+-      return -EIO;
++      return -ENOMEM;
+ }
+ static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
+@@ -1085,7 +1085,7 @@ unmap_release:
+       kfree(desc);
+       END_USE(vq);
+-      return -EIO;
++      return -ENOMEM;
+ }
+ static inline int virtqueue_add_packed(struct virtqueue *_vq,