--- /dev/null
+From c57040d333c6729ce99c2cb95061045ff84c89ea Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 14 Nov 2019 11:39:05 -0500
+Subject: drm/amdgpu: disable gfxoff when using register read interface
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit c57040d333c6729ce99c2cb95061045ff84c89ea upstream.
+
+When gfxoff is enabled, accessing gfx registers via MMIO
+can lead to a hang.
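+
+A condensed sketch of the fix (not the full ioctl: locals and the
+kfree() on the error path are elided, and count/offset stand in for
+the info->read_mmr_reg fields) showing the bracketing pattern:
+
+	/* Hold GFX out of gfxoff while we touch its registers via MMIO. */
+	amdgpu_gfx_off_ctrl(adev, false);
+	for (i = 0; i < count; i++) {
+		if (amdgpu_asic_read_register(adev, se_num, sh_num,
+					      offset + i, &regs[i])) {
+			amdgpu_gfx_off_ctrl(adev, true);	/* re-allow gfxoff */
+			return -EFAULT;
+		}
+	}
+	amdgpu_gfx_off_ctrl(adev, true);	/* re-allow gfxoff */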
+
+Bug: https://bugzilla.kernel.org/show_bug.cgi?id=205497
+Acked-by: Xiaojie Yuan <xiaojie.yuan@amd.com>
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -513,15 +513,19 @@ static int amdgpu_info_ioctl(struct drm_
+ return -ENOMEM;
+ alloc_size = info->read_mmr_reg.count * sizeof(*regs);
+
+- for (i = 0; i < info->read_mmr_reg.count; i++)
++ amdgpu_gfx_off_ctrl(adev, false);
++ for (i = 0; i < info->read_mmr_reg.count; i++) {
+ if (amdgpu_asic_read_register(adev, se_num, sh_num,
+ info->read_mmr_reg.dword_offset + i,
+ &regs[i])) {
+ DRM_DEBUG_KMS("unallowed offset %#x\n",
+ info->read_mmr_reg.dword_offset + i);
+ kfree(regs);
++ amdgpu_gfx_off_ctrl(adev, true);
+ return -EFAULT;
+ }
++ }
++ amdgpu_gfx_off_ctrl(adev, true);
+ n = copy_to_user(out, regs, min(size, alloc_size));
+ kfree(regs);
+ return n ? -EFAULT : 0;
--- /dev/null
+From 2d691aeca4aecbb8d0414a777a46981a8e142b05 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Mon, 11 Nov 2019 13:32:03 +0000
+Subject: drm/i915/userptr: Try to acquire the page lock around set_page_dirty()
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 2d691aeca4aecbb8d0414a777a46981a8e142b05 upstream.
+
+set_page_dirty says:
+
+ For pages with a mapping this should be done under the page lock
+ for the benefit of asynchronous memory errors who prefer a
+ consistent dirty state. This rule can be broken in some special
+ cases, but should be better not to.
+
+Under those rules, it is only safe for us to use the plain set_page_dirty
+calls for shmemfs/anonymous memory. Userptr may be used with real
+mappings and so needs to use the locked version (set_page_dirty_lock).
+
+However, following a try_to_unmap() we may want to remove the userptr
+and so call put_pages(). Since try_to_unmap() acquires the page lock,
+we must avoid recursively locking the pages ourselves -- which means
+that we cannot safely acquire the lock around set_page_dirty(). Since
+we can't be sure of the lock, we have to either risk skipping the
+dirtying of the page, or risk calling set_page_dirty() without a lock
+and so risk fs corruption.
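+
+A condensed sketch of the resulting compromise (the full hunk is in
+the diff below):
+
+	if (obj->mm.dirty && trylock_page(page)) {
+		/* Lock held: safe even for pages on a real mapping. */
+		set_page_dirty(page);
+		unlock_page(page);
+	}
+	/* trylock failed: the lock is already held (e.g. by migration),
+	 * so skip dirtying rather than deadlock or risk fs corruption.
+	 */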
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=203317
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=112012
+Fixes: 5cc9ed4b9a7a ("drm/i915: Introduce mapping of user pages into video memory (userptr) ioctl")
+References: cb6d7c7dc7ff ("drm/i915/userptr: Acquire the page lock around set_page_dirty()")
+References: 505a8ec7e11a ("Revert "drm/i915/userptr: Acquire the page lock around set_page_dirty()"")
+References: 6dcc693bc57f ("ext4: warn when page is dirtied without buffers")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191111133205.11590-1-chris@chris-wilson.co.uk
+(cherry picked from commit 0d4bbe3d407f79438dc4f87943db21f7134cfc65)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+(cherry picked from commit cee7fb437edcdb2f9f8affa959e274997f5dca4d)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/i915/i915_gem_userptr.c | 22 +++++++++++++++++++++-
+ 1 file changed, 21 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
++++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
+@@ -690,8 +690,28 @@ i915_gem_userptr_put_pages(struct drm_i9
+ i915_gem_gtt_finish_pages(obj, pages);
+
+ for_each_sgt_page(page, sgt_iter, pages) {
+- if (obj->mm.dirty)
++ if (obj->mm.dirty && trylock_page(page)) {
++ /*
++ * As this may not be anonymous memory (e.g. shmem)
++ * but exist on a real mapping, we have to lock
++ * the page in order to dirty it -- holding
++ * the page reference is not sufficient to
++ * prevent the inode from being truncated.
++ * Play safe and take the lock.
++ *
++ * However...!
++ *
++ * The mmu-notifier can be invalidated for a
++ * migrate_page, that is already holding the lock
++ * on the page. Such a try_to_unmap() will result
++ * in us calling put_pages() and so recursively try
++ * to lock the page. We avoid that deadlock with
++ * a trylock_page() and in exchange we risk missing
++ * some page dirtying.
++ */
+ set_page_dirty(page);
++ unlock_page(page);
++ }
+
+ mark_page_accessed(page);
+ put_page(page);
--- /dev/null
+From b0391479ae04dfcbd208b9571c375064caad9a57 Mon Sep 17 00:00:00 2001
+From: Thierry Reding <treding@nvidia.com>
+Date: Fri, 8 Nov 2019 17:07:46 +0100
+Subject: gpio: max77620: Fixup debounce delays
+
+From: Thierry Reding <treding@nvidia.com>
+
+commit b0391479ae04dfcbd208b9571c375064caad9a57 upstream.
+
+When converting milliseconds to microseconds in commit fffa6af94894
+("gpio: max77620: Use correct unit for debounce times") some ~1 ms gaps
+were introduced between the various ranges supported by the controller.
+Fix this by changing the start of each range to the value immediately
+following the end of the previous range. This way a debounce time of,
+say, 8250 us will translate into 16 ms instead of returning an -EINVAL
+error.
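+
+Condensed, the corrected mapping looks like this (debounce_us is a
+stand-in name for the requested debounce time; the old range starts
+are noted in the comments):
+
+	switch (debounce_us) {
+	case 0:
+		val = MAX77620_CNFG_GPIO_DBNC_None;
+		break;
+	case 1 ... 8000:	/* was 1000 ... 8000 */
+		val = MAX77620_CNFG_GPIO_DBNC_8ms;
+		break;
+	case 8001 ... 16000:	/* was 9000 ... 16000, an 8001-8999 us gap */
+		val = MAX77620_CNFG_GPIO_DBNC_16ms;
+		break;
+	case 16001 ... 32000:	/* was 17000 ... 32000, a 16001-16999 us gap */
+		val = MAX77620_CNFG_GPIO_DBNC_32ms;
+		break;
+	default:
+		return -EINVAL;
+	}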
+
+Typically the debounce delay is only ever set through device tree and
+specified in milliseconds, so we can never really hit this issue because
+debounce times are always a multiple of 1000 us.
+
+The only notable exception for this is drivers/mmc/host/mmc-spi.c where
+the CD GPIO is requested, which passes a 1 us debounce time. According
+to a comment preceding that code, this should actually be 1 ms (i.e.
+1000 us).
+
+Reported-by: Pavel Machek <pavel@denx.de>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Acked-by: Pavel Machek <pavel@denx.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpio/gpio-max77620.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpio/gpio-max77620.c
++++ b/drivers/gpio/gpio-max77620.c
+@@ -163,13 +163,13 @@ static int max77620_gpio_set_debounce(st
+ case 0:
+ val = MAX77620_CNFG_GPIO_DBNC_None;
+ break;
+- case 1000 ... 8000:
++ case 1 ... 8000:
+ val = MAX77620_CNFG_GPIO_DBNC_8ms;
+ break;
+- case 9000 ... 16000:
++ case 8001 ... 16000:
+ val = MAX77620_CNFG_GPIO_DBNC_16ms;
+ break;
+- case 17000 ... 32000:
++ case 16001 ... 32000:
+ val = MAX77620_CNFG_GPIO_DBNC_32ms;
+ break;
+ default:
--- /dev/null
+From 9a63236f1ad82d71a98aa80320b6cb618fb32f44 Mon Sep 17 00:00:00 2001
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Date: Thu, 21 Nov 2019 17:54:01 -0800
+Subject: mm/ksm.c: don't WARN if page is still mapped in remove_stable_node()
+
+From: Andrey Ryabinin <aryabinin@virtuozzo.com>
+
+commit 9a63236f1ad82d71a98aa80320b6cb618fb32f44 upstream.
+
+It's possible to hit the WARN_ON_ONCE(page_mapped(page)) in
+remove_stable_node() when it races with __mmput() and squeezes in
+between ksm_exit() and exit_mmap().
+
+ WARNING: CPU: 0 PID: 3295 at mm/ksm.c:888 remove_stable_node+0x10c/0x150
+
+ Call Trace:
+ remove_all_stable_nodes+0x12b/0x330
+ run_store+0x4ef/0x7b0
+ kernfs_fop_write+0x200/0x420
+ vfs_write+0x154/0x450
+ ksys_write+0xf9/0x1d0
+ do_syscall_64+0x99/0x510
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+Remove the warning as there is nothing scary going on.
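+
+A comment-only sketch of the interleaving (call chains taken from the
+trace above and from __mmput() as described in the message):
+
+	/*
+	 * sysfs writer                       exiting task
+	 * ------------                       ------------
+	 * run_store()                        __mmput()
+	 *   remove_all_stable_nodes()          ksm_exit(mm)
+	 *     remove_stable_node()             ...window...
+	 *       page_mapped(page) == true      exit_mmap(mm) <- unmaps page
+	 */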
+
+Link: http://lkml.kernel.org/r/20191119131850.5675-1-aryabinin@virtuozzo.com
+Fixes: cbf86cfe04a6 ("ksm: remove old stable nodes more thoroughly")
+Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Acked-by: Hugh Dickins <hughd@google.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/ksm.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -849,13 +849,13 @@ static int remove_stable_node(struct sta
+ return 0;
+ }
+
+- if (WARN_ON_ONCE(page_mapped(page))) {
+- /*
+- * This should not happen: but if it does, just refuse to let
+- * merge_across_nodes be switched - there is no need to panic.
+- */
+- err = -EBUSY;
+- } else {
++ /*
++ * Page could be still mapped if this races with __mmput() running in
++ * between ksm_exit() and exit_mmap(). Just refuse to let
++ * merge_across_nodes/max_page_sharing be switched.
++ */
++ err = -EBUSY;
++ if (!page_mapped(page)) {
+ /*
+ * The stable node did not yet appear stale to get_ksm_page(),
+ * since that allows for an unmapped ksm page to be recognized
--- /dev/null
+From dff10bbea4be47bdb615b036c834a275b7c68133 Mon Sep 17 00:00:00 2001
+From: Sun Ke <sunke32@huawei.com>
+Date: Tue, 19 Nov 2019 14:09:11 +0800
+Subject: nbd: fix memory leak in nbd_get_socket()
+
+From: Sun Ke <sunke32@huawei.com>
+
+commit dff10bbea4be47bdb615b036c834a275b7c68133 upstream.
+
+Before returning NULL, put the sock first.
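+
+A minimal sketch of the corrected error path (assuming, as is usual
+for such helpers, that the reference comes from sockfd_lookup()):
+
+	sock = sockfd_lookup(fd, err);
+	if (!sock)
+		return NULL;
+
+	if (sock->ops->shutdown == sock_no_shutdown) {
+		*err = -EINVAL;
+		sockfd_put(sock);	/* drop the reference taken above */
+		return NULL;
+	}
+
+	return sock;
+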
+
+Cc: stable@vger.kernel.org
+Fixes: cf1b2326b734 ("nbd: verify socket is supported during setup")
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Reviewed-by: Mike Christie <mchristi@redhat.com>
+Signed-off-by: Sun Ke <sunke32@huawei.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/nbd.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -931,6 +931,7 @@ static struct socket *nbd_get_socket(str
+ if (sock->ops->shutdown == sock_no_shutdown) {
+ dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
+ *err = -EINVAL;
++ sockfd_put(sock);
+ return NULL;
+ }
+
--- /dev/null
+From 94b07b6f9e2e996afff7395de6b35f34f4cb10bf Mon Sep 17 00:00:00 2001
+From: Joseph Qi <joseph.qi@linux.alibaba.com>
+Date: Thu, 21 Nov 2019 17:53:52 -0800
+Subject: Revert "fs: ocfs2: fix possible null-pointer dereferences in ocfs2_xa_prepare_entry()"
+
+From: Joseph Qi <joseph.qi@linux.alibaba.com>
+
+commit 94b07b6f9e2e996afff7395de6b35f34f4cb10bf upstream.
+
+This reverts commit 56e94ea132bb5c2c1d0b60a6aeb34dcb7d71a53d.
+
+Commit 56e94ea132bb ("fs: ocfs2: fix possible null-pointer dereferences
+in ocfs2_xa_prepare_entry()") introduces a regression: directory
+creation fails with the mount options user_xattr and acl. Actually the
+reported NULL pointer dereference case can be correctly handled by
+loc->xl_ops->xlo_add_entry(), so revert it.
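+
+A condensed sketch of the control flow this revert restores in
+ocfs2_xa_prepare_entry():
+
+	if (loc->xl_entry) {
+		if (ocfs2_xa_can_reuse_entry(loc, xi)) {
+			/* reuse the existing entry in place */
+		} else {
+			/* truncate any out-of-place value, then... */
+			ocfs2_xa_wipe_namevalue(loc);
+		}
+	} else
+		ocfs2_xa_add_entry(loc, name_hash);	/* the restored path */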
+
+Link: http://lkml.kernel.org/r/1573624916-83825-1-git-send-email-joseph.qi@linux.alibaba.com
+Fixes: 56e94ea132bb ("fs: ocfs2: fix possible null-pointer dereferences in ocfs2_xa_prepare_entry()")
+Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com>
+Reported-by: Thomas Voegtle <tv@lio96.de>
+Acked-by: Changwei Ge <gechangwei@live.cn>
+Cc: Jia-Ju Bai <baijiaju1990@gmail.com>
+Cc: Mark Fasheh <mark@fasheh.com>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Cc: Gang He <ghe@suse.com>
+Cc: Jun Piao <piaojun@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ocfs2/xattr.c | 56 ++++++++++++++++++++++++++++++++-----------------------
+ 1 file changed, 33 insertions(+), 23 deletions(-)
+
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -1497,6 +1497,18 @@ static int ocfs2_xa_check_space(struct o
+ return loc->xl_ops->xlo_check_space(loc, xi);
+ }
+
++static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
++{
++ loc->xl_ops->xlo_add_entry(loc, name_hash);
++ loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
++ /*
++ * We can't leave the new entry's xe_name_offset at zero or
++ * add_namevalue() will go nuts. We set it to the size of our
++ * storage so that it can never be less than any other entry.
++ */
++ loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
++}
++
+ static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
+ struct ocfs2_xattr_info *xi)
+ {
+@@ -2128,31 +2140,29 @@ static int ocfs2_xa_prepare_entry(struct
+ if (rc)
+ goto out;
+
+- if (!loc->xl_entry) {
+- rc = -EINVAL;
+- goto out;
+- }
+-
+- if (ocfs2_xa_can_reuse_entry(loc, xi)) {
+- orig_value_size = loc->xl_entry->xe_value_size;
+- rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
+- if (rc)
+- goto out;
+- goto alloc_value;
+- }
++ if (loc->xl_entry) {
++ if (ocfs2_xa_can_reuse_entry(loc, xi)) {
++ orig_value_size = loc->xl_entry->xe_value_size;
++ rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
++ if (rc)
++ goto out;
++ goto alloc_value;
++ }
+
+- if (!ocfs2_xattr_is_local(loc->xl_entry)) {
+- orig_clusters = ocfs2_xa_value_clusters(loc);
+- rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+- if (rc) {
+- mlog_errno(rc);
+- ocfs2_xa_cleanup_value_truncate(loc,
+- "overwriting",
+- orig_clusters);
+- goto out;
++ if (!ocfs2_xattr_is_local(loc->xl_entry)) {
++ orig_clusters = ocfs2_xa_value_clusters(loc);
++ rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
++ if (rc) {
++ mlog_errno(rc);
++ ocfs2_xa_cleanup_value_truncate(loc,
++ "overwriting",
++ orig_clusters);
++ goto out;
++ }
+ }
+- }
+- ocfs2_xa_wipe_namevalue(loc);
++ ocfs2_xa_wipe_namevalue(loc);
++ } else
++ ocfs2_xa_add_entry(loc, name_hash);
+
+ /*
+ * If we get here, we have a blank entry. Fill it. We grow our
 net-mlx5e-fix-set-vf-link-state-error-flow.patch
 net-mlxfw-verify-fsm-error-code-translation-doesn-t-exceed-array-size.patch
 net-sched-act_pedit-fix-warn-in-the-traffic-path.patch
+vhost-vsock-split-packets-to-send-using-multiple-buffers.patch
+gpio-max77620-fixup-debounce-delays.patch
+tools-gpio-correctly-add-make-dependencies-for-gpio_utils.patch
+nbd-fix-memory-leak-in-nbd_get_socket.patch
+virtio_console-allocate-inbufs-in-add_port-only-if-it-is-needed.patch
+revert-fs-ocfs2-fix-possible-null-pointer-dereferences-in-ocfs2_xa_prepare_entry.patch
+mm-ksm.c-don-t-warn-if-page-is-still-mapped-in-remove_stable_node.patch
+drm-amdgpu-disable-gfxoff-when-using-register-read-interface.patch
+drm-i915-userptr-try-to-acquire-the-page-lock-around-set_page_dirty.patch
--- /dev/null
+From 0161a94e2d1c713bd34d72bc0239d87c31747bf7 Mon Sep 17 00:00:00 2001
+From: Laura Abbott <labbott@redhat.com>
+Date: Tue, 12 Nov 2019 17:10:26 -0500
+Subject: tools: gpio: Correctly add make dependencies for gpio_utils
+
+From: Laura Abbott <labbott@redhat.com>
+
+commit 0161a94e2d1c713bd34d72bc0239d87c31747bf7 upstream.
+
+gpio tools fail to build correctly with make parallelization:
+
+$ make -s -j24
+ld: gpio-utils.o: file not recognized: file truncated
+make[1]: *** [/home/labbott/linux_upstream/tools/build/Makefile.build:145: lsgpio-in.o] Error 1
+make: *** [Makefile:43: lsgpio-in.o] Error 2
+make: *** Waiting for unfinished jobs....
+
+This is because gpio-utils.o is used across multiple targets.
+Fix this by making gpio-utils.o a proper dependency.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Laura Abbott <labbott@redhat.com>
+Signed-off-by: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/gpio/Build | 1 +
+ tools/gpio/Makefile | 10 +++++++---
+ 2 files changed, 8 insertions(+), 3 deletions(-)
+
+--- a/tools/gpio/Build
++++ b/tools/gpio/Build
+@@ -1,3 +1,4 @@
++gpio-utils-y += gpio-utils.o
+ lsgpio-y += lsgpio.o gpio-utils.o
+ gpio-hammer-y += gpio-hammer.o gpio-utils.o
+ gpio-event-mon-y += gpio-event-mon.o gpio-utils.o
+--- a/tools/gpio/Makefile
++++ b/tools/gpio/Makefile
+@@ -35,11 +35,15 @@ $(OUTPUT)include/linux/gpio.h: ../../inc
+
+ prepare: $(OUTPUT)include/linux/gpio.h
+
++GPIO_UTILS_IN := $(OUTPUT)gpio-utils-in.o
++$(GPIO_UTILS_IN): prepare FORCE
++ $(Q)$(MAKE) $(build)=gpio-utils
++
+ #
+ # lsgpio
+ #
+ LSGPIO_IN := $(OUTPUT)lsgpio-in.o
+-$(LSGPIO_IN): prepare FORCE
++$(LSGPIO_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
+ $(Q)$(MAKE) $(build)=lsgpio
+ $(OUTPUT)lsgpio: $(LSGPIO_IN)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+@@ -48,7 +52,7 @@ $(OUTPUT)lsgpio: $(LSGPIO_IN)
+ # gpio-hammer
+ #
+ GPIO_HAMMER_IN := $(OUTPUT)gpio-hammer-in.o
+-$(GPIO_HAMMER_IN): prepare FORCE
++$(GPIO_HAMMER_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
+ $(Q)$(MAKE) $(build)=gpio-hammer
+ $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+@@ -57,7 +61,7 @@ $(OUTPUT)gpio-hammer: $(GPIO_HAMMER_IN)
+ # gpio-event-mon
+ #
+ GPIO_EVENT_MON_IN := $(OUTPUT)gpio-event-mon-in.o
+-$(GPIO_EVENT_MON_IN): prepare FORCE
++$(GPIO_EVENT_MON_IN): prepare FORCE $(OUTPUT)gpio-utils-in.o
+ $(Q)$(MAKE) $(build)=gpio-event-mon
+ $(OUTPUT)gpio-event-mon: $(GPIO_EVENT_MON_IN)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
--- /dev/null
+From 6dbd3e66e7785a2f055bf84d98de9b8fd31ff3f5 Mon Sep 17 00:00:00 2001
+From: Stefano Garzarella <sgarzare@redhat.com>
+Date: Tue, 30 Jul 2019 17:43:33 +0200
+Subject: vhost/vsock: split packets to send using multiple buffers
+
+From: Stefano Garzarella <sgarzare@redhat.com>
+
+commit 6dbd3e66e7785a2f055bf84d98de9b8fd31ff3f5 upstream.
+
+If the packets to send to the guest are bigger than the buffer
+available, we can split them, using multiple buffers and fixing
+the length in the packet header.
+This is safe since virtio-vsock supports only stream sockets.
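+
+A condensed sketch of one pass of the send loop after this change
+(names as in the diff below):
+
+	payload_len = pkt->len - pkt->off;		/* bytes still to send */
+	if (payload_len > iov_len - sizeof(pkt->hdr))
+		payload_len = iov_len - sizeof(pkt->hdr);	/* clamp to buffer */
+
+	pkt->hdr.len = cpu_to_le32(payload_len);	/* header covers this chunk */
+	/* ...copy pkt->hdr, then payload_len bytes from pkt->buf + pkt->off... */
+
+	pkt->off += payload_len;
+	if (pkt->off < pkt->len) {
+		/* partial send: requeue to go out with the next buffer */
+		list_add(&pkt->list, &vsock->send_pkt_list);
+	} else {
+		virtio_transport_free_pkt(pkt);
+	}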
+
+Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
+Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/vhost/vsock.c | 66 +++++++++++++++++++++++---------
+ net/vmw_vsock/virtio_transport_common.c | 15 +++++--
+ 2 files changed, 60 insertions(+), 21 deletions(-)
+
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -103,7 +103,7 @@ vhost_transport_do_send_pkt(struct vhost
+ struct iov_iter iov_iter;
+ unsigned out, in;
+ size_t nbytes;
+- size_t len;
++ size_t iov_len, payload_len;
+ int head;
+
+ spin_lock_bh(&vsock->send_pkt_list_lock);
+@@ -148,8 +148,24 @@ vhost_transport_do_send_pkt(struct vhost
+ break;
+ }
+
+- len = iov_length(&vq->iov[out], in);
+- iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);
++ iov_len = iov_length(&vq->iov[out], in);
++ if (iov_len < sizeof(pkt->hdr)) {
++ virtio_transport_free_pkt(pkt);
++ vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
++ break;
++ }
++
++ iov_iter_init(&iov_iter, READ, &vq->iov[out], in, iov_len);
++ payload_len = pkt->len - pkt->off;
++
++ /* If the packet is greater than the space available in the
++ * buffer, we split it using multiple buffers.
++ */
++ if (payload_len > iov_len - sizeof(pkt->hdr))
++ payload_len = iov_len - sizeof(pkt->hdr);
++
++ /* Set the correct length in the header */
++ pkt->hdr.len = cpu_to_le32(payload_len);
+
+ nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
+ if (nbytes != sizeof(pkt->hdr)) {
+@@ -158,33 +174,47 @@ vhost_transport_do_send_pkt(struct vhost
+ break;
+ }
+
+- nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
+- if (nbytes != pkt->len) {
++ nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
++ &iov_iter);
++ if (nbytes != payload_len) {
+ virtio_transport_free_pkt(pkt);
+ vq_err(vq, "Faulted on copying pkt buf\n");
+ break;
+ }
+
+- vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
++ vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
+ added = true;
+
+- if (pkt->reply) {
+- int val;
+-
+- val = atomic_dec_return(&vsock->queued_replies);
+-
+- /* Do we have resources to resume tx processing? */
+- if (val + 1 == tx_vq->num)
+- restart_tx = true;
+- }
+-
+ /* Deliver to monitoring devices all correctly transmitted
+ * packets.
+ */
+ virtio_transport_deliver_tap_pkt(pkt);
+
+- total_len += pkt->len;
+- virtio_transport_free_pkt(pkt);
++ pkt->off += payload_len;
++ total_len += payload_len;
++
++ /* If we didn't send all the payload we can requeue the packet
++ * to send it with the next available buffer.
++ */
++ if (pkt->off < pkt->len) {
++ spin_lock_bh(&vsock->send_pkt_list_lock);
++ list_add(&pkt->list, &vsock->send_pkt_list);
++ spin_unlock_bh(&vsock->send_pkt_list_lock);
++ } else {
++ if (pkt->reply) {
++ int val;
++
++ val = atomic_dec_return(&vsock->queued_replies);
++
++ /* Do we have resources to resume tx
++ * processing?
++ */
++ if (val + 1 == tx_vq->num)
++ restart_tx = true;
++ }
++
++ virtio_transport_free_pkt(pkt);
++ }
+ } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
+ if (added)
+ vhost_signal(&vsock->dev, vq);
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -92,8 +92,17 @@ static struct sk_buff *virtio_transport_
+ struct virtio_vsock_pkt *pkt = opaque;
+ struct af_vsockmon_hdr *hdr;
+ struct sk_buff *skb;
++ size_t payload_len;
++ void *payload_buf;
+
+- skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + pkt->len,
++ /* A packet could be split to fit the RX buffer, so we can retrieve
++ * the payload length from the header and the buffer pointer taking
++ * care of the offset in the original packet.
++ */
++ payload_len = le32_to_cpu(pkt->hdr.len);
++ payload_buf = pkt->buf + pkt->off;
++
++ skb = alloc_skb(sizeof(*hdr) + sizeof(pkt->hdr) + payload_len,
+ GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+@@ -133,8 +142,8 @@ static struct sk_buff *virtio_transport_
+
+ skb_put_data(skb, &pkt->hdr, sizeof(pkt->hdr));
+
+- if (pkt->len) {
+- skb_put_data(skb, pkt->buf, pkt->len);
++ if (payload_len) {
++ skb_put_data(skb, payload_buf, payload_len);
+ }
+
+ return skb;
--- /dev/null
+From d791cfcbf98191122af70b053a21075cb450d119 Mon Sep 17 00:00:00 2001
+From: Laurent Vivier <lvivier@redhat.com>
+Date: Thu, 14 Nov 2019 13:25:48 +0100
+Subject: virtio_console: allocate inbufs in add_port() only if it is needed
+
+From: Laurent Vivier <lvivier@redhat.com>
+
+commit d791cfcbf98191122af70b053a21075cb450d119 upstream.
+
+When we hot unplug a virtserialport and then try to hot plug again,
+it fails:
+
+(qemu) chardev-add socket,id=serial0,path=/tmp/serial0,server,nowait
+(qemu) device_add virtserialport,bus=virtio-serial0.0,nr=2,\
+ chardev=serial0,id=serial0,name=serial0
+(qemu) device_del serial0
+(qemu) device_add virtserialport,bus=virtio-serial0.0,nr=2,\
+ chardev=serial0,id=serial0,name=serial0
+kernel error:
+ virtio-ports vport2p2: Error allocating inbufs
+qemu error:
+ virtio-serial-bus: Guest failure in adding port 2 for device \
+ virtio-serial0.0
+
+This happens because buffers for the in_vq are allocated when the port is
+added but are not released when the port is unplugged.
+
+They are only released when virtconsole is removed (see a7a69ec0d8e4)
+
+To avoid the problem and to be symmetric, we could allocate all the buffers
+in init_vqs() as they are released in remove_vqs(), but it sounds like
+a waste of memory.
+
+Rather than that, this patch changes the add_port() logic to ignore an
+-ENOSPC error from fill_queue(), which means the queue has already been
+filled.
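+
+A minimal sketch of the resulting add_port() handling (as in the diff
+below):
+
+	err = fill_queue(port->in_vq, &port->inbuf_lock);
+	if (err < 0 && err != -ENOSPC) {
+		/* -ENOSPC only means buffers from a previous plug of this
+		 * port are still queued, so it is not an error here.
+		 */
+		dev_err(port->dev, "Error allocating inbufs\n");
+		goto free_device;
+	}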
+
+Fixes: a7a69ec0d8e4 ("virtio_console: free buffers after reset")
+Cc: mst@redhat.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Laurent Vivier <lvivier@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/virtio_console.c | 28 +++++++++++++---------------
+ 1 file changed, 13 insertions(+), 15 deletions(-)
+
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -1366,24 +1366,24 @@ static void set_console_size(struct port
+ port->cons.ws.ws_col = cols;
+ }
+
+-static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
++static int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+ {
+ struct port_buffer *buf;
+- unsigned int nr_added_bufs;
++ int nr_added_bufs;
+ int ret;
+
+ nr_added_bufs = 0;
+ do {
+ buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
+ if (!buf)
+- break;
++ return -ENOMEM;
+
+ spin_lock_irq(lock);
+ ret = add_inbuf(vq, buf);
+ if (ret < 0) {
+ spin_unlock_irq(lock);
+ free_buf(buf, true);
+- break;
++ return ret;
+ }
+ nr_added_bufs++;
+ spin_unlock_irq(lock);
+@@ -1403,7 +1403,6 @@ static int add_port(struct ports_device
+ char debugfs_name[16];
+ struct port *port;
+ dev_t devt;
+- unsigned int nr_added_bufs;
+ int err;
+
+ port = kmalloc(sizeof(*port), GFP_KERNEL);
+@@ -1462,11 +1461,13 @@ static int add_port(struct ports_device
+ spin_lock_init(&port->outvq_lock);
+ init_waitqueue_head(&port->waitqueue);
+
+- /* Fill the in_vq with buffers so the host can send us data. */
+- nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
+- if (!nr_added_bufs) {
++ /* We can safely ignore ENOSPC because it means
++ * the queue already has buffers. Buffers are removed
++ * only by virtcons_remove(), not by unplug_port()
++ */
++ err = fill_queue(port->in_vq, &port->inbuf_lock);
++ if (err < 0 && err != -ENOSPC) {
+ dev_err(port->dev, "Error allocating inbufs\n");
+- err = -ENOMEM;
+ goto free_device;
+ }
+
+@@ -2099,14 +2100,11 @@ static int virtcons_probe(struct virtio_
+ INIT_WORK(&portdev->control_work, &control_work_handler);
+
+ if (multiport) {
+- unsigned int nr_added_bufs;
+-
+ spin_lock_init(&portdev->c_ivq_lock);
+ spin_lock_init(&portdev->c_ovq_lock);
+
+- nr_added_bufs = fill_queue(portdev->c_ivq,
+- &portdev->c_ivq_lock);
+- if (!nr_added_bufs) {
++ err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
++ if (err < 0) {
+ dev_err(&vdev->dev,
+ "Error allocating buffers for control queue\n");
+ /*
+@@ -2117,7 +2115,7 @@ static int virtcons_probe(struct virtio_
+ VIRTIO_CONSOLE_DEVICE_READY, 0);
+ /* Device was functional: we need full cleanup. */
+ virtcons_remove(vdev);
+- return -ENOMEM;
++ return err;
+ }
+ } else {
+ /*