5.7-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 17 Aug 2020 11:00:06 +0000 (13:00 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 17 Aug 2020 11:00:06 +0000 (13:00 +0200)
added patches:
9p-fix-memory-leak-in-v9fs_mount.patch
alsa-hda-fix-the-micmute-led-status-for-lenovo-thinkcentre-aio.patch
alsa-usb-audio-add-quirk-for-pioneer-ddj-rb.patch
alsa-usb-audio-creative-usb-x-fi-pro-sb1095-volume-knob-support.patch
alsa-usb-audio-fix-overeager-device-match-for-macrosilicon-ms2109.patch
alsa-usb-audio-work-around-streaming-quirk-for-macrosilicon-ms2109.patch
bitfield.h-don-t-compile-time-validate-_val-in-field_fit.patch
crypto-ccp-fix-use-of-merged-scatterlists.patch
crypto-cpt-don-t-sleep-of-crypto_tfm_req_may_sleep-was-not-specified.patch
crypto-hisilicon-don-t-sleep-of-crypto_tfm_req_may_sleep-was-not-specified.patch
crypto-qat-fix-double-free-in-qat_uclo_create_batch_init_list.patch
driver-core-fix-probe_count-imbalance-in-really_probe.patch
drm-ttm-nouveau-don-t-call-tt-destroy-callback-on-alloc-failure.patch
fs-minix-check-return-value-of-sb_getblk.patch
fs-minix-don-t-allow-getting-deleted-inodes.patch
fs-minix-reject-too-large-maximum-file-size.patch
io_uring-fail-poll-arm-on-queue-proc-failure.patch
io_uring-set-ctx-sq-cq-entry-count-earlier.patch
io_uring-use-twa_signal-for-task_work-uncondtionally.patch
kvm-x86-replace-kvm_spec_ctrl_test_value-with-runtime-test-on-the-host.patch
media-media-request-fix-crash-if-memory-allocation-fails.patch
nfs-don-t-move-layouts-to-plh_return_segs-list-while-in-use.patch
nfs-don-t-return-layout-segments-that-are-in-use.patch
pstore-fix-linking-when-crypto-api-disabled.patch
tick-nohz-narrow-down-noise-while-setting-current-task-s-tick-dependency.patch
tpm-unify-the-mismatching-tpm-space-buffer-sizes.patch
usb-cdns3-gadget-always-zeroed-trb-buffer-when-enable-endpoint.patch
usb-serial-cp210x-enable-usb-generic-throttle-unthrottle.patch
usb-serial-cp210x-re-enable-auto-rts-on-open.patch
vdpasim-protect-concurrent-access-to-iommu-iotlb.patch

31 files changed:
queue-5.7/9p-fix-memory-leak-in-v9fs_mount.patch [new file with mode: 0644]
queue-5.7/alsa-hda-fix-the-micmute-led-status-for-lenovo-thinkcentre-aio.patch [new file with mode: 0644]
queue-5.7/alsa-usb-audio-add-quirk-for-pioneer-ddj-rb.patch [new file with mode: 0644]
queue-5.7/alsa-usb-audio-creative-usb-x-fi-pro-sb1095-volume-knob-support.patch [new file with mode: 0644]
queue-5.7/alsa-usb-audio-fix-overeager-device-match-for-macrosilicon-ms2109.patch [new file with mode: 0644]
queue-5.7/alsa-usb-audio-work-around-streaming-quirk-for-macrosilicon-ms2109.patch [new file with mode: 0644]
queue-5.7/bitfield.h-don-t-compile-time-validate-_val-in-field_fit.patch [new file with mode: 0644]
queue-5.7/crypto-ccp-fix-use-of-merged-scatterlists.patch [new file with mode: 0644]
queue-5.7/crypto-cpt-don-t-sleep-of-crypto_tfm_req_may_sleep-was-not-specified.patch [new file with mode: 0644]
queue-5.7/crypto-hisilicon-don-t-sleep-of-crypto_tfm_req_may_sleep-was-not-specified.patch [new file with mode: 0644]
queue-5.7/crypto-qat-fix-double-free-in-qat_uclo_create_batch_init_list.patch [new file with mode: 0644]
queue-5.7/driver-core-fix-probe_count-imbalance-in-really_probe.patch [new file with mode: 0644]
queue-5.7/drm-ttm-nouveau-don-t-call-tt-destroy-callback-on-alloc-failure.patch [new file with mode: 0644]
queue-5.7/fs-minix-check-return-value-of-sb_getblk.patch [new file with mode: 0644]
queue-5.7/fs-minix-don-t-allow-getting-deleted-inodes.patch [new file with mode: 0644]
queue-5.7/fs-minix-reject-too-large-maximum-file-size.patch [new file with mode: 0644]
queue-5.7/io_uring-fail-poll-arm-on-queue-proc-failure.patch [new file with mode: 0644]
queue-5.7/io_uring-set-ctx-sq-cq-entry-count-earlier.patch [new file with mode: 0644]
queue-5.7/io_uring-use-twa_signal-for-task_work-uncondtionally.patch [new file with mode: 0644]
queue-5.7/kvm-x86-replace-kvm_spec_ctrl_test_value-with-runtime-test-on-the-host.patch [new file with mode: 0644]
queue-5.7/media-media-request-fix-crash-if-memory-allocation-fails.patch [new file with mode: 0644]
queue-5.7/nfs-don-t-move-layouts-to-plh_return_segs-list-while-in-use.patch [new file with mode: 0644]
queue-5.7/nfs-don-t-return-layout-segments-that-are-in-use.patch [new file with mode: 0644]
queue-5.7/pstore-fix-linking-when-crypto-api-disabled.patch [new file with mode: 0644]
queue-5.7/series
queue-5.7/tick-nohz-narrow-down-noise-while-setting-current-task-s-tick-dependency.patch [new file with mode: 0644]
queue-5.7/tpm-unify-the-mismatching-tpm-space-buffer-sizes.patch [new file with mode: 0644]
queue-5.7/usb-cdns3-gadget-always-zeroed-trb-buffer-when-enable-endpoint.patch [new file with mode: 0644]
queue-5.7/usb-serial-cp210x-enable-usb-generic-throttle-unthrottle.patch [new file with mode: 0644]
queue-5.7/usb-serial-cp210x-re-enable-auto-rts-on-open.patch [new file with mode: 0644]
queue-5.7/vdpasim-protect-concurrent-access-to-iommu-iotlb.patch [new file with mode: 0644]

diff --git a/queue-5.7/9p-fix-memory-leak-in-v9fs_mount.patch b/queue-5.7/9p-fix-memory-leak-in-v9fs_mount.patch
new file mode 100644 (file)
index 0000000..4a8d8d5
--- /dev/null
@@ -0,0 +1,48 @@
+From cb0aae0e31c632c407a2cab4307be85a001d4d98 Mon Sep 17 00:00:00 2001
+From: Zheng Bin <zhengbin13@huawei.com>
+Date: Mon, 15 Jun 2020 09:21:53 +0800
+Subject: 9p: Fix memory leak in v9fs_mount
+
+From: Zheng Bin <zhengbin13@huawei.com>
+
+commit cb0aae0e31c632c407a2cab4307be85a001d4d98 upstream.
+
+v9fs_mount
+  v9fs_session_init
+    v9fs_cache_session_get_cookie
+      v9fs_random_cachetag                     -->alloc cachetag
+      v9ses->fscache = fscache_acquire_cookie  -->maybe NULL
+  sb = sget                                    -->fail, goto clunk
+clunk_fid:
+  v9fs_session_close
+    if (v9ses->fscache)                        -->NULL
+      kfree(v9ses->cachetag)
+
+Thus memleak happens.
+
+Link: http://lkml.kernel.org/r/20200615012153.89538-1-zhengbin13@huawei.com
+Fixes: 60e78d2c993e ("9p: Add fscache support to 9p")
+Cc: <stable@vger.kernel.org> # v2.6.32+
+Signed-off-by: Zheng Bin <zhengbin13@huawei.com>
+Signed-off-by: Dominique Martinet <asmadeus@codewreck.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/9p/v9fs.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/fs/9p/v9fs.c
++++ b/fs/9p/v9fs.c
+@@ -500,10 +500,9 @@ void v9fs_session_close(struct v9fs_sess
+       }
+ #ifdef CONFIG_9P_FSCACHE
+-      if (v9ses->fscache) {
++      if (v9ses->fscache)
+               v9fs_cache_session_put_cookie(v9ses);
+-              kfree(v9ses->cachetag);
+-      }
++      kfree(v9ses->cachetag);
+ #endif
+       kfree(v9ses->uname);
+       kfree(v9ses->aname);
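
The fixed ordering above boils down to a common cleanup rule: release the optional resource behind its feature check, but free the unconditionally-allocated buffer unconditionally. A minimal userspace sketch of that rule, with invented names (struct session, put_cookie, session_close) standing in for the 9p pieces:

#include <stdlib.h>

/* Invented stand-ins: the cookie may legitimately be NULL (acquiring it can
 * fail), while the cachetag is always allocated beforehand. */
struct session {
        void *cookie;
        char *cachetag;
};

static void put_cookie(void *c)          /* stand-in for the cookie release */
{
        free(c);
}

static void session_close(struct session *s)
{
        if (s->cookie)
                put_cookie(s->cookie);   /* cookie-specific teardown only */
        free(s->cachetag);               /* always freed; free(NULL) is a no-op */
}

int main(void)
{
        struct session s = { .cookie = NULL, .cachetag = malloc(8) };

        session_close(&s);               /* no leak even though there is no cookie */
        return 0;
}
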
diff --git a/queue-5.7/alsa-hda-fix-the-micmute-led-status-for-lenovo-thinkcentre-aio.patch b/queue-5.7/alsa-hda-fix-the-micmute-led-status-for-lenovo-thinkcentre-aio.patch
new file mode 100644 (file)
index 0000000..fbe7cd4
--- /dev/null
@@ -0,0 +1,36 @@
+From 386a6539992b82fe9ac4f9dc3f548956fd894d8c Mon Sep 17 00:00:00 2001
+From: Hui Wang <hui.wang@canonical.com>
+Date: Mon, 10 Aug 2020 10:16:59 +0800
+Subject: ALSA: hda - fix the micmute led status for Lenovo ThinkCentre AIO
+
+From: Hui Wang <hui.wang@canonical.com>
+
+commit 386a6539992b82fe9ac4f9dc3f548956fd894d8c upstream.
+
+After installing Ubuntu Linux, the micmute LED status is not correct.
+Users expect the LED to be on when capture is disabled, but with the
+current kernel the LED is off when capture is disabled.
+
+We tried an older kernel such as linux-4.15 and the issue is not present
+there. It looks like we introduced this issue when switching to the
+led_cdev.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Hui Wang <hui.wang@canonical.com>
+Link: https://lore.kernel.org/r/20200810021659.7429-1-hui.wang@canonical.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4391,6 +4391,7 @@ static void alc233_fixup_lenovo_line2_mi
+ {
+       struct alc_spec *spec = codec->spec;
++      spec->micmute_led_polarity = 1;
+       alc_fixup_hp_gpio_led(codec, action, 0, 0x04);
+       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               spec->init_amp = ALC_INIT_DEFAULT;
diff --git a/queue-5.7/alsa-usb-audio-add-quirk-for-pioneer-ddj-rb.patch b/queue-5.7/alsa-usb-audio-add-quirk-for-pioneer-ddj-rb.patch
new file mode 100644 (file)
index 0000000..99ef354
--- /dev/null
@@ -0,0 +1,87 @@
+From 6e8596172ee1cd46ec0bfd5adcf4ff86371478b6 Mon Sep 17 00:00:00 2001
+From: Hector Martin <marcan@marcan.st>
+Date: Mon, 10 Aug 2020 17:25:02 +0900
+Subject: ALSA: usb-audio: add quirk for Pioneer DDJ-RB
+
+From: Hector Martin <marcan@marcan.st>
+
+commit 6e8596172ee1cd46ec0bfd5adcf4ff86371478b6 upstream.
+
+This is just another Pioneer device with fixed endpoints. Input is dummy
+but used as feedback (it always returns silence).
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Hector Martin <marcan@marcan.st>
+Link: https://lore.kernel.org/r/20200810082502.225979-1-marcan@marcan.st
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/usb/quirks-table.h |   56 +++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 56 insertions(+)
+
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3570,6 +3570,62 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge
+               }
+       }
+ },
++{
++      /*
++       * PIONEER DJ DDJ-RB
++       * PCM is 4 channels out, 2 dummy channels in @ 44.1 fixed
++       * The feedback for the output is the dummy input.
++       */
++      USB_DEVICE_VENDOR_SPEC(0x2b73, 0x000e),
++      .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++              .ifnum = QUIRK_ANY_INTERFACE,
++              .type = QUIRK_COMPOSITE,
++              .data = (const struct snd_usb_audio_quirk[]) {
++                      {
++                              .ifnum = 0,
++                              .type = QUIRK_AUDIO_FIXED_ENDPOINT,
++                              .data = &(const struct audioformat) {
++                                      .formats = SNDRV_PCM_FMTBIT_S24_3LE,
++                                      .channels = 4,
++                                      .iface = 0,
++                                      .altsetting = 1,
++                                      .altset_idx = 1,
++                                      .endpoint = 0x01,
++                                      .ep_attr = USB_ENDPOINT_XFER_ISOC|
++                                                 USB_ENDPOINT_SYNC_ASYNC,
++                                      .rates = SNDRV_PCM_RATE_44100,
++                                      .rate_min = 44100,
++                                      .rate_max = 44100,
++                                      .nr_rates = 1,
++                                      .rate_table = (unsigned int[]) { 44100 }
++                              }
++                      },
++                      {
++                              .ifnum = 0,
++                              .type = QUIRK_AUDIO_FIXED_ENDPOINT,
++                              .data = &(const struct audioformat) {
++                                      .formats = SNDRV_PCM_FMTBIT_S24_3LE,
++                                      .channels = 2,
++                                      .iface = 0,
++                                      .altsetting = 1,
++                                      .altset_idx = 1,
++                                      .endpoint = 0x82,
++                                      .ep_attr = USB_ENDPOINT_XFER_ISOC|
++                                               USB_ENDPOINT_SYNC_ASYNC|
++                                               USB_ENDPOINT_USAGE_IMPLICIT_FB,
++                                      .rates = SNDRV_PCM_RATE_44100,
++                                      .rate_min = 44100,
++                                      .rate_max = 44100,
++                                      .nr_rates = 1,
++                                      .rate_table = (unsigned int[]) { 44100 }
++                              }
++                      },
++                      {
++                              .ifnum = -1
++                      }
++              }
++      }
++},
+ #define ALC1220_VB_DESKTOP(vend, prod) { \
+       USB_DEVICE(vend, prod), \
diff --git a/queue-5.7/alsa-usb-audio-creative-usb-x-fi-pro-sb1095-volume-knob-support.patch b/queue-5.7/alsa-usb-audio-creative-usb-x-fi-pro-sb1095-volume-knob-support.patch
new file mode 100644 (file)
index 0000000..a60b4ee
--- /dev/null
@@ -0,0 +1,34 @@
+From fec9008828cde0076aae595ac031bfcf49d335a4 Mon Sep 17 00:00:00 2001
+From: Mirko Dietrich <buzz@l4m1.de>
+Date: Thu, 6 Aug 2020 14:48:50 +0200
+Subject: ALSA: usb-audio: Creative USB X-Fi Pro SB1095 volume knob support
+
+From: Mirko Dietrich <buzz@l4m1.de>
+
+commit fec9008828cde0076aae595ac031bfcf49d335a4 upstream.
+
+Adds an entry for the Creative USB X-Fi to the rc_config array in
+mixer_quirks.c to allow use of the volume knob on the device.
+Adds support for the newer X-Fi Pro card, known as "Model No. SB1095",
+with USB ID "041e:3263".
+
+Signed-off-by: Mirko Dietrich <buzz@l4m1.de>
+Cc: <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20200806124850.20334-1-buzz@l4m1.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/usb/mixer_quirks.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -185,6 +185,7 @@ static const struct rc_config {
+       { USB_ID(0x041e, 0x3042), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi S51 */
+       { USB_ID(0x041e, 0x30df), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi S51 Pro */
+       { USB_ID(0x041e, 0x3237), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi S51 Pro */
++      { USB_ID(0x041e, 0x3263), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi S51 Pro */
+       { USB_ID(0x041e, 0x3048), 2, 2, 6, 6,  2,  0x6e91 }, /* Toshiba SB0500 */
+ };
diff --git a/queue-5.7/alsa-usb-audio-fix-overeager-device-match-for-macrosilicon-ms2109.patch b/queue-5.7/alsa-usb-audio-fix-overeager-device-match-for-macrosilicon-ms2109.patch
new file mode 100644 (file)
index 0000000..9d542cc
--- /dev/null
@@ -0,0 +1,40 @@
+From 14a720dc1f5332f3bdf30a23a3bc549e81be974c Mon Sep 17 00:00:00 2001
+From: Hector Martin <marcan@marcan.st>
+Date: Mon, 10 Aug 2020 13:53:19 +0900
+Subject: ALSA: usb-audio: fix overeager device match for MacroSilicon MS2109
+
+From: Hector Martin <marcan@marcan.st>
+
+commit 14a720dc1f5332f3bdf30a23a3bc549e81be974c upstream.
+
+Matching by device matches all interfaces, which breaks the video/HID
+portions of the device depending on module load order.
+
+Fixes: e337bf19f6af ("ALSA: usb-audio: add quirk for MacroSilicon MS2109")
+Cc: stable@vger.kernel.org
+Signed-off-by: Hector Martin <marcan@marcan.st>
+Link: https://lore.kernel.org/r/20200810045319.128745-1-marcan@marcan.st
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/usb/quirks-table.h |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3623,7 +3623,13 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* A
+  * with.
+  */
+ {
+-      USB_DEVICE(0x534d, 0x2109),
++      .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
++                     USB_DEVICE_ID_MATCH_INT_CLASS |
++                     USB_DEVICE_ID_MATCH_INT_SUBCLASS,
++      .idVendor = 0x534d,
++      .idProduct = 0x2109,
++      .bInterfaceClass = USB_CLASS_AUDIO,
++      .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .vendor_name = "MacroSilicon",
+               .product_name = "MS2109",
diff --git a/queue-5.7/alsa-usb-audio-work-around-streaming-quirk-for-macrosilicon-ms2109.patch b/queue-5.7/alsa-usb-audio-work-around-streaming-quirk-for-macrosilicon-ms2109.patch
new file mode 100644 (file)
index 0000000..541fdb8
--- /dev/null
@@ -0,0 +1,80 @@
+From 1b7ecc241a67ad6b584e071bd791a54e0cd5f097 Mon Sep 17 00:00:00 2001
+From: Hector Martin <marcan@marcan.st>
+Date: Mon, 10 Aug 2020 17:24:00 +0900
+Subject: ALSA: usb-audio: work around streaming quirk for MacroSilicon MS2109
+
+From: Hector Martin <marcan@marcan.st>
+
+commit 1b7ecc241a67ad6b584e071bd791a54e0cd5f097 upstream.
+
+Further investigation of the L-R swap problem on the MS2109 reveals that
+the problem isn't that the channels are swapped, but rather that they
+are swapped and also out of phase by one sample. In other words, the
+issue is actually that the very first frame that comes from the hardware
+is a half-frame containing only the right channel, and after that
+everything becomes offset.
+
+So introduce a new quirk field to drop the very first 2 bytes that come
+in after the format is configured and a capture stream starts. This puts
+the channels in phase and in the correct order.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Hector Martin <marcan@marcan.st>
+Link: https://lore.kernel.org/r/20200810082400.225858-1-marcan@marcan.st
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/usb/card.h   |    1 +
+ sound/usb/pcm.c    |    6 ++++++
+ sound/usb/quirks.c |    3 +++
+ sound/usb/stream.c |    1 +
+ 4 files changed, 11 insertions(+)
+
+--- a/sound/usb/card.h
++++ b/sound/usb/card.h
+@@ -133,6 +133,7 @@ struct snd_usb_substream {
+       unsigned int tx_length_quirk:1; /* add length specifier to transfers */
+       unsigned int fmt_type;          /* USB audio format type (1-3) */
+       unsigned int pkt_offset_adj;    /* Bytes to drop from beginning of packets (for non-compliant devices) */
++      unsigned int stream_offset_adj; /* Bytes to drop from beginning of stream (for non-compliant devices) */
+       unsigned int running: 1;        /* running status */
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -1416,6 +1416,12 @@ static void retire_capture_urb(struct sn
+                       // continue;
+               }
+               bytes = urb->iso_frame_desc[i].actual_length;
++              if (subs->stream_offset_adj > 0) {
++                      unsigned int adj = min(subs->stream_offset_adj, bytes);
++                      cp += adj;
++                      bytes -= adj;
++                      subs->stream_offset_adj -= adj;
++              }
+               frames = bytes / stride;
+               if (!subs->txfr_quirk)
+                       bytes = frames * stride;
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1468,6 +1468,9 @@ void snd_usb_set_format_quirk(struct snd
+       case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */
+               set_format_emu_quirk(subs, fmt);
+               break;
++      case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
++              subs->stream_offset_adj = 2;
++              break;
+       }
+ }
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -94,6 +94,7 @@ static void snd_usb_init_substream(struc
+       subs->tx_length_quirk = as->chip->tx_length_quirk;
+       subs->speed = snd_usb_get_speed(subs->dev);
+       subs->pkt_offset_adj = 0;
++      subs->stream_offset_adj = 0;
+       snd_usb_set_pcm_ops(as->pcm, stream);
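
Conceptually, the new stream_offset_adj field just swallows the first N bytes of captured data, across however many packets that takes, before anything is handed to the PCM core. A self-contained sketch of that consumption step, with invented names (consume_packet, offset_adj); the real logic is the retire_capture_urb() hunk above:

#include <stdio.h>

/* offset_adj: bytes still to drop from the very start of the capture stream
 * (2 for the MS2109, as set in the quirks.c hunk above). */
static size_t consume_packet(const unsigned char **data, size_t bytes,
                             size_t *offset_adj)
{
        if (*offset_adj > 0) {
                size_t adj = *offset_adj < bytes ? *offset_adj : bytes;

                *data += adj;            /* skip the stale half-frame bytes */
                bytes -= adj;
                *offset_adj -= adj;
        }
        return bytes;                    /* what is left for the PCM core */
}

int main(void)
{
        unsigned char pkt[] = { 0xAA, 0xBB, 1, 2, 3, 4 };  /* 0xAA/0xBB: leftover half-frame */
        const unsigned char *p = pkt;
        size_t adj = 2;
        size_t left = consume_packet(&p, sizeof(pkt), &adj);

        printf("kept %zu bytes, first kept byte is %u\n", left, (unsigned)p[0]);  /* 4 and 1 */
        return 0;
}
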
diff --git a/queue-5.7/bitfield.h-don-t-compile-time-validate-_val-in-field_fit.patch b/queue-5.7/bitfield.h-don-t-compile-time-validate-_val-in-field_fit.patch
new file mode 100644 (file)
index 0000000..22b21fc
--- /dev/null
@@ -0,0 +1,55 @@
+From 444da3f52407d74c9aa12187ac6b01f76ee47d62 Mon Sep 17 00:00:00 2001
+From: Jakub Kicinski <kuba@kernel.org>
+Date: Mon, 10 Aug 2020 11:21:11 -0700
+Subject: bitfield.h: don't compile-time validate _val in FIELD_FIT
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+commit 444da3f52407d74c9aa12187ac6b01f76ee47d62 upstream.
+
+When ur_load_imm_any() is inlined into jeq_imm(), it's possible for the
+compiler to deduce a case where _val can only have the value of -1 at
+compile time. Specifically,
+
+/* struct bpf_insn: _s32 imm */
+u64 imm = insn->imm; /* sign extend */
+if (imm >> 32) { /* non-zero only if insn->imm is negative */
+  /* inlined from ur_load_imm_any */
+  u32 __imm = imm >> 32; /* therefore, always 0xffffffff */
+  if (__builtin_constant_p(__imm) && __imm > 255)
+    compiletime_assert_XXX()
+
+This can result in tripping a BUILD_BUG_ON() in __BF_FIELD_CHECK() that
+checks that a given value is representable in one byte (interpreted as
+unsigned).
+
+FIELD_FIT() should return true or false at runtime depending on whether a
+value can fit or not. Don't break the build over a value that's too large for
+the mask. We'd prefer to keep the inlining and compiler optimizations
+though we know this case will always return false.
+
+Cc: stable@vger.kernel.org
+Fixes: 1697599ee301a ("bitfield.h: add FIELD_FIT() helper")
+Link: https://lore.kernel.org/kernel-hardening/CAK7LNASvb0UDJ0U5wkYYRzTAdnEs64HjXpEUL7d=V0CXiAXcNw@mail.gmail.com/
+Reported-by: Masahiro Yamada <masahiroy@kernel.org>
+Debugged-by: Sami Tolvanen <samitolvanen@google.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/bitfield.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/bitfield.h
++++ b/include/linux/bitfield.h
+@@ -77,7 +77,7 @@
+  */
+ #define FIELD_FIT(_mask, _val)                                                \
+       ({                                                              \
+-              __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: ");     \
++              __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: ");     \
+               !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
+       })
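
With the compile-time check no longer applied to _val, FIELD_FIT() reduces to the runtime expression on the last line of the macro. A self-contained userspace restatement of just that expression (FIELD_FIT_RT and BF_SHF are names invented for this sketch, not kernel API):

#include <stdint.h>
#include <stdio.h>

/* Lowest set bit of the mask, like __bf_shf() in include/linux/bitfield.h. */
#define BF_SHF(mask)  (__builtin_ffsll(mask) - 1)

/* The runtime check FIELD_FIT() keeps: does the value fit in the field? */
#define FIELD_FIT_RT(mask, val) \
        (!((((uint64_t)(val)) << BF_SHF(mask)) & ~((uint64_t)(mask))))

int main(void)
{
        uint64_t mask = 0xffULL << 16;  /* an 8-bit field at bits 23:16 */

        printf("%d\n", (int)FIELD_FIT_RT(mask, 0x7fu));   /* 1: fits */
        printf("%d\n", (int)FIELD_FIT_RT(mask, 0x1ffu));  /* 0: too large, but no build error */
        return 0;
}
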
diff --git a/queue-5.7/crypto-ccp-fix-use-of-merged-scatterlists.patch b/queue-5.7/crypto-ccp-fix-use-of-merged-scatterlists.patch
new file mode 100644 (file)
index 0000000..d5eeca1
--- /dev/null
@@ -0,0 +1,176 @@
+From 8a302808c60d441d9884cb00ea7f2b534f2e3ca5 Mon Sep 17 00:00:00 2001
+From: John Allen <john.allen@amd.com>
+Date: Mon, 22 Jun 2020 15:24:02 -0500
+Subject: crypto: ccp - Fix use of merged scatterlists
+
+From: John Allen <john.allen@amd.com>
+
+commit 8a302808c60d441d9884cb00ea7f2b534f2e3ca5 upstream.
+
+Running the crypto manager self tests with
+CONFIG_CRYPTO_MANAGER_EXTRA_TESTS may result in several types of errors
+when using the ccp-crypto driver:
+
+alg: skcipher: cbc-des3-ccp encryption failed on test vector 0; expected_error=0, actual_error=-5 ...
+
+alg: skcipher: ctr-aes-ccp decryption overran dst buffer on test vector 0 ...
+
+alg: ahash: sha224-ccp test failed (wrong result) on test vector ...
+
+These errors are the result of improper processing of scatterlists mapped
+for DMA.
+
+Given a scatterlist in which entries are merged as part of mapping the
+scatterlist for DMA, the DMA length of a merged entry will reflect the
+combined length of the entries that were merged. The subsequent
+scatterlist entry will contain DMA information for the scatterlist entry
+after the last merged entry, but the non-DMA information will be that of
+the first merged entry.
+
+The ccp driver does not take this scatterlist merging into account. To
+address this, add a second scatterlist pointer to track the current
+position in the DMA mapped representation of the scatterlist. Both the DMA
+representation and the original representation of the scatterlist must be
+tracked as while most of the driver can use just the DMA representation,
+scatterlist_map_and_copy() must use the original representation and
+expects the scatterlist pointer to be accurate to the original
+representation.
+
+In order to properly walk the original scatterlist, the scatterlist must
+be walked until the combined lengths of the entries seen is equal to the
+DMA length of the current entry being processed in the DMA mapped
+representation.
+
+Fixes: 63b945091a070 ("crypto: ccp - CCP device driver and interface support")
+Signed-off-by: John Allen <john.allen@amd.com>
+Cc: stable@vger.kernel.org
+Acked-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccp/ccp-dev.h |    1 +
+ drivers/crypto/ccp/ccp-ops.c |   37 ++++++++++++++++++++++++++-----------
+ 2 files changed, 27 insertions(+), 11 deletions(-)
+
+--- a/drivers/crypto/ccp/ccp-dev.h
++++ b/drivers/crypto/ccp/ccp-dev.h
+@@ -469,6 +469,7 @@ struct ccp_sg_workarea {
+       unsigned int sg_used;
+       struct scatterlist *dma_sg;
++      struct scatterlist *dma_sg_head;
+       struct device *dma_dev;
+       unsigned int dma_count;
+       enum dma_data_direction dma_dir;
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -63,7 +63,7 @@ static u32 ccp_gen_jobid(struct ccp_devi
+ static void ccp_sg_free(struct ccp_sg_workarea *wa)
+ {
+       if (wa->dma_count)
+-              dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);
++              dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir);
+       wa->dma_count = 0;
+ }
+@@ -92,6 +92,7 @@ static int ccp_init_sg_workarea(struct c
+               return 0;
+       wa->dma_sg = sg;
++      wa->dma_sg_head = sg;
+       wa->dma_dev = dev;
+       wa->dma_dir = dma_dir;
+       wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
+@@ -104,14 +105,28 @@ static int ccp_init_sg_workarea(struct c
+ static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
+ {
+       unsigned int nbytes = min_t(u64, len, wa->bytes_left);
++      unsigned int sg_combined_len = 0;
+       if (!wa->sg)
+               return;
+       wa->sg_used += nbytes;
+       wa->bytes_left -= nbytes;
+-      if (wa->sg_used == wa->sg->length) {
+-              wa->sg = sg_next(wa->sg);
++      if (wa->sg_used == sg_dma_len(wa->dma_sg)) {
++              /* Advance to the next DMA scatterlist entry */
++              wa->dma_sg = sg_next(wa->dma_sg);
++
++              /* In the case that the DMA mapped scatterlist has entries
++               * that have been merged, the non-DMA mapped scatterlist
++               * must be advanced multiple times for each merged entry.
++               * This ensures that the current non-DMA mapped entry
++               * corresponds to the current DMA mapped entry.
++               */
++              do {
++                      sg_combined_len += wa->sg->length;
++                      wa->sg = sg_next(wa->sg);
++              } while (wa->sg_used > sg_combined_len);
++
+               wa->sg_used = 0;
+       }
+ }
+@@ -299,7 +314,7 @@ static unsigned int ccp_queue_buf(struct
+       /* Update the structures and generate the count */
+       buf_count = 0;
+       while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
+-              nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
++              nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used,
+                            dm_wa->length - buf_count);
+               nbytes = min_t(u64, sg_wa->bytes_left, nbytes);
+@@ -331,11 +346,11 @@ static void ccp_prepare_data(struct ccp_
+        * and destination. The resulting len values will always be <= UINT_MAX
+        * because the dma length is an unsigned int.
+        */
+-      sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
++      sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used;
+       sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);
+       if (dst) {
+-              sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
++              sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used;
+               sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
+               op_len = min(sg_src_len, sg_dst_len);
+       } else {
+@@ -365,7 +380,7 @@ static void ccp_prepare_data(struct ccp_
+               /* Enough data in the sg element, but we need to
+                * adjust for any previously copied data
+                */
+-              op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
++              op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg);
+               op->src.u.dma.offset = src->sg_wa.sg_used;
+               op->src.u.dma.length = op_len & ~(block_size - 1);
+@@ -386,7 +401,7 @@ static void ccp_prepare_data(struct ccp_
+                       /* Enough room in the sg element, but we need to
+                        * adjust for any previously used area
+                        */
+-                      op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
++                      op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg);
+                       op->dst.u.dma.offset = dst->sg_wa.sg_used;
+                       op->dst.u.dma.length = op->src.u.dma.length;
+               }
+@@ -2028,7 +2043,7 @@ ccp_run_passthru_cmd(struct ccp_cmd_queu
+       dst.sg_wa.sg_used = 0;
+       for (i = 1; i <= src.sg_wa.dma_count; i++) {
+               if (!dst.sg_wa.sg ||
+-                  (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
++                  (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) {
+                       ret = -EINVAL;
+                       goto e_dst;
+               }
+@@ -2054,8 +2069,8 @@ ccp_run_passthru_cmd(struct ccp_cmd_queu
+                       goto e_dst;
+               }
+-              dst.sg_wa.sg_used += src.sg_wa.sg->length;
+-              if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
++              dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg);
++              if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) {
+                       dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
+                       dst.sg_wa.sg_used = 0;
+               }
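
The core of the fix is the do/while that re-synchronises the original scatterlist with its DMA-mapped (possibly merged) counterpart. A tiny self-contained model of that walk, using made-up segment lengths: two 4096-byte entries merged by the IOMMU into one 8192-byte DMA entry.

#include <stdio.h>

/* Made-up lengths: the first two original entries were merged for DMA. */
static const unsigned int sg_len[]  = { 4096, 4096, 1024 };
static const unsigned int dma_len[] = { 8192, 1024 };

int main(void)
{
        unsigned int sg = 0, dma = 0;          /* positions in each view of the list */
        unsigned int sg_used = dma_len[dma];   /* the whole first DMA entry was consumed */
        unsigned int combined = 0;

        /* The fix: advance the original list until the lengths seen cover
         * everything accounted to the current DMA entry. */
        do {
                combined += sg_len[sg++];
        } while (sg_used > combined);
        dma++;

        printf("DMA entry %u now pairs with original entry %u\n", dma, sg);  /* 1 and 2 */
        return 0;
}
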
diff --git a/queue-5.7/crypto-cpt-don-t-sleep-of-crypto_tfm_req_may_sleep-was-not-specified.patch b/queue-5.7/crypto-cpt-don-t-sleep-of-crypto_tfm_req_may_sleep-was-not-specified.patch
new file mode 100644 (file)
index 0000000..afe5a12
--- /dev/null
@@ -0,0 +1,103 @@
+From 9e27c99104707f083dccd3b4d79762859b5a0614 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Wed, 17 Jun 2020 09:48:56 -0400
+Subject: crypto: cpt - don't sleep of CRYPTO_TFM_REQ_MAY_SLEEP was not specified
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 9e27c99104707f083dccd3b4d79762859b5a0614 upstream.
+
+There is this call chain:
+cvm_encrypt -> cvm_enc_dec -> cptvf_do_request -> process_request -> kzalloc
+where we call a sleeping allocator function even if CRYPTO_TFM_REQ_MAY_SLEEP
+was not specified.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Cc: stable@vger.kernel.org     # v4.11+
+Fixes: c694b233295b ("crypto: cavium - Add the Virtual Function driver for CPT")
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/cavium/cpt/cptvf_algs.c       |    1 +
+ drivers/crypto/cavium/cpt/cptvf_reqmanager.c |   12 ++++++------
+ drivers/crypto/cavium/cpt/request_manager.h  |    2 ++
+ 3 files changed, 9 insertions(+), 6 deletions(-)
+
+--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
++++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
+@@ -200,6 +200,7 @@ static inline int cvm_enc_dec(struct skc
+       int status;
+       memset(req_info, 0, sizeof(struct cpt_request_info));
++      req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0;
+       memset(fctx, 0, sizeof(struct fc_context));
+       create_input_list(req, enc, enc_iv_len);
+       create_output_list(req, enc_iv_len);
+--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
++++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+@@ -133,7 +133,7 @@ static inline int setup_sgio_list(struct
+       /* Setup gather (input) components */
+       g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
+-      info->gather_components = kzalloc(g_sz_bytes, GFP_KERNEL);
++      info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
+       if (!info->gather_components) {
+               ret = -ENOMEM;
+               goto  scatter_gather_clean;
+@@ -150,7 +150,7 @@ static inline int setup_sgio_list(struct
+       /* Setup scatter (output) components */
+       s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
+-      info->scatter_components = kzalloc(s_sz_bytes, GFP_KERNEL);
++      info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
+       if (!info->scatter_components) {
+               ret = -ENOMEM;
+               goto  scatter_gather_clean;
+@@ -167,7 +167,7 @@ static inline int setup_sgio_list(struct
+       /* Create and initialize DPTR */
+       info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
+-      info->in_buffer = kzalloc(info->dlen, GFP_KERNEL);
++      info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
+       if (!info->in_buffer) {
+               ret = -ENOMEM;
+               goto  scatter_gather_clean;
+@@ -195,7 +195,7 @@ static inline int setup_sgio_list(struct
+       }
+       /* Create and initialize RPTR */
+-      info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL);
++      info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
+       if (!info->out_buffer) {
+               ret = -ENOMEM;
+               goto scatter_gather_clean;
+@@ -421,7 +421,7 @@ int process_request(struct cpt_vf *cptvf
+       struct cpt_vq_command vq_cmd;
+       union cpt_inst_s cptinst;
+-      info = kzalloc(sizeof(*info), GFP_KERNEL);
++      info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
+       if (unlikely(!info)) {
+               dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
+               return -ENOMEM;
+@@ -443,7 +443,7 @@ int process_request(struct cpt_vf *cptvf
+        * Get buffer for union cpt_res_s response
+        * structure and its physical address
+        */
+-      info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
++      info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
+       if (unlikely(!info->completion_addr)) {
+               dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
+               ret = -ENOMEM;
+--- a/drivers/crypto/cavium/cpt/request_manager.h
++++ b/drivers/crypto/cavium/cpt/request_manager.h
+@@ -62,6 +62,8 @@ struct cpt_request_info {
+       union ctrl_info ctrl; /* User control information */
+       struct cptvf_request req; /* Request Information (Core specific) */
++      bool may_sleep;
++
+       struct buf_ptr in[MAX_BUF_CNT];
+       struct buf_ptr out[MAX_BUF_CNT];
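
Both this patch and the hisilicon one that follows apply the same rule: the request's CRYPTO_TFM_REQ_MAY_SLEEP flag decides between GFP_KERNEL and GFP_ATOMIC on the request path. A kernel-context sketch of that rule (req_gfp is an invented helper, not part of either driver):

/* Kernel-context sketch, not a standalone program. */
static gfp_t req_gfp(u32 request_flags)
{
        return (request_flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
}

/* e.g.: info = kzalloc(sizeof(*info), req_gfp(req->base.flags)); */
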
diff --git a/queue-5.7/crypto-hisilicon-don-t-sleep-of-crypto_tfm_req_may_sleep-was-not-specified.patch b/queue-5.7/crypto-hisilicon-don-t-sleep-of-crypto_tfm_req_may_sleep-was-not-specified.patch
new file mode 100644 (file)
index 0000000..478483e
--- /dev/null
@@ -0,0 +1,173 @@
+From 5ead051780404b5cb22147170acadd1994dc3236 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Wed, 17 Jun 2020 09:49:52 -0400
+Subject: crypto: hisilicon - don't sleep of CRYPTO_TFM_REQ_MAY_SLEEP was not specified
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 5ead051780404b5cb22147170acadd1994dc3236 upstream.
+
+There is this call chain:
+sec_alg_skcipher_encrypt -> sec_alg_skcipher_crypto ->
+sec_alg_alloc_and_calc_split_sizes -> kcalloc
+where we call a sleeping allocator function even if CRYPTO_TFM_REQ_MAY_SLEEP
+was not specified.
+
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Cc: stable@vger.kernel.org     # v4.19+
+Fixes: 915e4e8413da ("crypto: hisilicon - SEC security accelerator driver")
+Acked-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/hisilicon/sec/sec_algs.c |   34 ++++++++++++++++----------------
+ 1 file changed, 18 insertions(+), 16 deletions(-)
+
+--- a/drivers/crypto/hisilicon/sec/sec_algs.c
++++ b/drivers/crypto/hisilicon/sec/sec_algs.c
+@@ -175,7 +175,8 @@ static int sec_alloc_and_fill_hw_sgl(str
+                                    dma_addr_t *psec_sgl,
+                                    struct scatterlist *sgl,
+                                    int count,
+-                                   struct sec_dev_info *info)
++                                   struct sec_dev_info *info,
++                                   gfp_t gfp)
+ {
+       struct sec_hw_sgl *sgl_current = NULL;
+       struct sec_hw_sgl *sgl_next;
+@@ -190,7 +191,7 @@ static int sec_alloc_and_fill_hw_sgl(str
+               sge_index = i % SEC_MAX_SGE_NUM;
+               if (sge_index == 0) {
+                       sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
+-                                                 GFP_KERNEL, &sgl_next_dma);
++                                                 gfp, &sgl_next_dma);
+                       if (!sgl_next) {
+                               ret = -ENOMEM;
+                               goto err_free_hw_sgls;
+@@ -545,14 +546,14 @@ void sec_alg_callback(struct sec_bd_info
+ }
+ static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
+-                                            int *steps)
++                                            int *steps, gfp_t gfp)
+ {
+       size_t *sizes;
+       int i;
+       /* Split into suitable sized blocks */
+       *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
+-      sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
++      sizes = kcalloc(*steps, sizeof(*sizes), gfp);
+       if (!sizes)
+               return -ENOMEM;
+@@ -568,7 +569,7 @@ static int sec_map_and_split_sg(struct s
+                               int steps, struct scatterlist ***splits,
+                               int **splits_nents,
+                               int sgl_len_in,
+-                              struct device *dev)
++                              struct device *dev, gfp_t gfp)
+ {
+       int ret, count;
+@@ -576,12 +577,12 @@ static int sec_map_and_split_sg(struct s
+       if (!count)
+               return -EINVAL;
+-      *splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
++      *splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
+       if (!*splits) {
+               ret = -ENOMEM;
+               goto err_unmap_sg;
+       }
+-      *splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
++      *splits_nents = kcalloc(steps, sizeof(int), gfp);
+       if (!*splits_nents) {
+               ret = -ENOMEM;
+               goto err_free_splits;
+@@ -589,7 +590,7 @@ static int sec_map_and_split_sg(struct s
+       /* output the scatter list before and after this */
+       ret = sg_split(sgl, count, 0, steps, split_sizes,
+-                     *splits, *splits_nents, GFP_KERNEL);
++                     *splits, *splits_nents, gfp);
+       if (ret) {
+               ret = -ENOMEM;
+               goto err_free_splits_nents;
+@@ -630,13 +631,13 @@ static struct sec_request_el
+                          int el_size, bool different_dest,
+                          struct scatterlist *sgl_in, int n_ents_in,
+                          struct scatterlist *sgl_out, int n_ents_out,
+-                         struct sec_dev_info *info)
++                         struct sec_dev_info *info, gfp_t gfp)
+ {
+       struct sec_request_el *el;
+       struct sec_bd_info *req;
+       int ret;
+-      el = kzalloc(sizeof(*el), GFP_KERNEL);
++      el = kzalloc(sizeof(*el), gfp);
+       if (!el)
+               return ERR_PTR(-ENOMEM);
+       el->el_length = el_size;
+@@ -668,7 +669,7 @@ static struct sec_request_el
+       el->sgl_in = sgl_in;
+       ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
+-                                      n_ents_in, info);
++                                      n_ents_in, info, gfp);
+       if (ret)
+               goto err_free_el;
+@@ -679,7 +680,7 @@ static struct sec_request_el
+               el->sgl_out = sgl_out;
+               ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
+                                               el->sgl_out,
+-                                              n_ents_out, info);
++                                              n_ents_out, info, gfp);
+               if (ret)
+                       goto err_free_hw_sgl_in;
+@@ -720,6 +721,7 @@ static int sec_alg_skcipher_crypto(struc
+       int *splits_out_nents = NULL;
+       struct sec_request_el *el, *temp;
+       bool split = skreq->src != skreq->dst;
++      gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+       mutex_init(&sec_req->lock);
+       sec_req->req_base = &skreq->base;
+@@ -728,13 +730,13 @@ static int sec_alg_skcipher_crypto(struc
+       sec_req->len_in = sg_nents(skreq->src);
+       ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
+-                                               &steps);
++                                               &steps, gfp);
+       if (ret)
+               return ret;
+       sec_req->num_elements = steps;
+       ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
+                                  &splits_in_nents, sec_req->len_in,
+-                                 info->dev);
++                                 info->dev, gfp);
+       if (ret)
+               goto err_free_split_sizes;
+@@ -742,7 +744,7 @@ static int sec_alg_skcipher_crypto(struc
+               sec_req->len_out = sg_nents(skreq->dst);
+               ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
+                                          &splits_out, &splits_out_nents,
+-                                         sec_req->len_out, info->dev);
++                                         sec_req->len_out, info->dev, gfp);
+               if (ret)
+                       goto err_unmap_in_sg;
+       }
+@@ -775,7 +777,7 @@ static int sec_alg_skcipher_crypto(struc
+                                              splits_in[i], splits_in_nents[i],
+                                              split ? splits_out[i] : NULL,
+                                              split ? splits_out_nents[i] : 0,
+-                                             info);
++                                             info, gfp);
+               if (IS_ERR(el)) {
+                       ret = PTR_ERR(el);
+                       goto err_free_elements;
diff --git a/queue-5.7/crypto-qat-fix-double-free-in-qat_uclo_create_batch_init_list.patch b/queue-5.7/crypto-qat-fix-double-free-in-qat_uclo_create_batch_init_list.patch
new file mode 100644 (file)
index 0000000..cebc13c
--- /dev/null
@@ -0,0 +1,91 @@
+From c06c76602e03bde24ee69a2022a829127e504202 Mon Sep 17 00:00:00 2001
+From: Tom Rix <trix@redhat.com>
+Date: Mon, 13 Jul 2020 07:06:34 -0700
+Subject: crypto: qat - fix double free in qat_uclo_create_batch_init_list
+
+From: Tom Rix <trix@redhat.com>
+
+commit c06c76602e03bde24ee69a2022a829127e504202 upstream.
+
+clang static analysis flags this error
+
+qat_uclo.c:297:3: warning: Attempt to free released memory
+  [unix.Malloc]
+                kfree(*init_tab_base);
+                ^~~~~~~~~~~~~~~~~~~~~
+
+When the input *init_tab_base is null, the function allocates memory for
+the head of the list.  When there is a problem allocating the other list
+elements, the list is unwound and freed.  Then a check is made whether the
+list head was allocated, and it is freed as well.
+
+The variable 'tail_old' keeps track of what may need to be freed.
+The unwinding/freeing block is
+
+       while (tail_old) {
+               mem_init = tail_old->next;
+               kfree(tail_old);
+               tail_old = mem_init;
+       }
+
+The problem is that the first element of tail_old is also what was
+allocated for the list head
+
+               init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
+               ...
+               *init_tab_base = init_header;
+               flag = 1;
+       }
+       tail_old = init_header;
+
+So *init_tab_base/init_header are freed twice.
+
+There is another problem.
+When the input *init_tab_base is non-null, tail_old is calculated by
+traveling down the list to the last entry.
+
+       tail_old = init_header;
+       while (tail_old->next)
+               tail_old = tail_old->next;
+
+When the unwinding free happens, the last entry of the input list will
+be freed.
+
+So the freeing needs a general change.
+If it was locally allocated, the first element of tail_old is freed,
+else it is skipped.  As a bit of cleanup, reset *init_tab_base if it
+came in as null.
+
+Fixes: b4b7e67c917f ("crypto: qat - Intel(R) QAT ucode part of fw loader")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Tom Rix <trix@redhat.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/qat/qat_common/qat_uclo.c |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/qat/qat_common/qat_uclo.c
++++ b/drivers/crypto/qat/qat_common/qat_uclo.c
+@@ -332,13 +332,18 @@ static int qat_uclo_create_batch_init_li
+       }
+       return 0;
+ out_err:
++      /* Do not free the list head unless we allocated it. */
++      tail_old = tail_old->next;
++      if (flag) {
++              kfree(*init_tab_base);
++              *init_tab_base = NULL;
++      }
++
+       while (tail_old) {
+               mem_init = tail_old->next;
+               kfree(tail_old);
+               tail_old = mem_init;
+       }
+-      if (flag)
+-              kfree(*init_tab_base);
+       return -ENOMEM;
+ }
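
The corrected unwind can be modelled in isolation: free the list head only when this call allocated it, then free every element appended after the node the caller already owned. A self-contained sketch with invented names (struct init_mem here is just a next pointer):

#include <stdlib.h>

struct init_mem { struct init_mem *next; };

static void unwind(struct init_mem **base, struct init_mem *tail_old, int allocated_head)
{
        struct init_mem *cur = tail_old->next;   /* skip the caller-owned (or head) node */

        if (allocated_head) {
                free(*base);                     /* the head allocated by this call... */
                *base = NULL;                    /* ...and tell the caller it is gone */
        }
        while (cur) {
                struct init_mem *next = cur->next;

                free(cur);
                cur = next;
        }
}

int main(void)
{
        struct init_mem *head = calloc(1, sizeof(*head));

        if (!head)
                return 1;
        head->next = calloc(1, sizeof(*head));   /* one element appended by "this call" */

        unwind(&head, head, 1);                  /* the head is freed exactly once */
        return head == NULL ? 0 : 1;
}
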
diff --git a/queue-5.7/driver-core-fix-probe_count-imbalance-in-really_probe.patch b/queue-5.7/driver-core-fix-probe_count-imbalance-in-really_probe.patch
new file mode 100644 (file)
index 0000000..9379edc
--- /dev/null
@@ -0,0 +1,64 @@
+From b292b50b0efcc7095d8bf15505fba6909bb35dce Mon Sep 17 00:00:00 2001
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Date: Mon, 13 Jul 2020 11:12:54 +0900
+Subject: driver core: Fix probe_count imbalance in really_probe()
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+commit b292b50b0efcc7095d8bf15505fba6909bb35dce upstream.
+
+syzbot is reporting hung task in wait_for_device_probe() [1]. At least,
+we always need to decrement probe_count if we incremented probe_count in
+really_probe().
+
+However, since I can't find the "Resources present before probing" message
+in the console log, it is possible either that this message simply scrolled
+off or that syzbot is not hitting this path. Therefore, while we are at it,
+let's also prepare for concurrent wait_for_device_probe() calls by replacing
+wake_up() with wake_up_all().
+
+[1] https://syzkaller.appspot.com/bug?id=25c833f1983c9c1d512f4ff860dd0d7f5a2e2c0f
+
+Reported-by: syzbot <syzbot+805f5f6ae37411f15b64@syzkaller.appspotmail.com>
+Fixes: 7c35e699c88bd607 ("driver core: Print device when resources present in really_probe()")
+Cc: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Cc: stable <stable@kernel.org>
+Link: https://lore.kernel.org/r/20200713021254.3444-1-penguin-kernel@I-love.SAKURA.ne.jp
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ drivers/base/dd.c |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -276,7 +276,7 @@ static void deferred_probe_timeout_work_
+       list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
+               dev_info(private->device, "deferred probe pending");
+-      wake_up(&probe_timeout_waitqueue);
++      wake_up_all(&probe_timeout_waitqueue);
+ }
+ static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
+@@ -487,7 +487,8 @@ static int really_probe(struct device *d
+                drv->bus->name, __func__, drv->name, dev_name(dev));
+       if (!list_empty(&dev->devres_head)) {
+               dev_crit(dev, "Resources present before probing\n");
+-              return -EBUSY;
++              ret = -EBUSY;
++              goto done;
+       }
+ re_probe:
+@@ -608,7 +609,7 @@ pinctrl_bind_failed:
+       ret = 0;
+ done:
+       atomic_dec(&probe_count);
+-      wake_up(&probe_waitqueue);
++      wake_up_all(&probe_waitqueue);
+       return ret;
+ }
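
The -EBUSY change is about control flow rather than the error value: once the probe counter has been incremented, every exit must fall through the decrement (and the wake-up). A self-contained sketch of that shape, with probe_sketch standing in for really_probe():

#include <stdio.h>

static int probe_count;                  /* stands in for the atomic_t in drivers/base/dd.c */

static int probe_sketch(int resources_present)
{
        int ret = 0;

        probe_count++;
        if (resources_present) {
                ret = -16;               /* -EBUSY, but via goto instead of an early return */
                goto done;
        }
        /* ... the actual probe work would run here ... */
done:
        probe_count--;                   /* always paired with the increment above */
        /* in the kernel: wake_up_all(&probe_waitqueue); */
        return ret;
}

int main(void)
{
        probe_sketch(1);                 /* take the early-failure path */
        printf("probe_count = %d\n", probe_count);   /* 0: balanced */
        return 0;
}
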
diff --git a/queue-5.7/drm-ttm-nouveau-don-t-call-tt-destroy-callback-on-alloc-failure.patch b/queue-5.7/drm-ttm-nouveau-don-t-call-tt-destroy-callback-on-alloc-failure.patch
new file mode 100644 (file)
index 0000000..4a1a8ae
--- /dev/null
@@ -0,0 +1,76 @@
+From 5de5b6ecf97a021f29403aa272cb4e03318ef586 Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@redhat.com>
+Date: Tue, 28 Jul 2020 14:17:36 +1000
+Subject: drm/ttm/nouveau: don't call tt destroy callback on alloc failure.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Dave Airlie <airlied@redhat.com>
+
+commit 5de5b6ecf97a021f29403aa272cb4e03318ef586 upstream.
+
+This is confusing, and from my reading of all the drivers only
+nouveau got this right.
+
+Just make the API leave the driver in control of its own allocation
+failing, and don't call destroy: if the page table fails to be
+created, there is nothing to clean up here.
+
+(I'm willing to believe I've missed something here, so please
+review deeply).
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200728041736.20689-1-airlied@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/nouveau/nouveau_sgdma.c |    9 +++------
+ drivers/gpu/drm/ttm/ttm_tt.c            |    3 ---
+ 2 files changed, 3 insertions(+), 9 deletions(-)
+
+--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
++++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+@@ -96,12 +96,9 @@ nouveau_sgdma_create_ttm(struct ttm_buff
+       else
+               nvbe->ttm.ttm.func = &nv50_sgdma_backend;
+-      if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags))
+-              /*
+-               * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
+-               * and thus our nouveau_sgdma_destroy() hook, so we don't need
+-               * to free nvbe here.
+-               */
++      if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) {
++              kfree(nvbe);
+               return NULL;
++      }
+       return &nvbe->ttm.ttm;
+ }
+--- a/drivers/gpu/drm/ttm/ttm_tt.c
++++ b/drivers/gpu/drm/ttm/ttm_tt.c
+@@ -242,7 +242,6 @@ int ttm_tt_init(struct ttm_tt *ttm, stru
+       ttm_tt_init_fields(ttm, bo, page_flags);
+       if (ttm_tt_alloc_page_directory(ttm)) {
+-              ttm_tt_destroy(ttm);
+               pr_err("Failed allocating page table\n");
+               return -ENOMEM;
+       }
+@@ -266,7 +265,6 @@ int ttm_dma_tt_init(struct ttm_dma_tt *t
+       INIT_LIST_HEAD(&ttm_dma->pages_list);
+       if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
+-              ttm_tt_destroy(ttm);
+               pr_err("Failed allocating page table\n");
+               return -ENOMEM;
+       }
+@@ -288,7 +286,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *tt
+       else
+               ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
+       if (ret) {
+-              ttm_tt_destroy(ttm);
+               pr_err("Failed allocating page table\n");
+               return -ENOMEM;
+       }
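
The ownership rule the patch settles on: a failing init() cleans up only what it allocated itself and never invokes the caller's destroy hook, while the caller frees the wrapper it allocated. A small userspace sketch of that split, with invented names (struct tt, tt_init, driver_create_tt):

#include <stdlib.h>

struct tt { void **pages; };

static int tt_init(struct tt *t, size_t n)
{
        t->pages = calloc(n, sizeof(*t->pages));
        return t->pages ? 0 : -12;       /* -ENOMEM; no destroy callback on this path */
}

static struct tt *driver_create_tt(size_t n)
{
        struct tt *t = malloc(sizeof(*t));

        if (!t)
                return NULL;
        if (tt_init(t, n)) {
                free(t);                 /* the driver frees what the driver allocated */
                return NULL;
        }
        return t;
}

int main(void)
{
        struct tt *t = driver_create_tt(4);

        if (t) {
                free(t->pages);
                free(t);
        }
        return 0;
}
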
diff --git a/queue-5.7/fs-minix-check-return-value-of-sb_getblk.patch b/queue-5.7/fs-minix-check-return-value-of-sb_getblk.patch
new file mode 100644 (file)
index 0000000..9206dd7
--- /dev/null
@@ -0,0 +1,78 @@
+From da27e0a0e5f655f0d58d4e153c3182bb2b290f64 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Tue, 11 Aug 2020 18:35:24 -0700
+Subject: fs/minix: check return value of sb_getblk()
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit da27e0a0e5f655f0d58d4e153c3182bb2b290f64 upstream.
+
+Patch series "fs/minix: fix syzbot bugs and set s_maxbytes".
+
+This series fixes all syzbot bugs in the minix filesystem:
+
+       KASAN: null-ptr-deref Write in get_block
+       KASAN: use-after-free Write in get_block
+       KASAN: use-after-free Read in get_block
+       WARNING in inc_nlink
+       KMSAN: uninit-value in get_block
+       WARNING in drop_nlink
+
+It also fixes the minix filesystem to set s_maxbytes correctly, so that
+userspace sees the correct behavior when exceeding the max file size.
+
+This patch (of 6):
+
+sb_getblk() can fail, so check its return value.
+
+This fixes a NULL pointer dereference.
+
+Originally from Qiujun Huang.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: syzbot+4a88b2b9dc280f47baf4@syzkaller.appspotmail.com
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Qiujun Huang <anenbupt@gmail.com>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200628060846.682158-1-ebiggers@kernel.org
+Link: http://lkml.kernel.org/r/20200628060846.682158-2-ebiggers@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/minix/itree_common.c |    8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/fs/minix/itree_common.c
++++ b/fs/minix/itree_common.c
+@@ -75,6 +75,7 @@ static int alloc_branch(struct inode *in
+       int n = 0;
+       int i;
+       int parent = minix_new_block(inode);
++      int err = -ENOSPC;
+       branch[0].key = cpu_to_block(parent);
+       if (parent) for (n = 1; n < num; n++) {
+@@ -85,6 +86,11 @@ static int alloc_branch(struct inode *in
+                       break;
+               branch[n].key = cpu_to_block(nr);
+               bh = sb_getblk(inode->i_sb, parent);
++              if (!bh) {
++                      minix_free_block(inode, nr);
++                      err = -ENOMEM;
++                      break;
++              }
+               lock_buffer(bh);
+               memset(bh->b_data, 0, bh->b_size);
+               branch[n].bh = bh;
+@@ -103,7 +109,7 @@ static int alloc_branch(struct inode *in
+               bforget(branch[i].bh);
+       for (i = 0; i < n; i++)
+               minix_free_block(inode, block_to_cpu(branch[i].key));
+-      return -ENOSPC;
++      return err;
+ }
+ static inline int splice_branch(struct inode *inode,
diff --git a/queue-5.7/fs-minix-don-t-allow-getting-deleted-inodes.patch b/queue-5.7/fs-minix-don-t-allow-getting-deleted-inodes.patch
new file mode 100644 (file)
index 0000000..f92d6b7
--- /dev/null
@@ -0,0 +1,59 @@
+From facb03dddec04e4aac1bb2139accdceb04deb1f3 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Tue, 11 Aug 2020 18:35:27 -0700
+Subject: fs/minix: don't allow getting deleted inodes
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit facb03dddec04e4aac1bb2139accdceb04deb1f3 upstream.
+
+If an inode has no links, we need to mark it bad rather than allowing it
+to be accessed.  This avoids WARNINGs in inc_nlink() and drop_nlink() when
+doing directory operations on a fuzzed filesystem.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: syzbot+a9ac3de1b5de5fb10efc@syzkaller.appspotmail.com
+Reported-by: syzbot+df958cf5688a96ad3287@syzkaller.appspotmail.com
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: Qiujun Huang <anenbupt@gmail.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200628060846.682158-3-ebiggers@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/minix/inode.c |   14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/fs/minix/inode.c
++++ b/fs/minix/inode.c
+@@ -468,6 +468,13 @@ static struct inode *V1_minix_iget(struc
+               iget_failed(inode);
+               return ERR_PTR(-EIO);
+       }
++      if (raw_inode->i_nlinks == 0) {
++              printk("MINIX-fs: deleted inode referenced: %lu\n",
++                     inode->i_ino);
++              brelse(bh);
++              iget_failed(inode);
++              return ERR_PTR(-ESTALE);
++      }
+       inode->i_mode = raw_inode->i_mode;
+       i_uid_write(inode, raw_inode->i_uid);
+       i_gid_write(inode, raw_inode->i_gid);
+@@ -501,6 +508,13 @@ static struct inode *V2_minix_iget(struc
+               iget_failed(inode);
+               return ERR_PTR(-EIO);
+       }
++      if (raw_inode->i_nlinks == 0) {
++              printk("MINIX-fs: deleted inode referenced: %lu\n",
++                     inode->i_ino);
++              brelse(bh);
++              iget_failed(inode);
++              return ERR_PTR(-ESTALE);
++      }
+       inode->i_mode = raw_inode->i_mode;
+       i_uid_write(inode, raw_inode->i_uid);
+       i_gid_write(inode, raw_inode->i_gid);
diff --git a/queue-5.7/fs-minix-reject-too-large-maximum-file-size.patch b/queue-5.7/fs-minix-reject-too-large-maximum-file-size.patch
new file mode 100644 (file)
index 0000000..5b871d1
--- /dev/null
@@ -0,0 +1,76 @@
+From 270ef41094e9fa95273f288d7d785313ceab2ff3 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Tue, 11 Aug 2020 18:35:30 -0700
+Subject: fs/minix: reject too-large maximum file size
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 270ef41094e9fa95273f288d7d785313ceab2ff3 upstream.
+
+If the minix filesystem tries to map a very large logical block number to
+its on-disk location, block_to_path() can return offsets that are too
+large, causing out-of-bounds memory accesses when accessing indirect index
+blocks.  This should be prevented by the check against the maximum file
+size, but this doesn't work because the maximum file size is read directly
+from the on-disk superblock and isn't validated itself.
+
+Fix this by validating the maximum file size at mount time.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: syzbot+c7d9ec7a1a7272dd71b3@syzkaller.appspotmail.com
+Reported-by: syzbot+3b7b03a0c28948054fb5@syzkaller.appspotmail.com
+Reported-by: syzbot+6e056ee473568865f3e6@syzkaller.appspotmail.com
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: Qiujun Huang <anenbupt@gmail.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200628060846.682158-4-ebiggers@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/minix/inode.c |   22 ++++++++++++++++++++--
+ 1 file changed, 20 insertions(+), 2 deletions(-)
+
+--- a/fs/minix/inode.c
++++ b/fs/minix/inode.c
+@@ -150,6 +150,23 @@ static int minix_remount (struct super_b
+       return 0;
+ }
++static bool minix_check_superblock(struct minix_sb_info *sbi)
++{
++      if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
++              return false;
++
++      /*
++       * s_max_size must not exceed the block mapping limitation.  This check
++       * is only needed for V1 filesystems, since V2/V3 support an extra level
++       * of indirect blocks which places the limit well above U32_MAX.
++       */
++      if (sbi->s_version == MINIX_V1 &&
++          sbi->s_max_size > (7 + 512 + 512*512) * BLOCK_SIZE)
++              return false;
++
++      return true;
++}
++
+ static int minix_fill_super(struct super_block *s, void *data, int silent)
+ {
+       struct buffer_head *bh;
+@@ -228,11 +245,12 @@ static int minix_fill_super(struct super
+       } else
+               goto out_no_fs;
++      if (!minix_check_superblock(sbi))
++              goto out_illegal_sb;
++
+       /*
+        * Allocate the buffer map to keep the superblock small.
+        */
+-      if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
+-              goto out_illegal_sb;
+       i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh);
+       map = kzalloc(i, GFP_KERNEL);
+       if (!map)
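
Editor's note: for reference, the V1 bound used in minix_check_superblock()
works out from the 7 direct, 512 single-indirect and 512*512 double-indirect
block pointers named in the comment, assuming the kernel's 1 KiB BLOCK_SIZE.
A tiny stand-alone check of the arithmetic (not kernel code):

    #include <stdio.h>

    int main(void)
    {
        const unsigned long block_size = 1024;              /* BLOCK_SIZE for minix */
        const unsigned long max_blocks = 7 + 512 + 512UL * 512;
        const unsigned long max_bytes  = max_blocks * block_size;

        /* prints 268966912 bytes (~256 MiB); an s_max_size above this
         * cannot be mapped by a V1 inode and is rejected at mount time */
        printf("V1 addressable bytes: %lu (~%lu MiB)\n",
               max_bytes, max_bytes >> 20);
        return 0;
    }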
diff --git a/queue-5.7/io_uring-fail-poll-arm-on-queue-proc-failure.patch b/queue-5.7/io_uring-fail-poll-arm-on-queue-proc-failure.patch
new file mode 100644 (file)
index 0000000..7e388c8
--- /dev/null
@@ -0,0 +1,38 @@
+From a36da65c46565d2527eec3efdb546251e38253fd Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Tue, 11 Aug 2020 09:50:19 -0600
+Subject: io_uring: fail poll arm on queue proc failure
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit a36da65c46565d2527eec3efdb546251e38253fd upstream.
+
+Check the ipt.error value: it must have been either cleared to zero or
+set to an error other than the default -EINVAL if we didn't go through
+the waitqueue proc addition. Just give up on poll at that point and
+return failure; this will fall back to async work.
+
+io_poll_add() doesn't suffer from this failure case, as it returns the
+error value directly.
+
+Cc: stable@vger.kernel.org # v5.7+
+Reported-by: syzbot+a730016dc0bdce4f6ff5@syzkaller.appspotmail.com
+Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/io_uring.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -4544,7 +4544,7 @@ static bool io_arm_poll_handler(struct i
+       ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
+                                       io_async_wake);
+-      if (ret) {
++      if (ret || ipt.error) {
+               io_poll_remove_double(req, apoll->double_poll);
+               spin_unlock_irq(&ctx->completion_lock);
+               memcpy(&req->work, &apoll->work, sizeof(req->work));
diff --git a/queue-5.7/io_uring-set-ctx-sq-cq-entry-count-earlier.patch b/queue-5.7/io_uring-set-ctx-sq-cq-entry-count-earlier.patch
new file mode 100644 (file)
index 0000000..39f3c2d
--- /dev/null
@@ -0,0 +1,52 @@
+From bd74048108c179cea0ff52979506164c80f29da7 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Wed, 5 Aug 2020 12:58:23 -0600
+Subject: io_uring: set ctx sq/cq entry count earlier
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit bd74048108c179cea0ff52979506164c80f29da7 upstream.
+
+If we hit an earlier error path in io_uring_create(), then we will have
+accounted memory, but not set ctx->{sq,cq}_entries yet. Then when the
+ring is torn down in error, we use those values to unaccount the memory.
+
+Ensure we set the ctx entries before we're able to hit a potential error
+path.
+
+Cc: stable@vger.kernel.org
+Reported-by: Tomáš Chaloupka <chalucha@gmail.com>
+Tested-by: Tomáš Chaloupka <chalucha@gmail.com>
+Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/io_uring.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -7869,6 +7869,10 @@ static int io_allocate_scq_urings(struct
+       struct io_rings *rings;
+       size_t size, sq_array_offset;
++      /* make sure these are sane, as we already accounted them */
++      ctx->sq_entries = p->sq_entries;
++      ctx->cq_entries = p->cq_entries;
++
+       size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
+       if (size == SIZE_MAX)
+               return -EOVERFLOW;
+@@ -7885,8 +7889,6 @@ static int io_allocate_scq_urings(struct
+       rings->cq_ring_entries = p->cq_entries;
+       ctx->sq_mask = rings->sq_ring_mask;
+       ctx->cq_mask = rings->cq_ring_mask;
+-      ctx->sq_entries = rings->sq_ring_entries;
+-      ctx->cq_entries = rings->cq_ring_entries;
+       size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
+       if (size == SIZE_MAX) {
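
Editor's note: not part of the patch, just a small sketch of the ordering bug
class fixed above, with invented names (ctx, create, unaccount): if teardown
uses the entry counts to undo accounting, those fields must be set before the
first early error return, not only after the allocations succeed.

    #include <stdio.h>
    #include <stdlib.h>

    struct ctx {
        size_t sq_entries;
        size_t cq_entries;
        void *rings;
    };

    static void unaccount(const struct ctx *c)
    {
        /* teardown relies on the entry counts even on the error path */
        printf("unaccounting %zu + %zu entries\n", c->sq_entries, c->cq_entries);
    }

    static int create(struct ctx *c, size_t sq, size_t cq, int inject_failure)
    {
        c->sq_entries = sq;             /* set before any early return ...  */
        c->cq_entries = cq;             /* ... so teardown sees sane values */

        if (inject_failure)
            return -1;                  /* early error path                 */

        c->rings = calloc(sq + cq, 64);
        return c->rings ? 0 : -1;
    }

    int main(void)
    {
        struct ctx c = { 0 };

        if (create(&c, 128, 256, 1) != 0)
            unaccount(&c);              /* prints the real counts, not 0 + 0 */
        free(c.rings);
        return 0;
    }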
diff --git a/queue-5.7/io_uring-use-twa_signal-for-task_work-uncondtionally.patch b/queue-5.7/io_uring-use-twa_signal-for-task_work-uncondtionally.patch
new file mode 100644 (file)
index 0000000..163dae1
--- /dev/null
@@ -0,0 +1,64 @@
+From 0ba9c9edcd152158a0e321a4c13ac1dfc571ff3d Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Thu, 6 Aug 2020 19:41:50 -0600
+Subject: io_uring: use TWA_SIGNAL for task_work uncondtionally
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 0ba9c9edcd152158a0e321a4c13ac1dfc571ff3d upstream.
+
+An earlier commit:
+
+b7db41c9e03b ("io_uring: fix regression with always ignoring signals in io_cqring_wait()")
+
+ensured that we didn't get stuck waiting for eventfd reads when it's
+registered with the io_uring ring for event notification, but we still
+have cases where the task can be waiting on other events in the kernel and
+need a bigger nudge to make forward progress. Or the task could be in the
+kernel and running, but on its way to blocking.
+
+This means that TWA_RESUME cannot reliably be used to ensure we make
+progress. Use TWA_SIGNAL unconditionally.
+
+Cc: stable@vger.kernel.org # v5.7+
+Reported-by: Josef <josef.grieb@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/io_uring.c |   16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -4161,22 +4161,22 @@ static int io_req_task_work_add(struct i
+ {
+       struct task_struct *tsk = req->task;
+       struct io_ring_ctx *ctx = req->ctx;
+-      int ret, notify = TWA_RESUME;
++      int ret, notify;
+       /*
+-       * SQPOLL kernel thread doesn't need notification, just a wakeup.
+-       * If we're not using an eventfd, then TWA_RESUME is always fine,
+-       * as we won't have dependencies between request completions for
+-       * other kernel wait conditions.
++       * SQPOLL kernel thread doesn't need notification, just a wakeup. For
++       * all other cases, use TWA_SIGNAL unconditionally to ensure we're
++       * processing task_work. There's no reliable way to tell if TWA_RESUME
++       * will do the job.
+        */
+-      if (ctx->flags & IORING_SETUP_SQPOLL)
+-              notify = 0;
+-      else if (ctx->cq_ev_fd)
++      notify = 0;
++      if (!(ctx->flags & IORING_SETUP_SQPOLL))
+               notify = TWA_SIGNAL;
+       ret = task_work_add(tsk, cb, notify);
+       if (!ret)
+               wake_up_process(tsk);
++
+       return ret;
+ }
diff --git a/queue-5.7/kvm-x86-replace-kvm_spec_ctrl_test_value-with-runtime-test-on-the-host.patch b/queue-5.7/kvm-x86-replace-kvm_spec_ctrl_test_value-with-runtime-test-on-the-host.patch
new file mode 100644 (file)
index 0000000..196f30d
--- /dev/null
@@ -0,0 +1,125 @@
+From 841c2be09fe4f495fe5224952a419bd8c7e5b455 Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Wed, 8 Jul 2020 14:57:31 +0300
+Subject: kvm: x86: replace kvm_spec_ctrl_test_value with runtime test on the host
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+commit 841c2be09fe4f495fe5224952a419bd8c7e5b455 upstream.
+
+To avoid complex and in some cases incorrect logic in
+kvm_spec_ctrl_test_value, just try the guest's given value on the host
+processor instead, and if it doesn't #GP, allow the guest to set it.
+
+One such case is when the host CPU supports the STIBP mitigation but
+doesn't support IBRS (as is the case with some Zen2 AMD CPUs); in that
+case we were giving the guest a #GP when it tried to use STIBP.
+
+The reason we can do the host test is that the IA32_SPEC_CTRL MSR is
+passed through to the guest after the guest sets it to a non-zero value
+for the first time (for performance reasons), so it is pointless to
+emulate the #GP condition on this first access differently from the way
+the host CPU handles it.
+
+This is based on a patch from Sean Christopherson, who suggested this idea.
+
+Fixes: 6441fa6178f5 ("KVM: x86: avoid incorrect writes to host MSR_IA32_SPEC_CTRL")
+Cc: stable@vger.kernel.org
+Suggested-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Message-Id: <20200708115731.180097-1-mlevitsk@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/svm/svm.c |    2 +-
+ arch/x86/kvm/vmx/vmx.c |    2 +-
+ arch/x86/kvm/x86.c     |   40 ++++++++++++++++++++++------------------
+ arch/x86/kvm/x86.h     |    2 +-
+ 4 files changed, 25 insertions(+), 21 deletions(-)
+
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2509,7 +2509,7 @@ static int svm_set_msr(struct kvm_vcpu *
+                   !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
+                       return 1;
+-              if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
++              if (kvm_spec_ctrl_test_value(data))
+                       return 1;
+               svm->spec_ctrl = data;
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -2015,7 +2015,7 @@ static int vmx_set_msr(struct kvm_vcpu *
+                   !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+                       return 1;
+-              if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
++              if (kvm_spec_ctrl_test_value(data))
+                       return 1;
+               vmx->spec_ctrl = data;
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10573,28 +10573,32 @@ bool kvm_arch_no_poll(struct kvm_vcpu *v
+ }
+ EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
+-u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu)
++
++int kvm_spec_ctrl_test_value(u64 value)
+ {
+-      uint64_t bits = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;
++      /*
++       * test that setting IA32_SPEC_CTRL to given value
++       * is allowed by the host processor
++       */
++
++      u64 saved_value;
++      unsigned long flags;
++      int ret = 0;
++
++      local_irq_save(flags);
++
++      if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
++              ret = 1;
++      else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
++              ret = 1;
++      else
++              wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);
+-      /* The STIBP bit doesn't fault even if it's not advertised */
+-      if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+-          !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
+-              bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
+-      if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
+-          !boot_cpu_has(X86_FEATURE_AMD_IBRS))
+-              bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
+-
+-      if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL_SSBD) &&
+-          !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
+-              bits &= ~SPEC_CTRL_SSBD;
+-      if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
+-          !boot_cpu_has(X86_FEATURE_AMD_SSBD))
+-              bits &= ~SPEC_CTRL_SSBD;
++      local_irq_restore(flags);
+-      return bits;
++      return ret;
+ }
+-EXPORT_SYMBOL_GPL(kvm_spec_ctrl_valid_bits);
++EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
+ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
+ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -357,6 +357,6 @@ static inline bool kvm_dr7_valid(u64 dat
+ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
+ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
+-u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
++int kvm_spec_ctrl_test_value(u64 value);
+ #endif
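
Editor's note: a minimal user-space sketch of the "just try the value on the
host and restore it" idea above. read_value() and try_set_value() are invented
stand-ins for rdmsrl_safe()/wrmsrl_safe(), and host_supported_bits is a made-up
mask; the real kvm_spec_ctrl_test_value() also disables interrupts around the
probe, which is omitted here.

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fake_msr;                        /* stands in for IA32_SPEC_CTRL    */
    static const uint64_t host_supported_bits = 0x7; /* pretend the host accepts 3 bits */

    static int read_value(uint64_t *out)             /* stand-in for rdmsrl_safe()      */
    {
        *out = fake_msr;
        return 0;
    }

    static int try_set_value(uint64_t v)             /* stand-in for wrmsrl_safe()      */
    {
        if (v & ~host_supported_bits)
            return 1;                                /* the "#GP" case                  */
        fake_msr = v;
        return 0;
    }

    static int spec_ctrl_test_value(uint64_t value)
    {
        uint64_t saved;
        int ret = 0;

        if (read_value(&saved))
            ret = 1;
        else if (try_set_value(value))
            ret = 1;                                 /* host refused: guest gets #GP    */
        else
            try_set_value(saved);                    /* accepted: restore the old value */

        return ret;
    }

    int main(void)
    {
        printf("0x1   -> %s\n", spec_ctrl_test_value(0x1) ? "reject" : "allow");
        printf("0x100 -> %s\n", spec_ctrl_test_value(0x100) ? "reject" : "allow");
        return 0;
    }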
diff --git a/queue-5.7/media-media-request-fix-crash-if-memory-allocation-fails.patch b/queue-5.7/media-media-request-fix-crash-if-memory-allocation-fails.patch
new file mode 100644 (file)
index 0000000..0774156
--- /dev/null
@@ -0,0 +1,118 @@
+From e30cc79cc80fd919b697a15c5000d9f57487de8e Mon Sep 17 00:00:00 2001
+From: Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
+Date: Sun, 21 Jun 2020 13:30:40 +0200
+Subject: media: media-request: Fix crash if memory allocation fails
+
+From: Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
+
+commit e30cc79cc80fd919b697a15c5000d9f57487de8e upstream.
+
+Syzbot reports a NULL-ptr deref in the kref_put() call:
+
+BUG: KASAN: null-ptr-deref in media_request_put drivers/media/mc/mc-request.c:81 [inline]
+ kref_put include/linux/kref.h:64 [inline]
+ media_request_put drivers/media/mc/mc-request.c:81 [inline]
+ media_request_close+0x4d/0x170 drivers/media/mc/mc-request.c:89
+ __fput+0x2ed/0x750 fs/file_table.c:281
+ task_work_run+0x147/0x1d0 kernel/task_work.c:123
+ tracehook_notify_resume include/linux/tracehook.h:188 [inline]
+ exit_to_usermode_loop arch/x86/entry/common.c:165 [inline]
+ prepare_exit_to_usermode+0x48e/0x600 arch/x86/entry/common.c:196
+
+What led to this crash was an injected memory allocation failure in
+media_request_alloc():
+
+FAULT_INJECTION: forcing a failure.
+name failslab, interval 1, probability 0, space 0, times 0
+ should_failslab+0x5/0x20
+ kmem_cache_alloc_trace+0x57/0x300
+ ? anon_inode_getfile+0xe5/0x170
+ media_request_alloc+0x339/0x440
+ media_device_request_alloc+0x94/0xc0
+ media_device_ioctl+0x1fb/0x330
+ ? do_vfs_ioctl+0x6ea/0x1a00
+ ? media_ioctl+0x101/0x120
+ ? __media_device_usb_init+0x430/0x430
+ ? media_poll+0x110/0x110
+ __se_sys_ioctl+0xf9/0x160
+ do_syscall_64+0xf3/0x1b0
+
+When that allocation fails, filp->private_data is left uninitialized,
+which media_request_close() does not expect, and it crashes.
+
+To avoid this, reorder media_request_alloc() so that allocating the
+struct file happens as the last step; media_request_close() will then
+no longer get called for a partially created media request.
+
+Reported-by: syzbot+6bed2d543cf7e48b822b@syzkaller.appspotmail.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
+Fixes: 10905d70d788 ("media: media-request: implement media requests")
+Reviewed-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/mc/mc-request.c |   31 +++++++++++++++++--------------
+ 1 file changed, 17 insertions(+), 14 deletions(-)
+
+--- a/drivers/media/mc/mc-request.c
++++ b/drivers/media/mc/mc-request.c
+@@ -296,9 +296,18 @@ int media_request_alloc(struct media_dev
+       if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
+               return -ENOMEM;
++      if (mdev->ops->req_alloc)
++              req = mdev->ops->req_alloc(mdev);
++      else
++              req = kzalloc(sizeof(*req), GFP_KERNEL);
++      if (!req)
++              return -ENOMEM;
++
+       fd = get_unused_fd_flags(O_CLOEXEC);
+-      if (fd < 0)
+-              return fd;
++      if (fd < 0) {
++              ret = fd;
++              goto err_free_req;
++      }
+       filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
+       if (IS_ERR(filp)) {
+@@ -306,15 +315,6 @@ int media_request_alloc(struct media_dev
+               goto err_put_fd;
+       }
+-      if (mdev->ops->req_alloc)
+-              req = mdev->ops->req_alloc(mdev);
+-      else
+-              req = kzalloc(sizeof(*req), GFP_KERNEL);
+-      if (!req) {
+-              ret = -ENOMEM;
+-              goto err_fput;
+-      }
+-
+       filp->private_data = req;
+       req->mdev = mdev;
+       req->state = MEDIA_REQUEST_STATE_IDLE;
+@@ -336,12 +336,15 @@ int media_request_alloc(struct media_dev
+       return 0;
+-err_fput:
+-      fput(filp);
+-
+ err_put_fd:
+       put_unused_fd(fd);
++err_free_req:
++      if (mdev->ops->req_free)
++              mdev->ops->req_free(req);
++      else
++              kfree(req);
++
+       return ret;
+ }
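
Editor's note: not part of the patch, only an illustration of the reordering
above with invented names (req, file, request_alloc, release_file): build the
request object first and attach it to the file last, so a failure in between
never leaves a file whose release callback would dereference an uninitialized
private_data.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct req  { int state; };
    struct file { struct req *private_data; };

    static void release_file(struct file *f)
    {
        /* mirrors media_request_close(): assumes private_data is valid */
        printf("releasing request in state %d\n", f->private_data->state);
        free(f->private_data);
        free(f);
    }

    static int request_alloc(struct file **out, int inject_failure)
    {
        struct req *req = calloc(1, sizeof(*req));

        if (!req)
            return -ENOMEM;

        if (inject_failure) {       /* e.g. fd or anon inode allocation fails  */
            free(req);              /* we still own req, so free it directly   */
            return -ENOMEM;
        }

        struct file *f = calloc(1, sizeof(*f));

        if (!f) {
            free(req);
            return -ENOMEM;
        }
        f->private_data = req;      /* attach only once everything else exists */
        *out = f;
        return 0;
    }

    int main(void)
    {
        struct file *f = NULL;

        if (request_alloc(&f, 0) == 0)
            release_file(f);
        printf("injected failure -> %d\n", request_alloc(&f, 1));
        return 0;
    }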
diff --git a/queue-5.7/nfs-don-t-move-layouts-to-plh_return_segs-list-while-in-use.patch b/queue-5.7/nfs-don-t-move-layouts-to-plh_return_segs-list-while-in-use.patch
new file mode 100644 (file)
index 0000000..e3a309c
--- /dev/null
@@ -0,0 +1,50 @@
+From ff041727e9e029845857cac41aae118ead5e261b Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Tue, 4 Aug 2020 16:30:30 -0400
+Subject: NFS: Don't move layouts to plh_return_segs list while in use
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit ff041727e9e029845857cac41aae118ead5e261b upstream.
+
+If the layout segment is still in use for a read or a write, we should
+not move it to the layout plh_return_segs list. If we do, we can end
+up returning the layout while I/O is still in progress.
+
+Fixes: e0b7d420f72a ("pNFS: Don't discard layout segments that are marked for return")
+Cc: stable@vger.kernel.org # v4.19+
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/pnfs.c |   12 +-----------
+ 1 file changed, 1 insertion(+), 11 deletions(-)
+
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -2392,16 +2392,6 @@ out_forget:
+       return ERR_PTR(-EAGAIN);
+ }
+-static int
+-mark_lseg_invalid_or_return(struct pnfs_layout_segment *lseg,
+-              struct list_head *tmp_list)
+-{
+-      if (!mark_lseg_invalid(lseg, tmp_list))
+-              return 0;
+-      pnfs_cache_lseg_for_layoutreturn(lseg->pls_layout, lseg);
+-      return 1;
+-}
+-
+ /**
+  * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
+  * @lo: pointer to layout header
+@@ -2438,7 +2428,7 @@ pnfs_mark_matching_lsegs_return(struct p
+                               lseg, lseg->pls_range.iomode,
+                               lseg->pls_range.offset,
+                               lseg->pls_range.length);
+-                      if (mark_lseg_invalid_or_return(lseg, tmp_list))
++                      if (mark_lseg_invalid(lseg, tmp_list))
+                               continue;
+                       remaining++;
+                       set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
diff --git a/queue-5.7/nfs-don-t-return-layout-segments-that-are-in-use.patch b/queue-5.7/nfs-don-t-return-layout-segments-that-are-in-use.patch
new file mode 100644 (file)
index 0000000..9d12b9a
--- /dev/null
@@ -0,0 +1,71 @@
+From d474f96104bd4377573526ebae2ee212205a6839 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Wed, 5 Aug 2020 09:03:56 -0400
+Subject: NFS: Don't return layout segments that are in use
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit d474f96104bd4377573526ebae2ee212205a6839 upstream.
+
+If the NFS_LAYOUT_RETURN_REQUESTED flag is set, we want to return the
+layout as soon as possible, meaning that the affected layout segments
+should be marked as invalid, and should no longer be in use for I/O.
+
+Fixes: f0b429819b5f ("pNFS: Ignore non-recalled layouts in pnfs_layout_need_return()")
+Cc: stable@vger.kernel.org # v4.19+
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/pnfs.c |   34 +++++++++++++++-------------------
+ 1 file changed, 15 insertions(+), 19 deletions(-)
+
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1226,31 +1226,27 @@ out:
+       return status;
+ }
++static bool
++pnfs_layout_segments_returnable(struct pnfs_layout_hdr *lo,
++                              enum pnfs_iomode iomode,
++                              u32 seq)
++{
++      struct pnfs_layout_range recall_range = {
++              .length = NFS4_MAX_UINT64,
++              .iomode = iomode,
++      };
++      return pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
++                                             &recall_range, seq) != -EBUSY;
++}
++
+ /* Return true if layoutreturn is needed */
+ static bool
+ pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
+ {
+-      struct pnfs_layout_segment *s;
+-      enum pnfs_iomode iomode;
+-      u32 seq;
+-
+       if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
+               return false;
+-
+-      seq = lo->plh_return_seq;
+-      iomode = lo->plh_return_iomode;
+-
+-      /* Defer layoutreturn until all recalled lsegs are done */
+-      list_for_each_entry(s, &lo->plh_segs, pls_list) {
+-              if (seq && pnfs_seqid_is_newer(s->pls_seq, seq))
+-                      continue;
+-              if (iomode != IOMODE_ANY && s->pls_range.iomode != iomode)
+-                      continue;
+-              if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
+-                      return false;
+-      }
+-
+-      return true;
++      return pnfs_layout_segments_returnable(lo, lo->plh_return_iomode,
++                                             lo->plh_return_seq);
+ }
+ static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
diff --git a/queue-5.7/pstore-fix-linking-when-crypto-api-disabled.patch b/queue-5.7/pstore-fix-linking-when-crypto-api-disabled.patch
new file mode 100644 (file)
index 0000000..3490b9a
--- /dev/null
@@ -0,0 +1,53 @@
+From fd49e03280e596e54edb93a91bc96170f8e97e4a Mon Sep 17 00:00:00 2001
+From: Matteo Croce <mcroce@linux.microsoft.com>
+Date: Mon, 6 Jul 2020 19:37:36 -0700
+Subject: pstore: Fix linking when crypto API disabled
+
+From: Matteo Croce <mcroce@linux.microsoft.com>
+
+commit fd49e03280e596e54edb93a91bc96170f8e97e4a upstream.
+
+When building a kernel with CONFIG_PSTORE=y and CONFIG_CRYPTO not set,
+a build error happens:
+
+    ld: fs/pstore/platform.o: in function `pstore_dump':
+    platform.c:(.text+0x3f9): undefined reference to `crypto_comp_compress'
+    ld: fs/pstore/platform.o: in function `pstore_get_backend_records':
+    platform.c:(.text+0x784): undefined reference to `crypto_comp_decompress'
+
+This is because some pstore code uses crypto_comp_(de)compress regardless
+of the CONFIG_CRYPTO status. Fix it by wrapping the (de)compress usage
+in IS_ENABLED(CONFIG_PSTORE_COMPRESS) checks.
+
+Signed-off-by: Matteo Croce <mcroce@linux.microsoft.com>
+Link: https://lore.kernel.org/lkml/20200706234045.9516-1-mcroce@linux.microsoft.com
+Fixes: cb3bee0369bc ("pstore: Use crypto compress API")
+Cc: stable@vger.kernel.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/platform.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -275,6 +275,9 @@ static int pstore_compress(const void *i
+ {
+       int ret;
++      if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION))
++              return -EINVAL;
++
+       ret = crypto_comp_compress(tfm, in, inlen, out, &outlen);
+       if (ret) {
+               pr_err("crypto_comp_compress failed, ret = %d!\n", ret);
+@@ -661,7 +664,7 @@ static void decompress_record(struct pst
+       int unzipped_len;
+       char *unzipped, *workspace;
+-      if (!record->compressed)
++      if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION) || !record->compressed)
+               return;
+       /* Only PSTORE_TYPE_DMESG support compression. */
index 037ec2ac161daa53134c56c26ea5f6a388a696cc..2a881203a1bbccdca3a6314e7dc7b965a40bdf34 100644 (file)
@@ -327,3 +327,33 @@ net-refactor-bind_bucket-fastreuse-into-helper.patch
 net-initialize-fastreuse-on-inet_inherit_port.patch
 vsock-fix-potential-null-pointer-dereference-in-vsock_poll.patch
 net-phy-marvell10g-fix-null-pointer-dereference.patch
+usb-serial-cp210x-re-enable-auto-rts-on-open.patch
+usb-serial-cp210x-enable-usb-generic-throttle-unthrottle.patch
+usb-cdns3-gadget-always-zeroed-trb-buffer-when-enable-endpoint.patch
+vdpasim-protect-concurrent-access-to-iommu-iotlb.patch
+alsa-hda-fix-the-micmute-led-status-for-lenovo-thinkcentre-aio.patch
+alsa-usb-audio-creative-usb-x-fi-pro-sb1095-volume-knob-support.patch
+alsa-usb-audio-fix-overeager-device-match-for-macrosilicon-ms2109.patch
+alsa-usb-audio-work-around-streaming-quirk-for-macrosilicon-ms2109.patch
+alsa-usb-audio-add-quirk-for-pioneer-ddj-rb.patch
+tpm-unify-the-mismatching-tpm-space-buffer-sizes.patch
+pstore-fix-linking-when-crypto-api-disabled.patch
+crypto-hisilicon-don-t-sleep-of-crypto_tfm_req_may_sleep-was-not-specified.patch
+crypto-qat-fix-double-free-in-qat_uclo_create_batch_init_list.patch
+crypto-ccp-fix-use-of-merged-scatterlists.patch
+crypto-cpt-don-t-sleep-of-crypto_tfm_req_may_sleep-was-not-specified.patch
+tick-nohz-narrow-down-noise-while-setting-current-task-s-tick-dependency.patch
+bitfield.h-don-t-compile-time-validate-_val-in-field_fit.patch
+fs-minix-check-return-value-of-sb_getblk.patch
+fs-minix-don-t-allow-getting-deleted-inodes.patch
+fs-minix-reject-too-large-maximum-file-size.patch
+kvm-x86-replace-kvm_spec_ctrl_test_value-with-runtime-test-on-the-host.patch
+9p-fix-memory-leak-in-v9fs_mount.patch
+driver-core-fix-probe_count-imbalance-in-really_probe.patch
+media-media-request-fix-crash-if-memory-allocation-fails.patch
+drm-ttm-nouveau-don-t-call-tt-destroy-callback-on-alloc-failure.patch
+io_uring-set-ctx-sq-cq-entry-count-earlier.patch
+io_uring-use-twa_signal-for-task_work-uncondtionally.patch
+io_uring-fail-poll-arm-on-queue-proc-failure.patch
+nfs-don-t-move-layouts-to-plh_return_segs-list-while-in-use.patch
+nfs-don-t-return-layout-segments-that-are-in-use.patch
diff --git a/queue-5.7/tick-nohz-narrow-down-noise-while-setting-current-task-s-tick-dependency.patch b/queue-5.7/tick-nohz-narrow-down-noise-while-setting-current-task-s-tick-dependency.patch
new file mode 100644 (file)
index 0000000..2a0a49b
--- /dev/null
@@ -0,0 +1,79 @@
+From 3c8920e2dbd1a55f72dc14d656df9d0097cf5c72 Mon Sep 17 00:00:00 2001
+From: Frederic Weisbecker <frederic@kernel.org>
+Date: Fri, 15 May 2020 02:34:29 +0200
+Subject: tick/nohz: Narrow down noise while setting current task's tick dependency
+
+From: Frederic Weisbecker <frederic@kernel.org>
+
+commit 3c8920e2dbd1a55f72dc14d656df9d0097cf5c72 upstream.
+
+Setting a tick dependency on any task, including the case where a task
+sets that dependency on itself, triggers an IPI to all CPUs.  That is
+of course suboptimal but it had previously not been an issue because it
+was only used by POSIX CPU timers on nohz_full, which apparently never
+occurs in latency-sensitive workloads in production.  (Or users of such
+systems are suffering in silence on the one hand or venting their ire
+on the wrong people on the other.)
+
+But RCU now sets a task tick dependency on the current task in order
+to fix stall issues that can occur during RCU callback processing.
+Thus, RCU callback processing triggers frequent system-wide IPIs from
+nohz_full CPUs.  This is quite counter-productive, after all, avoiding
+IPIs is what nohz_full is supposed to be all about.
+
+This commit therefore optimizes tasks' self-setting of a task tick
+dependency by using tick_nohz_full_kick() to avoid the system-wide IPI.
+Instead, only the execution of the one task is disturbed, which is
+acceptable given that this disturbance is well down into the noise
+compared to the degree to which the RCU callback processing itself
+disturbs execution.
+
+Fixes: 6a949b7af82d ("rcu: Force on tick when invoking lots of callbacks")
+Reported-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
+Cc: stable@kernel.org
+Cc: Paul E. McKenney <paulmck@kernel.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/time/tick-sched.c |   22 +++++++++++++++-------
+ 1 file changed, 15 insertions(+), 7 deletions(-)
+
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -351,16 +351,24 @@ void tick_nohz_dep_clear_cpu(int cpu, en
+ EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);
+ /*
+- * Set a per-task tick dependency. Posix CPU timers need this in order to elapse
+- * per task timers.
++ * Set a per-task tick dependency. RCU need this. Also posix CPU timers
++ * in order to elapse per task timers.
+  */
+ void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
+ {
+-      /*
+-       * We could optimize this with just kicking the target running the task
+-       * if that noise matters for nohz full users.
+-       */
+-      tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit);
++      if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask)) {
++              if (tsk == current) {
++                      preempt_disable();
++                      tick_nohz_full_kick();
++                      preempt_enable();
++              } else {
++                      /*
++                       * Some future tick_nohz_full_kick_task()
++                       * should optimize this.
++                       */
++                      tick_nohz_full_kick_all();
++              }
++      }
+ }
+ EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task);
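
Editor's note: a short C11 sketch of the pattern used above: atomically set the
dependency bit, act only on the 0 -> 1 transition, and disturb just the current
CPU when the task is current. fake_kick_current() and fake_kick_all() are
invented stand-ins for tick_nohz_full_kick() and tick_nohz_full_kick_all().

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint tick_dep_mask = 0;

    static void fake_kick_current(void) { puts("kick current CPU only"); }
    static void fake_kick_all(void)     { puts("kick all CPUs"); }

    static void set_tick_dep(unsigned bit, int tsk_is_current)
    {
        unsigned old = atomic_fetch_or(&tick_dep_mask, 1u << bit);

        if (old & (1u << bit))
            return;                 /* dependency already set: nothing to do */
        if (tsk_is_current)
            fake_kick_current();    /* cheap: only this CPU is disturbed     */
        else
            fake_kick_all();        /* fallback: system-wide kick            */
    }

    int main(void)
    {
        set_tick_dep(0, 1);         /* first set: kicks the current CPU */
        set_tick_dep(0, 1);         /* already set: no kick at all      */
        return 0;
    }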
diff --git a/queue-5.7/tpm-unify-the-mismatching-tpm-space-buffer-sizes.patch b/queue-5.7/tpm-unify-the-mismatching-tpm-space-buffer-sizes.patch
new file mode 100644 (file)
index 0000000..ac92b44
--- /dev/null
@@ -0,0 +1,165 @@
+From 6c4e79d99e6f42b79040f1a33cd4018f5425030b Mon Sep 17 00:00:00 2001
+From: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Date: Fri, 3 Jul 2020 01:55:59 +0300
+Subject: tpm: Unify the mismatching TPM space buffer sizes
+
+From: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+
+commit 6c4e79d99e6f42b79040f1a33cd4018f5425030b upstream.
+
+The size of the buffers for storing contexts and sessions can vary from
+arch to arch, as PAGE_SIZE can be anything between 4 kB and 256 kB (the
+maximum for PPC64). Define a fixed buffer size of 16 kB. This should be
+enough for most uses with three handles (that is how many we allow at the
+moment). Parametrize the buffer size while doing this, so that it is easier
+to revisit later on if required.
+
+Cc: stable@vger.kernel.org
+Reported-by: Stefan Berger <stefanb@linux.ibm.com>
+Fixes: 745b361e989a ("tpm: infrastructure for TPM spaces")
+Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
+Tested-by: Stefan Berger <stefanb@linux.ibm.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm-chip.c   |    9 ++-------
+ drivers/char/tpm/tpm.h        |    5 ++++-
+ drivers/char/tpm/tpm2-space.c |   26 ++++++++++++++++----------
+ drivers/char/tpm/tpmrm-dev.c  |    2 +-
+ include/linux/tpm.h           |    1 +
+ 5 files changed, 24 insertions(+), 19 deletions(-)
+
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -386,13 +386,8 @@ struct tpm_chip *tpm_chip_alloc(struct d
+       chip->cdev.owner = THIS_MODULE;
+       chip->cdevs.owner = THIS_MODULE;
+-      chip->work_space.context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+-      if (!chip->work_space.context_buf) {
+-              rc = -ENOMEM;
+-              goto out;
+-      }
+-      chip->work_space.session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+-      if (!chip->work_space.session_buf) {
++      rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE);
++      if (rc) {
+               rc = -ENOMEM;
+               goto out;
+       }
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -59,6 +59,9 @@ enum tpm_addr {
+ #define TPM_TAG_RQU_COMMAND 193
++/* TPM2 specific constants. */
++#define TPM2_SPACE_BUFFER_SIZE                16384 /* 16 kB */
++
+ struct        stclear_flags_t {
+       __be16  tag;
+       u8      deactivated;
+@@ -228,7 +231,7 @@ unsigned long tpm2_calc_ordinal_duration
+ int tpm2_probe(struct tpm_chip *chip);
+ int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip);
+ int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
+-int tpm2_init_space(struct tpm_space *space);
++int tpm2_init_space(struct tpm_space *space, unsigned int buf_size);
+ void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space);
+ void tpm2_flush_space(struct tpm_chip *chip);
+ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
+--- a/drivers/char/tpm/tpm2-space.c
++++ b/drivers/char/tpm/tpm2-space.c
+@@ -38,18 +38,21 @@ static void tpm2_flush_sessions(struct t
+       }
+ }
+-int tpm2_init_space(struct tpm_space *space)
++int tpm2_init_space(struct tpm_space *space, unsigned int buf_size)
+ {
+-      space->context_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
++      space->context_buf = kzalloc(buf_size, GFP_KERNEL);
+       if (!space->context_buf)
+               return -ENOMEM;
+-      space->session_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
++      space->session_buf = kzalloc(buf_size, GFP_KERNEL);
+       if (space->session_buf == NULL) {
+               kfree(space->context_buf);
++              /* Prevent caller getting a dangling pointer. */
++              space->context_buf = NULL;
+               return -ENOMEM;
+       }
++      space->buf_size = buf_size;
+       return 0;
+ }
+@@ -311,8 +314,10 @@ int tpm2_prepare_space(struct tpm_chip *
+              sizeof(space->context_tbl));
+       memcpy(&chip->work_space.session_tbl, &space->session_tbl,
+              sizeof(space->session_tbl));
+-      memcpy(chip->work_space.context_buf, space->context_buf, PAGE_SIZE);
+-      memcpy(chip->work_space.session_buf, space->session_buf, PAGE_SIZE);
++      memcpy(chip->work_space.context_buf, space->context_buf,
++             space->buf_size);
++      memcpy(chip->work_space.session_buf, space->session_buf,
++             space->buf_size);
+       rc = tpm2_load_space(chip);
+       if (rc) {
+@@ -492,7 +497,7 @@ static int tpm2_save_space(struct tpm_ch
+                       continue;
+               rc = tpm2_save_context(chip, space->context_tbl[i],
+-                                     space->context_buf, PAGE_SIZE,
++                                     space->context_buf, space->buf_size,
+                                      &offset);
+               if (rc == -ENOENT) {
+                       space->context_tbl[i] = 0;
+@@ -509,9 +514,8 @@ static int tpm2_save_space(struct tpm_ch
+                       continue;
+               rc = tpm2_save_context(chip, space->session_tbl[i],
+-                                     space->session_buf, PAGE_SIZE,
++                                     space->session_buf, space->buf_size,
+                                      &offset);
+-
+               if (rc == -ENOENT) {
+                       /* handle error saving session, just forget it */
+                       space->session_tbl[i] = 0;
+@@ -557,8 +561,10 @@ int tpm2_commit_space(struct tpm_chip *c
+              sizeof(space->context_tbl));
+       memcpy(&space->session_tbl, &chip->work_space.session_tbl,
+              sizeof(space->session_tbl));
+-      memcpy(space->context_buf, chip->work_space.context_buf, PAGE_SIZE);
+-      memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE);
++      memcpy(space->context_buf, chip->work_space.context_buf,
++             space->buf_size);
++      memcpy(space->session_buf, chip->work_space.session_buf,
++             space->buf_size);
+       return 0;
+ out:
+--- a/drivers/char/tpm/tpmrm-dev.c
++++ b/drivers/char/tpm/tpmrm-dev.c
+@@ -21,7 +21,7 @@ static int tpmrm_open(struct inode *inod
+       if (priv == NULL)
+               return -ENOMEM;
+-      rc = tpm2_init_space(&priv->space);
++      rc = tpm2_init_space(&priv->space, TPM2_SPACE_BUFFER_SIZE);
+       if (rc) {
+               kfree(priv);
+               return -ENOMEM;
+--- a/include/linux/tpm.h
++++ b/include/linux/tpm.h
+@@ -96,6 +96,7 @@ struct tpm_space {
+       u8 *context_buf;
+       u32 session_tbl[3];
+       u8 *session_buf;
++      u32 buf_size;
+ };
+ struct tpm_bios_log {
diff --git a/queue-5.7/usb-cdns3-gadget-always-zeroed-trb-buffer-when-enable-endpoint.patch b/queue-5.7/usb-cdns3-gadget-always-zeroed-trb-buffer-when-enable-endpoint.patch
new file mode 100644 (file)
index 0000000..24f4515
--- /dev/null
@@ -0,0 +1,76 @@
+From 95f5acfc4f58f01a22b66d8c9c0ffb72aa96271c Mon Sep 17 00:00:00 2001
+From: Peter Chen <peter.chen@nxp.com>
+Date: Wed, 22 Jul 2020 11:06:19 +0800
+Subject: usb: cdns3: gadget: always zeroed TRB buffer when enable endpoint
+
+From: Peter Chen <peter.chen@nxp.com>
+
+commit 95f5acfc4f58f01a22b66d8c9c0ffb72aa96271c upstream.
+
+The endpoint dequeue operation changes the dequeued TRB into a link
+TRB. When the endpoint is disabled and re-enabled, the DMA fetches the
+TRB before the link TRB; after it handles that TRB, the DMA pointer
+advances to the TRB after the link TRB, but the enqueue and dequeue
+variables don't know it because no hardware interrupt occurs at the
+time. When the next TRB is then added at the link TRB position, the DMA
+will not handle it because its pointer is already at the following TRB.
+See the trace log below:
+
+file-storage-675   [001] d..1    86.585657: usb_ep_queue: ep0: req 00000000df9b3a4f length 0/0 sgs 0/0 stream 0 zsI status 0 --> 0
+file-storage-675   [001] d..1    86.585663: cdns3_ep_queue: ep1out: req: 000000002ebce364, req buff 00000000f5bc96b4, length: 0/1024 zsi, status: -115, trb: [start:0, end:0: virt addr (null)], flags:0 SID: 0
+file-storage-675   [001] d..1    86.585671: cdns3_prepare_trb: ep1out: trb 000000007f770303, dma buf: 0xbd195800, size: 1024, burst: 128 ctrl: 0x00000425 (C=1, T=0, ISP, IOC, Normal) SID:0 LAST_SID:0
+file-storage-675   [001] d..1    86.585676: cdns3_ring:
+            Ring contents for ep1out:
+            Ring deq index: 0, trb: 000000007f770303 (virt), 0xc4003000 (dma)
+            Ring enq index: 1, trb: 0000000049c1ba21 (virt), 0xc400300c (dma)
+            free trbs: 38, CCS=1, PCS=1
+            @0x00000000c4003000 bd195800 80020400 00000425
+            @0x00000000c400300c c4003018 80020400 00001811
+            @0x00000000c4003018 bcfcc000 0000001f 00000426
+            @0x00000000c4003024 bcfce800 0000001f 00000426
+
+           ...
+
+ irq/144-5b13000-698   [000] d...    87.619286: usb_gadget_giveback_request: ep1in: req 0000000031b832eb length 13/13 sgs 0/0 stream 0 zsI status 0 --> 0
+    file-storage-675   [001] d..1    87.619287: cdns3_ep_queue: ep1out: req: 000000002ebce364, req buff 00000000f5bc96b4, length: 0/1024 zsi, status: -115, trb: [start:0, end:0: virt addr 0x80020400c400300c], flags:0 SID: 0
+    file-storage-675   [001] d..1    87.619294: cdns3_prepare_trb: ep1out: trb 0000000049c1ba21, dma buf: 0xbd198000, size: 1024, burst: 128 ctrl: 0x00000425 (C=1, T=0, ISP, IOC, Normal) SID:0 LAST_SID:0
+    file-storage-675   [001] d..1    87.619297: cdns3_ring:
+                Ring contents for ep1out:
+                Ring deq index: 1, trb: 0000000049c1ba21 (virt), 0xc400300c (dma)
+                Ring enq index: 2, trb: 0000000059b34b67 (virt), 0xc4003018 (dma)
+                free trbs: 38, CCS=1, PCS=1
+                @0x00000000c4003000 bd195800 0000001f 00000427
+                @0x00000000c400300c bd198000 80020400 00000425
+                @0x00000000c4003018 bcfcc000 0000001f 00000426
+                @0x00000000c4003024 bcfce800 0000001f 00000426
+               ...
+
+    file-storage-675   [001] d..1    87.619305: cdns3_doorbell_epx: ep1out, ep_trbaddr c4003018
+    file-storage-675   [001] ....    87.619308: usb_ep_queue: ep1out: req 000000002ebce364 length 0/1024 sgs 0/0 stream 0 zsI status -115 --> 0
+ irq/144-5b13000-698   [000] d..1    87.619315: cdns3_epx_irq: IRQ for ep1out: 01000c80 TRBERR , ep_traddr: c4003018 ep_last_sid: 00000000 use_streams: 0
+ irq/144-5b13000-698   [000] d..1    87.619395: cdns3_usb_irq: IRQ 00000008 = Hot Reset
+
+Fixes: f616c3bda47e ("usb: cdns3: Fix dequeue implementation")
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Peter Chen <peter.chen@nxp.com>
+Signed-off-by: Felipe Balbi <balbi@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/cdns3/gadget.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/usb/cdns3/gadget.c
++++ b/drivers/usb/cdns3/gadget.c
+@@ -242,9 +242,10 @@ int cdns3_allocate_trb_pool(struct cdns3
+                       return -ENOMEM;
+               priv_ep->alloc_ring_size = ring_size;
+-              memset(priv_ep->trb_pool, 0, ring_size);
+       }
++      memset(priv_ep->trb_pool, 0, ring_size);
++
+       priv_ep->num_trbs = num_trbs;
+       if (!priv_ep->num)
diff --git a/queue-5.7/usb-serial-cp210x-enable-usb-generic-throttle-unthrottle.patch b/queue-5.7/usb-serial-cp210x-enable-usb-generic-throttle-unthrottle.patch
new file mode 100644 (file)
index 0000000..3484e89
--- /dev/null
@@ -0,0 +1,38 @@
+From 4387b3dbb079d482d3c2b43a703ceed4dd27ed28 Mon Sep 17 00:00:00 2001
+From: Brant Merryman <brant.merryman@silabs.com>
+Date: Fri, 26 Jun 2020 04:22:58 +0000
+Subject: USB: serial: cp210x: enable usb generic throttle/unthrottle
+
+From: Brant Merryman <brant.merryman@silabs.com>
+
+commit 4387b3dbb079d482d3c2b43a703ceed4dd27ed28 upstream.
+
+Assign the .throttle and .unthrottle callbacks in the driver structure
+to the generic USB serial functions to prevent data loss that can
+otherwise occur if the host does not enable USB throttling.
+
+Signed-off-by: Brant Merryman <brant.merryman@silabs.com>
+Co-developed-by: Phu Luu <phu.luu@silabs.com>
+Signed-off-by: Phu Luu <phu.luu@silabs.com>
+Link: https://lore.kernel.org/r/57401AF3-9961-461F-95E1-F8AFC2105F5E@silabs.com
+[ johan: fix up tags ]
+Fixes: 39a66b8d22a3 ("[PATCH] USB: CP2101 Add support for flow control")
+Cc: stable <stable@vger.kernel.org>     # 2.6.12
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/serial/cp210x.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -272,6 +272,8 @@ static struct usb_serial_driver cp210x_d
+       .break_ctl              = cp210x_break_ctl,
+       .set_termios            = cp210x_set_termios,
+       .tx_empty               = cp210x_tx_empty,
++      .throttle               = usb_serial_generic_throttle,
++      .unthrottle             = usb_serial_generic_unthrottle,
+       .tiocmget               = cp210x_tiocmget,
+       .tiocmset               = cp210x_tiocmset,
+       .attach                 = cp210x_attach,
diff --git a/queue-5.7/usb-serial-cp210x-re-enable-auto-rts-on-open.patch b/queue-5.7/usb-serial-cp210x-re-enable-auto-rts-on-open.patch
new file mode 100644 (file)
index 0000000..3f67231
--- /dev/null
@@ -0,0 +1,61 @@
+From c7614ff9b73a1e6fb2b1b51396da132ed22fecdb Mon Sep 17 00:00:00 2001
+From: Brant Merryman <brant.merryman@silabs.com>
+Date: Fri, 26 Jun 2020 04:24:20 +0000
+Subject: USB: serial: cp210x: re-enable auto-RTS on open
+
+From: Brant Merryman <brant.merryman@silabs.com>
+
+commit c7614ff9b73a1e6fb2b1b51396da132ed22fecdb upstream.
+
+When the UART on CP210x hardware is disabled while in hardware flow
+control mode, the hardware disables auto-RTS but leaves auto-CTS
+enabled. When re-opening the port, if auto-CTS is enabled on the
+cp210x, then auto-RTS must be re-enabled in the driver.
+
+Signed-off-by: Brant Merryman <brant.merryman@silabs.com>
+Co-developed-by: Phu Luu <phu.luu@silabs.com>
+Signed-off-by: Phu Luu <phu.luu@silabs.com>
+Link: https://lore.kernel.org/r/ECCF8E73-91F3-4080-BE17-1714BC8818FB@silabs.com
+[ johan: fix up tags and problem description ]
+Fixes: 39a66b8d22a3 ("[PATCH] USB: CP2101 Add support for flow control")
+Cc: stable <stable@vger.kernel.org>     # 2.6.12
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/serial/cp210x.c |   17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -915,6 +915,7 @@ static void cp210x_get_termios_port(stru
+       u32 baud;
+       u16 bits;
+       u32 ctl_hs;
++      u32 flow_repl;
+       cp210x_read_u32_reg(port, CP210X_GET_BAUDRATE, &baud);
+@@ -1015,6 +1016,22 @@ static void cp210x_get_termios_port(stru
+       ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake);
+       if (ctl_hs & CP210X_SERIAL_CTS_HANDSHAKE) {
+               dev_dbg(dev, "%s - flow control = CRTSCTS\n", __func__);
++              /*
++               * When the port is closed, the CP210x hardware disables
++               * auto-RTS and RTS is deasserted but it leaves auto-CTS when
++               * in hardware flow control mode. When re-opening the port, if
++               * auto-CTS is enabled on the cp210x, then auto-RTS must be
++               * re-enabled in the driver.
++               */
++              flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace);
++              flow_repl &= ~CP210X_SERIAL_RTS_MASK;
++              flow_repl |= CP210X_SERIAL_RTS_SHIFT(CP210X_SERIAL_RTS_FLOW_CTL);
++              flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl);
++              cp210x_write_reg_block(port,
++                              CP210X_SET_FLOW,
++                              &flow_ctl,
++                              sizeof(flow_ctl));
++
+               cflag |= CRTSCTS;
+       } else {
+               dev_dbg(dev, "%s - flow control = NONE\n", __func__);
diff --git a/queue-5.7/vdpasim-protect-concurrent-access-to-iommu-iotlb.patch b/queue-5.7/vdpasim-protect-concurrent-access-to-iommu-iotlb.patch
new file mode 100644 (file)
index 0000000..0def3fe
--- /dev/null
@@ -0,0 +1,150 @@
+From 0ea9ee430e74b16c6b17e70757d1c26d8d140e1f Mon Sep 17 00:00:00 2001
+From: Max Gurtovoy <maxg@mellanox.com>
+Date: Fri, 31 Jul 2020 15:38:22 +0800
+Subject: vdpasim: protect concurrent access to iommu iotlb
+
+From: Max Gurtovoy <maxg@mellanox.com>
+
+commit 0ea9ee430e74b16c6b17e70757d1c26d8d140e1f upstream.
+
+The IOMMU IOTLB can be accessed by different cores performing IO through
+multiple virtqueues. Add a spinlock to synchronize IOTLB accesses.
+
+The race is easily reproduced when using more than one pktgen thread to
+inject traffic into the vdpa simulator.
+
+Fixes: 2c53d0f64c06f ("vdpasim: vDPA device simulator")
+Cc: stable@vger.kernel.org
+Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Link: https://lore.kernel.org/r/20200731073822.13326-1-jasowang@redhat.com
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/vdpa/vdpa_sim/vdpa_sim.c |   31 +++++++++++++++++++++++++++----
+ 1 file changed, 27 insertions(+), 4 deletions(-)
+
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+@@ -70,6 +70,8 @@ struct vdpasim {
+       u32 status;
+       u32 generation;
+       u64 features;
++      /* spinlock to synchronize iommu table */
++      spinlock_t iommu_lock;
+ };
+ static struct vdpasim *vdpasim_dev;
+@@ -118,7 +120,9 @@ static void vdpasim_reset(struct vdpasim
+       for (i = 0; i < VDPASIM_VQ_NUM; i++)
+               vdpasim_vq_reset(&vdpasim->vqs[i]);
++      spin_lock(&vdpasim->iommu_lock);
+       vhost_iotlb_reset(vdpasim->iommu);
++      spin_unlock(&vdpasim->iommu_lock);
+       vdpasim->features = 0;
+       vdpasim->status = 0;
+@@ -235,8 +239,10 @@ static dma_addr_t vdpasim_map_page(struc
+       /* For simplicity, use identical mapping to avoid e.g iova
+        * allocator.
+        */
++      spin_lock(&vdpasim->iommu_lock);
+       ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
+                                   pa, dir_to_perm(dir));
++      spin_unlock(&vdpasim->iommu_lock);
+       if (ret)
+               return DMA_MAPPING_ERROR;
+@@ -250,8 +256,10 @@ static void vdpasim_unmap_page(struct de
+       struct vdpasim *vdpasim = dev_to_sim(dev);
+       struct vhost_iotlb *iommu = vdpasim->iommu;
++      spin_lock(&vdpasim->iommu_lock);
+       vhost_iotlb_del_range(iommu, (u64)dma_addr,
+                             (u64)dma_addr + size - 1);
++      spin_unlock(&vdpasim->iommu_lock);
+ }
+ static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
+@@ -263,9 +271,10 @@ static void *vdpasim_alloc_coherent(stru
+       void *addr = kmalloc(size, flag);
+       int ret;
+-      if (!addr)
++      spin_lock(&vdpasim->iommu_lock);
++      if (!addr) {
+               *dma_addr = DMA_MAPPING_ERROR;
+-      else {
++      } else {
+               u64 pa = virt_to_phys(addr);
+               ret = vhost_iotlb_add_range(iommu, (u64)pa,
+@@ -278,6 +287,7 @@ static void *vdpasim_alloc_coherent(stru
+               } else
+                       *dma_addr = (dma_addr_t)pa;
+       }
++      spin_unlock(&vdpasim->iommu_lock);
+       return addr;
+ }
+@@ -289,8 +299,11 @@ static void vdpasim_free_coherent(struct
+       struct vdpasim *vdpasim = dev_to_sim(dev);
+       struct vhost_iotlb *iommu = vdpasim->iommu;
++      spin_lock(&vdpasim->iommu_lock);
+       vhost_iotlb_del_range(iommu, (u64)dma_addr,
+                             (u64)dma_addr + size - 1);
++      spin_unlock(&vdpasim->iommu_lock);
++
+       kfree(phys_to_virt((uintptr_t)dma_addr));
+ }
+@@ -531,6 +544,7 @@ static int vdpasim_set_map(struct vdpa_d
+       u64 start = 0ULL, last = 0ULL - 1;
+       int ret;
++      spin_lock(&vdpasim->iommu_lock);
+       vhost_iotlb_reset(vdpasim->iommu);
+       for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+@@ -540,10 +554,12 @@ static int vdpasim_set_map(struct vdpa_d
+               if (ret)
+                       goto err;
+       }
++      spin_unlock(&vdpasim->iommu_lock);
+       return 0;
+ err:
+       vhost_iotlb_reset(vdpasim->iommu);
++      spin_unlock(&vdpasim->iommu_lock);
+       return ret;
+ }
+@@ -551,16 +567,23 @@ static int vdpasim_dma_map(struct vdpa_d
+                          u64 pa, u32 perm)
+ {
+       struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
++      int ret;
+-      return vhost_iotlb_add_range(vdpasim->iommu, iova,
+-                                   iova + size - 1, pa, perm);
++      spin_lock(&vdpasim->iommu_lock);
++      ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
++                                  perm);
++      spin_unlock(&vdpasim->iommu_lock);
++
++      return ret;
+ }
+ static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
+ {
+       struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
++      spin_lock(&vdpasim->iommu_lock);
+       vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
++      spin_unlock(&vdpasim->iommu_lock);
+       return 0;
+ }