--- /dev/null
+From 4b4e0e32e4b09274dbc9d173016c1a026f44608c Mon Sep 17 00:00:00 2001
+From: Hui Wang <hui.wang@canonical.com>
+Date: Tue, 16 Jul 2019 15:21:34 +0800
+Subject: ALSA: hda/realtek: apply ALC891 headset fixup to one Dell machine
+
+From: Hui Wang <hui.wang@canonical.com>
+
+commit 4b4e0e32e4b09274dbc9d173016c1a026f44608c upstream.
+
+Without this patch, the headset-mic and headphone-mic don't work.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Hui Wang <hui.wang@canonical.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7272,6 +7272,11 @@ static const struct snd_hda_pin_quirk al
+ {0x18, 0x01a19030},
+ {0x1a, 0x01813040},
+ {0x21, 0x01014020}),
++ SND_HDA_PIN_QUIRK(0x10ec0867, 0x1028, "Dell", ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
++ {0x16, 0x01813030},
++ {0x17, 0x02211010},
++ {0x18, 0x01a19040},
++ {0x21, 0x01014020}),
+ SND_HDA_PIN_QUIRK(0x10ec0662, 0x1028, "Dell", ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
+ {0x14, 0x01014010},
+ {0x18, 0x01a19020},
--- /dev/null
+From ede34f397ddb063b145b9e7d79c6026f819ded13 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Mon, 15 Jul 2019 22:50:27 +0200
+Subject: ALSA: seq: Break too long mutex context in the write loop
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit ede34f397ddb063b145b9e7d79c6026f819ded13 upstream.
+
+The fix for the racy writes and ioctls to sequencer widened the
+application of client->ioctl_mutex to the whole write loop. Although
+it does unlock/relock for the lengthy operation like the event dup,
+the loop keeps the ioctl_mutex for the whole time in other
+situations. This may take quite a long time if user-space were to
+give a huge buffer, and this is a likely cause of some weird behavior
+spotted by the syzkaller fuzzer.
+
+This patch puts a simple workaround, just adding a mutex break in the
+loop when a large number of events have been processed. This
+shouldn't hit any performance drop because the threshold is set high
+enough for usual operations.
+
+Fixes: 7bd800915677 ("ALSA: seq: More protection for concurrent write and ioctl races")
+Reported-by: syzbot+97aae04ce27e39cbfca9@syzkaller.appspotmail.com
+Reported-by: syzbot+4c595632b98bb8ffcc66@syzkaller.appspotmail.com
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/core/seq/seq_clientmgr.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1001,7 +1001,7 @@ static ssize_t snd_seq_write(struct file
+ {
+ struct snd_seq_client *client = file->private_data;
+ int written = 0, len;
+- int err;
++ int err, handled;
+ struct snd_seq_event event;
+
+ if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
+@@ -1014,6 +1014,8 @@ static ssize_t snd_seq_write(struct file
+ if (!client->accept_output || client->pool == NULL)
+ return -ENXIO;
+
++ repeat:
++ handled = 0;
+ /* allocate the pool now if the pool is not allocated yet */
+ mutex_lock(&client->ioctl_mutex);
+ if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
+@@ -1073,12 +1075,19 @@ static ssize_t snd_seq_write(struct file
+ 0, 0, &client->ioctl_mutex);
+ if (err < 0)
+ break;
++ handled++;
+
+ __skip_event:
+ /* Update pointers and counts */
+ count -= len;
+ buf += len;
+ written += len;
++
++ /* let's have a coffee break if too many events are queued */
++ if (++handled >= 200) {
++ mutex_unlock(&client->ioctl_mutex);
++ goto repeat;
++ }
+ }
+
+ out:
--- /dev/null
+From aeb87246537a83c2aff482f3f34a2e0991e02cbc Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Mon, 24 Jun 2019 07:20:14 +0000
+Subject: lib/scatterlist: Fix mapping iterator when sg->offset is greater than PAGE_SIZE
+
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+
+commit aeb87246537a83c2aff482f3f34a2e0991e02cbc upstream.
+
+All mapping iterator logic is based on the assumption that sg->offset
+is always lower than PAGE_SIZE.
+
+But there are situations where sg->offset is such that the SG item
+is on the second page. In that case sg_copy_to_buffer() fails to
+properly copy the data into the buffer. One of the reasons is
+that the data will be outside the kmapped area used to access that
+data.
+
+This patch fixes the issue by adjusting the mapping iterator
+offset and pgoffset fields such that offset is always lower than
+PAGE_SIZE.
+
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Fixes: 4225fc8555a9 ("lib/scatterlist: use page iterator in the mapping iterator")
+Cc: stable@vger.kernel.org
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ lib/scatterlist.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -496,17 +496,18 @@ static bool sg_miter_get_next_page(struc
+ {
+ if (!miter->__remaining) {
+ struct scatterlist *sg;
+- unsigned long pgoffset;
+
+ if (!__sg_page_iter_next(&miter->piter))
+ return false;
+
+ sg = miter->piter.sg;
+- pgoffset = miter->piter.sg_pgoffset;
+
+- miter->__offset = pgoffset ? 0 : sg->offset;
++ miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
++ miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
++ miter->__offset &= PAGE_SIZE - 1;
+ miter->__remaining = sg->offset + sg->length -
+- (pgoffset << PAGE_SHIFT) - miter->__offset;
++ (miter->piter.sg_pgoffset << PAGE_SHIFT) -
++ miter->__offset;
+ miter->__remaining = min_t(unsigned long, miter->__remaining,
+ PAGE_SIZE - miter->__offset);
+ }
--- /dev/null
+From 766b9b168f6c75c350dd87c3e0bc6a9b322f0013 Mon Sep 17 00:00:00 2001
+From: Ezequiel Garcia <ezequiel@collabora.com>
+Date: Thu, 2 May 2019 18:00:43 -0400
+Subject: media: coda: Remove unbalanced and unneeded mutex unlock
+
+From: Ezequiel Garcia <ezequiel@collabora.com>
+
+commit 766b9b168f6c75c350dd87c3e0bc6a9b322f0013 upstream.
+
+The mutex unlock in the threaded interrupt handler is not paired
+with any mutex lock. Remove it.
+
+This bug has been here for a really long time, so it applies
+to any stable repo.
+
+Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
+Signed-off-by: Ezequiel Garcia <ezequiel@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Cc: stable@vger.kernel.org
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/platform/coda/coda-bit.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/media/platform/coda/coda-bit.c
++++ b/drivers/media/platform/coda/coda-bit.c
+@@ -2107,7 +2107,6 @@ irqreturn_t coda_irq_handler(int irq, vo
+ if (ctx == NULL) {
+ v4l2_err(&dev->v4l2_dev,
+ "Instance released before the end of transaction\n");
+- mutex_unlock(&dev->coda_mutex);
+ return IRQ_HANDLED;
+ }
+
--- /dev/null
+From 07d89227a983df957a6a7c56f7c040cde9ac571f Mon Sep 17 00:00:00 2001
+From: Boris Brezillon <boris.brezillon@collabora.com>
+Date: Wed, 19 Jun 2019 05:21:33 -0400
+Subject: media: v4l2: Test type instead of cfg->type in v4l2_ctrl_new_custom()
+
+From: Boris Brezillon <boris.brezillon@collabora.com>
+
+commit 07d89227a983df957a6a7c56f7c040cde9ac571f upstream.
+
+cfg->type can be overridden by v4l2_ctrl_fill() and the new value is
+stored in the local type var. Fix the tests to use this local var.
+
+Fixes: 0996517cf8ea ("V4L/DVB: v4l2: Add new control handling framework")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
+[hverkuil-cisco@xs4all.nl: change to !qmenu and !qmenu_int (checkpatch)]
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/media/v4l2-core/v4l2-ctrls.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/drivers/media/v4l2-core/v4l2-ctrls.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls.c
+@@ -2103,16 +2103,15 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(s
+ v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
+ &def, &flags);
+
+- is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU ||
+- cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU);
++ is_menu = (type == V4L2_CTRL_TYPE_MENU ||
++ type == V4L2_CTRL_TYPE_INTEGER_MENU);
+ if (is_menu)
+ WARN_ON(step);
+ else
+ WARN_ON(cfg->menu_skip_mask);
+- if (cfg->type == V4L2_CTRL_TYPE_MENU && qmenu == NULL)
++ if (type == V4L2_CTRL_TYPE_MENU && !qmenu) {
+ qmenu = v4l2_ctrl_get_menu(cfg->id);
+- else if (cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU &&
+- qmenu_int == NULL) {
++ } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) {
+ handler_set_err(hdl, -EINVAL);
+ return NULL;
+ }
--- /dev/null
+From 44942b4e457beda00981f616402a1a791e8c616e Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Thu, 27 Jun 2019 06:41:45 -0400
+Subject: NFSv4: Handle the special Linux file open access mode
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit 44942b4e457beda00981f616402a1a791e8c616e upstream.
+
+According to the open() manpage, Linux reserves the access mode 3
+to mean "check for read and write permission on the file and return
+a file descriptor that can't be used for reading or writing."
+
+Currently, the NFSv4 code will ask the server to open the file,
+and will use an incorrect share access mode of 0. Since it has
+an incorrect share access mode, the client later forgets to send
+a corresponding close, meaning it can leak stateids on the server.
+
+Fixes: ce4ef7c0a8a05 ("NFS: Split out NFS v4 file operations")
+Cc: stable@vger.kernel.org # 3.6+
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/inode.c | 1 +
+ fs/nfs/nfs4file.c | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -950,6 +950,7 @@ int nfs_open(struct inode *inode, struct
+ nfs_fscache_open_file(inode, filp);
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(nfs_open);
+
+ /*
+ * This function is called whenever some part of NFS notices that
+--- a/fs/nfs/nfs4file.c
++++ b/fs/nfs/nfs4file.c
+@@ -49,7 +49,7 @@ nfs4_file_open(struct inode *inode, stru
+ return err;
+
+ if ((openflags & O_ACCMODE) == 3)
+- openflags--;
++ return nfs_open(inode, filp);
+
+ /* We can't create new files here */
+ openflags &= ~(O_CREAT|O_EXCL);
--- /dev/null
+From db531db951f950b86d274cc8ed7b21b9e2240036 Mon Sep 17 00:00:00 2001
+From: Max Kellermann <mk@cm4all.com>
+Date: Fri, 12 Jul 2019 16:18:06 +0200
+Subject: Revert "NFS: readdirplus optimization by cache mechanism" (memleak)
+
+From: Max Kellermann <mk@cm4all.com>
+
+commit db531db951f950b86d274cc8ed7b21b9e2240036 upstream.
+
+This reverts commit be4c2d4723a4a637f0d1b4f7c66447141a4b3564.
+
+That commit caused a severe memory leak in nfs_readdir_make_qstr().
+
+When listing a directory with more than 100 files (this is how many
+struct nfs_cache_array_entry elements fit in one 4kB page), all
+allocated file name strings past those 100 leak.
+
+The root of the leakage is that those string pointers are managed in
+pages which are never linked into the page cache.
+
+fs/nfs/dir.c puts pages into the page cache by calling
+read_cache_page(); the callback function nfs_readdir_filler() will
+then fill the given page struct which was passed to it, which is
+already linked in the page cache (by do_read_cache_page() calling
+add_to_page_cache_lru()).
+
+Commit be4c2d4723a4 added another (local) array of allocated pages, to
+be filled with more data, instead of discarding excess items received
+from the NFS server. Those additional pages can be used by the next
+nfs_readdir_filler() call (from within the same nfs_readdir() call).
+
+The leak happens when some of those additional pages are never used
+(copied to the page cache using copy_highpage()). The pages will be
+freed by nfs_readdir_free_pages(), but their contents will not. The
+commit did not invoke nfs_readdir_clear_array() (and doing so would
+have been dangerous, because it did not track which of those pages
+were already copied to the page cache, risking double free bugs).
+
+How to reproduce the leak:
+
+- Use a kernel with CONFIG_SLUB_DEBUG_ON.
+
+- Create a directory on a NFS mount with more than 100 files with
+ names long enough to use the "kmalloc-32" slab (so we can easily
+ look up the allocation counts):
+
+ for i in `seq 110`; do touch ${i}_0123456789abcdef; done
+
+- Drop all caches:
+
+ echo 3 >/proc/sys/vm/drop_caches
+
+- Check the allocation counter:
+
+ grep nfs_readdir /sys/kernel/slab/kmalloc-32/alloc_calls
+ 30564391 nfs_readdir_add_to_array+0x73/0xd0 age=534558/4791307/6540952 pid=370-1048386 cpus=0-47 nodes=0-1
+
+- Request a directory listing and check the allocation counters again:
+
+ ls
+ [...]
+ grep nfs_readdir /sys/kernel/slab/kmalloc-32/alloc_calls
+ 30564511 nfs_readdir_add_to_array+0x73/0xd0 age=207/4792999/6542663 pid=370-1048386 cpus=0-47 nodes=0-1
+
+There are now 120 new allocations.
+
+- Drop all caches and check the counters again:
+
+ echo 3 >/proc/sys/vm/drop_caches
+ grep nfs_readdir /sys/kernel/slab/kmalloc-32/alloc_calls
+ 30564401 nfs_readdir_add_to_array+0x73/0xd0 age=735/4793524/6543176 pid=370-1048386 cpus=0-47 nodes=0-1
+
+110 allocations are gone, but 10 have leaked and will never be freed.
+
+Unhelpfully, those allocations are explicitly excluded from KMEMLEAK,
+that's why my initial attempts with KMEMLEAK were not successful:
+
+ /*
+ * Avoid a kmemleak false positive. The pointer to the name is stored
+ * in a page cache page which kmemleak does not scan.
+ */
+ kmemleak_not_leak(string->name);
+
+It would be possible to solve this bug without reverting the whole
+commit:
+
+- keep track of which pages were not used, and call
+ nfs_readdir_clear_array() on them, or
+- manually link those pages into the page cache
+
+But for now I have decided to just revert the commit, because the real
+fix would require complex considerations, risking more dangerous
+(crash) bugs, which may seem unsuitable for the stable branches.
+
+Signed-off-by: Max Kellermann <mk@cm4all.com>
+Cc: stable@vger.kernel.org # v5.1+
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index bd1f9555447b..8d501093660f 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -144,19 +144,12 @@ struct nfs_cache_array {
+ struct nfs_cache_array_entry array[0];
+ };
+
+-struct readdirvec {
+- unsigned long nr;
+- unsigned long index;
+- struct page *pages[NFS_MAX_READDIR_RAPAGES];
+-};
+-
+ typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, bool);
+ typedef struct {
+ struct file *file;
+ struct page *page;
+ struct dir_context *ctx;
+ unsigned long page_index;
+- struct readdirvec pvec;
+ u64 *dir_cookie;
+ u64 last_cookie;
+ loff_t current_index;
+@@ -536,10 +529,6 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
+ struct nfs_cache_array *array;
+ unsigned int count = 0;
+ int status;
+- int max_rapages = NFS_MAX_READDIR_RAPAGES;
+-
+- desc->pvec.index = desc->page_index;
+- desc->pvec.nr = 0;
+
+ scratch = alloc_page(GFP_KERNEL);
+ if (scratch == NULL)
+@@ -564,40 +553,20 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
+ if (desc->plus)
+ nfs_prime_dcache(file_dentry(desc->file), entry);
+
+- status = nfs_readdir_add_to_array(entry, desc->pvec.pages[desc->pvec.nr]);
+- if (status == -ENOSPC) {
+- desc->pvec.nr++;
+- if (desc->pvec.nr == max_rapages)
+- break;
+- status = nfs_readdir_add_to_array(entry, desc->pvec.pages[desc->pvec.nr]);
+- }
++ status = nfs_readdir_add_to_array(entry, page);
+ if (status != 0)
+ break;
+ } while (!entry->eof);
+
+- /*
+- * page and desc->pvec.pages[0] are valid, don't need to check
+- * whether or not to be NULL.
+- */
+- copy_highpage(page, desc->pvec.pages[0]);
+-
+ out_nopages:
+ if (count == 0 || (status == -EBADCOOKIE && entry->eof != 0)) {
+- array = kmap_atomic(desc->pvec.pages[desc->pvec.nr]);
++ array = kmap(page);
+ array->eof_index = array->size;
+ status = 0;
+- kunmap_atomic(array);
++ kunmap(page);
+ }
+
+ put_page(scratch);
+-
+- /*
+- * desc->pvec.nr > 0 means at least one page was completely filled,
+- * we should return -ENOSPC. Otherwise function
+- * nfs_readdir_xdr_to_array will enter infinite loop.
+- */
+- if (desc->pvec.nr > 0)
+- return -ENOSPC;
+ return status;
+ }
+
+@@ -631,24 +600,6 @@ int nfs_readdir_alloc_pages(struct page **pages, unsigned int npages)
+ return -ENOMEM;
+ }
+
+-/*
+- * nfs_readdir_rapages_init initialize rapages by nfs_cache_array structure.
+- */
+-static
+-void nfs_readdir_rapages_init(nfs_readdir_descriptor_t *desc)
+-{
+- struct nfs_cache_array *array;
+- int max_rapages = NFS_MAX_READDIR_RAPAGES;
+- int index;
+-
+- for (index = 0; index < max_rapages; index++) {
+- array = kmap_atomic(desc->pvec.pages[index]);
+- memset(array, 0, sizeof(struct nfs_cache_array));
+- array->eof_index = -1;
+- kunmap_atomic(array);
+- }
+-}
+-
+ static
+ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, struct inode *inode)
+ {
+@@ -659,12 +610,6 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
+ int status = -ENOMEM;
+ unsigned int array_size = ARRAY_SIZE(pages);
+
+- /*
+- * This means we hit readdir rdpages miss, the preallocated rdpages
+- * are useless, the preallocate rdpages should be reinitialized.
+- */
+- nfs_readdir_rapages_init(desc);
+-
+ entry.prev_cookie = 0;
+ entry.cookie = desc->last_cookie;
+ entry.eof = 0;
+@@ -725,24 +670,9 @@ int nfs_readdir_filler(void *data, struct page* page)
+ struct inode *inode = file_inode(desc->file);
+ int ret;
+
+- /*
+- * If desc->page_index in range desc->pvec.index and
+- * desc->pvec.index + desc->pvec.nr, we get readdir cache hit.
+- */
+- if (desc->page_index >= desc->pvec.index &&
+- desc->page_index < (desc->pvec.index + desc->pvec.nr)) {
+- /*
+- * page and desc->pvec.pages[x] are valid, don't need to check
+- * whether or not to be NULL.
+- */
+- copy_highpage(page, desc->pvec.pages[desc->page_index - desc->pvec.index]);
+- ret = 0;
+- } else {
+- ret = nfs_readdir_xdr_to_array(desc, page, inode);
+- if (ret < 0)
+- goto error;
+- }
+-
++ ret = nfs_readdir_xdr_to_array(desc, page, inode);
++ if (ret < 0)
++ goto error;
+ SetPageUptodate(page);
+
+ if (invalidate_inode_pages2_range(inode->i_mapping, page->index + 1, -1) < 0) {
+@@ -907,7 +837,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
+ *desc = &my_desc;
+ struct nfs_open_dir_context *dir_ctx = file->private_data;
+ int res = 0;
+- int max_rapages = NFS_MAX_READDIR_RAPAGES;
+
+ dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n",
+ file, (long long)ctx->pos);
+@@ -927,12 +856,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
+ desc->decode = NFS_PROTO(inode)->decode_dirent;
+ desc->plus = nfs_use_readdirplus(inode, ctx);
+
+- res = nfs_readdir_alloc_pages(desc->pvec.pages, max_rapages);
+- if (res < 0)
+- return -ENOMEM;
+-
+- nfs_readdir_rapages_init(desc);
+-
+ if (ctx->pos == 0 || nfs_attribute_cache_expired(inode))
+ res = nfs_revalidate_mapping(inode, file->f_mapping);
+ if (res < 0)
+@@ -968,7 +891,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
+ break;
+ } while (!desc->eof);
+ out:
+- nfs_readdir_free_pages(desc->pvec.pages, max_rapages);
+ if (res > 0)
+ res = 0;
+ dfprintk(FILE, "NFS: readdir(%pD2) returns %d\n", file, res);
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index f359e760ed41..a2346a2f8361 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -69,8 +69,7 @@ struct nfs_clone_mount {
+ * Maximum number of pages that readdir can use for creating
+ * a vmapped array of pages.
+ */
+-#define NFS_MAX_READDIR_PAGES 64
+-#define NFS_MAX_READDIR_RAPAGES 8
++#define NFS_MAX_READDIR_PAGES 8
+
+ struct nfs_client_initdata {
+ unsigned long init_flags;
regulator-s2mps11-fix-buck7-and-buck8-wrong-voltages.patch
arm64-tegra-update-jetson-tx1-gpu-regulator-timings.patch
iwlwifi-pcie-don-t-service-an-interrupt-that-was-masked.patch
+tracing-snapshot-resize-spare-buffer-if-size-changed.patch
+nfsv4-handle-the-special-linux-file-open-access-mode.patch
+lib-scatterlist-fix-mapping-iterator-when-sg-offset-is-greater-than-page_size.patch
+alsa-seq-break-too-long-mutex-context-in-the-write-loop.patch
+alsa-hda-realtek-apply-alc891-headset-fixup-to-one-dell-machine.patch
+media-v4l2-test-type-instead-of-cfg-type-in-v4l2_ctrl_new_custom.patch
+media-coda-remove-unbalanced-and-unneeded-mutex-unlock.patch
+revert-nfs-readdirplus-optimization-by-cache-mechanism-memleak.patch
--- /dev/null
+From 46cc0b44428d0f0e81f11ea98217fc0edfbeab07 Mon Sep 17 00:00:00 2001
+From: Eiichi Tsukata <devel@etsukata.com>
+Date: Tue, 25 Jun 2019 10:29:10 +0900
+Subject: tracing/snapshot: Resize spare buffer if size changed
+
+From: Eiichi Tsukata <devel@etsukata.com>
+
+commit 46cc0b44428d0f0e81f11ea98217fc0edfbeab07 upstream.
+
+Current snapshot implementation swaps two ring_buffers even though their
+sizes are different from each other, that can cause an inconsistency
+between the contents of buffer_size_kb file and the current buffer size.
+
+For example:
+
+ # cat buffer_size_kb
+ 7 (expanded: 1408)
+ # echo 1 > events/enable
+ # grep bytes per_cpu/cpu0/stats
+ bytes: 1441020
+ # echo 1 > snapshot // current:1408, spare:1408
+ # echo 123 > buffer_size_kb // current:123, spare:1408
+ # echo 1 > snapshot // current:1408, spare:123
+ # grep bytes per_cpu/cpu0/stats
+ bytes: 1443700
+ # cat buffer_size_kb
+ 123 // != current:1408
+
+And also, a similar per-cpu case hits the following WARNING:
+
+Reproducer:
+
+ # echo 1 > per_cpu/cpu0/snapshot
+ # echo 123 > buffer_size_kb
+ # echo 1 > per_cpu/cpu0/snapshot
+
+WARNING:
+
+ WARNING: CPU: 0 PID: 1946 at kernel/trace/trace.c:1607 update_max_tr_single.part.0+0x2b8/0x380
+ Modules linked in:
+ CPU: 0 PID: 1946 Comm: bash Not tainted 5.2.0-rc6 #20
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-2.fc30 04/01/2014
+ RIP: 0010:update_max_tr_single.part.0+0x2b8/0x380
+ Code: ff e8 dc da f9 ff 0f 0b e9 88 fe ff ff e8 d0 da f9 ff 44 89 ee bf f5 ff ff ff e8 33 dc f9 ff 41 83 fd f5 74 96 e8 b8 da f9 ff <0f> 0b eb 8d e8 af da f9 ff 0f 0b e9 bf fd ff ff e8 a3 da f9 ff 48
+ RSP: 0018:ffff888063e4fca0 EFLAGS: 00010093
+ RAX: ffff888066214380 RBX: ffffffff99850fe0 RCX: ffffffff964298a8
+ RDX: 0000000000000000 RSI: 00000000fffffff5 RDI: 0000000000000005
+ RBP: 1ffff1100c7c9f96 R08: ffff888066214380 R09: ffffed100c7c9f9b
+ R10: ffffed100c7c9f9a R11: 0000000000000003 R12: 0000000000000000
+ R13: 00000000ffffffea R14: ffff888066214380 R15: ffffffff99851060
+ FS: 00007f9f8173c700(0000) GS:ffff88806d000000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000000714dc0 CR3: 0000000066fa6000 CR4: 00000000000006f0
+ Call Trace:
+ ? trace_array_printk_buf+0x140/0x140
+ ? __mutex_lock_slowpath+0x10/0x10
+ tracing_snapshot_write+0x4c8/0x7f0
+ ? trace_printk_init_buffers+0x60/0x60
+ ? selinux_file_permission+0x3b/0x540
+ ? tracer_preempt_off+0x38/0x506
+ ? trace_printk_init_buffers+0x60/0x60
+ __vfs_write+0x81/0x100
+ vfs_write+0x1e1/0x560
+ ksys_write+0x126/0x250
+ ? __ia32_sys_read+0xb0/0xb0
+ ? do_syscall_64+0x1f/0x390
+ do_syscall_64+0xc1/0x390
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+This patch adds resize_buffer_duplicate_size() to check if there is a
+difference between current/spare buffer sizes and resize a spare buffer
+if necessary.
+
+Link: http://lkml.kernel.org/r/20190625012910.13109-1-devel@etsukata.com
+
+Cc: stable@vger.kernel.org
+Fixes: ad909e21bbe69 ("tracing: Add internal tracing_snapshot() functions")
+Signed-off-by: Eiichi Tsukata <devel@etsukata.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5820,11 +5820,15 @@ tracing_snapshot_write(struct file *filp
+ break;
+ }
+ #endif
+- if (!tr->allocated_snapshot) {
++ if (!tr->allocated_snapshot)
++ ret = resize_buffer_duplicate_size(&tr->max_buffer,
++ &tr->trace_buffer, iter->cpu_file);
++ else
+ ret = alloc_snapshot(tr);
+- if (ret < 0)
+- break;
+- }
++
++ if (ret < 0)
++ break;
++
+ local_irq_disable();
+ /* Now, we're going to swap */
+ if (iter->cpu_file == RING_BUFFER_ALL_CPUS)