--- /dev/null
+From 4977d121bc9bc5138d4d48b85469123001859573 Mon Sep 17 00:00:00 2001
+From: Naohiro Aota <naohiro.aota@wdc.com>
+Date: Wed, 28 Oct 2020 16:25:36 +0900
+Subject: block: advance iov_iter on bio_add_hw_page failure
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+commit 4977d121bc9bc5138d4d48b85469123001859573 upstream.
+
+When the bio's size reaches max_append_sectors, bio_add_hw_page returns
+0 and __bio_iov_append_get_pages then returns -EINVAL. This is the
+expected result of building a bio small enough not to be split in the
+IO path. However, the iov_iter is not advanced in this case, causing
+the same pages to be filled for the bio again and again.
+
+Fix the case by properly advancing the iov_iter for already processed
+pages.
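+
+As a simplified sketch of the resulting logic (illustrative only, the
+real change is in the hunk below): stop adding pages as soon as
+bio_add_hw_page() refuses one, and advance the iterator by the bytes
+that were actually added rather than by the full requested size:
+
+    while (left > 0) {
+        len = min_t(size_t, PAGE_SIZE - offset, left);
+        if (bio_add_hw_page(q, bio, page, len, offset,
+                            max_append_sectors, &same_page) != len) {
+            ret = -EINVAL;
+            break;      /* 'left' still counts the unconsumed bytes */
+        }
+        left -= len;
+        offset = 0;
+    }
+    iov_iter_advance(iter, size - left);    /* only what was consumed */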
+
+Fixes: 0512a75b98f8 ("block: Introduce REQ_OP_ZONE_APPEND")
+Cc: stable@vger.kernel.org # 5.8+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/bio.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1046,6 +1046,7 @@ static int __bio_iov_append_get_pages(st
+ ssize_t size, left;
+ unsigned len, i;
+ size_t offset;
++ int ret = 0;
+
+ if (WARN_ON_ONCE(!max_append_sectors))
+ return 0;
+@@ -1068,15 +1069,17 @@ static int __bio_iov_append_get_pages(st
+
+ len = min_t(size_t, PAGE_SIZE - offset, left);
+ if (bio_add_hw_page(q, bio, page, len, offset,
+- max_append_sectors, &same_page) != len)
+- return -EINVAL;
++ max_append_sectors, &same_page) != len) {
++ ret = -EINVAL;
++ break;
++ }
+ if (same_page)
+ put_page(page);
+ offset = 0;
+ }
+
+- iov_iter_advance(iter, size);
+- return 0;
++ iov_iter_advance(iter, size - left);
++ return ret;
+ }
+
+ /**
--- /dev/null
+From baf6fd97b16ea8f981b8a8b04039596f32fc2972 Mon Sep 17 00:00:00 2001
+From: Paul Cercueil <paul@crapouillou.net>
+Date: Sun, 4 Oct 2020 16:03:07 +0200
+Subject: dmaengine: dma-jz4780: Fix race in jz4780_dma_tx_status
+
+From: Paul Cercueil <paul@crapouillou.net>
+
+commit baf6fd97b16ea8f981b8a8b04039596f32fc2972 upstream.
+
+The jz4780_dma_tx_status() function would check if a channel's cookie
+state was set to 'completed', and if not, it would enter the critical
+section. However, in that time frame, the jz4780_dma_chan_irq() function
+was able to set the cookie to 'completed' and clear the jzchan->vchan
+pointer, which was then dereferenced in the critical section of the
+first function.
+
+Fix this race by checking the channel's cookie state after entering the
+critical section rather than before.
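+
+In other words (a simplified sketch of the resulting flow, see the hunk
+below for the real change), the cookie check has to happen with the
+channel lock held, so the IRQ handler cannot complete the cookie in
+between the check and the residue calculation:
+
+    spin_lock_irqsave(&jzchan->vchan.lock, flags);
+    status = dma_cookie_status(chan, cookie, txstate);
+    if (status == DMA_COMPLETE || !txstate)
+        goto out_unlock;
+    /* ... residue calculation, now safe against jz4780_dma_chan_irq() ... */
+out_unlock:
+    spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
+    return status;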
+
+Fixes: d894fc6046fe ("dmaengine: jz4780: add driver for the Ingenic JZ4780 DMA controller")
+Cc: stable@vger.kernel.org # v4.0
+Signed-off-by: Paul Cercueil <paul@crapouillou.net>
+Reported-by: Artur Rojek <contact@artur-rojek.eu>
+Tested-by: Artur Rojek <contact@artur-rojek.eu>
+Link: https://lore.kernel.org/r/20201004140307.885556-1-paul@crapouillou.net
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/dma-jz4780.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/dma/dma-jz4780.c
++++ b/drivers/dma/dma-jz4780.c
+@@ -639,11 +639,11 @@ static enum dma_status jz4780_dma_tx_sta
+ unsigned long flags;
+ unsigned long residue = 0;
+
++ spin_lock_irqsave(&jzchan->vchan.lock, flags);
++
+ status = dma_cookie_status(chan, cookie, txstate);
+ if ((status == DMA_COMPLETE) || (txstate == NULL))
+- return status;
+-
+- spin_lock_irqsave(&jzchan->vchan.lock, flags);
++ goto out_unlock_irqrestore;
+
+ vdesc = vchan_find_desc(&jzchan->vchan, cookie);
+ if (vdesc) {
+@@ -660,6 +660,7 @@ static enum dma_status jz4780_dma_tx_sta
+ && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
+ status = DMA_ERROR;
+
++out_unlock_irqrestore:
+ spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
+ return status;
+ }
--- /dev/null
+From f49a51bfdc8ea717c97ccd4cc98b7e6daaa5553a Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Tue, 27 Oct 2020 22:49:22 +0100
+Subject: drm/shmem-helpers: Fix dma_buf_mmap forwarding bug
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit f49a51bfdc8ea717c97ccd4cc98b7e6daaa5553a upstream.
+
+When we forward an mmap to the dma_buf exporter, they get to own
+everything. Unfortunately drm_gem_mmap_obj() overwrote
+vma->vm_private_data after the driver callback, which completely broke
+the exporter. This was noticed because vb2_common_vm_close blew up on a
+Mali GPU with panfrost after commit 26d3ac3cb04d
+("drm/shmem-helpers: Redirect mmap for imported dma-buf").
+
+Unfortunately drm_gem_mmap_obj also acquires a surplus reference that
+we need to drop in the shmem helpers, which is a bit of a layering
+violation. Maybe the entire dma_buf_mmap forwarding should be pulled
+into core gem code.
+
+Note that the only two other drivers which forward mmap in their own
+code (etnaviv and exynos) get this somewhat right by overriding the
+gem mmap code. But they seem to still have the leak. This might be a
+good excuse to move these drivers over to shmem helpers completely.
+
+Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
+Acked-by: Christian König <christian.koenig@amd.com>
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Sumit Semwal <sumit.semwal@linaro.org>
+Cc: Lucas Stach <l.stach@pengutronix.de>
+Cc: Russell King <linux+etnaviv@armlinux.org.uk>
+Cc: Christian Gmeiner <christian.gmeiner@gmail.com>
+Cc: Inki Dae <inki.dae@samsung.com>
+Cc: Joonyoung Shim <jy0922.shim@samsung.com>
+Cc: Seung-Woo Kim <sw0312.kim@samsung.com>
+Cc: Kyungmin Park <kyungmin.park@samsung.com>
+Fixes: 26d3ac3cb04d ("drm/shmem-helpers: Redirect mmap for imported dma-buf")
+Cc: Boris Brezillon <boris.brezillon@collabora.com>
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: Gerd Hoffmann <kraxel@redhat.com>
+Cc: Rob Herring <robh@kernel.org>
+Cc: dri-devel@lists.freedesktop.org
+Cc: linux-media@vger.kernel.org
+Cc: linaro-mm-sig@lists.linaro.org
+Cc: <stable@vger.kernel.org> # v5.9+
+Reported-and-tested-by: piotr.oniszczuk@gmail.com
+Cc: piotr.oniszczuk@gmail.com
+Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20201027214922.3566743-1-daniel.vetter@ffwll.ch
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/drm_gem.c | 4 ++--
+ drivers/gpu/drm/drm_gem_shmem_helper.c | 7 ++++++-
+ 2 files changed, 8 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -1085,6 +1085,8 @@ int drm_gem_mmap_obj(struct drm_gem_obje
+ */
+ drm_gem_object_get(obj);
+
++ vma->vm_private_data = obj;
++
+ if (obj->funcs && obj->funcs->mmap) {
+ ret = obj->funcs->mmap(obj, vma);
+ if (ret) {
+@@ -1107,8 +1109,6 @@ int drm_gem_mmap_obj(struct drm_gem_obje
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ }
+
+- vma->vm_private_data = obj;
+-
+ return 0;
+ }
+ EXPORT_SYMBOL(drm_gem_mmap_obj);
+--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
++++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
+@@ -594,8 +594,13 @@ int drm_gem_shmem_mmap(struct drm_gem_ob
+ /* Remove the fake offset */
+ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+
+- if (obj->import_attach)
++ if (obj->import_attach) {
++ /* Drop the reference drm_gem_mmap_obj() acquired.*/
++ drm_gem_object_put(obj);
++ vma->vm_private_data = NULL;
++
+ return dma_buf_mmap(obj->dma_buf, vma, 0);
++ }
+
+ shmem = to_drm_gem_shmem_obj(obj);
+
--- /dev/null
+From 5a61ae1402f15276ee4e003e198aab816958ca69 Mon Sep 17 00:00:00 2001
+From: Andreas Gruenbacher <agruenba@redhat.com>
+Date: Fri, 28 Aug 2020 23:44:36 +0200
+Subject: gfs2: Make sure we don't miss any delayed withdraws
+
+From: Andreas Gruenbacher <agruenba@redhat.com>
+
+commit 5a61ae1402f15276ee4e003e198aab816958ca69 upstream.
+
+Commit ca399c96e96e changes gfs2_log_flush to not withdraw the
+filesystem while holding the log flush lock, but it fails to check if
+the filesystem needs to be withdrawn once the log flush lock has been
+released. Likewise, commit f05b86db314d depends on gfs2_log_flush to
+trigger delayed withdraws. Add that and clean up the code flow
+somewhat.
+
+In gfs2_put_super, add a check for delayed withdraws that have been
+missed to prevent these kinds of bugs in the future.
+
+Fixes: ca399c96e96e ("gfs2: flesh out delayed withdraw for gfs2_log_flush")
+Fixes: f05b86db314d ("gfs2: Prepare to withdraw as soon as an IO error occurs in log write")
+Cc: stable@vger.kernel.org # v5.7+: 462582b99b607: gfs2: add some much needed cleanup for log flushes that fail
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/gfs2/log.c | 61 ++++++++++++++++++++++++++++----------------------------
+ fs/gfs2/super.c | 2 +
+ fs/gfs2/util.h | 10 +++++++++
+ 3 files changed, 43 insertions(+), 30 deletions(-)
+
+--- a/fs/gfs2/log.c
++++ b/fs/gfs2/log.c
+@@ -954,10 +954,8 @@ void gfs2_log_flush(struct gfs2_sbd *sdp
+ goto out;
+
+ /* Log might have been flushed while we waited for the flush lock */
+- if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
+- up_write(&sdp->sd_log_flush_lock);
+- return;
+- }
++ if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags))
++ goto out;
+ trace_gfs2_log_flush(sdp, 1, flags);
+
+ if (flags & GFS2_LOG_HEAD_FLUSH_SHUTDOWN)
+@@ -971,25 +969,25 @@ void gfs2_log_flush(struct gfs2_sbd *sdp
+ if (unlikely (state == SFS_FROZEN))
+ if (gfs2_assert_withdraw_delayed(sdp,
+ !tr->tr_num_buf_new && !tr->tr_num_databuf_new))
+- goto out;
++ goto out_withdraw;
+ }
+
+ if (unlikely(state == SFS_FROZEN))
+ if (gfs2_assert_withdraw_delayed(sdp, !sdp->sd_log_num_revoke))
+- goto out;
++ goto out_withdraw;
+ if (gfs2_assert_withdraw_delayed(sdp,
+ sdp->sd_log_num_revoke == sdp->sd_log_committed_revoke))
+- goto out;
++ goto out_withdraw;
+
+ gfs2_ordered_write(sdp);
+ if (gfs2_withdrawn(sdp))
+- goto out;
++ goto out_withdraw;
+ lops_before_commit(sdp, tr);
+ if (gfs2_withdrawn(sdp))
+- goto out;
++ goto out_withdraw;
+ gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);
+ if (gfs2_withdrawn(sdp))
+- goto out;
++ goto out_withdraw;
+
+ if (sdp->sd_log_head != sdp->sd_log_flush_head) {
+ log_flush_wait(sdp);
+@@ -1000,7 +998,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp
+ log_write_header(sdp, flags);
+ }
+ if (gfs2_withdrawn(sdp))
+- goto out;
++ goto out_withdraw;
+ lops_after_commit(sdp, tr);
+
+ gfs2_log_lock(sdp);
+@@ -1020,7 +1018,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp
+ if (!sdp->sd_log_idle) {
+ empty_ail1_list(sdp);
+ if (gfs2_withdrawn(sdp))
+- goto out;
++ goto out_withdraw;
+ atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
+ trace_gfs2_log_blocks(sdp, -1);
+ log_write_header(sdp, flags);
+@@ -1033,27 +1031,30 @@ void gfs2_log_flush(struct gfs2_sbd *sdp
+ atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
+ }
+
+-out:
+- if (gfs2_withdrawn(sdp)) {
+- trans_drain(tr);
+- /**
+- * If the tr_list is empty, we're withdrawing during a log
+- * flush that targets a transaction, but the transaction was
+- * never queued onto any of the ail lists. Here we add it to
+- * ail1 just so that ail_drain() will find and free it.
+- */
+- spin_lock(&sdp->sd_ail_lock);
+- if (tr && list_empty(&tr->tr_list))
+- list_add(&tr->tr_list, &sdp->sd_ail1_list);
+- spin_unlock(&sdp->sd_ail_lock);
+- ail_drain(sdp); /* frees all transactions */
+- tr = NULL;
+- }
+-
++out_end:
+ trace_gfs2_log_flush(sdp, 0, flags);
++out:
+ up_write(&sdp->sd_log_flush_lock);
+-
+ gfs2_trans_free(sdp, tr);
++ if (gfs2_withdrawing(sdp))
++ gfs2_withdraw(sdp);
++ return;
++
++out_withdraw:
++ trans_drain(tr);
++ /**
++ * If the tr_list is empty, we're withdrawing during a log
++ * flush that targets a transaction, but the transaction was
++ * never queued onto any of the ail lists. Here we add it to
++ * ail1 just so that ail_drain() will find and free it.
++ */
++ spin_lock(&sdp->sd_ail_lock);
++ if (tr && list_empty(&tr->tr_list))
++ list_add(&tr->tr_list, &sdp->sd_ail1_list);
++ spin_unlock(&sdp->sd_ail_lock);
++ ail_drain(sdp); /* frees all transactions */
++ tr = NULL;
++ goto out_end;
+ }
+
+ /**
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -702,6 +702,8 @@ restart:
+ if (error)
+ gfs2_io_error(sdp);
+ }
++ WARN_ON(gfs2_withdrawing(sdp));
++
+ /* At this point, we're through modifying the disk */
+
+ /* Release stuff */
+--- a/fs/gfs2/util.h
++++ b/fs/gfs2/util.h
+@@ -205,6 +205,16 @@ static inline bool gfs2_withdrawn(struct
+ test_bit(SDF_WITHDRAWING, &sdp->sd_flags);
+ }
+
++/**
++ * gfs2_withdrawing - check if a withdraw is pending
++ * @sdp: the superblock
++ */
++static inline bool gfs2_withdrawing(struct gfs2_sbd *sdp)
++{
++ return test_bit(SDF_WITHDRAWING, &sdp->sd_flags) &&
++ !test_bit(SDF_WITHDRAWN, &sdp->sd_flags);
++}
++
+ #define gfs2_tune_get(sdp, field) \
+ gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field)
+
--- /dev/null
+From 2ffed5290b3bff7562d29fd06621be4705704242 Mon Sep 17 00:00:00 2001
+From: Bob Peterson <rpeterso@redhat.com>
+Date: Thu, 15 Oct 2020 11:16:48 -0500
+Subject: gfs2: Only access gl_delete for iopen glocks
+
+From: Bob Peterson <rpeterso@redhat.com>
+
+commit 2ffed5290b3bff7562d29fd06621be4705704242 upstream.
+
+Only initialize gl_delete for iopen glocks, but more importantly, only access
+it for iopen glocks in flush_delete_work: flush_delete_work is called for
+different types of glocks including rgrp glocks, and those use gl_vm which is
+in a union with gl_delete. Without this fix, we'll end up clobbering gl_vm,
+which results in general memory corruption.
+
+Fixes: a0e3cc65fa29 ("gfs2: Turn gl_delete into a delayed work")
+Cc: stable@vger.kernel.org # v5.8+
+Signed-off-by: Bob Peterson <rpeterso@redhat.com>
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/gfs2/glock.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/fs/gfs2/glock.c
++++ b/fs/gfs2/glock.c
+@@ -1054,7 +1054,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp,
+ gl->gl_object = NULL;
+ gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
+ INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
+- INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
++ if (gl->gl_name.ln_type == LM_TYPE_IOPEN)
++ INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
+
+ mapping = gfs2_glock2aspace(gl);
+ if (mapping) {
+@@ -1906,9 +1907,11 @@ bool gfs2_delete_work_queued(const struc
+
+ static void flush_delete_work(struct gfs2_glock *gl)
+ {
+- if (cancel_delayed_work(&gl->gl_delete)) {
+- queue_delayed_work(gfs2_delete_workqueue,
+- &gl->gl_delete, 0);
++ if (gl->gl_name.ln_type == LM_TYPE_IOPEN) {
++ if (cancel_delayed_work(&gl->gl_delete)) {
++ queue_delayed_work(gfs2_delete_workqueue,
++ &gl->gl_delete, 0);
++ }
+ }
+ gfs2_glock_queue_work(gl, 0);
+ }
--- /dev/null
+From b8a533f3c24b3b8f1fdbefc5ada6a7d5733d63e6 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Nuno=20S=C3=A1?= <nuno.sa@analog.com>
+Date: Fri, 25 Sep 2020 11:10:45 +0200
+Subject: iio: ad7292: Fix of_node refcounting
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Nuno Sá <nuno.sa@analog.com>
+
+commit b8a533f3c24b3b8f1fdbefc5ada6a7d5733d63e6 upstream.
+
+When returning or breaking early from a
+`for_each_available_child_of_node()` loop, we need to explicitly call
+`of_node_put()` on the child node to possibly release the node.
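+
+The general shape of the fix (an illustrative sketch with a made-up
+parent node `np` and property name, not this driver's exact code):
+
+    for_each_available_child_of_node(np, child) {
+        if (of_property_read_bool(child, "some-property")) {
+            /* drop the reference the iterator took for us */
+            of_node_put(child);
+            break;
+        }
+    }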
+
+Fixes: 506d2e317a0a0 ("iio: adc: Add driver support for AD7292")
+Signed-off-by: Nuno Sá <nuno.sa@analog.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200925091045.302-2-nuno.sa@analog.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/adc/ad7292.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/iio/adc/ad7292.c
++++ b/drivers/iio/adc/ad7292.c
+@@ -310,8 +310,10 @@ static int ad7292_probe(struct spi_devic
+
+ for_each_available_child_of_node(spi->dev.of_node, child) {
+ diff_channels = of_property_read_bool(child, "diff-channels");
+- if (diff_channels)
++ if (diff_channels) {
++ of_node_put(child);
+ break;
++ }
+ }
+
+ if (diff_channels) {
--- /dev/null
+From 1a198794451449113fa86994ed491d6986802c23 Mon Sep 17 00:00:00 2001
+From: Eugen Hristev <eugen.hristev@microchip.com>
+Date: Wed, 23 Sep 2020 15:17:48 +0300
+Subject: iio: adc: at91-sama5d2_adc: fix DMA conversion crash
+
+From: Eugen Hristev <eugen.hristev@microchip.com>
+
+commit 1a198794451449113fa86994ed491d6986802c23 upstream.
+
+After the move of the postenable code to preenable, the DMA start was
+done before the DMA init, which is not correct.
+The DMA is initialized in set_watermark. Because of this, we need to call
+the DMA start functions in set_watermark, after the DMA init, instead of
+in the preenable hook, where the DMA is not properly set up yet.
+
+Fixes: f3c034f61775 ("iio: at91-sama5d2_adc: adjust iio_triggered_buffer_{predisable,postenable} positions")
+Signed-off-by: Eugen Hristev <eugen.hristev@microchip.com>
+Link: https://lore.kernel.org/r/20200923121748.49384-1-eugen.hristev@microchip.com
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/adc/at91-sama5d2_adc.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+--- a/drivers/iio/adc/at91-sama5d2_adc.c
++++ b/drivers/iio/adc/at91-sama5d2_adc.c
+@@ -884,7 +884,7 @@ static bool at91_adc_current_chan_is_tou
+ AT91_SAMA5D2_MAX_CHAN_IDX + 1);
+ }
+
+-static int at91_adc_buffer_preenable(struct iio_dev *indio_dev)
++static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
+ {
+ int ret;
+ u8 bit;
+@@ -901,7 +901,7 @@ static int at91_adc_buffer_preenable(str
+ /* we continue with the triggered buffer */
+ ret = at91_adc_dma_start(indio_dev);
+ if (ret) {
+- dev_err(&indio_dev->dev, "buffer postenable failed\n");
++ dev_err(&indio_dev->dev, "buffer prepare failed\n");
+ return ret;
+ }
+
+@@ -989,7 +989,6 @@ static int at91_adc_buffer_postdisable(s
+ }
+
+ static const struct iio_buffer_setup_ops at91_buffer_setup_ops = {
+- .preenable = &at91_adc_buffer_preenable,
+ .postdisable = &at91_adc_buffer_postdisable,
+ };
+
+@@ -1563,6 +1562,7 @@ static void at91_adc_dma_disable(struct
+ static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
+ {
+ struct at91_adc_state *st = iio_priv(indio_dev);
++ int ret;
+
+ if (val > AT91_HWFIFO_MAX_SIZE)
+ return -EINVAL;
+@@ -1586,7 +1586,15 @@ static int at91_adc_set_watermark(struct
+ else if (val > 1)
+ at91_adc_dma_init(to_platform_device(&indio_dev->dev));
+
+- return 0;
++ /*
++ * We can start the DMA only after setting the watermark and
++ * having the DMA initialization completed
++ */
++ ret = at91_adc_buffer_prepare(indio_dev);
++ if (ret)
++ at91_adc_dma_disable(to_platform_device(&indio_dev->dev));
++
++ return ret;
+ }
+
+ static int at91_adc_update_scan_mode(struct iio_dev *indio_dev,
--- /dev/null
+From da4410d4078ba4ead9d6f1027d6db77c5a74ecee Mon Sep 17 00:00:00 2001
+From: Tobias Jordan <kernel@cdqe.de>
+Date: Sat, 26 Sep 2020 18:19:46 +0200
+Subject: iio: adc: gyroadc: fix leak of device node iterator
+
+From: Tobias Jordan <kernel@cdqe.de>
+
+commit da4410d4078ba4ead9d6f1027d6db77c5a74ecee upstream.
+
+Add missing of_node_put calls when exiting the for_each_child_of_node
+loop in rcar_gyroadc_parse_subdevs early.
+
+Also add goto-exception handling for the error paths in that loop.
+
+Fixes: 059c53b32329 ("iio: adc: Add Renesas GyroADC driver")
+Signed-off-by: Tobias Jordan <kernel@cdqe.de>
+Link: https://lore.kernel.org/r/20200926161946.GA10240@agrajag.zerfleddert.de
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/adc/rcar-gyroadc.c | 21 +++++++++++++++------
+ 1 file changed, 15 insertions(+), 6 deletions(-)
+
+--- a/drivers/iio/adc/rcar-gyroadc.c
++++ b/drivers/iio/adc/rcar-gyroadc.c
+@@ -357,7 +357,7 @@ static int rcar_gyroadc_parse_subdevs(st
+ num_channels = ARRAY_SIZE(rcar_gyroadc_iio_channels_3);
+ break;
+ default:
+- return -EINVAL;
++ goto err_e_inval;
+ }
+
+ /*
+@@ -374,7 +374,7 @@ static int rcar_gyroadc_parse_subdevs(st
+ dev_err(dev,
+ "Failed to get child reg property of ADC \"%pOFn\".\n",
+ child);
+- return ret;
++ goto err_of_node_put;
+ }
+
+ /* Channel number is too high. */
+@@ -382,7 +382,7 @@ static int rcar_gyroadc_parse_subdevs(st
+ dev_err(dev,
+ "Only %i channels supported with %pOFn, but reg = <%i>.\n",
+ num_channels, child, reg);
+- return -EINVAL;
++ goto err_e_inval;
+ }
+ }
+
+@@ -391,7 +391,7 @@ static int rcar_gyroadc_parse_subdevs(st
+ dev_err(dev,
+ "Channel %i uses different ADC mode than the rest.\n",
+ reg);
+- return -EINVAL;
++ goto err_e_inval;
+ }
+
+ /* Channel is valid, grab the regulator. */
+@@ -401,7 +401,8 @@ static int rcar_gyroadc_parse_subdevs(st
+ if (IS_ERR(vref)) {
+ dev_dbg(dev, "Channel %i 'vref' supply not connected.\n",
+ reg);
+- return PTR_ERR(vref);
++ ret = PTR_ERR(vref);
++ goto err_of_node_put;
+ }
+
+ priv->vref[reg] = vref;
+@@ -425,8 +426,10 @@ static int rcar_gyroadc_parse_subdevs(st
+ * attached to the GyroADC at a time, so if we found it,
+ * we can stop parsing here.
+ */
+- if (childmode == RCAR_GYROADC_MODE_SELECT_1_MB88101A)
++ if (childmode == RCAR_GYROADC_MODE_SELECT_1_MB88101A) {
++ of_node_put(child);
+ break;
++ }
+ }
+
+ if (first) {
+@@ -435,6 +438,12 @@ static int rcar_gyroadc_parse_subdevs(st
+ }
+
+ return 0;
++
++err_e_inval:
++ ret = -EINVAL;
++err_of_node_put:
++ of_node_put(child);
++ return ret;
+ }
+
+ static void rcar_gyroadc_deinit_supplies(struct iio_dev *indio_dev)
--- /dev/null
+From 39e91f3be4cba51c1560bcda3a343ed1f64dc916 Mon Sep 17 00:00:00 2001
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Date: Wed, 22 Jul 2020 16:51:00 +0100
+Subject: iio:adc:ti-adc0832 Fix alignment issue with timestamp
+
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+
+commit 39e91f3be4cba51c1560bcda3a343ed1f64dc916 upstream.
+
+One of a class of bugs pointed out by Lars in a recent review.
+iio_push_to_buffers_with_timestamp assumes the buffer used is aligned
+to the size of the timestamp (8 bytes). This is not guaranteed in
+this driver which uses an array of smaller elements on the stack.
+
+We fix this issue by moving to a suitable structure in the iio_priv()
+data with alignment explicitly requested. This data is allocated
+with kzalloc so no data can leak apart from previous readings.
+Note that previously no data could leak at all, including previous
+readings, but I don't think potentially leaking previous readings, as
+this now does, is an issue.
+
+In this case the positioning of the timestamp depends on what other
+channels are enabled. As such we cannot use a structure to make the
+alignment explicit, as it would be misleading by suggesting only one
+possible location for the timestamp.
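+
+The shape of the fix (a sketch of the approach, the full change is in
+the diff below) is therefore an 8-byte-aligned buffer in the iio_priv()
+data rather than on the stack:
+
+    struct adc0832 {
+        /* ... */
+        /* 16x 1 byte ADC data + 8 bytes timestamp */
+        u8 data[24] __aligned(8);
+        /* ... */
+    };
+
+    iio_push_to_buffers_with_timestamp(indio_dev, adc->data,
+                                       iio_get_time_ns(indio_dev));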
+
+Fixes: 815bbc87462a ("iio: ti-adc0832: add triggered buffer support")
+Reported-by: Lars-Peter Clausen <lars@metafoo.de>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Cc: Akinobu Mita <akinobu.mita@gmail.com>
+Cc: <Stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20200722155103.979802-25-jic23@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/adc/ti-adc0832.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/drivers/iio/adc/ti-adc0832.c
++++ b/drivers/iio/adc/ti-adc0832.c
+@@ -29,6 +29,12 @@ struct adc0832 {
+ struct regulator *reg;
+ struct mutex lock;
+ u8 mux_bits;
++ /*
++ * Max size needed: 16x 1 byte ADC data + 8 bytes timestamp
++ * May be shorter if not all channels are enabled subject
++ * to the timestamp remaining 8 byte aligned.
++ */
++ u8 data[24] __aligned(8);
+
+ u8 tx_buf[2] ____cacheline_aligned;
+ u8 rx_buf[2];
+@@ -200,7 +206,6 @@ static irqreturn_t adc0832_trigger_handl
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adc0832 *adc = iio_priv(indio_dev);
+- u8 data[24] = { }; /* 16x 1 byte ADC data + 8 bytes timestamp */
+ int scan_index;
+ int i = 0;
+
+@@ -218,10 +223,10 @@ static irqreturn_t adc0832_trigger_handl
+ goto out;
+ }
+
+- data[i] = ret;
++ adc->data[i] = ret;
+ i++;
+ }
+- iio_push_to_buffers_with_timestamp(indio_dev, data,
++ iio_push_to_buffers_with_timestamp(indio_dev, adc->data,
+ iio_get_time_ns(indio_dev));
+ out:
+ mutex_unlock(&adc->lock);
--- /dev/null
+From 293e809b2e8e608b65a949101aaf7c0bd1224247 Mon Sep 17 00:00:00 2001
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Date: Wed, 22 Jul 2020 16:51:01 +0100
+Subject: iio:adc:ti-adc12138 Fix alignment issue with timestamp
+
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+
+commit 293e809b2e8e608b65a949101aaf7c0bd1224247 upstream.
+
+One of a class of bugs pointed out by Lars in a recent review.
+iio_push_to_buffers_with_timestamp assumes the buffer used is aligned
+to the size of the timestamp (8 bytes). This is not guaranteed in
+this driver which uses an array of smaller elements on the stack.
+
+We move to a suitable structure in the iio_priv() data with alignment
+explicitly requested. This data is allocated with kzalloc so no
+data can leak apart from previous readings. Note that previously
+no leak at all could occur, but previous readings should never
+be a problem.
+
+In this case the timestamp location depends on what other channels
+are enabled. As such we can't use a structure without misleadingly
+suggesting only one possible timestamp location.
+
+Fixes: 50a6edb1b6e0 ("iio: adc: add ADC12130/ADC12132/ADC12138 ADC driver")
+Reported-by: Lars-Peter Clausen <lars@metafoo.de>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Cc: Akinobu Mita <akinobu.mita@gmail.com>
+Cc: <Stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20200722155103.979802-26-jic23@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/adc/ti-adc12138.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/drivers/iio/adc/ti-adc12138.c
++++ b/drivers/iio/adc/ti-adc12138.c
+@@ -47,6 +47,12 @@ struct adc12138 {
+ struct completion complete;
+ /* The number of cclk periods for the S/H's acquisition time */
+ unsigned int acquisition_time;
++ /*
++ * Maximum size needed: 16x 2 bytes ADC data + 8 bytes timestamp.
++ * Less may be need if not all channels are enabled, as long as
++ * the 8 byte alignment of the timestamp is maintained.
++ */
++ __be16 data[20] __aligned(8);
+
+ u8 tx_buf[2] ____cacheline_aligned;
+ u8 rx_buf[2];
+@@ -329,7 +335,6 @@ static irqreturn_t adc12138_trigger_hand
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adc12138 *adc = iio_priv(indio_dev);
+- __be16 data[20] = { }; /* 16x 2 bytes ADC data + 8 bytes timestamp */
+ __be16 trash;
+ int ret;
+ int scan_index;
+@@ -345,7 +350,7 @@ static irqreturn_t adc12138_trigger_hand
+ reinit_completion(&adc->complete);
+
+ ret = adc12138_start_and_read_conv(adc, scan_chan,
+- i ? &data[i - 1] : &trash);
++ i ? &adc->data[i - 1] : &trash);
+ if (ret) {
+ dev_warn(&adc->spi->dev,
+ "failed to start conversion\n");
+@@ -362,7 +367,7 @@ static irqreturn_t adc12138_trigger_hand
+ }
+
+ if (i) {
+- ret = adc12138_read_conv_data(adc, &data[i - 1]);
++ ret = adc12138_read_conv_data(adc, &adc->data[i - 1]);
+ if (ret) {
+ dev_warn(&adc->spi->dev,
+ "failed to get conversion data\n");
+@@ -370,7 +375,7 @@ static irqreturn_t adc12138_trigger_hand
+ }
+ }
+
+- iio_push_to_buffers_with_timestamp(indio_dev, data,
++ iio_push_to_buffers_with_timestamp(indio_dev, adc->data,
+ iio_get_time_ns(indio_dev));
+ out:
+ mutex_unlock(&adc->lock);
--- /dev/null
+From 10ab7cfd5522f0041028556dac864a003e158556 Mon Sep 17 00:00:00 2001
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Date: Wed, 22 Jul 2020 16:50:41 +0100
+Subject: iio:gyro:itg3200: Fix timestamp alignment and prevent data leak.
+
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+
+commit 10ab7cfd5522f0041028556dac864a003e158556 upstream.
+
+One of a class of bugs pointed out by Lars in a recent review.
+iio_push_to_buffers_with_timestamp assumes the buffer used is aligned
+to the size of the timestamp (8 bytes). This is not guaranteed in
+this driver which uses a 16 byte array of smaller elements on the stack.
+This is fixed by using an explicit C structure. As there are no
+holes in the structure, there is no possibility of data leakage
+in this case.
+
+The explicit alignment of ts is not strictly necessary but potentially
+makes the code slightly less fragile. It also removes the possibility
+of this being cut and pasted into another driver where the alignment
+isn't already true.
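+
+The resulting pattern (a sketch of the approach taken below) keeps the
+channel data and the timestamp in one structure, so the 8-byte slot
+that iio_push_to_buffers_with_timestamp() fills is guaranteed to exist
+and to be naturally aligned:
+
+    struct {
+        __be16 buf[ITG3200_SCAN_ELEMENTS];
+        s64 ts __aligned(8);
+    } scan;
+
+    iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);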
+
+Fixes: 36e0371e7764 ("iio:itg3200: Use iio_push_to_buffers_with_timestamp()")
+Reported-by: Lars-Peter Clausen <lars@metafoo.de>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Cc: <Stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20200722155103.979802-6-jic23@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/gyro/itg3200_buffer.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+--- a/drivers/iio/gyro/itg3200_buffer.c
++++ b/drivers/iio/gyro/itg3200_buffer.c
+@@ -46,13 +46,20 @@ static irqreturn_t itg3200_trigger_handl
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct itg3200 *st = iio_priv(indio_dev);
+- __be16 buf[ITG3200_SCAN_ELEMENTS + sizeof(s64)/sizeof(u16)];
++ /*
++ * Ensure correct alignment and padding including for the
++ * timestamp that may be inserted.
++ */
++ struct {
++ __be16 buf[ITG3200_SCAN_ELEMENTS];
++ s64 ts __aligned(8);
++ } scan;
+
+- int ret = itg3200_read_all_channels(st->i2c, buf);
++ int ret = itg3200_read_all_channels(st->i2c, scan.buf);
+ if (ret < 0)
+ goto error_ret;
+
+- iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp);
++ iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
--- /dev/null
+From 6b0cc5dce0725ae8f1a2883514da731c55eeb35e Mon Sep 17 00:00:00 2001
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Date: Wed, 22 Jul 2020 16:50:53 +0100
+Subject: iio:imu:inv_mpu6050 Fix dma and ts alignment and data leak issues.
+
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+
+commit 6b0cc5dce0725ae8f1a2883514da731c55eeb35e upstream.
+
+This case is a bit different from the rest of the series. The driver
+was doing a regmap_bulk_read into a buffer that wasn't DMA safe,
+as it was on the stack with no guarantee of it being in a cacheline
+on its own. Fixing that also dealt with the data leak and
+alignment issues that Lars-Peter pointed out.
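+
+The usual idiom for this (shown as a simplified sketch, the actual
+change is in the diff below) is to move the transfer buffer into the
+driver state and mark it ____cacheline_aligned, so it sits in its own
+cacheline(s) and is safe to hand to the regmap/SPI core for DMA:
+
+    struct inv_mpu6050_state {
+        /* ... */
+        u8 data[INV_MPU6050_OUTPUT_DATA_SIZE] ____cacheline_aligned;
+    };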
+
+Also removed some unaligned handling as we are now aligned.
+
+The Fixes tag is for the DMA-safe buffer issue. Potentially we would
+need to backport the timestamp alignment further, but that is a totally
+different patch.
+
+Fixes: fd64df16f40e ("iio: imu: inv_mpu6050: Add SPI support for MPU6000")
+Reported-by: Lars-Peter Clausen <lars@metafoo.de>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Reviewed-by: Jean-Baptiste Maneyrol <jmaneyrol@invensense.com>
+Cc: <Stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20200722155103.979802-18-jic23@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h | 12 +++++++++---
+ drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c | 12 +++++-------
+ 2 files changed, 14 insertions(+), 10 deletions(-)
+
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+@@ -122,6 +122,13 @@ struct inv_mpu6050_chip_config {
+ u8 user_ctrl;
+ };
+
++/*
++ * Maximum of 6 + 6 + 2 + 7 (for MPU9x50) = 21 round up to 24 and plus 8.
++ * May be less if fewer channels are enabled, as long as the timestamp
++ * remains 8 byte aligned
++ */
++#define INV_MPU6050_OUTPUT_DATA_SIZE 32
++
+ /**
+ * struct inv_mpu6050_hw - Other important hardware information.
+ * @whoami: Self identification byte from WHO_AM_I register
+@@ -165,6 +172,7 @@ struct inv_mpu6050_hw {
+ * @magn_raw_to_gauss: coefficient to convert mag raw value to Gauss.
+ * @magn_orient: magnetometer sensor chip orientation if available.
+ * @suspended_sensors: sensors mask of sensors turned off for suspend
++ * @data: dma safe buffer used for bulk reads.
+ */
+ struct inv_mpu6050_state {
+ struct mutex lock;
+@@ -190,6 +198,7 @@ struct inv_mpu6050_state {
+ s32 magn_raw_to_gauss[3];
+ struct iio_mount_matrix magn_orient;
+ unsigned int suspended_sensors;
++ u8 data[INV_MPU6050_OUTPUT_DATA_SIZE] ____cacheline_aligned;
+ };
+
+ /*register and associated bit definition*/
+@@ -334,9 +343,6 @@ struct inv_mpu6050_state {
+ #define INV_ICM20608_TEMP_OFFSET 8170
+ #define INV_ICM20608_TEMP_SCALE 3059976
+
+-/* 6 + 6 + 2 + 7 (for MPU9x50) = 21 round up to 24 and plus 8 */
+-#define INV_MPU6050_OUTPUT_DATA_SIZE 32
+-
+ #define INV_MPU6050_REG_INT_PIN_CFG 0x37
+ #define INV_MPU6050_ACTIVE_HIGH 0x00
+ #define INV_MPU6050_ACTIVE_LOW 0x80
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+@@ -13,7 +13,6 @@
+ #include <linux/interrupt.h>
+ #include <linux/poll.h>
+ #include <linux/math64.h>
+-#include <asm/unaligned.h>
+ #include "inv_mpu_iio.h"
+
+ /**
+@@ -121,7 +120,6 @@ irqreturn_t inv_mpu6050_read_fifo(int ir
+ struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ size_t bytes_per_datum;
+ int result;
+- u8 data[INV_MPU6050_OUTPUT_DATA_SIZE];
+ u16 fifo_count;
+ s64 timestamp;
+ int int_status;
+@@ -160,11 +158,11 @@ irqreturn_t inv_mpu6050_read_fifo(int ir
+ * read fifo_count register to know how many bytes are inside the FIFO
+ * right now
+ */
+- result = regmap_bulk_read(st->map, st->reg->fifo_count_h, data,
+- INV_MPU6050_FIFO_COUNT_BYTE);
++ result = regmap_bulk_read(st->map, st->reg->fifo_count_h,
++ st->data, INV_MPU6050_FIFO_COUNT_BYTE);
+ if (result)
+ goto end_session;
+- fifo_count = get_unaligned_be16(&data[0]);
++ fifo_count = be16_to_cpup((__be16 *)&st->data[0]);
+
+ /*
+ * Handle fifo overflow by resetting fifo.
+@@ -182,7 +180,7 @@ irqreturn_t inv_mpu6050_read_fifo(int ir
+ inv_mpu6050_update_period(st, pf->timestamp, nb);
+ for (i = 0; i < nb; ++i) {
+ result = regmap_bulk_read(st->map, st->reg->fifo_r_w,
+- data, bytes_per_datum);
++ st->data, bytes_per_datum);
+ if (result)
+ goto flush_fifo;
+ /* skip first samples if needed */
+@@ -191,7 +189,7 @@ irqreturn_t inv_mpu6050_read_fifo(int ir
+ continue;
+ }
+ timestamp = inv_mpu6050_get_timestamp(st);
+- iio_push_to_buffers_with_timestamp(indio_dev, data, timestamp);
++ iio_push_to_buffers_with_timestamp(indio_dev, st->data, timestamp);
+ }
+
+ end_session:
--- /dev/null
+From f71e41e23e129640f620b65fc362a6da02580310 Mon Sep 17 00:00:00 2001
+From: Tom Rix <trix@redhat.com>
+Date: Sun, 9 Aug 2020 10:55:51 -0700
+Subject: iio:imu:st_lsm6dsx: check st_lsm6dsx_shub_read_output return
+
+From: Tom Rix <trix@redhat.com>
+
+commit f71e41e23e129640f620b65fc362a6da02580310 upstream.
+
+Potential error return is not checked. This can lead to use
+of undefined data.
+
+Detected by clang static analysis.
+
+st_lsm6dsx_shub.c:540:8: warning: Assigned value is garbage or undefined
+ *val = (s16)le16_to_cpu(*((__le16 *)data));
+ ^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Fixes: c91c1c844ebd ("iio: imu: st_lsm6dsx: add i2c embedded controller support")
+Signed-off-by: Tom Rix <trix@redhat.com>
+Cc: <Stable@vger.kernel.org>
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Link: https://lore.kernel.org/r/20200809175551.6794-1-trix@redhat.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+@@ -313,6 +313,8 @@ st_lsm6dsx_shub_read(struct st_lsm6dsx_s
+
+ err = st_lsm6dsx_shub_read_output(hw, data,
+ len & ST_LS6DSX_READ_OP_MASK);
++ if (err < 0)
++ return err;
+
+ st_lsm6dsx_shub_master_enable(sensor, false);
+
--- /dev/null
+From c14edb4d0bdc53f969ea84c7f384472c28b1a9f8 Mon Sep 17 00:00:00 2001
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Date: Wed, 22 Jul 2020 16:50:52 +0100
+Subject: iio:imu:st_lsm6dsx Fix alignment and data leak issues
+
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+
+commit c14edb4d0bdc53f969ea84c7f384472c28b1a9f8 upstream.
+
+One of a class of bugs pointed out by Lars in a recent review.
+iio_push_to_buffers_with_timestamp assumes the buffer used is aligned
+to the size of the timestamp (8 bytes). This is not guaranteed in
+this driver which uses an array of smaller elements on the stack.
+As Lars also noted this anti pattern can involve a leak of data to
+userspace and that indeed can happen here. We close both issues by
+moving to an array of suitable structures in the iio_priv() data.
+
+This data is allocated with kzalloc so no data can leak apart from
+previous readings.
+
+For the tagged path the data is aligned by using __aligned(8) for
+the buffer on the stack.
+
+There has been a lot of churn in this driver, so likely backports
+may be needed for stable.
+
+Fixes: 290a6ce11d93 ("iio: imu: add support to lsm6dsx driver")
+Reported-by: Lars-Peter Clausen <lars@metafoo.de>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Cc: Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+Cc: <Stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20200722155103.979802-17-jic23@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h | 6 +++
+ drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c | 42 +++++++++++++++----------
+ 2 files changed, 32 insertions(+), 16 deletions(-)
+
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+@@ -383,6 +383,7 @@ struct st_lsm6dsx_sensor {
+ * @iio_devs: Pointers to acc/gyro iio_dev instances.
+ * @settings: Pointer to the specific sensor settings in use.
+ * @orientation: sensor chip orientation relative to main hardware.
++ * @scan: Temporary buffers used to align data before iio_push_to_buffers()
+ */
+ struct st_lsm6dsx_hw {
+ struct device *dev;
+@@ -411,6 +412,11 @@ struct st_lsm6dsx_hw {
+ const struct st_lsm6dsx_settings *settings;
+
+ struct iio_mount_matrix orientation;
++ /* Ensure natural alignment of buffer elements */
++ struct {
++ __le16 channels[3];
++ s64 ts __aligned(8);
++ } scan[3];
+ };
+
+ static __maybe_unused const struct iio_event_spec st_lsm6dsx_event = {
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+@@ -353,9 +353,6 @@ int st_lsm6dsx_read_fifo(struct st_lsm6d
+ int err, sip, acc_sip, gyro_sip, ts_sip, ext_sip, read_len, offset;
+ u16 fifo_len, pattern_len = hw->sip * ST_LSM6DSX_SAMPLE_SIZE;
+ u16 fifo_diff_mask = hw->settings->fifo_ops.fifo_diff.mask;
+- u8 gyro_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
+- u8 acc_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
+- u8 ext_buff[ST_LSM6DSX_IIO_BUFF_SIZE];
+ bool reset_ts = false;
+ __le16 fifo_status;
+ s64 ts = 0;
+@@ -416,19 +413,22 @@ int st_lsm6dsx_read_fifo(struct st_lsm6d
+
+ while (acc_sip > 0 || gyro_sip > 0 || ext_sip > 0) {
+ if (gyro_sip > 0 && !(sip % gyro_sensor->decimator)) {
+- memcpy(gyro_buff, &hw->buff[offset],
+- ST_LSM6DSX_SAMPLE_SIZE);
+- offset += ST_LSM6DSX_SAMPLE_SIZE;
++ memcpy(hw->scan[ST_LSM6DSX_ID_GYRO].channels,
++ &hw->buff[offset],
++ sizeof(hw->scan[ST_LSM6DSX_ID_GYRO].channels));
++ offset += sizeof(hw->scan[ST_LSM6DSX_ID_GYRO].channels);
+ }
+ if (acc_sip > 0 && !(sip % acc_sensor->decimator)) {
+- memcpy(acc_buff, &hw->buff[offset],
+- ST_LSM6DSX_SAMPLE_SIZE);
+- offset += ST_LSM6DSX_SAMPLE_SIZE;
++ memcpy(hw->scan[ST_LSM6DSX_ID_ACC].channels,
++ &hw->buff[offset],
++ sizeof(hw->scan[ST_LSM6DSX_ID_ACC].channels));
++ offset += sizeof(hw->scan[ST_LSM6DSX_ID_ACC].channels);
+ }
+ if (ext_sip > 0 && !(sip % ext_sensor->decimator)) {
+- memcpy(ext_buff, &hw->buff[offset],
+- ST_LSM6DSX_SAMPLE_SIZE);
+- offset += ST_LSM6DSX_SAMPLE_SIZE;
++ memcpy(hw->scan[ST_LSM6DSX_ID_EXT0].channels,
++ &hw->buff[offset],
++ sizeof(hw->scan[ST_LSM6DSX_ID_EXT0].channels));
++ offset += sizeof(hw->scan[ST_LSM6DSX_ID_EXT0].channels);
+ }
+
+ if (ts_sip-- > 0) {
+@@ -458,19 +458,22 @@ int st_lsm6dsx_read_fifo(struct st_lsm6d
+ if (gyro_sip > 0 && !(sip % gyro_sensor->decimator)) {
+ iio_push_to_buffers_with_timestamp(
+ hw->iio_devs[ST_LSM6DSX_ID_GYRO],
+- gyro_buff, gyro_sensor->ts_ref + ts);
++ &hw->scan[ST_LSM6DSX_ID_GYRO],
++ gyro_sensor->ts_ref + ts);
+ gyro_sip--;
+ }
+ if (acc_sip > 0 && !(sip % acc_sensor->decimator)) {
+ iio_push_to_buffers_with_timestamp(
+ hw->iio_devs[ST_LSM6DSX_ID_ACC],
+- acc_buff, acc_sensor->ts_ref + ts);
++ &hw->scan[ST_LSM6DSX_ID_ACC],
++ acc_sensor->ts_ref + ts);
+ acc_sip--;
+ }
+ if (ext_sip > 0 && !(sip % ext_sensor->decimator)) {
+ iio_push_to_buffers_with_timestamp(
+ hw->iio_devs[ST_LSM6DSX_ID_EXT0],
+- ext_buff, ext_sensor->ts_ref + ts);
++ &hw->scan[ST_LSM6DSX_ID_EXT0],
++ ext_sensor->ts_ref + ts);
+ ext_sip--;
+ }
+ sip++;
+@@ -555,7 +558,14 @@ int st_lsm6dsx_read_tagged_fifo(struct s
+ {
+ u16 pattern_len = hw->sip * ST_LSM6DSX_TAGGED_SAMPLE_SIZE;
+ u16 fifo_len, fifo_diff_mask;
+- u8 iio_buff[ST_LSM6DSX_IIO_BUFF_SIZE], tag;
++ /*
++ * Alignment needed as this can ultimately be passed to a
++ * call to iio_push_to_buffers_with_timestamp() which
++ * must be passed a buffer that is aligned to 8 bytes so
++ * as to allow insertion of a naturally aligned timestamp.
++ */
++ u8 iio_buff[ST_LSM6DSX_IIO_BUFF_SIZE] __aligned(8);
++ u8 tag;
+ bool reset_ts = false;
+ int i, err, read_len;
+ __le16 fifo_status;
--- /dev/null
+From 0456ecf34d466261970e0ff92b2b9c78a4908637 Mon Sep 17 00:00:00 2001
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Date: Wed, 22 Jul 2020 16:50:44 +0100
+Subject: iio:light:si1145: Fix timestamp alignment and prevent data leak.
+
+From: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+
+commit 0456ecf34d466261970e0ff92b2b9c78a4908637 upstream.
+
+One of a class of bugs pointed out by Lars in a recent review.
+iio_push_to_buffers_with_timestamp assumes the buffer used is aligned
+to the size of the timestamp (8 bytes). This is not guaranteed in
+this driver which uses a 24 byte array of smaller elements on the stack.
+As Lars also noted this anti pattern can involve a leak of data to
+userspace and that indeed can happen here. We close both issues by
+moving to a suitable array in the iio_priv() data with alignment
+explicitly requested. This data is allocated with kzalloc so no
+data can leak apart from previous readings.
+
+Depending on the enabled channels, the location of the timestamp
+can be at various aligned offsets through the buffer. As such, any
+use of a structure to enforce this alignment would incorrectly
+suggest a single location for the timestamp. Comments adjusted to
+express this clearly in the code.
+
+Fixes: ac45e57f1590 ("iio: light: Add driver for Silabs si1132, si1141/2/3 and si1145/6/7 ambient light, uv index and proximity sensors")
+Reported-by: Lars-Peter Clausen <lars@metafoo.de>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Reviewed-by: Andy Shevchenko <andy.shevchenko@gmail.com>
+Cc: Peter Meerwald-Stadler <pmeerw@pmeerw.net>
+Cc: <Stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/20200722155103.979802-9-jic23@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/light/si1145.c | 19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+--- a/drivers/iio/light/si1145.c
++++ b/drivers/iio/light/si1145.c
+@@ -168,6 +168,7 @@ struct si1145_part_info {
+ * @part_info: Part information
+ * @trig: Pointer to iio trigger
+ * @meas_rate: Value of MEAS_RATE register. Only set in HW in auto mode
++ * @buffer: Used to pack data read from sensor.
+ */
+ struct si1145_data {
+ struct i2c_client *client;
+@@ -179,6 +180,14 @@ struct si1145_data {
+ bool autonomous;
+ struct iio_trigger *trig;
+ int meas_rate;
++ /*
++ * Ensure timestamp will be naturally aligned if present.
++ * Maximum buffer size (may be only partly used if not all
++ * channels are enabled):
++ * 6*2 bytes channels data + 4 bytes alignment +
++ * 8 bytes timestamp
++ */
++ u8 buffer[24] __aligned(8);
+ };
+
+ /*
+@@ -440,12 +449,6 @@ static irqreturn_t si1145_trigger_handle
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct si1145_data *data = iio_priv(indio_dev);
+- /*
+- * Maximum buffer size:
+- * 6*2 bytes channels data + 4 bytes alignment +
+- * 8 bytes timestamp
+- */
+- u8 buffer[24];
+ int i, j = 0;
+ int ret;
+ u8 irq_status = 0;
+@@ -478,7 +481,7 @@ static irqreturn_t si1145_trigger_handle
+
+ ret = i2c_smbus_read_i2c_block_data_or_emulated(
+ data->client, indio_dev->channels[i].address,
+- sizeof(u16) * run, &buffer[j]);
++ sizeof(u16) * run, &data->buffer[j]);
+ if (ret < 0)
+ goto done;
+ j += run * sizeof(u16);
+@@ -493,7 +496,7 @@ static irqreturn_t si1145_trigger_handle
+ goto done;
+ }
+
+- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
++ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_get_time_ns(indio_dev));
+
+ done:
--- /dev/null
+From b07c47bfab6f5c4c7182d23e854bbceaf7829c85 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Nuno=20S=C3=A1?= <nuno.sa@analog.com>
+Date: Fri, 25 Sep 2020 11:10:44 +0200
+Subject: iio: ltc2983: Fix of_node refcounting
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Nuno Sá <nuno.sa@analog.com>
+
+commit b07c47bfab6f5c4c7182d23e854bbceaf7829c85 upstream.
+
+When returning or breaking early from a
+`for_each_available_child_of_node()` loop, we need to explicitly call
+`of_node_put()` on the child node to possibly release the node.
+
+Fixes: f110f3188e563 ("iio: temperature: Add support for LTC2983")
+Signed-off-by: Nuno Sá <nuno.sa@analog.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200925091045.302-1-nuno.sa@analog.com
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/temperature/ltc2983.c | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+--- a/drivers/iio/temperature/ltc2983.c
++++ b/drivers/iio/temperature/ltc2983.c
+@@ -1285,18 +1285,20 @@ static int ltc2983_parse_dt(struct ltc29
+ ret = of_property_read_u32(child, "reg", &sensor.chan);
+ if (ret) {
+ dev_err(dev, "reg property must given for child nodes\n");
+- return ret;
++ goto put_child;
+ }
+
+ /* check if we have a valid channel */
+ if (sensor.chan < LTC2983_MIN_CHANNELS_NR ||
+ sensor.chan > LTC2983_MAX_CHANNELS_NR) {
++ ret = -EINVAL;
+ dev_err(dev,
+ "chan:%d must be from 1 to 20\n", sensor.chan);
+- return -EINVAL;
++ goto put_child;
+ } else if (channel_avail_mask & BIT(sensor.chan)) {
++ ret = -EINVAL;
+ dev_err(dev, "chan:%d already in use\n", sensor.chan);
+- return -EINVAL;
++ goto put_child;
+ }
+
+ ret = of_property_read_u32(child, "adi,sensor-type",
+@@ -1304,7 +1306,7 @@ static int ltc2983_parse_dt(struct ltc29
+ if (ret) {
+ dev_err(dev,
+ "adi,sensor-type property must given for child nodes\n");
+- return ret;
++ goto put_child;
+ }
+
+ dev_dbg(dev, "Create new sensor, type %u, chann %u",
+@@ -1334,13 +1336,15 @@ static int ltc2983_parse_dt(struct ltc29
+ st->sensors[chan] = ltc2983_adc_new(child, st, &sensor);
+ } else {
+ dev_err(dev, "Unknown sensor type %d\n", sensor.type);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto put_child;
+ }
+
+ if (IS_ERR(st->sensors[chan])) {
+ dev_err(dev, "Failed to create sensor %ld",
+ PTR_ERR(st->sensors[chan]));
+- return PTR_ERR(st->sensors[chan]);
++ ret = PTR_ERR(st->sensors[chan]);
++ goto put_child;
+ }
+ /* set generic sensor parameters */
+ st->sensors[chan]->chan = sensor.chan;
+@@ -1351,6 +1355,9 @@ static int ltc2983_parse_dt(struct ltc29
+ }
+
+ return 0;
++put_child:
++ of_node_put(child);
++ return ret;
+ }
+
+ static int ltc2983_setup(struct ltc2983_data *st, bool assign_iio)
--- /dev/null
+From c8b5e2600a2cfa1cdfbecf151afd67aee227381d Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Sun, 25 Oct 2020 13:53:26 -0600
+Subject: io_uring: use type appropriate io_kiocb handler for double poll
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit c8b5e2600a2cfa1cdfbecf151afd67aee227381d upstream.
+
+io_poll_double_wake() is called for both request types: pure poll
+requests and internal polls. This means that we should be using the
+right handler based on the request type. Use the one that the original
+caller already assigned for the waitqueue handling, as that will always
+match the correct type.
+
+Cc: stable@vger.kernel.org # v5.8+
+Reported-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/io_uring.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -4786,8 +4786,10 @@ static int io_poll_double_wake(struct wa
+ /* make sure double remove sees this as being gone */
+ wait->private = NULL;
+ spin_unlock(&poll->head->lock);
+- if (!done)
+- __io_async_wake(req, poll, mask, io_poll_task_func);
++ if (!done) {
++ /* use wait func handler, so it matches the rq type */
++ poll->wait.func(&poll->wait, mode, sync, key);
++ }
+ }
+ refcount_dec(&req->refs);
+ return 1;
--- /dev/null
+From 7487abbe85afd02c35c283315cefc5e19c28d40f Mon Sep 17 00:00:00 2001
+From: Paul Cercueil <paul@crapouillou.net>
+Date: Sun, 6 Sep 2020 21:29:21 +0200
+Subject: MIPS: configs: lb60: Fix defconfig not selecting correct board
+
+From: Paul Cercueil <paul@crapouillou.net>
+
+commit 7487abbe85afd02c35c283315cefc5e19c28d40f upstream.
+
+Since INGENIC_GENERIC_BOARD was introduced, the JZ4740_QI_LB60 option
+is no longer the default, so the symbol has to be selected by the
+defconfig, otherwise the kernel built will be for a generic Ingenic
+board and won't have the Device Tree blob built-in.
+
+Cc: stable@vger.kernel.org # v5.7
+Fixes: 62249209a772 ("MIPS: ingenic: Default to a generic board")
+Signed-off-by: Paul Cercueil <paul@crapouillou.net>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/configs/qi_lb60_defconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/mips/configs/qi_lb60_defconfig
++++ b/arch/mips/configs/qi_lb60_defconfig
+@@ -8,6 +8,7 @@ CONFIG_EMBEDDED=y
+ # CONFIG_COMPAT_BRK is not set
+ CONFIG_SLAB=y
+ CONFIG_MACH_INGENIC=y
++CONFIG_JZ4740_QI_LB60=y
+ CONFIG_HZ_100=y
+ # CONFIG_SECCOMP is not set
+ CONFIG_MODULES=y
--- /dev/null
+From cf3af0a4d3b62ab48e0b90180ea161d0f5d4953f Mon Sep 17 00:00:00 2001
+From: "Maciej W. Rozycki" <macro@linux-mips.org>
+Date: Wed, 14 Oct 2020 22:34:56 +0100
+Subject: MIPS: DEC: Restore bootmem reservation for firmware working memory area
+
+From: Maciej W. Rozycki <macro@linux-mips.org>
+
+commit cf3af0a4d3b62ab48e0b90180ea161d0f5d4953f upstream.
+
+Fix a crash on DEC platforms starting with:
+
+VFS: Mounted root (nfs filesystem) on device 0:11.
+Freeing unused PROM memory: 124k freed
+BUG: Bad page state in process swapper pfn:00001
+page:(ptrval) refcount:0 mapcount:-128 mapping:00000000 index:0x1 pfn:0x1
+flags: 0x0()
+raw: 00000000 00000100 00000122 00000000 00000001 00000000 ffffff7f 00000000
+page dumped because: nonzero mapcount
+Modules linked in:
+CPU: 0 PID: 1 Comm: swapper Not tainted 5.9.0-00858-g865c50e1d279 #1
+Stack : 8065dc48 0000000b 8065d2b8 9bc27dcc 80645bfc 9bc259a4 806a1b97 80703124
+ 80710000 8064a900 00000001 80099574 806b116c 1000ec00 9bc27d88 806a6f30
+ 00000000 00000000 80645bfc 00000000 31232039 80706ba4 2e392e35 8039f348
+ 2d383538 00000070 0000000a 35363867 00000000 806c2830 80710000 806b0000
+ 80710000 8064a900 00000001 81000000 00000000 00000000 8035af2c 80700000
+ ...
+Call Trace:
+[<8004bc5c>] show_stack+0x34/0x104
+[<8015675c>] bad_page+0xfc/0x128
+[<80157714>] free_pcppages_bulk+0x1f4/0x5dc
+[<801591cc>] free_unref_page+0xc0/0x130
+[<8015cb04>] free_reserved_area+0x144/0x1d8
+[<805abd78>] kernel_init+0x20/0x100
+[<80046070>] ret_from_kernel_thread+0x14/0x1c
+Disabling lock debugging due to kernel taint
+
+caused by an attempt to free bootmem space that, as of
+commit b93ddc4f9156 ("mips: Reserve memory for the kernel image resources"),
+is no longer reserved due to the removal of the generic MIPS arch code
+that used to reserve all the memory from the beginning of RAM up to the
+kernel load address.
+
+This memory does need to be reserved on DEC platforms however, as it is
+used by the REX firmware as its working area, as per the TURBOchannel
+firmware specification[1]:
+
+Table 2-2 REX Memory Regions
+-------------------------------------------------------------------------
+ Starting Ending
+Region Address Address Use
+-------------------------------------------------------------------------
+0 0xa0000000 0xa000ffff Restart block, exception vectors,
+ REX stack and bss
+1 0xa0010000 0xa0017fff Keyboard or tty drivers
+
+2 0xa0018000 0xa001f3ff 1) CRT driver
+
+3 0xa0020000 0xa002ffff boot, cnfg, init and t objects
+
+4 0xa0020000 0xa002ffff 64KB scratch space
+-------------------------------------------------------------------------
+1) Note that the last 3 Kbytes of region 2 are reserved for backward
+compatibility with previous system software.
+-------------------------------------------------------------------------
+
+(this table uses KSEG1 unmapped virtual addresses, which in the MIPS
+architecture are offset from physical addresses by a fixed value of
+0xa0000000 and therefore the regions referred to do correspond to the
+beginning of the physical address space) and we call into the firmware
+on several occasions throughout the bootstrap process. It is believed
+that pre-REX firmware used with non-TURBOchannel DEC platforms has the
+same requirements, as hinted at by note #1 cited.
+
+Recreate the discarded reservation then, in DEC platform code, removing
+the crash.
+
+References:
+
+[1] "TURBOchannel Firmware Specification", On-line version,
+ EK-TCAAD-FS-004, Digital Equipment Corporation, January 1993,
+ Chapter 2 "System Module Firmware", p. 2-5
+
+Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
+Fixes: b93ddc4f9156 ("mips: Reserve memory for the kernel image resources")
+Cc: stable@vger.kernel.org # v5.2+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+
+---
+ arch/mips/dec/setup.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/arch/mips/dec/setup.c
++++ b/arch/mips/dec/setup.c
+@@ -6,7 +6,7 @@
+ * for more details.
+ *
+ * Copyright (C) 1998 Harald Koerfgen
+- * Copyright (C) 2000, 2001, 2002, 2003, 2005 Maciej W. Rozycki
++ * Copyright (C) 2000, 2001, 2002, 2003, 2005, 2020 Maciej W. Rozycki
+ */
+ #include <linux/console.h>
+ #include <linux/export.h>
+@@ -15,6 +15,7 @@
+ #include <linux/ioport.h>
+ #include <linux/irq.h>
+ #include <linux/irqnr.h>
++#include <linux/memblock.h>
+ #include <linux/param.h>
+ #include <linux/percpu-defs.h>
+ #include <linux/sched.h>
+@@ -22,6 +23,7 @@
+ #include <linux/types.h>
+ #include <linux/pm.h>
+
++#include <asm/addrspace.h>
+ #include <asm/bootinfo.h>
+ #include <asm/cpu.h>
+ #include <asm/cpu-features.h>
+@@ -29,7 +31,9 @@
+ #include <asm/irq.h>
+ #include <asm/irq_cpu.h>
+ #include <asm/mipsregs.h>
++#include <asm/page.h>
+ #include <asm/reboot.h>
++#include <asm/sections.h>
+ #include <asm/time.h>
+ #include <asm/traps.h>
+ #include <asm/wbflush.h>
+@@ -146,6 +150,9 @@ void __init plat_mem_setup(void)
+
+ ioport_resource.start = ~0UL;
+ ioport_resource.end = 0UL;
++
++ /* Stay away from the firmware working memory area for now. */
++ memblock_reserve(PHYS_OFFSET, __pa_symbol(&_text) - PHYS_OFFSET);
+ }
+
+ /*
--- /dev/null
+From 6b3dccd48de8a4c650b01499a0b09d1e2279649e Mon Sep 17 00:00:00 2001
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Thu, 1 Oct 2020 18:58:56 -0400
+Subject: NFSD: Add missing NFSv2 .pc_func methods
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit 6b3dccd48de8a4c650b01499a0b09d1e2279649e upstream.
+
+There's no protection in nfsd_dispatch() against NULL .pc_func
+helpers. A malicious NFS client can trigger a crash by invoking the
+unused/unsupported NFSv2 ROOT or WRITECACHE procedures.
+
+The current NFSD dispatcher does not support returning a void reply
+to a non-NULL procedure, so the reply to both of these is wrong, for
+the moment.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfsd/nfsproc.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/fs/nfsd/nfsproc.c
++++ b/fs/nfsd/nfsproc.c
+@@ -118,6 +118,13 @@ done:
+ return nfsd_return_attrs(nfserr, resp);
+ }
+
++/* Obsolete, replaced by MNTPROC_MNT. */
++static __be32
++nfsd_proc_root(struct svc_rqst *rqstp)
++{
++ return nfs_ok;
++}
++
+ /*
+ * Look up a path name component
+ * Note: the dentry in the resp->fh may be negative if the file
+@@ -203,6 +210,13 @@ nfsd_proc_read(struct svc_rqst *rqstp)
+ return fh_getattr(&resp->fh, &resp->stat);
+ }
+
++/* Reserved */
++static __be32
++nfsd_proc_writecache(struct svc_rqst *rqstp)
++{
++ return nfs_ok;
++}
++
+ /*
+ * Write data to a file
+ * N.B. After this call resp->fh needs an fh_put
+@@ -617,6 +631,7 @@ static const struct svc_procedure nfsd_p
+ .pc_xdrressize = ST+AT,
+ },
+ [NFSPROC_ROOT] = {
++ .pc_func = nfsd_proc_root,
+ .pc_decode = nfssvc_decode_void,
+ .pc_encode = nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_void),
+@@ -654,6 +669,7 @@ static const struct svc_procedure nfsd_p
+ .pc_xdrressize = ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4,
+ },
+ [NFSPROC_WRITECACHE] = {
++ .pc_func = nfsd_proc_writecache,
+ .pc_decode = nfssvc_decode_void,
+ .pc_encode = nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_void),
--- /dev/null
+From b4868b44c5628995fdd8ef2e24dda73cef963a75 Mon Sep 17 00:00:00 2001
+From: Benjamin Coddington <bcodding@redhat.com>
+Date: Fri, 25 Sep 2020 15:48:39 -0400
+Subject: NFSv4: Wait for stateid updates after CLOSE/OPEN_DOWNGRADE
+
+From: Benjamin Coddington <bcodding@redhat.com>
+
+commit b4868b44c5628995fdd8ef2e24dda73cef963a75 upstream.
+
+Since commit 0e0cb35b417f ("NFSv4: Handle NFS4ERR_OLD_STATEID in
+CLOSE/OPEN_DOWNGRADE") the following livelock may occur if a CLOSE races
+with the update of the nfs_state:
+
+Process 1 Process 2 Server
+========= ========= ========
+ OPEN file
+ OPEN file
+ Reply OPEN (1)
+ Reply OPEN (2)
+ Update state (1)
+ CLOSE file (1)
+ Reply OLD_STATEID (1)
+ CLOSE file (2)
+ Reply CLOSE (-1)
+ Update state (2)
+ wait for state change
+ OPEN file
+ wake
+ CLOSE file
+ OPEN file
+ wake
+ CLOSE file
+ ...
+ ...
+
+We can avoid this situation by not issuing an immediate retry with a bumped
+seqid when CLOSE/OPEN_DOWNGRADE receives NFS4ERR_OLD_STATEID. Instead,
+take the same approach used by OPEN and wait at least 5 seconds for
+outstanding stateid updates to complete if we can detect that we're out of
+sequence.
+
+Note that after this change it is still possible (though unlikely) that
+CLOSE waits a full 5 seconds, bumps the seqid, and retries -- and that
+attempt races with another OPEN at the same time. In order to avoid this
+race (which would result in the livelock), update
+nfs_need_update_open_stateid() to handle the case where:
+ - the state is NFS_OPEN_STATE, and
+ - the stateid doesn't match the current open stateid
+
+Finally, nfs_need_update_open_stateid() is modified to be idempotent and
+renamed to better suit the purpose of signaling that the stateid passed
+is the next stateid in sequence.
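+
+As a sketch of the sequencing rule this relies on (a simplified form of the
+nfs4_stateid_is_next() helper added below; seqids start at 1, so a wrap from
+0xffffffff skips 0):
+
+    static bool seqid_is_next(u32 cur, u32 next)
+    {
+        /* e.g. cur = 7 -> next must be 8; cur = 0xffffffff -> next is 1 */
+        return next == cur + 1U || (next == 1U && cur == 0xffffffffU);
+    }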
+
+Fixes: 0e0cb35b417f ("NFSv4: Handle NFS4ERR_OLD_STATEID in CLOSE/OPEN_DOWNGRADE")
+Cc: stable@vger.kernel.org # v5.4+
+Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/nfs4_fs.h | 8 +++++
+ fs/nfs/nfs4proc.c | 81 ++++++++++++++++++++++++++++++-----------------------
+ fs/nfs/nfs4trace.h | 1
+ 3 files changed, 56 insertions(+), 34 deletions(-)
+
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -599,6 +599,14 @@ static inline bool nfs4_stateid_is_newer
+ return (s32)(be32_to_cpu(s1->seqid) - be32_to_cpu(s2->seqid)) > 0;
+ }
+
++static inline bool nfs4_stateid_is_next(const nfs4_stateid *s1, const nfs4_stateid *s2)
++{
++ u32 seq1 = be32_to_cpu(s1->seqid);
++ u32 seq2 = be32_to_cpu(s2->seqid);
++
++ return seq2 == seq1 + 1U || (seq2 == 1U && seq1 == 0xffffffffU);
++}
++
+ static inline bool nfs4_stateid_match_or_older(const nfs4_stateid *dst, const nfs4_stateid *src)
+ {
+ return nfs4_stateid_match_other(dst, src) &&
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1547,19 +1547,6 @@ static void nfs_state_log_update_open_st
+ wake_up_all(&state->waitq);
+ }
+
+-static void nfs_state_log_out_of_order_open_stateid(struct nfs4_state *state,
+- const nfs4_stateid *stateid)
+-{
+- u32 state_seqid = be32_to_cpu(state->open_stateid.seqid);
+- u32 stateid_seqid = be32_to_cpu(stateid->seqid);
+-
+- if (stateid_seqid == state_seqid + 1U ||
+- (stateid_seqid == 1U && state_seqid == 0xffffffffU))
+- nfs_state_log_update_open_stateid(state);
+- else
+- set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
+-}
+-
+ static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
+ {
+ struct nfs_client *clp = state->owner->so_server->nfs_client;
+@@ -1585,21 +1572,19 @@ static void nfs_test_and_clear_all_open_
+ * i.e. The stateid seqids have to be initialised to 1, and
+ * are then incremented on every state transition.
+ */
+-static bool nfs_need_update_open_stateid(struct nfs4_state *state,
++static bool nfs_stateid_is_sequential(struct nfs4_state *state,
+ const nfs4_stateid *stateid)
+ {
+- if (test_bit(NFS_OPEN_STATE, &state->flags) == 0 ||
+- !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
++ if (test_bit(NFS_OPEN_STATE, &state->flags)) {
++ /* The common case - we're updating to a new sequence number */
++ if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
++ nfs4_stateid_is_next(&state->open_stateid, stateid)) {
++ return true;
++ }
++ } else {
++ /* This is the first OPEN in this generation */
+ if (stateid->seqid == cpu_to_be32(1))
+- nfs_state_log_update_open_stateid(state);
+- else
+- set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
+- return true;
+- }
+-
+- if (nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
+- nfs_state_log_out_of_order_open_stateid(state, stateid);
+- return true;
++ return true;
+ }
+ return false;
+ }
+@@ -1673,16 +1658,16 @@ static void nfs_set_open_stateid_locked(
+ int status = 0;
+ for (;;) {
+
+- if (!nfs_need_update_open_stateid(state, stateid))
+- return;
+- if (!test_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
++ if (nfs_stateid_is_sequential(state, stateid))
+ break;
++
+ if (status)
+ break;
+ /* Rely on seqids for serialisation with NFSv4.0 */
+ if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
+ break;
+
++ set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
+ prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
+ /*
+ * Ensure we process the state changes in the same order
+@@ -1693,6 +1678,7 @@ static void nfs_set_open_stateid_locked(
+ spin_unlock(&state->owner->so_lock);
+ rcu_read_unlock();
+ trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
++
+ if (!signal_pending(current)) {
+ if (schedule_timeout(5*HZ) == 0)
+ status = -EAGAIN;
+@@ -3435,7 +3421,8 @@ static bool nfs4_refresh_open_old_statei
+ __be32 seqid_open;
+ u32 dst_seqid;
+ bool ret;
+- int seq;
++ int seq, status = -EAGAIN;
++ DEFINE_WAIT(wait);
+
+ for (;;) {
+ ret = false;
+@@ -3447,15 +3434,41 @@ static bool nfs4_refresh_open_old_statei
+ continue;
+ break;
+ }
++
++ write_seqlock(&state->seqlock);
+ seqid_open = state->open_stateid.seqid;
+- if (read_seqretry(&state->seqlock, seq))
+- continue;
+
+ dst_seqid = be32_to_cpu(dst->seqid);
+- if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) >= 0)
+- dst->seqid = cpu_to_be32(dst_seqid + 1);
+- else
++
++ /* Did another OPEN bump the state's seqid? try again: */
++ if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) {
+ dst->seqid = seqid_open;
++ write_sequnlock(&state->seqlock);
++ ret = true;
++ break;
++ }
++
++ /* server says we're behind but we haven't seen the update yet */
++ set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
++ prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
++ write_sequnlock(&state->seqlock);
++ trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
++
++ if (signal_pending(current))
++ status = -EINTR;
++ else
++ if (schedule_timeout(5*HZ) != 0)
++ status = 0;
++
++ finish_wait(&state->waitq, &wait);
++
++ if (!status)
++ continue;
++ if (status == -EINTR)
++ break;
++
++ /* we slept the whole 5 seconds, we must have lost a seqid */
++ dst->seqid = cpu_to_be32(dst_seqid + 1);
+ ret = true;
+ break;
+ }
+--- a/fs/nfs/nfs4trace.h
++++ b/fs/nfs/nfs4trace.h
+@@ -1511,6 +1511,7 @@ DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_set
+ DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_delegreturn);
+ DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update);
+ DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update_wait);
++DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_close_stateid_update_wait);
+
+ DECLARE_EVENT_CLASS(nfs4_getattr_event,
+ TP_PROTO(
--- /dev/null
+From 8c39076c276be0b31982e44654e2c2357473258a Mon Sep 17 00:00:00 2001
+From: Olga Kornievskaia <kolga@netapp.com>
+Date: Fri, 16 Oct 2020 09:25:45 -0400
+Subject: NFSv4.2: support EXCHGID4_FLAG_SUPP_FENCE_OPS 4.2 EXCHANGE_ID flag
+
+From: Olga Kornievskaia <kolga@netapp.com>
+
+commit 8c39076c276be0b31982e44654e2c2357473258a upstream.
+
+RFC 7862 introduced a new flag that either client or server is
+allowed to set: EXCHGID4_FLAG_SUPP_FENCE_OPS.
+
+The client needs to update its bitmask to allow for this flag value.
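+
+For reference, the new reply mask introduced below is just the existing one
+with the FENCE_OPS bit folded in:
+
+    /*
+     * EXCHGID4_2_FLAG_MASK_R = EXCHGID4_FLAG_MASK_R
+     *                        | EXCHGID4_FLAG_SUPP_FENCE_OPS
+     *                        = 0x80070103 | 0x00000004 = 0x80070107
+     */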
+
+v2: changed minor version argument to unsigned int
+
+Signed-off-by: Olga Kornievskaia <kolga@netapp.com>
+CC: <stable@vger.kernel.org>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/nfs4proc.c | 9 ++++++---
+ include/uapi/linux/nfs4.h | 3 +++
+ 2 files changed, 9 insertions(+), 3 deletions(-)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -8052,9 +8052,11 @@ int nfs4_proc_secinfo(struct inode *dir,
+ * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or
+ * DS flags set.
+ */
+-static int nfs4_check_cl_exchange_flags(u32 flags)
++static int nfs4_check_cl_exchange_flags(u32 flags, u32 version)
+ {
+- if (flags & ~EXCHGID4_FLAG_MASK_R)
++ if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R))
++ goto out_inval;
++ else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R))
+ goto out_inval;
+ if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
+ (flags & EXCHGID4_FLAG_USE_NON_PNFS))
+@@ -8467,7 +8469,8 @@ static int _nfs4_proc_exchange_id(struct
+ if (status != 0)
+ goto out;
+
+- status = nfs4_check_cl_exchange_flags(resp->flags);
++ status = nfs4_check_cl_exchange_flags(resp->flags,
++ clp->cl_mvops->minor_version);
+ if (status != 0)
+ goto out;
+
+--- a/include/uapi/linux/nfs4.h
++++ b/include/uapi/linux/nfs4.h
+@@ -139,6 +139,8 @@
+
+ #define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000
+ #define EXCHGID4_FLAG_CONFIRMED_R 0x80000000
++
++#define EXCHGID4_FLAG_SUPP_FENCE_OPS 0x00000004
+ /*
+ * Since the validity of these bits depends on whether
+ * they're set in the argument or response, have separate
+@@ -146,6 +148,7 @@
+ */
+ #define EXCHGID4_FLAG_MASK_A 0x40070103
+ #define EXCHGID4_FLAG_MASK_R 0x80070103
++#define EXCHGID4_2_FLAG_MASK_R 0x80070107
+
+ #define SEQ4_STATUS_CB_PATH_DOWN 0x00000001
+ #define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002
--- /dev/null
+From c118c7303ad528be8ff2aea8cd1ee15452c763f0 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Mon, 7 Sep 2020 13:42:09 +0000
+Subject: powerpc/32: Fix vmap stack - Do not activate MMU before reading task struct
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+commit c118c7303ad528be8ff2aea8cd1ee15452c763f0 upstream.
+
+We need r1 to be properly set before activating the MMU, so
+reading task_struct->stack must be done with the MMU off.
+
+This means we need an additional register to play with MSR
+bits while r11 now points to the stack. For that, move r10
+back to CR (as is already done for hash MMU) and use r10.
+
+We still don't have r1 correct when we activate the MMU;
+that is done in the following patch.
+
+Fixes: 028474876f47 ("powerpc/32: prepare for CONFIG_VMAP_STACK")
+Cc: stable@vger.kernel.org
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/a027d447022a006c9c4958ac734128e577a3c5c1.1599486108.git.christophe.leroy@csgroup.eu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/head_32.S | 6 ------
+ arch/powerpc/kernel/head_32.h | 31 ++++++-------------------------
+ 2 files changed, 6 insertions(+), 31 deletions(-)
+
+--- a/arch/powerpc/kernel/head_32.S
++++ b/arch/powerpc/kernel/head_32.S
+@@ -274,14 +274,8 @@ __secondary_hold_acknowledge:
+ DO_KVM 0x200
+ MachineCheck:
+ EXCEPTION_PROLOG_0
+-#ifdef CONFIG_VMAP_STACK
+- li r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
+- mtmsr r11
+- isync
+-#endif
+ #ifdef CONFIG_PPC_CHRP
+ mfspr r11, SPRN_SPRG_THREAD
+- tovirt_vmstack r11, r11
+ lwz r11, RTAS_SP(r11)
+ cmpwi cr1, r11, 0
+ bne cr1, 7f
+--- a/arch/powerpc/kernel/head_32.h
++++ b/arch/powerpc/kernel/head_32.h
+@@ -39,24 +39,13 @@
+ .endm
+
+ .macro EXCEPTION_PROLOG_1 for_rtas=0
+-#ifdef CONFIG_VMAP_STACK
+- .ifeq \for_rtas
+- li r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
+- mtmsr r11
+- isync
+- .endif
+ subi r11, r1, INT_FRAME_SIZE /* use r1 if kernel */
+-#else
+- tophys(r11,r1) /* use tophys(r1) if kernel */
+- subi r11, r11, INT_FRAME_SIZE /* alloc exc. frame */
+-#endif
+ beq 1f
+ mfspr r11,SPRN_SPRG_THREAD
+- tovirt_vmstack r11, r11
+ lwz r11,TASK_STACK-THREAD(r11)
+ addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE
+- tophys_novmstack r11, r11
+ 1:
++ tophys_novmstack r11, r11
+ #ifdef CONFIG_VMAP_STACK
+ mtcrf 0x7f, r11
+ bt 32 - THREAD_ALIGN_SHIFT, stack_overflow
+@@ -64,12 +53,11 @@
+ .endm
+
+ .macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0
+-#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
+-BEGIN_MMU_FTR_SECTION
++#ifdef CONFIG_VMAP_STACK
+ mtcr r10
+-FTR_SECTION_ELSE
+- stw r10, _CCR(r11)
+-ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
++ li r10, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
++ mtmsr r10
++ isync
+ #else
+ stw r10,_CCR(r11) /* save registers */
+ #endif
+@@ -77,11 +65,9 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HP
+ stw r12,GPR12(r11)
+ stw r9,GPR9(r11)
+ stw r10,GPR10(r11)
+-#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
+-BEGIN_MMU_FTR_SECTION
++#ifdef CONFIG_VMAP_STACK
+ mfcr r10
+ stw r10, _CCR(r11)
+-END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
+ #endif
+ mfspr r12,SPRN_SPRG_SCRATCH1
+ stw r12,GPR11(r11)
+@@ -97,11 +83,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_T
+ stw r10, _DSISR(r11)
+ .endif
+ lwz r9, SRR1(r12)
+-#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
+-BEGIN_MMU_FTR_SECTION
+ andi. r10, r9, MSR_PR
+-END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
+-#endif
+ lwz r12, SRR0(r12)
+ #else
+ mfspr r12,SPRN_SRR0
+@@ -328,7 +310,6 @@ label:
+ #ifdef CONFIG_VMAP_STACK
+ #ifdef CONFIG_SMP
+ mfspr r11, SPRN_SPRG_THREAD
+- tovirt(r11, r11)
+ lwz r11, TASK_CPU - THREAD(r11)
+ slwi r11, r11, 3
+ addis r11, r11, emergency_ctx@ha
--- /dev/null
+From da7bb43ab9da39bcfed0d146ce94e1f0cbae4ca0 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Mon, 7 Sep 2020 13:42:10 +0000
+Subject: powerpc/32: Fix vmap stack - Properly set r1 before activating MMU
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+commit da7bb43ab9da39bcfed0d146ce94e1f0cbae4ca0 upstream.
+
+We need r1 to be properly set before activating the MMU, otherwise any new
+exception taken while saving registers onto the stack in the exception
+prologs will use the user stack, which is wrong and will even lock up
+or crash when KUAP is selected.
+
+Do that by switching the meaning of r11 and r1 until we have saved r1
+to the stack: copy r1 into r11 and set up the new stack pointer in r1.
+To avoid complicating and impacting all generic and specific prolog
+code (and more), copy r1 back into r11 once r11 has been saved onto
+the stack.
+
+We could get rid of copying r1 back and forth at the cost of
+rewriting everything to use r1 instead of r11 all the way when
+CONFIG_VMAP_STACK is set, but the effort is probably not worth it.
+
+Fixes: 028474876f47 ("powerpc/32: prepare for CONFIG_VMAP_STACK")
+Cc: stable@vger.kernel.org
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/8f85e8752ac5af602db7237ef53d634f4f3d3892.1599486108.git.christophe.leroy@csgroup.eu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/head_32.h | 43 ++++++++++++++++++++++++++++--------------
+ 1 file changed, 29 insertions(+), 14 deletions(-)
+
+--- a/arch/powerpc/kernel/head_32.h
++++ b/arch/powerpc/kernel/head_32.h
+@@ -39,15 +39,24 @@
+ .endm
+
+ .macro EXCEPTION_PROLOG_1 for_rtas=0
++#ifdef CONFIG_VMAP_STACK
++ mr r11, r1
++ subi r1, r1, INT_FRAME_SIZE /* use r1 if kernel */
++ beq 1f
++ mfspr r1,SPRN_SPRG_THREAD
++ lwz r1,TASK_STACK-THREAD(r1)
++ addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
++#else
+ subi r11, r1, INT_FRAME_SIZE /* use r1 if kernel */
+ beq 1f
+ mfspr r11,SPRN_SPRG_THREAD
+ lwz r11,TASK_STACK-THREAD(r11)
+ addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE
++#endif
+ 1:
+ tophys_novmstack r11, r11
+ #ifdef CONFIG_VMAP_STACK
+- mtcrf 0x7f, r11
++ mtcrf 0x7f, r1
+ bt 32 - THREAD_ALIGN_SHIFT, stack_overflow
+ #endif
+ .endm
+@@ -62,6 +71,15 @@
+ stw r10,_CCR(r11) /* save registers */
+ #endif
+ mfspr r10, SPRN_SPRG_SCRATCH0
++#ifdef CONFIG_VMAP_STACK
++ stw r11,GPR1(r1)
++ stw r11,0(r1)
++ mr r11, r1
++#else
++ stw r1,GPR1(r11)
++ stw r1,0(r11)
++ tovirt(r1, r11) /* set new kernel sp */
++#endif
+ stw r12,GPR12(r11)
+ stw r9,GPR9(r11)
+ stw r10,GPR10(r11)
+@@ -89,9 +107,6 @@
+ mfspr r12,SPRN_SRR0
+ mfspr r9,SPRN_SRR1
+ #endif
+- stw r1,GPR1(r11)
+- stw r1,0(r11)
+- tovirt_novmstack r1, r11 /* set new kernel sp */
+ #ifdef CONFIG_40x
+ rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
+ #else
+@@ -309,19 +324,19 @@ label:
+ .macro vmap_stack_overflow_exception
+ #ifdef CONFIG_VMAP_STACK
+ #ifdef CONFIG_SMP
+- mfspr r11, SPRN_SPRG_THREAD
+- lwz r11, TASK_CPU - THREAD(r11)
+- slwi r11, r11, 3
+- addis r11, r11, emergency_ctx@ha
++ mfspr r1, SPRN_SPRG_THREAD
++ lwz r1, TASK_CPU - THREAD(r1)
++ slwi r1, r1, 3
++ addis r1, r1, emergency_ctx@ha
+ #else
+- lis r11, emergency_ctx@ha
++ lis r1, emergency_ctx@ha
+ #endif
+- lwz r11, emergency_ctx@l(r11)
+- cmpwi cr1, r11, 0
++ lwz r1, emergency_ctx@l(r1)
++ cmpwi cr1, r1, 0
+ bne cr1, 1f
+- lis r11, init_thread_union@ha
+- addi r11, r11, init_thread_union@l
+-1: addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE
++ lis r1, init_thread_union@ha
++ addi r1, r1, init_thread_union@l
++1: addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
+ EXCEPTION_PROLOG_2
+ SAVE_NVGPRS(r11)
+ addi r3, r1, STACK_FRAME_OVERHEAD
--- /dev/null
+From ec72024e35dddb88a81e40071c87ceb18b5ee835 Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
+Date: Wed, 7 Oct 2020 17:18:33 +0530
+Subject: powerpc/drmem: Make lmb_size 64 bit
+
+From: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+
+commit ec72024e35dddb88a81e40071c87ceb18b5ee835 upstream.
+
+Similar to commit 89c140bbaeee ("pseries: Fix 64 bit logical memory block panic")
+make sure different variables tracking lmb_size are updated to be 64 bit.
+
+This was found by code audit.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Acked-by: Nathan Lynch <nathanl@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20201007114836.282468-2-aneesh.kumar@linux.ibm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/include/asm/drmem.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/include/asm/drmem.h
++++ b/arch/powerpc/include/asm/drmem.h
+@@ -20,7 +20,7 @@ struct drmem_lmb {
+ struct drmem_lmb_info {
+ struct drmem_lmb *lmbs;
+ int n_lmbs;
+- u32 lmb_size;
++ u64 lmb_size;
+ };
+
+ extern struct drmem_lmb_info *drmem_info;
+@@ -80,7 +80,7 @@ struct of_drconf_cell_v2 {
+ #define DRCONF_MEM_RESERVED 0x00000080
+ #define DRCONF_MEM_HOTREMOVABLE 0x00000100
+
+-static inline u32 drmem_lmb_size(void)
++static inline u64 drmem_lmb_size(void)
+ {
+ return drmem_info->lmb_size;
+ }
--- /dev/null
+From 542db12a9c42d1ce70c45091765e02f74c129f43 Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Mon, 31 Aug 2020 07:58:19 +0000
+Subject: powerpc: Fix random segfault when freeing hugetlb range
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+commit 542db12a9c42d1ce70c45091765e02f74c129f43 upstream.
+
+The following random segfault is observed from time to time with the
+map_hugetlb selftest:
+
+root@localhost:~# ./map_hugetlb 1 19
+524288 kB hugepages
+Mapping 1 Mbytes
+Segmentation fault
+
+[ 31.219972] map_hugetlb[365]: segfault (11) at 117 nip 77974f8c lr 779a6834 code 1 in ld-2.23.so[77966000+21000]
+[ 31.220192] map_hugetlb[365]: code: 9421ffc0 480318d1 93410028 90010044 9361002c 93810030 93a10034 93c10038
+[ 31.220307] map_hugetlb[365]: code: 93e1003c 93210024 8123007c 81430038 <80e90004> 814a0004 7f443a14 813a0004
+[ 31.221911] BUG: Bad rss-counter state mm:(ptrval) type:MM_FILEPAGES val:33
+[ 31.229362] BUG: Bad rss-counter state mm:(ptrval) type:MM_ANONPAGES val:5
+
+This fault is due to hugetlb_free_pgd_range() freeing page tables
+that are also used by regular pages.
+
+As explained in the comment at the beginning of
+hugetlb_free_pgd_range(), the verification done in free_pgd_range()
+on floor and ceiling is not done here, which means
+hugetlb_free_pte_range() can free outside the expected range.
+
+As the verification cannot be done in hugetlb_free_pgd_range(), it
+must be done in hugetlb_free_pte_range().
+
+Fixes: b250c8c08c79 ("powerpc/8xx: Manage 512k huge pages as standard pages.")
+Cc: stable@vger.kernel.org
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/f0cb2a5477cd87d1eaadb128042e20aeb2bc2859.1598860677.git.christophe.leroy@csgroup.eu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/mm/hugetlbpage.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -330,10 +330,24 @@ static void free_hugepd_range(struct mmu
+ get_hugepd_cache_index(pdshift - shift));
+ }
+
+-static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr)
++static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
++ unsigned long addr, unsigned long end,
++ unsigned long floor, unsigned long ceiling)
+ {
++ unsigned long start = addr;
+ pgtable_t token = pmd_pgtable(*pmd);
+
++ start &= PMD_MASK;
++ if (start < floor)
++ return;
++ if (ceiling) {
++ ceiling &= PMD_MASK;
++ if (!ceiling)
++ return;
++ }
++ if (end - 1 > ceiling - 1)
++ return;
++
+ pmd_clear(pmd);
+ pte_free_tlb(tlb, token, addr);
+ mm_dec_nr_ptes(tlb->mm);
+@@ -363,7 +377,7 @@ static void hugetlb_free_pmd_range(struc
+ */
+ WARN_ON(!IS_ENABLED(CONFIG_PPC_8xx));
+
+- hugetlb_free_pte_range(tlb, pmd, addr);
++ hugetlb_free_pte_range(tlb, pmd, addr, end, floor, ceiling);
+
+ continue;
+ }
--- /dev/null
+From 1da4a0272c5469169f78cd76cf175ff984f52f06 Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Tue, 13 Oct 2020 15:37:40 +1100
+Subject: powerpc: Fix undetected data corruption with P9N DD2.1 VSX CI load emulation
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit 1da4a0272c5469169f78cd76cf175ff984f52f06 upstream.
+
+__get_user_atomic_128_aligned() stores to kaddr using stvx, which is a
+VMX store instruction, hence kaddr must be 16-byte aligned, otherwise
+the store won't occur as expected.
+
+Unfortunately, when we call __get_user_atomic_128_aligned() in
+p9_hmi_special_emu(), the buffer we pass as kaddr (i.e. vbuf) isn't
+guaranteed to be 16-byte aligned. This means that the write to vbuf in
+__get_user_atomic_128_aligned() has the bottom bits of the address
+truncated. This results in other local variables being
+overwritten. Also, vbuf will not contain the correct data, which results
+in the userspace emulation being wrong and hence in undetected user data
+corruption.
+
+In the past we've been mostly lucky as vbuf has ended up aligned but
+this is fragile and isn't always true. CONFIG_STACKPROTECTOR in
+particular can change the stack arrangement enough that our luck runs
+out.
+
+This issue only occurs on POWER9 Nimbus <= DD2.1 bare metal.
+
+The fix is to align vbuf to a 16 byte boundary.
+
+Fixes: 5080332c2c89 ("powerpc/64s: Add workaround for P9 vector CI load issue")
+Cc: stable@vger.kernel.org # v4.15+
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20201013043741.743413-1-mikey@neuling.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/traps.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -889,7 +889,7 @@ static void p9_hmi_special_emu(struct pt
+ {
+ unsigned int ra, rb, t, i, sel, instr, rc;
+ const void __user *addr;
+- u8 vbuf[16], *vdst;
++ u8 vbuf[16] __aligned(16), *vdst;
+ unsigned long ea, msr, msr_mask;
+ bool swap;
+
--- /dev/null
+From 8d0e2101274358d9b6b1f27232b40253ca48bab5 Mon Sep 17 00:00:00 2001
+From: Ganesh Goudar <ganeshgr@linux.ibm.com>
+Date: Fri, 9 Oct 2020 12:10:04 +0530
+Subject: powerpc/mce: Avoid nmi_enter/exit in real mode on pseries hash
+
+From: Ganesh Goudar <ganeshgr@linux.ibm.com>
+
+commit 8d0e2101274358d9b6b1f27232b40253ca48bab5 upstream.
+
+Use of nmi_enter/exit in the real mode handler causes the kernel to panic
+and reboot on injecting an SLB multihit on a pseries machine running in
+hash MMU mode, because these calls try to access memory outside the RMO
+region in real mode, where translation is disabled.
+
+Add a check to avoid using these calls on pseries machines running in
+hash MMU mode.
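+
+Equivalently, the condition added below skips nmi_enter()/nmi_exit() exactly
+when running as a hash-MMU pseries guest (a sketch of the logic, not part of
+the patch):
+
+    /* Real-mode MCE handling on an LPAR guest using the hash MMU must not
+     * touch memory outside the RMO region, so skip the NMI accounting there.
+     */
+    bool skip_nmi_accounting = firmware_has_feature(FW_FEATURE_LPAR) &&
+                               !radix_enabled();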
+
+Fixes: 116ac378bb3f ("powerpc/64s: machine check interrupt update NMI accounting")
+Cc: stable@vger.kernel.org # v5.8+
+Signed-off-by: Ganesh Goudar <ganeshgr@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20201009064005.19777-2-ganeshgr@linux.ibm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/mce.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/arch/powerpc/kernel/mce.c
++++ b/arch/powerpc/kernel/mce.c
+@@ -591,12 +591,11 @@ EXPORT_SYMBOL_GPL(machine_check_print_ev
+ long notrace machine_check_early(struct pt_regs *regs)
+ {
+ long handled = 0;
+- bool nested = in_nmi();
+ u8 ftrace_enabled = this_cpu_get_ftrace_enabled();
+
+ this_cpu_set_ftrace_enabled(0);
+-
+- if (!nested)
++ /* Do not use nmi_enter/exit for pseries hpte guest */
++ if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
+ nmi_enter();
+
+ hv_nmi_check_nonrecoverable(regs);
+@@ -607,7 +606,7 @@ long notrace machine_check_early(struct
+ if (ppc_md.machine_check_early)
+ handled = ppc_md.machine_check_early(regs);
+
+- if (!nested)
++ if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
+ nmi_exit();
+
+ this_cpu_set_ftrace_enabled(ftrace_enabled);
--- /dev/null
+From 301d2ea6572386245c5d2d2dc85c3b5a737b85ac Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
+Date: Wed, 7 Oct 2020 17:18:34 +0530
+Subject: powerpc/memhotplug: Make lmb size 64bit
+
+From: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+
+commit 301d2ea6572386245c5d2d2dc85c3b5a737b85ac upstream.
+
+Similar to commit 89c140bbaeee ("pseries: Fix 64 bit logical memory block panic")
+make sure different variables tracking lmb_size are updated to be 64 bit.
+
+This was found by code audit.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20201007114836.282468-3-aneesh.kumar@linux.ibm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/pseries/hotplug-memory.c | 43 ++++++++++++++++--------
+ 1 file changed, 29 insertions(+), 14 deletions(-)
+
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -277,7 +277,7 @@ static int dlpar_offline_lmb(struct drme
+ return dlpar_change_lmb_state(lmb, false);
+ }
+
+-static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
++static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size)
+ {
+ unsigned long block_sz, start_pfn;
+ int sections_per_block;
+@@ -308,10 +308,11 @@ out:
+
+ static int pseries_remove_mem_node(struct device_node *np)
+ {
+- const __be32 *regs;
++ const __be32 *prop;
+ unsigned long base;
+- unsigned int lmb_size;
++ unsigned long lmb_size;
+ int ret = -EINVAL;
++ int addr_cells, size_cells;
+
+ /*
+ * Check to see if we are actually removing memory
+@@ -322,12 +323,19 @@ static int pseries_remove_mem_node(struc
+ /*
+ * Find the base address and size of the memblock
+ */
+- regs = of_get_property(np, "reg", NULL);
+- if (!regs)
++ prop = of_get_property(np, "reg", NULL);
++ if (!prop)
+ return ret;
+
+- base = be64_to_cpu(*(unsigned long *)regs);
+- lmb_size = be32_to_cpu(regs[3]);
++ addr_cells = of_n_addr_cells(np);
++ size_cells = of_n_size_cells(np);
++
++ /*
++ * "reg" property represents (addr,size) tuple.
++ */
++ base = of_read_number(prop, addr_cells);
++ prop += addr_cells;
++ lmb_size = of_read_number(prop, size_cells);
+
+ pseries_remove_memblock(base, lmb_size);
+ return 0;
+@@ -564,7 +572,7 @@ static int dlpar_memory_remove_by_ic(u32
+
+ #else
+ static inline int pseries_remove_memblock(unsigned long base,
+- unsigned int memblock_size)
++ unsigned long memblock_size)
+ {
+ return -EOPNOTSUPP;
+ }
+@@ -886,10 +894,11 @@ int dlpar_memory(struct pseries_hp_error
+
+ static int pseries_add_mem_node(struct device_node *np)
+ {
+- const __be32 *regs;
++ const __be32 *prop;
+ unsigned long base;
+- unsigned int lmb_size;
++ unsigned long lmb_size;
+ int ret = -EINVAL;
++ int addr_cells, size_cells;
+
+ /*
+ * Check to see if we are actually adding memory
+@@ -900,12 +909,18 @@ static int pseries_add_mem_node(struct d
+ /*
+ * Find the base and size of the memblock
+ */
+- regs = of_get_property(np, "reg", NULL);
+- if (!regs)
++ prop = of_get_property(np, "reg", NULL);
++ if (!prop)
+ return ret;
+
+- base = be64_to_cpu(*(unsigned long *)regs);
+- lmb_size = be32_to_cpu(regs[3]);
++ addr_cells = of_n_addr_cells(np);
++ size_cells = of_n_size_cells(np);
++ /*
++ * "reg" property represents (addr,size) tuple.
++ */
++ base = of_read_number(prop, addr_cells);
++ prop += addr_cells;
++ lmb_size = of_read_number(prop, size_cells);
+
+ /*
+ * Update memory region to represent the memory add
--- /dev/null
+From 2c637d2df4ee4830e9d3eb2bd5412250522ce96e Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Fri, 11 Sep 2020 10:29:15 +0000
+Subject: powerpc/powermac: Fix low_sleep_handler with KUAP and KUEP
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+commit 2c637d2df4ee4830e9d3eb2bd5412250522ce96e upstream.
+
+low_sleep_handler() has a hardcoded restore of segment registers
+that doesn't take KUAP and KUEP into account.
+
+Use head_32's load_segment_registers() routine instead.
+
+Fixes: a68c31fc01ef ("powerpc/32s: Implement Kernel Userspace Access Protection")
+Fixes: 31ed2b13c48d ("powerpc/32s: Implement Kernel Userspace Execution Prevention.")
+Cc: stable@vger.kernel.org
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/21b05f7298c1b18f73e6e5b4cd5005aafa24b6da.1599820109.git.christophe.leroy@csgroup.eu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/head_32.S | 2 +-
+ arch/powerpc/platforms/powermac/sleep.S | 9 +--------
+ 2 files changed, 2 insertions(+), 9 deletions(-)
+
+--- a/arch/powerpc/kernel/head_32.S
++++ b/arch/powerpc/kernel/head_32.S
+@@ -1002,7 +1002,7 @@ BEGIN_MMU_FTR_SECTION
+ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
+ blr
+
+-load_segment_registers:
++_GLOBAL(load_segment_registers)
+ li r0, NUM_USER_SEGMENTS /* load up user segment register values */
+ mtctr r0 /* for context 0 */
+ li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
+--- a/arch/powerpc/platforms/powermac/sleep.S
++++ b/arch/powerpc/platforms/powermac/sleep.S
+@@ -294,14 +294,7 @@ grackle_wake_up:
+ * we do any r1 memory access as we are not sure they
+ * are in a sane state above the first 256Mb region
+ */
+- li r0,16 /* load up segment register values */
+- mtctr r0 /* for context 0 */
+- lis r3,0x2000 /* Ku = 1, VSID = 0 */
+- li r4,0
+-3: mtsrin r3,r4
+- addi r3,r3,0x111 /* increment VSID */
+- addis r4,r4,0x1000 /* address of next segment */
+- bdnz 3b
++ bl load_segment_registers
+ sync
+ isync
+
--- /dev/null
+From aea948bb80b478ddc2448f7359d574387521a52d Mon Sep 17 00:00:00 2001
+From: Mahesh Salgaonkar <mahesh@linux.ibm.com>
+Date: Tue, 6 Oct 2020 13:02:18 +0530
+Subject: powerpc/powernv/elog: Fix race while processing OPAL error log event.
+
+From: Mahesh Salgaonkar <mahesh@linux.ibm.com>
+
+commit aea948bb80b478ddc2448f7359d574387521a52d upstream.
+
+Every error log reported by OPAL is exported to userspace through a
+sysfs interface and notified using kobject_uevent(). The userspace
+daemon (opal_errd) then reads the error log and acknowledges that it
+has been saved safely to disk. Once acknowledged, the kernel removes the
+respective sysfs file entry, causing the associated resources, including
+the kobject, to be released.
+
+However, it's possible the userspace daemon is already scanning elog
+entries when a new sysfs elog entry is created by the kernel. The daemon
+may read this new entry and ack it even before the kernel can notify
+userspace about it through the kobject_uevent() call. If that happens,
+we have a potential race between elog_ack_store->kobject_put() and
+kobject_uevent() which can lead to a use-after-free of a kernfs object,
+resulting in a kernel crash. e.g.:
+
+ BUG: Unable to handle kernel data access on read at 0x6b6b6b6b6b6b6bfb
+ Faulting instruction address: 0xc0000000008ff2a0
+ Oops: Kernel access of bad area, sig: 11 [#1]
+ LE PAGE_SIZE=64K MMU=Hash SMP NR_CPUS=2048 NUMA PowerNV
+ CPU: 27 PID: 805 Comm: irq/29-opal-elo Not tainted 5.9.0-rc2-gcc-8.2.0-00214-g6f56a67bcbb5-dirty #363
+ ...
+ NIP kobject_uevent_env+0xa0/0x910
+ LR elog_event+0x1f4/0x2d0
+ Call Trace:
+ 0x5deadbeef0000122 (unreliable)
+ elog_event+0x1f4/0x2d0
+ irq_thread_fn+0x4c/0xc0
+ irq_thread+0x1c0/0x2b0
+ kthread+0x1c4/0x1d0
+ ret_from_kernel_thread+0x5c/0x6c
+
+This patch fixes this race by protecting the sysfs file
+creation/notification by holding a reference count on kobject until we
+safely send kobject_uevent().
+
+The function create_elog_obj() returns the elog object which, if used
+by the caller, would end up in the use-after-free problem again.
+However, the return value of create_elog_obj() isn't being used today
+and there is no need for it. Hence change it to return void to make
+this fix complete.
+
+Fixes: 774fea1a38c6 ("powerpc/powernv: Read OPAL error log and export it through sysfs")
+Cc: stable@vger.kernel.org # v3.15+
+Reported-by: Oliver O'Halloran <oohall@gmail.com>
+Signed-off-by: Mahesh Salgaonkar <mahesh@linux.ibm.com>
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Reviewed-by: Oliver O'Halloran <oohall@gmail.com>
+Reviewed-by: Vasant Hegde <hegdevasant@linux.vnet.ibm.com>
+[mpe: Rework the logic to use a single return, reword comments, add oops]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20201006122051.190176-1-mpe@ellerman.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/powernv/opal-elog.c | 33 ++++++++++++++++++++++-------
+ 1 file changed, 26 insertions(+), 7 deletions(-)
+
+--- a/arch/powerpc/platforms/powernv/opal-elog.c
++++ b/arch/powerpc/platforms/powernv/opal-elog.c
+@@ -179,14 +179,14 @@ static ssize_t raw_attr_read(struct file
+ return count;
+ }
+
+-static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
++static void create_elog_obj(uint64_t id, size_t size, uint64_t type)
+ {
+ struct elog_obj *elog;
+ int rc;
+
+ elog = kzalloc(sizeof(*elog), GFP_KERNEL);
+ if (!elog)
+- return NULL;
++ return;
+
+ elog->kobj.kset = elog_kset;
+
+@@ -219,18 +219,37 @@ static struct elog_obj *create_elog_obj(
+ rc = kobject_add(&elog->kobj, NULL, "0x%llx", id);
+ if (rc) {
+ kobject_put(&elog->kobj);
+- return NULL;
++ return;
+ }
+
++ /*
++ * As soon as the sysfs file for this elog is created/activated there is
++ * a chance the opal_errd daemon (or any userspace) might read and
++ * acknowledge the elog before kobject_uevent() is called. If that
++ * happens then there is a potential race between
++ * elog_ack_store->kobject_put() and kobject_uevent() which leads to a
++ * use-after-free of a kernfs object resulting in a kernel crash.
++ *
++ * To avoid that, we need to take a reference on behalf of the bin file,
++ * so that our reference remains valid while we call kobject_uevent().
++ * We then drop our reference before exiting the function, leaving the
++ * bin file to drop the last reference (if it hasn't already).
++ */
++
++ /* Take a reference for the bin file */
++ kobject_get(&elog->kobj);
+ rc = sysfs_create_bin_file(&elog->kobj, &elog->raw_attr);
+- if (rc) {
++ if (rc == 0) {
++ kobject_uevent(&elog->kobj, KOBJ_ADD);
++ } else {
++ /* Drop the reference taken for the bin file */
+ kobject_put(&elog->kobj);
+- return NULL;
+ }
+
+- kobject_uevent(&elog->kobj, KOBJ_ADD);
++ /* Drop our reference */
++ kobject_put(&elog->kobj);
+
+- return elog;
++ return;
+ }
+
+ static irqreturn_t elog_event(int irq, void *data)
--- /dev/null
+From bd59380c5ba4147dcbaad3e582b55ccfd120b764 Mon Sep 17 00:00:00 2001
+From: Andrew Donnellan <ajd@linux.ibm.com>
+Date: Thu, 20 Aug 2020 14:45:12 +1000
+Subject: powerpc/rtas: Restrict RTAS requests from userspace
+
+From: Andrew Donnellan <ajd@linux.ibm.com>
+
+commit bd59380c5ba4147dcbaad3e582b55ccfd120b764 upstream.
+
+A number of userspace utilities depend on making calls to RTAS to retrieve
+information and update various things.
+
+The existing API through which we expose RTAS to userspace exposes more
+RTAS functionality than we actually need, through the sys_rtas syscall,
+which allows root (or anyone with CAP_SYS_ADMIN) to make any RTAS call they
+want with arbitrary arguments.
+
+Many RTAS calls take the address of a buffer as an argument, and it's up to
+the caller to specify the physical address of that buffer. We
+allocate a buffer (the "RMO buffer") in the Real Memory Area that RTAS can
+access, and then expose the physical address and size of this buffer in
+/proc/powerpc/rtas/rmo_buffer. Userspace is expected to read this address,
+poke at the buffer using /dev/mem, and pass an address in the RMO buffer to
+the RTAS call.
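+
+A rough sketch of that userspace flow (illustrative only; error handling is
+omitted and the exact rmo_buffer text format parsed here is an assumption):
+
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <sys/mman.h>
+
+    int main(void)
+    {
+        unsigned long base, size;
+        FILE *f = fopen("/proc/powerpc/rtas/rmo_buffer", "r");
+
+        fscanf(f, "%lx %lx", &base, &size);
+        fclose(f);
+
+        /* Stage RTAS arguments inside the RMO buffer via /dev/mem; 'base'
+         * (plus an offset into the buffer) is what legitimate callers pass
+         * as the buffer argument of the rtas syscall.
+         */
+        int fd = open("/dev/mem", O_RDWR | O_SYNC);
+        void *rmo = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+                         fd, base);
+
+        /* ... fill rmo, issue sys_rtas, then munmap(rmo, size), close(fd) */
+        return 0;
+    }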
+
+However, there's nothing stopping the caller from specifying whatever
+address they want in the RTAS call, and it's easy to construct a series of
+RTAS calls that can overwrite arbitrary bytes (even without /dev/mem
+access).
+
+Additionally, there are some RTAS calls that do potentially dangerous
+things and for which there are no legitimate userspace use cases.
+
+In the past, this would not have been a particularly big deal as it was
+assumed that root could modify all system state freely, but with Secure
+Boot and lockdown we need to care about this.
+
+We can't fundamentally change the ABI at this point, however we can address
+this by implementing a filter that checks RTAS calls against a list
+of permitted calls and forces the caller to use addresses within the RMO
+buffer.
+
+The list is based on the list of calls that are used by the librtas
+userspace library, and has been tested with a number of existing userspace
+RTAS utilities. For compatibility with any applications we are not aware of
+that require other calls, the filter can be turned off at build time.
+
+Cc: stable@vger.kernel.org
+Reported-by: Daniel Axtens <dja@axtens.net>
+Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200820044512.7543-1-ajd@linux.ibm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/Kconfig | 13 +++
+ arch/powerpc/kernel/rtas.c | 153 +++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 166 insertions(+)
+
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -1001,6 +1001,19 @@ config PPC_SECVAR_SYSFS
+ read/write operations on these variables. Say Y if you have
+ secure boot enabled and want to expose variables to userspace.
+
++config PPC_RTAS_FILTER
++ bool "Enable filtering of RTAS syscalls"
++ default y
++ depends on PPC_RTAS
++ help
++ The RTAS syscall API has security issues that could be used to
++ compromise system integrity. This option enforces restrictions on the
++ RTAS calls and arguments passed by userspace programs to mitigate
++ these issues.
++
++ Say Y unless you know what you are doing and the filter is causing
++ problems for you.
++
+ endmenu
+
+ config ISA_DMA_API
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -992,6 +992,147 @@ struct pseries_errorlog *get_pseries_err
+ return NULL;
+ }
+
++#ifdef CONFIG_PPC_RTAS_FILTER
++
++/*
++ * The sys_rtas syscall, as originally designed, allows root to pass
++ * arbitrary physical addresses to RTAS calls. A number of RTAS calls
++ * can be abused to write to arbitrary memory and do other things that
++ * are potentially harmful to system integrity, and thus should only
++ * be used inside the kernel and not exposed to userspace.
++ *
++ * All known legitimate users of the sys_rtas syscall will only ever
++ * pass addresses that fall within the RMO buffer, and use a known
++ * subset of RTAS calls.
++ *
++ * Accordingly, we filter RTAS requests to check that the call is
++ * permitted, and that provided pointers fall within the RMO buffer.
++ * The rtas_filters list contains an entry for each permitted call,
++ * with the indexes of the parameters which are expected to contain
++ * addresses and sizes of buffers allocated inside the RMO buffer.
++ */
++struct rtas_filter {
++ const char *name;
++ int token;
++ /* Indexes into the args buffer, -1 if not used */
++ int buf_idx1;
++ int size_idx1;
++ int buf_idx2;
++ int size_idx2;
++
++ int fixed_size;
++};
++
++static struct rtas_filter rtas_filters[] __ro_after_init = {
++ { "ibm,activate-firmware", -1, -1, -1, -1, -1 },
++ { "ibm,configure-connector", -1, 0, -1, 1, -1, 4096 }, /* Special cased */
++ { "display-character", -1, -1, -1, -1, -1 },
++ { "ibm,display-message", -1, 0, -1, -1, -1 },
++ { "ibm,errinjct", -1, 2, -1, -1, -1, 1024 },
++ { "ibm,close-errinjct", -1, -1, -1, -1, -1 },
++ { "ibm,open-errinct", -1, -1, -1, -1, -1 },
++ { "ibm,get-config-addr-info2", -1, -1, -1, -1, -1 },
++ { "ibm,get-dynamic-sensor-state", -1, 1, -1, -1, -1 },
++ { "ibm,get-indices", -1, 2, 3, -1, -1 },
++ { "get-power-level", -1, -1, -1, -1, -1 },
++ { "get-sensor-state", -1, -1, -1, -1, -1 },
++ { "ibm,get-system-parameter", -1, 1, 2, -1, -1 },
++ { "get-time-of-day", -1, -1, -1, -1, -1 },
++ { "ibm,get-vpd", -1, 0, -1, 1, 2 },
++ { "ibm,lpar-perftools", -1, 2, 3, -1, -1 },
++ { "ibm,platform-dump", -1, 4, 5, -1, -1 },
++ { "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 },
++ { "ibm,scan-log-dump", -1, 0, 1, -1, -1 },
++ { "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 },
++ { "ibm,set-eeh-option", -1, -1, -1, -1, -1 },
++ { "set-indicator", -1, -1, -1, -1, -1 },
++ { "set-power-level", -1, -1, -1, -1, -1 },
++ { "set-time-for-power-on", -1, -1, -1, -1, -1 },
++ { "ibm,set-system-parameter", -1, 1, -1, -1, -1 },
++ { "set-time-of-day", -1, -1, -1, -1, -1 },
++ { "ibm,suspend-me", -1, -1, -1, -1, -1 },
++ { "ibm,update-nodes", -1, 0, -1, -1, -1, 4096 },
++ { "ibm,update-properties", -1, 0, -1, -1, -1, 4096 },
++ { "ibm,physical-attestation", -1, 0, 1, -1, -1 },
++};
++
++static bool in_rmo_buf(u32 base, u32 end)
++{
++ return base >= rtas_rmo_buf &&
++ base < (rtas_rmo_buf + RTAS_RMOBUF_MAX) &&
++ base <= end &&
++ end >= rtas_rmo_buf &&
++ end < (rtas_rmo_buf + RTAS_RMOBUF_MAX);
++}
++
++static bool block_rtas_call(int token, int nargs,
++ struct rtas_args *args)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) {
++ struct rtas_filter *f = &rtas_filters[i];
++ u32 base, size, end;
++
++ if (token != f->token)
++ continue;
++
++ if (f->buf_idx1 != -1) {
++ base = be32_to_cpu(args->args[f->buf_idx1]);
++ if (f->size_idx1 != -1)
++ size = be32_to_cpu(args->args[f->size_idx1]);
++ else if (f->fixed_size)
++ size = f->fixed_size;
++ else
++ size = 1;
++
++ end = base + size - 1;
++ if (!in_rmo_buf(base, end))
++ goto err;
++ }
++
++ if (f->buf_idx2 != -1) {
++ base = be32_to_cpu(args->args[f->buf_idx2]);
++ if (f->size_idx2 != -1)
++ size = be32_to_cpu(args->args[f->size_idx2]);
++ else if (f->fixed_size)
++ size = f->fixed_size;
++ else
++ size = 1;
++ end = base + size - 1;
++
++ /*
++ * Special case for ibm,configure-connector where the
++ * address can be 0
++ */
++ if (!strcmp(f->name, "ibm,configure-connector") &&
++ base == 0)
++ return false;
++
++ if (!in_rmo_buf(base, end))
++ goto err;
++ }
++
++ return false;
++ }
++
++err:
++ pr_err_ratelimited("sys_rtas: RTAS call blocked - exploit attempt?\n");
++ pr_err_ratelimited("sys_rtas: token=0x%x, nargs=%d (called by %s)\n",
++ token, nargs, current->comm);
++ return true;
++}
++
++#else
++
++static bool block_rtas_call(int token, int nargs,
++ struct rtas_args *args)
++{
++ return false;
++}
++
++#endif /* CONFIG_PPC_RTAS_FILTER */
++
+ /* We assume to be passed big endian arguments */
+ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
+ {
+@@ -1029,6 +1170,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args _
+ args.rets = &args.args[nargs];
+ memset(args.rets, 0, nret * sizeof(rtas_arg_t));
+
++ if (block_rtas_call(token, nargs, &args))
++ return -EINVAL;
++
+ /* Need to handle ibm,suspend_me call specially */
+ if (token == ibm_suspend_me_token) {
+
+@@ -1090,6 +1234,9 @@ void __init rtas_initialize(void)
+ unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
+ u32 base, size, entry;
+ int no_base, no_size, no_entry;
++#ifdef CONFIG_PPC_RTAS_FILTER
++ int i;
++#endif
+
+ /* Get RTAS dev node and fill up our "rtas" structure with infos
+ * about it.
+@@ -1129,6 +1276,12 @@ void __init rtas_initialize(void)
+ #ifdef CONFIG_RTAS_ERROR_LOGGING
+ rtas_last_error_token = rtas_token("rtas-last-error");
+ #endif
++
++#ifdef CONFIG_PPC_RTAS_FILTER
++ for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) {
++ rtas_filters[i].token = rtas_token(rtas_filters[i].name);
++ }
++#endif
+ }
+
+ int __init early_init_dt_scan_rtas(unsigned long node,
--- /dev/null
+From a02f6d42357acf6e5de6ffc728e6e77faf3ad217 Mon Sep 17 00:00:00 2001
+From: Joel Stanley <joel@jms.id.au>
+Date: Wed, 2 Sep 2020 09:30:11 +0930
+Subject: powerpc: Warn about use of smt_snooze_delay
+
+From: Joel Stanley <joel@jms.id.au>
+
+commit a02f6d42357acf6e5de6ffc728e6e77faf3ad217 upstream.
+
+It hasn't done anything for a long time. Remove the percpu variable to save
+space, and emit a warning to remind users not to expect it to do anything.
+
+This uses pr_warn_once instead of pr_warn_ratelimit as testing
+'ppc64_cpu --smt=off' on a 24 core / 4 SMT system showed the warning
+to be noisy, as the online/offline loop is slow.
+
+Fixes: 3fa8cad82b94 ("powerpc/pseries/cpuidle: smt-snooze-delay cleanup.")
+Cc: stable@vger.kernel.org # v3.14
+Signed-off-by: Joel Stanley <joel@jms.id.au>
+Acked-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200902000012.3440389-1-joel@jms.id.au
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/sysfs.c | 42 +++++++++++++++++-------------------------
+ 1 file changed, 17 insertions(+), 25 deletions(-)
+
+--- a/arch/powerpc/kernel/sysfs.c
++++ b/arch/powerpc/kernel/sysfs.c
+@@ -32,29 +32,27 @@
+
+ static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+-/*
+- * SMT snooze delay stuff, 64-bit only for now
+- */
+-
+ #ifdef CONFIG_PPC64
+
+-/* Time in microseconds we delay before sleeping in the idle loop */
+-static DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
++/*
++ * Snooze delay has not been hooked up since 3fa8cad82b94 ("powerpc/pseries/cpuidle:
++ * smt-snooze-delay cleanup.") and has been broken even longer. As was foretold in
++ * 2014:
++ *
++ * "ppc64_util currently utilises it. Once we fix ppc64_util, propose to clean
++ * up the kernel code."
++ *
++ * powerpc-utils stopped using it as of 1.3.8. At some point in the future this
++ * code should be removed.
++ */
+
+ static ssize_t store_smt_snooze_delay(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+ {
+- struct cpu *cpu = container_of(dev, struct cpu, dev);
+- ssize_t ret;
+- long snooze;
+-
+- ret = sscanf(buf, "%ld", &snooze);
+- if (ret != 1)
+- return -EINVAL;
+-
+- per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
++ pr_warn_once("%s (%d) stored to unsupported smt_snooze_delay, which has no effect.\n",
++ current->comm, current->pid);
+ return count;
+ }
+
+@@ -62,9 +60,9 @@ static ssize_t show_smt_snooze_delay(str
+ struct device_attribute *attr,
+ char *buf)
+ {
+- struct cpu *cpu = container_of(dev, struct cpu, dev);
+-
+- return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
++ pr_warn_once("%s (%d) read from unsupported smt_snooze_delay\n",
++ current->comm, current->pid);
++ return sprintf(buf, "100\n");
+ }
+
+ static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
+@@ -72,16 +70,10 @@ static DEVICE_ATTR(smt_snooze_delay, 064
+
+ static int __init setup_smt_snooze_delay(char *str)
+ {
+- unsigned int cpu;
+- long snooze;
+-
+ if (!cpu_has_feature(CPU_FTR_SMT))
+ return 1;
+
+- snooze = simple_strtol(str, NULL, 10);
+- for_each_possible_cpu(cpu)
+- per_cpu(smt_snooze_delay, cpu) = snooze;
+-
++ pr_warn("smt-snooze-delay command line option has no effect\n");
+ return 1;
+ }
+ __setup("smt-snooze-delay=", setup_smt_snooze_delay);
--- /dev/null
+From f747c7e15d7bc71a967a94ceda686cf2460b69e8 Mon Sep 17 00:00:00 2001
+From: "Paul E. McKenney" <paulmck@kernel.org>
+Date: Tue, 15 Sep 2020 14:27:38 -0700
+Subject: rcu-tasks: Enclose task-list scan in rcu_read_lock()
+
+From: Paul E. McKenney <paulmck@kernel.org>
+
+commit f747c7e15d7bc71a967a94ceda686cf2460b69e8 upstream.
+
+The rcu_tasks_trace_postgp() function uses for_each_process_thread()
+to scan the task list without the benefit of RCU read-side protection,
+which can result in use-after-free errors on task_struct structures.
+This error was missed because the TRACE01 rcutorture scenario enables
+lockdep, but also builds with CONFIG_PREEMPT_NONE=y. In this situation,
+preemption is disabled everywhere, so lockdep thinks everywhere can
+be a legitimate RCU reader. This commit therefore adds the needed
+rcu_read_lock() and rcu_read_unlock().
+
+Note that this bug can occur only after an RCU Tasks Trace CPU stall
+warning, which by default only happens after a grace period has extended
+for ten minutes (yes, not a typo, minutes).
+
+Fixes: 4593e772b502 ("rcu-tasks: Add stall warnings for RCU Tasks Trace")
+Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com>
+Cc: Daniel Borkmann <daniel@iogearbox.net>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: <bpf@vger.kernel.org>
+Cc: <stable@vger.kernel.org> # 5.7.x
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/rcu/tasks.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -1078,9 +1078,11 @@ static void rcu_tasks_trace_postgp(struc
+ if (ret)
+ break; // Count reached zero.
+ // Stall warning time, so make a list of the offenders.
++ rcu_read_lock();
+ for_each_process_thread(g, t)
+ if (READ_ONCE(t->trc_reader_special.b.need_qs))
+ trc_add_holdout(t, &holdouts);
++ rcu_read_unlock();
+ firstreport = true;
+ list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
+ if (READ_ONCE(t->trc_reader_special.b.need_qs))
--- /dev/null
+From ba3a86e47232ad9f76160929f33ac9c64e4d0567 Mon Sep 17 00:00:00 2001
+From: "Paul E. McKenney" <paulmck@kernel.org>
+Date: Mon, 14 Sep 2020 15:44:37 -0700
+Subject: rcu-tasks: Fix grace-period/unlock race in RCU Tasks Trace
+
+From: Paul E. McKenney <paulmck@kernel.org>
+
+commit ba3a86e47232ad9f76160929f33ac9c64e4d0567 upstream.
+
+The more intense grace-period processing resulting from the 50x RCU
+Tasks Trace grace-period speedups exposed the following race condition:
+
+o Task A running on CPU 0 executes rcu_read_lock_trace(),
+ entering a read-side critical section.
+
+o When Task A eventually invokes rcu_read_unlock_trace()
+ to exit its read-side critical section, this function
+	notes that the ->trc_reader_special.s flag is zero and
+	therefore will set ->trc_reader_nesting to zero using
+	WRITE_ONCE().  But before that happens...
+
+o The RCU Tasks Trace grace-period kthread running on some other
+ CPU interrogates Task A, but this fails because this task is
+ currently running. This kthread therefore sends an IPI to CPU 0.
+
+o CPU 0 receives the IPI, and thus invokes trc_read_check_handler().
+ Because Task A has not yet cleared its ->trc_reader_nesting
+ counter, this function sees that Task A is still within its
+ read-side critical section. This function therefore sets the
+	->trc_reader_special.b.need_qs flag, AKA the .need_qs flag.
+
+ Except that Task A has already checked the .need_qs flag, which
+ is part of the ->trc_reader_special.s flag. The .need_qs flag
+ therefore remains set until Task A's next rcu_read_unlock_trace().
+
+o Task A now invokes synchronize_rcu_tasks_trace(), which cannot
+ start a new grace period until the current grace period completes.
+ And thus cannot return until after that time.
+
+ But Task A's .need_qs flag is still set, which prevents the current
+ grace period from completing. And because Task A is blocked, it
+ will never execute rcu_read_unlock_trace() until its call to
+ synchronize_rcu_tasks_trace() returns.
+
+ We are therefore deadlocked.
+
+This race is improbable, but 80 hours of rcutorture made it happen twice.
+The race was possible before the grace-period speedup, but roughly 50x
+less probable. Several thousand hours of rcutorture would have been
+necessary to have a reasonable chance of making this happen before this
+50x speedup.
+
+This commit therefore eliminates this deadlock by setting
+->trc_reader_nesting to a large negative number before checking the
+.need_qs and zeroing (or decrementing with respect to its initial
+value) ->trc_reader_nesting. For its part, the IPI handler's
+trc_read_check_handler() function adds a check for negative values,
+deferring evaluation of the task in this case. Taken together, these
+changes avoid this deadlock scenario.
+
+Fixes: 276c410448db ("rcu-tasks: Split ->trc_reader_need_end")
+Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com>
+Cc: Daniel Borkmann <daniel@iogearbox.net>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: <bpf@vger.kernel.org>
+Cc: <stable@vger.kernel.org> # 5.7.x
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/rcupdate_trace.h | 4 ++++
+ kernel/rcu/tasks.h | 6 ++++++
+ 2 files changed, 10 insertions(+)
+
+--- a/include/linux/rcupdate_trace.h
++++ b/include/linux/rcupdate_trace.h
+@@ -50,6 +50,7 @@ static inline void rcu_read_lock_trace(v
+ struct task_struct *t = current;
+
+ WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
++ barrier();
+ if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
+ t->trc_reader_special.b.need_mb)
+ smp_mb(); // Pairs with update-side barriers
+@@ -72,6 +73,9 @@ static inline void rcu_read_unlock_trace
+
+ rcu_lock_release(&rcu_trace_lock_map);
+ nesting = READ_ONCE(t->trc_reader_nesting) - 1;
++ barrier(); // Critical section before disabling.
++ // Disable IPI-based setting of .need_qs.
++ WRITE_ONCE(t->trc_reader_nesting, INT_MIN);
+ if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
+ WRITE_ONCE(t->trc_reader_nesting, nesting);
+ return; // We assume shallow reader nesting.
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -821,6 +821,12 @@ static void trc_read_check_handler(void
+ WRITE_ONCE(t->trc_reader_checked, true);
+ goto reset_ipi;
+ }
++ // If we are racing with an rcu_read_unlock_trace(), try again later.
++ if (unlikely(t->trc_reader_nesting < 0)) {
++ if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
++ wake_up(&trc_wait);
++ goto reset_ipi;
++ }
+ WRITE_ONCE(t->trc_reader_checked, true);
+
+ // Get here if the task is in a read-side critical section. Set
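
The ordering the two hunks above establish can be pictured with a small
userspace analogy: the reader publishes a negative sentinel before it
samples the flag, and a concurrent checker that sees a negative value
backs off.  This is an illustrative sketch using C11 atomics and made-up
names, not the kernel implementation:

#include <limits.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int nesting = 1;   /* reader is inside a critical section */
static atomic_int need_qs = 0;   /* set by the checker to request a report */

static void *reader(void *arg)
{
	(void)arg;
	/* Exit the critical section: publish the sentinel first... */
	atomic_store(&nesting, INT_MIN);
	/* ...then look at the flag the checker may have set. */
	if (atomic_load(&need_qs))
		printf("reader: reporting quiescent state\n");
	atomic_store(&nesting, 0);   /* fully out of the critical section */
	return NULL;
}

static void *checker(void *arg)
{
	int n = atomic_load(&nesting);

	(void)arg;
	if (n < 0)
		printf("checker: unlock in flight, try again later\n");
	else if (n > 0)
		atomic_store(&need_qs, 1);   /* ask the reader to report */
	else
		printf("checker: reader already quiescent\n");
	return NULL;
}

int main(void)
{
	pthread_t r, c;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&c, NULL, checker, NULL);
	pthread_join(r, NULL);
	pthread_join(c, NULL);
	return 0;
}
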
--- /dev/null
+From 592031cc10858be4adb10f6c0f2608f6f21824aa Mon Sep 17 00:00:00 2001
+From: "Paul E. McKenney" <paulmck@kernel.org>
+Date: Tue, 15 Sep 2020 14:03:34 -0700
+Subject: rcu-tasks: Fix low-probability task_struct leak
+
+From: Paul E. McKenney <paulmck@kernel.org>
+
+commit 592031cc10858be4adb10f6c0f2608f6f21824aa upstream.
+
+When the rcu_tasks_trace_postgp() function detects an RCU Tasks Trace
+CPU stall, it adds all tasks blocking the current grace period to
+a list, invoking get_task_struct() on each to prevent them from
+being freed while on the list. It then traverses that list,
+printing stall-warning messages for each one that is still blocking
+the current grace period and removing it from the list. The list
+removal invokes the matching put_task_struct().
+
+This of course means that in the admittedly unlikely event that some
+task executes its outermost rcu_read_unlock_trace() in the meantime, it
+won't be removed from the list and put_task_struct() won't be executed,
+resulting in a task_struct leak. This commit therefore makes the list
+removal and put_task_struct() unconditional, stopping the leak.
+
+Note further that this bug can occur only after an RCU Tasks Trace CPU
+stall warning, which by default only happens after a grace period has
+extended for ten minutes (yes, not a typo, minutes).
+
+Fixes: 4593e772b502 ("rcu-tasks: Add stall warnings for RCU Tasks Trace")
+Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com>
+Cc: Daniel Borkmann <daniel@iogearbox.net>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: <bpf@vger.kernel.org>
+Cc: <stable@vger.kernel.org> # 5.7.x
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/rcu/tasks.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -1082,11 +1082,11 @@ static void rcu_tasks_trace_postgp(struc
+ if (READ_ONCE(t->trc_reader_special.b.need_qs))
+ trc_add_holdout(t, &holdouts);
+ firstreport = true;
+- list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list)
+- if (READ_ONCE(t->trc_reader_special.b.need_qs)) {
++ list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
++ if (READ_ONCE(t->trc_reader_special.b.need_qs))
+ show_stalled_task_trace(t, &firstreport);
+- trc_del_holdout(t);
+- }
++ trc_del_holdout(t); // Release task_struct reference.
++ }
+ if (firstreport)
+ pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n");
+ show_stalled_ipi_trace();
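
The rule the patch enforces is that list removal, and with it the
matching "put", happens unconditionally once a reference has been taken.
A minimal userspace sketch of that reference-counting discipline, with
hypothetical types rather than the kernel's task_struct handling:

#include <stdio.h>
#include <stdlib.h>

struct holdout {
	int refcount;
	int still_blocking;          /* analogue of .need_qs */
	struct holdout *next;
};

static void put(struct holdout *h)
{
	if (--h->refcount == 0)
		free(h);
}

int main(void)
{
	/* Two holdouts; one stopped blocking in the meantime. */
	struct holdout *a = calloc(1, sizeof(*a));
	struct holdout *b = calloc(1, sizeof(*b));
	struct holdout *h, *n;

	a->refcount = 1; a->still_blocking = 1; a->next = b;
	b->refcount = 1; b->still_blocking = 0; b->next = NULL;

	for (h = a; h; h = n) {
		n = h->next;
		if (h->still_blocking)
			printf("stall warning for %p\n", (void *)h);
		put(h);              /* always drop the list's reference */
	}
	return 0;
}
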
--- /dev/null
+From 1894622636745237f882bfab47925afc48e122e0 Mon Sep 17 00:00:00 2001
+From: Sibi Sankar <sibis@codeaurora.org>
+Date: Wed, 16 Sep 2020 20:21:00 +0530
+Subject: remoteproc: Fixup coredump debugfs disable request
+
+From: Sibi Sankar <sibis@codeaurora.org>
+
+commit 1894622636745237f882bfab47925afc48e122e0 upstream.
+
+Fix the discrepancy observed between accepted input and read back value
+while disabling remoteproc coredump through the coredump debugfs entry.
+
+Fixes: 3afdc59e4390 ("remoteproc: Add coredump debugfs entry")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sibi Sankar <sibis@codeaurora.org>
+Link: https://lore.kernel.org/r/20200916145100.15872-1-sibis@codeaurora.org
+Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/remoteproc/remoteproc_debugfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/remoteproc/remoteproc_debugfs.c
++++ b/drivers/remoteproc/remoteproc_debugfs.c
+@@ -94,7 +94,7 @@ static ssize_t rproc_coredump_write(stru
+ goto out;
+ }
+
+- if (!strncmp(buf, "disable", count)) {
++ if (!strncmp(buf, "disabled", count)) {
+ rproc->dump_conf = RPROC_COREDUMP_DISABLED;
+ } else if (!strncmp(buf, "inline", count)) {
+ rproc->dump_conf = RPROC_COREDUMP_INLINE;
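
The discrepancy came from the write handler accepting a keyword that did
not match the value read back from the entry.  A tiny userspace sketch of
the count-limited keyword match; the function name and return values are
placeholders for the dump_conf settings:

#include <stdio.h>
#include <string.h>

static int parse_coredump_conf(const char *buf, size_t count)
{
	if (!strncmp(buf, "disabled", count))
		return 0;                  /* RPROC_COREDUMP_DISABLED */
	if (!strncmp(buf, "inline", count))
		return 1;                  /* RPROC_COREDUMP_INLINE */
	return -1;                         /* unrecognized keyword */
}

int main(void)
{
	/* Assuming the entry reads back as "disabled", echoing that value
	 * now parses cleanly; with the old keyword "disable" the compare
	 * stopped at the trailing 'd' and returned a mismatch. */
	const char *shown = "disabled";

	printf("parse(\"%s\") = %d\n", shown,
	       parse_coredump_conf(shown, strlen(shown)));
	return 0;
}
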
--- /dev/null
+From 5e1a3149eec8675c2767cc465903f5e4829de5b0 Mon Sep 17 00:00:00 2001
+From: "Michael S. Tsirkin" <mst@redhat.com>
+Date: Thu, 29 Oct 2020 17:53:36 -0400
+Subject: Revert "vhost-vdpa: fix page pinning leakage in error path"
+
+From: Michael S. Tsirkin <mst@redhat.com>
+
+commit 5e1a3149eec8675c2767cc465903f5e4829de5b0 upstream.
+
+This reverts commit 7ed9e3d97c32d969caded2dfb6e67c1a2cc5a0b1.
+
+The patch creates a DoS risk since it can result in a high order memory
+allocation.
+
+Fixes: 7ed9e3d97c32d ("vhost-vdpa: fix page pinning leakage in error path")
+Cc: stable@vger.kernel.org
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/vhost/vdpa.c | 117 ++++++++++++++++++++-------------------------------
+ 1 file changed, 47 insertions(+), 70 deletions(-)
+
+--- a/drivers/vhost/vdpa.c
++++ b/drivers/vhost/vdpa.c
+@@ -595,19 +595,21 @@ static int vhost_vdpa_process_iotlb_upda
+ struct vhost_dev *dev = &v->vdev;
+ struct vhost_iotlb *iotlb = dev->iotlb;
+ struct page **page_list;
+- struct vm_area_struct **vmas;
++ unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
+ unsigned int gup_flags = FOLL_LONGTERM;
+- unsigned long map_pfn, last_pfn = 0;
+- unsigned long npages, lock_limit;
+- unsigned long i, nmap = 0;
++ unsigned long npages, cur_base, map_pfn, last_pfn = 0;
++ unsigned long locked, lock_limit, pinned, i;
+ u64 iova = msg->iova;
+- long pinned;
+ int ret = 0;
+
+ if (vhost_iotlb_itree_first(iotlb, msg->iova,
+ msg->iova + msg->size - 1))
+ return -EEXIST;
+
++ page_list = (struct page **) __get_free_page(GFP_KERNEL);
++ if (!page_list)
++ return -ENOMEM;
++
+ if (msg->perm & VHOST_ACCESS_WO)
+ gup_flags |= FOLL_WRITE;
+
+@@ -615,86 +617,61 @@ static int vhost_vdpa_process_iotlb_upda
+ if (!npages)
+ return -EINVAL;
+
+- page_list = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+- vmas = kvmalloc_array(npages, sizeof(struct vm_area_struct *),
+- GFP_KERNEL);
+- if (!page_list || !vmas) {
+- ret = -ENOMEM;
+- goto free;
+- }
+-
+ mmap_read_lock(dev->mm);
+
++ locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+- if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
+- ret = -ENOMEM;
+- goto unlock;
+- }
+
+- pinned = pin_user_pages(msg->uaddr & PAGE_MASK, npages, gup_flags,
+- page_list, vmas);
+- if (npages != pinned) {
+- if (pinned < 0) {
+- ret = pinned;
+- } else {
+- unpin_user_pages(page_list, pinned);
+- ret = -ENOMEM;
+- }
+- goto unlock;
++ if (locked > lock_limit) {
++ ret = -ENOMEM;
++ goto out;
+ }
+
++ cur_base = msg->uaddr & PAGE_MASK;
+ iova &= PAGE_MASK;
+- map_pfn = page_to_pfn(page_list[0]);
+
+- /* One more iteration to avoid extra vdpa_map() call out of loop. */
+- for (i = 0; i <= npages; i++) {
+- unsigned long this_pfn;
+- u64 csize;
+-
+- /* The last chunk may have no valid PFN next to it */
+- this_pfn = i < npages ? page_to_pfn(page_list[i]) : -1UL;
+-
+- if (last_pfn && (this_pfn == -1UL ||
+- this_pfn != last_pfn + 1)) {
+- /* Pin a contiguous chunk of memory */
+- csize = last_pfn - map_pfn + 1;
+- ret = vhost_vdpa_map(v, iova, csize << PAGE_SHIFT,
+- map_pfn << PAGE_SHIFT,
+- msg->perm);
+- if (ret) {
+- /*
+- * Unpin the rest chunks of memory on the
+- * flight with no corresponding vdpa_map()
+- * calls having been made yet. On the other
+- * hand, vdpa_unmap() in the failure path
+- * is in charge of accounting the number of
+- * pinned pages for its own.
+- * This asymmetrical pattern of accounting
+- * is for efficiency to pin all pages at
+- * once, while there is no other callsite
+- * of vdpa_map() than here above.
+- */
+- unpin_user_pages(&page_list[nmap],
+- npages - nmap);
+- goto out;
++ while (npages) {
++ pinned = min_t(unsigned long, npages, list_size);
++ ret = pin_user_pages(cur_base, pinned,
++ gup_flags, page_list, NULL);
++ if (ret != pinned)
++ goto out;
++
++ if (!last_pfn)
++ map_pfn = page_to_pfn(page_list[0]);
++
++ for (i = 0; i < ret; i++) {
++ unsigned long this_pfn = page_to_pfn(page_list[i]);
++ u64 csize;
++
++ if (last_pfn && (this_pfn != last_pfn + 1)) {
++ /* Pin a contiguous chunk of memory */
++ csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
++ if (vhost_vdpa_map(v, iova, csize,
++ map_pfn << PAGE_SHIFT,
++ msg->perm))
++ goto out;
++ map_pfn = this_pfn;
++ iova += csize;
+ }
+- atomic64_add(csize, &dev->mm->pinned_vm);
+- nmap += csize;
+- iova += csize << PAGE_SHIFT;
+- map_pfn = this_pfn;
++
++ last_pfn = this_pfn;
+ }
+- last_pfn = this_pfn;
++
++ cur_base += ret << PAGE_SHIFT;
++ npages -= ret;
+ }
+
+- WARN_ON(nmap != npages);
++ /* Pin the rest chunk */
++ ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
++ map_pfn << PAGE_SHIFT, msg->perm);
+ out:
+- if (ret)
++ if (ret) {
+ vhost_vdpa_unmap(v, msg->iova, msg->size);
+-unlock:
++ atomic64_sub(npages, &dev->mm->pinned_vm);
++ }
+ mmap_read_unlock(dev->mm);
+-free:
+- kvfree(vmas);
+- kvfree(page_list);
++ free_page((unsigned long)page_list);
+ return ret;
+ }
+
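
The key property of the restored code is visible in the hunks above:
pinning proceeds in batches sized to one page of page pointers, so no
allocation grows with the size of the mapping.  A userspace sketch of
that batching pattern, with the pin call itself elided:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long npages = 100000;             /* pages to "pin" */
	unsigned long list_size = PAGE_SIZE / sizeof(void *);
	void **page_list = malloc(PAGE_SIZE);      /* one page of slots */
	unsigned long batches = 0;

	if (!page_list)
		return 1;
	while (npages) {
		unsigned long this_batch =
			npages < list_size ? npages : list_size;

		/* pin_user_pages(..., this_batch, page_list) would go here */
		npages -= this_batch;
		batches++;
	}
	printf("processed in %lu batches of at most %lu pages\n",
	       batches, list_size);
	free(page_list);
	return 0;
}
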
--- /dev/null
+From b3bd02495cb339124f13135d51940cf48d83e5cb Mon Sep 17 00:00:00 2001
+From: Sven Schnelle <svens@linux.ibm.com>
+Date: Tue, 15 Sep 2020 08:53:50 +0200
+Subject: s390/stp: add locking to sysfs functions
+
+From: Sven Schnelle <svens@linux.ibm.com>
+
+commit b3bd02495cb339124f13135d51940cf48d83e5cb upstream.
+
+The sysfs functions might race with stp_work_fn. To prevent that,
+add the required locking. Another issue is that the sysfs functions
+check the stp_online flag, but this flag just holds the user
+setting of whether STP is enabled. Add a flag to clock_sync_flags
+indicating whether stp_info holds valid data and use that instead.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
+Reviewed-by: Alexander Egorenkov <egorenar@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/time.c | 118 ++++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 85 insertions(+), 33 deletions(-)
+
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -345,8 +345,9 @@ static DEFINE_PER_CPU(atomic_t, clock_sy
+ static DEFINE_MUTEX(clock_sync_mutex);
+ static unsigned long clock_sync_flags;
+
+-#define CLOCK_SYNC_HAS_STP 0
+-#define CLOCK_SYNC_STP 1
++#define CLOCK_SYNC_HAS_STP 0
++#define CLOCK_SYNC_STP 1
++#define CLOCK_SYNC_STPINFO_VALID 2
+
+ /*
+ * The get_clock function for the physical clock. It will get the current
+@@ -583,6 +584,22 @@ void stp_queue_work(void)
+ queue_work(time_sync_wq, &stp_work);
+ }
+
++static int __store_stpinfo(void)
++{
++ int rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
++
++ if (rc)
++ clear_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
++ else
++ set_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
++ return rc;
++}
++
++static int stpinfo_valid(void)
++{
++ return stp_online && test_bit(CLOCK_SYNC_STPINFO_VALID, &clock_sync_flags);
++}
++
+ static int stp_sync_clock(void *data)
+ {
+ struct clock_sync_data *sync = data;
+@@ -604,8 +621,7 @@ static int stp_sync_clock(void *data)
+ if (rc == 0) {
+ sync->clock_delta = clock_delta;
+ clock_sync_global(clock_delta);
+- rc = chsc_sstpi(stp_page, &stp_info,
+- sizeof(struct stp_sstpi));
++ rc = __store_stpinfo();
+ if (rc == 0 && stp_info.tmd != 2)
+ rc = -EAGAIN;
+ }
+@@ -650,7 +666,7 @@ static void stp_work_fn(struct work_stru
+ if (rc)
+ goto out_unlock;
+
+- rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
++ rc = __store_stpinfo();
+ if (rc || stp_info.c == 0)
+ goto out_unlock;
+
+@@ -687,10 +703,14 @@ static ssize_t ctn_id_show(struct device
+ struct device_attribute *attr,
+ char *buf)
+ {
+- if (!stp_online)
+- return -ENODATA;
+- return sprintf(buf, "%016llx\n",
+- *(unsigned long long *) stp_info.ctnid);
++ ssize_t ret = -ENODATA;
++
++ mutex_lock(&stp_work_mutex);
++ if (stpinfo_valid())
++ ret = sprintf(buf, "%016llx\n",
++ *(unsigned long long *) stp_info.ctnid);
++ mutex_unlock(&stp_work_mutex);
++ return ret;
+ }
+
+ static DEVICE_ATTR_RO(ctn_id);
+@@ -699,9 +719,13 @@ static ssize_t ctn_type_show(struct devi
+ struct device_attribute *attr,
+ char *buf)
+ {
+- if (!stp_online)
+- return -ENODATA;
+- return sprintf(buf, "%i\n", stp_info.ctn);
++ ssize_t ret = -ENODATA;
++
++ mutex_lock(&stp_work_mutex);
++ if (stpinfo_valid())
++ ret = sprintf(buf, "%i\n", stp_info.ctn);
++ mutex_unlock(&stp_work_mutex);
++ return ret;
+ }
+
+ static DEVICE_ATTR_RO(ctn_type);
+@@ -710,9 +734,13 @@ static ssize_t dst_offset_show(struct de
+ struct device_attribute *attr,
+ char *buf)
+ {
+- if (!stp_online || !(stp_info.vbits & 0x2000))
+- return -ENODATA;
+- return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
++ ssize_t ret = -ENODATA;
++
++ mutex_lock(&stp_work_mutex);
++ if (stpinfo_valid() && (stp_info.vbits & 0x2000))
++ ret = sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
++ mutex_unlock(&stp_work_mutex);
++ return ret;
+ }
+
+ static DEVICE_ATTR_RO(dst_offset);
+@@ -721,9 +749,13 @@ static ssize_t leap_seconds_show(struct
+ struct device_attribute *attr,
+ char *buf)
+ {
+- if (!stp_online || !(stp_info.vbits & 0x8000))
+- return -ENODATA;
+- return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
++ ssize_t ret = -ENODATA;
++
++ mutex_lock(&stp_work_mutex);
++ if (stpinfo_valid() && (stp_info.vbits & 0x8000))
++ ret = sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
++ mutex_unlock(&stp_work_mutex);
++ return ret;
+ }
+
+ static DEVICE_ATTR_RO(leap_seconds);
+@@ -732,9 +764,13 @@ static ssize_t stratum_show(struct devic
+ struct device_attribute *attr,
+ char *buf)
+ {
+- if (!stp_online)
+- return -ENODATA;
+- return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
++ ssize_t ret = -ENODATA;
++
++ mutex_lock(&stp_work_mutex);
++ if (stpinfo_valid())
++ ret = sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
++ mutex_unlock(&stp_work_mutex);
++ return ret;
+ }
+
+ static DEVICE_ATTR_RO(stratum);
+@@ -743,9 +779,13 @@ static ssize_t time_offset_show(struct d
+ struct device_attribute *attr,
+ char *buf)
+ {
+- if (!stp_online || !(stp_info.vbits & 0x0800))
+- return -ENODATA;
+- return sprintf(buf, "%i\n", (int) stp_info.tto);
++ ssize_t ret = -ENODATA;
++
++ mutex_lock(&stp_work_mutex);
++ if (stpinfo_valid() && (stp_info.vbits & 0x0800))
++ ret = sprintf(buf, "%i\n", (int) stp_info.tto);
++ mutex_unlock(&stp_work_mutex);
++ return ret;
+ }
+
+ static DEVICE_ATTR_RO(time_offset);
+@@ -754,9 +794,13 @@ static ssize_t time_zone_offset_show(str
+ struct device_attribute *attr,
+ char *buf)
+ {
+- if (!stp_online || !(stp_info.vbits & 0x4000))
+- return -ENODATA;
+- return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
++ ssize_t ret = -ENODATA;
++
++ mutex_lock(&stp_work_mutex);
++ if (stpinfo_valid() && (stp_info.vbits & 0x4000))
++ ret = sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
++ mutex_unlock(&stp_work_mutex);
++ return ret;
+ }
+
+ static DEVICE_ATTR_RO(time_zone_offset);
+@@ -765,9 +809,13 @@ static ssize_t timing_mode_show(struct d
+ struct device_attribute *attr,
+ char *buf)
+ {
+- if (!stp_online)
+- return -ENODATA;
+- return sprintf(buf, "%i\n", stp_info.tmd);
++ ssize_t ret = -ENODATA;
++
++ mutex_lock(&stp_work_mutex);
++ if (stpinfo_valid())
++ ret = sprintf(buf, "%i\n", stp_info.tmd);
++ mutex_unlock(&stp_work_mutex);
++ return ret;
+ }
+
+ static DEVICE_ATTR_RO(timing_mode);
+@@ -776,9 +824,13 @@ static ssize_t timing_state_show(struct
+ struct device_attribute *attr,
+ char *buf)
+ {
+- if (!stp_online)
+- return -ENODATA;
+- return sprintf(buf, "%i\n", stp_info.tst);
++ ssize_t ret = -ENODATA;
++
++ mutex_lock(&stp_work_mutex);
++ if (stpinfo_valid())
++ ret = sprintf(buf, "%i\n", stp_info.tst);
++ mutex_unlock(&stp_work_mutex);
++ return ret;
+ }
+
+ static DEVICE_ATTR_RO(timing_state);
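
All of the show routines above now follow the same shape: take the work
mutex, consult the validity bit that __store_stpinfo() maintains, and
only then format the cached data.  A compact userspace analogy of that
pattern, using a pthread mutex and invented names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t work_mutex = PTHREAD_MUTEX_INITIALIZER;
static int info_valid;           /* analogue of CLOCK_SYNC_STPINFO_VALID */
static int cached_stratum;       /* analogue of stp_info.stratum */

/* Updater side: store the data and record whether it is valid. */
static void store_info(int stratum, int rc)
{
	pthread_mutex_lock(&work_mutex);
	info_valid = (rc == 0);
	if (rc == 0)
		cached_stratum = stratum;
	pthread_mutex_unlock(&work_mutex);
}

/* Reader side: the shape every sysfs show routine now has. */
static int show_stratum(char *buf, unsigned long len)
{
	int ret = -1;                /* stands in for -ENODATA */

	pthread_mutex_lock(&work_mutex);
	if (info_valid)
		ret = snprintf(buf, len, "%i\n", cached_stratum);
	pthread_mutex_unlock(&work_mutex);
	return ret;
}

int main(void)
{
	char buf[32];

	store_info(2, 0);
	if (show_stratum(buf, sizeof(buf)) > 0)
		printf("stratum: %s", buf);
	return 0;
}
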
vt_ioctl-fix-gio_unimap-regression.patch
hid-wacom-avoid-entering-wacom_wac_pen_report-for-pad-battery.patch
x86-mce-allow-for-copy_mc_fragile-symbol-checksum-to-be-generated.patch
+tty-serial-21285-fix-lockup-on-open.patch
+tty-serial-fsl_lpuart-ls1021a-has-a-fifo-size-of-16-words-like-ls1028a.patch
+tracing-fix-race-in-trace_open-and-buffer-resize-call.patch
+revert-vhost-vdpa-fix-page-pinning-leakage-in-error-path.patch
+powerpc-fix-random-segfault-when-freeing-hugetlb-range.patch
+udf-fix-memory-leak-when-mounting.patch
+dmaengine-dma-jz4780-fix-race-in-jz4780_dma_tx_status.patch
+vdpa_sim-fix-dma-mask.patch
+drm-shme-helpers-fix-dma_buf_mmap-forwarding-bug.patch
+iio-ltc2983-fix-of_node-refcounting.patch
+iio-adc-at91-sama5d2_adc-fix-dma-conversion-crash.patch
+iio-imu-inv_mpu6050-fix-dma-and-ts-alignment-and-data-leak-issues.patch
+iio-imu-st_lsm6dsx-check-st_lsm6dsx_shub_read_output-return.patch
+iio-light-si1145-fix-timestamp-alignment-and-prevent-data-leak.patch
+iio-adc-gyroadc-fix-leak-of-device-node-iterator.patch
+iio-ad7292-fix-of_node-refcounting.patch
+iio-adc-ti-adc0832-fix-alignment-issue-with-timestamp.patch
+iio-adc-ti-adc12138-fix-alignment-issue-with-timestamp.patch
+iio-imu-st_lsm6dsx-fix-alignment-and-data-leak-issues.patch
+iio-gyro-itg3200-fix-timestamp-alignment-and-prevent-data-leak.patch
+powerpc-drmem-make-lmb_size-64-bit.patch
+rcu-tasks-fix-grace-period-unlock-race-in-rcu-tasks-trace.patch
+rcu-tasks-fix-low-probability-task_struct-leak.patch
+rcu-tasks-enclose-task-list-scan-in-rcu_read_lock.patch
+mips-dec-restore-bootmem-reservation-for-firmware-working-memory-area.patch
+mips-configs-lb60-fix-defconfig-not-selecting-correct-board.patch
+s390-stp-add-locking-to-sysfs-functions.patch
+powerpc-rtas-restrict-rtas-requests-from-userspace.patch
+powerpc-warn-about-use-of-smt_snooze_delay.patch
+powerpc-memhotplug-make-lmb-size-64bit.patch
+powerpc-powernv-elog-fix-race-while-processing-opal-error-log-event.patch
+powerpc-powermac-fix-low_sleep_handler-with-kuap-and-kuep.patch
+powerpc-mce-avoid-nmi_enter-exit-in-real-mode-on-pseries-hash.patch
+powerpc-fix-undetected-data-corruption-with-p9n-dd2.1-vsx-ci-load-emulation.patch
+powerpc-32-fix-vmap-stack-do-not-activate-mmu-before-reading-task-struct.patch
+powerpc-32-fix-vmap-stack-properly-set-r1-before-activating-mmu.patch
+block-advance-iov_iter-on-bio_add_hw_page-failure.patch
+io_uring-use-type-appropriate-io_kiocb-handler-for-double-poll.patch
+remoteproc-fixup-coredump-debugfs-disable-request.patch
+gfs2-make-sure-we-don-t-miss-any-delayed-withdraws.patch
+gfs2-only-access-gl_delete-for-iopen-glocks.patch
+nfsv4-wait-for-stateid-updates-after-close-open_downgrade.patch
+nfsv4.2-support-exchgid4_flag_supp_fence_ops-4.2-exchange_id-flag.patch
+nfsd-add-missing-nfsv2-.pc_func-methods.patch
+ubifs-dent-fix-some-potential-memory-leaks-while-iterating-entries.patch
+ubifs-xattr-fix-some-potential-memory-leaks-while-iterating-entries.patch
+ubifs-journal-make-sure-to-not-dirty-twice-for-auth-nodes.patch
+ubifs-fix-a-memleak-after-dumping-authentication-mount-options.patch
+ubifs-don-t-parse-authentication-mount-options-in-remount-process.patch
+ubifs-mount_ubifs-release-authentication-resource-in-error-handling-path.patch
--- /dev/null
+From bbeb97464eefc65f506084fd9f18f21653e01137 Mon Sep 17 00:00:00 2001
+From: Gaurav Kohli <gkohli@codeaurora.org>
+Date: Tue, 6 Oct 2020 15:03:53 +0530
+Subject: tracing: Fix race in trace_open and buffer resize call
+
+From: Gaurav Kohli <gkohli@codeaurora.org>
+
+commit bbeb97464eefc65f506084fd9f18f21653e01137 upstream.
+
+The race below can occur if trace_open and a resize of the
+cpu buffer run in parallel on different cpus:
+CPUX CPUY
+ ring_buffer_resize
+ atomic_read(&buffer->resize_disabled)
+tracing_open
+tracing_reset_online_cpus
+ring_buffer_reset_cpu
+rb_reset_cpu
+ rb_update_pages
+ remove/insert pages
+resetting pointer
+
+This race can cause a data abort or sometimes an infinite loop in
+rb_remove_pages and rb_insert_pages while checking pages
+for sanity.
+
+Take the buffer lock to fix this.
+
+Link: https://lkml.kernel.org/r/1601976833-24377-1-git-send-email-gkohli@codeaurora.org
+
+Cc: stable@vger.kernel.org
+Fixes: b23d7a5f4a07a ("ring-buffer: speed up buffer resets by avoiding synchronize_rcu for each CPU")
+Signed-off-by: Gaurav Kohli <gkohli@codeaurora.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/ring_buffer.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -4866,6 +4866,9 @@ void ring_buffer_reset_cpu(struct trace_
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return;
+
++ /* prevent another thread from changing buffer sizes */
++ mutex_lock(&buffer->mutex);
++
+ atomic_inc(&cpu_buffer->resize_disabled);
+ atomic_inc(&cpu_buffer->record_disabled);
+
+@@ -4876,6 +4879,8 @@ void ring_buffer_reset_cpu(struct trace_
+
+ atomic_dec(&cpu_buffer->record_disabled);
+ atomic_dec(&cpu_buffer->resize_disabled);
++
++ mutex_unlock(&buffer->mutex);
+ }
+ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
+
+@@ -4889,6 +4894,9 @@ void ring_buffer_reset_online_cpus(struc
+ struct ring_buffer_per_cpu *cpu_buffer;
+ int cpu;
+
++ /* prevent another thread from changing buffer sizes */
++ mutex_lock(&buffer->mutex);
++
+ for_each_online_buffer_cpu(buffer, cpu) {
+ cpu_buffer = buffer->buffers[cpu];
+
+@@ -4907,6 +4915,8 @@ void ring_buffer_reset_online_cpus(struc
+ atomic_dec(&cpu_buffer->record_disabled);
+ atomic_dec(&cpu_buffer->resize_disabled);
+ }
++
++ mutex_unlock(&buffer->mutex);
+ }
+
+ /**
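
The fix is simply that reset and resize now serialize on the same
buffer-wide mutex.  A minimal userspace analogy with pthreads; the
buffer type is hypothetical, not the ring-buffer implementation:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct buffer {
	pthread_mutex_t mutex;
	size_t nr_pages;
	int *pages;
};

static void buffer_resize(struct buffer *b, size_t nr_pages)
{
	pthread_mutex_lock(&b->mutex);
	b->pages = realloc(b->pages, nr_pages * sizeof(*b->pages));
	b->nr_pages = nr_pages;
	pthread_mutex_unlock(&b->mutex);
}

static void buffer_reset(struct buffer *b)
{
	size_t i;

	pthread_mutex_lock(&b->mutex);      /* the lock the patch adds */
	for (i = 0; i < b->nr_pages; i++)
		b->pages[i] = 0;
	pthread_mutex_unlock(&b->mutex);
}

int main(void)
{
	struct buffer b = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };

	buffer_resize(&b, 8);
	buffer_reset(&b);
	printf("reset %zu pages\n", b.nr_pages);
	free(b.pages);
	return 0;
}
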
--- /dev/null
+From 82776f6c75a90e1d2103e689b84a689de8f1aa02 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Sun, 18 Oct 2020 09:42:04 +0100
+Subject: tty: serial: 21285: fix lockup on open
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+commit 82776f6c75a90e1d2103e689b84a689de8f1aa02 upstream.
+
+Commit 293f89959483 ("tty: serial: 21285: stop using the unused[]
+variable from struct uart_port") introduced a bug which prevents the
+transmit interrupt from being disabled when there are no characters to
+transmit - disabling the transmit interrupt at the interrupt controller
+is the only way to stop an interrupt storm. If this interrupt is not
+disabled when there are no transmit characters, we end up with an
+interrupt storm which prevents the machine from making forward progress.
+
+Fixes: 293f89959483 ("tty: serial: 21285: stop using the unused[] variable from struct uart_port")
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Cc: stable <stable@vger.kernel.org>
+Link: https://lore.kernel.org/r/E1kU4GS-0006lE-OO@rmk-PC.armlinux.org.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/21285.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/tty/serial/21285.c
++++ b/drivers/tty/serial/21285.c
+@@ -50,25 +50,25 @@ static const char serial21285_name[] = "
+
+ static bool is_enabled(struct uart_port *port, int bit)
+ {
+- unsigned long private_data = (unsigned long)port->private_data;
++ unsigned long *private_data = (unsigned long *)&port->private_data;
+
+- if (test_bit(bit, &private_data))
++ if (test_bit(bit, private_data))
+ return true;
+ return false;
+ }
+
+ static void enable(struct uart_port *port, int bit)
+ {
+- unsigned long private_data = (unsigned long)port->private_data;
++ unsigned long *private_data = (unsigned long *)&port->private_data;
+
+- set_bit(bit, &private_data);
++ set_bit(bit, private_data);
+ }
+
+ static void disable(struct uart_port *port, int bit)
+ {
+- unsigned long private_data = (unsigned long)port->private_data;
++ unsigned long *private_data = (unsigned long *)&port->private_data;
+
+- clear_bit(bit, &private_data);
++ clear_bit(bit, private_data);
+ }
+
+ #define is_tx_enabled(port) is_enabled(port, tx_enabled_bit)
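
The bug class here is operating on a copy: the bit operations were
applied to a local unsigned long holding the value of
port->private_data, so updates never reached the stored flags.  A small
userspace sketch of the difference, assuming (as on this hardware) that
a pointer and an unsigned long have the same size:

#include <stdio.h>

/* Clears bit 0 in a *copy* of the flags word: the caller's storage is
 * never touched, which is how the tx-disable request got lost. */
static void disable_on_copy(void *private_data)
{
	unsigned long flags = (unsigned long)private_data;

	flags &= ~1UL;
	(void)flags;                  /* change is confined to the copy */
}

/* Clears bit 0 in the storage itself, as the fixed driver does by
 * taking the address of port->private_data. */
static void disable_in_place(void **private_data)
{
	unsigned long *flags = (unsigned long *)private_data;

	*flags &= ~1UL;
}

int main(void)
{
	void *private_data = (void *)1UL;     /* "tx enabled" bit set */

	disable_on_copy(private_data);
	printf("after copy-based clear: %lu\n", (unsigned long)private_data);
	disable_in_place(&private_data);
	printf("after in-place clear:   %lu\n", (unsigned long)private_data);
	return 0;
}
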
--- /dev/null
+From c97f2a6fb3dfbfbbc88edc8ea62ef2b944e18849 Mon Sep 17 00:00:00 2001
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+Date: Fri, 23 Oct 2020 04:34:29 +0300
+Subject: tty: serial: fsl_lpuart: LS1021A has a FIFO size of 16 words, like LS1028A
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+commit c97f2a6fb3dfbfbbc88edc8ea62ef2b944e18849 upstream.
+
+Prior to the commit that this one fixes, the FIFO size was derived from
+the read-only register LPUARTx_FIFO[TXFIFOSIZE] using the following
+formula:
+
+TX FIFO size = 2 ^ (LPUARTx_FIFO[TXFIFOSIZE] - 1)
+
+The documentation for LS1021A is a mess. Under chapter 26.1.3 LS1021A
+LPUART module special consideration, it mentions TXFIFO_SZ and RXFIFO_SZ
+being equal to 4, and in the register description for LPUARTx_FIFO, it
+shows the out-of-reset value of TXFIFOSIZE and RXFIFOSIZE fields as "011",
+even though these registers read as "101" in reality.
+
+And when LPUART on LS1021A was working, the "101" value did correspond
+to "16 datawords", by applying the formula above, even though the
+documentation is wrong again (!!!!) and says that "101" means 64 datawords
+(hint: it doesn't).
+
+So the "new" formula created by commit f77ebb241ce0 has all the premises
+of being wrong for LS1021A, because it relied only on false data and no
+actual experimentation.
+
+Interestingly, in commit c2f448cff22a ("tty: serial: fsl_lpuart: add
+LS1028A support"), Michael Walle applied a workaround to this by manually
+setting the FIFO widths for LS1028A. It looks like the same values are
+used by LS1021A as well, in fact.
+
+When the driver thinks that it has a deeper FIFO than it really has,
+getty (user space) output gets truncated.
+
+Many thanks to Michael for pointing out where to look.
+
+Fixes: f77ebb241ce0 ("tty: serial: fsl_lpuart: correct the FIFO depth size")
+Suggested-by: Michael Walle <michael@walle.cc>
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Link: https://lore.kernel.org/r/20201023013429.3551026-1-vladimir.oltean@nxp.com
+Reviewed-by: Fugang Duan <fugang.duan@nxp.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/tty/serial/fsl_lpuart.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -314,9 +314,10 @@ MODULE_DEVICE_TABLE(of, lpuart_dt_ids);
+ /* Forward declare this for the dma callbacks*/
+ static void lpuart_dma_tx_complete(void *arg);
+
+-static inline bool is_ls1028a_lpuart(struct lpuart_port *sport)
++static inline bool is_layerscape_lpuart(struct lpuart_port *sport)
+ {
+- return sport->devtype == LS1028A_LPUART;
++ return (sport->devtype == LS1021A_LPUART ||
++ sport->devtype == LS1028A_LPUART);
+ }
+
+ static inline bool is_imx8qxp_lpuart(struct lpuart_port *sport)
+@@ -1644,11 +1645,11 @@ static int lpuart32_startup(struct uart_
+ UARTFIFO_FIFOSIZE_MASK);
+
+ /*
+- * The LS1028A has a fixed length of 16 words. Although it supports the
+- * RX/TXSIZE fields their encoding is different. Eg the reference manual
+- * states 0b101 is 16 words.
++ * The LS1021A and LS1028A have a fixed FIFO depth of 16 words.
++ * Although they support the RX/TXSIZE fields, their encoding is
++ * different. Eg the reference manual states 0b101 is 16 words.
+ */
+- if (is_ls1028a_lpuart(sport)) {
++ if (is_layerscape_lpuart(sport)) {
+ sport->rxfifo_size = 16;
+ sport->txfifo_size = 16;
+ sport->port.fifosize = sport->txfifo_size;
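
The arithmetic behind the discussion above is easy to check: with the
old formula, the field value the hardware actually reports (0b101)
already means 16 datawords, and 64 datawords would require a field of
0b111.  A throwaway C check with a hypothetical helper name:

#include <stdio.h>

/* depth = 2 ^ (TXFIFOSIZE - 1), the formula the fixed commit relied on */
static unsigned int fifo_depth_from_field(unsigned int field)
{
	return 1u << (field - 1);
}

int main(void)
{
	printf("field 0b101 -> %u datawords\n", fifo_depth_from_field(0x5));
	printf("field 0b011 -> %u datawords\n", fifo_depth_from_field(0x3));
	printf("field 0b111 -> %u datawords\n", fifo_depth_from_field(0x7));
	return 0;
}
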
--- /dev/null
+From 58f6e78a65f1fcbf732f60a7478ccc99873ff3ba Mon Sep 17 00:00:00 2001
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+Date: Mon, 1 Jun 2020 17:10:37 +0800
+Subject: ubifs: dent: Fix some potential memory leaks while iterating entries
+
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+
+commit 58f6e78a65f1fcbf732f60a7478ccc99873ff3ba upstream.
+
+Fix some potential memory leaks in error handling branches while
+iterating dent entries. For example, function dbg_check_dir()
+forgets to free pdent if it exists.
+
+Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Cc: <stable@vger.kernel.org>
+Fixes: 1e51764a3c2ac05a2 ("UBIFS: add new flash file system")
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ubifs/debug.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/ubifs/debug.c
++++ b/fs/ubifs/debug.c
+@@ -1123,6 +1123,7 @@ int dbg_check_dir(struct ubifs_info *c,
+ err = PTR_ERR(dent);
+ if (err == -ENOENT)
+ break;
++ kfree(pdent);
+ return err;
+ }
+
--- /dev/null
+From bb674a4d4de1032837fcbf860a63939e66f0b7ad Mon Sep 17 00:00:00 2001
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+Date: Tue, 29 Sep 2020 20:45:30 +0800
+Subject: ubifs: Don't parse authentication mount options in remount process
+
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+
+commit bb674a4d4de1032837fcbf860a63939e66f0b7ad upstream.
+
+There is no need to parse authentication options while remounting,
+because authentication initialization can only be done once in
+the first mount process. Parsing authentication mount options in
+the remount process may cause a memory leak if UBIFS has already been
+mounted with old authentication mount options.
+
+Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Cc: <stable@vger.kernel.org> # 4.20+
+Fixes: d8a22773a12c6d7 ("ubifs: Enable authentication support")
+Reviewed-by: Sascha Hauer <s.hauer@pengutronix.de>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ubifs/super.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -1110,14 +1110,20 @@ static int ubifs_parse_options(struct ub
+ break;
+ }
+ case Opt_auth_key:
+- c->auth_key_name = kstrdup(args[0].from, GFP_KERNEL);
+- if (!c->auth_key_name)
+- return -ENOMEM;
++ if (!is_remount) {
++ c->auth_key_name = kstrdup(args[0].from,
++ GFP_KERNEL);
++ if (!c->auth_key_name)
++ return -ENOMEM;
++ }
+ break;
+ case Opt_auth_hash_name:
+- c->auth_hash_name = kstrdup(args[0].from, GFP_KERNEL);
+- if (!c->auth_hash_name)
+- return -ENOMEM;
++ if (!is_remount) {
++ c->auth_hash_name = kstrdup(args[0].from,
++ GFP_KERNEL);
++ if (!c->auth_hash_name)
++ return -ENOMEM;
++ }
+ break;
+ case Opt_ignore:
+ break;
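
The rule the patch encodes is: duplicate the option string only on the
first mount; on remount the previously allocated string is kept, so
overwriting it would leak.  A userspace sketch of that guard, with an
invented context struct and strdup() standing in for kstrdup():

#include <stdlib.h>
#include <string.h>

struct ctx { char *auth_key_name; };

static int parse_auth_key(struct ctx *c, const char *arg, int is_remount)
{
	if (is_remount)
		return 0;                 /* keep the original allocation */
	c->auth_key_name = strdup(arg);
	return c->auth_key_name ? 0 : -12;   /* -ENOMEM */
}

int main(void)
{
	struct ctx c = { NULL };

	parse_auth_key(&c, "key1", 0);    /* first mount: allocates */
	parse_auth_key(&c, "key1", 1);    /* remount: no new allocation */
	free(c.auth_key_name);
	return 0;
}
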
--- /dev/null
+From 47f6d9ce45b03a40c34b668a9884754c58122b39 Mon Sep 17 00:00:00 2001
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+Date: Tue, 29 Sep 2020 20:45:29 +0800
+Subject: ubifs: Fix a memleak after dumping authentication mount options
+
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+
+commit 47f6d9ce45b03a40c34b668a9884754c58122b39 upstream.
+
+Fix a memory leak after parsing authentication mount options in the
+error handling branch.
+
+Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Cc: <stable@vger.kernel.org> # 4.20+
+Fixes: d8a22773a12c6d7 ("ubifs: Enable authentication support")
+Reviewed-by: Sascha Hauer <s.hauer@pengutronix.de>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ubifs/super.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -1141,6 +1141,18 @@ static int ubifs_parse_options(struct ub
+ return 0;
+ }
+
++/*
++ * ubifs_release_options - release mount parameters which have been dumped.
++ * @c: UBIFS file-system description object
++ */
++static void ubifs_release_options(struct ubifs_info *c)
++{
++ kfree(c->auth_key_name);
++ c->auth_key_name = NULL;
++ kfree(c->auth_hash_name);
++ c->auth_hash_name = NULL;
++}
++
+ /**
+ * destroy_journal - destroy journal data structures.
+ * @c: UBIFS file-system description object
+@@ -1650,8 +1662,7 @@ static void ubifs_umount(struct ubifs_in
+ ubifs_lpt_free(c, 0);
+ ubifs_exit_authentication(c);
+
+- kfree(c->auth_key_name);
+- kfree(c->auth_hash_name);
++ ubifs_release_options(c);
+ kfree(c->cbuf);
+ kfree(c->rcvrd_mst_node);
+ kfree(c->mst_node);
+@@ -2219,6 +2230,7 @@ out_umount:
+ out_unlock:
+ mutex_unlock(&c->umount_mutex);
+ out_close:
++ ubifs_release_options(c);
+ ubi_close_volume(c->ubi);
+ out:
+ return err;
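
Because the new helper frees and then NULLs both strings, it can be
called from the early-failure path and again from the normal teardown
path without double-freeing, since freeing a NULL pointer is a no-op.
A minimal sketch of that idempotent-release idiom, with invented names:

#include <stdlib.h>

struct opts {
	char *auth_key_name;
	char *auth_hash_name;
};

static void release_options(struct opts *c)
{
	free(c->auth_key_name);
	c->auth_key_name = NULL;
	free(c->auth_hash_name);
	c->auth_hash_name = NULL;
}

int main(void)
{
	struct opts c = { malloc(8), malloc(8) };

	release_options(&c);   /* error path */
	release_options(&c);   /* later teardown: still safe, frees nothing */
	return 0;
}
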
--- /dev/null
+From 78c7d49f55d8631b67c09f9bfbe8155211a9ea06 Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <richard@nod.at>
+Date: Mon, 28 Sep 2020 20:58:59 +0200
+Subject: ubifs: journal: Make sure to not dirty twice for auth nodes
+
+From: Richard Weinberger <richard@nod.at>
+
+commit 78c7d49f55d8631b67c09f9bfbe8155211a9ea06 upstream.
+
+When removing the last reference of an inode, the size of an auth node
+is already part of write_len, so we must not call ubifs_add_auth_dirt().
+Call it only when needed.
+
+Cc: <stable@vger.kernel.org>
+Cc: Sascha Hauer <s.hauer@pengutronix.de>
+Cc: Kristof Havasi <havasiefr@gmail.com>
+Fixes: 6a98bc4614de ("ubifs: Add authentication nodes to journal")
+Reported-and-tested-by: Kristof Havasi <havasiefr@gmail.com>
+Reviewed-by: Sascha Hauer <s.hauer@pengutronix.de>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ubifs/journal.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/ubifs/journal.c
++++ b/fs/ubifs/journal.c
+@@ -938,8 +938,6 @@ int ubifs_jnl_write_inode(struct ubifs_i
+ inode->i_ino);
+ release_head(c, BASEHD);
+
+- ubifs_add_auth_dirt(c, lnum);
+-
+ if (last_reference) {
+ err = ubifs_tnc_remove_ino(c, inode->i_ino);
+ if (err)
+@@ -949,6 +947,8 @@ int ubifs_jnl_write_inode(struct ubifs_i
+ } else {
+ union ubifs_key key;
+
++ ubifs_add_auth_dirt(c, lnum);
++
+ ino_key_init(c, &key, inode->i_ino);
+ err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash);
+ }
--- /dev/null
+From e2a05cc7f8229e150243cdae40f2af9021d67a4a Mon Sep 17 00:00:00 2001
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+Date: Tue, 29 Sep 2020 20:45:31 +0800
+Subject: ubifs: mount_ubifs: Release authentication resource in error handling path
+
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+
+commit e2a05cc7f8229e150243cdae40f2af9021d67a4a upstream.
+
+Release the authentication-related resources in some error handling
+branches of mount_ubifs().
+
+Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Cc: <stable@vger.kernel.org> # 4.20+
+Fixes: d8a22773a12c6d7 ("ubifs: Enable authentication support")
+Reviewed-by: Sascha Hauer <s.hauer@pengutronix.de>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ubifs/super.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -1331,7 +1331,7 @@ static int mount_ubifs(struct ubifs_info
+
+ err = ubifs_read_superblock(c);
+ if (err)
+- goto out_free;
++ goto out_auth;
+
+ c->probing = 0;
+
+@@ -1343,18 +1343,18 @@ static int mount_ubifs(struct ubifs_info
+ ubifs_err(c, "'compressor \"%s\" is not compiled in",
+ ubifs_compr_name(c, c->default_compr));
+ err = -ENOTSUPP;
+- goto out_free;
++ goto out_auth;
+ }
+
+ err = init_constants_sb(c);
+ if (err)
+- goto out_free;
++ goto out_auth;
+
+ sz = ALIGN(c->max_idx_node_sz, c->min_io_size) * 2;
+ c->cbuf = kmalloc(sz, GFP_NOFS);
+ if (!c->cbuf) {
+ err = -ENOMEM;
+- goto out_free;
++ goto out_auth;
+ }
+
+ err = alloc_wbufs(c);
+@@ -1629,6 +1629,8 @@ out_wbufs:
+ free_wbufs(c);
+ out_cbuf:
+ kfree(c->cbuf);
++out_auth:
++ ubifs_exit_authentication(c);
+ out_free:
+ kfree(c->write_reserve_buf);
+ kfree(c->bu.buf);
--- /dev/null
+From f2aae745b82c842221f4f233051f9ac641790959 Mon Sep 17 00:00:00 2001
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+Date: Mon, 1 Jun 2020 17:10:36 +0800
+Subject: ubifs: xattr: Fix some potential memory leaks while iterating entries
+
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+
+commit f2aae745b82c842221f4f233051f9ac641790959 upstream.
+
+Fix some potential memory leaks in error handling branches while
+iterating xattr entries. For example, function ubifs_tnc_remove_ino()
+forgets to free pxent if it exists. Similar problems also exist in
+ubifs_purge_xattrs(), ubifs_add_orphan() and ubifs_jnl_write_inode().
+
+Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Cc: <stable@vger.kernel.org>
+Fixes: 1e51764a3c2ac05a2 ("UBIFS: add new flash file system")
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ubifs/journal.c | 2 ++
+ fs/ubifs/orphan.c | 2 ++
+ fs/ubifs/tnc.c | 3 +++
+ fs/ubifs/xattr.c | 2 ++
+ 4 files changed, 9 insertions(+)
+
+--- a/fs/ubifs/journal.c
++++ b/fs/ubifs/journal.c
+@@ -894,6 +894,7 @@ int ubifs_jnl_write_inode(struct ubifs_i
+ if (err == -ENOENT)
+ break;
+
++ kfree(pxent);
+ goto out_release;
+ }
+
+@@ -906,6 +907,7 @@ int ubifs_jnl_write_inode(struct ubifs_i
+ ubifs_err(c, "dead directory entry '%s', error %d",
+ xent->name, err);
+ ubifs_ro_mode(c, err);
++ kfree(pxent);
+ kfree(xent);
+ goto out_release;
+ }
+--- a/fs/ubifs/orphan.c
++++ b/fs/ubifs/orphan.c
+@@ -173,6 +173,7 @@ int ubifs_add_orphan(struct ubifs_info *
+ err = PTR_ERR(xent);
+ if (err == -ENOENT)
+ break;
++ kfree(pxent);
+ return err;
+ }
+
+@@ -182,6 +183,7 @@ int ubifs_add_orphan(struct ubifs_info *
+
+ xattr_orphan = orphan_add(c, xattr_inum, orphan);
+ if (IS_ERR(xattr_orphan)) {
++ kfree(pxent);
+ kfree(xent);
+ return PTR_ERR(xattr_orphan);
+ }
+--- a/fs/ubifs/tnc.c
++++ b/fs/ubifs/tnc.c
+@@ -2885,6 +2885,7 @@ int ubifs_tnc_remove_ino(struct ubifs_in
+ err = PTR_ERR(xent);
+ if (err == -ENOENT)
+ break;
++ kfree(pxent);
+ return err;
+ }
+
+@@ -2898,6 +2899,7 @@ int ubifs_tnc_remove_ino(struct ubifs_in
+ fname_len(&nm) = le16_to_cpu(xent->nlen);
+ err = ubifs_tnc_remove_nm(c, &key1, &nm);
+ if (err) {
++ kfree(pxent);
+ kfree(xent);
+ return err;
+ }
+@@ -2906,6 +2908,7 @@ int ubifs_tnc_remove_ino(struct ubifs_in
+ highest_ino_key(c, &key2, xattr_inum);
+ err = ubifs_tnc_remove_range(c, &key1, &key2);
+ if (err) {
++ kfree(pxent);
+ kfree(xent);
+ return err;
+ }
+--- a/fs/ubifs/xattr.c
++++ b/fs/ubifs/xattr.c
+@@ -522,6 +522,7 @@ int ubifs_purge_xattrs(struct inode *hos
+ xent->name, err);
+ ubifs_ro_mode(c, err);
+ kfree(pxent);
++ kfree(xent);
+ return err;
+ }
+
+@@ -531,6 +532,7 @@ int ubifs_purge_xattrs(struct inode *hos
+ err = remove_xattr(c, host, xino, &nm);
+ if (err) {
+ kfree(pxent);
++ kfree(xent);
+ iput(xino);
+ ubifs_err(c, "cannot remove xattr, error %d", err);
+ return err;
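
All of the hunks above plug the same pattern: the scan keeps the
previous entry (pxent/pdent) alive across iterations, so every early
exit must free it along with the current entry.  A userspace sketch of
the walk, with a hypothetical next_ent() standing in for
ubifs_tnc_next_ent():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dent { char name[16]; };

/* Hypothetical iterator: yields two entries, then reports an error so
 * the cleanup path below is exercised. */
static struct dent *next_ent(int *err)
{
	static const char *names[] = { "a", "b" };
	static unsigned int idx;
	struct dent *d;

	if (idx >= 2) {
		*err = -2;            /* analogue of a lookup failure */
		return NULL;
	}
	d = calloc(1, sizeof(*d));
	strcpy(d->name, names[idx++]);
	*err = 0;
	return d;
}

int main(void)
{
	struct dent *dent, *pdent = NULL;
	int err = 0;

	while ((dent = next_ent(&err)) != NULL) {
		printf("entry %s\n", dent->name);
		free(pdent);          /* release the previous entry... */
		pdent = dent;         /* ...and keep the current one alive */
	}
	free(pdent);                  /* the free the patches add on exit paths */
	return err ? 1 : 0;
}
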
--- /dev/null
+From a7be300de800e755714c71103ae4a0d205e41e99 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Tue, 22 Sep 2020 12:20:14 +0200
+Subject: udf: Fix memory leak when mounting
+
+From: Jan Kara <jack@suse.cz>
+
+commit a7be300de800e755714c71103ae4a0d205e41e99 upstream.
+
+udf_process_sequence() allocates a temporary array for processing
+partition descriptors on the volume, which it fails to free. Free the
+array when it is no longer needed.
+
+Fixes: 7b78fd02fb19 ("udf: Fix handling of Partition Descriptors")
+CC: stable@vger.kernel.org
+Reported-by: syzbot+128f4dd6e796c98b3760@syzkaller.appspotmail.com
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/udf/super.c | 21 +++++++++++++--------
+ 1 file changed, 13 insertions(+), 8 deletions(-)
+
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1704,7 +1704,8 @@ static noinline int udf_process_sequence
+ "Pointers (max %u supported)\n",
+ UDF_MAX_TD_NESTING);
+ brelse(bh);
+- return -EIO;
++ ret = -EIO;
++ goto out;
+ }
+
+ vdp = (struct volDescPtr *)bh->b_data;
+@@ -1724,7 +1725,8 @@ static noinline int udf_process_sequence
+ curr = get_volume_descriptor_record(ident, bh, &data);
+ if (IS_ERR(curr)) {
+ brelse(bh);
+- return PTR_ERR(curr);
++ ret = PTR_ERR(curr);
++ goto out;
+ }
+ /* Descriptor we don't care about? */
+ if (!curr)
+@@ -1746,28 +1748,31 @@ static noinline int udf_process_sequence
+ */
+ if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) {
+ udf_err(sb, "Primary Volume Descriptor not found!\n");
+- return -EAGAIN;
++ ret = -EAGAIN;
++ goto out;
+ }
+ ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block);
+ if (ret < 0)
+- return ret;
++ goto out;
+
+ if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) {
+ ret = udf_load_logicalvol(sb,
+ data.vds[VDS_POS_LOGICAL_VOL_DESC].block,
+ fileset);
+ if (ret < 0)
+- return ret;
++ goto out;
+ }
+
+ /* Now handle prevailing Partition Descriptors */
+ for (i = 0; i < data.num_part_descs; i++) {
+ ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
+ if (ret < 0)
+- return ret;
++ goto out;
+ }
+-
+- return 0;
++ ret = 0;
++out:
++ kfree(data.part_descs_loc);
++ return ret;
+ }
+
+ /*
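
The fix converts each early return into a jump to a single exit label so
the temporary array is freed on every path.  A compact sketch of that
goto-based cleanup idiom, using a hypothetical helper:

#include <stdlib.h>

static int process_sequence(int fail_early)
{
	int ret = 0;
	int *part_descs = calloc(16, sizeof(*part_descs));

	if (!part_descs)
		return -12;            /* -ENOMEM: nothing to clean up yet */
	if (fail_early) {
		ret = -5;              /* -EIO */
		goto out;              /* was: return -EIO (leaked the array) */
	}
	/* ... use part_descs ... */
out:
	free(part_descs);              /* freed on success and on error */
	return ret;
}

int main(void)
{
	int ok = process_sequence(0);      /* success path */
	int fail = process_sequence(1);    /* early-failure path, no leak */

	return (ok == 0 && fail == -5) ? 0 : 1;
}
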
--- /dev/null
+From 1eca16b231570c8ae57fb91fdfbc48eb52c6a93b Mon Sep 17 00:00:00 2001
+From: Laurent Vivier <lvivier@redhat.com>
+Date: Tue, 27 Oct 2020 18:59:14 +0100
+Subject: vdpa_sim: Fix DMA mask
+
+From: Laurent Vivier <lvivier@redhat.com>
+
+commit 1eca16b231570c8ae57fb91fdfbc48eb52c6a93b upstream.
+
+Since commit f959dcd6ddfd
+("dma-direct: Fix potential NULL pointer dereference")
+an error is reported when we load vdpa_sim and virtio-vdpa:
+
+[ 129.351207] net eth0: Unexpected TXQ (0) queue failure: -12
+
+It seems that dma_mask is not initialized.
+
+This patch initializes dma_mask and calls dma_set_mask_and_coherent()
+to fix the problem.
+
+Full log:
+
+[ 128.548628] ------------[ cut here ]------------
+[ 128.553268] WARNING: CPU: 23 PID: 1105 at kernel/dma/mapping.c:149 dma_map_page_attrs+0x14c/0x1d0
+[ 128.562139] Modules linked in: virtio_net net_failover failover virtio_vdpa vdpa_sim vringh vhost_iotlb vdpa xt_CHECKSUM xt_MASQUERADE xt_conntrack ipt_REJECT nf_reject_ipv4 nft_compat nft_counter nft_chain_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 nf_tables nfnetlink tun bridge stp llc iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi rfkill intel_rapl_msr intel_rapl_common isst_if_common sunrpc skx_edac nfit libnvdimm x86_pkg_temp_thermal intel_powerclamp coretemp kvm_intel ipmi_ssif kvm mgag200 i2c_algo_bit irqbypass drm_kms_helper crct10dif_pclmul crc32_pclmul syscopyarea ghash_clmulni_intel iTCO_wdt sysfillrect iTCO_vendor_support sysimgblt rapl fb_sys_fops dcdbas intel_cstate drm acpi_ipmi ipmi_si mei_me dell_smbios intel_uncore ipmi_devintf mei i2c_i801 dell_wmi_descriptor wmi_bmof pcspkr lpc_ich i2c_smbus ipmi_msghandler acpi_power_meter ip_tables xfs libcrc32c sd_mod t10_pi sg ahci libahci libata megaraid_sas tg3 crc32c_intel wmi dm_mirror dm_region_hash dm_log
+[ 128.562188] dm_mod
+[ 128.651334] CPU: 23 PID: 1105 Comm: NetworkManager Tainted: G S I 5.10.0-rc1+ #59
+[ 128.659939] Hardware name: Dell Inc. PowerEdge R440/04JN2K, BIOS 2.8.1 06/30/2020
+[ 128.667419] RIP: 0010:dma_map_page_attrs+0x14c/0x1d0
+[ 128.672384] Code: 1c 25 28 00 00 00 0f 85 97 00 00 00 48 83 c4 10 5b 5d 41 5c 41 5d c3 4c 89 da eb d7 48 89 f2 48 2b 50 18 48 89 d0 eb 8d 0f 0b <0f> 0b 48 c7 c0 ff ff ff ff eb c3 48 89 d9 48 8b 40 40 e8 2d a0 aa
+[ 128.691131] RSP: 0018:ffffae0f0151f3c8 EFLAGS: 00010246
+[ 128.696357] RAX: ffffffffc06b7400 RBX: 00000000000005fa RCX: 0000000000000000
+[ 128.703488] RDX: 0000000000000040 RSI: ffffcee3c7861200 RDI: ffff9e2bc16cd000
+[ 128.710620] RBP: 0000000000000000 R08: 0000000000000002 R09: 0000000000000000
+[ 128.717754] R10: 0000000000000002 R11: 0000000000000000 R12: ffff9e472cb291f8
+[ 128.724886] R13: ffff9e2bc14da780 R14: ffff9e472bc20000 R15: ffff9e2bc1b14940
+[ 128.732020] FS: 00007f887bae23c0(0000) GS:ffff9e4ac01c0000(0000) knlGS:0000000000000000
+[ 128.740105] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 128.745852] CR2: 0000562bc09de998 CR3: 00000003c156c006 CR4: 00000000007706e0
+[ 128.752982] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 128.760114] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 128.767247] PKRU: 55555554
+[ 128.769961] Call Trace:
+[ 128.772418] virtqueue_add+0x81e/0xb00
+[ 128.776176] virtqueue_add_inbuf_ctx+0x26/0x30
+[ 128.780625] try_fill_recv+0x3a2/0x6e0 [virtio_net]
+[ 128.785509] virtnet_open+0xf9/0x180 [virtio_net]
+[ 128.790217] __dev_open+0xe8/0x180
+[ 128.793620] __dev_change_flags+0x1a7/0x210
+[ 128.797808] dev_change_flags+0x21/0x60
+[ 128.801646] do_setlink+0x328/0x10e0
+[ 128.805227] ? __nla_validate_parse+0x121/0x180
+[ 128.809757] ? __nla_parse+0x21/0x30
+[ 128.813338] ? inet6_validate_link_af+0x5c/0xf0
+[ 128.817871] ? cpumask_next+0x17/0x20
+[ 128.821535] ? __snmp6_fill_stats64.isra.54+0x6b/0x110
+[ 128.826676] ? __nla_validate_parse+0x47/0x180
+[ 128.831120] __rtnl_newlink+0x541/0x8e0
+[ 128.834962] ? __nla_reserve+0x38/0x50
+[ 128.838713] ? security_sock_rcv_skb+0x2a/0x40
+[ 128.843158] ? netlink_deliver_tap+0x2c/0x1e0
+[ 128.847518] ? netlink_attachskb+0x1d8/0x220
+[ 128.851793] ? skb_queue_tail+0x1b/0x50
+[ 128.855641] ? fib6_clean_node+0x43/0x170
+[ 128.859652] ? _cond_resched+0x15/0x30
+[ 128.863406] ? kmem_cache_alloc_trace+0x3a3/0x420
+[ 128.868110] rtnl_newlink+0x43/0x60
+[ 128.871602] rtnetlink_rcv_msg+0x12c/0x380
+[ 128.875701] ? rtnl_calcit.isra.39+0x110/0x110
+[ 128.880147] netlink_rcv_skb+0x50/0x100
+[ 128.883987] netlink_unicast+0x1a5/0x280
+[ 128.887913] netlink_sendmsg+0x23d/0x470
+[ 128.891839] sock_sendmsg+0x5b/0x60
+[ 128.895331] ____sys_sendmsg+0x1ef/0x260
+[ 128.899255] ? copy_msghdr_from_user+0x5c/0x90
+[ 128.903702] ___sys_sendmsg+0x7c/0xc0
+[ 128.907369] ? dev_forward_change+0x130/0x130
+[ 128.911731] ? sysctl_head_finish.part.29+0x24/0x40
+[ 128.916616] ? new_sync_write+0x11f/0x1b0
+[ 128.920628] ? mntput_no_expire+0x47/0x240
+[ 128.924727] __sys_sendmsg+0x57/0xa0
+[ 128.928309] do_syscall_64+0x33/0x40
+[ 128.931887] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+[ 128.936937] RIP: 0033:0x7f88792e3857
+[ 128.940518] Code: c3 66 90 41 54 41 89 d4 55 48 89 f5 53 89 fb 48 83 ec 10 e8 0b ed ff ff 44 89 e2 48 89 ee 89 df 41 89 c0 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 35 44 89 c7 48 89 44 24 08 e8 44 ed ff ff 48
+[ 128.959263] RSP: 002b:00007ffdca60dea0 EFLAGS: 00000293 ORIG_RAX: 000000000000002e
+[ 128.966827] RAX: ffffffffffffffda RBX: 000000000000000c RCX: 00007f88792e3857
+[ 128.973960] RDX: 0000000000000000 RSI: 00007ffdca60def0 RDI: 000000000000000c
+[ 128.981095] RBP: 00007ffdca60def0 R08: 0000000000000000 R09: 0000000000000000
+[ 128.988224] R10: 0000000000000001 R11: 0000000000000293 R12: 0000000000000000
+[ 128.995357] R13: 0000000000000000 R14: 00007ffdca60e0a8 R15: 00007ffdca60e09c
+[ 129.002492] CPU: 23 PID: 1105 Comm: NetworkManager Tainted: G S I 5.10.0-rc1+ #59
+[ 129.011093] Hardware name: Dell Inc. PowerEdge R440/04JN2K, BIOS 2.8.1 06/30/2020
+[ 129.018571] Call Trace:
+[ 129.021027] dump_stack+0x57/0x6a
+[ 129.024346] __warn.cold.14+0xe/0x3d
+[ 129.027925] ? dma_map_page_attrs+0x14c/0x1d0
+[ 129.032283] report_bug+0xbd/0xf0
+[ 129.035602] handle_bug+0x44/0x80
+[ 129.038922] exc_invalid_op+0x13/0x60
+[ 129.042589] asm_exc_invalid_op+0x12/0x20
+[ 129.046602] RIP: 0010:dma_map_page_attrs+0x14c/0x1d0
+[ 129.051566] Code: 1c 25 28 00 00 00 0f 85 97 00 00 00 48 83 c4 10 5b 5d 41 5c 41 5d c3 4c 89 da eb d7 48 89 f2 48 2b 50 18 48 89 d0 eb 8d 0f 0b <0f> 0b 48 c7 c0 ff ff ff ff eb c3 48 89 d9 48 8b 40 40 e8 2d a0 aa
+[ 129.070311] RSP: 0018:ffffae0f0151f3c8 EFLAGS: 00010246
+[ 129.075536] RAX: ffffffffc06b7400 RBX: 00000000000005fa RCX: 0000000000000000
+[ 129.082669] RDX: 0000000000000040 RSI: ffffcee3c7861200 RDI: ffff9e2bc16cd000
+[ 129.089803] RBP: 0000000000000000 R08: 0000000000000002 R09: 0000000000000000
+[ 129.096936] R10: 0000000000000002 R11: 0000000000000000 R12: ffff9e472cb291f8
+[ 129.104068] R13: ffff9e2bc14da780 R14: ffff9e472bc20000 R15: ffff9e2bc1b14940
+[ 129.111200] virtqueue_add+0x81e/0xb00
+[ 129.114952] virtqueue_add_inbuf_ctx+0x26/0x30
+[ 129.119399] try_fill_recv+0x3a2/0x6e0 [virtio_net]
+[ 129.124280] virtnet_open+0xf9/0x180 [virtio_net]
+[ 129.128984] __dev_open+0xe8/0x180
+[ 129.132390] __dev_change_flags+0x1a7/0x210
+[ 129.136575] dev_change_flags+0x21/0x60
+[ 129.140415] do_setlink+0x328/0x10e0
+[ 129.143994] ? __nla_validate_parse+0x121/0x180
+[ 129.148528] ? __nla_parse+0x21/0x30
+[ 129.152107] ? inet6_validate_link_af+0x5c/0xf0
+[ 129.156639] ? cpumask_next+0x17/0x20
+[ 129.160306] ? __snmp6_fill_stats64.isra.54+0x6b/0x110
+[ 129.165443] ? __nla_validate_parse+0x47/0x180
+[ 129.169890] __rtnl_newlink+0x541/0x8e0
+[ 129.173731] ? __nla_reserve+0x38/0x50
+[ 129.177483] ? security_sock_rcv_skb+0x2a/0x40
+[ 129.181928] ? netlink_deliver_tap+0x2c/0x1e0
+[ 129.186286] ? netlink_attachskb+0x1d8/0x220
+[ 129.190560] ? skb_queue_tail+0x1b/0x50
+[ 129.194401] ? fib6_clean_node+0x43/0x170
+[ 129.198411] ? _cond_resched+0x15/0x30
+[ 129.202163] ? kmem_cache_alloc_trace+0x3a3/0x420
+[ 129.206869] rtnl_newlink+0x43/0x60
+[ 129.210361] rtnetlink_rcv_msg+0x12c/0x380
+[ 129.214462] ? rtnl_calcit.isra.39+0x110/0x110
+[ 129.218908] netlink_rcv_skb+0x50/0x100
+[ 129.222747] netlink_unicast+0x1a5/0x280
+[ 129.226672] netlink_sendmsg+0x23d/0x470
+[ 129.230599] sock_sendmsg+0x5b/0x60
+[ 129.234090] ____sys_sendmsg+0x1ef/0x260
+[ 129.238015] ? copy_msghdr_from_user+0x5c/0x90
+[ 129.242461] ___sys_sendmsg+0x7c/0xc0
+[ 129.246128] ? dev_forward_change+0x130/0x130
+[ 129.250487] ? sysctl_head_finish.part.29+0x24/0x40
+[ 129.255368] ? new_sync_write+0x11f/0x1b0
+[ 129.259381] ? mntput_no_expire+0x47/0x240
+[ 129.263478] __sys_sendmsg+0x57/0xa0
+[ 129.267058] do_syscall_64+0x33/0x40
+[ 129.270639] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+[ 129.275689] RIP: 0033:0x7f88792e3857
+[ 129.279268] Code: c3 66 90 41 54 41 89 d4 55 48 89 f5 53 89 fb 48 83 ec 10 e8 0b ed ff ff 44 89 e2 48 89 ee 89 df 41 89 c0 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 35 44 89 c7 48 89 44 24 08 e8 44 ed ff ff 48
+[ 129.298015] RSP: 002b:00007ffdca60dea0 EFLAGS: 00000293 ORIG_RAX: 000000000000002e
+[ 129.305581] RAX: ffffffffffffffda RBX: 000000000000000c RCX: 00007f88792e3857
+[ 129.312712] RDX: 0000000000000000 RSI: 00007ffdca60def0 RDI: 000000000000000c
+[ 129.319846] RBP: 00007ffdca60def0 R08: 0000000000000000 R09: 0000000000000000
+[ 129.326978] R10: 0000000000000001 R11: 0000000000000293 R12: 0000000000000000
+[ 129.334109] R13: 0000000000000000 R14: 00007ffdca60e0a8 R15: 00007ffdca60e09c
+[ 129.341249] ---[ end trace c551e8028fbaf59d ]---
+[ 129.351207] net eth0: Unexpected TXQ (0) queue failure: -12
+[ 129.360445] net eth0: Unexpected TXQ (0) queue failure: -12
+[ 129.824428] net eth0: Unexpected TXQ (0) queue failure: -12
+
+Fixes: 2c53d0f64c06 ("vdpasim: vDPA device simulator")
+Signed-off-by: Laurent Vivier <lvivier@redhat.com>
+Link: https://lore.kernel.org/r/20201027175914.689278-1-lvivier@redhat.com
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Cc: stable@vger.kernel.org
+Acked-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/vdpa/vdpa_sim/vdpa_sim.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
++++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
+@@ -362,7 +362,9 @@ static struct vdpasim *vdpasim_create(vo
+ spin_lock_init(&vdpasim->iommu_lock);
+
+ dev = &vdpasim->vdpa.dev;
+- dev->coherent_dma_mask = DMA_BIT_MASK(64);
++ dev->dma_mask = &dev->coherent_dma_mask;
++ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
++ goto err_iommu;
+ set_dma_ops(dev, &vdpasim_dma_ops);
+
+ vdpasim->iommu = vhost_iotlb_alloc(2048, 0);