--- /dev/null
+From b4f1e694a9485f7a82e6895a25d927c15a45e854 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Feb 2022 15:25:20 -0800
+Subject: ASoC: qcom: Actually clear DMA interrupt register for HDMI
+
+From: Stephen Boyd <swboyd@chromium.org>
+
+[ Upstream commit c8d251f51ee61df06ee0e419348d8c9160bbfb86 ]
+
+In commit da0363f7bfd3 ("ASoC: qcom: Fix for DMA interrupt clear reg
+overwriting") we changed regmap_write() to regmap_update_bits() so that
+we can avoid overwriting bits that we didn't intend to modify.
+Unfortunately this change breaks the case where a register is writable
+but not readable, which is exactly how the HDMI irq clear register is
+designed (grep around LPASS_HDMITX_APP_IRQCLEAR_REG to see how it's
+write only). That's because regmap_update_bits() tries to read the
+register from the hardware and if it isn't readable it looks in the
+regmap cache to see what was written there last time to compare against
+what we want to write there. Eventually, we're unable to modify this
+register at all because the bits that we're trying to set are already
+set in the cache.
+
+This is doubly bad for the irq clear register because you have to write
+the bit to clear an interrupt. Given the irq is level triggered, we see
+an interrupt storm upon plugging in an HDMI cable and starting audio
+playback. The irq storm is so great that performance degrades
+significantly, leading to CPU soft lockups.
+
+Fix it by using regmap_write_bits() so that we really do write the bits
+in the clear register that we want to. This brings the number of irqs
+handled by lpass_dma_interrupt_handler() down from ~150k/sec to ~10/sec.
+
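+A minimal sketch of the difference, on a hypothetical write-only,
+write-1-to-clear status register (the register offset and bit below are
+made up for illustration, not the LPASS ones):
+
+    #include <linux/bits.h>
+    #include <linux/regmap.h>
+
+    #define EXAMPLE_IRQ_CLEAR_REG  0x18
+    #define EXAMPLE_IRQ_PER_BIT    BIT(0)
+
+    static int example_clear_irq(struct regmap *map)
+    {
+        /*
+         * regmap_update_bits() does read-modify-write: for an unreadable
+         * register it falls back to the regmap cache, sees the bit still
+         * set from the previous clear, and skips the write entirely.
+         */
+
+        /* regmap_write_bits() performs the write unconditionally. */
+        return regmap_write_bits(map, EXAMPLE_IRQ_CLEAR_REG,
+                                 EXAMPLE_IRQ_PER_BIT, EXAMPLE_IRQ_PER_BIT);
+    }
+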
+Fixes: da0363f7bfd3 ("ASoC: qcom: Fix for DMA interrupt clear reg overwriting")
+Cc: Srinivasa Rao Mandadapu <srivasam@codeaurora.org>
+Cc: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Signed-off-by: Stephen Boyd <swboyd@chromium.org>
+Link: https://lore.kernel.org/r/20220209232520.4017634-1-swboyd@chromium.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/qcom/lpass-platform.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
+index a59e9d20cb46b..4b1773c1fb95f 100644
+--- a/sound/soc/qcom/lpass-platform.c
++++ b/sound/soc/qcom/lpass-platform.c
+@@ -524,7 +524,7 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
+ return -EINVAL;
+ }
+
+- ret = regmap_update_bits(map, reg_irqclr, val_irqclr, val_irqclr);
++ ret = regmap_write_bits(map, reg_irqclr, val_irqclr, val_irqclr);
+ if (ret) {
+ dev_err(soc_runtime->dev, "error writing to irqclear reg: %d\n", ret);
+ return ret;
+@@ -665,7 +665,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
+ return -EINVAL;
+ }
+ if (interrupts & LPAIF_IRQ_PER(chan)) {
+- rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val));
++ rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_PER(chan) | val));
+ if (rv) {
+ dev_err(soc_runtime->dev,
+ "error writing to irqclear reg: %d\n", rv);
+@@ -676,7 +676,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
+ }
+
+ if (interrupts & LPAIF_IRQ_XRUN(chan)) {
+- rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val));
++ rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_XRUN(chan) | val));
+ if (rv) {
+ dev_err(soc_runtime->dev,
+ "error writing to irqclear reg: %d\n", rv);
+@@ -688,7 +688,7 @@ static irqreturn_t lpass_dma_interrupt_handler(
+ }
+
+ if (interrupts & LPAIF_IRQ_ERR(chan)) {
+- rv = regmap_update_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val));
++ rv = regmap_write_bits(map, reg, mask, (LPAIF_IRQ_ERR(chan) | val));
+ if (rv) {
+ dev_err(soc_runtime->dev,
+ "error writing to irqclear reg: %d\n", rv);
+--
+2.34.1
+
--- /dev/null
+From 2c59de4e916a121ae69878d0b8292d25e24deea6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Feb 2022 10:53:01 +0100
+Subject: ASoC: tas2770: Insert post reset delay
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Martin Povišer <povik+lin@cutebit.org>
+
+[ Upstream commit 307f31452078792aab94a729fce33200c6e42dc4 ]
+
+Per the TAS2770 datasheet there must be a 1 ms delay from reset to the
+first command. So insert delays into the driver where appropriate.
+
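+A minimal sketch of the delay pattern used below (usleep_range() rather
+than a busy-waiting mdelay(), since these paths may sleep and only about
+1 ms is needed; the helper name is hypothetical):
+
+    #include <linux/delay.h>
+    #include <linux/gpio/consumer.h>
+
+    static void example_hw_reset(struct gpio_desc *reset_gpio)
+    {
+        gpiod_set_value_cansleep(reset_gpio, 0);
+        msleep(20);
+        gpiod_set_value_cansleep(reset_gpio, 1);
+        /* datasheet: >= 1 ms from reset to the first command */
+        usleep_range(1000, 2000);
+    }
+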
+Fixes: 1a476abc723e ("tas2770: add tas2770 smart PA kernel driver")
+Signed-off-by: Martin Povišer <povik+lin@cutebit.org>
+Link: https://lore.kernel.org/r/20220204095301.5554-1-povik+lin@cutebit.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/tas2770.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c
+index 6549e7fef3e32..c5ea3b115966b 100644
+--- a/sound/soc/codecs/tas2770.c
++++ b/sound/soc/codecs/tas2770.c
+@@ -38,10 +38,12 @@ static void tas2770_reset(struct tas2770_priv *tas2770)
+ gpiod_set_value_cansleep(tas2770->reset_gpio, 0);
+ msleep(20);
+ gpiod_set_value_cansleep(tas2770->reset_gpio, 1);
++ usleep_range(1000, 2000);
+ }
+
+ snd_soc_component_write(tas2770->component, TAS2770_SW_RST,
+ TAS2770_RST);
++ usleep_range(1000, 2000);
+ }
+
+ static int tas2770_set_bias_level(struct snd_soc_component *component,
+@@ -110,6 +112,7 @@ static int tas2770_codec_resume(struct snd_soc_component *component)
+
+ if (tas2770->sdz_gpio) {
+ gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
++ usleep_range(1000, 2000);
+ } else {
+ ret = snd_soc_component_update_bits(component, TAS2770_PWR_CTRL,
+ TAS2770_PWR_CTRL_MASK,
+@@ -510,8 +513,10 @@ static int tas2770_codec_probe(struct snd_soc_component *component)
+
+ tas2770->component = component;
+
+- if (tas2770->sdz_gpio)
++ if (tas2770->sdz_gpio) {
+ gpiod_set_value_cansleep(tas2770->sdz_gpio, 1);
++ usleep_range(1000, 2000);
++ }
+
+ tas2770_reset(tas2770);
+
+--
+2.34.1
+
--- /dev/null
+From d2da2942d3a84f4d6db7e5ceaef2094cd23aa307 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Feb 2022 08:52:31 +0100
+Subject: block: fix surprise removal for drivers calling blk_set_queue_dying
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 7a5428dcb7902700b830e912feee4e845df7c019 ]
+
+Various block drivers call blk_set_queue_dying to mark a disk as dead due
+to surprise removal events, but since commit 8e141f9eb803 that doesn't
+work given that the GD_DEAD flag needs to be set to stop I/O.
+
+Replace the driver calls to blk_set_queue_dying with a new (and properly
+documented) blk_mark_disk_dead API, and fold blk_set_queue_dying into the
+only remaining caller.
+
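+A rough sketch of how a driver's surprise-removal path uses the new
+helper (the function below is hypothetical, not one of the converted
+drivers):
+
+    #include <linux/blkdev.h>
+    #include <linux/genhd.h>
+
+    static void example_surprise_remove(struct gendisk *disk)
+    {
+        /* Refuse new I/O and start draining what is already queued. */
+        blk_mark_disk_dead(disk);
+
+        /* Then tear the disk down as usual. */
+        del_gendisk(disk);
+    }
+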
+Fixes: 8e141f9eb803 ("block: drain file system I/O on del_gendisk")
+Reported-by: Markus Blöchl <markus.bloechl@ipetronik.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Link: https://lore.kernel.org/r/20220217075231.1140-1-hch@lst.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-core.c | 10 ++--------
+ block/genhd.c | 14 ++++++++++++++
+ drivers/block/mtip32xx/mtip32xx.c | 2 +-
+ drivers/block/rbd.c | 2 +-
+ drivers/block/xen-blkfront.c | 2 +-
+ drivers/md/dm.c | 2 +-
+ drivers/nvme/host/core.c | 2 +-
+ drivers/nvme/host/multipath.c | 2 +-
+ include/linux/blkdev.h | 3 ++-
+ 9 files changed, 24 insertions(+), 15 deletions(-)
+
+diff --git a/block/blk-core.c b/block/blk-core.c
+index d42a0f3ff7361..42ac3a985c2d7 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -350,13 +350,6 @@ void blk_queue_start_drain(struct request_queue *q)
+ wake_up_all(&q->mq_freeze_wq);
+ }
+
+-void blk_set_queue_dying(struct request_queue *q)
+-{
+- blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+- blk_queue_start_drain(q);
+-}
+-EXPORT_SYMBOL_GPL(blk_set_queue_dying);
+-
+ /**
+ * blk_cleanup_queue - shutdown a request queue
+ * @q: request queue to shutdown
+@@ -374,7 +367,8 @@ void blk_cleanup_queue(struct request_queue *q)
+ WARN_ON_ONCE(blk_queue_registered(q));
+
+ /* mark @q DYING, no new request or merges will be allowed afterwards */
+- blk_set_queue_dying(q);
++ blk_queue_flag_set(QUEUE_FLAG_DYING, q);
++ blk_queue_start_drain(q);
+
+ blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+ blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+diff --git a/block/genhd.c b/block/genhd.c
+index de789d1a1e3d2..2dcedbe4ef046 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -544,6 +544,20 @@ int device_add_disk(struct device *parent, struct gendisk *disk,
+ }
+ EXPORT_SYMBOL(device_add_disk);
+
++/**
++ * blk_mark_disk_dead - mark a disk as dead
++ * @disk: disk to mark as dead
++ *
++ * Mark as disk as dead (e.g. surprise removed) and don't accept any new I/O
++ * to this disk.
++ */
++void blk_mark_disk_dead(struct gendisk *disk)
++{
++ set_bit(GD_DEAD, &disk->state);
++ blk_queue_start_drain(disk->queue);
++}
++EXPORT_SYMBOL_GPL(blk_mark_disk_dead);
++
+ /**
+ * del_gendisk - remove the gendisk
+ * @disk: the struct gendisk to remove
+diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
+index 901855717cb53..ba61e72741eab 100644
+--- a/drivers/block/mtip32xx/mtip32xx.c
++++ b/drivers/block/mtip32xx/mtip32xx.c
+@@ -4112,7 +4112,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
+ "Completion workers still active!\n");
+ }
+
+- blk_set_queue_dying(dd->queue);
++ blk_mark_disk_dead(dd->disk);
+ set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
+
+ /* Clean up the block layer. */
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index e65c9d706f6fb..c4a52f33604dc 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -7182,7 +7182,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
+ * IO to complete/fail.
+ */
+ blk_mq_freeze_queue(rbd_dev->disk->queue);
+- blk_set_queue_dying(rbd_dev->disk->queue);
++ blk_mark_disk_dead(rbd_dev->disk);
+ }
+
+ del_gendisk(rbd_dev->disk);
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 4dbb71230d6e7..3efd341959832 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -2128,7 +2128,7 @@ static void blkfront_closing(struct blkfront_info *info)
+
+ /* No more blkif_request(). */
+ blk_mq_stop_hw_queues(info->rq);
+- blk_set_queue_dying(info->rq);
++ blk_mark_disk_dead(info->gd);
+ set_capacity(info->gd, 0);
+
+ for_each_rinfo(info, rinfo, i) {
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index b75ff6b2b9525..5f33700d12473 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2156,7 +2156,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
+ set_bit(DMF_FREEING, &md->flags);
+ spin_unlock(&_minor_lock);
+
+- blk_set_queue_dying(md->queue);
++ blk_mark_disk_dead(md->disk);
+
+ /*
+ * Take suspend_lock so that presuspend and postsuspend methods
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index ab1892886b453..739ffbfad6283 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4484,7 +4484,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
+ if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+ return;
+
+- blk_set_queue_dying(ns->queue);
++ blk_mark_disk_dead(ns->disk);
+ nvme_start_ns_queue(ns);
+
+ set_capacity_and_notify(ns->disk, 0);
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 2f76969408b27..727520c397109 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -792,7 +792,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+ {
+ if (!head->disk)
+ return;
+- blk_set_queue_dying(head->disk->queue);
++ blk_mark_disk_dead(head->disk);
+ /* make sure all pending bios are cleaned up */
+ kblockd_schedule_work(&head->requeue_work);
+ flush_work(&head->requeue_work);
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index be8e7a55d803c..413c0148c0ce5 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -1184,7 +1184,8 @@ extern void blk_dump_rq_flags(struct request *, char *);
+
+ bool __must_check blk_get_queue(struct request_queue *);
+ extern void blk_put_queue(struct request_queue *);
+-extern void blk_set_queue_dying(struct request_queue *);
++
++void blk_mark_disk_dead(struct gendisk *disk);
+
+ #ifdef CONFIG_BLOCK
+ /*
+--
+2.34.1
+
--- /dev/null
+From 6bb521657c79f18e0f0ae0f05ead221303773087 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 22 Jan 2022 19:10:45 +0800
+Subject: block/wbt: fix negative inflight counter when remove scsi device
+
+From: Laibin Qiu <qiulaibin@huawei.com>
+
+[ Upstream commit e92bc4cd34de2ce454bdea8cd198b8067ee4e123 ]
+
+wbt is now disabled by setting WBT_STATE_OFF_DEFAULT in
+wbt_disable_default() when the elevator is switched to bfq, and it is
+re-enabled by wbt_enable_default() when the scsi device is removed.
+A write request submitted around that point can therefore see wbt
+disabled in wbt_wait() but enabled again by the time wbt_track() runs.
+
+The following is the scenario that triggered the problem.
+
+T1 T2 T3
+ elevator_switch_mq
+ bfq_init_queue
+ wbt_disable_default <= Set
+ rwb->enable_state (OFF)
+Submit_bio
+blk_mq_make_request
+rq_qos_throttle
+<= rwb->enable_state (OFF)
+ scsi_remove_device
+ sd_remove
+ del_gendisk
+ blk_unregister_queue
+ elv_unregister_queue
+ wbt_enable_default
+ <= Set rwb->enable_state (ON)
+q_qos_track
+<= rwb->enable_state (ON)
+^^^^^^ this request is marked WBT_TRACKED without the inflight counter
+being incremented, so wbt_done() later drops rqw->inflight to -1, which
+triggers an IO hang.
+
+Fix this by moving wbt_enable_default() from elv_unregister_queue() to
+bfq_exit_queue(), so that wbt is only re-enabled when bfq exits.
+
+Fixes: 76a8040817b4b ("blk-wbt: make sure throttle is enabled properly")
+
+Also remove a stale one-line comment, and kill one single-use local
+variable.
+
+Signed-off-by: Ming Lei <ming.lei@rehdat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/linux-block/20211214133103.551813-1-qiulaibin@huawei.com/
+Signed-off-by: Laibin Qiu <qiulaibin@huawei.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/bfq-iosched.c | 2 ++
+ block/elevator.c | 2 --
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index ea9a086d0498f..e66970bf27dbe 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -6878,6 +6878,8 @@ static void bfq_exit_queue(struct elevator_queue *e)
+ spin_unlock_irq(&bfqd->lock);
+ #endif
+
++ wbt_enable_default(bfqd->queue);
++
+ kfree(bfqd);
+ }
+
+diff --git a/block/elevator.c b/block/elevator.c
+index cd02ae332c4eb..1b5e57f6115f3 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -523,8 +523,6 @@ void elv_unregister_queue(struct request_queue *q)
+ kobject_del(&e->kobj);
+
+ e->registered = 0;
+- /* Re-enable throttling in case elevator disabled it */
+- wbt_enable_default(q);
+ }
+ }
+
+--
+2.34.1
+
--- /dev/null
+From c69b4f3e378bf4048fce54d37b423789adaf7a8b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Feb 2022 01:30:08 +0800
+Subject: Drivers: hv: vmbus: Fix memory leak in vmbus_add_channel_kobj
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit 8bc69f86328e87a0ffa79438430cc82f3aa6a194 ]
+
+kobject_init_and_add() takes a reference even when it fails.
+According to the doc of kobject_init_and_add():
+
+ If this function returns an error, kobject_put() must be called to
+ properly clean up the memory associated with the object.
+
+Fix memory leak by calling kobject_put().
+
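+The generic pattern, as a minimal sketch (the function and name below
+are made up):
+
+    #include <linux/kobject.h>
+
+    static int example_add_kobj(struct kobject *kobj, struct kobj_type *ktype,
+                                struct kobject *parent)
+    {
+        int ret;
+
+        ret = kobject_init_and_add(kobj, ktype, parent, "example");
+        if (ret) {
+            /*
+             * kobject_init() already took a reference, so the error
+             * path must drop it with kobject_put(); freeing the object
+             * directly would leak the kobject name and bypass the
+             * ktype release callback.
+             */
+            kobject_put(kobj);
+            return ret;
+        }
+
+        return 0;
+    }
+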
+Fixes: c2e5df616e1a ("vmbus: add per-channel sysfs info")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Reviewed-by: Juan Vazquez <juvazq@linux.microsoft.com>
+Link: https://lore.kernel.org/r/20220203173008.43480-1-linmq006@gmail.com
+Signed-off-by: Wei Liu <wei.liu@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hv/vmbus_drv.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 392c1ac4f8193..44bd0b6ff5059 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -2027,8 +2027,10 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
+ kobj->kset = dev->channels_kset;
+ ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
+ "%u", relid);
+- if (ret)
++ if (ret) {
++ kobject_put(kobj);
+ return ret;
++ }
+
+ ret = sysfs_create_group(kobj, &vmbus_chan_group);
+
+@@ -2037,6 +2039,7 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
+ * The calling functions' error handling paths will cleanup the
+ * empty channel directory.
+ */
++ kobject_put(kobj);
+ dev_err(device, "Unable to set up channel sysfs files\n");
+ return ret;
+ }
+--
+2.34.1
+
--- /dev/null
+From ad920a9a394da2533bc43e59e1b90a364140989d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 22 Jan 2022 17:48:26 +0800
+Subject: HID: elo: fix memory leak in elo_probe
+
+From: Dongliang Mu <mudongliangabcd@gmail.com>
+
+[ Upstream commit 817b8b9c5396d2b2d92311b46719aad5d3339dbe ]
+
+When hid_parse() in elo_probe() fails, it forgets to call usb_put_dev to
+decrease the refcount.
+
+Fix this by adding usb_put_dev() in the error handling code of elo_probe().
+
+Fixes: fbf42729d0e9 ("HID: elo: update the reference count of the usb device structure")
+Reported-by: syzkaller <syzkaller@googlegroups.com>
+Signed-off-by: Dongliang Mu <mudongliangabcd@gmail.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hid/hid-elo.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
+index 8e960d7b233b3..9b42b0cdeef06 100644
+--- a/drivers/hid/hid-elo.c
++++ b/drivers/hid/hid-elo.c
+@@ -262,6 +262,7 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
+
+ return 0;
+ err_free:
++ usb_put_dev(udev);
+ kfree(priv);
+ return ret;
+ }
+--
+2.34.1
+
--- /dev/null
+From a0a3bab2a42d5fb00ebce2287a8675deafede631 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Feb 2022 17:48:12 -0800
+Subject: KVM: x86/pmu: Don't truncate the PerfEvtSeln MSR when creating a perf
+ event
+
+From: Jim Mattson <jmattson@google.com>
+
+[ Upstream commit b8bfee85f1307426e0242d654f3a14c06ef639c5 ]
+
+AMD's event select is 3 nybbles, with the high nybble in bits 35:32 of
+a PerfEvtSeln MSR. Don't drop the high nybble when setting up the
+config field of a perf_event_attr structure for a call to
+perf_event_create_kernel_counter().
+
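+A small illustration of the truncation (plain user-space C, the event
+value is hypothetical):
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+        /* AMD extended event: low byte 0x76, bits 35:32 = 0x1 */
+        uint64_t eventsel = (1ULL << 32) | 0x76;
+
+        unsigned int config32 = eventsel;  /* old code: high nybble lost */
+        uint64_t config64 = eventsel;      /* fixed code: preserved      */
+
+        printf("32-bit config: %#x\n", config32);                        /* 0x76 */
+        printf("64-bit config: %#llx\n", (unsigned long long)config64);  /* 0x100000076 */
+        return 0;
+    }
+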
+Fixes: ca724305a2b0 ("KVM: x86/vPMU: Implement AMD vPMU code for KVM")
+Reported-by: Stephane Eranian <eranian@google.com>
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Message-Id: <20220203014813.2130559-1-jmattson@google.com>
+Reviewed-by: David Dunn <daviddunn@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/pmu.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
+index eec614de9af30..bfef0c658730e 100644
+--- a/arch/x86/kvm/pmu.c
++++ b/arch/x86/kvm/pmu.c
+@@ -95,7 +95,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
+ }
+
+ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
+- unsigned config, bool exclude_user,
++ u64 config, bool exclude_user,
+ bool exclude_kernel, bool intr,
+ bool in_tx, bool in_tx_cp)
+ {
+@@ -173,7 +173,8 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
+
+ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+ {
+- unsigned config, type = PERF_TYPE_RAW;
++ u64 config;
++ u32 type = PERF_TYPE_RAW;
+ struct kvm *kvm = pmc->vcpu->kvm;
+ struct kvm_pmu_event_filter *filter;
+ int i;
+--
+2.34.1
+
--- /dev/null
+From bc9f9a13e58817f2fb101dea8a320228b02f6f14 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Nov 2021 15:42:17 +0800
+Subject: KVM: x86/pmu: Refactoring find_arch_event() to pmc_perf_hw_id()
+
+From: Like Xu <likexu@tencent.com>
+
+[ Upstream commit 7c174f305cbee6bdba5018aae02b84369e7ab995 ]
+
+The find_arch_event() returns a "unsigned int" value,
+which is used by the pmc_reprogram_counter() to
+program a PERF_TYPE_HARDWARE type perf_event.
+
+The returned value is actually the kernel defined generic
+perf_hw_id, let's rename it to pmc_perf_hw_id() with simpler
+incoming parameters for better self-explanation.
+
+Signed-off-by: Like Xu <likexu@tencent.com>
+Message-Id: <20211130074221.93635-3-likexu@tencent.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/pmu.c | 8 +-------
+ arch/x86/kvm/pmu.h | 3 +--
+ arch/x86/kvm/svm/pmu.c | 8 ++++----
+ arch/x86/kvm/vmx/pmu_intel.c | 9 +++++----
+ 4 files changed, 11 insertions(+), 17 deletions(-)
+
+diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
+index 0772bad9165c5..eec614de9af30 100644
+--- a/arch/x86/kvm/pmu.c
++++ b/arch/x86/kvm/pmu.c
+@@ -174,7 +174,6 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
+ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+ {
+ unsigned config, type = PERF_TYPE_RAW;
+- u8 event_select, unit_mask;
+ struct kvm *kvm = pmc->vcpu->kvm;
+ struct kvm_pmu_event_filter *filter;
+ int i;
+@@ -206,17 +205,12 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+ if (!allow_event)
+ return;
+
+- event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+- unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+-
+ if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
+ ARCH_PERFMON_EVENTSEL_INV |
+ ARCH_PERFMON_EVENTSEL_CMASK |
+ HSW_IN_TX |
+ HSW_IN_TX_CHECKPOINTED))) {
+- config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc),
+- event_select,
+- unit_mask);
++ config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
+ if (config != PERF_COUNT_HW_MAX)
+ type = PERF_TYPE_HARDWARE;
+ }
+diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
+index 0e4f2b1fa9fbd..a06d95165ac7c 100644
+--- a/arch/x86/kvm/pmu.h
++++ b/arch/x86/kvm/pmu.h
+@@ -24,8 +24,7 @@ struct kvm_event_hw_type_mapping {
+ };
+
+ struct kvm_pmu_ops {
+- unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
+- u8 unit_mask);
++ unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
+ unsigned (*find_fixed_event)(int idx);
+ bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
+ struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
+diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
+index e152241d1d709..06f8034f62e4f 100644
+--- a/arch/x86/kvm/svm/pmu.c
++++ b/arch/x86/kvm/svm/pmu.c
+@@ -134,10 +134,10 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
+ return &pmu->gp_counters[msr_to_index(msr)];
+ }
+
+-static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
+- u8 event_select,
+- u8 unit_mask)
++static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
+ {
++ u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
++ u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
+@@ -320,7 +320,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
+ }
+
+ struct kvm_pmu_ops amd_pmu_ops = {
+- .find_arch_event = amd_find_arch_event,
++ .pmc_perf_hw_id = amd_pmc_perf_hw_id,
+ .find_fixed_event = amd_find_fixed_event,
+ .pmc_is_enabled = amd_pmc_is_enabled,
+ .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
+diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
+index 10cc4f65c4efd..6427d95de01cf 100644
+--- a/arch/x86/kvm/vmx/pmu_intel.c
++++ b/arch/x86/kvm/vmx/pmu_intel.c
+@@ -68,10 +68,11 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
+ reprogram_counter(pmu, bit);
+ }
+
+-static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
+- u8 event_select,
+- u8 unit_mask)
++static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
+ {
++ struct kvm_pmu *pmu = pmc_to_pmu(pmc);
++ u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
++ u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
+@@ -706,7 +707,7 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
+ }
+
+ struct kvm_pmu_ops intel_pmu_ops = {
+- .find_arch_event = intel_find_arch_event,
++ .pmc_perf_hw_id = intel_pmc_perf_hw_id,
+ .find_fixed_event = intel_find_fixed_event,
+ .pmc_is_enabled = intel_pmc_is_enabled,
+ .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
+--
+2.34.1
+
--- /dev/null
+From 6d8b20638926085a945b9531d0b29e9054c40707 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Feb 2022 17:48:13 -0800
+Subject: KVM: x86/pmu: Use AMD64_RAW_EVENT_MASK for PERF_TYPE_RAW
+
+From: Jim Mattson <jmattson@google.com>
+
+[ Upstream commit 710c476514313c74045c41c0571bb5178fd16e3d ]
+
+AMD's event select is 3 nybbles, with the high nybble in bits 35:32 of
+a PerfEvtSeln MSR. Don't mask off the high nybble when configuring a
+RAW perf event.
+
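+A minimal sketch of what the two masks keep from an AMD extended event
+(the EXAMPLE_* masks below are simplified stand-ins for the real
+definitions in arch/x86/include/asm/perf_event.h; only the event select
+bits are shown, the umask/edge/inv/cmask bits are omitted):
+
+    #include <stdint.h>
+
+    /* x86 raw mask keeps event select bits 7:0, AMD64 also keeps 35:32 */
+    #define EXAMPLE_X86_EVENT_MASK    0xffULL
+    #define EXAMPLE_AMD64_EVENT_MASK  ((0xfULL << 32) | 0xffULL)
+
+    static uint64_t raw_config(uint64_t eventsel, int amd64_mask)
+    {
+        uint64_t mask = amd64_mask ? EXAMPLE_AMD64_EVENT_MASK
+                                   : EXAMPLE_X86_EVENT_MASK;
+
+        /* eventsel = (1ULL << 32) | 0x76 gives 0x76 vs 0x100000076 */
+        return eventsel & mask;
+    }
+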
+Fixes: ca724305a2b0 ("KVM: x86/vPMU: Implement AMD vPMU code for KVM")
+Signed-off-by: Jim Mattson <jmattson@google.com>
+Message-Id: <20220203014813.2130559-2-jmattson@google.com>
+Reviewed-by: David Dunn <daviddunn@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/pmu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
+index bfef0c658730e..f256f01056bdb 100644
+--- a/arch/x86/kvm/pmu.c
++++ b/arch/x86/kvm/pmu.c
+@@ -217,7 +217,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+ }
+
+ if (type == PERF_TYPE_RAW)
+- config = eventsel & X86_RAW_EVENT_MASK;
++ config = eventsel & AMD64_RAW_EVENT_MASK;
+
+ if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
+ return;
+--
+2.34.1
+
--- /dev/null
+From ef714afc040ce98367efcc5a66292f7749ae68de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 16 Jan 2022 04:22:10 +0100
+Subject: mtd: parsers: qcom: Fix kernel panic on skipped partition
+
+From: Ansuel Smith <ansuelsmth@gmail.com>
+
+[ Upstream commit 65d003cca335cabc0160d3cd7daa689eaa9dd3cd ]
+
+In the event of a skipped partition (the case where the entry name is
+empty) the kernel panics in the cleanup function, as the name entry is
+NULL. Rework the parser logic by first counting the valid partitions
+and only then allocating the space and setting the data for them.
+
+The old logic was also fundamentally wrong: with a skipped partition,
+the number of parts returned was incorrect because it was not decreased
+for the skipped partitions.
+
+Fixes: 803eb124e1a6 ("mtd: parsers: Add Qcom SMEM parser")
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Link: https://lore.kernel.org/linux-mtd/20220116032211.9728-1-ansuelsmth@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mtd/parsers/qcomsmempart.c | 31 ++++++++++++++++++------------
+ 1 file changed, 19 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
+index 06a818cd2433f..f4fc7635c1f39 100644
+--- a/drivers/mtd/parsers/qcomsmempart.c
++++ b/drivers/mtd/parsers/qcomsmempart.c
+@@ -58,11 +58,11 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
+ const struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+ {
++ size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
++ int ret, i, j, tmpparts, numparts = 0;
+ struct smem_flash_pentry *pentry;
+ struct smem_flash_ptable *ptable;
+- size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
+ struct mtd_partition *parts;
+- int ret, i, numparts;
+ char *name, *c;
+
+ if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS)
+@@ -87,8 +87,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
+ }
+
+ /* Ensure that # of partitions is less than the max we have allocated */
+- numparts = le32_to_cpu(ptable->numparts);
+- if (numparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
++ tmpparts = le32_to_cpu(ptable->numparts);
++ if (tmpparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
+ pr_err("Partition numbers exceed the max limit\n");
+ return -EINVAL;
+ }
+@@ -116,11 +116,17 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
+ return PTR_ERR(ptable);
+ }
+
++ for (i = 0; i < tmpparts; i++) {
++ pentry = &ptable->pentry[i];
++ if (pentry->name[0] != '\0')
++ numparts++;
++ }
++
+ parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+- for (i = 0; i < numparts; i++) {
++ for (i = 0, j = 0; i < tmpparts; i++) {
+ pentry = &ptable->pentry[i];
+ if (pentry->name[0] == '\0')
+ continue;
+@@ -135,24 +141,25 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
+ for (c = name; *c != '\0'; c++)
+ *c = tolower(*c);
+
+- parts[i].name = name;
+- parts[i].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
+- parts[i].mask_flags = pentry->attr;
+- parts[i].size = le32_to_cpu(pentry->length) * mtd->erasesize;
++ parts[j].name = name;
++ parts[j].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
++ parts[j].mask_flags = pentry->attr;
++ parts[j].size = le32_to_cpu(pentry->length) * mtd->erasesize;
+ pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
+ i, pentry->name, le32_to_cpu(pentry->offset),
+ le32_to_cpu(pentry->length), pentry->attr);
++ j++;
+ }
+
+ pr_debug("SMEM partition table found: ver: %d len: %d\n",
+- le32_to_cpu(ptable->version), numparts);
++ le32_to_cpu(ptable->version), tmpparts);
+ *pparts = parts;
+
+ return numparts;
+
+ out_free_parts:
+- while (--i >= 0)
+- kfree(parts[i].name);
++ while (--j >= 0)
++ kfree(parts[j].name);
+ kfree(parts);
+ *pparts = NULL;
+
+--
+2.34.1
+
--- /dev/null
+From 34e4a9ce299f7f5a20814c71c590cc14810fe2c5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 16 Jan 2022 04:22:11 +0100
+Subject: mtd: parsers: qcom: Fix missing free for pparts in cleanup
+
+From: Ansuel Smith <ansuelsmth@gmail.com>
+
+[ Upstream commit 3dd8ba961b9356c4113b96541c752c73d98fef70 ]
+
+The mtdpart core doesn't free pparts when a cleanup function is
+declared. Add the missing free for pparts to the smem cleanup function
+to fix the leak.
+
+Fixes: 10f3b4d79958 ("mtd: parsers: qcom: Fix leaking of partition name")
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Link: https://lore.kernel.org/linux-mtd/20220116032211.9728-2-ansuelsmth@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mtd/parsers/qcomsmempart.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
+index f4fc7635c1f39..32ddfea701423 100644
+--- a/drivers/mtd/parsers/qcomsmempart.c
++++ b/drivers/mtd/parsers/qcomsmempart.c
+@@ -173,6 +173,8 @@ static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts,
+
+ for (i = 0; i < nr_parts; i++)
+ kfree(pparts[i].name);
++
++ kfree(pparts);
+ }
+
+ static const struct of_device_id qcomsmem_of_match_table[] = {
+--
+2.34.1
+
--- /dev/null
+From a02095910008aefa874965e5d3533d341fc56de3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Jan 2022 14:55:05 +0300
+Subject: mtd: phram: Prevent divide by zero bug in phram_setup()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit 3e3765875b1b8864898603768fd5c93eeb552211 ]
+
+The problem is that "erasesize" is a uint64_t, so it might be non-zero
+while its lower 32 bits are zero; when it is truncated to
+"(uint32_t)erasesize", that value is zero. This leads to a divide by
+zero bug.
+
+Avoid the bug by delaying the divide until after we have validated
+that "erasesize" is non-zero and within the uint32_t range.
+
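+A concrete example of the truncation (the values below are made up):
+
+    #include <stdint.h>
+
+    static uint32_t truncate_erasesize(void)
+    {
+        uint64_t erasesize = 1ULL << 32;  /* 4 GiB: non-zero ...    */
+        uint32_t truncated = erasesize;   /* ... but truncates to 0 */
+
+        /*
+         * div_u64_rem(len, truncated, &rem) would now divide by zero,
+         * which is why the range check must run before the division.
+         */
+        return truncated;
+    }
+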
+Fixes: dc2b3e5cbc80 ("mtd: phram: use div_u64_rem to stop overwrite len in phram_setup")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Link: https://lore.kernel.org/linux-mtd/20220121115505.GI1978@kadam
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mtd/devices/phram.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
+index 6ed6c51fac69e..d503821a3e606 100644
+--- a/drivers/mtd/devices/phram.c
++++ b/drivers/mtd/devices/phram.c
+@@ -264,16 +264,20 @@ static int phram_setup(const char *val)
+ }
+ }
+
+- if (erasesize)
+- div_u64_rem(len, (uint32_t)erasesize, &rem);
+-
+ if (len == 0 || erasesize == 0 || erasesize > len
+- || erasesize > UINT_MAX || rem) {
++ || erasesize > UINT_MAX) {
+ parse_err("illegal erasesize or len\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
++ div_u64_rem(len, (uint32_t)erasesize, &rem);
++ if (rem) {
++ parse_err("len is not multiple of erasesize\n");
++ ret = -EINVAL;
++ goto error;
++ }
++
+ ret = register_device(name, start, len, (uint32_t)erasesize);
+ if (ret)
+ goto error;
+--
+2.34.1
+
--- /dev/null
+From f5e013ef1edd162d5e8e5e1f2400bd376d3421b7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Jan 2022 23:43:44 +0100
+Subject: mtd: rawnand: brcmnand: Fixed incorrect sub-page ECC status
+
+From: david regan <dregan@mail.com>
+
+[ Upstream commit 36415a7964711822e63695ea67fede63979054d9 ]
+
+The brcmnand driver contains a bug: if a page (e.g. 2k bytes) is read
+from the parallel/ONFI NAND and, within that page, a subpage (512
+bytes) with correctable errors is followed by a subpage with
+uncorrectable errors, the page read returns the wrong status of
+correctable (as opposed to the actual status of uncorrectable).
+
+The bug is in function brcmnand_read_by_pio where there is a check for
+uncorrectable bits which will be preempted if a previous status for
+correctable bits is detected.
+
+The fix is to only skip the uncorrectable-error check once we already
+have an uncorrectable status, instead of skipping it whenever any
+status has been recorded.
+
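+A simplified sketch of the status accumulation across the sub-pages
+(the subpage_has_*() helpers are hypothetical stand-ins for the
+controller status reads in brcmnand_read_by_pio()):
+
+    #include <linux/errno.h>
+    #include <linux/types.h>
+
+    extern bool subpage_has_uncorrectable(int i);
+    extern bool subpage_has_correctable(int i);
+
+    static int page_status(int subpages)
+    {
+        int ret = 0;  /* accumulated status, kept across sub-pages */
+        int i;
+
+        for (i = 0; i < subpages; i++) {
+            /*
+             * Before the fix this guard was "if (!ret)": once an earlier
+             * sub-page had set ret = -EUCLEAN (correctable), the
+             * uncorrectable check was skipped for the rest of the page.
+             */
+            if (ret != -EBADMSG) {
+                if (subpage_has_uncorrectable(i))
+                    ret = -EBADMSG;
+            }
+
+            if (!ret && subpage_has_correctable(i))
+                ret = -EUCLEAN;
+        }
+
+        return ret;
+    }
+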
+Fixes: 27c5b17cd1b1 ("mtd: nand: add NAND driver "library" for Broadcom STB NAND controller")
+Signed-off-by: david regan <dregan@mail.com>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Link: https://lore.kernel.org/linux-mtd/trinity-478e0c09-9134-40e8-8f8c-31c371225eda-1643237024774@3c-app-mailcom-lxa02
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mtd/nand/raw/brcmnand/brcmnand.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+index f75929783b941..aee78f5f4f156 100644
+--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
++++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+@@ -2106,7 +2106,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
+ mtd->oobsize / trans,
+ host->hwcfg.sector_size_1k);
+
+- if (!ret) {
++ if (ret != -EBADMSG) {
+ *err_addr = brcmnand_get_uncorrecc_addr(ctrl);
+
+ if (*err_addr)
+--
+2.34.1
+
--- /dev/null
+From 063d5f1d313df76cc282c5ea8b1a41b158f1a8d4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Dec 2021 07:27:51 +0000
+Subject: mtd: rawnand: ingenic: Fix missing put_device in ingenic_ecc_get
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit ba1b71b008e97fd747845ff3a818420b11bbe830 ]
+
+If of_find_device_by_node() succeeds, ingenic_ecc_get() doesn't have
+a corresponding put_device(). Thus add put_device() to fix the exception
+handling.
+
+Fixes: 15de8c6efd0e ("mtd: rawnand: ingenic: Separate top-level and SoC specific code")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Reviewed-by: Paul Cercueil <paul@crapouillou.net>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Link: https://lore.kernel.org/linux-mtd/20211230072751.21622-1-linmq006@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mtd/nand/raw/ingenic/ingenic_ecc.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+index efe0ffe4f1abc..9054559e52dda 100644
+--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
++++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+@@ -68,9 +68,14 @@ static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np)
+ struct ingenic_ecc *ecc;
+
+ pdev = of_find_device_by_node(np);
+- if (!pdev || !platform_get_drvdata(pdev))
++ if (!pdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
++ if (!platform_get_drvdata(pdev)) {
++ put_device(&pdev->dev);
++ return ERR_PTR(-EPROBE_DEFER);
++ }
++
+ ecc = platform_get_drvdata(pdev);
+ clk_prepare_enable(ecc->clk);
+
+--
+2.34.1
+
--- /dev/null
+From 06c33aa1bafd307c5a2b5fe79d2e98bc75e5abaf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Jan 2022 03:03:15 +0000
+Subject: mtd: rawnand: qcom: Fix clock sequencing in qcom_nandc_probe()
+
+From: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+
+[ Upstream commit 5c23b3f965bc9ee696bf2ed4bdc54d339dd9a455 ]
+
+Interacting with a NAND chip on an IPQ6018 I found that the qcomsmem NAND
+partition parser was returning -EPROBE_DEFER waiting for the main smem
+driver to load.
+
+This caused the board to reset. Playing about with the probe() function
+shows that the problem lies in the core clock being switched off before the
+nandc_unalloc() routine has completed.
+
+If we look at how qcom_nandc_remove() tears down allocated resources we see
+the expected order is
+
+qcom_nandc_unalloc(nandc);
+
+clk_disable_unprepare(nandc->aon_clk);
+clk_disable_unprepare(nandc->core_clk);
+
+dma_unmap_resource(&pdev->dev, nandc->base_dma, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+
+Tweaking probe() to both bring up and tear down resources in that order
+removes the reset if we end up deferring elsewhere.
+
+Fixes: c76b78d8ec05 ("mtd: nand: Qualcomm NAND controller driver")
+Signed-off-by: Bryan O'Donoghue <bryan.odonoghue@linaro.org>
+Reviewed-by: Manivannan Sadhasivam <mani@kernel.org>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Link: https://lore.kernel.org/linux-mtd/20220103030316.58301-2-bryan.odonoghue@linaro.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mtd/nand/raw/qcom_nandc.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
+index 04e6f7b267064..0f41a9a421575 100644
+--- a/drivers/mtd/nand/raw/qcom_nandc.c
++++ b/drivers/mtd/nand/raw/qcom_nandc.c
+@@ -2,7 +2,6 @@
+ /*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ */
+-
+ #include <linux/clk.h>
+ #include <linux/slab.h>
+ #include <linux/bitops.h>
+@@ -3063,10 +3062,6 @@ static int qcom_nandc_probe(struct platform_device *pdev)
+ if (dma_mapping_error(dev, nandc->base_dma))
+ return -ENXIO;
+
+- ret = qcom_nandc_alloc(nandc);
+- if (ret)
+- goto err_nandc_alloc;
+-
+ ret = clk_prepare_enable(nandc->core_clk);
+ if (ret)
+ goto err_core_clk;
+@@ -3075,6 +3070,10 @@ static int qcom_nandc_probe(struct platform_device *pdev)
+ if (ret)
+ goto err_aon_clk;
+
++ ret = qcom_nandc_alloc(nandc);
++ if (ret)
++ goto err_nandc_alloc;
++
+ ret = qcom_nandc_setup(nandc);
+ if (ret)
+ goto err_setup;
+@@ -3086,15 +3085,14 @@ static int qcom_nandc_probe(struct platform_device *pdev)
+ return 0;
+
+ err_setup:
++ qcom_nandc_unalloc(nandc);
++err_nandc_alloc:
+ clk_disable_unprepare(nandc->aon_clk);
+ err_aon_clk:
+ clk_disable_unprepare(nandc->core_clk);
+ err_core_clk:
+- qcom_nandc_unalloc(nandc);
+-err_nandc_alloc:
+ dma_unmap_resource(dev, res->start, resource_size(res),
+ DMA_BIDIRECTIONAL, 0);
+-
+ return ret;
+ }
+
+--
+2.34.1
+
--- /dev/null
+From 3f19bdd8450917deb44625cf5a7291df219541e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Feb 2022 18:05:18 -0500
+Subject: NFS: Do not report writeback errors in nfs_getattr()
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit d19e0183a88306acda07f4a01fedeeffe2a2a06b ]
+
+The result of the writeback, whether it is an ENOSPC or an EIO, or
+anything else, does not inhibit the NFS client from reporting the
+correct file timestamps.
+
+Fixes: 79566ef018f5 ("NFS: Getattr doesn't require data sync semantics")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/inode.c | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index f9d3ad3acf114..410f87bc48cca 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -840,12 +840,9 @@ int nfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
+ }
+
+ /* Flush out writes to the server in order to update c/mtime. */
+- if ((request_mask & (STATX_CTIME|STATX_MTIME)) &&
+- S_ISREG(inode->i_mode)) {
+- err = filemap_write_and_wait(inode->i_mapping);
+- if (err)
+- goto out;
+- }
++ if ((request_mask & (STATX_CTIME | STATX_MTIME)) &&
++ S_ISREG(inode->i_mode))
++ filemap_write_and_wait(inode->i_mapping);
+
+ /*
+ * We may force a getattr if the user cares about atime.
+--
+2.34.1
+
--- /dev/null
+From 5144a1613a55e6a69b427e39e1f8ce61af8c79db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Feb 2022 13:38:23 -0500
+Subject: NFS: LOOKUP_DIRECTORY is also ok with symlinks
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit e0caaf75d443e02e55e146fd75fe2efc8aed5540 ]
+
+Commit ac795161c936 (NFSv4: Handle case where the lookup of a directory
+fails) [1], part of Linux since 5.17-rc2, introduced a regression, where
+a symbolic link on an NFS mount to a directory on another NFS does not
+resolve(?) the first time it is accessed:
+
+Reported-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Fixes: ac795161c936 ("NFSv4: Handle case where the lookup of a directory fails")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Tested-by: Donald Buczek <buczek@molgen.mpg.de>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/dir.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index f6381c675cbe9..9adc6f57a0083 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1987,14 +1987,14 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
+ if (!res) {
+ inode = d_inode(dentry);
+ if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
+- !S_ISDIR(inode->i_mode))
++ !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
+ res = ERR_PTR(-ENOTDIR);
+ else if (inode && S_ISREG(inode->i_mode))
+ res = ERR_PTR(-EOPENSTALE);
+ } else if (!IS_ERR(res)) {
+ inode = d_inode(res);
+ if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
+- !S_ISDIR(inode->i_mode)) {
++ !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) {
+ dput(res);
+ res = ERR_PTR(-ENOTDIR);
+ } else if (inode && S_ISREG(inode->i_mode)) {
+--
+2.34.1
+
--- /dev/null
+From b2849b09fe5d9aa6b8687c5bd40ee280e9bc52a6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Feb 2022 12:14:44 -0500
+Subject: NFS: Remove an incorrect revalidation in
+ nfs4_update_changeattr_locked()
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit 9d047bf68fe8cdb4086deaf4edd119731a9481ed ]
+
+In nfs4_update_changeattr_locked(), we don't need to set the
+NFS_INO_REVAL_PAGECACHE flag, because we already know the value of the
+change attribute, and we're already flagging the size. In fact, this
+forces us to revalidate the change attribute a second time for no good
+reason.
+This extra flag appears to have been introduced as part of the xattr
+feature, when update_changeattr_locked() was converted for use by the
+xattr code.
+
+Fixes: 1b523ca972ed ("nfs: modify update_changeattr to deal with regular files")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs4proc.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 389fa72d4ca98..53be03681f69e 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1232,8 +1232,7 @@ nfs4_update_changeattr_locked(struct inode *inode,
+ NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
+ NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
+ NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
+- NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR |
+- NFS_INO_REVAL_PAGECACHE;
++ NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
+ nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
+ }
+ nfsi->attrtimeo_timestamp = jiffies;
+--
+2.34.1
+
--- /dev/null
+From 560910d924d51de7328304d367741f461900890a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Oct 2021 16:17:07 +0800
+Subject: nvme: prepare for pairing quiescing and unquiescing
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit ebc9b95260151d966728cf0063b3b4e465f934d9 ]
+
+Add two helpers so that we can prepare for pairing quiescing and
+unquiescing, which will be done in the next patch.
+
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20211014081710.1871747-4-ming.lei@redhat.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 52 ++++++++++++++++++++++++----------------
+ 1 file changed, 31 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 8aa92ebb8b7c1..ab1892886b453 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -118,25 +118,6 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+ static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
+ struct nvme_command *cmd);
+
+-/*
+- * Prepare a queue for teardown.
+- *
+- * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
+- * the capacity to 0 after that to avoid blocking dispatchers that may be
+- * holding bd_butex. This will end buffered writers dirtying pages that can't
+- * be synced.
+- */
+-static void nvme_set_queue_dying(struct nvme_ns *ns)
+-{
+- if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+- return;
+-
+- blk_set_queue_dying(ns->queue);
+- blk_mq_unquiesce_queue(ns->queue);
+-
+- set_capacity_and_notify(ns->disk, 0);
+-}
+-
+ void nvme_queue_scan(struct nvme_ctrl *ctrl)
+ {
+ /*
+@@ -4480,6 +4461,35 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
+ }
+ EXPORT_SYMBOL_GPL(nvme_init_ctrl);
+
++static void nvme_start_ns_queue(struct nvme_ns *ns)
++{
++ blk_mq_unquiesce_queue(ns->queue);
++}
++
++static void nvme_stop_ns_queue(struct nvme_ns *ns)
++{
++ blk_mq_quiesce_queue(ns->queue);
++}
++
++/*
++ * Prepare a queue for teardown.
++ *
++ * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
++ * the capacity to 0 after that to avoid blocking dispatchers that may be
++ * holding bd_butex. This will end buffered writers dirtying pages that can't
++ * be synced.
++ */
++static void nvme_set_queue_dying(struct nvme_ns *ns)
++{
++ if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
++ return;
++
++ blk_set_queue_dying(ns->queue);
++ nvme_start_ns_queue(ns);
++
++ set_capacity_and_notify(ns->disk, 0);
++}
++
+ /**
+ * nvme_kill_queues(): Ends all namespace queues
+ * @ctrl: the dead controller that needs to end
+@@ -4558,7 +4568,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
+
+ down_read(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+- blk_mq_quiesce_queue(ns->queue);
++ nvme_stop_ns_queue(ns);
+ up_read(&ctrl->namespaces_rwsem);
+ }
+ EXPORT_SYMBOL_GPL(nvme_stop_queues);
+@@ -4569,7 +4579,7 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
+
+ down_read(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+- blk_mq_unquiesce_queue(ns->queue);
++ nvme_start_ns_queue(ns);
+ up_read(&ctrl->namespaces_rwsem);
+ }
+ EXPORT_SYMBOL_GPL(nvme_start_queues);
+--
+2.34.1
+
tipc-fix-wrong-notification-node-addresses.patch
scsi-ufs-remove-dead-code.patch
scsi-ufs-fix-a-deadlock-in-the-error-handler.patch
+hid-elo-fix-memory-leak-in-elo_probe.patch
+mtd-rawnand-ingenic-fix-missing-put_device-in-ingeni.patch
+mtd-rawnand-qcom-fix-clock-sequencing-in-qcom_nandc_.patch
+mtd-parsers-qcom-fix-kernel-panic-on-skipped-partiti.patch
+mtd-parsers-qcom-fix-missing-free-for-pparts-in-clea.patch
+mtd-phram-prevent-divide-by-zero-bug-in-phram_setup.patch
+mtd-rawnand-brcmnand-fixed-incorrect-sub-page-ecc-st.patch
+drivers-hv-vmbus-fix-memory-leak-in-vmbus_add_channe.patch
+asoc-tas2770-insert-post-reset-delay.patch
+asoc-qcom-actually-clear-dma-interrupt-register-for-.patch
+kvm-x86-pmu-refactoring-find_arch_event-to-pmc_perf_.patch
+kvm-x86-pmu-don-t-truncate-the-perfevtseln-msr-when-.patch
+kvm-x86-pmu-use-amd64_raw_event_mask-for-perf_type_r.patch
+nfs-remove-an-incorrect-revalidation-in-nfs4_update_.patch
+nfs-lookup_directory-is-also-ok-with-symlinks.patch
+tty-n_tty-do-not-look-ahead-for-eol-character-past-t.patch
+nfs-do-not-report-writeback-errors-in-nfs_getattr.patch
+nvme-prepare-for-pairing-quiescing-and-unquiescing.patch
+block-fix-surprise-removal-for-drivers-calling-blk_s.patch
+block-wbt-fix-negative-inflight-counter-when-remove-.patch
--- /dev/null
+From 89d7ab6d588819c15092e1ac731822cfc115c1ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Feb 2022 15:28:00 -0800
+Subject: tty: n_tty: do not look ahead for EOL character past the end of the
+ buffer
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+[ Upstream commit 3593030761630e09200072a4bd06468892c27be3 ]
+
+Daniel Gibson reports that the n_tty code gets line termination wrong in
+very specific cases:
+
+ "If you feed a line with exactly 64 chars + terminating newline, and
+ directly afterwards (without reading) another line into a pseudo
+ terminal, the first read() on the other side will return the 64
+ char line *without* terminating newline, and the next read() will
+ return the missing terminating newline AND the complete next line (if
+ it fits in the buffer)"
+
+and bisected the behavior to commit 3b830a9c34d5 ("tty: convert
+tty_ldisc_ops 'read()' function to take a kernel pointer").
+
+Now, digging deeper, it turns out that the behavior isn't exactly new:
+what changed in commit 3b830a9c34d5 was that the tty line discipline
+.read() function is now passed an intermediate kernel buffer rather than
+the final user space buffer.
+
+And that intermediate kernel buffer is 64 bytes in size - thus that
+special case with exactly 64 bytes plus terminating newline.
+
+The same problem did exist before, but historically the boundary was not
+the 64-byte chunk, but the user-supplied buffer size, which is obviously
+generally bigger (and potentially bigger than N_TTY_BUF_SIZE, which
+would hide the issue entirely).
+
+The reason is that the n_tty canon_copy_from_read_buf() code would look
+ahead for the EOL character one byte further than it would actually
+copy. It would then decide that it had found the terminator, and unmark
+it as an EOL character - which in turn explains why the next read
+wouldn't then be terminated by it.
+
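+A worked example of the off-by-one, using the variables from the
+canon_copy_from_read_buf() hunk below:
+
+    /*
+     * read buffer: 64 payload bytes, '\n', then the next line
+     * *nr == 64   (size of the intermediate kernel chunk)
+     *
+     *   n = min(*nr + 1, canon_head - ldata->read_tail)   ->  65
+     *       the EOL scan over those 65 bytes finds the '\n' at offset
+     *       64 and clears its EOL marker, but
+     *   c = min(*nr, c)                                    ->  64
+     *       only 64 bytes are actually copied.
+     *
+     * So the first read() returns the line without its newline, and the
+     * next read() returns the now-unmarked '\n' followed by the whole
+     * next line.
+     */
+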
+Now, the reason it did all this in the first place is related to some
+historical and pretty obscure EOF behavior, see commit ac8f3bf8832a
+("n_tty: Fix poll() after buffer-limited eof push read") and commit
+40d5e0905a03 ("n_tty: Fix EOF push handling").
+
+And the reason for the EOL confusion is that we treat EOF as a special
+EOL condition, with the EOL character being NUL (aka "__DISABLED_CHAR"
+in the kernel sources).
+
+So that EOF look-ahead also affects the normal EOL handling.
+
+This patch just removes the look-ahead that causes problems, because EOL
+is much more critical than the historical "EOF in the middle of a line
+that coincides with the end of the buffer" handling ever was.
+
+Now, it is possible that we should indeed re-introduce the "look at next
+character to see if it's a EOF" behavior, but if so, that should be done
+not at the kernel buffer chunk boundary in canon_copy_from_read_buf(),
+but at a higher level, when we run out of the user buffer.
+
+In particular, the place to do that would be at the top of
+'n_tty_read()', where we check if it's a continuation of a previously
+started read, and there is no more buffer space left, we could decide to
+just eat the __DISABLED_CHAR at that point.
+
+But that would be a separate patch, because I suspect nobody actually
+cares, and I'd like to get a report about it before bothering.
+
+Fixes: 3b830a9c34d5 ("tty: convert tty_ldisc_ops 'read()' function to take a kernel pointer")
+Fixes: ac8f3bf8832a ("n_tty: Fix poll() after buffer-limited eof push read")
+Fixes: 40d5e0905a03 ("n_tty: Fix EOF push handling")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=215611
+Reported-and-tested-by: Daniel Gibson <metalcaedes@gmail.com>
+Cc: Peter Hurley <peter@hurleysoftware.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Jiri Slaby <jirislaby@kernel.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/n_tty.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 451e02cd06377..de5b45de50402 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1963,7 +1963,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
+ return false;
+
+ canon_head = smp_load_acquire(&ldata->canon_head);
+- n = min(*nr + 1, canon_head - ldata->read_tail);
++ n = min(*nr, canon_head - ldata->read_tail);
+
+ tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
+ size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
+@@ -1985,10 +1985,8 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
+ n += N_TTY_BUF_SIZE;
+ c = n + found;
+
+- if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) {
+- c = min(*nr, c);
++ if (!found || read_buf(ldata, eol) != __DISABLED_CHAR)
+ n = c;
+- }
+
+ n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n",
+ __func__, eol, found, n, c, tail, more);
+--
+2.34.1
+