--- /dev/null
+From 320fb0f91e55ba248d4bad106b408e59099cfa89 Mon Sep 17 00:00:00 2001
+From: Yu Kuai <yukuai3@huawei.com>
+Date: Mon, 29 Aug 2022 10:22:37 +0800
+Subject: blk-throttle: fix that io throttle can only work for single bio
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+commit 320fb0f91e55ba248d4bad106b408e59099cfa89 upstream.
+
+Test scripts:
+cd /sys/fs/cgroup/blkio/
+echo "8:0 1024" > blkio.throttle.write_bps_device
+echo $$ > cgroup.procs
+dd if=/dev/zero of=/dev/sda bs=10k count=1 oflag=direct &
+dd if=/dev/zero of=/dev/sda bs=10k count=1 oflag=direct &
+
+Test result:
+10240 bytes (10 kB, 10 KiB) copied, 10.0134 s, 1.0 kB/s
+10240 bytes (10 kB, 10 KiB) copied, 10.0135 s, 1.0 kB/s
+
+The problem is that the second bio is finished after 10s instead of 20s.
+
+Root cause:
+1) second bio will be flagged:
+
+__blk_throtl_bio
+ while (true) {
+ ...
+ if (sq->nr_queued[rw]) -> some bio is throttled already
+ break
+ };
+ bio_set_flag(bio, BIO_THROTTLED); -> flag the bio
+
+2) flagged bio will be dispatched without waiting:
+
+throtl_dispatch_tg
+ tg_may_dispatch
+ tg_with_in_bps_limit
+ if (bps_limit == U64_MAX || bio_flagged(bio, BIO_THROTTLED))
+ *wait = 0; -> wait time is zero
+ return true;
+
+commit 9f5ede3c01f9 ("block: throttle split bio in case of iops limit")
+support to count split bios for iops limit, thus it adds flagged bio
+checking in tg_with_in_bps_limit() so that split bios will only count
+once for bps limit, however, it introduces a new problem that io throttle
+won't work if multiple bios are throttled.
+
+In order to fix the problem, handle iops/bps limit in different ways:
+
+1) for iops limit, there is no flag to record if the bio is throttled,
+ and iops is always applied.
+2) for bps limit, original bio will be flagged with BIO_BPS_THROTTLED,
+ and io throttle will ignore bio with the flag.
+
+Note that this patch also removes the code to set the flag in
+__bio_clone(); it was introduced in commit 111be8839817 ("block-throttle:
+avoid double charge"), and the author thought a split bio could be
+resubmitted and throttled again, which is wrong because a split bio will
+continue to dispatch from the caller.
+
+Fixes: 9f5ede3c01f9 ("block: throttle split bio in case of iops limit")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Acked-by: Tejun Heo <tj@kernel.org>
+Link: https://lore.kernel.org/r/20220829022240.3348319-2-yukuai1@huaweicloud.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bio.c | 2 --
+ block/blk-throttle.c | 20 ++++++--------------
+ block/blk-throttle.h | 2 +-
+ include/linux/bio.h | 2 +-
+ include/linux/blk_types.h | 2 +-
+ 5 files changed, 9 insertions(+), 19 deletions(-)
+
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -760,8 +760,6 @@ EXPORT_SYMBOL(bio_put);
+ static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
+ {
+ bio_set_flag(bio, BIO_CLONED);
+- if (bio_flagged(bio_src, BIO_THROTTLED))
+- bio_set_flag(bio, BIO_THROTTLED);
+ bio->bi_ioprio = bio_src->bi_ioprio;
+ bio->bi_iter = bio_src->bi_iter;
+
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -811,7 +811,7 @@ static bool tg_with_in_bps_limit(struct
+ unsigned int bio_size = throtl_bio_data_size(bio);
+
+ /* no need to throttle if this bio's bytes have been accounted */
+- if (bps_limit == U64_MAX || bio_flagged(bio, BIO_THROTTLED)) {
++ if (bps_limit == U64_MAX || bio_flagged(bio, BIO_BPS_THROTTLED)) {
+ if (wait)
+ *wait = 0;
+ return true;
+@@ -921,22 +921,13 @@ static void throtl_charge_bio(struct thr
+ unsigned int bio_size = throtl_bio_data_size(bio);
+
+ /* Charge the bio to the group */
+- if (!bio_flagged(bio, BIO_THROTTLED)) {
++ if (!bio_flagged(bio, BIO_BPS_THROTTLED)) {
+ tg->bytes_disp[rw] += bio_size;
+ tg->last_bytes_disp[rw] += bio_size;
+ }
+
+ tg->io_disp[rw]++;
+ tg->last_io_disp[rw]++;
+-
+- /*
+- * BIO_THROTTLED is used to prevent the same bio to be throttled
+- * more than once as a throttled bio will go through blk-throtl the
+- * second time when it eventually gets issued. Set it when a bio
+- * is being charged to a tg.
+- */
+- if (!bio_flagged(bio, BIO_THROTTLED))
+- bio_set_flag(bio, BIO_THROTTLED);
+ }
+
+ /**
+@@ -1026,6 +1017,7 @@ static void tg_dispatch_one_bio(struct t
+ sq->nr_queued[rw]--;
+
+ throtl_charge_bio(tg, bio);
++ bio_set_flag(bio, BIO_BPS_THROTTLED);
+
+ /*
+ * If our parent is another tg, we just need to transfer @bio to
+@@ -2159,8 +2151,10 @@ again:
+ qn = &tg->qnode_on_parent[rw];
+ sq = sq->parent_sq;
+ tg = sq_to_tg(sq);
+- if (!tg)
++ if (!tg) {
++ bio_set_flag(bio, BIO_BPS_THROTTLED);
+ goto out_unlock;
++ }
+ }
+
+ /* out-of-limit, queue to @tg */
+@@ -2189,8 +2183,6 @@ again:
+ }
+
+ out_unlock:
+- bio_set_flag(bio, BIO_THROTTLED);
+-
+ #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+ if (throttled || !td->track_bio_latency)
+ bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
+--- a/block/blk-throttle.h
++++ b/block/blk-throttle.h
+@@ -175,7 +175,7 @@ static inline bool blk_throtl_bio(struct
+ struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
+
+ /* no need to throttle bps any more if the bio has been throttled */
+- if (bio_flagged(bio, BIO_THROTTLED) &&
++ if (bio_flagged(bio, BIO_BPS_THROTTLED) &&
+ !(tg->flags & THROTL_TG_HAS_IOPS_LIMIT))
+ return false;
+
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -509,7 +509,7 @@ static inline void bio_set_dev(struct bi
+ {
+ bio_clear_flag(bio, BIO_REMAPPED);
+ if (bio->bi_bdev != bdev)
+- bio_clear_flag(bio, BIO_THROTTLED);
++ bio_clear_flag(bio, BIO_BPS_THROTTLED);
+ bio->bi_bdev = bdev;
+ bio_associate_blkg(bio);
+ }
+--- a/include/linux/blk_types.h
++++ b/include/linux/blk_types.h
+@@ -323,7 +323,7 @@ enum {
+ BIO_QUIET, /* Make BIO Quiet */
+ BIO_CHAIN, /* chained bio, ->bi_remaining in effect */
+ BIO_REFFED, /* bio has elevated ->bi_cnt */
+- BIO_THROTTLED, /* This bio has already been subjected to
++ BIO_BPS_THROTTLED, /* This bio has already been subjected to
+ * throttling rules. Don't do it again. */
+ BIO_TRACE_COMPLETION, /* bio_endio() should trace the final completion
+ * of this bio. */
--- /dev/null
+From 8c5035dfbb9475b67c82b3fdb7351236525bf52b Mon Sep 17 00:00:00 2001
+From: Yu Kuai <yukuai3@huawei.com>
+Date: Tue, 13 Sep 2022 18:57:49 +0800
+Subject: blk-wbt: call rq_qos_add() after wb_normal is initialized
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+commit 8c5035dfbb9475b67c82b3fdb7351236525bf52b upstream.
+
+Our test found a problem that the wbt inflight counter is negative, which
+will cause io hang (note that this problem doesn't exist in mainline):
+
+t1: device create t2: issue io
+add_disk
+ blk_register_queue
+ wbt_enable_default
+ wbt_init
+ rq_qos_add
+ // wb_normal is still 0
+ /*
+ * in mainline, disk can't be opened before
+ * bdev_add(), however, in old kernels, disk
+ * can be opened before blk_register_queue().
+ */
+ blkdev_issue_flush
+ // disk size is 0, however, it's not checked
+ submit_bio_wait
+ submit_bio
+ blk_mq_submit_bio
+ rq_qos_throttle
+ wbt_wait
+ bio_to_wbt_flags
+ rwb_enabled
+ // wb_normal is 0, inflight is not increased
+
+ wbt_queue_depth_changed(&rwb->rqos);
+ wbt_update_limits
+ // wb_normal is initialized
+ rq_qos_track
+ wbt_track
+ rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
+        // wb_normal is not 0, wbt_flags will be set
+t3: io completion
+blk_mq_free_request
+ rq_qos_done
+ wbt_done
+ wbt_is_tracked
+ // return true
+ __wbt_done
+ wbt_rqw_done
+ atomic_dec_return(&rqw->inflight);
+ // inflight is decreased
+
+commit 8235b5c1e8c1 ("block: call bdev_add later in device_add_disk") can
+avoid this problem, however it's better to fix this problem in wbt:
+
+1) Lower kernel can't backport this patch due to lots of refactor.
+2) Root cause is that wbt call rq_qos_add() before wb_normal is
+initialized.
+
+Fixes: e34cbd307477 ("blk-wbt: add general throttling mechanism")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Link: https://lore.kernel.org/r/20220913105749.3086243-1-yukuai1@huaweicloud.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-wbt.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/block/blk-wbt.c
++++ b/block/blk-wbt.c
+@@ -843,6 +843,10 @@ int wbt_init(struct request_queue *q)
+ rwb->enable_state = WBT_STATE_ON_DEFAULT;
+ rwb->wc = 1;
+ rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
++ rwb->min_lat_nsec = wbt_default_latency_nsec(q);
++
++ wbt_queue_depth_changed(&rwb->rqos);
++ wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
+
+ /*
+ * Assign rwb and add the stats callback.
+@@ -853,11 +857,6 @@ int wbt_init(struct request_queue *q)
+
+ blk_stat_add_callback(q, rwb->cb);
+
+- rwb->min_lat_nsec = wbt_default_latency_nsec(q);
+-
+- wbt_queue_depth_changed(&rwb->rqos);
+- wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
+-
+ return 0;
+
+ err_free:
--- /dev/null
+From 20e377e7b2e7c327039f10db80ba5bcc1f6c882d Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris.p.wilson@intel.com>
+Date: Mon, 26 Sep 2022 16:33:33 +0100
+Subject: drm/i915/gt: Use i915_vm_put on ppgtt_create error paths
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Chris Wilson <chris.p.wilson@intel.com>
+
+commit 20e377e7b2e7c327039f10db80ba5bcc1f6c882d upstream.
+
+Now that the scratch page and page directories have a reference back to
+the i915_address_space, we cannot do an immediate free of the ppgtt upon
+error as those buffer objects will perform a later i915_vm_put in their
+deferred frees.
+
+The downside is that by replacing the onion unwind along the error
+paths, the ppgtt cleanup must handle a partially constructed vm. This
+includes ensuring that the vm->cleanup is set prior to the error path.
+
+Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/6900
+Signed-off-by: Chris Wilson <chris.p.wilson@intel.com>
+Fixes: 4d8151ae5329 ("drm/i915: Don't free shared locks while shared")
+Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: <stable@vger.kernel.org> # v5.14+
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Signed-off-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220926153333.102195-1-matthew.auld@intel.com
+(cherry picked from commit c286558f58535cf97b717b946d6c96d774a09d17)
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 16 ++++-----
+ drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 58 ++++++++++++++++++-----------------
+ drivers/gpu/drm/i915/gt/intel_gtt.c | 3 +
+ 3 files changed, 41 insertions(+), 36 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
++++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+@@ -247,6 +247,7 @@ err_scratch1:
+ i915_gem_object_put(vm->scratch[1]);
+ err_scratch0:
+ i915_gem_object_put(vm->scratch[0]);
++ vm->scratch[0] = NULL;
+ return ret;
+ }
+
+@@ -268,9 +269,10 @@ static void gen6_ppgtt_cleanup(struct i9
+ gen6_ppgtt_free_pd(ppgtt);
+ free_scratch(vm);
+
+- mutex_destroy(&ppgtt->flush);
++ if (ppgtt->base.pd)
++ free_pd(&ppgtt->base.vm, ppgtt->base.pd);
+
+- free_pd(&ppgtt->base.vm, ppgtt->base.pd);
++ mutex_destroy(&ppgtt->flush);
+ }
+
+ static void pd_vma_bind(struct i915_address_space *vm,
+@@ -449,19 +451,17 @@ struct i915_ppgtt *gen6_ppgtt_create(str
+
+ err = gen6_ppgtt_init_scratch(ppgtt);
+ if (err)
+- goto err_free;
++ goto err_put;
+
+ ppgtt->base.pd = gen6_alloc_top_pd(ppgtt);
+ if (IS_ERR(ppgtt->base.pd)) {
+ err = PTR_ERR(ppgtt->base.pd);
+- goto err_scratch;
++ goto err_put;
+ }
+
+ return &ppgtt->base;
+
+-err_scratch:
+- free_scratch(&ppgtt->base.vm);
+-err_free:
+- kfree(ppgtt);
++err_put:
++ i915_vm_put(&ppgtt->base.vm);
+ return ERR_PTR(err);
+ }
+--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
++++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+@@ -196,7 +196,10 @@ static void gen8_ppgtt_cleanup(struct i9
+ if (intel_vgpu_active(vm->i915))
+ gen8_ppgtt_notify_vgt(ppgtt, false);
+
+- __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
++ if (ppgtt->pd)
++ __gen8_ppgtt_cleanup(vm, ppgtt->pd,
++ gen8_pd_top_count(vm), vm->top);
++
+ free_scratch(vm);
+ }
+
+@@ -803,8 +806,10 @@ static int gen8_init_scratch(struct i915
+ struct drm_i915_gem_object *obj;
+
+ obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
+- if (IS_ERR(obj))
++ if (IS_ERR(obj)) {
++ ret = PTR_ERR(obj);
+ goto free_scratch;
++ }
+
+ ret = map_pt_dma(vm, obj);
+ if (ret) {
+@@ -823,7 +828,8 @@ static int gen8_init_scratch(struct i915
+ free_scratch:
+ while (i--)
+ i915_gem_object_put(vm->scratch[i]);
+- return -ENOMEM;
++ vm->scratch[0] = NULL;
++ return ret;
+ }
+
+ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
+@@ -901,6 +907,7 @@ err_pd:
+ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags)
+ {
++ struct i915_page_directory *pd;
+ struct i915_ppgtt *ppgtt;
+ int err;
+
+@@ -946,21 +953,7 @@ struct i915_ppgtt *gen8_ppgtt_create(str
+ ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
+ }
+
+- err = gen8_init_scratch(&ppgtt->vm);
+- if (err)
+- goto err_free;
+-
+- ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
+- if (IS_ERR(ppgtt->pd)) {
+- err = PTR_ERR(ppgtt->pd);
+- goto err_free_scratch;
+- }
+-
+- if (!i915_vm_is_4lvl(&ppgtt->vm)) {
+- err = gen8_preallocate_top_level_pdp(ppgtt);
+- if (err)
+- goto err_free_pd;
+- }
++ ppgtt->vm.pte_encode = gen8_pte_encode;
+
+ ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
+ ppgtt->vm.insert_entries = gen8_ppgtt_insert;
+@@ -971,22 +964,31 @@ struct i915_ppgtt *gen8_ppgtt_create(str
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear;
+ ppgtt->vm.foreach = gen8_ppgtt_foreach;
++ ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
+
+- ppgtt->vm.pte_encode = gen8_pte_encode;
++ err = gen8_init_scratch(&ppgtt->vm);
++ if (err)
++ goto err_put;
++
++ pd = gen8_alloc_top_pd(&ppgtt->vm);
++ if (IS_ERR(pd)) {
++ err = PTR_ERR(pd);
++ goto err_put;
++ }
++ ppgtt->pd = pd;
++
++ if (!i915_vm_is_4lvl(&ppgtt->vm)) {
++ err = gen8_preallocate_top_level_pdp(ppgtt);
++ if (err)
++ goto err_put;
++ }
+
+ if (intel_vgpu_active(gt->i915))
+ gen8_ppgtt_notify_vgt(ppgtt, true);
+
+- ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
+-
+ return ppgtt;
+
+-err_free_pd:
+- __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
+- gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
+-err_free_scratch:
+- free_scratch(&ppgtt->vm);
+-err_free:
+- kfree(ppgtt);
++err_put:
++ i915_vm_put(&ppgtt->vm);
+ return ERR_PTR(err);
+ }
+--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
++++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
+@@ -405,6 +405,9 @@ void free_scratch(struct i915_address_sp
+ {
+ int i;
+
++ if (!vm->scratch[0])
++ return;
++
+ for (i = 0; i <= vm->top; i++)
+ i915_gem_object_put(vm->scratch[i]);
+ }
--- /dev/null
+From 540dfd188ea2940582841c1c220bd035a7db0e51 Mon Sep 17 00:00:00 2001
+From: Jianglei Nie <niejianglei2021@163.com>
+Date: Tue, 5 Jul 2022 21:25:46 +0800
+Subject: drm/nouveau: fix a use-after-free in nouveau_gem_prime_import_sg_table()
+
+From: Jianglei Nie <niejianglei2021@163.com>
+
+commit 540dfd188ea2940582841c1c220bd035a7db0e51 upstream.
+
+nouveau_bo_init() is backed by ttm_bo_init() and ferries its return code
+back to the caller. On failures, ttm will call nouveau_bo_del_ttm() and
+free the memory. Thus, when nouveau_bo_init() returns an error, the gem
+object has already been released. Then the call to nouveau_bo_ref() will
+use the freed "nvbo->bo" and lead to a use-after-free bug.
+
+We should delete the call to nouveau_bo_ref() to avoid the use-after-free.
+
+Signed-off-by: Jianglei Nie <niejianglei2021@163.com>
+Reviewed-by: Lyude Paul <lyude@redhat.com>
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Fixes: 019cbd4a4feb ("drm/nouveau: Initialize GEM object before TTM object")
+Cc: Thierry Reding <treding@nvidia.com>
+Cc: <stable@vger.kernel.org> # v5.4+
+Link: https://patchwork.freedesktop.org/patch/msgid/20220705132546.2247677-1-niejianglei2021@163.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_prime.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
++++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
+@@ -71,7 +71,6 @@ struct drm_gem_object *nouveau_gem_prime
+ ret = nouveau_bo_init(nvbo, size, align, NOUVEAU_GEM_DOMAIN_GART,
+ sg, robj);
+ if (ret) {
+- nouveau_bo_ref(NULL, &nvbo);
+ obj = ERR_PTR(ret);
+ goto unlock;
+ }
--- /dev/null
+From 8ba9249396bef37cb68be9e8dee7847f1737db9d Mon Sep 17 00:00:00 2001
+From: Lyude Paul <lyude@redhat.com>
+Date: Tue, 16 Aug 2022 14:04:36 -0400
+Subject: drm/nouveau/kms/nv140-: Disable interlacing
+
+From: Lyude Paul <lyude@redhat.com>
+
+commit 8ba9249396bef37cb68be9e8dee7847f1737db9d upstream.
+
+As it turns out: while Nvidia does actually have interlacing knobs on their
+GPU still pretty much no current GPUs since Volta actually support it.
+Trying interlacing on these GPUs will result in NVDisplay being quite
+unhappy like so:
+
+nouveau 0000:1f:00.0: disp: chid 0 stat 00004802 reason 4 [INVALID_ARG] mthd 2008 data 00000001 code 00080000
+nouveau 0000:1f:00.0: disp: chid 0 stat 10005080 reason 5 [INVALID_STATE] mthd 0200 data 00000001 code 00000001
+
+So let's fix this by following the same behavior Nvidia's driver does and
+disable interlacing entirely.
+
+Signed-off-by: Lyude Paul <lyude@redhat.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Karol Herbst <kherbst@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220816180436.156310-1-lyude@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_connector.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -503,7 +503,8 @@ nouveau_connector_set_encoder(struct drm
+ connector->interlace_allowed =
+ nv_encoder->caps.dp_interlace;
+ else
+- connector->interlace_allowed = true;
++ connector->interlace_allowed =
++ drm->client.device.info.family < NV_DEVICE_INFO_V0_VOLTA;
+ connector->doublescan_allowed = true;
+ } else
+ if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
--- /dev/null
+From d80ca810f096ff66f451e7a3ed2f0cd9ef1ff519 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ardb@kernel.org>
+Date: Thu, 15 Sep 2022 19:00:24 +0200
+Subject: efi: libstub: drop pointless get_memory_map() call
+
+From: Ard Biesheuvel <ardb@kernel.org>
+
+commit d80ca810f096ff66f451e7a3ed2f0cd9ef1ff519 upstream.
+
+Currently, the non-x86 stub code calls get_memory_map() redundantly,
+given that the data it returns is never used anywhere. So drop the call.
+
+Cc: <stable@vger.kernel.org> # v4.14+
+Fixes: 24d7c494ce46 ("efi/arm-stub: Round up FDT allocation to mapping size")
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/efi/libstub/fdt.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+--- a/drivers/firmware/efi/libstub/fdt.c
++++ b/drivers/firmware/efi/libstub/fdt.c
+@@ -280,14 +280,6 @@ efi_status_t allocate_new_fdt_and_exit_b
+ goto fail;
+ }
+
+- /*
+- * Now that we have done our final memory allocation (and free)
+- * we can get the memory map key needed for exit_boot_services().
+- */
+- status = efi_get_memory_map(&map);
+- if (status != EFI_SUCCESS)
+- goto fail_free_new_fdt;
+-
+ status = update_fdt((void *)fdt_addr, fdt_size,
+ (void *)*new_fdt_addr, MAX_FDT_SIZE, cmdline_ptr,
+ initrd_addr, initrd_size);
--- /dev/null
+From def9d705c05eab3fdedeb10ad67907513b12038e Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Tue, 30 Aug 2022 15:37:21 +0200
+Subject: KVM: nVMX: Don't propagate vmcs12's PERF_GLOBAL_CTRL settings to vmcs02
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit def9d705c05eab3fdedeb10ad67907513b12038e upstream.
+
+Don't propagate vmcs12's VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL to vmcs02.
+KVM doesn't disallow L1 from using VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL
+even when KVM itself doesn't use the control, e.g. due to the various
+CPU errata where the MSR can be corrupted on VM-Exit.
+
+Preserve KVM's (vmcs01) setting to hopefully avoid having to toggle the
+bit in vmcs02 at a later point. E.g. if KVM is loading PERF_GLOBAL_CTRL
+when running L1, then odds are good KVM will also load the MSR when
+running L2.
+
+Fixes: 8bf00a529967 ("KVM: VMX: add support for switching of PERF_GLOBAL_CTRL")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Link: https://lore.kernel.org/r/20220830133737.1539624-18-vkuznets@redhat.com
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/nested.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -2322,9 +2322,14 @@ static void prepare_vmcs02_early(struct
+ * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
+ * on the related bits (if supported by the CPU) in the hope that
+ * we can avoid VMWrites during vmx_set_efer().
++ *
++ * Similarly, take vmcs01's PERF_GLOBAL_CTRL in the hope that if KVM is
++ * loading PERF_GLOBAL_CTRL via the VMCS for L1, then KVM will want to
++ * do the same for L2.
+ */
+ exec_control = __vm_entry_controls_get(vmcs01);
+- exec_control |= vmcs12->vm_entry_controls;
++ exec_control |= (vmcs12->vm_entry_controls &
++ ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);
+ exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);
+ if (cpu_has_load_ia32_efer()) {
+ if (guest_efer & EFER_LMA)
--- /dev/null
+From d953540430c5af57f5de97ea9e36253908204027 Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Tue, 30 Aug 2022 23:15:48 +0000
+Subject: KVM: nVMX: Unconditionally purge queued/injected events on nested "exit"
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit d953540430c5af57f5de97ea9e36253908204027 upstream.
+
+Drop pending exceptions and events queued for re-injection when leaving
+nested guest mode, even if the "exit" is due to VM-Fail, SMI, or forced
+by host userspace. Failure to purge events could result in an event
+belonging to L2 being injected into L1.
+
+This _should_ never happen for VM-Fail as all events should be blocked by
+nested_run_pending, but it's possible if KVM, not the L1 hypervisor, is
+the source of VM-Fail when running vmcs02.
+
+SMI is a nop (barring unknown bugs) as recognition of SMI and thus entry
+to SMM is blocked by pending exceptions and re-injected events.
+
+Forced exit is definitely buggy, but has likely gone unnoticed because
+userspace probably follows the forced exit with KVM_SET_VCPU_EVENTS (or
+some other ioctl() that purges the queue).
+
+Fixes: 4f350c6dbcb9 ("kvm: nVMX: Handle deferred early VMLAUNCH/VMRESUME failure properly")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Reviewed-by: Jim Mattson <jmattson@google.com>
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+Link: https://lore.kernel.org/r/20220830231614.3580124-2-seanjc@google.com
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/nested.c | 19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -4264,14 +4264,6 @@ static void prepare_vmcs12(struct kvm_vc
+ nested_vmx_abort(vcpu,
+ VMX_ABORT_SAVE_GUEST_MSR_FAIL);
+ }
+-
+- /*
+- * Drop what we picked up for L2 via vmx_complete_interrupts. It is
+- * preserved above and would only end up incorrectly in L1.
+- */
+- vcpu->arch.nmi_injected = false;
+- kvm_clear_exception_queue(vcpu);
+- kvm_clear_interrupt_queue(vcpu);
+ }
+
+ /*
+@@ -4611,6 +4603,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *
+ WARN_ON_ONCE(nested_early_check);
+ }
+
++ /*
++ * Drop events/exceptions that were queued for re-injection to L2
++ * (picked up via vmx_complete_interrupts()), as well as exceptions
++ * that were pending for L2. Note, this must NOT be hoisted above
++ * prepare_vmcs12(), events/exceptions queued for re-injection need to
++ * be captured in vmcs12 (see vmcs12_save_pending_event()).
++ */
++ vcpu->arch.nmi_injected = false;
++ kvm_clear_exception_queue(vcpu);
++ kvm_clear_interrupt_queue(vcpu);
++
+ vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+
+ /* Update any VMCS fields that might have changed while L2 ran */
--- /dev/null
+From eba9799b5a6efe2993cf92529608e4aa8163d73b Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Tue, 30 Aug 2022 23:15:49 +0000
+Subject: KVM: VMX: Drop bits 31:16 when shoving exception error code into VMCS
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit eba9799b5a6efe2993cf92529608e4aa8163d73b upstream.
+
+Deliberately truncate the exception error code when shoving it into the
+VMCS (VM-Entry field for vmcs01 and vmcs02, VM-Exit field for vmcs12).
+Intel CPUs are incapable of handling 32-bit error codes and will never
+generate an error code with bits 31:16, but userspace can provide an
+arbitrary error code via KVM_SET_VCPU_EVENTS. Failure to drop the bits
+on exception injection results in failed VM-Entry, as VMX disallows
+setting bits 31:16. Setting the bits on VM-Exit would at best confuse
+L1, and at worse induce a nested VM-Entry failure, e.g. if L1 decided to
+reinject the exception back into L2.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Reviewed-by: Jim Mattson <jmattson@google.com>
+Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
+Link: https://lore.kernel.org/r/20220830231614.3580124-3-seanjc@google.com
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/vmx/nested.c | 11 ++++++++++-
+ arch/x86/kvm/vmx/vmx.c | 12 +++++++++++-
+ 2 files changed, 21 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -3839,7 +3839,16 @@ static void nested_vmx_inject_exception_
+ u32 intr_info = nr | INTR_INFO_VALID_MASK;
+
+ if (vcpu->arch.exception.has_error_code) {
+- vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
++ /*
++ * Intel CPUs do not generate error codes with bits 31:16 set,
++ * and more importantly VMX disallows setting bits 31:16 in the
++ * injected error code for VM-Entry. Drop the bits to mimic
++ * hardware and avoid inducing failure on nested VM-Entry if L1
++ * chooses to inject the exception back to L2. AMD CPUs _do_
++ * generate "full" 32-bit error codes, so KVM allows userspace
++ * to inject exception error codes with bits 31:16 set.
++ */
++ vmcs12->vm_exit_intr_error_code = (u16)vcpu->arch.exception.error_code;
+ intr_info |= INTR_INFO_DELIVER_CODE_MASK;
+ }
+
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1687,7 +1687,17 @@ static void vmx_queue_exception(struct k
+ kvm_deliver_exception_payload(vcpu);
+
+ if (has_error_code) {
+- vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
++ /*
++ * Despite the error code being architecturally defined as 32
++ * bits, and the VMCS field being 32 bits, Intel CPUs and thus
++ * VMX don't actually supporting setting bits 31:16. Hardware
++ * will (should) never provide a bogus error code, but AMD CPUs
++ * do generate error codes with bits 31:16 set, and so KVM's
++ * ABI lets userspace shove in arbitrary 32-bit values. Drop
++ * the upper bits to avoid VM-Fail, losing information that
++ * does't really exist is preferable to killing the VM.
++ */
++ vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, (u16)error_code);
+ intr_info |= INTR_INFO_DELIVER_CODE_MASK;
+ }
+
--- /dev/null
+From 6aa5c47c351b22c21205c87977c84809cd015fcf Mon Sep 17 00:00:00 2001
+From: Michal Luczaj <mhal@rbox.co>
+Date: Mon, 22 Aug 2022 00:06:47 +0200
+Subject: KVM: x86/emulator: Fix handing of POP SS to correctly set interruptibility
+
+From: Michal Luczaj <mhal@rbox.co>
+
+commit 6aa5c47c351b22c21205c87977c84809cd015fcf upstream.
+
+The emulator checks the wrong variable while setting the CPU
+interruptibility state, the target segment is embedded in the instruction
+opcode, not the ModR/M register. Fix the condition.
+
+Signed-off-by: Michal Luczaj <mhal@rbox.co>
+Fixes: a5457e7bcf9a ("KVM: emulate: POP SS triggers a MOV SS shadow too")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/all/20220821215900.1419215-1-mhal@rbox.co
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/emulate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -1955,7 +1955,7 @@ static int em_pop_sreg(struct x86_emulat
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+- if (ctxt->modrm_reg == VCPU_SREG_SS)
++ if (seg == VCPU_SREG_SS)
+ ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
+ if (ctxt->op_bytes > 2)
+ rsp_increment(ctxt, ctxt->op_bytes - 2);
--- /dev/null
+From 91db7a3fc7fe670cf1770a398a43bb4a1f776bf1 Mon Sep 17 00:00:00 2001
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Date: Thu, 18 Aug 2022 22:33:08 +0200
+Subject: media: cedrus: Fix endless loop in cedrus_h265_skip_bits()
+
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+
+commit 91db7a3fc7fe670cf1770a398a43bb4a1f776bf1 upstream.
+
+The busy status bit may never de-assert if number of programmed skip
+bits is incorrect, resulting in a kernel hang because the bit is polled
+endlessly in the code. Fix it by adding timeout for the bit-polling.
+This problem is reproducible by setting the data_bit_offset field of
+the HEVC slice params to a wrong value by userspace.
+
+Cc: stable@vger.kernel.org
+Fixes: 7678c5462680 (media: cedrus: Fix decoding for some HEVC videos)
+Reported-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Signed-off-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/staging/media/sunxi/cedrus/cedrus_h265.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+@@ -234,8 +234,9 @@ static void cedrus_h265_skip_bits(struct
+ cedrus_write(dev, VE_DEC_H265_TRIGGER,
+ VE_DEC_H265_TRIGGER_FLUSH_BITS |
+ VE_DEC_H265_TRIGGER_TYPE_N_BITS(tmp));
+- while (cedrus_read(dev, VE_DEC_H265_STATUS) & VE_DEC_H265_STATUS_VLD_BUSY)
+- udelay(1);
++
++ if (cedrus_wait_for(dev, VE_DEC_H265_STATUS, VE_DEC_H265_STATUS_VLD_BUSY))
++ dev_err_ratelimited(dev->dev, "timed out waiting to skip bits\n");
+
+ count += tmp;
+ }
--- /dev/null
+From 708938f8495147fe2e77a9a3e1015d8e6899323e Mon Sep 17 00:00:00 2001
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Date: Thu, 18 Aug 2022 22:33:07 +0200
+Subject: media: cedrus: Set the platform driver data earlier
+
+From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+
+commit 708938f8495147fe2e77a9a3e1015d8e6899323e upstream.
+
+The cedrus_hw_resume() crashes with NULL deference on driver probe if
+runtime PM is disabled because it uses platform data that hasn't been
+set up yet. Fix this by setting the platform data earlier during probe.
+
+Cc: stable@vger.kernel.org
+Fixes: 50e761516f2b (media: platform: Add Cedrus VPU decoder driver)
+Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
+Signed-off-by: Nicolas Dufresne <nicolas.dufresne@collabora.com>
+Reviewed-by: Samuel Holland <samuel@sholland.org>
+Acked-by: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/staging/media/sunxi/cedrus/cedrus.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
+@@ -422,6 +422,8 @@ static int cedrus_probe(struct platform_
+ if (!dev)
+ return -ENOMEM;
+
++ platform_set_drvdata(pdev, dev);
++
+ dev->vfd = cedrus_video_device;
+ dev->dev = &pdev->dev;
+ dev->pdev = pdev;
+@@ -495,8 +497,6 @@ static int cedrus_probe(struct platform_
+ goto err_m2m_mc;
+ }
+
+- platform_set_drvdata(pdev, dev);
+-
+ return 0;
+
+ err_m2m_mc:
tracing-fix-reading-strings-from-synthetic-events.patch
rpmsg-char-avoid-double-destroy-of-default-endpoint.patch
thunderbolt-explicitly-enable-lane-adapter-hotplug-events-at-startup.patch
+efi-libstub-drop-pointless-get_memory_map-call.patch
+media-cedrus-set-the-platform-driver-data-earlier.patch
+media-cedrus-fix-endless-loop-in-cedrus_h265_skip_bits.patch
+blk-throttle-fix-that-io-throttle-can-only-work-for-single-bio.patch
+blk-wbt-call-rq_qos_add-after-wb_normal-is-initialized.patch
+kvm-x86-emulator-fix-handing-of-pop-ss-to-correctly-set-interruptibility.patch
+kvm-nvmx-unconditionally-purge-queued-injected-events-on-nested-exit.patch
+kvm-nvmx-don-t-propagate-vmcs12-s-perf_global_ctrl-settings-to-vmcs02.patch
+kvm-vmx-drop-bits-31-16-when-shoving-exception-error-code-into-vmcs.patch
+staging-greybus-audio_helper-remove-unused-and-wrong-debugfs-usage.patch
+drm-nouveau-kms-nv140-disable-interlacing.patch
+drm-nouveau-fix-a-use-after-free-in-nouveau_gem_prime_import_sg_table.patch
+drm-i915-gt-use-i915_vm_put-on-ppgtt_create-error-paths.patch
--- /dev/null
+From d517cdeb904ddc0cbebcc959d43596426cac40b0 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Fri, 2 Sep 2022 16:37:15 +0200
+Subject: staging: greybus: audio_helper: remove unused and wrong debugfs usage
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+commit d517cdeb904ddc0cbebcc959d43596426cac40b0 upstream.
+
+In the greybus audio_helper code, the debugfs file for the dapm has the
+potential to be removed and memory will be leaked. There is also the
+very real potential for this code to remove ALL debugfs entries from the
+system, and it seems like this is what will really happen if this code
+ever runs. This all is very wrong as the greybus audio driver did not
+create this debugfs file, the sound core did and controls the lifespan
+of it.
+
+So remove all of the debugfs logic from the audio_helper code as there's
+no way it could be correct. If this really is needed, it can come back
+with a fixup for the incorrect usage of the debugfs_lookup() call which
+is what caused this to be noticed at all.
+
+Cc: Johan Hovold <johan@kernel.org>
+Cc: Alex Elder <elder@kernel.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: stable <stable@kernel.org>
+Link: https://lore.kernel.org/r/20220902143715.320500-1-gregkh@linuxfoundation.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/staging/greybus/audio_helper.c | 11 -----------
+ 1 file changed, 11 deletions(-)
+
+--- a/drivers/staging/greybus/audio_helper.c
++++ b/drivers/staging/greybus/audio_helper.c
+@@ -3,7 +3,6 @@
+ * Greybus Audio Sound SoC helper APIs
+ */
+
+-#include <linux/debugfs.h>
+ #include <sound/core.h>
+ #include <sound/soc.h>
+ #include <sound/soc-dapm.h>
+@@ -116,10 +115,6 @@ int gbaudio_dapm_free_controls(struct sn
+ {
+ int i;
+ struct snd_soc_dapm_widget *w, *next_w;
+-#ifdef CONFIG_DEBUG_FS
+- struct dentry *parent = dapm->debugfs_dapm;
+- struct dentry *debugfs_w = NULL;
+-#endif
+
+ mutex_lock(&dapm->card->dapm_mutex);
+ for (i = 0; i < num; i++) {
+@@ -139,12 +134,6 @@ int gbaudio_dapm_free_controls(struct sn
+ continue;
+ }
+ widget++;
+-#ifdef CONFIG_DEBUG_FS
+- if (!parent)
+- debugfs_w = debugfs_lookup(w->name, parent);
+- debugfs_remove(debugfs_w);
+- debugfs_w = NULL;
+-#endif
+ gbaudio_dapm_free_widget(w);
+ }
+ mutex_unlock(&dapm->card->dapm_mutex);