git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 11 Feb 2021 14:22:18 +0000 (15:22 +0100)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 11 Feb 2021 14:22:18 +0000 (15:22 +0100)
added patches:
blk-mq-don-t-hold-q-sysfs_lock-in-blk_mq_map_swqueue.patch
block-don-t-hold-q-sysfs_lock-in-elevator_init_mq.patch
fix-unsynchronized-access-to-sev-members-through-svm_register_enc_region.patch

queue-4.19/blk-mq-don-t-hold-q-sysfs_lock-in-blk_mq_map_swqueue.patch [new file with mode: 0644]
queue-4.19/block-don-t-hold-q-sysfs_lock-in-elevator_init_mq.patch [new file with mode: 0644]
queue-4.19/fix-unsynchronized-access-to-sev-members-through-svm_register_enc_region.patch [new file with mode: 0644]
queue-4.19/series

diff --git a/queue-4.19/blk-mq-don-t-hold-q-sysfs_lock-in-blk_mq_map_swqueue.patch b/queue-4.19/blk-mq-don-t-hold-q-sysfs_lock-in-blk_mq_map_swqueue.patch
new file mode 100644 (file)
index 0000000..e54e678
--- /dev/null
@@ -0,0 +1,57 @@
+From c6ba933358f0d7a6a042b894dba20cc70396a6d3 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Tue, 27 Aug 2019 19:01:46 +0800
+Subject: blk-mq: don't hold q->sysfs_lock in blk_mq_map_swqueue
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit c6ba933358f0d7a6a042b894dba20cc70396a6d3 upstream.
+
+blk_mq_map_swqueue() is called from blk_mq_init_allocated_queue()
+and blk_mq_update_nr_hw_queues(). For the former caller, the kobject
+isn't exposed to userspace yet. For the latter caller, hctx sysfs entries
+and debugfs are un-registered before updating nr_hw_queues.
+
+On the other hand, commit 2f8f1336a48b ("blk-mq: always free hctx after
+request queue is freed") moves freeing hctx into queue's release
+handler, so there won't be race with queue release path too.
+
+So don't hold q->sysfs_lock in blk_mq_map_swqueue().
+
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Hannes Reinecke <hare@suse.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Mike Snitzer <snitzer@redhat.com>
+Cc: Bart Van Assche <bvanassche@acm.org>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Jack Wang <jinpu.wang@cloud.ionos.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-mq.c |    7 -------
+ 1 file changed, 7 deletions(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2324,11 +2324,6 @@ static void blk_mq_map_swqueue(struct re
+       struct blk_mq_ctx *ctx;
+       struct blk_mq_tag_set *set = q->tag_set;
+-      /*
+-       * Avoid others reading imcomplete hctx->cpumask through sysfs
+-       */
+-      mutex_lock(&q->sysfs_lock);
+-
+       queue_for_each_hw_ctx(q, hctx, i) {
+               cpumask_clear(hctx->cpumask);
+               hctx->nr_ctx = 0;
+@@ -2362,8 +2357,6 @@ static void blk_mq_map_swqueue(struct re
+               hctx->ctxs[hctx->nr_ctx++] = ctx;
+       }
+-      mutex_unlock(&q->sysfs_lock);
+-
+       queue_for_each_hw_ctx(q, hctx, i) {
+               /*
+                * If no software queues are mapped to this hardware queue,
diff --git a/queue-4.19/block-don-t-hold-q-sysfs_lock-in-elevator_init_mq.patch b/queue-4.19/block-don-t-hold-q-sysfs_lock-in-elevator_init_mq.patch
new file mode 100644 (file)
index 0000000..dca53f8
--- /dev/null
@@ -0,0 +1,69 @@
+From c48dac137a62a5d6fa1ef3fa445cbd9c43655a76 Mon Sep 17 00:00:00 2001
+From: Ming Lei <ming.lei@redhat.com>
+Date: Tue, 27 Aug 2019 19:01:45 +0800
+Subject: block: don't hold q->sysfs_lock in elevator_init_mq
+
+From: Ming Lei <ming.lei@redhat.com>
+
+commit c48dac137a62a5d6fa1ef3fa445cbd9c43655a76 upstream.
+
+The original comment says:
+
+       q->sysfs_lock must be held to provide mutual exclusion between
+       elevator_switch() and here.
+
+Which is simply wrong. elevator_init_mq() is only called from
+blk_mq_init_allocated_queue, which is always called before the request
+queue is registered via blk_register_queue(), for dm-rq or normal rq
+based driver. However, queue's kobject is only exposed and added to sysfs
+in blk_register_queue(). So there isn't such race between elevator_switch()
+and elevator_init_mq().
+
+So avoid to hold q->sysfs_lock in elevator_init_mq().
+
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Hannes Reinecke <hare@suse.com>
+Cc: Greg KH <gregkh@linuxfoundation.org>
+Cc: Mike Snitzer <snitzer@redhat.com>
+Cc: Bart Van Assche <bvanassche@acm.org>
+Cc: Damien Le Moal <damien.lemoal@wdc.com>
+Reviewed-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Jack Wang <jinpu.wang@cloud.ionos.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/elevator.c |   14 +++++---------
+ 1 file changed, 5 insertions(+), 9 deletions(-)
+
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -980,23 +980,19 @@ int elevator_init_mq(struct request_queu
+       if (q->nr_hw_queues != 1)
+               return 0;
+-      /*
+-       * q->sysfs_lock must be held to provide mutual exclusion between
+-       * elevator_switch() and here.
+-       */
+-      mutex_lock(&q->sysfs_lock);
++      WARN_ON_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags));
++
+       if (unlikely(q->elevator))
+-              goto out_unlock;
++              goto out;
+       e = elevator_get(q, "mq-deadline", false);
+       if (!e)
+-              goto out_unlock;
++              goto out;
+       err = blk_mq_init_sched(q, e);
+       if (err)
+               elevator_put(e);
+-out_unlock:
+-      mutex_unlock(&q->sysfs_lock);
++out:
+       return err;
+ }
diff --git a/queue-4.19/fix-unsynchronized-access-to-sev-members-through-svm_register_enc_region.patch b/queue-4.19/fix-unsynchronized-access-to-sev-members-through-svm_register_enc_region.patch
new file mode 100644 (file)
index 0000000..2e4eb94
--- /dev/null
@@ -0,0 +1,93 @@
+From 19a23da53932bc8011220bd8c410cb76012de004 Mon Sep 17 00:00:00 2001
+From: Peter Gonda <pgonda@google.com>
+Date: Wed, 27 Jan 2021 08:15:24 -0800
+Subject: Fix unsynchronized access to sev members through svm_register_enc_region
+
+From: Peter Gonda <pgonda@google.com>
+
+commit 19a23da53932bc8011220bd8c410cb76012de004 upstream.
+
+Grab kvm->lock before pinning memory when registering an encrypted
+region; sev_pin_memory() relies on kvm->lock being held to ensure
+correctness when checking and updating the number of pinned pages.
+
+Add a lockdep assertion to help prevent future regressions.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Brijesh Singh <brijesh.singh@amd.com>
+Cc: Sean Christopherson <seanjc@google.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: stable@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Fixes: 1e80fdc09d12 ("KVM: SVM: Pin guest memory when SEV is active")
+Signed-off-by: Peter Gonda <pgonda@google.com>
+
+V2
+ - Fix up patch description
+ - Correct file paths svm.c -> sev.c
+ - Add unlock of kvm->lock on sev_pin_memory error
+
+V1
+ - https://lore.kernel.org/kvm/20210126185431.1824530-1-pgonda@google.com/
+
+Message-Id: <20210127161524.2832400-1-pgonda@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm.c |   18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1832,6 +1832,8 @@ static struct page **sev_pin_memory(stru
+       struct page **pages;
+       unsigned long first, last;
++      lockdep_assert_held(&kvm->lock);
++
+       if (ulen == 0 || uaddr + ulen < uaddr)
+               return NULL;
+@@ -7084,12 +7086,21 @@ static int svm_register_enc_region(struc
+       if (!region)
+               return -ENOMEM;
++      mutex_lock(&kvm->lock);
+       region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
+       if (!region->pages) {
+               ret = -ENOMEM;
++              mutex_unlock(&kvm->lock);
+               goto e_free;
+       }
++      region->uaddr = range->addr;
++      region->size = range->size;
++
++      mutex_lock(&kvm->lock);
++      list_add_tail(&region->list, &sev->regions_list);
++      mutex_unlock(&kvm->lock);
++
+       /*
+        * The guest may change the memory encryption attribute from C=0 -> C=1
+        * or vice versa for this memory range. Lets make sure caches are
+@@ -7098,13 +7109,6 @@ static int svm_register_enc_region(struc
+        */
+       sev_clflush_pages(region->pages, region->npages);
+-      region->uaddr = range->addr;
+-      region->size = range->size;
+-
+-      mutex_lock(&kvm->lock);
+-      list_add_tail(&region->list, &sev->regions_list);
+-      mutex_unlock(&kvm->lock);
+-
+       return ret;
+ e_free:
index e6c19d000eae98a00b82c841aec8cbd9121da42a..4b11696494515a34b03b4284db4186e171cfe4fd 100644 (file)
@@ -16,3 +16,6 @@ sunrpc-handle-0-length-opaque-xdr-object-data-proper.patch
 lib-string-add-strscpy_pad-function.patch
 include-trace-events-writeback.h-fix-wstringop-trunc.patch
 memcg-fix-a-crash-in-wb_workfn-when-a-device-disappe.patch
+fix-unsynchronized-access-to-sev-members-through-svm_register_enc_region.patch
+block-don-t-hold-q-sysfs_lock-in-elevator_init_mq.patch
+blk-mq-don-t-hold-q-sysfs_lock-in-blk_mq_map_swqueue.patch