From: Greg Kroah-Hartman
Date: Mon, 12 Nov 2018 02:32:30 +0000 (-0800)
Subject: 4.19-stable patches
X-Git-Tag: v4.19.2~7
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=622af54a19f4ccabbcf1b5554ccb4b2fb6deee55;p=thirdparty%2Fkernel%2Fstable-queue.git

4.19-stable patches

added patches:
	md-fix-invalid-stored-role-for-a-disk-try2.patch
---

diff --git a/queue-4.19/md-fix-invalid-stored-role-for-a-disk-try2.patch b/queue-4.19/md-fix-invalid-stored-role-for-a-disk-try2.patch
new file mode 100644
index 00000000000..30ac58c2d3c
--- /dev/null
+++ b/queue-4.19/md-fix-invalid-stored-role-for-a-disk-try2.patch
@@ -0,0 +1,58 @@
+From 9e753ba9b9b405e3902d9f08aec5f2ea58a0c317 Mon Sep 17 00:00:00 2001
+From: Shaohua Li
+Date: Sun, 14 Oct 2018 17:05:07 -0700
+Subject: MD: fix invalid stored role for a disk - try2
+
+From: Shaohua Li
+
+commit 9e753ba9b9b405e3902d9f08aec5f2ea58a0c317 upstream.
+
+Commit d595567dc4f0 (MD: fix invalid stored role for a disk) broke linear
+hotadd. Let's only fix the role for disks in raid1/10.
+Based on Guoqing's original patch.
+
+Reported-by: kernel test robot
+Cc: Gioh Kim
+Cc: Guoqing Jiang
+Signed-off-by: Shaohua Li
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/md/md.c     |    4 ----
+ drivers/md/raid1.c  |    1 +
+ drivers/md/raid10.c |    1 +
+ 3 files changed, 2 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1776,10 +1776,6 @@ static int super_1_validate(struct mddev
+ 		} else
+ 			set_bit(In_sync, &rdev->flags);
+ 		rdev->raid_disk = role;
+-		if (role >= mddev->raid_disks) {
+-			rdev->saved_raid_disk = -1;
+-			rdev->raid_disk = -1;
+-		}
+ 		break;
+ 	}
+ 	if (sb->devflags & WriteMostly1)
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1734,6 +1734,7 @@ static int raid1_add_disk(struct mddev *
+ 	 */
+ 	if (rdev->saved_raid_disk >= 0 &&
+ 	    rdev->saved_raid_disk >= first &&
++	    rdev->saved_raid_disk < conf->raid_disks &&
+ 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
+ 		first = last = rdev->saved_raid_disk;
+ 
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1808,6 +1808,7 @@ static int raid10_add_disk(struct mddev
+ 		first = last = rdev->raid_disk;
+ 
+ 	if (rdev->saved_raid_disk >= first &&
++	    rdev->saved_raid_disk < conf->geo.raid_disks &&
+ 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
+ 		mirror = rdev->saved_raid_disk;
+ 	else
diff --git a/queue-4.19/nvmet-rdma-use-a-private-workqueue-for-delete.patch b/queue-4.19/nvmet-rdma-use-a-private-workqueue-for-delete.patch
deleted file mode 100644
index 4f930652ff4..00000000000
--- a/queue-4.19/nvmet-rdma-use-a-private-workqueue-for-delete.patch
+++ /dev/null
@@ -1,158 +0,0 @@
-From foo@baz Sat Nov 10 10:51:03 PST 2018
-From: Sagi Grimberg
-Date: Thu, 27 Sep 2018 11:00:31 -0700
-Subject: nvmet-rdma: use a private workqueue for delete
-
-From: Sagi Grimberg
-
-[ Upstream commit 2acf70ade79d26b97611a8df52eb22aa33814cd4 ]
-
-Queue deletion is done asynchronous when the last reference on the queue
-is dropped. Thus, in order to make sure we don't over allocate under a
-connect/disconnect storm, we let queue deletion complete before making
-forward progress.
-
-However, given that we flush the system_wq from rdma_cm context which
-runs from a workqueue context, we can have a circular locking complaint
-[1]. Fix that by using a private workqueue for queue deletion.
-
-[1]:
-======================================================
-WARNING: possible circular locking dependency detected
-4.19.0-rc4-dbg+ #3 Not tainted
-------------------------------------------------------
-kworker/5:0/39 is trying to acquire lock:
-00000000a10b6db9 (&id_priv->handler_mutex){+.+.}, at: rdma_destroy_id+0x6f/0x440 [rdma_cm]
-
-but task is already holding lock:
-00000000331b4e2c ((work_completion)(&queue->release_work)){+.+.}, at: process_one_work+0x3ed/0xa20
-
-which lock already depends on the new lock.
-
-the existing dependency chain (in reverse order) is:
-
--> #3 ((work_completion)(&queue->release_work)){+.+.}:
-       process_one_work+0x474/0xa20
-       worker_thread+0x63/0x5a0
-       kthread+0x1cf/0x1f0
-       ret_from_fork+0x24/0x30
-
--> #2 ((wq_completion)"events"){+.+.}:
-       flush_workqueue+0xf3/0x970
-       nvmet_rdma_cm_handler+0x133d/0x1734 [nvmet_rdma]
-       cma_ib_req_handler+0x72f/0xf90 [rdma_cm]
-       cm_process_work+0x2e/0x110 [ib_cm]
-       cm_req_handler+0x135b/0x1c30 [ib_cm]
-       cm_work_handler+0x2b7/0x38cd [ib_cm]
-       process_one_work+0x4ae/0xa20
-nvmet_rdma:nvmet_rdma_cm_handler: nvmet_rdma: disconnected (10): status 0 id 0000000040357082
-       worker_thread+0x63/0x5a0
-       kthread+0x1cf/0x1f0
-       ret_from_fork+0x24/0x30
-nvme nvme0: Reconnecting in 10 seconds...
-
--> #1 (&id_priv->handler_mutex/1){+.+.}:
-       __mutex_lock+0xfe/0xbe0
-       mutex_lock_nested+0x1b/0x20
-       cma_ib_req_handler+0x6aa/0xf90 [rdma_cm]
-       cm_process_work+0x2e/0x110 [ib_cm]
-       cm_req_handler+0x135b/0x1c30 [ib_cm]
-       cm_work_handler+0x2b7/0x38cd [ib_cm]
-       process_one_work+0x4ae/0xa20
-       worker_thread+0x63/0x5a0
-       kthread+0x1cf/0x1f0
-       ret_from_fork+0x24/0x30
-
--> #0 (&id_priv->handler_mutex){+.+.}:
-       lock_acquire+0xc5/0x200
-       __mutex_lock+0xfe/0xbe0
-       mutex_lock_nested+0x1b/0x20
-       rdma_destroy_id+0x6f/0x440 [rdma_cm]
-       nvmet_rdma_release_queue_work+0x8e/0x1b0 [nvmet_rdma]
-       process_one_work+0x4ae/0xa20
-       worker_thread+0x63/0x5a0
-       kthread+0x1cf/0x1f0
-       ret_from_fork+0x24/0x30
-
-Fixes: 777dc82395de ("nvmet-rdma: occasionally flush ongoing controller teardown")
-Reported-by: Bart Van Assche
-Signed-off-by: Sagi Grimberg
-Tested-by: Bart Van Assche
-
-Signed-off-by: Christoph Hellwig
-
-Signed-off-by: Sasha Levin
-Signed-off-by: Greg Kroah-Hartman
----
- drivers/nvme/target/rdma.c |   19 +++++++++++++++----
- 1 file changed, 15 insertions(+), 4 deletions(-)
-
---- a/drivers/nvme/target/rdma.c
-+++ b/drivers/nvme/target/rdma.c
-@@ -122,6 +122,7 @@ struct nvmet_rdma_device {
- 	int			inline_page_count;
- };
- 
-+struct workqueue_struct *nvmet_rdma_delete_wq;
- static bool nvmet_rdma_use_srq;
- module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
- MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
-@@ -1267,12 +1268,12 @@ static int nvmet_rdma_queue_connect(stru
- 
- 	if (queue->host_qid == 0) {
- 		/* Let inflight controller teardown complete */
--		flush_scheduled_work();
-+		flush_workqueue(nvmet_rdma_delete_wq);
- 	}
- 
- 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
- 	if (ret) {
--		schedule_work(&queue->release_work);
-+		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
- 		/* Destroying rdma_cm id is not needed here */
- 		return 0;
- 	}
-@@ -1337,7 +1338,7 @@ static void __nvmet_rdma_queue_disconnec
- 
- 	if (disconnect) {
- 		rdma_disconnect(queue->cm_id);
--		schedule_work(&queue->release_work);
-+		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
- 	}
- }
- 
-@@ -1367,7 +1368,7 @@ static void nvmet_rdma_queue_connect_fai
- 	mutex_unlock(&nvmet_rdma_queue_mutex);
- 
- 	pr_err("failed to connect queue %d\n", queue->idx);
--	schedule_work(&queue->release_work);
-+	queue_work(nvmet_rdma_delete_wq, &queue->release_work);
- }
- 
- /**
-@@ -1649,8 +1650,17 @@ static int __init nvmet_rdma_init(void)
- 	if (ret)
- 		goto err_ib_client;
- 
-+	nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
-+			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
-+	if (!nvmet_rdma_delete_wq) {
-+		ret = -ENOMEM;
-+		goto err_unreg_transport;
-+	}
-+
- 	return 0;
- 
-+err_unreg_transport:
-+	nvmet_unregister_transport(&nvmet_rdma_ops);
- err_ib_client:
- 	ib_unregister_client(&nvmet_rdma_ib_client);
- 	return ret;
-@@ -1658,6 +1668,7 @@ err_ib_client:
- 
- static void __exit nvmet_rdma_exit(void)
- {
-+	destroy_workqueue(nvmet_rdma_delete_wq);
- 	nvmet_unregister_transport(&nvmet_rdma_ops);
- 	ib_unregister_client(&nvmet_rdma_ib_client);
- 	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
diff --git a/queue-4.19/series b/queue-4.19/series
index a21d1a33643..b1086c855ff 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -75,7 +75,6 @@ iwlwifi-mvm-clear-hw_restart_requested-when-stopping-the-interface.patch
 iwlwifi-mvm-check-for-n_profiles-validity-in-ewrd-acpi.patch
 x86-olpc-indicate-that-legacy-pc-xo-1-platform-should-not-register-rtc.patch
 wlcore-fix-bug-with-clear-completion-on-timeout.patch
-nvmet-rdma-use-a-private-workqueue-for-delete.patch
 acpi-pptt-handle-architecturally-unknown-cache-types.patch
 acpi-pm-lpit-register-sysfs-attributes-based-on-fadt.patch
 acpi-processor-fix-the-return-value-of-acpi_processor_ids_walk.patch
@@ -359,3 +358,4 @@ vt-fix-broken-display-when-running-aptitude.patch
 userns-also-map-extents-in-the-reverse-map-to-kernel-ids.patch
 bpf-wait-for-running-bpf-programs-when-updating-map-in-map.patch
 vga_switcheroo-fix-missing-gpu_bound-call-at-audio-client-registration.patch
+md-fix-invalid-stored-role-for-a-disk-try2.patch
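
Note on the pattern in the nvmet-rdma patch removed above: flushing the shared
system_wq from code that itself runs in a work item creates the circular
dependency lockdep reports in [1], so the upstream commit gives queue deletion
its own workqueue, which can be flushed without waiting on unrelated work.
Below is a minimal, self-contained sketch of that pattern; the module name and
all demo_* identifiers are hypothetical placeholders, not part of the patch,
and the work handler is a trivial stand-in for nvmet_rdma_release_queue_work().

/* demo_wq.c - sketch of the private-workqueue pattern */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_delete_wq;	/* hypothetical name */
static struct work_struct demo_release_work;

static void demo_release(struct work_struct *work)
{
	/* Stand-in for the real release handler, which tears down a
	 * queue and may destroy an rdma_cm id. */
	pr_info("demo: release work ran on private workqueue\n");
}

static int __init demo_init(void)
{
	/* WQ_MEM_RECLAIM: deletion work must make forward progress
	 * under memory pressure, as in the upstream commit. */
	demo_delete_wq = alloc_workqueue("demo-delete-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!demo_delete_wq)
		return -ENOMEM;

	INIT_WORK(&demo_release_work, demo_release);
	queue_work(demo_delete_wq, &demo_release_work);

	/* Waits only for work queued on demo_delete_wq; unlike
	 * flush_scheduled_work(), this creates no dependency on every
	 * other work item sharing system_wq. */
	flush_workqueue(demo_delete_wq);
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_delete_wq);	/* drains remaining work */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");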