From b628c8b445d35fc284e7fa85d1aaa980ebe2243d Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Mon, 21 Sep 2020 18:15:16 +0200
Subject: [PATCH] 5.8-stable patches

added patches:
      mm-memcg-fix-memcg-reclaim-soft-lockup.patch
      nvme-loop-set-ctrl-state-connecting-after-init.patch
---
 ...-memcg-fix-memcg-reclaim-soft-lockup.patch | 72 +++++++++++++++++++
 ...set-ctrl-state-connecting-after-init.patch | 64 +++++++++++++++++
 queue-5.8/series                              |  2 +
 3 files changed, 138 insertions(+)
 create mode 100644 queue-5.8/mm-memcg-fix-memcg-reclaim-soft-lockup.patch
 create mode 100644 queue-5.8/nvme-loop-set-ctrl-state-connecting-after-init.patch

diff --git a/queue-5.8/mm-memcg-fix-memcg-reclaim-soft-lockup.patch b/queue-5.8/mm-memcg-fix-memcg-reclaim-soft-lockup.patch
new file mode 100644
index 00000000000..889feaa47dc
--- /dev/null
+++ b/queue-5.8/mm-memcg-fix-memcg-reclaim-soft-lockup.patch
@@ -0,0 +1,72 @@
+From e3336cab2579012b1e72b5265adf98e2d6e244ad Mon Sep 17 00:00:00 2001
+From: Xunlei Pang
+Date: Fri, 4 Sep 2020 16:35:27 -0700
+Subject: mm: memcg: fix memcg reclaim soft lockup
+
+From: Xunlei Pang
+
+commit e3336cab2579012b1e72b5265adf98e2d6e244ad upstream.
+
+We've met softlockup with "CONFIG_PREEMPT_NONE=y", when the target memcg
+doesn't have any reclaimable memory.
+
+It can be easily reproduced as below:
+
+  watchdog: BUG: soft lockup - CPU#0 stuck for 111s![memcg_test:2204]
+  CPU: 0 PID: 2204 Comm: memcg_test Not tainted 5.9.0-rc2+ #12
+  Call Trace:
+    shrink_lruvec+0x49f/0x640
+    shrink_node+0x2a6/0x6f0
+    do_try_to_free_pages+0xe9/0x3e0
+    try_to_free_mem_cgroup_pages+0xef/0x1f0
+    try_charge+0x2c1/0x750
+    mem_cgroup_charge+0xd7/0x240
+    __add_to_page_cache_locked+0x2fd/0x370
+    add_to_page_cache_lru+0x4a/0xc0
+    pagecache_get_page+0x10b/0x2f0
+    filemap_fault+0x661/0xad0
+    ext4_filemap_fault+0x2c/0x40
+    __do_fault+0x4d/0xf9
+    handle_mm_fault+0x1080/0x1790
+
+It only happens on our 1-vcpu instances, because there's no chance for
+oom reaper to run to reclaim the to-be-killed process.
+
+Add a cond_resched() at the upper shrink_node_memcgs() to solve this
+issue, this will mean that we will get a scheduling point for each memcg
+in the reclaimed hierarchy without any dependency on the reclaimable
+memory in that memcg thus making it more predictable.
+
+Suggested-by: Michal Hocko
+Signed-off-by: Xunlei Pang
+Signed-off-by: Andrew Morton
+Acked-by: Chris Down
+Acked-by: Michal Hocko
+Acked-by: Johannes Weiner
+Link: http://lkml.kernel.org/r/1598495549-67324-1-git-send-email-xlpang@linux.alibaba.com
+Signed-off-by: Linus Torvalds
+Fixes: b0dedc49a2da ("mm/vmscan.c: iterate only over charged shrinkers during memcg shrink_slab()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Julius Hemanth Pitti
+Signed-off-by: Greg Kroah-Hartman
+---
+ mm/vmscan.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2619,6 +2619,14 @@ static void shrink_node_memcgs(pg_data_t
+ 		unsigned long reclaimed;
+ 		unsigned long scanned;
+ 
++		/*
++		 * This loop can become CPU-bound when target memcgs
++		 * aren't eligible for reclaim - either because they
++		 * don't have any reclaimable pages, or because their
++		 * memory is explicitly protected. Avoid soft lockups.
++		 */
++		cond_resched();
++
+ 		switch (mem_cgroup_protected(target_memcg, memcg)) {
+ 		case MEMCG_PROT_MIN:
+ 			/*
diff --git a/queue-5.8/nvme-loop-set-ctrl-state-connecting-after-init.patch b/queue-5.8/nvme-loop-set-ctrl-state-connecting-after-init.patch
new file mode 100644
index 00000000000..a4d8e0a1e34
--- /dev/null
+++ b/queue-5.8/nvme-loop-set-ctrl-state-connecting-after-init.patch
@@ -0,0 +1,64 @@
+From 64d452b3560b7a55277c8d9ef0a8635e62136580 Mon Sep 17 00:00:00 2001
+From: Chaitanya Kulkarni
+Date: Tue, 28 Jul 2020 19:36:47 -0700
+Subject: nvme-loop: set ctrl state connecting after init
+
+From: Chaitanya Kulkarni
+
+commit 64d452b3560b7a55277c8d9ef0a8635e62136580 upstream.
+
+When creating a loop controller (ctrl) in nvme_loop_create_ctrl() ->
+nvme_init_ctrl() we set the ctrl state to NVME_CTRL_NEW.
+
+Prior to [1] NVME_CTRL_NEW state was allowed in nvmf_check_ready() for
+fabrics command type connect. Now, this fails in the following code path
+for fabrics connect command when creating admin queue :-
+
+nvme_loop_create_ctrl()
+ nvme_loop_configure_admin_queue()
+  nvmf_connect_admin_queue()
+   __nvme_submit_sync_cmd()
+    blk_execute_rq()
+     nvme_loop_queue_rq()
+      nvmf_check_ready()
+
+# echo "transport=loop,nqn=fs" > /dev/nvme-fabrics
+[ 6047.741327] nvmet: adding nsid 1 to subsystem fs
+[ 6048.756430] nvme nvme1: Connect command failed, error wo/DNR bit: 880
+
+We need to set the ctrl state to NVME_CTRL_CONNECTING after :-
+nvme_loop_create_ctrl()
+ nvme_init_ctrl()
+so that the above mentioned check for nvmf_check_ready() will return
+true.
+
+This patch sets the ctrl state to connecting after we init the ctrl in
+nvme_loop_create_ctrl()
+ nvme_init_ctrl() .
+
+[1] commit aa63fa6776a7 ("nvme-fabrics: allow to queue requests for live queues")
+
+Fixes: aa63fa6776a7 ("nvme-fabrics: allow to queue requests for live queues")
+Signed-off-by: Chaitanya Kulkarni
+Reviewed-by: Sagi Grimberg
+Tested-by: Sagi Grimberg
+Signed-off-by: Christoph Hellwig
+Cc: Yi Zhang
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/nvme/target/loop.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/nvme/target/loop.c
++++ b/drivers/nvme/target/loop.c
+@@ -583,6 +583,9 @@ static struct nvme_ctrl *nvme_loop_creat
+ 	if (ret)
+ 		goto out_put_ctrl;
+ 
++	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
++	WARN_ON_ONCE(!changed);
++
+ 	ret = -ENOMEM;
+ 
+ 	ctrl->ctrl.sqsize = opts->queue_size - 1;
diff --git a/queue-5.8/series b/queue-5.8/series
index 386613e08fc..fc70fe4962c 100644
--- a/queue-5.8/series
+++ b/queue-5.8/series
@@ -114,3 +114,5 @@ dm-dax-fix-table-reference-counts.patch
 mm-memory_hotplug-drain-per-cpu-pages-again-during-memory-offline.patch
 dm-call-proper-helper-to-determine-dax-support.patch
 dax-fix-compilation-for-config_dax-config_fs_dax.patch
+mm-memcg-fix-memcg-reclaim-soft-lockup.patch
+nvme-loop-set-ctrl-state-connecting-after-init.patch
-- 
2.47.3