From e00f4f4d0ff7e13b9115428a245b49108d625f09 Mon Sep 17 00:00:00 2001
From: Tejun Heo <tj@kernel.org>
Date: Mon, 21 Nov 2016 18:03:32 -0500
Subject: block,blkcg: use __GFP_NOWARN for best-effort allocations in blkcg

From: Tejun Heo <tj@kernel.org>

commit e00f4f4d0ff7e13b9115428a245b49108d625f09 upstream.

blkcg allocates some per-cgroup data structures with GFP_NOWAIT and
when that fails falls back to operations which aren't specific to the
cgroup.  Occasional failures are expected under pressure and falling
back to non-cgroup operation is the right thing to do.

Unfortunately, I forgot to add __GFP_NOWARN to these allocations and
these expected failures end up creating a lot of noise.  Add
__GFP_NOWARN.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Marc MERLIN <marc@merlins.org>
Reported-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Jens Axboe <axboe@fb.com>
Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 block/blk-cgroup.c  |    9 +++++----
 block/cfq-iosched.c |    3 ++-
 2 files changed, 7 insertions(+), 5 deletions(-)

--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -185,7 +185,8 @@ static struct blkcg_gq *blkg_create(stru
 	}
 
 	wb_congested = wb_congested_get_create(&q->backing_dev_info,
-					       blkcg->css.id, GFP_NOWAIT);
+					       blkcg->css.id,
+					       GFP_NOWAIT | __GFP_NOWARN);
 	if (!wb_congested) {
 		ret = -ENOMEM;
 		goto err_put_css;
@@ -193,7 +194,7 @@ static struct blkcg_gq *blkg_create(stru
 
 	/* allocate */
 	if (!new_blkg) {
-		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT);
+		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
 		if (unlikely(!new_blkg)) {
 			ret = -ENOMEM;
 			goto err_put_congested;
@@ -1022,7 +1023,7 @@ blkcg_css_alloc(struct cgroup_subsys_sta
 	}
 
 	spin_lock_init(&blkcg->lock);
-	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
+	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
 	INIT_HLIST_HEAD(&blkcg->blkg_list);
 #ifdef CONFIG_CGROUP_WRITEBACK
 	INIT_LIST_HEAD(&blkcg->cgwb_list);
@@ -1238,7 +1239,7 @@ pd_prealloc:
 		if (blkg->pd[pol->plid])
 			continue;
 
-		pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
+		pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
 		if (!pd)
 			swap(pd, pd_prealloc);
 		if (!pd) {
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3811,7 +3811,8 @@ cfq_get_queue(struct cfq_data *cfqd, boo
 		goto out;
 	}
 
-	cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
+	cfqq = kmem_cache_alloc_node(cfq_pool,
+				     GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
 				     cfqd->queue->node);
 	if (!cfqq) {
 		cfqq = &cfqd->oom_cfqq;
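
For readers unfamiliar with the flag combination, the sketch below illustrates
the best-effort allocation pattern the commit message describes: an
opportunistic GFP_NOWAIT allocation whose failure is expected under memory
pressure and handled by falling back, with __GFP_NOWARN keeping those expected
failures out of the kernel log. The sketch is not part of the patch; struct
my_pd and my_pd_try_alloc are hypothetical names used only for illustration.

/*
 * Illustrative sketch only (not from the patched files): a best-effort,
 * no-reclaim allocation that is allowed to fail quietly.  GFP_NOWAIT means
 * the allocator will not sleep or reclaim, so failure under pressure is
 * normal; __GFP_NOWARN suppresses the allocation-failure warning because
 * the caller has a working fallback.
 */
#include <linux/gfp.h>
#include <linux/slab.h>

struct my_pd {			/* hypothetical per-cgroup policy data */
	int weight;
};

/*
 * Try to allocate per-cgroup data; returning NULL tells the caller to use
 * the non-cgroup fallback path rather than treat this as a hard error.
 */
static struct my_pd *my_pd_try_alloc(int node)
{
	struct my_pd *pd;

	pd = kzalloc_node(sizeof(*pd), GFP_NOWAIT | __GFP_NOWARN, node);
	if (!pd)
		return NULL;	/* expected under pressure; fall back */

	pd->weight = 100;
	return pd;
}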