From d29cd29cf934dea66d08c49f7a055abfa7d27257 Mon Sep 17 00:00:00 2001
From: Mike Rapoport <rppt@linux.vnet.ibm.com>
Date: Wed, 6 Sep 2017 16:22:59 -0700
Subject: shmem: introduce shmem_inode_acct_block

commit 0f0796945614b7523987f7eea32407421af4b1ee upstream.

Calls to shmem_acct_block() and updates of used_blocks always follow
one another everywhere they are used.  Combine the two into a helper
function.

Link: http://lkml.kernel.org/r/1497939652-16528-3-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Pavel Emelyanov <xemul@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
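A minimal sketch of the acct/unacct pairing the two new helpers
establish (hypothetical caller, for illustration only; it mirrors what
shmem_alloc_and_acct_page() does in the diff below):

	/* charge nr blocks up front; fails on quota or max_blocks */
	if (!shmem_inode_acct_block(inode, nr))
		return ERR_PTR(-ENOSPC);

	page = shmem_alloc_page(gfp, info, index);
	if (!page) {
		/* allocation failed: roll the block charge back */
		shmem_inode_unacct_blocks(inode, nr);
		return ERR_PTR(-ENOMEM);
	}
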
 mm/shmem.c | 82 ++++++++++++++++++++++++++++--------------------------
 1 file changed, 42 insertions(+), 40 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index b26f11221ea8..e30ffaa065a4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -181,6 +181,38 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages)
 	vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
 }
 
+static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
+{
+	struct shmem_inode_info *info = SHMEM_I(inode);
+	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+
+	if (shmem_acct_block(info->flags, pages))
+		return false;
+
+	if (sbinfo->max_blocks) {
+		if (percpu_counter_compare(&sbinfo->used_blocks,
+					   sbinfo->max_blocks - pages) > 0)
+			goto unacct;
+		percpu_counter_add(&sbinfo->used_blocks, pages);
+	}
+
+	return true;
+
+unacct:
+	shmem_unacct_blocks(info->flags, pages);
+	return false;
+}
+
+static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
+{
+	struct shmem_inode_info *info = SHMEM_I(inode);
+	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+
+	if (sbinfo->max_blocks)
+		percpu_counter_sub(&sbinfo->used_blocks, pages);
+	shmem_unacct_blocks(info->flags, pages);
+}
+
 static const struct super_operations shmem_ops;
 static const struct address_space_operations shmem_aops;
 static const struct file_operations shmem_file_operations;
@@ -237,31 +269,20 @@ static void shmem_recalc_inode(struct inode *inode)
 
 	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
 	if (freed > 0) {
-		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
-		if (sbinfo->max_blocks)
-			percpu_counter_add(&sbinfo->used_blocks, -freed);
 		info->alloced -= freed;
 		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
-		shmem_unacct_blocks(info->flags, freed);
+		shmem_inode_unacct_blocks(inode, freed);
 	}
 }
 
 bool shmem_charge(struct inode *inode, long pages)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
-	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 	unsigned long flags;
 
-	if (shmem_acct_block(info->flags, pages))
+	if (!shmem_inode_acct_block(inode, pages))
 		return false;
 
-	if (sbinfo->max_blocks) {
-		if (percpu_counter_compare(&sbinfo->used_blocks,
-					   sbinfo->max_blocks - pages) > 0)
-			goto unacct;
-		percpu_counter_add(&sbinfo->used_blocks, pages);
-	}
-
 	spin_lock_irqsave(&info->lock, flags);
 	info->alloced += pages;
 	inode->i_blocks += pages * BLOCKS_PER_PAGE;
@@ -270,16 +291,11 @@ bool shmem_charge(struct inode *inode, long pages)
 	inode->i_mapping->nrpages += pages;
 
 	return true;
-
-unacct:
-	shmem_unacct_blocks(info->flags, pages);
-	return false;
 }
 
 void shmem_uncharge(struct inode *inode, long pages)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
-	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 	unsigned long flags;
 
 	spin_lock_irqsave(&info->lock, flags);
@@ -288,9 +304,7 @@ void shmem_uncharge(struct inode *inode, long pages)
 	shmem_recalc_inode(inode);
 	spin_unlock_irqrestore(&info->lock, flags);
 
-	if (sbinfo->max_blocks)
-		percpu_counter_sub(&sbinfo->used_blocks, pages);
-	shmem_unacct_blocks(info->flags, pages);
+	shmem_inode_unacct_blocks(inode, pages);
 }
 
 /*
@@ -1423,9 +1437,10 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 }
 
 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
-		struct shmem_inode_info *info, struct shmem_sb_info *sbinfo,
+		struct inode *inode,
 		pgoff_t index, bool huge)
 {
+	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct page *page;
 	int nr;
 	int err = -ENOSPC;
@@ -1434,14 +1449,8 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 		huge = false;
 	nr = huge ? HPAGE_PMD_NR : 1;
 
-	if (shmem_acct_block(info->flags, nr))
+	if (!shmem_inode_acct_block(inode, nr))
 		goto failed;
-	if (sbinfo->max_blocks) {
-		if (percpu_counter_compare(&sbinfo->used_blocks,
-					   sbinfo->max_blocks - nr) > 0)
-			goto unacct;
-		percpu_counter_add(&sbinfo->used_blocks, nr);
-	}
 
 	if (huge)
 		page = shmem_alloc_hugepage(gfp, info, index);
@@ -1454,10 +1463,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 	}
 
 	err = -ENOMEM;
-	if (sbinfo->max_blocks)
-		percpu_counter_add(&sbinfo->used_blocks, -nr);
-unacct:
-	shmem_unacct_blocks(info->flags, nr);
+	shmem_inode_unacct_blocks(inode, nr);
 failed:
 	return ERR_PTR(err);
 }
@@ -1717,10 +1723,9 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	}
 
 alloc_huge:
-	page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
-			index, true);
+	page = shmem_alloc_and_acct_page(gfp, inode, index, true);
 	if (IS_ERR(page)) {
-alloc_nohuge:	page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
+alloc_nohuge:	page = shmem_alloc_and_acct_page(gfp, inode,
 			index, false);
 	}
 	if (IS_ERR(page)) {
@@ -1842,10 +1847,7 @@ alloc_nohuge:	page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
 	 * Error recovery.
 	 */
 unacct:
-	if (sbinfo->max_blocks)
-		percpu_counter_sub(&sbinfo->used_blocks,
-				   1 << compound_order(page));
-	shmem_unacct_blocks(info->flags, 1 << compound_order(page));
+	shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
 
 	if (PageTransHuge(page)) {
 		unlock_page(page);
-- 
2.17.1
