From: Joel Becker <joel.becker@oracle.com>
Date: Fri, 24 Oct 2008 17:33:40 -0700
Subject: ocfs2: Improve ocfs2_read_xattr_bucket().
Patch-mainline: 2.6.29

The ocfs2_read_xattr_bucket() function would read an xattr bucket into a
list of buffer heads.  However, we have a nice ocfs2_xattr_bucket
structure.  Let's have it fill that out instead.
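
For reference, the bucket structure and accessors that the diff below
leans on look roughly like this.  This sketch is an assumption based on
fs/ocfs2/xattr.c around the time of this patch, not part of the change
itself, so the exact definition may differ:

	/* Assumed shape of the bucket abstraction (see fs/ocfs2/xattr.c) */
	struct ocfs2_xattr_bucket {
		/* One buffer_head per block backing the bucket */
		struct buffer_head *bu_bhs[OCFS2_XATTR_MAX_BLOCKS_PER_BUCKET];
	};

	/* Block n of the bucket, and the xattr header stored in block 0 */
	#define bucket_block(_b, _n)	((_b)->bu_bhs[(_n)]->b_data)
	#define bucket_xh(_b)	((struct ocfs2_xattr_header *)bucket_block((_b), 0))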

In addition, ocfs2_read_xattr_bucket() would initialize buffer heads for
a bucket that's never been on disk before.  That's confusing.  Let's
call that functionality ocfs2_init_xattr_bucket().
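
Putting the two together: a caller zeroes a bucket on the stack, picks
read or init depending on whether the bucket already lives on disk, and
releases it when done.  A minimal usage sketch, not taken verbatim from
this patch (bucket_is_new is a hypothetical flag; error logging
trimmed):

	struct ocfs2_xattr_bucket bucket;
	int ret;

	memset(&bucket, 0, sizeof(struct ocfs2_xattr_bucket));

	if (bucket_is_new)	/* hypothetical: bucket not yet on disk */
		ret = ocfs2_init_xattr_bucket(inode, &bucket, blkno);
	else
		ret = ocfs2_read_xattr_bucket(inode, &bucket, blkno);
	if (ret)
		goto out;

	/* ... work with bucket_xh(&bucket) and bucket_block(&bucket, i) ... */
out:
	ocfs2_xattr_bucket_relse(inode, &bucket);

Note that both helpers release the bucket themselves on failure, and the
out: paths in the diff below release unconditionally in the same way, so
the extra release here is safe.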

The functions ocfs2_cp_xattr_bucket() and ocfs2_divide_xattr_bucket() are
updated to use the ocfs2_xattr_bucket structure rather than raw bh
lists.  That way they can use the new read/init calls.  In addition,
they drop the wasted read of an existing target bucket.
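
The dropped read shows up directly in the new calling pattern for the
target bucket.  A condensed before/after, paraphrasing the hunks below:

	/* before: t_blkno is read from disk whenever !t_is_new, even
	 * though every byte of it is overwritten right after */
	ret = ocfs2_read_xattr_bucket(inode, t_blkno, t_bhs, t_is_new);

	/* after: just grab and initialize the buffer_heads */
	ret = ocfs2_init_xattr_bucket(inode, &t_bucket, t_blkno);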

Signed-off-by: Joel Becker <joel.becker@oracle.com>
Signed-off-by: Mark Fasheh <mfasheh@suse.com>
---
 fs/ocfs2/xattr.c |  165 ++++++++++++++++++++++++++----------------------------
 1 files changed, 79 insertions(+), 86 deletions(-)

diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 3478ad1..fa13fa4 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -168,6 +168,48 @@ static void ocfs2_xattr_bucket_relse(struct inode *inode,
 	}
 }
 
+/*
+ * A bucket that has never been written to disk doesn't need to be
+ * read.  We just need the buffer_heads.  Don't call this for
+ * buckets that are already on disk.  ocfs2_read_xattr_bucket() initializes
+ * them fully.
+ */
+static int ocfs2_init_xattr_bucket(struct inode *inode,
+				   struct ocfs2_xattr_bucket *bucket,
+				   u64 xb_blkno)
+{
+	int i, rc = 0;
+	int blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
+
+	for (i = 0; i < blks; i++) {
+		bucket->bu_bhs[i] = sb_getblk(inode->i_sb, xb_blkno + i);
+		if (!bucket->bu_bhs[i]) {
+			rc = -EIO;
+			mlog_errno(rc);
+			break;
+		}
+
+		ocfs2_set_new_buffer_uptodate(inode, bucket->bu_bhs[i]);
+	}
+
+	if (rc)
+		ocfs2_xattr_bucket_relse(inode, bucket);
+	return rc;
+}
+
+/* Read the xattr bucket at xb_blkno */
+static int ocfs2_read_xattr_bucket(struct inode *inode,
+				   struct ocfs2_xattr_bucket *bucket,
+				   u64 xb_blkno)
+{
+	int rc, blks = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
+
+	rc = ocfs2_read_blocks(inode, xb_blkno, blks, bucket->bu_bhs, 0);
+	if (rc)
+		ocfs2_xattr_bucket_relse(inode, bucket);
+	return rc;
+}
+
 static inline const char *ocfs2_xattr_prefix(int name_index)
 {
 	struct xattr_handler *handler = NULL;
@@ -3097,31 +3139,6 @@ out:
 	return ret;
 }
 
-static int ocfs2_read_xattr_bucket(struct inode *inode,
-				   u64 blkno,
-				   struct buffer_head **bhs,
-				   int new)
-{
-	int ret = 0;
-	u16 i, blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
-
-	if (!new)
-		return ocfs2_read_blocks(inode, blkno,
-					 blk_per_bucket, bhs, 0);
-
-	for (i = 0; i < blk_per_bucket; i++) {
-		bhs[i] = sb_getblk(inode->i_sb, blkno + i);
-		if (bhs[i] == NULL) {
-			ret = -EIO;
-			mlog_errno(ret);
-			break;
-		}
-		ocfs2_set_new_buffer_uptodate(inode, bhs[i]);
-	}
-
-	return ret;
-}
-
 /*
  * Find the suitable pos when we divide a bucket into 2.
  * We have to make sure the xattrs with the same hash value exist
@@ -3184,7 +3201,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
 	int ret, i;
 	int count, start, len, name_value_len = 0, xe_len, name_offset = 0;
 	u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
-	struct buffer_head **s_bhs, **t_bhs = NULL;
+	struct ocfs2_xattr_bucket s_bucket, t_bucket;
 	struct ocfs2_xattr_header *xh;
 	struct ocfs2_xattr_entry *xe;
 	int blocksize = inode->i_sb->s_blocksize;
@@ -3192,37 +3209,34 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
 	mlog(0, "move some of xattrs from bucket %llu to %llu\n",
 	     (unsigned long long)blk, (unsigned long long)new_blk);
 
-	s_bhs = kcalloc(blk_per_bucket, sizeof(struct buffer_head *), GFP_NOFS);
-	if (!s_bhs)
-		return -ENOMEM;
+	memset(&s_bucket, 0, sizeof(struct ocfs2_xattr_bucket));
+	memset(&t_bucket, 0, sizeof(struct ocfs2_xattr_bucket));
 
-	ret = ocfs2_read_xattr_bucket(inode, blk, s_bhs, 0);
+	ret = ocfs2_read_xattr_bucket(inode, &s_bucket, blk);
 	if (ret) {
 		mlog_errno(ret);
 		goto out;
 	}
 
-	ret = ocfs2_journal_access(handle, inode, s_bhs[0],
+	ret = ocfs2_journal_access(handle, inode, s_bucket.bu_bhs[0],
 				   OCFS2_JOURNAL_ACCESS_WRITE);
 	if (ret) {
 		mlog_errno(ret);
 		goto out;
 	}
 
-	t_bhs = kcalloc(blk_per_bucket, sizeof(struct buffer_head *), GFP_NOFS);
-	if (!t_bhs) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	ret = ocfs2_read_xattr_bucket(inode, new_blk, t_bhs, new_bucket_head);
+	/*
+	 * Even if !new_bucket_head, we're overwriting t_bucket.  Thus,
+	 * there's no need to read it.
+	 */
+	ret = ocfs2_init_xattr_bucket(inode, &t_bucket, new_blk);
 	if (ret) {
 		mlog_errno(ret);
 		goto out;
 	}
 
 	for (i = 0; i < blk_per_bucket; i++) {
-		ret = ocfs2_journal_access(handle, inode, t_bhs[i],
+		ret = ocfs2_journal_access(handle, inode, t_bucket.bu_bhs[i],
 					   new_bucket_head ?
 					   OCFS2_JOURNAL_ACCESS_CREATE :
 					   OCFS2_JOURNAL_ACCESS_WRITE);
@@ -3232,7 +3246,7 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
 		}
 	}
 
-	xh = (struct ocfs2_xattr_header *)s_bhs[0]->b_data;
+	xh = bucket_xh(&s_bucket);
 	count = le16_to_cpu(xh->xh_count);
 	start = ocfs2_xattr_find_divide_pos(xh);
 
@@ -3245,9 +3259,9 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
 	 * that of the last entry in the previous bucket.
 	 */
 	for (i = 0; i < blk_per_bucket; i++)
-		memset(t_bhs[i]->b_data, 0, blocksize);
+		memset(bucket_block(&t_bucket, i), 0, blocksize);
 
-	xh = (struct ocfs2_xattr_header *)t_bhs[0]->b_data;
+	xh = bucket_xh(&t_bucket);
 	xh->xh_free_start = cpu_to_le16(blocksize);
 	xh->xh_entries[0].xe_name_hash = xe->xe_name_hash;
 	le32_add_cpu(&xh->xh_entries[0].xe_name_hash, 1);
@@ -3257,10 +3271,11 @@ static int ocfs2_divide_xattr_bucket(struct inode *inode,
 
 	/* copy the whole bucket to the new first. */
 	for (i = 0; i < blk_per_bucket; i++)
-		memcpy(t_bhs[i]->b_data, s_bhs[i]->b_data, blocksize);
+		memcpy(bucket_block(&t_bucket, i), bucket_block(&s_bucket, i),
+		       blocksize);
 
 	/* update the new bucket. */
-	xh = (struct ocfs2_xattr_header *)t_bhs[0]->b_data;
+	xh = bucket_xh(&t_bucket);
 
 	/*
 	 * Calculate the total name/value len and xh_free_start for
@@ -3325,7 +3340,7 @@ set_num_buckets:
 		xh->xh_num_buckets = 0;
 
 	for (i = 0; i < blk_per_bucket; i++) {
-		ocfs2_journal_dirty(handle, t_bhs[i]);
+		ocfs2_journal_dirty(handle, t_bucket.bu_bhs[i]);
 		if (ret)
 			mlog_errno(ret);
 	}
@@ -3342,29 +3357,20 @@ set_num_buckets:
 	if (start == count)
 		goto out;
 
-	xh = (struct ocfs2_xattr_header *)s_bhs[0]->b_data;
+	xh = bucket_xh(&s_bucket);
 	memset(&xh->xh_entries[start], 0,
 	       sizeof(struct ocfs2_xattr_entry) * (count - start));
 	xh->xh_count = cpu_to_le16(start);
 	xh->xh_free_start = cpu_to_le16(name_offset);
 	xh->xh_name_value_len = cpu_to_le16(name_value_len);
 
-	ocfs2_journal_dirty(handle, s_bhs[0]);
+	ocfs2_journal_dirty(handle, s_bucket.bu_bhs[0]);
 	if (ret)
 		mlog_errno(ret);
 
 out:
-	if (s_bhs) {
-		for (i = 0; i < blk_per_bucket; i++)
-			brelse(s_bhs[i]);
-	}
-	kfree(s_bhs);
-
-	if (t_bhs) {
-		for (i = 0; i < blk_per_bucket; i++)
-			brelse(t_bhs[i]);
-	}
-	kfree(t_bhs);
+	ocfs2_xattr_bucket_relse(inode, &s_bucket);
+	ocfs2_xattr_bucket_relse(inode, &t_bucket);
 
 	return ret;
 }
@@ -3384,7 +3390,7 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode,
 	int ret, i;
 	int blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
 	int blocksize = inode->i_sb->s_blocksize;
-	struct buffer_head **s_bhs, **t_bhs = NULL;
+	struct ocfs2_xattr_bucket s_bucket, t_bucket;
 
 	BUG_ON(s_blkno == t_blkno);
 
@@ -3392,28 +3398,23 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode,
 	     (unsigned long long)s_blkno, (unsigned long long)t_blkno,
 	     t_is_new);
 
-	s_bhs = kzalloc(sizeof(struct buffer_head *) * blk_per_bucket,
-			GFP_NOFS);
-	if (!s_bhs)
-		return -ENOMEM;
+	memset(&s_bucket, 0, sizeof(struct ocfs2_xattr_bucket));
+	memset(&t_bucket, 0, sizeof(struct ocfs2_xattr_bucket));
 
-	ret = ocfs2_read_xattr_bucket(inode, s_blkno, s_bhs, 0);
+	ret = ocfs2_read_xattr_bucket(inode, &s_bucket, s_blkno);
 	if (ret)
 		goto out;
 
-	t_bhs = kzalloc(sizeof(struct buffer_head *) * blk_per_bucket,
-			GFP_NOFS);
-	if (!t_bhs) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	ret = ocfs2_read_xattr_bucket(inode, t_blkno, t_bhs, t_is_new);
+	/*
+	 * Even if !t_is_new, we're overwriting t_bucket.  Thus,
+	 * there's no need to read it.
+	 */
+	ret = ocfs2_init_xattr_bucket(inode, &t_bucket, t_blkno);
 	if (ret)
 		goto out;
 
 	for (i = 0; i < blk_per_bucket; i++) {
-		ret = ocfs2_journal_access(handle, inode, t_bhs[i],
+		ret = ocfs2_journal_access(handle, inode, t_bucket.bu_bhs[i],
 					   t_is_new ?
 					   OCFS2_JOURNAL_ACCESS_CREATE :
 					   OCFS2_JOURNAL_ACCESS_WRITE);
@@ -3422,22 +3423,14 @@ static int ocfs2_cp_xattr_bucket(struct inode *inode,
 	}
 
 	for (i = 0; i < blk_per_bucket; i++) {
-		memcpy(t_bhs[i]->b_data, s_bhs[i]->b_data, blocksize);
-		ocfs2_journal_dirty(handle, t_bhs[i]);
+		memcpy(bucket_block(&t_bucket, i), bucket_block(&s_bucket, i),
+		       blocksize);
+		ocfs2_journal_dirty(handle, t_bucket.bu_bhs[i]);
 	}
 
 out:
-	if (s_bhs) {
-		for (i = 0; i < blk_per_bucket; i++)
-			brelse(s_bhs[i]);
-	}
-	kfree(s_bhs);
-
-	if (t_bhs) {
-		for (i = 0; i < blk_per_bucket; i++)
-			brelse(t_bhs[i]);
-	}
-	kfree(t_bhs);
+	ocfs2_xattr_bucket_relse(inode, &s_bucket);
+	ocfs2_xattr_bucket_relse(inode, &t_bucket);
 
 	return ret;
 }
-- 
1.5.6