// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"
#include "compression.h"

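/*
 * Helpers that bound how much checksum data is handled at once: the
 * maximum number of checksums that fit in a single csum item and the
 * maximum number of data bytes covered by one btrfs_ordered_sum.
 */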
#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))

#define MAX_ORDERED_SUM_BYTES(fs_info) ((PAGE_SIZE - \
				   sizeof(struct btrfs_ordered_sum)) / \
				   sizeof(u32) * (fs_info)->sectorsize)

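/*
 * Insert a regular (BTRFS_FILE_EXTENT_REG) file extent item for @objectid
 * at file offset @pos, pointing at the on-disk extent starting at
 * @disk_offset.  The remaining arguments fill in the sizes and encoding
 * fields of the new item.
 */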
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

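/*
 * Find the checksum item covering disk byte @bytenr and return a pointer
 * to the checksum slot for that block within the item.  Returns
 * ERR_PTR(-EFBIG) if the preceding item ends exactly at @bytenr (so it
 * could be extended), or ERR_PTR(-ENOENT) if no item covers @bytenr.
 */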
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
				fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}

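/*
 * Position @path at the file extent item for (@objectid, @offset).
 * @mod < 0 searches with room reserved for a deletion, @mod != 0 COWs the
 * path, and @mod == 0 does a plain read-only search.
 */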
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	int ret;
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	file_key.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
	return ret;
}

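/*
 * Look up the data checksums for every block covered by @bio, first from
 * the in-memory ordered sums and then from the csum tree.  The checksums
 * are copied into @dst when it is provided, otherwise into the
 * btrfs_io_bio attached to @bio.  Direct IO callers set @dio and pass the
 * starting file offset in @logical_offset.
 */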
static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
				   u64 logical_offset, u32 *dst, int dio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;
	u64 offset = 0;
	u64 item_start_offset = 0;
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u64 page_bytes_left;
	u32 diff;
	int nblocks;
	int count = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return BLK_STS_RESOURCE;

	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum = kmalloc_array(nblocks, csum_size,
							GFP_NOFS);
			if (!btrfs_bio->csum) {
				btrfs_free_path(path);
				return BLK_STS_RESOURCE;
			}
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = (u8 *)dst;
	}

	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
		path->reada = READA_FORWARD;

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction.  So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;

	bio_for_each_segment(bvec, bio, iter) {
		page_bytes_left = bvec.bv_len;
		if (count)
			goto next;

		if (!dio)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       (u32 *)csum, nblocks);
		if (count)
			goto found;

		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + fs_info->sectorsize - 1,
						EXTENT_NODATASUM);
				} else {
					btrfs_info_rl(fs_info,
						"no csum found for inode %llu start %llu",
						btrfs_ino(BTRFS_I(inode)), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				fs_info->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / fs_info->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;
next:
		while (count--) {
			disk_bytenr += fs_info->sectorsize;
			offset += fs_info->sectorsize;
			page_bytes_left -= fs_info->sectorsize;
			if (!page_bytes_left)
				break; /* move to next bio */
		}
	}

	WARN_ON_ONCE(count);
	btrfs_free_path(path);
	return 0;
}

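/*
 * Thin wrappers around __btrfs_lookup_bio_sums(): the buffered variant
 * derives file offsets from the pages in the bio, while the _dio variant
 * is handed the starting file offset directly.
 */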
blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
{
	return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}

blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
	return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}

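/*
 * Collect the checksums for the disk byte range [start, end] into a list
 * of btrfs_ordered_sum structures appended to @list.  @search_commit makes
 * the search read from the commit root without taking locks.
 */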
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     MAX_ORDERED_SUM_BYTES(fs_info));
			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			size >>= fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += fs_info->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}

/*
 * btrfs_csum_one_bio - Calculates checksums of the data contained inside a bio
 * @inode:	 Owner of the data inside the bio
 * @bio:	 Contains the data to be checksummed
 * @file_start:  offset in file this bio begins to describe
 * @contig:	 Boolean. If true/1 means all bio vecs in this bio are
 *		 contiguous and they begin at @file_start in the file. False/0
 *		 means this bio can contain potentially discontiguous bio vecs
 *		 so the logical offset of each should be calculated separately.
 */
blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
		       u64 file_start, int contig)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered = NULL;
	char *data;
	struct bvec_iter iter;
	struct bio_vec bvec;
	int index;
	int nr_sectors;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	int i;
	u64 offset;
	unsigned nofs_flag;

	nofs_flag = memalloc_nofs_save();
	sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
			GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	if (!sums)
		return BLK_STS_RESOURCE;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = 0; /* shut up gcc */

	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	bio_for_each_segment(bvec, bio, iter) {
		if (!contig)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;

		if (!ordered) {
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			BUG_ON(!ordered); /* Logic error */
		}

		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
						 bvec.bv_len + fs_info->sectorsize
						 - 1);

		for (i = 0; i < nr_sectors; i++) {
			if (offset >= ordered->file_offset + ordered->len ||
			    offset < ordered->file_offset) {
				unsigned long bytes_left;

				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				nofs_flag = memalloc_nofs_save();
				sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
						      bytes_left), GFP_KERNEL);
				memalloc_nofs_restore(nofs_flag);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								      offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;
			}

			sums->sums[index] = ~(u32)0;
			data = kmap_atomic(bvec.bv_page);
			sums->sums[index]
				= btrfs_csum_data(data + bvec.bv_offset
						  + (i * fs_info->sectorsize),
						  sums->sums[index],
						  fs_info->sectorsize);
			kunmap_atomic(data);
			btrfs_csum_final(sums->sums[index],
					 (char *)(sums->sums + index));
			index++;
			offset += fs_info->sectorsize;
			this_sum_bytes += fs_info->sectorsize;
			total_bytes += fs_info->sectorsize;
		}

	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}

/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, bytenr + len)
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = fs_info->sb->s_blocksize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum     ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(fs_info, path, key);
	} else {
		BUG();
	}
}

/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
{
	struct btrfs_root *root = fs_info->csum_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int blocksize_bits = fs_info->sb->s_blocksize_bits;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			int del_nr = 1;

			/*
			 * Check how many csum items preceding this one in this
			 * leaf correspond to our range and then delete them all
			 * at once.
			 */
			if (key.offset > bytenr && path->slots[0] > 0) {
				int slot = path->slots[0] - 1;

				while (slot >= 0) {
					struct btrfs_key pk;

					btrfs_item_key_to_cpu(leaf, &pk, slot);
					if (pk.offset < bytenr ||
					    pk.type != BTRFS_EXTENT_CSUM_KEY ||
					    pk.objectid !=
					    BTRFS_EXTENT_CSUM_OBJECTID)
						break;
					path->slots[0] = slot;
					del_nr++;
					key.offset = pk.offset;
					slot--;
				}
			}
			ret = btrfs_del_items(trans, root, path,
					      path->slots[0], del_nr);
			if (ret)
				goto out;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum                ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header.  So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes.  Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memzero_extent_buffer(leaf, item_offset + offset,
					      shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(fs_info, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

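/*
 * Insert the checksums carried by @sums into the csum tree, extending an
 * existing csum item when the new checksums are contiguous with it and
 * inserting new items otherwise.  Loops until all of sums->len has been
 * written out.
 */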
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(fs_info, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 1)
				found_next = 1;
			if (ret != 0)
				goto insert;
			slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.  Grow it
	 */
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
			fs_info->sb->s_blocksize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(leaf) <
				 sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(leaf) -
					 sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff,
			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
					 fs_info->sb->s_blocksize_bits);

		tmp = max_t(u64, 1, tmp);
		tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	ins_size = (u32)(sums->len - total_bytes) >>
		   fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			 ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	ins_size /= csum_size;
	total_bytes += ins_size * fs_info->sectorsize;
	index += ins_size;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;

fail_unlock:
	goto out;
}

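/*
 * Fill in @em from the on-disk file extent item @fi found at @path.
 * Handles regular, preallocated and inline extents; for inline extents the
 * compression flag is only copied when @new_inline is false.
 */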
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	em->bdev = fs_info->fs_devices->latest_bdev;
	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;

	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
			btrfs_file_extent_num_bytes(leaf, fi);
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_ram_bytes(leaf, fi);
		extent_end = ALIGN(extent_start + size,
				   fs_info->sectorsize);
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
			  type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}