// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Code for managing the extent btree and dynamically updating the writeback
 * dirty sector count.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_gc.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "buckets.h"
#include "checksum.h"
#include "compress.h"
#include "debug.h"
#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "inode.h"
#include "journal.h"
#include "replicas.h"
#include "super.h"
#include "super-io.h"
#include "trace.h"
#include "util.h"

static unsigned bch2_crc_field_size_max[] = {
	[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
};

static void bch2_extent_crc_pack(union bch_extent_crc *,
				 struct bch_extent_crc_unpacked,
				 enum bch_extent_entry_type);

static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
						   unsigned dev)
{
	struct bch_dev_io_failures *i;

	for (i = f->devs; i < f->devs + f->nr; i++)
		if (i->dev == dev)
			return i;

	return NULL;
}

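/*
 * Note an IO failure against the device backing @p: failures are tracked per
 * device, and a change in p->idx (a different replica/retry path on the same
 * device) resets the failure count.
 */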
void bch2_mark_io_failure(struct bch_io_failures *failed,
			  struct extent_ptr_decoded *p)
{
	struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);

	if (!f) {
		BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));

		f = &failed->devs[failed->nr++];
		f->dev		= p->ptr.dev;
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else if (p->idx != f->idx) {
		f->idx		= p->idx;
		f->nr_failed	= 1;
		f->nr_retries	= 0;
	} else {
		f->nr_failed++;
	}
}

/*
 * returns true if p1 is better than p2:
 */
static inline bool ptr_better(struct bch_fs *c,
			      const struct extent_ptr_decoded p1,
			      const struct extent_ptr_decoded p2)
{
	if (likely(!p1.idx && !p2.idx)) {
		struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
		struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);

		u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
		u64 l2 = atomic64_read(&dev2->cur_latency[READ]);

		/* Pick at random, biased in favor of the faster device: */

		return bch2_rand_range(l1 + l2) > l1;
	}

	if (bch2_force_reconstruct_read)
		return p1.idx > p2.idx;

	return p1.idx < p2.idx;
}

/*
 * This picks a non-stale pointer to read from, preferring devices without
 * recorded IO failures (@failed may be NULL, meaning pick from any device).
 * If every suitable replica has failed, it will still pick a pointer from a
 * failing device. Returns 1 if a pointer was picked, 0 if the extent should
 * be treated as a hole, -EIO if there are dirty pointers but none readable.
 */
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
			       struct bch_io_failures *failed,
			       struct extent_ptr_decoded *pick)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_dev_io_failures *f;
	struct bch_dev *ca;
	int ret = 0;

	if (k.k->type == KEY_TYPE_error)
		return -EIO;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		/*
		 * Unwritten extent: no need to actually read, treat it as a
		 * hole and return 0s:
		 */
		if (p.ptr.unwritten)
			return 0;

		ca = bch_dev_bkey_exists(c, p.ptr.dev);

		/*
		 * If there are any dirty pointers it's an error if we can't
		 * read:
		 */
		if (!ret && !p.ptr.cached)
			ret = -EIO;

		if (p.ptr.cached && ptr_stale(ca, &p.ptr))
			continue;

		f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
		if (f)
			p.idx = f->nr_failed < f->nr_retries
				? f->idx
				: f->idx + 1;

		if (!p.idx &&
		    !bch2_dev_is_readable(ca))
			p.idx++;

		if (bch2_force_reconstruct_read &&
		    !p.idx && p.has_ec)
			p.idx++;

		if (p.idx >= (unsigned) p.has_ec + 1)
			continue;

		if (ret > 0 && !ptr_better(c, p, *pick))
			continue;

		*pick = p;
		ret = 1;
	}

	return ret;
}

/* KEY_TYPE_btree_ptr: */

int bch2_btree_ptr_invalid(struct bch_fs *c, struct bkey_s_c k,
			   enum bkey_invalid_flags flags,
			   struct printbuf *err)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_val_u64s(k.k) > BCH_REPLICAS_MAX, c, err,
			 btree_ptr_val_too_big,
			 "value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX);

	ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
fsck_err:
	return ret;
}

void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}

int bch2_btree_ptr_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
			      enum bkey_invalid_flags flags,
			      struct printbuf *err)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
	int ret = 0;

	bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX,
			 c, err, btree_ptr_v2_val_too_big,
			 "value too big (%zu > %zu)",
			 bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);

	bkey_fsck_err_on(bpos_ge(bp.v->min_key, bp.k->p),
			 c, err, btree_ptr_v2_min_key_bad,
			 "min_key > key");

	ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
fsck_err:
	return ret;
}

void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bkey_s_c k)
{
	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

	prt_printf(out, "seq %llx written %u min_key %s",
		   le64_to_cpu(bp.v->seq),
		   le16_to_cpu(bp.v->sectors_written),
		   BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : "");

	bch2_bpos_to_text(out, bp.v->min_key);
	prt_printf(out, " ");
	bch2_bkey_ptrs_to_text(out, c, k);
}

void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
			      unsigned big_endian, int write,
			      struct bkey_s k)
{
	struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(k);

	compat_bpos(0, btree_id, version, big_endian, write, &bp.v->min_key);

	if (version < bcachefs_metadata_version_inode_btree_change &&
	    btree_id_is_extents(btree_id) &&
	    !bkey_eq(bp.v->min_key, POS_MIN))
		bp.v->min_key = write
			? bpos_nosnap_predecessor(bp.v->min_key)
			: bpos_nosnap_successor(bp.v->min_key);
}

/* KEY_TYPE_extent: */

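/*
 * Try to merge extent @r into @l: this only succeeds if the two keys carry
 * the same sequence of entry types, every pointer pair is physically
 * contiguous (same device, generation and bucket), and each pair of checksum
 * entries can either be reused as-is or merged without overflowing the crc
 * field's maximum size.
 */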
bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
{
	struct bkey_ptrs l_ptrs = bch2_bkey_ptrs(l);
	struct bkey_ptrs_c r_ptrs = bch2_bkey_ptrs_c(r);
	union bch_extent_entry *en_l;
	const union bch_extent_entry *en_r;
	struct extent_ptr_decoded lp, rp;
	bool use_right_ptr;
	struct bch_dev *ca;

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_type(en_l) != extent_entry_type(en_r))
			return false;

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	if (en_l < l_ptrs.end || en_r < r_ptrs.end)
		return false;

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	lp.crc = bch2_extent_crc_unpack(l.k, NULL);
	rp.crc = bch2_extent_crc_unpack(r.k, NULL);

	while (__bkey_ptr_next_decode(l.k, l_ptrs.end, lp, en_l) &&
	       __bkey_ptr_next_decode(r.k, r_ptrs.end, rp, en_r)) {
		if (lp.ptr.offset + lp.crc.offset + lp.crc.live_size !=
		    rp.ptr.offset + rp.crc.offset ||
		    lp.ptr.dev		!= rp.ptr.dev ||
		    lp.ptr.gen		!= rp.ptr.gen ||
		    lp.ptr.unwritten	!= rp.ptr.unwritten ||
		    lp.has_ec		!= rp.has_ec)
			return false;

		/* Extents may not straddle buckets: */
		ca = bch_dev_bkey_exists(c, lp.ptr.dev);
		if (PTR_BUCKET_NR(ca, &lp.ptr) != PTR_BUCKET_NR(ca, &rp.ptr))
			return false;

		if (lp.has_ec != rp.has_ec ||
		    (lp.has_ec &&
		     (lp.ec.block	!= rp.ec.block ||
		      lp.ec.redundancy	!= rp.ec.redundancy ||
		      lp.ec.idx		!= rp.ec.idx)))
			return false;

		if (lp.crc.compression_type != rp.crc.compression_type ||
		    lp.crc.nonce != rp.crc.nonce)
			return false;

		if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
		    lp.crc.uncompressed_size) {
			/* can use left extent's crc entry */
		} else if (lp.crc.live_size <= rp.crc.offset) {
			/* can use right extent's crc entry */
		} else {
			/* check if checksums can be merged: */
			if (lp.crc.csum_type != rp.crc.csum_type ||
			    lp.crc.nonce != rp.crc.nonce ||
			    crc_is_compressed(lp.crc) ||
			    !bch2_checksum_mergeable(lp.crc.csum_type))
				return false;

			if (lp.crc.offset + lp.crc.live_size != lp.crc.compressed_size ||
			    rp.crc.offset)
				return false;

			if (lp.crc.csum_type &&
			    lp.crc.uncompressed_size +
			    rp.crc.uncompressed_size > (c->opts.encoded_extent_max >> 9))
				return false;
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end && en_r < r_ptrs.end) {
		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			if (crc_l.uncompressed_size + crc_r.uncompressed_size >
			    bch2_crc_field_size_max[extent_entry_type(en_l)])
				return false;
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	use_right_ptr = false;
	en_l = l_ptrs.start;
	en_r = r_ptrs.start;
	while (en_l < l_ptrs.end) {
		if (extent_entry_type(en_l) == BCH_EXTENT_ENTRY_ptr &&
		    use_right_ptr)
			en_l->ptr = en_r->ptr;

		if (extent_entry_is_crc(en_l)) {
			struct bch_extent_crc_unpacked crc_l =
				bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			struct bch_extent_crc_unpacked crc_r =
				bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			use_right_ptr = false;

			if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
			    crc_l.uncompressed_size) {
				/* can use left extent's crc entry */
			} else if (crc_l.live_size <= crc_r.offset) {
				/* can use right extent's crc entry */
				crc_r.offset -= crc_l.live_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
						     extent_entry_type(en_l));
				use_right_ptr = true;
			} else {
				crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
								 crc_l.csum,
								 crc_r.csum,
								 crc_r.uncompressed_size << 9);

				crc_l.uncompressed_size	+= crc_r.uncompressed_size;
				crc_l.compressed_size	+= crc_r.compressed_size;
				bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
						     extent_entry_type(en_l));
			}
		}

		en_l = extent_entry_next(en_l);
		en_r = extent_entry_next(en_r);
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}

/* KEY_TYPE_reservation: */

int bch2_reservation_invalid(struct bch_fs *c, struct bkey_s_c k,
			     enum bkey_invalid_flags flags,
			     struct printbuf *err)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
	int ret = 0;

	bkey_fsck_err_on(!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX, c, err,
			 reservation_key_nr_replicas_invalid,
			 "invalid nr_replicas (%u)", r.v->nr_replicas);
fsck_err:
	return ret;
}

void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
			      struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	prt_printf(out, "generation %u replicas %u",
		   le32_to_cpu(r.v->generation),
		   r.v->nr_replicas);
}

bool bch2_reservation_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
{
	struct bkey_s_reservation l = bkey_s_to_reservation(_l);
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(_r);

	if (l.v->generation != r.v->generation ||
	    l.v->nr_replicas != r.v->nr_replicas)
		return false;

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}

/* Extent checksum entries: */

/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
					 struct bch_extent_crc_unpacked r)
{
	return (l.csum_type		!= r.csum_type ||
		l.compression_type	!= r.compression_type ||
		l.compressed_size	!= r.compressed_size ||
		l.uncompressed_size	!= r.uncompressed_size ||
		l.offset		!= r.offset ||
		l.live_size		!= r.live_size ||
		l.nonce			!= r.nonce ||
		bch2_crc_cmp(l.csum, r.csum));
}

static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
{
	return !crc_is_compressed(u) &&
		u.csum_type &&
		u.uncompressed_size > u.live_size &&
		bch2_csum_type_is_encryption(u.csum_type) ==
		bch2_csum_type_is_encryption(n.csum_type);
}

bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
				 struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	if (!n.csum_type)
		return false;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (can_narrow_crc(crc, n))
			return true;

	return false;
}

/*
 * We're writing another replica for this extent, so while we've got the data in
 * memory we'll be computing a new checksum for the currently live data.
 *
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
 * checksum we need:
 */
bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked u;
	struct extent_ptr_decoded p;
	union bch_extent_entry *i;
	bool ret = false;

	/* Find a checksum entry that covers only live data: */
	if (!n.csum_type) {
		bkey_for_each_crc(&k->k, ptrs, u, i)
			if (!crc_is_compressed(u) &&
			    u.csum_type &&
			    u.live_size == u.uncompressed_size) {
				n = u;
				goto found;
			}
		return false;
	}
found:
	BUG_ON(crc_is_compressed(n));
	BUG_ON(n.offset);
	BUG_ON(n.live_size != k->k.size);

restart_narrow_pointers:
	ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));

	bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
		if (can_narrow_crc(p.crc, n)) {
			bch2_bkey_drop_ptr_noerror(bkey_i_to_s(k), &i->ptr);
			p.ptr.offset += p.crc.offset;
			p.crc = n;
			bch2_extent_ptr_decoded_append(k, &p);
			ret = true;
			goto restart_narrow_pointers;
		}

	return ret;
}

static void bch2_extent_crc_pack(union bch_extent_crc *dst,
				 struct bch_extent_crc_unpacked src,
				 enum bch_extent_entry_type type)
{
#define set_common_fields(_dst, _src)					\
		_dst.type		= 1 << type;			\
		_dst.csum_type		= _src.csum_type,		\
		_dst.compression_type	= _src.compression_type,	\
		_dst._compressed_size	= _src.compressed_size - 1,	\
		_dst._uncompressed_size	= _src.uncompressed_size - 1,	\
		_dst.offset		= _src.offset

	switch (type) {
	case BCH_EXTENT_ENTRY_crc32:
		set_common_fields(dst->crc32, src);
		dst->crc32.csum		= (u32 __force) *((__le32 *) &src.csum.lo);
		break;
	case BCH_EXTENT_ENTRY_crc64:
		set_common_fields(dst->crc64, src);
		dst->crc64.nonce	= src.nonce;
		dst->crc64.csum_lo	= (u64 __force) src.csum.lo;
		dst->crc64.csum_hi	= (u64 __force) *((__le16 *) &src.csum.hi);
		break;
	case BCH_EXTENT_ENTRY_crc128:
		set_common_fields(dst->crc128, src);
		dst->crc128.nonce	= src.nonce;
		dst->crc128.csum	= src.csum;
		break;
	default:
		BUG();
	}
#undef set_common_fields
}

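/*
 * Append a checksum entry for @new to @k, packed as the smallest of
 * crc32/crc64/crc128 that can represent the checksum, sizes and nonce:
 */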
void bch2_extent_crc_append(struct bkey_i *k,
			    struct bch_extent_crc_unpacked new)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	union bch_extent_crc *crc = (void *) ptrs.end;
	enum bch_extent_entry_type type;

	if (bch_crc_bytes[new.csum_type]	<= 4 &&
	    new.uncompressed_size		<= CRC32_SIZE_MAX &&
	    new.nonce				<= CRC32_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc32;
	else if (bch_crc_bytes[new.csum_type]	<= 10 &&
		 new.uncompressed_size		<= CRC64_SIZE_MAX &&
		 new.nonce			<= CRC64_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc64;
	else if (bch_crc_bytes[new.csum_type]	<= 16 &&
		 new.uncompressed_size		<= CRC128_SIZE_MAX &&
		 new.nonce			<= CRC128_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc128;
	else
		BUG();

	bch2_extent_crc_pack(crc, new, type);

	k->k.u64s += extent_entry_u64s(ptrs.end);

	EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
}

/* Generic code for keys with pointers: */

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
	return bch2_bkey_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation
		? bkey_s_c_to_reservation(k).v->nr_replicas
		: bch2_bkey_dirty_devs(k).nr;
}

unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
{
	unsigned ret = 0;

	if (k.k->type == KEY_TYPE_reservation) {
		ret = bkey_s_c_to_reservation(k).v->nr_replicas;
	} else {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			ret += !p.ptr.cached && !crc_is_compressed(p.crc);
	}

	return ret;
}

unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned ret = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (!p.ptr.cached && crc_is_compressed(p.crc))
			ret += p.crc.compressed_size;

	return ret;
}

bool bch2_bkey_is_incompressible(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;

	bkey_for_each_crc(k.k, ptrs, crc, entry)
		if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			return true;
	return false;
}

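/*
 * Count replicas: one per non-cached pointer, plus the stripe redundancy for
 * erasure coded pointers.
 */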
unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p = { 0 };
	unsigned replicas = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.ptr.cached)
			continue;

		if (p.has_ec)
			replicas += p.ec.redundancy;

		replicas++;
	}

	return replicas;
}

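/*
 * Durability of a single pointer: 0 if cached, stripe redundancy + 1 if
 * erasure coded, otherwise the device's configured durability. The _desired_
 * variant counts pointers on failed devices too; the plain variant treats
 * pointers to failed devices as contributing nothing.
 */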
static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent_ptr_decoded *p)
{
	if (p->ptr.cached)
		return 0;

	return p->has_ec
		? p->ec.redundancy + 1
		: ca->mi.durability;
}

unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, p->ptr.dev);

	return __extent_ptr_durability(ca, p);
}

unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, p->ptr.dev);

	if (ca->mi.state == BCH_MEMBER_STATE_failed)
		return 0;

	return __extent_ptr_durability(ca, p);
}

unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		durability += bch2_extent_ptr_durability(c, &p);

	return durability;
}

static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
			durability += bch2_extent_ptr_durability(c, &p);

	return durability;
}

void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));
	union bch_extent_entry *next = extent_entry_next(entry);

	memmove_u64s(entry, next, (u64 *) end - (u64 *) next);
	k->k.u64s -= extent_entry_u64s(entry);
}

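/*
 * Append a decoded pointer to @k: reuses an existing checksum entry if one
 * matches p->crc, otherwise appends a new one, then inserts the pointer
 * (preceded by its stripe pointer, if it has one) at that position.
 */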
void bch2_extent_ptr_decoded_append(struct bkey_i *k,
				    struct extent_ptr_decoded *p)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked crc =
		bch2_extent_crc_unpack(&k->k, NULL);
	union bch_extent_entry *pos;

	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
		pos = ptrs.start;
		goto found;
	}

	bkey_for_each_crc(&k->k, ptrs, crc, pos)
		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
			pos = extent_entry_next(pos);
			goto found;
		}

	bch2_extent_crc_append(k, p->crc);
	pos = bkey_val_end(bkey_i_to_s(k));
found:
	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
	__extent_entry_insert(k, pos, to_entry(&p->ptr));

	if (p->has_ec) {
		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
		__extent_entry_insert(k, pos, to_entry(&p->ec));
	}
}

static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
						 union bch_extent_entry *entry)
{
	union bch_extent_entry *i = ptrs.start;

	if (i == entry)
		return NULL;

	while (extent_entry_next(i) != entry)
		i = extent_entry_next(i);
	return i;
}

/*
 * Returns pointer to the next entry after the one being dropped:
 */
union bch_extent_entry *bch2_bkey_drop_ptr_noerror(struct bkey_s k,
						   struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry = to_entry(ptr), *next;
	union bch_extent_entry *ret = entry;
	bool drop_crc = true;

	EBUG_ON(ptr < &ptrs.start->ptr ||
		ptr >= &ptrs.end->ptr);
	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);

	for (next = extent_entry_next(entry);
	     next != ptrs.end;
	     next = extent_entry_next(next)) {
		if (extent_entry_is_crc(next)) {
			break;
		} else if (extent_entry_is_ptr(next)) {
			drop_crc = false;
			break;
		}
	}

	extent_entry_drop(k, entry);

	while ((entry = extent_entry_prev(ptrs, entry))) {
		if (extent_entry_is_ptr(entry))
			break;

		if ((extent_entry_is_crc(entry) && drop_crc) ||
		    extent_entry_is_stripe_ptr(entry)) {
			ret = (void *) ret - extent_entry_bytes(entry);
			extent_entry_drop(k, entry);
		}
	}

	return ret;
}

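/*
 * Like bch2_bkey_drop_ptr_noerror(), except that if the last dirty pointer is
 * dropped the key becomes KEY_TYPE_error (data has been lost), and if no
 * pointers remain at all it becomes KEY_TYPE_deleted.
 */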
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
					   struct bch_extent_ptr *ptr)
{
	bool have_dirty = bch2_bkey_dirty_devs(k.s_c).nr;
	union bch_extent_entry *ret =
		bch2_bkey_drop_ptr_noerror(k, ptr);

	/*
	 * If we deleted all the dirty pointers and there's still cached
	 * pointers, we could set the cached pointers to dirty if they're not
	 * stale - but to do that correctly we'd need to grab an open_bucket
	 * reference so that we don't race with bucket reuse:
	 */
	if (have_dirty &&
	    !bch2_bkey_dirty_devs(k.s_c).nr) {
		k.k->type = KEY_TYPE_error;
		set_bkey_val_u64s(k.k, 0);
		ret = NULL;
	} else if (!bch2_bkey_nr_ptrs(k.s_c)) {
		k.k->type = KEY_TYPE_deleted;
		set_bkey_val_u64s(k.k, 0);
		ret = NULL;
	}

	return ret;
}

void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}

void bch2_bkey_drop_device_noerror(struct bkey_s k, unsigned dev)
{
	struct bch_extent_ptr *ptr = bch2_bkey_has_device(k, dev);

	if (ptr)
		bch2_bkey_drop_ptr_noerror(k, ptr);
}

const struct bch_extent_ptr *bch2_bkey_has_device_c(struct bkey_s_c k, unsigned dev)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == dev)
			return ptr;

	return NULL;
}

bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	bkey_for_each_ptr(ptrs, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		    (!ptr->cached ||
		     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
			return true;

	return false;
}

bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_extent_ptr m, u64 offset)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev	== m.dev &&
		    p.ptr.gen	== m.gen &&
		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
		    (s64) m.offset - offset)
			return true;

	return false;
}

/*
 * Returns true if two extents refer to the same data:
 */
bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
{
	if (k1.k->type != k2.k->type)
		return false;

	if (bkey_extent_is_direct_data(k1.k)) {
		struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
		struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
		const union bch_extent_entry *entry1, *entry2;
		struct extent_ptr_decoded p1, p2;

		if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))
			return false;

		bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
			bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
				if (p1.ptr.dev	== p2.ptr.dev &&
				    p1.ptr.gen	== p2.ptr.gen &&
				    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
				    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
					return true;

		return false;
	} else {
		/* KEY_TYPE_deleted, etc. */
		return true;
	}
}

struct bch_extent_ptr *
bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, struct bkey_s k2)
{
	struct bkey_ptrs ptrs2 = bch2_bkey_ptrs(k2);
	union bch_extent_entry *entry2;
	struct extent_ptr_decoded p2;

	bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
		if (p1.ptr.dev	== p2.ptr.dev &&
		    p1.ptr.gen	== p2.ptr.gen &&
		    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
		    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
			return &entry2->ptr;

	return NULL;
}

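/*
 * Mark the given pointer as cached; any stripe pointer attached to it is
 * dropped, since cached pointers may not be erasure coded. BUG()s if @ptr is
 * not an entry of @k.
 */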
void bch2_extent_ptr_set_cached(struct bkey_s k, struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	union bch_extent_entry *ec = NULL;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (&entry->ptr == ptr) {
			ptr->cached = true;
			if (ec)
				extent_entry_drop(k, ec);
			return;
		}

		if (extent_entry_is_stripe_ptr(entry))
			ec = entry;
		else if (extent_entry_is_ptr(entry))
			ec = NULL;
	}

	BUG();
}

/*
 * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
 *
 * Returns true if @k should be dropped entirely
 *
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
 */
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr,
		ptr->cached &&
		ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));

	return bkey_deleted(k.k);
}

void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struct bch_extent_ptr *ptr)
{
	struct bch_dev *ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
		? bch_dev_bkey_exists(c, ptr->dev)
		: NULL;

	if (!ca) {
		prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
			   (u64) ptr->offset, ptr->gen,
			   ptr->cached ? " cached" : "");
	} else {
		u32 offset;
		u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);

		prt_printf(out, "ptr: %u:%llu:%u gen %u",
			   ptr->dev, b, offset, ptr->gen);
		if (ptr->cached)
			prt_str(out, " cached");
		if (ptr->unwritten)
			prt_str(out, " unwritten");
		if (ca && ptr_stale(ca, ptr))
			prt_printf(out, " stale");
	}
}

void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	bool first = true;

	if (c)
		prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k));

	bkey_extent_entry_for_each(ptrs, entry) {
		if (!first)
			prt_printf(out, " ");

		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			bch2_extent_ptr_to_text(out, c, entry_to_ptr(entry));
			break;

		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128: {
			struct bch_extent_crc_unpacked crc =
				bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress ",
				   crc.compressed_size,
				   crc.uncompressed_size,
				   crc.offset, crc.nonce,
				   bch2_csum_types[crc.csum_type]);
			bch2_prt_compression_type(out, crc.compression_type);
			break;
		}
		case BCH_EXTENT_ENTRY_stripe_ptr: {
			const struct bch_extent_stripe_ptr *ec = &entry->stripe_ptr;

			prt_printf(out, "ec: idx %llu block %u",
				   (u64) ec->idx, ec->block);
			break;
		}
		case BCH_EXTENT_ENTRY_rebalance: {
			const struct bch_extent_rebalance *r = &entry->rebalance;

			prt_str(out, "rebalance: target ");
			if (c)
				bch2_target_to_text(out, c, r->target);
			else
				prt_printf(out, "%u", r->target);
			prt_str(out, " compression ");
			bch2_compression_opt_to_text(out, r->compression);
			break;
		}
		default:
			prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
			return;
		}

		first = false;
	}
}

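/*
 * Validate a single pointer: it must point to a live device, not duplicate
 * another pointer's device, and fall entirely within one valid bucket.
 */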
static int extent_ptr_invalid(struct bch_fs *c,
			      struct bkey_s_c k,
			      enum bkey_invalid_flags flags,
			      const struct bch_extent_ptr *ptr,
			      unsigned size_ondisk,
			      bool metadata,
			      struct printbuf *err)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	u64 bucket;
	u32 bucket_offset;
	struct bch_dev *ca;
	int ret = 0;

	if (!bch2_dev_exists2(c, ptr->dev)) {
		/*
		 * If we're in the write path this key might have already been
		 * overwritten, and we could be seeing a device that doesn't
		 * exist anymore due to racing with device removal:
		 */
		if (flags & BKEY_INVALID_WRITE)
			return 0;

		bkey_fsck_err(c, err, ptr_to_invalid_device,
			      "pointer to invalid device (%u)", ptr->dev);
	}

	ca = bch_dev_bkey_exists(c, ptr->dev);
	bkey_for_each_ptr(ptrs, ptr2)
		bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, c, err,
				 ptr_to_duplicate_device,
				 "multiple pointers to same device (%u)", ptr->dev);

	bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);

	bkey_fsck_err_on(bucket >= ca->mi.nbuckets, c, err,
			 ptr_after_last_bucket,
			 "pointer past last bucket (%llu > %llu)", bucket, ca->mi.nbuckets);
	bkey_fsck_err_on(ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket), c, err,
			 ptr_before_first_bucket,
			 "pointer before first bucket (%llu < %u)", bucket, ca->mi.first_bucket);
	bkey_fsck_err_on(bucket_offset + size_ondisk > ca->mi.bucket_size, c, err,
			 ptr_spans_multiple_buckets,
			 "pointer spans multiple buckets (%u + %u > %u)",
			 bucket_offset, size_ondisk, ca->mi.bucket_size);
fsck_err:
	return ret;
}

int bch2_bkey_ptrs_invalid(struct bch_fs *c, struct bkey_s_c k,
			   enum bkey_invalid_flags flags,
			   struct printbuf *err)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	unsigned size_ondisk = k.k->size;
	unsigned nonce = UINT_MAX;
	unsigned nr_ptrs = 0;
	bool have_written = false, have_unwritten = false, have_ec = false, crc_since_last_ptr = false;
	int ret = 0;

	if (bkey_is_btree_ptr(k.k))
		size_ondisk = btree_sectors(c);

	bkey_extent_entry_for_each(ptrs, entry) {
		bkey_fsck_err_on(__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX, c, err,
				 extent_ptrs_invalid_entry,
				 "invalid extent entry type (got %u, max %u)",
				 __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);

		bkey_fsck_err_on(bkey_is_btree_ptr(k.k) &&
				 !extent_entry_is_ptr(entry), c, err,
				 btree_ptr_has_non_ptr,
				 "has non ptr field");

		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ret = extent_ptr_invalid(c, k, flags, &entry->ptr,
						 size_ondisk, false, err);
			if (ret)
				return ret;

			bkey_fsck_err_on(entry->ptr.cached && have_ec, c, err,
					 ptr_cached_and_erasure_coded,
					 "cached, erasure coded ptr");

			if (!entry->ptr.unwritten)
				have_written = true;
			else
				have_unwritten = true;

			have_ec = false;
			crc_since_last_ptr = false;
			nr_ptrs++;
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size, c, err,
					 ptr_crc_uncompressed_size_too_small,
					 "checksum offset + key size > uncompressed size");
			bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type), c, err,
					 ptr_crc_csum_type_unknown,
					 "invalid checksum type");
			bkey_fsck_err_on(crc.compression_type >= BCH_COMPRESSION_TYPE_NR, c, err,
					 ptr_crc_compression_type_unknown,
					 "invalid compression type");

			if (bch2_csum_type_is_encryption(crc.csum_type)) {
				if (nonce == UINT_MAX)
					nonce = crc.offset + crc.nonce;
				else if (nonce != crc.offset + crc.nonce)
					bkey_fsck_err(c, err, ptr_crc_nonce_mismatch,
						      "incorrect nonce");
			}

			bkey_fsck_err_on(crc_since_last_ptr, c, err,
					 ptr_crc_redundant,
					 "redundant crc entry");
			crc_since_last_ptr = true;

			bkey_fsck_err_on(crc_is_encoded(crc) &&
					 (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) &&
					 (flags & (BKEY_INVALID_WRITE|BKEY_INVALID_COMMIT)), c, err,
					 ptr_crc_uncompressed_size_too_big,
					 "too large encoded extent");

			size_ondisk = crc.compressed_size;
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			bkey_fsck_err_on(have_ec, c, err,
					 ptr_stripe_redundant,
					 "redundant stripe entry");
			have_ec = true;
			break;
		case BCH_EXTENT_ENTRY_rebalance: {
			const struct bch_extent_rebalance *r = &entry->rebalance;

			if (!bch2_compression_opt_valid(r->compression)) {
				struct bch_compression_opt opt = __bch2_compression_decode(r->compression);
				prt_printf(err, "invalid compression opt %u:%u",
					   opt.type, opt.level);
				return -BCH_ERR_invalid_bkey;
			}
			break;
		}
		}
	}

	bkey_fsck_err_on(!nr_ptrs, c, err,
			 extent_ptrs_no_ptrs,
			 "no ptrs");
	bkey_fsck_err_on(nr_ptrs > BCH_BKEY_PTRS_MAX, c, err,
			 extent_ptrs_too_many_ptrs,
			 "too many ptrs: %u > %u", nr_ptrs, BCH_BKEY_PTRS_MAX);
	bkey_fsck_err_on(have_written && have_unwritten, c, err,
			 extent_ptrs_written_and_unwritten,
			 "extent with unwritten and written ptrs");
	bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten, c, err,
			 extent_ptrs_unwritten,
			 "has unwritten ptrs");
	bkey_fsck_err_on(crc_since_last_ptr, c, err,
			 extent_ptrs_redundant_crc,
			 "redundant crc entry");
	bkey_fsck_err_on(have_ec, c, err,
			 extent_ptrs_redundant_stripe,
			 "redundant stripe entry");
fsck_err:
	return ret;
}

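/*
 * Byte-swap an entire extent value: every entry is a multiple of u64s, so the
 * u64s are swabbed first, then checksum fields that aren't 64 bit get fixed
 * up individually per entry type.
 */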
void bch2_ptr_swab(struct bkey_s k)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	u64 *d;

	for (d =  (u64 *) ptrs.start;
	     d != (u64 *) ptrs.end;
	     d++)
		*d = swab64(*d);

	for (entry = ptrs.start;
	     entry < ptrs.end;
	     entry = extent_entry_next(entry)) {
		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			break;
		case BCH_EXTENT_ENTRY_crc32:
			entry->crc32.csum = swab32(entry->crc32.csum);
			break;
		case BCH_EXTENT_ENTRY_crc64:
			entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
			entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
			break;
		case BCH_EXTENT_ENTRY_crc128:
			entry->crc128.csum.hi = (__force __le64)
				swab64((__force u64) entry->crc128.csum.hi);
			entry->crc128.csum.lo = (__force __le64)
				swab64((__force u64) entry->crc128.csum.lo);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		case BCH_EXTENT_ENTRY_rebalance:
			break;
		}
	}
}

const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;

	bkey_extent_entry_for_each(ptrs, entry)
		if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance)
			return &entry->rebalance;

	return NULL;
}

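/*
 * Returns a bitmask, by pointer index, of which pointers must be rewritten to
 * satisfy the given background target/compression options; extents with
 * incompressible or unwritten pointers skip the compression check.
 */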
unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
				       unsigned target, unsigned compression)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned rewrite_ptrs = 0;

	if (compression) {
		unsigned compression_type = bch2_compression_opt_to_type(compression);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		unsigned i = 0;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
			    p.ptr.unwritten) {
				rewrite_ptrs = 0;
				goto incompressible;
			}

			if (!p.ptr.cached && p.crc.compression_type != compression_type)
				rewrite_ptrs |= 1U << i;
			i++;
		}
	}
incompressible:
	if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
		unsigned i = 0;

		bkey_for_each_ptr(ptrs, ptr) {
			if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, target))
				rewrite_ptrs |= 1U << i;
			i++;
		}
	}

	return rewrite_ptrs;
}

bool bch2_bkey_needs_rebalance(struct bch_fs *c, struct bkey_s_c k)
{
	const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);

	/*
	 * If it's an indirect extent, we don't delete the rebalance entry when
	 * done so that we know what options were applied - check if it still
	 * needs work done:
	 */
	if (r &&
	    k.k->type == KEY_TYPE_reflink_v &&
	    !bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression))
		r = NULL;

	return r != NULL;
}

int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k,
				  struct bch_io_opts *opts)
{
	struct bkey_s k = bkey_i_to_s(_k);
	struct bch_extent_rebalance *r;
	unsigned target = opts->background_target;
	unsigned compression = background_compression(*opts);
	bool needs_rebalance;

	if (!bkey_extent_is_direct_data(k.k))
		return 0;

	/* get existing rebalance entry: */
	r = (struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c);
	if (r) {
		if (k.k->type == KEY_TYPE_reflink_v) {
			/*
			 * indirect extents: existing options take precedence,
			 * so that we don't move extents back and forth if
			 * they're referenced by different inodes with different
			 * options:
			 */
			if (r->target)
				target = r->target;
			if (r->compression)
				compression = r->compression;
		}

		r->target	= target;
		r->compression	= compression;
	}

	needs_rebalance = bch2_bkey_ptrs_need_rebalance(c, k.s_c, target, compression);

	if (needs_rebalance && !r) {
		union bch_extent_entry *new = bkey_val_end(k);

		new->rebalance.type		= 1U << BCH_EXTENT_ENTRY_rebalance;
		new->rebalance.compression	= compression;
		new->rebalance.target		= target;
		new->rebalance.unused		= 0;
		k.k->u64s += extent_entry_u64s(new);
	} else if (!needs_rebalance && r && k.k->type != KEY_TYPE_reflink_v) {
		/*
		 * For indirect extents, don't delete the rebalance entry when
		 * we're finished so that we know we specifically moved it or
		 * compressed it to its current location/compression type
		 */
		extent_entry_drop(k, (union bch_extent_entry *) r);
	}

	return 0;
}

/* Generic extent code: */

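/*
 * Trim @k to start at @where: pointer and crc offsets are adjusted so the
 * remaining data is unchanged, and for inline data keys the value itself is
 * trimmed. Both this and bch2_cut_back_s() return -val_u64s_delta, the
 * (non-positive) change in value size in u64s.
 */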
int bch2_cut_front_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 sub;

	if (bkey_le(where, bkey_start_pos(k.k)))
		return 0;

	EBUG_ON(bkey_gt(where, k.k->p));

	sub = where.offset - bkey_start_offset(k.k);

	k.k->size -= sub;

	if (!k.k->size) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v: {
		struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;

		bkey_extent_entry_for_each(ptrs, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
				if (!seen_crc)
					entry->ptr.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_stripe_ptr:
				break;
			case BCH_EXTENT_ENTRY_rebalance:
				break;
			}

			if (extent_entry_is_crc(entry))
				seen_crc = true;
		}

		break;
	}
	case KEY_TYPE_reflink_p: {
		struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);

		le64_add_cpu(&p.v->idx, sub);
		break;
	}
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data: {
		void *p = bkey_inline_data_p(k);
		unsigned bytes = bkey_inline_data_bytes(k.k);

		sub = min_t(u64, sub << 9, bytes);

		memmove(p, p + sub, bytes - sub);

		new_val_u64s -= sub >> 3;
		break;
	}
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}

int bch2_cut_back_s(struct bpos where, struct bkey_s k)
{
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 len = 0;

	if (bkey_ge(where, k.k->p))
		return 0;

	EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));

	len = where.offset - bkey_start_offset(k.k);

	k.k->p.offset = where.offset;
	k.k->size = len;

	if (!len) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
	}

	switch (k.k->type) {
	case KEY_TYPE_inline_data:
	case KEY_TYPE_indirect_inline_data:
		new_val_u64s = (bkey_inline_data_offset(k.k) +
				min(bkey_inline_data_bytes(k.k), k.k->size << 9)) >> 3;
		break;
	}

	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
}