/*
 * Copyright (C) 2004, OGAWA Hirofumi
 * Released under GPL v2.
 */

#include <linux/blkdev.h>
#include <linux/sched/signal.h>
#include "fat.h"

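/*
 * Per-variant accessors for FAT table entries.  FAT12, FAT16 and FAT32
 * each provide their own implementation; fat_ent_access_init() selects
 * the matching operations table at mount time.
 */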
struct fatent_operations {
	void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
	void (*ent_set_ptr)(struct fat_entry *, int);
	int (*ent_bread)(struct super_block *, struct fat_entry *,
			 int, sector_t);
	int (*ent_get)(struct fat_entry *);
	void (*ent_put)(struct fat_entry *, int);
	int (*ent_next)(struct fat_entry *);
};

static DEFINE_SPINLOCK(fat12_entry_lock);

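/*
 * FAT12 packs two 12-bit entries into three bytes, so the byte offset of
 * entry N is N + N/2 and an entry may straddle a block boundary.
 */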
static void fat12_ent_blocknr(struct super_block *sb, int entry,
			      int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int bytes = entry + (entry >> 1);
	WARN_ON(!fat_valid_entry(sbi, entry));
	*offset = bytes & (sb->s_blocksize - 1);
	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}

static void fat_ent_blocknr(struct super_block *sb, int entry,
			    int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int bytes = (entry << sbi->fatent_shift);
	WARN_ON(!fat_valid_entry(sbi, entry));
	*offset = bytes & (sb->s_blocksize - 1);
	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}

static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	struct buffer_head **bhs = fatent->bhs;
	if (fatent->nr_bhs == 1) {
		WARN_ON(offset >= (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1);
	} else {
		WARN_ON(offset != (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[1]->b_data;
	}
}

static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	WARN_ON(offset & (2 - 1));
	fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset);
}

static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	WARN_ON(offset & (4 - 1));
	fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset);
}

static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			   int offset, sector_t blocknr)
{
	struct buffer_head **bhs = fatent->bhs;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;

	bhs[0] = sb_bread(sb, blocknr);
	if (!bhs[0])
		goto err;

	if ((offset + 1) < sb->s_blocksize)
		fatent->nr_bhs = 1;
	else {
		/* This entry straddles a block boundary; it needs the next block too */
		blocknr++;
		bhs[1] = sb_bread(sb, blocknr);
		if (!bhs[1])
			goto err_brelse;
		fatent->nr_bhs = 2;
	}
	fat12_ent_set_ptr(fatent, offset);
	return 0;

err_brelse:
	brelse(bhs[0]);
err:
	fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr);
	return -EIO;
}

static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			 int offset, sector_t blocknr)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
	fatent->bhs[0] = sb_bread(sb, blocknr);
	if (!fatent->bhs[0]) {
		fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
			(llu)blocknr);
		return -EIO;
	}
	fatent->nr_bhs = 1;
	ops->ent_set_ptr(fatent, offset);
	return 0;
}

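/*
 * Extract the 12-bit value spanning the two bytes pointed to by ent12_p.
 * For an odd entry the value is the high nibble of byte 0 plus all of
 * byte 1; for an even entry it is byte 0 plus the low nibble of byte 1.
 */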
static int fat12_ent_get(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	int next;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1)
		next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4);
	else
		next = (*ent12_p[1] << 8) | *ent12_p[0];
	spin_unlock(&fat12_entry_lock);

	next &= 0x0fff;
	if (next >= BAD_FAT12)
		next = FAT_ENT_EOF;
	return next;
}

static int fat16_ent_get(struct fat_entry *fatent)
{
	int next = le16_to_cpu(*fatent->u.ent16_p);
	WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1));
	if (next >= BAD_FAT16)
		next = FAT_ENT_EOF;
	return next;
}

static int fat32_ent_get(struct fat_entry *fatent)
{
	int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
	WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
	if (next >= BAD_FAT32)
		next = FAT_ENT_EOF;
	return next;
}

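/*
 * Store a 12-bit value into the packed byte pair, preserving the nibble
 * that belongs to the neighbouring entry.  Updates are serialized by
 * fat12_entry_lock because two entries share a byte.
 */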
static void fat12_ent_put(struct fat_entry *fatent, int new)
{
	u8 **ent12_p = fatent->u.ent12_p;

	if (new == FAT_ENT_EOF)
		new = EOF_FAT12;

	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1) {
		*ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f);
		*ent12_p[1] = new >> 4;
	} else {
		*ent12_p[0] = new & 0xff;
		*ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8);
	}
	spin_unlock(&fat12_entry_lock);

	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
	if (fatent->nr_bhs == 2)
		mark_buffer_dirty_inode(fatent->bhs[1], fatent->fat_inode);
}

static void fat16_ent_put(struct fat_entry *fatent, int new)
{
	if (new == FAT_ENT_EOF)
		new = EOF_FAT16;

	*fatent->u.ent16_p = cpu_to_le16(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}

static void fat32_ent_put(struct fat_entry *fatent, int new)
{
	WARN_ON(new & 0xf0000000);
	new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
	*fatent->u.ent32_p = cpu_to_le32(new);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}

static int fat12_ent_next(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	struct buffer_head **bhs = fatent->bhs;
	u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1);

	fatent->entry++;
	if (fatent->nr_bhs == 1) {
		WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 2)));
		WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data +
					    (bhs[0]->b_size - 1)));
		if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
			ent12_p[0] = nextp - 1;
			ent12_p[1] = nextp;
			return 1;
		}
	} else {
		WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data +
					     (bhs[0]->b_size - 1)));
		WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
		ent12_p[0] = nextp - 1;
		ent12_p[1] = nextp;
		brelse(bhs[0]);
		bhs[0] = bhs[1];
		fatent->nr_bhs = 1;
		return 1;
	}
	ent12_p[0] = NULL;
	ent12_p[1] = NULL;
	return 0;
}

static int fat16_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];
	fatent->entry++;
	if (fatent->u.ent16_p < (__le16 *)(bh->b_data + (bh->b_size - 2))) {
		fatent->u.ent16_p++;
		return 1;
	}
	fatent->u.ent16_p = NULL;
	return 0;
}

static int fat32_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];
	fatent->entry++;
	if (fatent->u.ent32_p < (__le32 *)(bh->b_data + (bh->b_size - 4))) {
		fatent->u.ent32_p++;
		return 1;
	}
	fatent->u.ent32_p = NULL;
	return 0;
}

static const struct fatent_operations fat12_ops = {
	.ent_blocknr	= fat12_ent_blocknr,
	.ent_set_ptr	= fat12_ent_set_ptr,
	.ent_bread	= fat12_ent_bread,
	.ent_get	= fat12_ent_get,
	.ent_put	= fat12_ent_put,
	.ent_next	= fat12_ent_next,
};

static const struct fatent_operations fat16_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat16_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat16_ent_get,
	.ent_put	= fat16_ent_put,
	.ent_next	= fat16_ent_next,
};

static const struct fatent_operations fat32_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat32_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat32_ent_get,
	.ent_put	= fat32_ent_put,
	.ent_next	= fat32_ent_next,
};

static inline void lock_fat(struct msdos_sb_info *sbi)
{
	mutex_lock(&sbi->fat_lock);
}

static inline void unlock_fat(struct msdos_sb_info *sbi)
{
	mutex_unlock(&sbi->fat_lock);
}

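/*
 * Initialize the FAT entry accessors for this mount: pick the operations
 * table and the entry-size shift matching the detected FAT variant
 * (fatent_shift is -1 for FAT12, which uses its own byte-offset math).
 */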
void fat_ent_access_init(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	mutex_init(&sbi->fat_lock);

	if (is_fat32(sbi)) {
		sbi->fatent_shift = 2;
		sbi->fatent_ops = &fat32_ops;
	} else if (is_fat16(sbi)) {
		sbi->fatent_shift = 1;
		sbi->fatent_ops = &fat16_ops;
	} else if (is_fat12(sbi)) {
		sbi->fatent_shift = -1;
		sbi->fatent_ops = &fat12_ops;
	} else {
		fat_fs_error(sb, "invalid FAT variant, %u bits", sbi->fat_bits);
	}
}

static void mark_fsinfo_dirty(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	if (sb_rdonly(sb) || !is_fat32(sbi))
		return;

	__mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
}

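/*
 * Try to reuse the buffers already held by @fatent for the entry at
 * @offset/@blocknr.  Returns 1 if the cached buffers cover the entry
 * (and the entry pointer has been updated), 0 if a fresh read is needed.
 */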
static inline int fat_ent_update_ptr(struct super_block *sb,
				     struct fat_entry *fatent,
				     int offset, sector_t blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct buffer_head **bhs = fatent->bhs;

	/* Do this fatent's buffers include this entry? */
	if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
		return 0;
	if (is_fat12(sbi)) {
		if ((offset + 1) < sb->s_blocksize) {
			/* This entry is on bhs[0]. */
			if (fatent->nr_bhs == 2) {
				brelse(bhs[1]);
				fatent->nr_bhs = 1;
			}
		} else {
			/* This entry needs the next block. */
			if (fatent->nr_bhs != 2)
				return 0;
			if (bhs[1]->b_blocknr != (blocknr + 1))
				return 0;
		}
	}
	ops->ent_set_ptr(fatent, offset);
	return 1;
}

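/*
 * Read the FAT entry for @entry into @fatent and return the value it
 * holds (the next cluster in the chain, FAT_ENT_FREE or FAT_ENT_EOF),
 * or a negative errno on failure.
 */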
int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	int err, offset;
	sector_t blocknr;

	if (!fat_valid_entry(sbi, entry)) {
		fatent_brelse(fatent);
		fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
		return -EIO;
	}

	fatent_set_entry(fatent, entry);
	ops->ent_blocknr(sb, entry, &offset, &blocknr);

	if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) {
		fatent_brelse(fatent);
		err = ops->ent_bread(sb, fatent, offset, blocknr);
		if (err)
			return err;
	}
	return ops->ent_get(fatent);
}

/* FIXME: We could write these blocks in bigger chunks. */
static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
			  int nr_bhs)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct buffer_head *c_bh;
	int err, n, copy;

	err = 0;
	for (copy = 1; copy < sbi->fats; copy++) {
		sector_t backup_fat = sbi->fat_length * copy;

		for (n = 0; n < nr_bhs; n++) {
			c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr);
			if (!c_bh) {
				err = -ENOMEM;
				goto error;
			}
			memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
			set_buffer_uptodate(c_bh);
			mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
			if (sb->s_flags & SB_SYNCHRONOUS)
				err = sync_dirty_buffer(c_bh);
			brelse(c_bh);
			if (err)
				goto error;
		}
	}
error:
	return err;
}

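/*
 * Write @new into the FAT entry cached in @fatent, optionally syncing
 * the buffers, then propagate the change to the backup FAT copies.
 */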
int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
		  int new, int wait)
{
	struct super_block *sb = inode->i_sb;
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	int err;

	ops->ent_put(fatent, new);
	if (wait) {
		err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs);
		if (err)
			return err;
	}
	return fat_mirror_bhs(sb, fatent->bhs, fatent->nr_bhs);
}

static inline int fat_ent_next(struct msdos_sb_info *sbi,
			       struct fat_entry *fatent)
{
	if (sbi->fatent_ops->ent_next(fatent)) {
		if (fatent->entry < sbi->max_cluster)
			return 1;
	}
	return 0;
}

static inline int fat_ent_read_block(struct super_block *sb,
				     struct fat_entry *fatent)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	sector_t blocknr;
	int offset;

	fatent_brelse(fatent);
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	return ops->ent_bread(sb, fatent, offset, blocknr);
}

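/*
 * Add @fatent's buffer_heads to the bhs[] array if they are not already
 * there, taking an extra reference on each one added so they survive a
 * later fatent_brelse().
 */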
static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs,
			    struct fat_entry *fatent)
{
	int n, i;

	for (n = 0; n < fatent->nr_bhs; n++) {
		for (i = 0; i < *nr_bhs; i++) {
			if (fatent->bhs[n] == bhs[i])
				break;
		}
		if (i == *nr_bhs) {
			get_bh(fatent->bhs[n]);
			bhs[i] = fatent->bhs[n];
			(*nr_bhs)++;
		}
	}
}

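/*
 * Allocate @nr_cluster free clusters, linking them into a chain that
 * ends with FAT_ENT_EOF and returning their numbers in @cluster[].  The
 * search starts just after the last allocation (sbi->prev_free) and
 * wraps around; on failure any clusters already claimed are freed again.
 */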
int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent, prev_ent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, count, err, nr_bhs, idx_clus;

	BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2));	/* fixed limit */

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
	    sbi->free_clusters < nr_cluster) {
		unlock_fat(sbi);
		return -ENOSPC;
	}

	err = nr_bhs = idx_clus = 0;
	count = FAT_START_ENT;
	fatent_init(&prev_ent);
	fatent_init(&fatent);
	fatent_set_entry(&fatent, sbi->prev_free + 1);
	while (count < sbi->max_cluster) {
		if (fatent.entry >= sbi->max_cluster)
			fatent.entry = FAT_START_ENT;
		fatent_set_entry(&fatent, fatent.entry);
		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		/* Find the free entries in a block */
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				int entry = fatent.entry;

				/* make the cluster chain */
				ops->ent_put(&fatent, FAT_ENT_EOF);
				if (prev_ent.nr_bhs)
					ops->ent_put(&prev_ent, entry);

				fat_collect_bhs(bhs, &nr_bhs, &fatent);

				sbi->prev_free = entry;
				if (sbi->free_clusters != -1)
					sbi->free_clusters--;

				cluster[idx_clus] = entry;
				idx_clus++;
				if (idx_clus == nr_cluster)
					goto out;

				/*
				 * fat_collect_bhs() took a reference on the
				 * bhs, so prev_ent remains usable.
				 */
				prev_ent = fatent;
			}
			count++;
			if (count == sbi->max_cluster)
				break;
		} while (fat_ent_next(sbi, &fatent));
	}

	/* Couldn't allocate enough free entries */
	sbi->free_clusters = 0;
	sbi->free_clus_valid = 1;
	err = -ENOSPC;

out:
	unlock_fat(sbi);
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
	if (!err) {
		if (inode_needs_sync(inode))
			err = fat_sync_bhs(bhs, nr_bhs);
		if (!err)
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
	}
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);

	if (err && idx_clus)
		fat_free_clusters(inode, cluster[0]);

	return err;
}

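/*
 * Walk the cluster chain starting at @cluster, marking every entry
 * FAT_ENT_FREE.  Contiguous runs are optionally discarded, and collected
 * FAT buffers are synced, mirrored and released in batches so no more
 * than MAX_BUF_PER_PAGE are held at once.
 */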
int fat_free_clusters(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, err, nr_bhs;
	int first_cl = cluster, dirty_fsinfo = 0;

	nr_bhs = 0;
	fatent_init(&fatent);
	lock_fat(sbi);
	do {
		cluster = fat_ent_read(inode, &fatent, cluster);
		if (cluster < 0) {
			err = cluster;
			goto error;
		} else if (cluster == FAT_ENT_FREE) {
			fat_fs_error(sb, "%s: deleting FAT entry beyond EOF",
				     __func__);
			err = -EIO;
			goto error;
		}

		if (sbi->options.discard) {
			/*
			 * Issue discard for the sectors we no longer
			 * care about, batching contiguous clusters
			 * into one request
			 */
			if (cluster != fatent.entry + 1) {
				int nr_clus = fatent.entry - first_cl + 1;

				sb_issue_discard(sb,
					fat_clus_to_blknr(sbi, first_cl),
					nr_clus * sbi->sec_per_clus,
					GFP_NOFS, 0);

				first_cl = cluster;
			}
		}

		ops->ent_put(&fatent, FAT_ENT_FREE);
		if (sbi->free_clusters != -1) {
			sbi->free_clusters++;
			dirty_fsinfo = 1;
		}

		if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
			if (sb->s_flags & SB_SYNCHRONOUS) {
				err = fat_sync_bhs(bhs, nr_bhs);
				if (err)
					goto error;
			}
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
			if (err)
				goto error;
			for (i = 0; i < nr_bhs; i++)
				brelse(bhs[i]);
			nr_bhs = 0;
		}
		fat_collect_bhs(bhs, &nr_bhs, &fatent);
	} while (cluster != FAT_ENT_EOF);

	if (sb->s_flags & SB_SYNCHRONOUS) {
		err = fat_sync_bhs(bhs, nr_bhs);
		if (err)
			goto error;
	}
	err = fat_mirror_bhs(sb, bhs, nr_bhs);
error:
	fatent_brelse(&fatent);
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	unlock_fat(sbi);
	if (dirty_fsinfo)
		mark_fsinfo_dirty(sb);

	return err;
}
EXPORT_SYMBOL_GPL(fat_free_clusters);

/* 128kb of readahead covers the whole FAT for FAT12 and FAT16 */
#define FAT_READA_SIZE		(128 * 1024)

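/*
 * Start readahead of the FAT blocks that follow @fatent's current
 * position, so the sequential scans below mostly hit cached blocks.
 */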
static void fat_ent_reada(struct super_block *sb, struct fat_entry *fatent,
			  unsigned long reada_blocks)
{
	const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	sector_t blocknr;
	int i, offset;

	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);

	for (i = 0; i < reada_blocks; i++)
		sb_breadahead(sb, blocknr + i);
}

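/*
 * Count the free clusters by scanning the whole FAT, then cache the
 * result in sbi->free_clusters so later callers can skip the scan.
 */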
int fat_count_free_clusters(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	unsigned long reada_blocks, reada_mask, cur_block;
	int err = 0, free;

	lock_fat(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid)
		goto out;

	reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
	reada_mask = reada_blocks - 1;
	cur_block = 0;

	free = 0;
	fatent_init(&fatent);
	fatent_set_entry(&fatent, FAT_START_ENT);
	while (fatent.entry < sbi->max_cluster) {
		/* readahead of fat blocks */
		if ((cur_block & reada_mask) == 0) {
			unsigned long rest = sbi->fat_length - cur_block;
			fat_ent_reada(sb, &fatent, min(reada_blocks, rest));
		}
		cur_block++;

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;

		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE)
				free++;
		} while (fat_ent_next(sbi, &fatent));
		cond_resched();
	}
	sbi->free_clusters = free;
	sbi->free_clus_valid = 1;
	mark_fsinfo_dirty(sb);
	fatent_brelse(&fatent);
out:
	unlock_fat(sbi);
	return err;
}

static int fat_trim_clusters(struct super_block *sb, u32 clus, u32 nr_clus)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	return sb_issue_discard(sb, fat_clus_to_blknr(sbi, clus),
				nr_clus * sbi->sec_per_clus, GFP_NOFS, 0);
}

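/*
 * Trim the free space described by @range: walk the FAT over the
 * requested cluster range and discard every run of free clusters that is
 * at least @range->minlen long, reporting the total trimmed length back
 * through @range->len.
 */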
int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	const struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	u64 ent_start, ent_end, minlen, trimmed = 0;
	u32 free = 0;
	unsigned long reada_blocks, reada_mask, cur_block = 0;
	int err = 0;

	/*
	 * FAT data is organized as clusters, so trim at the granularity of
	 * clusters.
	 *
	 * fstrim_range is in bytes; convert the values to cluster indexes.
	 * Treat sectors before the data region as all used, so they are
	 * never trimmed.
	 */
	ent_start = max_t(u64, range->start>>sbi->cluster_bits, FAT_START_ENT);
	ent_end = ent_start + (range->len >> sbi->cluster_bits) - 1;
	minlen = range->minlen >> sbi->cluster_bits;

	if (ent_start >= sbi->max_cluster || range->len < sbi->cluster_size)
		return -EINVAL;
	if (ent_end >= sbi->max_cluster)
		ent_end = sbi->max_cluster - 1;

	reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
	reada_mask = reada_blocks - 1;

	fatent_init(&fatent);
	lock_fat(sbi);
	fatent_set_entry(&fatent, ent_start);
	while (fatent.entry <= ent_end) {
		/* readahead of fat blocks */
		if ((cur_block & reada_mask) == 0) {
			unsigned long rest = sbi->fat_length - cur_block;
			fat_ent_reada(sb, &fatent, min(reada_blocks, rest));
		}
		cur_block++;

		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto error;
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				free++;
			} else if (free) {
				if (free >= minlen) {
					u32 clus = fatent.entry - free;

					err = fat_trim_clusters(sb, clus, free);
					if (err && err != -EOPNOTSUPP)
						goto error;
					if (!err)
						trimmed += free;
					err = 0;
				}
				free = 0;
			}
		} while (fat_ent_next(sbi, &fatent) && fatent.entry <= ent_end);

		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto error;
		}

		if (need_resched()) {
			fatent_brelse(&fatent);
			unlock_fat(sbi);
			cond_resched();
			lock_fat(sbi);
		}
	}
	/* handle the case where the tail entries are all free */
	if (free && free >= minlen) {
		u32 clus = fatent.entry - free;

		err = fat_trim_clusters(sb, clus, free);
		if (err && err != -EOPNOTSUPP)
			goto error;
		if (!err)
			trimmed += free;
		err = 0;
	}

error:
	fatent_brelse(&fatent);
	unlock_fat(sbi);

	range->len = trimmed << sbi->cluster_bits;

	return err;
}