/*
 * linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

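/*
 * Locking: the bitmap and summary updates below are serialized by
 * UFS_SB(sb)->s_lock.  ufs_free_fragments(), ufs_free_blocks() and
 * ufs_new_fragments() take it themselves; the static helpers are
 * called with it already held.
 */
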
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	mutex_lock(&UFS_SB(sb)->s_lock);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

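	/*
	 * ufs_fragacct() keeps cg_frsum[], the histogram of free runs by
	 * length, in step with the bitmap: subtract the counts for the old
	 * map of this block here, flip the bits, then re-add them below.
	 */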
	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Trying to reassemble free fragments into block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed:
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);

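	/*
	 * A request may straddle a cylinder group boundary: trim it to the
	 * current group, free that part, then come back to do_more for the
	 * overflow.
	 */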
do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Modify the inode page cache so that buffers whose b_blocknr is
 * oldb...oldb+count-1 end up with b_blocknr newb...newb+count-1.
 * We assume that the blocks oldb...oldb+count-1 sit at the end of
 * the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of those functions, so it is already
 * locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	     inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
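	/*
	 * Walk the range page by page: (i | mask) + 1 advances i to the
	 * first block of the next page.
	 */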
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)/* it was truncated */
				continue;
			if (IS_ERR(page)) {/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(REQ_OP_READ, 0, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			unmap_underlying_metadata(bh->b_bdev,
						  bh->b_blocknr);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}

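/*
 * Zero 'n' fragments starting at 'beg' through the buffer cache, so that
 * newly allocated space never exposes stale on-disk contents.
 */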
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}

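/*
 * Allocate 'count' fragments for the data pointer 'p'.  Three cases are
 * handled below: a fresh allocation (oldcount == 0), extending a
 * partially used block in place (ufs_add_fragments), and, failing that,
 * allocating a new block and migrating the old fragments to it
 * (ufs_change_blocknr).  Returns the first allocated fragment, 0 on
 * failure, or INVBLOCK on internal error.
 */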
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
		      u64 goal, unsigned count, int *err,
		      struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	mutex_lock(&UFS_SB(sb)->s_lock);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}
	else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}

	/*
	 * There is not enough space for the user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
			write_seqlock(&UFS_I(inode)->meta_lock);
			ufs_cpu_to_data_ptr(sb, p, result);
			write_sequnlock(&UFS_I(inode)->meta_lock);
			*err = 0;
			UFS_I(inode)->i_lastfrag =
				max(UFS_I(inode)->i_lastfrag, fragment + count);
		}
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
					       fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate new block and move data
	 */
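	/*
	 * fs_optim selects the policy: UFS_OPTSPACE requests an exact fit
	 * of newcount fragments, while UFS_OPTTIME requests a whole block.
	 * The policy flips itself based on how much free fragment space
	 * remains relative to s_minfree (this reading of the thresholds is
	 * our gloss, not an upstream comment).
	 */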
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		/* fall through */
	case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		write_seqlock(&UFS_I(inode)->meta_lock);
		ufs_cpu_to_data_ptr(sb, p, result);
		write_sequnlock(&UFS_I(inode)->meta_lock);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
					       fragment + count);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return 0;
}

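/*
 * Try to grow the allocation at 'fragment' from 'oldcount' to 'newcount'
 * fragments in place.  Returns 'fragment' on success, or 0 when the
 * adjacent fragments are not free and the caller must allocate elsewhere.
 */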
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			   "internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}

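/*
 * Does cylinder group 'cgno' have room for this request: either a whole
 * free block, or (per the cg_frsum[] histogram) a free run of at least
 * 'count' fragments?  Jumps to cg_found on success.
 */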
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;

static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;

	/*
	 * 1. search the preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at offset 2 from oldcg (offset 0 was checked in step 1,
	 * offset 1 in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			   "internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}

static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}

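/*
 * Scan 'size' bytes of the bitmap from byte offset 'begin' for a byte
 * whose lookup-table entry has 'mask' set, i.e. one containing a
 * suitable run of free fragments.  Returns the number of bytes left
 * unscanned, counting the matching byte itself, or 0 if none matched.
 */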
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}

/*
 * Find a free run of the specified size in the specified cylinder group.
 * @sb: pointer to the super block
 * @ucpi: pointer to the cylinder group info
 * @goal: fragment near which the new run should be found
 * @count: requested size, in fragments
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */

	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}

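/*
 * Maintain the cluster bitmap and the cluster summary counts used by the
 * 44BSD layout: cnt is +1 when a block is being freed, -1 when one is
 * being allocated (see the callers above).
 */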
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}

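/*
 * Free-run lookup tables, indexed by one byte of the fragment bitmap
 * (a set bit means a free fragment).  For 8 fragments per block,
 * ufs_fragtable_8fpb has bit (n - 1) set when the byte contains a
 * maximal free run of n fragments.  ufs_fragtable_other serves 1, 2 and
 * 4 fragments per block at once, encoding a run of n fragments at bit
 * (n - 1 + fpb); both match the '1 << (count - 1 + (uspi->s_fpb & 7))'
 * mask built in ufs_bitmap_search().
 */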
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};