// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	mutex_lock(&UFS_SB(sb)->s_lock);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	inode_sub_bytes(inode, count << uspi->s_fshift);
	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Try to reassemble the free fragments into a whole block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed:
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);

do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Modify the inode page cache so that buffers whose b_blocknr currently
 * lies in oldb...oldb+count-1 end up with b_blocknr in newb...newb+count-1.
 * We also assume that the blocks oldb...oldb+count-1 are situated at the
 * end of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write; locked_page is
 * an argument of those functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	     inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)/* it was truncated */
				continue;
			if (IS_ERR(page)) {/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(REQ_OP_READ, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			clean_bdev_bh_alias(bh);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}

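/*
 * Zero-fill the 'n' fragments starting at 'beg' and mark their buffers
 * dirty (writing them out immediately for synchronous inodes or when
 * 'sync' is set), so newly allocated space never exposes stale data.
 */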
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}

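/*
 * Allocate 'count' fragments for the data pointer 'p' of 'inode', starting
 * at fragment number 'fragment' and preferably near 'goal'.  Depending on
 * what is already allocated, this either extends an existing partial block
 * in place, allocates a fresh run of fragments, or allocates a new block
 * and moves the old data there via ufs_change_blocknr().  Returns the new
 * fragment number, 0 on failure, or INVBLOCK on internal error; '*err' is
 * set accordingly.
 */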
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
		      u64 goal, unsigned count, int *err,
		      struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	mutex_lock(&UFS_SB(sb)->s_lock);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}
	else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}

	/*
	 * There is not enough space for the user on the device
	 */
	if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
		if (!capable(CAP_SYS_RESOURCE)) {
			mutex_unlock(&UFS_SB(sb)->s_lock);
			UFSD("EXIT (FAILED)\n");
			return 0;
		}
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate a new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
			*err = 0;
			write_seqlock(&UFS_I(inode)->meta_lock);
			ufs_cpu_to_data_ptr(sb, p, result);
			UFS_I(inode)->i_lastfrag =
				max(UFS_I(inode)->i_lastfrag, fragment + count);
			write_sequnlock(&UFS_I(inode)->meta_lock);
		}
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize the block
	 */
	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
	if (result) {
		*err = 0;
		read_seqlock_excl(&UFS_I(inode)->meta_lock);
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
					       fragment + count);
		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate a new block and move the data there
	 */
	if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
		request = newcount;
		if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
	} else {
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		*err = 0;
		write_seqlock(&UFS_I(inode)->meta_lock);
		ufs_cpu_to_data_ptr(sb, p, result);
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
					       fragment + count);
		write_sequnlock(&UFS_I(inode)->meta_lock);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return 0;
}

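/*
 * Charge 'frags' additional fragments to the inode's block count, backing
 * out and returning false if the 32-bit i_blocks counter would overflow.
 */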
static bool try_add_frags(struct inode *inode, unsigned frags)
{
	unsigned size = frags * i_blocksize(inode);
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, size);
	if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
		__inode_sub_bytes(inode, size);
		spin_unlock(&inode->i_lock);
		return false;
	}
	spin_unlock(&inode->i_lock);
	return true;
}

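/*
 * Try to extend an existing allocation in place: grow the run ending at
 * 'fragment' from 'oldcount' to 'newcount' fragments by claiming the free
 * fragments that immediately follow it within the same block.  Returns
 * 'fragment' on success, or 0 if the block cannot be extended.
 */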
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;

	if (!try_add_frags(inode, count))
		return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = ufs_get_seconds(sb);
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}

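/*
 * Jump to cg_found if cylinder group 'cgno' has a free block or a free
 * fragment run of at least 'count' fragments.
 */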
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;

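/*
 * Allocate 'count' contiguous fragments, preferably in cylinder group
 * 'cgno' near 'goal'.  The search tries the preferred group first, then
 * quadratically rehashes across groups, and finally falls back to a
 * linear scan.  Returns the allocated fragment number, or 0 if nothing
 * suitable was found.
 */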
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;

	/*
	 * 1. search in the preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at i = 2 (0 is checked in step 1, 1 in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = ufs_get_seconds(sb);

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		inode_sub_bytes(inode, i << uspi->s_fshift);
		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if (!try_add_frags(inode, count))
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & SB_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}

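/*
 * Allocate one whole block in cylinder group 'ucpi', preferably at 'goal'
 * (otherwise falling back to the group's rotor and a bitmap search).
 * Returns the first fragment of the block, INVBLOCK if no block is free,
 * or 0 if the inode's block count would overflow.
 */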
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	if (!try_add_frags(inode, uspi->s_fpb))
		return 0;
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}

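/*
 * Scan 'size' bytes of the block map in 'ubh', starting 'begin' bytes in,
 * for the first byte whose fragment pattern (looked up in 'table') matches
 * 'mask'.  Returns the number of bytes remaining from the matching byte to
 * the end of the scanned range, so 0 means no match was found.
 */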
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to the super block
 * @ucpi: pointer to the cylinder group info
 * @goal: block near which we want to find the new one
 * @count: requested size, in fragments
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */

	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}

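/*
 * Update the cluster summary accounting of cylinder group 'ucpi' after
 * block 'blkno' is freed (cnt > 0) or allocated (cnt < 0), merging or
 * splitting the adjacent runs of free blocks as needed.
 */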
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}

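/*
 * Lookup tables used by ufs_bitmap_search(): for each possible block map
 * byte they encode, as a bit mask, which sizes of free fragment runs occur
 * in that byte (one table for 8 fragments per block, one for the other
 * fragments-per-block values).
 */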
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};