else
irec->ino_un.plist = ptbl;
- ptbl->pmask = 1LL << offset;
+ ptbl->pmask = 1ULL << offset;
ptbl->pentries = (xfs_ino_t*)memalign(sizeof(xfs_ino_t),
sizeof(xfs_ino_t));
if (!ptbl->pentries)
return;
}
- if (ptbl->pmask & (1LL << offset)) {
- bitmask = 1LL;
+ if (ptbl->pmask & (1ULL << offset)) {
+ bitmask = 1ULL;
target = 0;
for (i = 0; i < offset; i++) {
return;
}
- bitmask = 1LL;
+ bitmask = 1ULL;
cnt = target = 0;
for (i = 0; i < XFS_INODES_PER_CHUNK; i++) {
ptbl->cnt++;
#endif
ptbl->pentries[target] = parent;
- ptbl->pmask |= (1LL << offset);
+ ptbl->pmask |= (1ULL << offset);
}
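
Every hunk above makes the same change: pmask is a 64-bit bitmap with one bit per inode in a 64-inode chunk, and offset can reach 63. With the signed constant, 1LL << 63 shifts a one into the sign bit, which is undefined behaviour in C; 1ULL keeps every shift in the 0..63 range well defined. A standalone sketch of the idiom, not taken from the patch (ENTRIES_PER_CHUNK and the helper names are illustrative):

/*
 * Standalone sketch (not xfs_repair code): a 64-entry presence bitmap
 * indexed the same way pmask is, using an unsigned constant so that
 * offset 63 stays well defined.
 */
#include <stdint.h>
#include <stdio.h>

#define ENTRIES_PER_CHUNK	64	/* stands in for XFS_INODES_PER_CHUNK */

static uint64_t mask;

static void
set_present(int offset)
{
	mask |= 1ULL << offset;		/* 1LL << 63 would be undefined behaviour */
}

static int
is_present(int offset)
{
	return (mask & (1ULL << offset)) != 0;
}

int
main(void)
{
	set_present(0);
	set_present(ENTRIES_PER_CHUNK - 1);	/* offset 63, the case that matters */
	printf("%d %d %d\n", is_present(0), is_present(1),
	       is_present(ENTRIES_PER_CHUNK - 1));	/* prints "1 0 1" */
	return 0;
}
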
xfs_ino_t
else
ptbl = irec->ino_un.plist;
- if (ptbl->pmask & (1LL << offset)) {
- bitmask = 1LL;
+ if (ptbl->pmask & (1ULL << offset)) {
+ bitmask = 1ULL;
target = 0;
for (i = 0; i < offset; i++) {
* sparse state in cluster sized chunks as cluster size
* is the min. granularity of sparse irec regions.
*/
- if ((sparse & ((1 << inodes_per_cluster) - 1)) == 0)
+ if ((sparse & ((1ULL << inodes_per_cluster) - 1)) == 0)
pf_queue_io(args, &map, 1,
(cur_irec->ino_isa_dir != 0) ?
B_DIR_INODE : B_INODE);
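
The final hunk fixes the same class of problem in the prefetch path: the low inodes_per_cluster bits of the sparse bitmap are tested to decide whether a whole cluster's worth of inodes is present before queueing the read. Building that mask with a plain int, (1 << inodes_per_cluster) - 1, is undefined once inodes_per_cluster reaches 31; the unsigned 64-bit constant keeps the mask correct for larger clusters. A standalone sketch of the check, not taken from the patch (the function and the sample values are illustrative):

/*
 * Standalone sketch (not xfs_repair code): test whether the low n bits
 * of a 64-bit sparse bitmap are all clear, i.e. whether one cluster's
 * worth of inodes is fully allocated.
 */
#include <stdint.h>
#include <stdio.h>

static int
cluster_is_dense(uint64_t sparse, unsigned int n)
{
	/*
	 * (1 << n) - 1 with a plain int is undefined once n reaches 31;
	 * 1ULL builds the mask correctly for any n up to 63.
	 */
	uint64_t mask = (1ULL << n) - 1;

	return (sparse & mask) == 0;
}

int
main(void)
{
	uint64_t sparse = 0xffffffff00000000ULL;	/* upper half of the chunk is sparse */

	printf("%d\n", cluster_is_dense(sparse, 32));	/* 1: low 32 inodes all present */
	printf("%d\n", cluster_is_dense(sparse, 33));	/* 0: crosses into the sparse region */
	return 0;
}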