In a recent commit, I inadvertently changed a comparison from being an
unsigned comparison (on 64-bit systems) to being a signed comparison
(which it had always been on 32-bit systems). This led to a sporadic
fstests failure.
To make sure this comparison is always unsigned, introduce a new type,
uoff_t, which is the unsigned version of loff_t. File sizes are
generally restricted to a signed integer, but in these two places it is
convenient to pass -1 to indicate "up to the end of the file".
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://patch.msgid.link/20251123220518.1447261-1-willy@infradead.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
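
As an illustrative sketch only (not part of the patch): the variable
names and the specific range check below are hypothetical stand-ins for
the kind of comparison the truncate paths perform, showing why a signed
lend misbehaves when -1 is passed to mean "up to the end of the file".

	#include <stdio.h>

	typedef long long loff_demo_t;          /* stands in for loff_t */
	typedef unsigned long long uoff_demo_t; /* stands in for uoff_t */

	int main(void)
	{
		loff_demo_t pos = 4096;          /* a position inside the file   */
		loff_demo_t lend_signed = -1;    /* caller means "to end of file" */
		uoff_demo_t lend_unsigned = -1;  /* same value, unsigned maximum  */

		/* Signed: 4096 <= -1 is false, so the position would wrongly
		 * be treated as lying past the end of the range. */
		printf("signed:   pos <= lend is %d\n", pos <= lend_signed);

		/* Unsigned: -1 wraps to the largest offset, so every position
		 * compares as inside the range, which is the intended meaning. */
		printf("unsigned: pos <= lend is %d\n",
		       (uoff_demo_t)pos <= lend_unsigned);

		return 0;
	}

With lend typed as uoff_t, the -1 sentinel always compares as the
largest possible offset on both 32-bit and 64-bit systems, which is the
behaviour the callers rely on.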
extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
/* truncate.c */
-extern void truncate_inode_pages(struct address_space *, loff_t);
-extern void truncate_inode_pages_range(struct address_space *,
- loff_t lstart, loff_t lend);
-extern void truncate_inode_pages_final(struct address_space *);
+void truncate_inode_pages(struct address_space *mapping, loff_t lstart);
+void truncate_inode_pages_range(struct address_space *mapping, loff_t lstart,
+ uoff_t lend);
+void truncate_inode_pages_final(struct address_space *mapping);
/* generic vm_area_ops exported for stackable file systems */
extern vm_fault_t filemap_fault(struct vm_fault *vmf);
pgoff_t index, gfp_t gfp_mask);
int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
struct list_head *folio_list);
-void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
+void shmem_truncate_range(struct inode *inode, loff_t start, uoff_t end);
int shmem_unuse(unsigned int type);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#if defined(__GNUC__)
typedef __kernel_loff_t loff_t;
+typedef __kernel_uoff_t uoff_t;
#endif
/*
*/
typedef __kernel_long_t __kernel_off_t;
typedef long long __kernel_loff_t;
+typedef unsigned long long __kernel_uoff_t;
typedef __kernel_long_t __kernel_old_time_t;
#ifndef __KERNEL__
typedef __kernel_long_t __kernel_time_t;
* Remove range of pages and swap entries from page cache, and free them.
* If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
*/
-static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
+static void shmem_undo_range(struct inode *inode, loff_t lstart, uoff_t lend,
bool unfalloc)
{
struct address_space *mapping = inode->i_mapping;
shmem_recalc_inode(inode, 0, -nr_swaps_freed);
}
-void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+void shmem_truncate_range(struct inode *inode, loff_t lstart, uoff_t lend)
{
shmem_undo_range(inode, lstart, lend, false);
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
}
#endif
-void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+void shmem_truncate_range(struct inode *inode, loff_t lstart, uoff_t lend)
{
truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
* page aligned properly.
*/
void truncate_inode_pages_range(struct address_space *mapping,
- loff_t lstart, loff_t lend)
+ loff_t lstart, uoff_t lend)
{
pgoff_t start; /* inclusive */
pgoff_t end; /* exclusive */