1 From: Nick Piggin <npiggin@suse.de>
2 Subject: mm: page_mkwrite kABI compat 1
5 Rename page_mkwrite to page_mkwrite2, put it into a union, and in that union
6 also add a new page_mkwrite which has the same prototype as the old one. Update
7 all in-tree code to use page_mkwrite2 (can't initialize anonymous unions so
10 [mmarek: added __GENKSYMS__ ifdef: ->_pmkw.page_mkwrite() will be the same
11 as ->page_mkwrite() in assembly, but for genksyms they're two different things.]
13 Signed-off-by: Nick Piggin <npiggin@suse.de>
15 drivers/video/fb_defio.c | 2 +-
16 fs/buffer.c | 38 +++++++++++++++++++++++++++++++++++++-
19 fs/gfs2/ops_file.c | 2 +-
21 fs/ocfs2/mmap.c | 2 +-
22 fs/ubifs/file.c | 2 +-
23 fs/xfs/linux-2.6/xfs_file.c | 4 ++--
24 include/linux/buffer_head.h | 4 +++-
25 include/linux/mm.h | 19 ++++++++++++++++---
26 mm/memory.c | 8 ++++----
28 13 files changed, 70 insertions(+), 19 deletions(-)
30 --- a/drivers/video/fb_defio.c
31 +++ b/drivers/video/fb_defio.c
32 @@ -112,7 +112,7 @@ page_already_added:
34 static struct vm_operations_struct fb_deferred_io_vm_ops = {
35 .fault = fb_deferred_io_fault,
36 - .page_mkwrite = fb_deferred_io_mkwrite,
37 + ._pmkw.page_mkwrite2 = fb_deferred_io_mkwrite,
40 static int fb_deferred_io_set_page_dirty(struct page *page)
43 @@ -125,7 +125,7 @@ force_commit:
45 static struct vm_operations_struct ext4_file_vm_ops = {
46 .fault = filemap_fault,
47 - .page_mkwrite = ext4_page_mkwrite,
48 + ._pmkw.page_mkwrite2 = ext4_page_mkwrite,
51 static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
54 @@ -1235,7 +1235,7 @@ static int fuse_page_mkwrite(struct vm_a
55 static struct vm_operations_struct fuse_file_vm_ops = {
56 .close = fuse_vma_close,
57 .fault = filemap_fault,
58 - .page_mkwrite = fuse_page_mkwrite,
59 + ._pmkw.page_mkwrite2 = fuse_page_mkwrite,
62 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
63 --- a/fs/gfs2/ops_file.c
64 +++ b/fs/gfs2/ops_file.c
65 @@ -421,7 +421,7 @@ out:
67 static struct vm_operations_struct gfs2_vm_ops = {
68 .fault = filemap_fault,
69 - .page_mkwrite = gfs2_page_mkwrite,
70 + ._pmkw.page_mkwrite2 = gfs2_page_mkwrite,
76 @@ -486,7 +486,7 @@ out_unlock:
78 static struct vm_operations_struct nfs_file_vm_ops = {
79 .fault = filemap_fault,
80 - .page_mkwrite = nfs_vm_page_mkwrite,
81 + ._pmkw.page_mkwrite2 = nfs_vm_page_mkwrite,
84 static int nfs_need_sync_write(struct file *filp, struct inode *inode)
87 @@ -200,7 +200,7 @@ out:
89 static struct vm_operations_struct ocfs2_file_vm_ops = {
91 - .page_mkwrite = ocfs2_page_mkwrite,
92 + ._pmkw.page_mkwrite2 = ocfs2_page_mkwrite,
95 int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
98 @@ -1234,7 +1234,7 @@ out_unlock:
100 static struct vm_operations_struct ubifs_file_vm_ops = {
101 .fault = filemap_fault,
102 - .page_mkwrite = ubifs_vm_page_mkwrite,
103 + ._pmkw.page_mkwrite2 = ubifs_vm_page_mkwrite,
106 static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
107 --- a/fs/xfs/linux-2.6/xfs_file.c
108 +++ b/fs/xfs/linux-2.6/xfs_file.c
109 @@ -429,7 +429,7 @@ xfs_vm_page_mkwrite(
110 struct vm_area_struct *vma,
111 struct vm_fault *vmf)
113 - return block_page_mkwrite(vma, vmf, xfs_get_blocks);
114 + return block_page_mkwrite2(vma, vmf, xfs_get_blocks);
117 const struct file_operations xfs_file_operations = {
118 @@ -485,5 +485,5 @@ const struct file_operations xfs_dir_fil
120 static struct vm_operations_struct xfs_file_vm_ops = {
121 .fault = filemap_fault,
122 - .page_mkwrite = xfs_vm_page_mkwrite,
123 + ._pmkw.page_mkwrite2 = xfs_vm_page_mkwrite,
125 --- a/include/linux/mm.h
126 +++ b/include/linux/mm.h
127 @@ -172,9 +172,22 @@ struct vm_operations_struct {
128 void (*close)(struct vm_area_struct * area);
129 int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
131 - /* notification that a previously read-only page is about to become
132 - * writable, if an error is returned it will cause a SIGBUS */
133 - int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
135 + int (*page_mkwrite)(struct vm_area_struct *, struct page *);
139 + * XXX: this is an ABI compatibility hack.
140 + * Using the fixed page_mkwrite2 call requires VM_PAGE_MKWRITE2 to be
141 + * set in vma->vm_flags
144 + /* notification that a previously read-only page is about to become
145 + * writable, if an error is returned it will cause a SIGBUS */
146 + int (*page_mkwrite)(struct vm_area_struct *, struct page *);
147 + int (*page_mkwrite2)(struct vm_area_struct *vma, struct vm_fault *vmf);
151 /* called by access_process_vm when get_user_pages() fails, typically
152 * for use by special VMAs that can switch between memory and hardware
155 @@ -2402,7 +2402,7 @@ int block_commit_write(struct page *page
159 -block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
160 +block_page_mkwrite2(struct vm_area_struct *vma, struct vm_fault *vmf,
161 get_block_t get_block)
163 struct page *page = vmf->page;
164 @@ -2444,6 +2444,41 @@ out:
171 +block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
172 + get_block_t get_block)
174 + struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
180 + size = i_size_read(inode);
181 + if ((page->mapping != inode->i_mapping) ||
182 + (page_offset(page) > size)) {
183 + /* page got truncated out from underneath us */
187 + /* page is wholly or partially inside EOF */
188 + if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
189 + end = size & ~PAGE_CACHE_MASK;
191 + end = PAGE_CACHE_SIZE;
193 + ret = block_prepare_write(page, 0, end, get_block);
195 + ret = block_commit_write(page, 0, end);
203 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
204 * immediately, while under the page lock. So it needs a special end_io
205 * handler which does not touch the bh after unlocking it.
206 @@ -3362,6 +3397,7 @@ EXPORT_SYMBOL(__wait_on_buffer);
207 EXPORT_SYMBOL(block_commit_write);
208 EXPORT_SYMBOL(block_prepare_write);
209 EXPORT_SYMBOL(block_page_mkwrite);
210 +EXPORT_SYMBOL(block_page_mkwrite2);
211 EXPORT_SYMBOL(block_read_full_page);
212 EXPORT_SYMBOL(block_sync_page);
213 EXPORT_SYMBOL(block_truncate_page);
214 --- a/include/linux/buffer_head.h
215 +++ b/include/linux/buffer_head.h
216 @@ -222,7 +222,9 @@ int cont_write_begin(struct file *, stru
217 get_block_t *, loff_t *);
218 int generic_cont_expand_simple(struct inode *inode, loff_t size);
219 int block_commit_write(struct page *page, unsigned from, unsigned to);
220 -int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
221 +int block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
222 + get_block_t get_block);
223 +int block_page_mkwrite2(struct vm_area_struct *vma, struct vm_fault *vmf,
224 get_block_t get_block);
225 void block_sync_page(struct page *);
226 sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
229 @@ -1800,7 +1800,7 @@ static int do_wp_page(struct mm_struct *
230 * read-only shared pages can get COWed by
231 * get_user_pages(.write=1, .force=1).
233 - if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
234 + if (vma->vm_ops && vma->vm_ops->_pmkw.page_mkwrite2) {
238 @@ -1821,7 +1821,7 @@ static int do_wp_page(struct mm_struct *
239 page_cache_get(old_page);
240 pte_unmap_unlock(page_table, ptl);
242 - tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
243 + tmp = vma->vm_ops->_pmkw.page_mkwrite2(vma, &vmf);
245 (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
247 @@ -2517,12 +2517,12 @@ static int __do_fault(struct mm_struct *
248 * address space wants to know that the page is about
251 - if (vma->vm_ops->page_mkwrite) {
252 + if (vma->vm_ops->_pmkw.page_mkwrite2) {
256 vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
257 - tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
258 + tmp = vma->vm_ops->_pmkw.page_mkwrite2(vma, &vmf);
260 (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
264 @@ -1071,7 +1071,7 @@ int vma_wants_writenotify(struct vm_area
267 /* The backer wishes to know when pages are first written to? */
268 - if (vma->vm_ops && vma->vm_ops->page_mkwrite)
269 + if (vma->vm_ops && vma->vm_ops->_pmkw.page_mkwrite)
272 /* The open routine did something to the protections already? */