Commit: 00e5a55c
1 | From: Nick Piggin <npiggin@suse.de> |
2 | Subject: mm: page_mkwrite kABI compat 1 | |
3 | Patch-upstream: never | |
4 | ||
5 | Rename page_mkwrite to page_mkwrite2, put it into a union, and in that union | |
6 | also add a new page_mkwrite which has the same prototype as the old one. Update | |
7 | all in-tree code to use page_mkwrite2 (can't initialize anonymous unions so | |
8 | it's a bit ugly :( ). | |
9 | ||
10 | [mmarek: added __GENKSYMS__ ifdef: ->_pmkw.page_mkwrite() will be the same | |
11 | as ->page_mkwrite() in assembly, but for genksyms it's two different things.] | |
12 | ||
13 | Signed-off-by: Nick Piggin <npiggin@suse.de> | |
14 | --- | |
15 | drivers/video/fb_defio.c | 2 +- | |
16 | fs/buffer.c | 38 +++++++++++++++++++++++++++++++++++++- | |
17 | fs/ext4/file.c | 2 +- | |
18 | fs/fuse/file.c | 2 +- | |
19 | fs/gfs2/ops_file.c | 2 +- | |
20 | fs/nfs/file.c | 2 +- | |
21 | fs/ocfs2/mmap.c | 2 +- | |
22 | fs/ubifs/file.c | 2 +- | |
23 | fs/xfs/linux-2.6/xfs_file.c | 4 ++-- | |
24 | include/linux/buffer_head.h | 4 +++- | |
25 | include/linux/mm.h | 19 ++++++++++++++++--- | |
26 | mm/memory.c | 8 ++++---- | |
27 | mm/mmap.c | 2 +- | |
28 | 13 files changed, 70 insertions(+), 19 deletions(-) | |
29 | ||
30 | --- a/drivers/video/fb_defio.c | |
31 | +++ b/drivers/video/fb_defio.c | |
32 | @@ -112,7 +112,7 @@ page_already_added: | |
33 | ||
34 | static struct vm_operations_struct fb_deferred_io_vm_ops = { | |
35 | .fault = fb_deferred_io_fault, | |
36 | - .page_mkwrite = fb_deferred_io_mkwrite, | |
37 | + ._pmkw.page_mkwrite2 = fb_deferred_io_mkwrite, | |
38 | }; | |
39 | ||
40 | static int fb_deferred_io_set_page_dirty(struct page *page) | |
41 | --- a/fs/ext4/file.c | |
42 | +++ b/fs/ext4/file.c | |
43 | @@ -125,7 +125,7 @@ force_commit: | |
44 | ||
45 | static struct vm_operations_struct ext4_file_vm_ops = { | |
46 | .fault = filemap_fault, | |
47 | - .page_mkwrite = ext4_page_mkwrite, | |
48 | + ._pmkw.page_mkwrite2 = ext4_page_mkwrite, | |
49 | }; | |
50 | ||
51 | static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma) | |
52 | --- a/fs/fuse/file.c | |
53 | +++ b/fs/fuse/file.c | |
54 | @@ -1235,7 +1235,7 @@ static int fuse_page_mkwrite(struct vm_a | |
55 | static struct vm_operations_struct fuse_file_vm_ops = { | |
56 | .close = fuse_vma_close, | |
57 | .fault = filemap_fault, | |
58 | - .page_mkwrite = fuse_page_mkwrite, | |
59 | + ._pmkw.page_mkwrite2 = fuse_page_mkwrite, | |
60 | }; | |
61 | ||
62 | static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) | |
63 | --- a/fs/gfs2/ops_file.c | |
64 | +++ b/fs/gfs2/ops_file.c | |
65 | @@ -421,7 +421,7 @@ out: | |
66 | ||
67 | static struct vm_operations_struct gfs2_vm_ops = { | |
68 | .fault = filemap_fault, | |
69 | - .page_mkwrite = gfs2_page_mkwrite, | |
70 | + ._pmkw.page_mkwrite2 = gfs2_page_mkwrite, | |
71 | }; | |
72 | ||
73 | ||
74 | --- a/fs/nfs/file.c | |
75 | +++ b/fs/nfs/file.c | |
76 | @@ -486,7 +486,7 @@ out_unlock: | |
77 | ||
78 | static struct vm_operations_struct nfs_file_vm_ops = { | |
79 | .fault = filemap_fault, | |
80 | - .page_mkwrite = nfs_vm_page_mkwrite, | |
81 | + ._pmkw.page_mkwrite2 = nfs_vm_page_mkwrite, | |
82 | }; | |
83 | ||
84 | static int nfs_need_sync_write(struct file *filp, struct inode *inode) | |
85 | --- a/fs/ocfs2/mmap.c | |
86 | +++ b/fs/ocfs2/mmap.c | |
87 | @@ -200,7 +200,7 @@ out: | |
88 | ||
89 | static struct vm_operations_struct ocfs2_file_vm_ops = { | |
90 | .fault = ocfs2_fault, | |
91 | - .page_mkwrite = ocfs2_page_mkwrite, | |
92 | + ._pmkw.page_mkwrite2 = ocfs2_page_mkwrite, | |
93 | }; | |
94 | ||
95 | int ocfs2_mmap(struct file *file, struct vm_area_struct *vma) | |
96 | --- a/fs/ubifs/file.c | |
97 | +++ b/fs/ubifs/file.c | |
98 | @@ -1234,7 +1234,7 @@ out_unlock: | |
99 | ||
100 | static struct vm_operations_struct ubifs_file_vm_ops = { | |
101 | .fault = filemap_fault, | |
102 | - .page_mkwrite = ubifs_vm_page_mkwrite, | |
103 | + ._pmkw.page_mkwrite2 = ubifs_vm_page_mkwrite, | |
104 | }; | |
105 | ||
106 | static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma) | |
107 | --- a/fs/xfs/linux-2.6/xfs_file.c | |
108 | +++ b/fs/xfs/linux-2.6/xfs_file.c | |
109 | @@ -429,7 +429,7 @@ xfs_vm_page_mkwrite( | |
110 | struct vm_area_struct *vma, | |
111 | struct vm_fault *vmf) | |
112 | { | |
113 | - return block_page_mkwrite(vma, vmf, xfs_get_blocks); | |
114 | + return block_page_mkwrite2(vma, vmf, xfs_get_blocks); | |
115 | } | |
116 | ||
117 | const struct file_operations xfs_file_operations = { | |
118 | @@ -485,5 +485,5 @@ const struct file_operations xfs_dir_fil | |
119 | ||
120 | static struct vm_operations_struct xfs_file_vm_ops = { | |
121 | .fault = filemap_fault, | |
122 | - .page_mkwrite = xfs_vm_page_mkwrite, | |
123 | + ._pmkw.page_mkwrite2 = xfs_vm_page_mkwrite, | |
124 | }; | |
125 | --- a/include/linux/mm.h | |
126 | +++ b/include/linux/mm.h | |
127 | @@ -172,9 +172,22 @@ struct vm_operations_struct { | |
128 | void (*close)(struct vm_area_struct * area); | |
129 | int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); | |
130 | ||
131 | - /* notification that a previously read-only page is about to become | |
132 | - * writable, if an error is returned it will cause a SIGBUS */ | |
133 | - int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); | |
134 | +#ifdef __GENKSYMS__ | |
135 | + int (*page_mkwrite)(struct vm_area_struct *, struct page *); | |
136 | +#else | |
137 | + union { | |
138 | + /* | |
139 | + * XXX: this is an ABI compatibility hack. | |
140 | + * Using the fixed page_mkwrite2 call requires VM_PAGE_MKWRITE2 to be | |
141 | + * set in vma->vm_flags | |
142 | + */ | |
143 | + | |
144 | + /* notification that a previously read-only page is about to become | |
145 | + * writable, if an error is returned it will cause a SIGBUS */ | |
146 | + int (*page_mkwrite)(struct vm_area_struct *, struct page *); | |
147 | + int (*page_mkwrite2)(struct vm_area_struct *vma, struct vm_fault *vmf); | |
148 | + } _pmkw; | |
149 | +#endif | |
150 | ||
151 | /* called by access_process_vm when get_user_pages() fails, typically | |
152 | * for use by special VMAs that can switch between memory and hardware | |
153 | --- a/fs/buffer.c | |
154 | +++ b/fs/buffer.c | |
155 | @@ -2402,7 +2402,7 @@ int block_commit_write(struct page *page | |
156 | * unlock the page. | |
157 | */ | |
158 | int | |
159 | -block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | |
160 | +block_page_mkwrite2(struct vm_area_struct *vma, struct vm_fault *vmf, | |
161 | get_block_t get_block) | |
162 | { | |
163 | struct page *page = vmf->page; | |
164 | @@ -2444,6 +2444,41 @@ out: | |
165 | } | |
166 | ||
167 | /* | |
168 | + * XXX: ABI hack | |
169 | + */ | |
170 | +int | |
171 | +block_page_mkwrite(struct vm_area_struct *vma, struct page *page, | |
172 | + get_block_t get_block) | |
173 | +{ | |
174 | + struct inode *inode = vma->vm_file->f_path.dentry->d_inode; | |
175 | + unsigned long end; | |
176 | + loff_t size; | |
177 | + int ret = -EINVAL; | |
178 | + | |
179 | + lock_page(page); | |
180 | + size = i_size_read(inode); | |
181 | + if ((page->mapping != inode->i_mapping) || | |
182 | + (page_offset(page) > size)) { | |
183 | + /* page got truncated out from underneath us */ | |
184 | + goto out_unlock; | |
185 | + } | |
186 | + | |
187 | + /* page is wholly or partially inside EOF */ | |
188 | + if (((page->index + 1) << PAGE_CACHE_SHIFT) > size) | |
189 | + end = size & ~PAGE_CACHE_MASK; | |
190 | + else | |
191 | + end = PAGE_CACHE_SIZE; | |
192 | + | |
193 | + ret = block_prepare_write(page, 0, end, get_block); | |
194 | + if (!ret) | |
195 | + ret = block_commit_write(page, 0, end); | |
196 | + | |
197 | +out_unlock: | |
198 | + unlock_page(page); | |
199 | + return ret; | |
200 | +} | |
201 | + | |
202 | +/* | |
203 | * nobh_write_begin()'s prereads are special: the buffer_heads are freed | |
204 | * immediately, while under the page lock. So it needs a special end_io | |
205 | * handler which does not touch the bh after unlocking it. | |
206 | @@ -3362,6 +3397,7 @@ EXPORT_SYMBOL(__wait_on_buffer); | |
207 | EXPORT_SYMBOL(block_commit_write); | |
208 | EXPORT_SYMBOL(block_prepare_write); | |
209 | EXPORT_SYMBOL(block_page_mkwrite); | |
210 | +EXPORT_SYMBOL(block_page_mkwrite2); | |
211 | EXPORT_SYMBOL(block_read_full_page); | |
212 | EXPORT_SYMBOL(block_sync_page); | |
213 | EXPORT_SYMBOL(block_truncate_page); | |
214 | --- a/include/linux/buffer_head.h | |
215 | +++ b/include/linux/buffer_head.h | |
216 | @@ -222,7 +222,9 @@ int cont_write_begin(struct file *, stru | |
217 | get_block_t *, loff_t *); | |
218 | int generic_cont_expand_simple(struct inode *inode, loff_t size); | |
219 | int block_commit_write(struct page *page, unsigned from, unsigned to); | |
220 | -int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | |
221 | +int block_page_mkwrite(struct vm_area_struct *vma, struct page *page, | |
222 | + get_block_t get_block); | |
223 | +int block_page_mkwrite2(struct vm_area_struct *vma, struct vm_fault *vmf, | |
224 | get_block_t get_block); | |
225 | void block_sync_page(struct page *); | |
226 | sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); | |
227 | --- a/mm/memory.c | |
228 | +++ b/mm/memory.c | |
229 | @@ -1800,7 +1800,7 @@ static int do_wp_page(struct mm_struct * | |
230 | * read-only shared pages can get COWed by | |
231 | * get_user_pages(.write=1, .force=1). | |
232 | */ | |
233 | - if (vma->vm_ops && vma->vm_ops->page_mkwrite) { | |
234 | + if (vma->vm_ops && vma->vm_ops->_pmkw.page_mkwrite2) { | |
235 | struct vm_fault vmf; | |
236 | int tmp; | |
237 | ||
238 | @@ -1821,7 +1821,7 @@ static int do_wp_page(struct mm_struct * | |
239 | page_cache_get(old_page); | |
240 | pte_unmap_unlock(page_table, ptl); | |
241 | ||
242 | - tmp = vma->vm_ops->page_mkwrite(vma, &vmf); | |
243 | + tmp = vma->vm_ops->_pmkw.page_mkwrite2(vma, &vmf); | |
244 | if (unlikely(tmp & | |
245 | (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { | |
246 | ret = tmp; | |
247 | @@ -2517,12 +2517,12 @@ static int __do_fault(struct mm_struct * | |
248 | * address space wants to know that the page is about | |
249 | * to become writable | |
250 | */ | |
251 | - if (vma->vm_ops->page_mkwrite) { | |
252 | + if (vma->vm_ops->_pmkw.page_mkwrite2) { | |
253 | int tmp; | |
254 | ||
255 | unlock_page(page); | |
256 | vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; | |
257 | - tmp = vma->vm_ops->page_mkwrite(vma, &vmf); | |
258 | + tmp = vma->vm_ops->_pmkw.page_mkwrite2(vma, &vmf); | |
259 | if (unlikely(tmp & | |
260 | (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { | |
261 | ret = tmp; | |
262 | --- a/mm/mmap.c | |
263 | +++ b/mm/mmap.c | |
264 | @@ -1071,7 +1071,7 @@ int vma_wants_writenotify(struct vm_area | |
265 | return 0; | |
266 | ||
267 | /* The backer wishes to know when pages are first written to? */ | |
268 | - if (vma->vm_ops && vma->vm_ops->page_mkwrite) | |
269 | + if (vma->vm_ops && vma->vm_ops->_pmkw.page_mkwrite) | |
270 | return 1; | |
271 | ||
272 | /* The open routine did something to the protections already? */ |