Commit | Line | Data |
---|---|---|
2cb7cef9 BS |
1 | From: Nick Piggin <npiggin@suse.de> |
2 | Subject: mm: page_mkwrite kABI compat 1 | |
3 | Patch-upstream: never | |
4 | ||
5 | Rename page_mkwrite to page_mkwrite2, put it into a union, and in that union | |
6 | also add a new page_mkwrite which has the same prototype as the old one. Update | |
7 | all in-tree code to use page_mkwrite2 (can't initialize anonymous unions so | |
8 | it's a bit ugly :( ). | |
9 | ||
10 | [mmarek: added __GENKSYMS__ ifdef: ->_pmkw.page_mkwrite() will be the same | |
11 | as ->page_mkwrite() in assembly, but for genksyms it's two different things.] | |
12 | ||
13 | Signed-off-by: Nick Piggin <npiggin@suse.de> | |
14 | --- | |
15 | drivers/video/fb_defio.c | 2 +- | |
16 | fs/buffer.c | 38 +++++++++++++++++++++++++++++++++++++- | |
17 | fs/ext4/file.c | 2 +- | |
18 | fs/fuse/file.c | 2 +- | |
19 | fs/gfs2/ops_file.c | 2 +- | |
20 | fs/nfs/file.c | 2 +- | |
21 | fs/ocfs2/mmap.c | 2 +- | |
22 | fs/ubifs/file.c | 2 +- | |
23 | fs/xfs/linux-2.6/xfs_file.c | 4 ++-- | |
24 | include/linux/buffer_head.h | 4 +++- | |
25 | include/linux/mm.h | 19 ++++++++++++++++--- | |
26 | mm/memory.c | 8 ++++---- | |
27 | mm/mmap.c | 2 +- | |
28 | 13 files changed, 70 insertions(+), 19 deletions(-) | |
29 | ||
30 | --- a/drivers/video/fb_defio.c | |
31 | +++ b/drivers/video/fb_defio.c | |
32 | @@ -112,7 +112,7 @@ page_already_added: | |
33 | ||
34 | static struct vm_operations_struct fb_deferred_io_vm_ops = { | |
35 | .fault = fb_deferred_io_fault, | |
36 | - .page_mkwrite = fb_deferred_io_mkwrite, | |
37 | + ._pmkw.page_mkwrite2 = fb_deferred_io_mkwrite, | |
38 | }; | |
39 | ||
40 | static int fb_deferred_io_set_page_dirty(struct page *page) | |
41 | --- a/fs/buffer.c | |
42 | +++ b/fs/buffer.c | |
43 | @@ -2402,7 +2402,7 @@ int block_commit_write(struct page *page | |
44 | * unlock the page. | |
45 | */ | |
46 | int | |
47 | -block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | |
48 | +block_page_mkwrite2(struct vm_area_struct *vma, struct vm_fault *vmf, | |
49 | get_block_t get_block) | |
50 | { | |
51 | struct page *page = vmf->page; | |
52 | @@ -2444,6 +2444,41 @@ out: | |
53 | } | |
54 | ||
55 | /* | |
56 | + * XXX: ABI hack | |
57 | + */ | |
58 | +int | |
59 | +block_page_mkwrite(struct vm_area_struct *vma, struct page *page, | |
60 | + get_block_t get_block) | |
61 | +{ | |
62 | + struct inode *inode = vma->vm_file->f_path.dentry->d_inode; | |
63 | + unsigned long end; | |
64 | + loff_t size; | |
65 | + int ret = -EINVAL; | |
66 | + | |
67 | + lock_page(page); | |
68 | + size = i_size_read(inode); | |
69 | + if ((page->mapping != inode->i_mapping) || | |
70 | + (page_offset(page) > size)) { | |
71 | + /* page got truncated out from underneath us */ | |
72 | + goto out_unlock; | |
73 | + } | |
74 | + | |
75 | + /* page is wholly or partially inside EOF */ | |
76 | + if (((page->index + 1) << PAGE_CACHE_SHIFT) > size) | |
77 | + end = size & ~PAGE_CACHE_MASK; | |
78 | + else | |
79 | + end = PAGE_CACHE_SIZE; | |
80 | + | |
81 | + ret = block_prepare_write(page, 0, end, get_block); | |
82 | + if (!ret) | |
83 | + ret = block_commit_write(page, 0, end); | |
84 | + | |
85 | +out_unlock: | |
86 | + unlock_page(page); | |
87 | + return ret; | |
88 | +} | |
89 | + | |
90 | +/* | |
91 | * nobh_write_begin()'s prereads are special: the buffer_heads are freed | |
92 | * immediately, while under the page lock. So it needs a special end_io | |
93 | * handler which does not touch the bh after unlocking it. | |
94 | @@ -3362,6 +3397,7 @@ EXPORT_SYMBOL(__wait_on_buffer); | |
95 | EXPORT_SYMBOL(block_commit_write); | |
96 | EXPORT_SYMBOL(block_prepare_write); | |
97 | EXPORT_SYMBOL(block_page_mkwrite); | |
98 | +EXPORT_SYMBOL(block_page_mkwrite2); | |
99 | EXPORT_SYMBOL(block_read_full_page); | |
100 | EXPORT_SYMBOL(block_sync_page); | |
101 | EXPORT_SYMBOL(block_truncate_page); | |
102 | --- a/fs/ext4/file.c | |
103 | +++ b/fs/ext4/file.c | |
104 | @@ -125,7 +125,7 @@ force_commit: | |
105 | ||
106 | static struct vm_operations_struct ext4_file_vm_ops = { | |
107 | .fault = filemap_fault, | |
108 | - .page_mkwrite = ext4_page_mkwrite, | |
109 | + ._pmkw.page_mkwrite2 = ext4_page_mkwrite, | |
110 | }; | |
111 | ||
112 | static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma) | |
113 | --- a/fs/fuse/file.c | |
114 | +++ b/fs/fuse/file.c | |
115 | @@ -1235,7 +1235,7 @@ static int fuse_page_mkwrite(struct vm_a | |
116 | static struct vm_operations_struct fuse_file_vm_ops = { | |
117 | .close = fuse_vma_close, | |
118 | .fault = filemap_fault, | |
119 | - .page_mkwrite = fuse_page_mkwrite, | |
120 | + ._pmkw.page_mkwrite2 = fuse_page_mkwrite, | |
121 | }; | |
122 | ||
123 | static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) | |
124 | --- a/fs/gfs2/ops_file.c | |
125 | +++ b/fs/gfs2/ops_file.c | |
126 | @@ -421,7 +421,7 @@ out: | |
127 | ||
128 | static struct vm_operations_struct gfs2_vm_ops = { | |
129 | .fault = filemap_fault, | |
130 | - .page_mkwrite = gfs2_page_mkwrite, | |
131 | + ._pmkw.page_mkwrite2 = gfs2_page_mkwrite, | |
132 | }; | |
133 | ||
134 | ||
135 | --- a/fs/nfs/file.c | |
136 | +++ b/fs/nfs/file.c | |
137 | @@ -486,7 +486,7 @@ out_unlock: | |
138 | ||
139 | static struct vm_operations_struct nfs_file_vm_ops = { | |
140 | .fault = filemap_fault, | |
141 | - .page_mkwrite = nfs_vm_page_mkwrite, | |
142 | + ._pmkw.page_mkwrite2 = nfs_vm_page_mkwrite, | |
143 | }; | |
144 | ||
145 | static int nfs_need_sync_write(struct file *filp, struct inode *inode) | |
146 | --- a/fs/ocfs2/mmap.c | |
147 | +++ b/fs/ocfs2/mmap.c | |
148 | @@ -200,7 +200,7 @@ out: | |
149 | ||
150 | static struct vm_operations_struct ocfs2_file_vm_ops = { | |
151 | .fault = ocfs2_fault, | |
152 | - .page_mkwrite = ocfs2_page_mkwrite, | |
153 | + ._pmkw.page_mkwrite2 = ocfs2_page_mkwrite, | |
154 | }; | |
155 | ||
156 | int ocfs2_mmap(struct file *file, struct vm_area_struct *vma) | |
157 | --- a/fs/ubifs/file.c | |
158 | +++ b/fs/ubifs/file.c | |
159 | @@ -1234,7 +1234,7 @@ out_unlock: | |
160 | ||
161 | static struct vm_operations_struct ubifs_file_vm_ops = { | |
162 | .fault = filemap_fault, | |
163 | - .page_mkwrite = ubifs_vm_page_mkwrite, | |
164 | + ._pmkw.page_mkwrite2 = ubifs_vm_page_mkwrite, | |
165 | }; | |
166 | ||
167 | static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma) | |
168 | --- a/fs/xfs/linux-2.6/xfs_file.c | |
169 | +++ b/fs/xfs/linux-2.6/xfs_file.c | |
170 | @@ -429,7 +429,7 @@ xfs_vm_page_mkwrite( | |
171 | struct vm_area_struct *vma, | |
172 | struct vm_fault *vmf) | |
173 | { | |
174 | - return block_page_mkwrite(vma, vmf, xfs_get_blocks); | |
175 | + return block_page_mkwrite2(vma, vmf, xfs_get_blocks); | |
176 | } | |
177 | ||
178 | const struct file_operations xfs_file_operations = { | |
179 | @@ -485,5 +485,5 @@ const struct file_operations xfs_dir_fil | |
180 | ||
181 | static struct vm_operations_struct xfs_file_vm_ops = { | |
182 | .fault = filemap_fault, | |
183 | - .page_mkwrite = xfs_vm_page_mkwrite, | |
184 | + ._pmkw.page_mkwrite2 = xfs_vm_page_mkwrite, | |
185 | }; | |
186 | --- a/include/linux/buffer_head.h | |
187 | +++ b/include/linux/buffer_head.h | |
188 | @@ -222,7 +222,9 @@ int cont_write_begin(struct file *, stru | |
189 | get_block_t *, loff_t *); | |
190 | int generic_cont_expand_simple(struct inode *inode, loff_t size); | |
191 | int block_commit_write(struct page *page, unsigned from, unsigned to); | |
192 | -int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | |
193 | +int block_page_mkwrite(struct vm_area_struct *vma, struct page *page, | |
194 | + get_block_t get_block); | |
195 | +int block_page_mkwrite2(struct vm_area_struct *vma, struct vm_fault *vmf, | |
196 | get_block_t get_block); | |
197 | void block_sync_page(struct page *); | |
198 | sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); | |
199 | --- a/include/linux/mm.h | |
200 | +++ b/include/linux/mm.h | |
201 | @@ -172,9 +172,22 @@ struct vm_operations_struct { | |
202 | void (*close)(struct vm_area_struct * area); | |
203 | int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); | |
204 | ||
205 | - /* notification that a previously read-only page is about to become | |
206 | - * writable, if an error is returned it will cause a SIGBUS */ | |
207 | - int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); | |
208 | +#ifdef __GENKSYMS__ | |
209 | + int (*page_mkwrite)(struct vm_area_struct *, struct page *); | |
210 | +#else | |
211 | + union { | |
212 | + /* | |
213 | + * XXX: this is an ABI compatibility hack. | |
214 | + * Using the fixed page_mkwrite2 call requires VM_PAGE_MKWRITE2 to be | |
215 | + * set in vma->vm_flags | |
216 | + */ | |
217 | + | |
218 | + /* notification that a previously read-only page is about to become | |
219 | + * writable, if an error is returned it will cause a SIGBUS */ | |
220 | + int (*page_mkwrite)(struct vm_area_struct *, struct page *); | |
221 | + int (*page_mkwrite2)(struct vm_area_struct *vma, struct vm_fault *vmf); | |
222 | + } _pmkw; | |
223 | +#endif | |
224 | ||
225 | /* called by access_process_vm when get_user_pages() fails, typically | |
226 | * for use by special VMAs that can switch between memory and hardware | |
227 | --- a/mm/memory.c | |
228 | +++ b/mm/memory.c | |
229 | @@ -1800,7 +1800,7 @@ static int do_wp_page(struct mm_struct * | |
230 | * read-only shared pages can get COWed by | |
231 | * get_user_pages(.write=1, .force=1). | |
232 | */ | |
233 | - if (vma->vm_ops && vma->vm_ops->page_mkwrite) { | |
234 | + if (vma->vm_ops && vma->vm_ops->_pmkw.page_mkwrite2) { | |
235 | struct vm_fault vmf; | |
236 | int tmp; | |
237 | ||
238 | @@ -1821,7 +1821,7 @@ static int do_wp_page(struct mm_struct * | |
239 | page_cache_get(old_page); | |
240 | pte_unmap_unlock(page_table, ptl); | |
241 | ||
242 | - tmp = vma->vm_ops->page_mkwrite(vma, &vmf); | |
243 | + tmp = vma->vm_ops->_pmkw.page_mkwrite2(vma, &vmf); | |
244 | if (unlikely(tmp & | |
245 | (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { | |
246 | ret = tmp; | |
247 | @@ -2517,12 +2517,12 @@ static int __do_fault(struct mm_struct * | |
248 | * address space wants to know that the page is about | |
249 | * to become writable | |
250 | */ | |
251 | - if (vma->vm_ops->page_mkwrite) { | |
252 | + if (vma->vm_ops->_pmkw.page_mkwrite2) { | |
253 | int tmp; | |
254 | ||
255 | unlock_page(page); | |
256 | vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; | |
257 | - tmp = vma->vm_ops->page_mkwrite(vma, &vmf); | |
258 | + tmp = vma->vm_ops->_pmkw.page_mkwrite2(vma, &vmf); | |
259 | if (unlikely(tmp & | |
260 | (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { | |
261 | ret = tmp; | |
262 | --- a/mm/mmap.c | |
263 | +++ b/mm/mmap.c | |
264 | @@ -1074,7 +1074,7 @@ int vma_wants_writenotify(struct vm_area | |
265 | return 0; | |
266 | ||
267 | /* The backer wishes to know when pages are first written to? */ | |
268 | - if (vma->vm_ops && vma->vm_ops->page_mkwrite) | |
269 | + if (vma->vm_ops && vma->vm_ops->_pmkw.page_mkwrite) | |
270 | return 1; | |
271 | ||
272 | /* The open routine did something to the protections already? */ |