/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include "internal.h"


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}

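/*
 * Example (a sketch, not an object in this file): a block-backed
 * filesystem can either leave ->invalidatepage unset and rely on the
 * block_invalidatepage() fallback above, or wire it up explicitly in
 * its address_space_operations; "example_aops" is a made-up name:
 *
 *	static const struct address_space_operations example_aops = {
 *		.invalidatepage	= block_invalidatepage,
 *	};
 */
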
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself; it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

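/*
 * Example: truncate_complete_page() below uses this to cancel a whole
 * page's worth of dirty state just before the page is dropped from the
 * pagecache.  A sketch of that pattern (the page is locked and about
 * to be freed):
 *
 *	cancel_dirty_page(page, PAGE_CACHE_SIZE);
 *	remove_from_page_cache(page);
 */
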
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned. It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping. This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	clear_page_mlock(page);
	remove_from_page_cache(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);	/* pagecache ref */
	return 0;
}

/*
 * This is for invalidate_mapping_pages(). That function can be called at
 * any time, and is not supposed to throw away dirty pages. But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				    (loff_t)page->index << PAGE_CACHE_SHIFT,
				    PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

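/*
 * Example: the memory-failure (HWPOISON) code can use this to get rid
 * of a clean pagecache page without doing any I/O.  A sketch, where a
 * nonzero return means the page is gone from the pagecache (the caller
 * must hold the page lock, as noted above):
 *
 *	lock_page(page);
 *	ret = invalidate_inode_page(page);
 *	unlock_page(page);
 */
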
/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking. It will not
 * block on page locks and it will not block on writeback. The second pass
 * will wait. This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code. Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (!trylock_page(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			if (page->index > next)
				next = page->index;
			next++;
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

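/*
 * Example: the BUG_ON() above requires lend to address the *last byte*
 * of a page.  A caller removing the page-aligned byte range
 * [holebegin, holeend) would therefore pass holeend - 1 (sketch; the
 * hole variables are the caller's own):
 *
 *	truncate_inode_pages_range(mapping, holebegin, holeend - 1);
 *
 * and (loff_t)-1, as passed by truncate_inode_pages() below, always
 * satisfies the alignment check.
 */
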
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

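/*
 * Example: filesystems typically call this when an inode is finally
 * deleted or evicted.  A minimal ->delete_inode sketch (the function
 * name is illustrative, not an existing helper):
 *
 *	static void example_delete_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages(&inode->i_data, 0);
 *		clear_inode(inode);
 *	}
 */
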
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page. But we're not allowed to lock these
			 * pages. So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			ret += invalidate_inode_page(page);

			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL(invalidate_mapping_pages);

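/*
 * Example: "drop_caches"-style code sheds every droppable page of a
 * mapping in one call.  A sketch (nr_dropped is the caller's own
 * variable; the return value counts only the pages actually freed):
 *
 *	unsigned long nr_dropped;
 *
 *	nr_dropped = invalidate_mapping_pages(mapping, 0, -1);
 */
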
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount. We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	clear_page_mlock(page);
	BUG_ON(page_has_private(page));
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
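
/*
 * Example: network filesystems use this when the file has changed on
 * the server and every cached page must go.  A sketch (error handling
 * is the caller's own; -EBUSY means some pages could not be
 * invalidated):
 *
 *	int err = invalidate_inode_pages2(inode->i_mapping);
 *	if (err)
 *		goto out_stale;
 */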