mm/frontswap.c
/*
 * Frontswap frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of frontswap. See
 * Documentation/vm/frontswap.txt for more information.
 *
 * Copyright (C) 2009-2012 Oracle Corp. All rights reserved.
 * Author: Dan Magenheimer
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>

/*
 * frontswap_ops are added by frontswap_register_ops, and provide the
 * frontswap "backend" implementation functions. Multiple implementations
 * may be registered, but implementations can never deregister. This
 * is a simple singly-linked list of all registered implementations.
 */
static struct frontswap_ops *frontswap_ops __read_mostly;

#define for_each_frontswap_ops(ops)		\
	for ((ops) = frontswap_ops; (ops); (ops) = (ops)->next)

/*
 * If enabled, frontswap_store will return failure even on success. As
 * a result, the swap subsystem will always write the page to swap, in
 * effect converting frontswap into a writethrough cache. In this mode,
 * there is no direct reduction in swap writes, but a frontswap backend
 * can unilaterally "reclaim" any pages in use with no data loss, thus
 * providing increased control over maximum memory usage due to frontswap.
 */
static bool frontswap_writethrough_enabled __read_mostly;

/*
 * If enabled, the underlying tmem implementation is capable of doing
 * exclusive gets, so frontswap_load, on a successful tmem_get, must
 * mark the page as no longer in frontswap AND mark it dirty.
 */
static bool frontswap_tmem_exclusive_gets_enabled __read_mostly;

#ifdef CONFIG_DEBUG_FS
/*
 * Counters available via /sys/kernel/debug/frontswap (if debugfs is
 * properly configured). These are for information only so are not protected
 * against increment races.
 */
static u64 frontswap_loads;
static u64 frontswap_succ_stores;
static u64 frontswap_failed_stores;
static u64 frontswap_invalidates;

static inline void inc_frontswap_loads(void) {
	frontswap_loads++;
}
static inline void inc_frontswap_succ_stores(void) {
	frontswap_succ_stores++;
}
static inline void inc_frontswap_failed_stores(void) {
	frontswap_failed_stores++;
}
static inline void inc_frontswap_invalidates(void) {
	frontswap_invalidates++;
}
#else
static inline void inc_frontswap_loads(void) { }
static inline void inc_frontswap_succ_stores(void) { }
static inline void inc_frontswap_failed_stores(void) { }
static inline void inc_frontswap_invalidates(void) { }
#endif

/*
 * Due to the asynchronous nature of the backends loading potentially
 * _after_ the swap system has been activated, we have chokepoints
 * on all frontswap functions to not call the backend until the backend
 * has registered.
 *
 * This does not guard us against the user deciding to call swapoff right as
 * we are calling the backend to initialize (so swapon is in action).
 * Fortunately for us, the swapon_mutex has been taken by the callee so we are
 * OK. The other scenario where calls to frontswap_store (called via
 * swap_writepage) is racing with frontswap_invalidate_area (called via
 * swapoff) is again guarded by the swap subsystem.
 *
 * While no backend is registered all calls to frontswap_[store|load|
 * invalidate_area|invalidate_page] are ignored or fail.
 *
 * The time between the backend being registered and the swap file system
 * calling the backend (via the frontswap_* functions) is indeterminate as
 * frontswap_ops is not atomic_t (or a value guarded by a spinlock).
 * That is OK as we are comfortable missing some of these calls to the newly
 * registered backend.
 *
 * Obviously the opposite (unloading the backend) must be done after all
 * the frontswap_[store|load|invalidate_area|invalidate_page] start
 * ignoring or failing the requests. However, there is currently no way
 * to unload a backend once it is registered.
 */

/*
 * Register operations for frontswap
 */
void frontswap_register_ops(struct frontswap_ops *ops)
{
	DECLARE_BITMAP(a, MAX_SWAPFILES);
	DECLARE_BITMAP(b, MAX_SWAPFILES);
	struct swap_info_struct *si;
	unsigned int i;

	bitmap_zero(a, MAX_SWAPFILES);
	bitmap_zero(b, MAX_SWAPFILES);

	spin_lock(&swap_lock);
	plist_for_each_entry(si, &swap_active_head, list) {
		if (!WARN_ON(!si->frontswap_map))
			set_bit(si->type, a);
	}
	spin_unlock(&swap_lock);

	/* the new ops needs to know the currently active swap devices */
	for_each_set_bit(i, a, MAX_SWAPFILES)
		ops->init(i);

	/*
	 * Setting frontswap_ops must happen after the ops->init() calls
	 * above; cmpxchg implies smp_mb() which will ensure the init is
	 * complete at this point.
	 */
	do {
		ops->next = frontswap_ops;
	} while (cmpxchg(&frontswap_ops, ops->next, ops) != ops->next);

	spin_lock(&swap_lock);
	plist_for_each_entry(si, &swap_active_head, list) {
		if (si->frontswap_map)
			set_bit(si->type, b);
	}
	spin_unlock(&swap_lock);

	/*
	 * On the very unlikely chance that a swap device was added or
	 * removed between setting the "a" list bits and the ops init
	 * calls, we re-check and do init or invalidate for any changed
	 * bits.
	 */
	if (unlikely(!bitmap_equal(a, b, MAX_SWAPFILES))) {
		for (i = 0; i < MAX_SWAPFILES; i++) {
			if (!test_bit(i, a) && test_bit(i, b))
				ops->init(i);
			else if (test_bit(i, a) && !test_bit(i, b))
				ops->invalidate_area(i);
		}
	}
}
EXPORT_SYMBOL(frontswap_register_ops);
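
/*
 * Example: how a (hypothetical) backend might plug in. This is a sketch
 * only; the example_* names and bodies are illustrative, but the callback
 * signatures match the way this file invokes them (ops->init, ops->store,
 * ops->load, ops->invalidate_page, ops->invalidate_area):
 *
 *	static void example_init(unsigned type)
 *	{
 *		... allocate per-swap-device tracking for 'type' ...
 *	}
 *
 *	static int example_store(unsigned type, pgoff_t offset,
 *				 struct page *page)
 *	{
 *		... copy the page somewhere; 0 on success, -1 to decline ...
 *	}
 *
 *	static struct frontswap_ops example_ops = {
 *		.init			= example_init,
 *		.store			= example_store,
 *		.load			= example_load,
 *		.invalidate_page	= example_invalidate_page,
 *		.invalidate_area	= example_invalidate_area,
 *	};
 *
 *	frontswap_register_ops(&example_ops);
 *
 * Note that registration is one-way: there is no deregistration, so a
 * backend must stay loaded once its ops are on the list.
 */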

/*
 * Enable/disable frontswap writethrough (see above).
 */
void frontswap_writethrough(bool enable)
{
	frontswap_writethrough_enabled = enable;
}
EXPORT_SYMBOL(frontswap_writethrough);

/*
 * Enable/disable frontswap exclusive gets (see above).
 */
void frontswap_tmem_exclusive_gets(bool enable)
{
	frontswap_tmem_exclusive_gets_enabled = enable;
}
EXPORT_SYMBOL(frontswap_tmem_exclusive_gets);
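
/*
 * Example: a backend that wants either behaviour flips these knobs once,
 * typically right after registering its ops (hypothetical call site,
 * shown only for illustration):
 *
 *	frontswap_register_ops(&example_ops);
 *	frontswap_writethrough(true);		(pages also always hit disk)
 *	frontswap_tmem_exclusive_gets(true);	(a successful load consumes
 *						 the frontswap copy)
 */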

/*
 * Called when a swap device is swapon'd.
 */
void __frontswap_init(unsigned type, unsigned long *map)
{
	struct swap_info_struct *sis = swap_info[type];
	struct frontswap_ops *ops;

	BUG_ON(sis == NULL);

	/*
	 * p->frontswap is a bitmap that we MUST have to figure out which page
	 * has gone in frontswap. Without it there is no point in continuing.
	 */
	if (WARN_ON(!map))
		return;
	/*
	 * Regardless of whether the frontswap backend has been loaded
	 * before this function or it will be later, we _MUST_ have the
	 * p->frontswap set to something valid to work properly.
	 */
	frontswap_map_set(sis, map);

	for_each_frontswap_ops(ops)
		ops->init(type);
}
EXPORT_SYMBOL(__frontswap_init);

bool __frontswap_test(struct swap_info_struct *sis,
				pgoff_t offset)
{
	if (sis->frontswap_map)
		return test_bit(offset, sis->frontswap_map);
	return false;
}
EXPORT_SYMBOL(__frontswap_test);

static inline void __frontswap_set(struct swap_info_struct *sis,
				   pgoff_t offset)
{
	set_bit(offset, sis->frontswap_map);
	atomic_inc(&sis->frontswap_pages);
}

static inline void __frontswap_clear(struct swap_info_struct *sis,
				     pgoff_t offset)
{
	clear_bit(offset, sis->frontswap_map);
	atomic_dec(&sis->frontswap_pages);
}

/*
 * "Store" data from a page to frontswap and associate it with the page's
 * swaptype and offset. Page must be locked and in the swap cache.
 * If frontswap already contains a page with matching swaptype and
 * offset, the frontswap implementation may either overwrite the data and
 * return success or invalidate the page from frontswap and return failure.
 */
int __frontswap_store(struct page *page)
{
	int ret = -1;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);
	struct frontswap_ops *ops;

	/*
	 * Return if no backend registered.
	 * Don't need to inc frontswap_failed_stores here.
	 */
	if (!frontswap_ops)
		return -1;

	BUG_ON(!PageLocked(page));
	BUG_ON(sis == NULL);

	/*
	 * If a dup, we must remove the old page first; we can't leave the
	 * old page no matter if the store of the new page succeeds or fails,
	 * and we can't rely on the new page replacing the old page as we may
	 * not store to the same implementation that contains the old page.
	 */
	if (__frontswap_test(sis, offset)) {
		__frontswap_clear(sis, offset);
		for_each_frontswap_ops(ops)
			ops->invalidate_page(type, offset);
	}

	/* Try to store in each implementation, until one succeeds. */
	for_each_frontswap_ops(ops) {
		ret = ops->store(type, offset, page);
		if (!ret) /* successful store */
			break;
	}
	if (ret == 0) {
		__frontswap_set(sis, offset);
		inc_frontswap_succ_stores();
	} else {
		inc_frontswap_failed_stores();
	}
	if (frontswap_writethrough_enabled)
		/* report failure so swap also writes to swap device */
		ret = -1;
	return ret;
}
EXPORT_SYMBOL(__frontswap_store);
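
/*
 * Callers do not invoke __frontswap_store() directly; swap_writepage()
 * goes through an inline wrapper. A rough sketch of that wrapper (see
 * include/linux/frontswap.h for the authoritative version):
 *
 *	static inline int frontswap_store(struct page *page)
 *	{
 *		int ret = -1;
 *
 *		if (frontswap_enabled)
 *			ret = __frontswap_store(page);
 *		return ret;
 *	}
 *
 * so a return of 0 means "frontswap took the page" and -1 means "write
 * it to the real swap device".
 */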

/*
 * "Get" data from frontswap associated with swaptype and offset that were
 * specified when the data was put to frontswap and use it to fill the
 * specified page with data. Page must be locked and in the swap cache.
 */
int __frontswap_load(struct page *page)
{
	int ret = -1;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);
	struct frontswap_ops *ops;

	if (!frontswap_ops)
		return -1;

	BUG_ON(!PageLocked(page));
	BUG_ON(sis == NULL);
	if (!__frontswap_test(sis, offset))
		return -1;

	/* Try loading from each implementation, until one succeeds. */
	for_each_frontswap_ops(ops) {
		ret = ops->load(type, offset, page);
		if (!ret) /* successful load */
			break;
	}
	if (ret == 0) {
		inc_frontswap_loads();
		if (frontswap_tmem_exclusive_gets_enabled) {
			SetPageDirty(page);
			__frontswap_clear(sis, offset);
		}
	}
	return ret;
}
EXPORT_SYMBOL(__frontswap_load);

/*
 * Invalidate any data from frontswap associated with the specified swaptype
 * and offset so that a subsequent "get" will fail.
 */
void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct swap_info_struct *sis = swap_info[type];
	struct frontswap_ops *ops;

	if (!frontswap_ops)
		return;

	BUG_ON(sis == NULL);
	if (!__frontswap_test(sis, offset))
		return;

	for_each_frontswap_ops(ops)
		ops->invalidate_page(type, offset);
	__frontswap_clear(sis, offset);
	inc_frontswap_invalidates();
}
EXPORT_SYMBOL(__frontswap_invalidate_page);

/*
 * Invalidate all data from frontswap associated with all offsets for the
 * specified swaptype.
 */
void __frontswap_invalidate_area(unsigned type)
{
	struct swap_info_struct *sis = swap_info[type];
	struct frontswap_ops *ops;

	if (!frontswap_ops)
		return;

	BUG_ON(sis == NULL);
	if (sis->frontswap_map == NULL)
		return;

	for_each_frontswap_ops(ops)
		ops->invalidate_area(type);
	atomic_set(&sis->frontswap_pages, 0);
	bitmap_zero(sis->frontswap_map, sis->max);
}
EXPORT_SYMBOL(__frontswap_invalidate_area);

static unsigned long __frontswap_curr_pages(void)
{
	unsigned long totalpages = 0;
	struct swap_info_struct *si = NULL;

	assert_spin_locked(&swap_lock);
	plist_for_each_entry(si, &swap_active_head, list)
		totalpages += atomic_read(&si->frontswap_pages);
	return totalpages;
}

static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
					int *swapid)
{
	int ret = -EINVAL;
	struct swap_info_struct *si = NULL;
	int si_frontswap_pages;
	unsigned long total_pages_to_unuse = total;
	unsigned long pages = 0, pages_to_unuse = 0;

	assert_spin_locked(&swap_lock);
	plist_for_each_entry(si, &swap_active_head, list) {
		si_frontswap_pages = atomic_read(&si->frontswap_pages);
		if (total_pages_to_unuse < si_frontswap_pages) {
			pages = pages_to_unuse = total_pages_to_unuse;
		} else {
			pages = si_frontswap_pages;
			pages_to_unuse = 0; /* unuse all */
		}
		/* ensure there is enough RAM to fetch pages from frontswap */
		if (security_vm_enough_memory_mm(current->mm, pages)) {
			ret = -ENOMEM;
			continue;
		}
		vm_unacct_memory(pages);
		*unused = pages_to_unuse;
		*swapid = si->type;
		ret = 0;
		break;
	}

	return ret;
}

/*
 * Used to check if it's necessary and feasible to unuse pages.
 * Return 1 when there is nothing to do, 0 when pages need to be shrunk,
 * or an error code when there is an error.
 */
static int __frontswap_shrink(unsigned long target_pages,
				unsigned long *pages_to_unuse,
				int *type)
{
	unsigned long total_pages = 0, total_pages_to_unuse;

	assert_spin_locked(&swap_lock);

	total_pages = __frontswap_curr_pages();
	if (total_pages <= target_pages) {
		/* Nothing to do */
		*pages_to_unuse = 0;
		return 1;
	}
	total_pages_to_unuse = total_pages - target_pages;
	return __frontswap_unuse_pages(total_pages_to_unuse, pages_to_unuse, type);
}

/*
 * Frontswap, like a true swap device, may unnecessarily retain pages
 * under certain circumstances; "shrink" frontswap is essentially a
 * "partial swapoff" and works by calling try_to_unuse to attempt to
 * unuse enough frontswap pages, subject to memory constraints, to
 * reduce the number of pages in frontswap to the number given in the
 * parameter target_pages.
 */
void frontswap_shrink(unsigned long target_pages)
{
	unsigned long pages_to_unuse = 0;
	int uninitialized_var(type), ret;

	/*
	 * we don't want to hold swap_lock while doing a very
	 * lengthy try_to_unuse, but swap_list may change
	 * so restart scan from swap_active_head each time
	 */
	spin_lock(&swap_lock);
	ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
	spin_unlock(&swap_lock);
	if (ret == 0)
		try_to_unuse(type, true, pages_to_unuse);
	return;
}
EXPORT_SYMBOL(frontswap_shrink);
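
/*
 * Example: a backend under memory pressure could use this to halve its
 * frontswap footprint (a hypothetical policy, for illustration only;
 * frontswap_curr_pages() is defined below):
 *
 *	unsigned long target = frontswap_curr_pages() / 2;
 *
 *	frontswap_shrink(target);
 *
 * The operation is best-effort: if there is not enough RAM to pull the
 * pages back in, the shrink is simply partial or skipped.
 */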

/*
 * Count and return the number of frontswap pages across all
 * swap devices. This is exported so that backend drivers can
 * determine current usage without reading debugfs.
 */
unsigned long frontswap_curr_pages(void)
{
	unsigned long totalpages = 0;

	spin_lock(&swap_lock);
	totalpages = __frontswap_curr_pages();
	spin_unlock(&swap_lock);

	return totalpages;
}
EXPORT_SYMBOL(frontswap_curr_pages);

static int __init init_frontswap(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *root = debugfs_create_dir("frontswap", NULL);
	if (root == NULL)
		return -ENXIO;
	debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads);
	debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores);
	debugfs_create_u64("failed_stores", S_IRUGO, root,
				&frontswap_failed_stores);
	debugfs_create_u64("invalidates", S_IRUGO,
				root, &frontswap_invalidates);
#endif
	return 0;
}

module_init(init_frontswap);