/*
 * RAM-backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/radix-tree.h>
#include <linux/buffer_head.h> /* invalidate_bh_lrus() */
#include <linux/slab.h>

#include <asm/uaccess.h>

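/*
 * Block-layer sectors are always 512 bytes (1 << 9), so with 4KiB pages
 * PAGE_SECTORS is 8: each backing-store page holds eight sectors.
 */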
#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)

/*
 * Each block ramdisk device keeps a radix tree, brd_pages, of the pages that
 * hold the block device's contents. A brd page's ->index is its offset in
 * PAGE_SIZE units. This is similar to, but in no way connected with, the
 * kernel's pagecache or buffer cache (which sit above our block device).
 */
struct brd_device {
	int		brd_number;
	int		brd_refcnt;
	loff_t		brd_offset;
	loff_t		brd_sizelimit;
	unsigned	brd_blocksize;

	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages and lock to protect it. This is the contents
	 * of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
};

/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages will never be deleted under us, so we
	 * don't need any further locking or refcounting.
	 *
	 * This is strictly true for the radix-tree nodes as well (i.e. we
	 * don't actually need the rcu_read_lock()), however that is not a
	 * documented feature of the radix-tree API so it is better to be
	 * safe here (we don't have total exclusion from radix tree updates
	 * here, only deletes).
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, insert it, and return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 *
	 * Cannot support XIP and highmem, because our ->direct_access
	 * routine for XIP must return memory that is always addressable.
	 * If XIP was reworked to use pfns and kmap throughout, this
	 * restriction might be able to be lifted.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO;
#ifndef CONFIG_BLK_DEV_XIP
	gfp_flags |= __GFP_HIGHMEM;
#endif
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
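	/*
	 * With the radix-tree nodes preloaded above, radix_tree_insert() can
	 * only fail with -EEXIST here: another task inserted a page at this
	 * index first. In that case drop our page and use the winner's.
	 */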
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	} else
		page->index = idx;
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}

/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

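		/* resume the next gang lookup just past the last index we freed */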
		pos++;

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes,
		 * so will this have to.
		 */
	} while (nr_pages == FREE_BATCH);
}

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOMEM;
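	/* a bvec never exceeds PAGE_SIZE, so it straddles at most two brd pages */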
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOMEM;
	}
	return 0;
}

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page, KM_USER1);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst, KM_USER1);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page, KM_USER1);
		memcpy(dst, src, copy);
		kunmap_atomic(dst, KM_USER1);
	}
}

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
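 * Sectors that have never been written have no backing page and read
 * back as zeroes.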
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page, KM_USER1);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src, KM_USER1);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page, KM_USER1);
			memcpy(dst, src, copy);
			kunmap_atomic(src, KM_USER1);
		} else
			memset(dst, 0, copy);
	}
}

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (rw != READ) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

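	/*
	 * flush_dcache_page() ordering: after filling the caller's page on a
	 * read so user mappings see the new data, and before reading it on a
	 * write so we see data most recently written through user mappings.
	 */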
	mem = kmap_atomic(page, KM_USER0);
	if (rw == READ) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem, KM_USER0);

out:
	return err;
}

static int brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct brd_device *brd = bdev->bd_disk->private_data;
	int rw;
	struct bio_vec *bvec;
	sector_t sector;
	int i;
	int err = -EIO;

	sector = bio->bi_sector;
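	/* fail I/O that would run past the advertised capacity */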
	if (sector + (bio->bi_size >> SECTOR_SHIFT) >
						get_capacity(bdev->bd_disk))
		goto out;

	rw = bio_rw(bio);
	if (rw == READA)
		rw = READ;

	bio_for_each_segment(bvec, bio, i) {
		unsigned int len = bvec->bv_len;
		err = brd_do_bvec(brd, bvec->bv_page, len,
					bvec->bv_offset, rw, sector);
		if (err)
			break;
		sector += len >> SECTOR_SHIFT;
	}

out:
	bio_endio(bio, err);

	return 0;
}

#ifdef CONFIG_BLK_DEV_XIP
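/*
 * Execute-in-place: hand back a kernel address and pfn for the backing
 * page so the caller can access it directly, bypassing the block layer.
 * The sector must be page-aligned and within the device.
 */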
static int brd_direct_access(struct block_device *bdev, sector_t sector,
			void **kaddr, unsigned long *pfn)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct page *page;

	if (!brd)
		return -ENODEV;
	if (sector & (PAGE_SECTORS-1))
		return -EINVAL;
	if (sector + PAGE_SECTORS > get_capacity(bdev->bd_disk))
		return -ERANGE;
	page = brd_insert_page(brd, sector);
	if (!page)
		return -ENOMEM;
	*kaddr = page_address(page);
	*pfn = page_to_pfn(page);

	return 0;
}
#endif

static int brd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int error;
	struct brd_device *brd = bdev->bd_disk->private_data;

	if (cmd != BLKFLSBUF)
		return -ENOTTY;

	/*
	 * ram device BLKFLSBUF has special semantics: we want to actually
	 * release and destroy the ramdisk data.
	 */
	mutex_lock(&bdev->bd_mutex);
	error = -EBUSY;
	if (bdev->bd_openers <= 1) {
		/*
		 * Invalidate the cache first, so it isn't written
		 * back to the device.
		 *
		 * Another thread might instantiate more buffercache here,
		 * but there is not much we can do to close that race.
		 */
		invalidate_bh_lrus();
		truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
		brd_free_pages(brd);
		error = 0;
	}
	mutex_unlock(&bdev->bd_mutex);

	return error;
}

static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.locked_ioctl =		brd_ioctl,
#ifdef CONFIG_BLK_DEV_XIP
	.direct_access =	brd_direct_access,
#endif
};

/*
 * And now the module's code and kernel interface.
 */
static int rd_nr;
int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
static int max_part;
static int part_shift;
module_param(rd_nr, int, 0);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
module_param(rd_size, int, 0);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
module_param(max_part, int, 0);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif

/*
 * The device scheme is derived from loop.c. Keep them in sync where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number = i;
	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;
	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);

	disk = brd->brd_disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_queue;
	disk->major = RAMDISK_MAJOR;
	disk->first_minor = i << part_shift;
	disk->fops = &brd_fops;
	disk->private_data = brd;
	disk->queue = brd->brd_queue;
	disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
	sprintf(disk->disk_name, "ram%d", i);
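	/* rd_size is in KiB; set_capacity() takes 512-byte sectors, hence *2 */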
	set_capacity(disk, rd_size * 2);

	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}

static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}
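/* called with brd_devices_mutex held (see brd_probe()) */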
static struct brd_device *brd_init_one(int i)
{
	struct brd_device *brd;

	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
	}

	brd = brd_alloc(i);
	if (brd) {
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
out:
	return brd;
}

static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}

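/*
 * blk_register_region() probe callback: opening an unclaimed minor in our
 * range lands here, and we instantiate the corresponding device on demand.
 */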
static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;

	mutex_lock(&brd_devices_mutex);
	brd = brd_init_one(dev & MINORMASK);
	kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
	mutex_unlock(&brd_devices_mutex);

	*part = 0;
	return kobj;
}

static int __init brd_init(void)
{
	int i, nr;
	unsigned long range;
	struct brd_device *brd, *next;

	/*
	 * The brd module can now instantiate an underlying device
	 * structure on demand, provided there is a device node to access.
	 * However, this does not work well with user-space tools that
	 * don't know about the "feature". In order not to break any
	 * existing tools, we do the following:
	 *
	 * (1) if rd_nr is specified, create that many devices upfront;
	 *     this also becomes a hard limit.
	 * (2) if rd_nr is not specified, create CONFIG_BLK_DEV_RAM_COUNT
	 *     devices on module load; users can extend the set by creating
	 *     device nodes themselves and having the kernel instantiate
	 *     the actual devices on demand.
	 */

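	/*
	 * Minor numbers are split between devices and partitions: device i
	 * owns minors [i << part_shift, (i + 1) << part_shift).
	 */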
	part_shift = 0;
	if (max_part > 0)
		part_shift = fls(max_part);

	if (rd_nr > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

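	/* nr is how many devices to create now; range is the minor span we claim */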
	if (rd_nr) {
		nr = rd_nr;
		range = rd_nr;
	} else {
		nr = CONFIG_BLK_DEV_RAM_COUNT;
		range = 1UL << (MINORBITS - part_shift);
	}

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
		return -EIO;

	for (i = 0; i < nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list)
		add_disk(brd->brd_disk);

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), range,
				THIS_MODULE, brd_probe, NULL, NULL);

	printk(KERN_INFO "brd: module loaded\n");
	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	return -ENOMEM;
}

static void __exit brd_exit(void)
{
	unsigned long range;
	struct brd_device *brd, *next;

	range = rd_nr ? rd_nr : 1UL << (MINORBITS - part_shift);

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), range);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
}

module_init(brd_init);
module_exit(brd_exit);