]> git.ipfire.org Git - people/arne_f/kernel.git/blame - drivers/block/rbd.c
ceph: propagate layout error on osd request creation
[people/arne_f/kernel.git] / drivers / block / rbd.c
CommitLineData
602adf40
YS
1/*
2 rbd.c -- Export ceph rados objects as a Linux block device
3
4
5 based on drivers/block/osdblk.c:
6
7 Copyright 2009 Red Hat, Inc.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; see the file COPYING. If not, write to
20 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21
22
23
dfc5606d 24 For usage instructions, please refer to:
602adf40 25
dfc5606d 26 Documentation/ABI/testing/sysfs-bus-rbd
602adf40
YS
27
28 */
29
30#include <linux/ceph/libceph.h>
31#include <linux/ceph/osd_client.h>
32#include <linux/ceph/mon_client.h>
33#include <linux/ceph/decode.h>
59c2be1e 34#include <linux/parser.h>
602adf40
YS
35
36#include <linux/kernel.h>
37#include <linux/device.h>
38#include <linux/module.h>
39#include <linux/fs.h>
40#include <linux/blkdev.h>
41
42#include "rbd_types.h"
43
aafb230e
AE
44#define RBD_DEBUG /* Activate rbd_assert() calls */
45
593a9e7b
AE
46/*
47 * The basic unit of block I/O is a sector. It is interpreted in a
48 * number of contexts in Linux (blk, bio, genhd), but the default is
49 * universally 512 bytes. These symbols are just slightly more
50 * meaningful than the bare numbers they represent.
51 */
52#define SECTOR_SHIFT 9
53#define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
54
df111be6
AE
55/* It might be useful to have this defined elsewhere too */
56
57#define U64_MAX ((u64) (~0ULL))
58
f0f8cef5
AE
59#define RBD_DRV_NAME "rbd"
60#define RBD_DRV_NAME_LONG "rbd (rados block device)"
602adf40
YS
61
62#define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */
63
602adf40 64#define RBD_MAX_SNAP_NAME_LEN 32
35d489f9 65#define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
602adf40
YS
66#define RBD_MAX_OPT_LEN 1024
67
68#define RBD_SNAP_HEAD_NAME "-"
69
1e130199
AE
70#define RBD_IMAGE_ID_LEN_MAX 64
71#define RBD_OBJ_PREFIX_LEN_MAX 64
589d30e0 72
81a89793
AE
73/*
74 * An RBD device name will be "rbd#", where the "rbd" comes from
75 * RBD_DRV_NAME above, and # is a unique integer identifier.
76 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
77 * enough to hold all possible device names.
78 */
602adf40 79#define DEV_NAME_LEN 32
81a89793 80#define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
602adf40 81
cc0538b6 82#define RBD_READ_ONLY_DEFAULT false
59c2be1e 83
602adf40
YS
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These fields never change for a given rbd image */
	char *object_prefix;	/* prefix used to build data object names */
	u64 features;		/* feature bits; always 0 for format 1 images */
	__u8 obj_order;		/* object size is (1 << obj_order) bytes */
	__u8 crypt_type;	/* on-disk crypt type field */
	__u8 comp_type;		/* on-disk compression type field */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;		/* image size in bytes */
	struct ceph_snap_context *snapc;	/* snapshot context (ids) */
	char *snap_names;	/* NUL-terminated names, copied back to back */
	u64 *snap_sizes;	/* one image size per snapshot */

	u64 obj_version;	/* NOTE(review): presumably the version of the
				 * on-disk header object — confirm with users */
};
103
/* User-specified mapping options (parsed by parse_rbd_opts_token()). */
struct rbd_options {
	bool read_only;		/* map the image read-only */
};
107
108/*
f0f8cef5 109 * an instance of the client. multiple devices may share an rbd client.
602adf40
YS
110 */
111struct rbd_client {
112 struct ceph_client *client;
113 struct kref kref;
114 struct list_head node;
115};
116
/*
 * a request completion status
 */
struct rbd_req_status {
	int done;	/* nonzero once this sub-request has completed */
	int rc;		/* completion result code */
	u64 bytes;	/* bytes transferred */
};
125
126/*
127 * a collection of requests
128 */
129struct rbd_req_coll {
130 int total;
131 int num_done;
132 struct kref kref;
133 struct rbd_req_status status[0];
602adf40
YS
134};
135
/*
 * a single io request
 */
struct rbd_request {
	struct request *rq;		/* blk layer request */
	struct bio *bio;		/* cloned bio */
	struct page **pages;		/* list of used pages */
	u64 len;			/* request length in bytes */
	int coll_index;			/* this request's slot in coll */
	struct rbd_req_coll *coll;	/* owning collection, may be NULL */
};
147
/* In-memory record of a single image snapshot. */
struct rbd_snap {
	struct device dev;		/* sysfs device for this snapshot */
	const char *name;		/* snapshot name */
	u64 size;			/* image size for this snapshot */
	struct list_head node;		/* entry on rbd_device->snaps */
	u64 id;				/* snapshot id */
	u64 features;			/* feature bits for this snapshot */
};
156
/* What is currently mapped: either the base image or one snapshot. */
struct rbd_mapping {
	char *snap_name;	/* mapped snapshot name, or RBD_SNAP_HEAD_NAME */
	u64 snap_id;		/* CEPH_NOSNAP when mapping the base image */
	u64 size;		/* size of the mapped image in bytes */
	u64 features;		/* feature bits of what is mapped */
	bool snap_exists;	/* false when mapping the base image */
	bool read_only;		/* snapshots are always mapped read-only */
};
165
/*
 * a single device
 */
struct rbd_device {
	int dev_id;		/* blkdev unique id */

	int major;		/* blkdev assigned major */
	struct gendisk *disk;	/* blkdev's gendisk and rq */

	u32 image_format;	/* Either 1 or 2 */
	struct rbd_options rbd_opts;	/* options given at map time */
	struct rbd_client *rbd_client;	/* possibly shared ceph client */

	char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t lock;	/* queue lock */

	struct rbd_image_header header;	/* parsed image header */
	char *image_id;		/* image id (format 2) */
	size_t image_id_len;
	char *image_name;	/* user-visible image name */
	size_t image_name_len;
	char *header_name;	/* name of the header object */
	char *pool_name;	/* pool the image lives in */
	int pool_id;		/* numeric id of that pool */

	struct ceph_osd_event *watch_event;	/* header watch event */
	struct ceph_osd_request *watch_request;	/* lingering watch request */

	/* protects updating the header */
	struct rw_semaphore header_rwsem;

	struct rbd_mapping mapping;	/* what is currently mapped */

	struct list_head node;	/* entry on rbd_dev_list */

	/* list of snapshots */
	struct list_head snaps;

	/* sysfs related */
	struct device dev;
};
208
static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Forward declarations for helpers defined later in this file */
static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);

static void rbd_dev_release(struct device *dev);
static void __rbd_remove_snap_dev(struct rbd_snap *snap);

/* Handlers for the write-only /sys/bus/rbd/{add,remove} attributes */
static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);

static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
	__ATTR_NULL
};

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_attrs	= rbd_bus_attrs,
};

/* Empty release: rbd_root_dev is statically allocated, never freed. */
static void rbd_root_dev_release(struct device *dev)
{
}

/* Parent device of all rbd devices in sysfs. */
static struct device rbd_root_dev = {
	.init_name =	"rbd",
	.release =	rbd_root_dev_release,
};
247
/*
 * rbd_assert() reports the failed expression and BUGs when RBD_DEBUG
 * is defined; otherwise it compiles away to nothing.
 */
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
dfc5606d 260
dfc5606d
YS
261static struct device *rbd_get_dev(struct rbd_device *rbd_dev)
262{
263 return get_device(&rbd_dev->dev);
264}
265
266static void rbd_put_dev(struct rbd_device *rbd_dev)
267{
268 put_device(&rbd_dev->dev);
269}
602adf40 270
1fe5e993 271static int rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver);
59c2be1e 272
602adf40
YS
273static int rbd_open(struct block_device *bdev, fmode_t mode)
274{
f0f8cef5 275 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
602adf40 276
f84344f3 277 if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
602adf40
YS
278 return -EROFS;
279
340c7a2b 280 rbd_get_dev(rbd_dev);
f84344f3 281 set_device_ro(bdev, rbd_dev->mapping.read_only);
340c7a2b 282
602adf40
YS
283 return 0;
284}
285
dfc5606d
YS
286static int rbd_release(struct gendisk *disk, fmode_t mode)
287{
288 struct rbd_device *rbd_dev = disk->private_data;
289
290 rbd_put_dev(rbd_dev);
291
292 return 0;
293}
294
602adf40
YS
/* Block device operations: open/release only; no ioctl support yet. */
static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};
300
301/*
302 * Initialize an rbd client instance.
43ae4701 303 * We own *ceph_opts.
602adf40 304 */
f8c38929 305static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
602adf40
YS
306{
307 struct rbd_client *rbdc;
308 int ret = -ENOMEM;
309
310 dout("rbd_client_create\n");
311 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
312 if (!rbdc)
313 goto out_opt;
314
315 kref_init(&rbdc->kref);
316 INIT_LIST_HEAD(&rbdc->node);
317
bc534d86
AE
318 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
319
43ae4701 320 rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
602adf40 321 if (IS_ERR(rbdc->client))
bc534d86 322 goto out_mutex;
43ae4701 323 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
602adf40
YS
324
325 ret = ceph_open_session(rbdc->client);
326 if (ret < 0)
327 goto out_err;
328
432b8587 329 spin_lock(&rbd_client_list_lock);
602adf40 330 list_add_tail(&rbdc->node, &rbd_client_list);
432b8587 331 spin_unlock(&rbd_client_list_lock);
602adf40 332
bc534d86
AE
333 mutex_unlock(&ctl_mutex);
334
602adf40
YS
335 dout("rbd_client_create created %p\n", rbdc);
336 return rbdc;
337
338out_err:
339 ceph_destroy_client(rbdc->client);
bc534d86
AE
340out_mutex:
341 mutex_unlock(&ctl_mutex);
602adf40
YS
342 kfree(rbdc);
343out_opt:
43ae4701
AE
344 if (ceph_opts)
345 ceph_destroy_options(ceph_opts);
28f259b7 346 return ERR_PTR(ret);
602adf40
YS
347}
348
349/*
1f7ba331
AE
350 * Find a ceph client with specific addr and configuration. If
351 * found, bump its reference count.
602adf40 352 */
1f7ba331 353static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
602adf40
YS
354{
355 struct rbd_client *client_node;
1f7ba331 356 bool found = false;
602adf40 357
43ae4701 358 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
602adf40
YS
359 return NULL;
360
1f7ba331
AE
361 spin_lock(&rbd_client_list_lock);
362 list_for_each_entry(client_node, &rbd_client_list, node) {
363 if (!ceph_compare_options(ceph_opts, client_node->client)) {
364 kref_get(&client_node->kref);
365 found = true;
366 break;
367 }
368 }
369 spin_unlock(&rbd_client_list_lock);
370
371 return found ? client_node : NULL;
602adf40
YS
372}
373
59c2be1e
YS
374/*
375 * mount options
376 */
377enum {
59c2be1e
YS
378 Opt_last_int,
379 /* int args above */
380 Opt_last_string,
381 /* string args above */
cc0538b6
AE
382 Opt_read_only,
383 Opt_read_write,
384 /* Boolean args above */
385 Opt_last_bool,
59c2be1e
YS
386};
387
43ae4701 388static match_table_t rbd_opts_tokens = {
59c2be1e
YS
389 /* int args above */
390 /* string args above */
f84344f3 391 {Opt_read_only, "mapping.read_only"},
cc0538b6
AE
392 {Opt_read_only, "ro"}, /* Alternate spelling */
393 {Opt_read_write, "read_write"},
394 {Opt_read_write, "rw"}, /* Alternate spelling */
395 /* Boolean args above */
59c2be1e
YS
396 {-1, NULL}
397};
398
399static int parse_rbd_opts_token(char *c, void *private)
400{
43ae4701 401 struct rbd_options *rbd_opts = private;
59c2be1e
YS
402 substring_t argstr[MAX_OPT_ARGS];
403 int token, intval, ret;
404
43ae4701 405 token = match_token(c, rbd_opts_tokens, argstr);
59c2be1e
YS
406 if (token < 0)
407 return -EINVAL;
408
409 if (token < Opt_last_int) {
410 ret = match_int(&argstr[0], &intval);
411 if (ret < 0) {
412 pr_err("bad mount option arg (not int) "
413 "at '%s'\n", c);
414 return ret;
415 }
416 dout("got int token %d val %d\n", token, intval);
417 } else if (token > Opt_last_int && token < Opt_last_string) {
418 dout("got string token %d val %s\n", token,
419 argstr[0].from);
cc0538b6
AE
420 } else if (token > Opt_last_string && token < Opt_last_bool) {
421 dout("got Boolean token %d\n", token);
59c2be1e
YS
422 } else {
423 dout("got token %d\n", token);
424 }
425
426 switch (token) {
cc0538b6
AE
427 case Opt_read_only:
428 rbd_opts->read_only = true;
429 break;
430 case Opt_read_write:
431 rbd_opts->read_only = false;
432 break;
59c2be1e 433 default:
aafb230e
AE
434 rbd_assert(false);
435 break;
59c2be1e
YS
436 }
437 return 0;
438}
439
602adf40
YS
440/*
441 * Get a ceph client with specific addr and configuration, if one does
442 * not exist create it.
443 */
f8c38929
AE
444static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
445 size_t mon_addr_len, char *options)
602adf40 446{
f8c38929 447 struct rbd_options *rbd_opts = &rbd_dev->rbd_opts;
43ae4701 448 struct ceph_options *ceph_opts;
f8c38929 449 struct rbd_client *rbdc;
59c2be1e 450
cc0538b6 451 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
602adf40 452
43ae4701
AE
453 ceph_opts = ceph_parse_options(options, mon_addr,
454 mon_addr + mon_addr_len,
455 parse_rbd_opts_token, rbd_opts);
f8c38929
AE
456 if (IS_ERR(ceph_opts))
457 return PTR_ERR(ceph_opts);
602adf40 458
1f7ba331 459 rbdc = rbd_client_find(ceph_opts);
602adf40 460 if (rbdc) {
602adf40 461 /* using an existing client */
43ae4701 462 ceph_destroy_options(ceph_opts);
f8c38929
AE
463 } else {
464 rbdc = rbd_client_create(ceph_opts);
465 if (IS_ERR(rbdc))
466 return PTR_ERR(rbdc);
602adf40 467 }
f8c38929 468 rbd_dev->rbd_client = rbdc;
602adf40 469
f8c38929 470 return 0;
602adf40
YS
471}
472
/*
 * Destroy ceph client.  Called when the last reference is dropped.
 *
 * Takes rbd_client_list_lock itself to unlink the client from the
 * list, so the caller must NOT hold that lock.  (An earlier comment
 * here claimed the opposite; the code below acquires the lock.)
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("rbd_release_client %p\n", rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}
490
491/*
492 * Drop reference to ceph client node. If it's not referenced anymore, release
493 * it.
494 */
495static void rbd_put_client(struct rbd_device *rbd_dev)
496{
497 kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
498 rbd_dev->rbd_client = NULL;
602adf40
YS
499}
500
1fec7093
YS
501/*
502 * Destroy requests collection
503 */
504static void rbd_coll_release(struct kref *kref)
505{
506 struct rbd_req_coll *coll =
507 container_of(kref, struct rbd_req_coll, kref);
508
509 dout("rbd_coll_release %p\n", coll);
510 kfree(coll);
511}
602adf40 512
a30b71b9
AE
513static bool rbd_image_format_valid(u32 image_format)
514{
515 return image_format == 1 || image_format == 2;
516}
517
8e94af8e
AE
/*
 * Sanity-check an on-disk (format 1) image header before trusting its
 * counts and lengths.  Returns false if the header is malformed or
 * describes something too large to process.
 */
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header (snapshot id array plus name area) must also be
	 * representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
546
602adf40
YS
547/*
548 * Create a new header structure, translate header format from the on-disk
549 * header.
550 */
551static int rbd_header_from_disk(struct rbd_image_header *header,
4156d998 552 struct rbd_image_header_ondisk *ondisk)
602adf40 553{
ccece235 554 u32 snap_count;
58c17b0e 555 size_t len;
d2bb24e5 556 size_t size;
621901d6 557 u32 i;
602adf40 558
6a52325f
AE
559 memset(header, 0, sizeof (*header));
560
103a150f
AE
561 snap_count = le32_to_cpu(ondisk->snap_count);
562
58c17b0e
AE
563 len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
564 header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
6a52325f 565 if (!header->object_prefix)
602adf40 566 return -ENOMEM;
58c17b0e
AE
567 memcpy(header->object_prefix, ondisk->object_prefix, len);
568 header->object_prefix[len] = '\0';
00f1f36f 569
602adf40 570 if (snap_count) {
f785cc1d
AE
571 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
572
621901d6
AE
573 /* Save a copy of the snapshot names */
574
f785cc1d
AE
575 if (snap_names_len > (u64) SIZE_MAX)
576 return -EIO;
577 header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
602adf40 578 if (!header->snap_names)
6a52325f 579 goto out_err;
f785cc1d
AE
580 /*
581 * Note that rbd_dev_v1_header_read() guarantees
582 * the ondisk buffer we're working with has
583 * snap_names_len bytes beyond the end of the
584 * snapshot id array, this memcpy() is safe.
585 */
586 memcpy(header->snap_names, &ondisk->snaps[snap_count],
587 snap_names_len);
6a52325f 588
621901d6
AE
589 /* Record each snapshot's size */
590
d2bb24e5
AE
591 size = snap_count * sizeof (*header->snap_sizes);
592 header->snap_sizes = kmalloc(size, GFP_KERNEL);
602adf40 593 if (!header->snap_sizes)
6a52325f 594 goto out_err;
621901d6
AE
595 for (i = 0; i < snap_count; i++)
596 header->snap_sizes[i] =
597 le64_to_cpu(ondisk->snaps[i].image_size);
602adf40 598 } else {
ccece235 599 WARN_ON(ondisk->snap_names_len);
602adf40
YS
600 header->snap_names = NULL;
601 header->snap_sizes = NULL;
602 }
849b4260 603
34b13184 604 header->features = 0; /* No features support in v1 images */
602adf40
YS
605 header->obj_order = ondisk->options.order;
606 header->crypt_type = ondisk->options.crypt_type;
607 header->comp_type = ondisk->options.comp_type;
6a52325f 608
621901d6
AE
609 /* Allocate and fill in the snapshot context */
610
f84344f3 611 header->image_size = le64_to_cpu(ondisk->image_size);
6a52325f
AE
612 size = sizeof (struct ceph_snap_context);
613 size += snap_count * sizeof (header->snapc->snaps[0]);
614 header->snapc = kzalloc(size, GFP_KERNEL);
615 if (!header->snapc)
616 goto out_err;
602adf40
YS
617
618 atomic_set(&header->snapc->nref, 1);
505cbb9b 619 header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
602adf40 620 header->snapc->num_snaps = snap_count;
621901d6
AE
621 for (i = 0; i < snap_count; i++)
622 header->snapc->snaps[i] =
623 le64_to_cpu(ondisk->snaps[i].id);
602adf40
YS
624
625 return 0;
626
6a52325f 627out_err:
849b4260 628 kfree(header->snap_sizes);
ccece235 629 header->snap_sizes = NULL;
602adf40 630 kfree(header->snap_names);
ccece235 631 header->snap_names = NULL;
6a52325f
AE
632 kfree(header->object_prefix);
633 header->object_prefix = NULL;
ccece235 634
00f1f36f 635 return -ENOMEM;
602adf40
YS
636}
637
8836b995 638static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
602adf40 639{
602adf40 640
e86924a8 641 struct rbd_snap *snap;
602adf40 642
e86924a8
AE
643 list_for_each_entry(snap, &rbd_dev->snaps, node) {
644 if (!strcmp(snap_name, snap->name)) {
645 rbd_dev->mapping.snap_id = snap->id;
646 rbd_dev->mapping.size = snap->size;
34b13184 647 rbd_dev->mapping.features = snap->features;
602adf40 648
e86924a8 649 return 0;
00f1f36f 650 }
00f1f36f 651 }
e86924a8 652
00f1f36f 653 return -ENOENT;
602adf40
YS
654}
655
5ed16177 656static int rbd_dev_set_mapping(struct rbd_device *rbd_dev, char *snap_name)
602adf40 657{
78dc447d 658 int ret;
602adf40 659
4e1105a2 660 if (!memcmp(snap_name, RBD_SNAP_HEAD_NAME,
cc9d734c 661 sizeof (RBD_SNAP_HEAD_NAME))) {
f84344f3 662 rbd_dev->mapping.snap_id = CEPH_NOSNAP;
99c1f08f 663 rbd_dev->mapping.size = rbd_dev->header.image_size;
34b13184 664 rbd_dev->mapping.features = rbd_dev->header.features;
f84344f3
AE
665 rbd_dev->mapping.snap_exists = false;
666 rbd_dev->mapping.read_only = rbd_dev->rbd_opts.read_only;
e86924a8 667 ret = 0;
602adf40 668 } else {
8836b995 669 ret = snap_by_name(rbd_dev, snap_name);
602adf40
YS
670 if (ret < 0)
671 goto done;
f84344f3
AE
672 rbd_dev->mapping.snap_exists = true;
673 rbd_dev->mapping.read_only = true;
602adf40 674 }
4e1105a2 675 rbd_dev->mapping.snap_name = snap_name;
602adf40 676done:
602adf40
YS
677 return ret;
678}
679
680static void rbd_header_free(struct rbd_image_header *header)
681{
849b4260 682 kfree(header->object_prefix);
d78fd7ae 683 header->object_prefix = NULL;
602adf40 684 kfree(header->snap_sizes);
d78fd7ae 685 header->snap_sizes = NULL;
849b4260 686 kfree(header->snap_names);
d78fd7ae 687 header->snap_names = NULL;
d1d25646 688 ceph_put_snap_context(header->snapc);
d78fd7ae 689 header->snapc = NULL;
602adf40
YS
690}
691
65ccfe21 692static char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
602adf40 693{
65ccfe21
AE
694 char *name;
695 u64 segment;
696 int ret;
602adf40 697
65ccfe21
AE
698 name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
699 if (!name)
700 return NULL;
701 segment = offset >> rbd_dev->header.obj_order;
702 ret = snprintf(name, RBD_MAX_SEG_NAME_LEN, "%s.%012llx",
703 rbd_dev->header.object_prefix, segment);
704 if (ret < 0 || ret >= RBD_MAX_SEG_NAME_LEN) {
705 pr_err("error formatting segment name for #%llu (%d)\n",
706 segment, ret);
707 kfree(name);
708 name = NULL;
709 }
602adf40 710
65ccfe21
AE
711 return name;
712}
602adf40 713
65ccfe21
AE
714static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
715{
716 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
602adf40 717
65ccfe21
AE
718 return offset & (segment_size - 1);
719}
720
721static u64 rbd_segment_length(struct rbd_device *rbd_dev,
722 u64 offset, u64 length)
723{
724 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
725
726 offset &= segment_size - 1;
727
aafb230e 728 rbd_assert(length <= U64_MAX - offset);
65ccfe21
AE
729 if (offset + length > segment_size)
730 length = segment_size - offset;
731
732 return length;
602adf40
YS
733}
734
1fec7093
YS
735static int rbd_get_num_segments(struct rbd_image_header *header,
736 u64 ofs, u64 len)
737{
df111be6
AE
738 u64 start_seg;
739 u64 end_seg;
740
741 if (!len)
742 return 0;
743 if (len - 1 > U64_MAX - ofs)
744 return -ERANGE;
745
746 start_seg = ofs >> header->obj_order;
747 end_seg = (ofs + len - 1) >> header->obj_order;
748
1fec7093
YS
749 return end_seg - start_seg + 1;
750}
751
029bcbd8
JD
752/*
753 * returns the size of an object in the image
754 */
755static u64 rbd_obj_bytes(struct rbd_image_header *header)
756{
757 return 1 << header->obj_order;
758}
759
602adf40
YS
760/*
761 * bio helpers
762 */
763
764static void bio_chain_put(struct bio *chain)
765{
766 struct bio *tmp;
767
768 while (chain) {
769 tmp = chain;
770 chain = chain->bi_next;
771 bio_put(tmp);
772 }
773}
774
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;	/* byte position within the whole chain */

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				/*
				 * Zero from start_ofs (or from the start of
				 * this segment, whichever is later) to the
				 * end of the segment.
				 */
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}
801
/*
 * bio_chain_clone - clone a chain of bios up to a certain length.
 * might return a bio_pair that will need to be released.
 *
 * On return *old points at the first unconsumed bio, *next at the
 * continuation point (either the remainder of a split bio or the next
 * original bio).  Returns the cloned chain, or NULL on allocation or
 * split failure (already-cloned bios are released).
 */
static struct bio *bio_chain_clone(struct bio **old, struct bio **next,
				   struct bio_pair **bp,
				   int len, gfp_t gfpmask)
{
	struct bio *old_chain = *old;
	struct bio *new_chain = NULL;
	struct bio *tail;
	int total = 0;

	/* Release any bio_pair left over from a previous call */
	if (*bp) {
		bio_pair_release(*bp);
		*bp = NULL;
	}

	while (old_chain && (total < len)) {
		struct bio *tmp;

		tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs);
		if (!tmp)
			goto err_out;
		gfpmask &= ~__GFP_WAIT;	/* can't wait after the first */

		if (total + old_chain->bi_size > len) {
			struct bio_pair *bp;

			/*
			 * this split can only happen with a single paged bio,
			 * split_bio will BUG_ON if this is not the case
			 */
			dout("bio_chain_clone split! total=%d remaining=%d"
			     "bi_size=%u\n",
			     total, len - total, old_chain->bi_size);

			/* split the bio. We'll release it either in the next
			   call, or it will have to be released outside */
			bp = bio_split(old_chain, (len - total) / SECTOR_SIZE);
			if (!bp)
				goto err_out;

			__bio_clone(tmp, &bp->bio1);

			*next = &bp->bio2;
		} else {
			__bio_clone(tmp, old_chain);
			*next = old_chain->bi_next;
		}

		tmp->bi_bdev = NULL;
		tmp->bi_next = NULL;
		/* Append the clone to the tail of the new chain */
		if (new_chain)
			tail->bi_next = tmp;
		else
			new_chain = tmp;
		tail = tmp;
		old_chain = old_chain->bi_next;

		total += tmp->bi_size;
	}

	rbd_assert(total == len);

	*old = old_chain;

	return new_chain;

err_out:
	dout("bio_chain_clone with err\n");
	bio_chain_put(new_chain);
	return NULL;
}
876
877/*
878 * helpers for osd request op vectors.
879 */
57cfc106
AE
880static struct ceph_osd_req_op *rbd_create_rw_ops(int num_ops,
881 int opcode, u32 payload_len)
602adf40 882{
57cfc106
AE
883 struct ceph_osd_req_op *ops;
884
885 ops = kzalloc(sizeof (*ops) * (num_ops + 1), GFP_NOIO);
886 if (!ops)
887 return NULL;
888
889 ops[0].op = opcode;
890
602adf40
YS
891 /*
892 * op extent offset and length will be set later on
893 * in calc_raw_layout()
894 */
57cfc106
AE
895 ops[0].payload_len = payload_len;
896
897 return ops;
602adf40
YS
898}
899
/* Free an op vector allocated by rbd_create_rw_ops(). */
static void rbd_destroy_ops(struct ceph_osd_req_op *ops)
{
	kfree(ops);
}
904
1fec7093
YS
/*
 * Record completion of slot @index of a request collection.
 *
 * Sub-request responses may arrive out of order; block-layer
 * completion is only advanced over the contiguous prefix of finished
 * slots, under the request queue lock.  One collection reference is
 * dropped per slot actually completed to the block layer.
 */
static void rbd_coll_end_req_index(struct request *rq,
				   struct rbd_req_coll *coll,
				   int index,
				   int ret, u64 len)
{
	struct request_queue *q;
	int min, max, i;

	dout("rbd_coll_end_req_index %p index %d ret %d len %llu\n",
	     coll, index, ret, (unsigned long long) len);

	if (!rq)
		return;

	if (!coll) {
		/* No collection: this single response finishes the request */
		blk_end_request(rq, ret, len);
		return;
	}

	q = rq->q;

	spin_lock_irq(q->queue_lock);
	coll->status[index].done = 1;
	coll->status[index].rc = ret;
	coll->status[index].bytes = len;
	max = min = coll->num_done;
	/* Find how far the contiguous done prefix now extends */
	while (max < coll->total && coll->status[max].done)
		max++;

	for (i = min; i < max; i++) {
		__blk_end_request(rq, coll->status[i].rc,
				  coll->status[i].bytes);
		coll->num_done++;
		kref_put(&coll->kref, rbd_coll_release);
	}
	spin_unlock_irq(q->queue_lock);
}
942
/* Complete the collection slot belonging to @req (if it has one). */
static void rbd_coll_end_req(struct rbd_request *req,
			     int ret, u64 len)
{
	rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, len);
}
948
602adf40
YS
949/*
950 * Send ceph osd request
951 */
952static int rbd_do_request(struct request *rq,
0ce1a794 953 struct rbd_device *rbd_dev,
602adf40
YS
954 struct ceph_snap_context *snapc,
955 u64 snapid,
aded07ea 956 const char *object_name, u64 ofs, u64 len,
602adf40
YS
957 struct bio *bio,
958 struct page **pages,
959 int num_pages,
960 int flags,
961 struct ceph_osd_req_op *ops,
1fec7093
YS
962 struct rbd_req_coll *coll,
963 int coll_index,
602adf40 964 void (*rbd_cb)(struct ceph_osd_request *req,
59c2be1e
YS
965 struct ceph_msg *msg),
966 struct ceph_osd_request **linger_req,
967 u64 *ver)
602adf40
YS
968{
969 struct ceph_osd_request *req;
970 struct ceph_file_layout *layout;
971 int ret;
972 u64 bno;
973 struct timespec mtime = CURRENT_TIME;
974 struct rbd_request *req_data;
975 struct ceph_osd_request_head *reqhead;
1dbb4399 976 struct ceph_osd_client *osdc;
602adf40 977
602adf40 978 req_data = kzalloc(sizeof(*req_data), GFP_NOIO);
1fec7093
YS
979 if (!req_data) {
980 if (coll)
981 rbd_coll_end_req_index(rq, coll, coll_index,
982 -ENOMEM, len);
983 return -ENOMEM;
984 }
985
986 if (coll) {
987 req_data->coll = coll;
988 req_data->coll_index = coll_index;
989 }
602adf40 990
bd919d45
AE
991 dout("rbd_do_request object_name=%s ofs=%llu len=%llu\n", object_name,
992 (unsigned long long) ofs, (unsigned long long) len);
602adf40 993
0ce1a794 994 osdc = &rbd_dev->rbd_client->client->osdc;
1dbb4399
AE
995 req = ceph_osdc_alloc_request(osdc, flags, snapc, ops,
996 false, GFP_NOIO, pages, bio);
4ad12621 997 if (!req) {
4ad12621 998 ret = -ENOMEM;
602adf40
YS
999 goto done_pages;
1000 }
1001
1002 req->r_callback = rbd_cb;
1003
1004 req_data->rq = rq;
1005 req_data->bio = bio;
1006 req_data->pages = pages;
1007 req_data->len = len;
1008
1009 req->r_priv = req_data;
1010
1011 reqhead = req->r_request->front.iov_base;
1012 reqhead->snapid = cpu_to_le64(CEPH_NOSNAP);
1013
aded07ea 1014 strncpy(req->r_oid, object_name, sizeof(req->r_oid));
602adf40
YS
1015 req->r_oid_len = strlen(req->r_oid);
1016
1017 layout = &req->r_file_layout;
1018 memset(layout, 0, sizeof(*layout));
1019 layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
1020 layout->fl_stripe_count = cpu_to_le32(1);
1021 layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
0ce1a794 1022 layout->fl_pg_pool = cpu_to_le32(rbd_dev->pool_id);
1dbb4399
AE
1023 ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
1024 req, ops);
602adf40
YS
1025
1026 ceph_osdc_build_request(req, ofs, &len,
1027 ops,
1028 snapc,
1029 &mtime,
1030 req->r_oid, req->r_oid_len);
602adf40 1031
59c2be1e 1032 if (linger_req) {
1dbb4399 1033 ceph_osdc_set_request_linger(osdc, req);
59c2be1e
YS
1034 *linger_req = req;
1035 }
1036
1dbb4399 1037 ret = ceph_osdc_start_request(osdc, req, false);
602adf40
YS
1038 if (ret < 0)
1039 goto done_err;
1040
1041 if (!rbd_cb) {
1dbb4399 1042 ret = ceph_osdc_wait_request(osdc, req);
59c2be1e
YS
1043 if (ver)
1044 *ver = le64_to_cpu(req->r_reassert_version.version);
bd919d45
AE
1045 dout("reassert_ver=%llu\n",
1046 (unsigned long long)
1047 le64_to_cpu(req->r_reassert_version.version));
602adf40
YS
1048 ceph_osdc_put_request(req);
1049 }
1050 return ret;
1051
1052done_err:
1053 bio_chain_put(req_data->bio);
1054 ceph_osdc_put_request(req);
1055done_pages:
1fec7093 1056 rbd_coll_end_req(req_data, ret, len);
602adf40 1057 kfree(req_data);
602adf40
YS
1058 return ret;
1059}
1060
/*
 * Ceph osd op callback
 *
 * Parses the reply, applies read special-cases (nonexistent objects
 * and short reads are zero-filled), then completes the sub-request
 * and releases the osd request and our per-request data.
 */
static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
{
	struct rbd_request *req_data = req->r_priv;
	struct ceph_osd_reply_head *replyhead;
	struct ceph_osd_op *op;
	__s32 rc;
	u64 bytes;
	int read_op;

	/* parse reply */
	replyhead = msg->front.iov_base;
	WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
	op = (void *)(replyhead + 1);
	rc = le32_to_cpu(replyhead->result);
	bytes = le64_to_cpu(op->extent.length);
	read_op = (le16_to_cpu(op->op) == CEPH_OSD_OP_READ);

	dout("rbd_req_cb bytes=%llu readop=%d rc=%d\n",
	     (unsigned long long) bytes, read_op, (int) rc);

	if (rc == -ENOENT && read_op) {
		/* Reading a nonexistent object returns zeroes */
		zero_bio_chain(req_data->bio, 0);
		rc = 0;
	} else if (rc == 0 && read_op && bytes < req_data->len) {
		/* Short read: zero-fill the unreturned tail */
		zero_bio_chain(req_data->bio, bytes);
		bytes = req_data->len;
	}

	rbd_coll_end_req(req_data, rc, bytes);

	if (req_data->bio)
		bio_chain_put(req_data->bio);

	ceph_osdc_put_request(req);
	kfree(req_data);
}
1100
59c2be1e
YS
/*
 * Fire-and-forget completion callback: nothing to report, just drop
 * the reference on the OSD request (used e.g. for notify acks).
 */
static void rbd_simple_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
{
	ceph_osdc_put_request(req);
}
1105
602adf40
YS
/*
 * Do a synchronous ceph osd operation
 *
 * Allocates a temporary page vector large enough to hold "inbound_size"
 * bytes starting at offset "ofs", runs the request to completion via
 * rbd_do_request() (no callback => rbd_do_request waits), and for read
 * requests copies the received data into "inbound".
 *
 * Returns a negative errno, or for reads the byte count reported by
 * the OSD.  "linger_req" and "ver" are passed through to
 * rbd_do_request() and may be NULL.
 */
static int rbd_req_sync_op(struct rbd_device *rbd_dev,
			   struct ceph_snap_context *snapc,
			   u64 snapid,
			   int flags,
			   struct ceph_osd_req_op *ops,
			   const char *object_name,
			   u64 ofs, u64 inbound_size,
			   char *inbound,
			   struct ceph_osd_request **linger_req,
			   u64 *ver)
{
	int ret;
	struct page **pages;
	int num_pages;

	rbd_assert(ops != NULL);

	num_pages = calc_pages_for(ofs, inbound_size);
	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* NULL callback makes rbd_do_request() wait for completion */
	ret = rbd_do_request(NULL, rbd_dev, snapc, snapid,
			  object_name, ofs, inbound_size, NULL,
			  pages, num_pages,
			  flags,
			  ops,
			  NULL, 0,
			  NULL,
			  linger_req, ver);
	if (ret < 0)
		goto done;

	/* on success, ret is the number of bytes the OSD returned */
	if ((flags & CEPH_OSD_FLAG_READ) && inbound)
		ret = ceph_copy_from_page_vector(pages, inbound, ofs, ret);

done:
	ceph_release_page_vector(pages, num_pages);
	return ret;
}
1149
/*
 * Do an asynchronous ceph osd operation
 *
 * Issues a single read or write covering exactly one object segment
 * of the image.  The caller has already split the request so that
 * (ofs, len) does not cross a segment boundary (asserted below).
 * Completion is reported through rbd_req_cb() and the collection
 * entry (coll, coll_index).
 */
static int rbd_do_op(struct request *rq,
		     struct rbd_device *rbd_dev,
		     struct ceph_snap_context *snapc,
		     u64 snapid,
		     int opcode, int flags,
		     u64 ofs, u64 len,
		     struct bio *bio,
		     struct rbd_req_coll *coll,
		     int coll_index)
{
	char *seg_name;
	u64 seg_ofs;
	u64 seg_len;
	int ret;
	struct ceph_osd_req_op *ops;
	u32 payload_len;

	seg_name = rbd_segment_name(rbd_dev, ofs);
	if (!seg_name)
		return -ENOMEM;
	seg_len = rbd_segment_length(rbd_dev, ofs, len);
	seg_ofs = rbd_segment_offset(rbd_dev, ofs);

	/* only writes carry a data payload */
	payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0);

	ret = -ENOMEM;
	ops = rbd_create_rw_ops(1, opcode, payload_len);
	if (!ops)
		goto done;

	/* we've taken care of segment sizes earlier when we
	   cloned the bios. We should never have a segment
	   truncated at this point */
	rbd_assert(seg_len == len);

	ret = rbd_do_request(rq, rbd_dev, snapc, snapid,
			     seg_name, seg_ofs, seg_len,
			     bio,
			     NULL, 0,
			     flags,
			     ops,
			     coll, coll_index,
			     rbd_req_cb, 0, NULL);

	/* ops can be freed here: rbd_do_request copied what it needs */
	rbd_destroy_ops(ops);
done:
	kfree(seg_name);
	return ret;
}
1202
1203/*
1204 * Request async osd write
1205 */
1206static int rbd_req_write(struct request *rq,
1207 struct rbd_device *rbd_dev,
1208 struct ceph_snap_context *snapc,
1209 u64 ofs, u64 len,
1fec7093
YS
1210 struct bio *bio,
1211 struct rbd_req_coll *coll,
1212 int coll_index)
602adf40
YS
1213{
1214 return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP,
1215 CEPH_OSD_OP_WRITE,
1216 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
1fec7093 1217 ofs, len, bio, coll, coll_index);
602adf40
YS
1218}
1219
1220/*
1221 * Request async osd read
1222 */
1223static int rbd_req_read(struct request *rq,
1224 struct rbd_device *rbd_dev,
1225 u64 snapid,
1226 u64 ofs, u64 len,
1fec7093
YS
1227 struct bio *bio,
1228 struct rbd_req_coll *coll,
1229 int coll_index)
602adf40
YS
1230{
1231 return rbd_do_op(rq, rbd_dev, NULL,
b06e6a6b 1232 snapid,
602adf40
YS
1233 CEPH_OSD_OP_READ,
1234 CEPH_OSD_FLAG_READ,
1fec7093 1235 ofs, len, bio, coll, coll_index);
602adf40
YS
1236}
1237
1238/*
1239 * Request sync osd read
1240 */
0ce1a794 1241static int rbd_req_sync_read(struct rbd_device *rbd_dev,
602adf40 1242 u64 snapid,
aded07ea 1243 const char *object_name,
602adf40 1244 u64 ofs, u64 len,
59c2be1e
YS
1245 char *buf,
1246 u64 *ver)
602adf40 1247{
913d2fdc
AE
1248 struct ceph_osd_req_op *ops;
1249 int ret;
1250
1251 ops = rbd_create_rw_ops(1, CEPH_OSD_OP_READ, 0);
1252 if (!ops)
1253 return -ENOMEM;
1254
1255 ret = rbd_req_sync_op(rbd_dev, NULL,
b06e6a6b 1256 snapid,
602adf40 1257 CEPH_OSD_FLAG_READ,
913d2fdc
AE
1258 ops, object_name, ofs, len, buf, NULL, ver);
1259 rbd_destroy_ops(ops);
1260
1261 return ret;
602adf40
YS
1262}
1263
/*
 * Acknowledge a watch notification on the header object.
 * (The old comment said "Request sync osd watch", but this op is a
 * NOTIFY_ACK; it is submitted asynchronously and the completion
 * callback simply drops the request.)
 */
static int rbd_req_sync_notify_ack(struct rbd_device *rbd_dev,
				   u64 ver,
				   u64 notify_id)
{
	struct ceph_osd_req_op *ops;
	int ret;

	ops = rbd_create_rw_ops(1, CEPH_OSD_OP_NOTIFY_ACK, 0);
	if (!ops)
		return -ENOMEM;

	/* echo back the header version and notification cookie */
	ops[0].watch.ver = cpu_to_le64(ver);
	/* NOTE(review): cookie is not byte-swapped here, unlike ver —
	 * presumably notify_id arrives already little-endian; verify */
	ops[0].watch.cookie = notify_id;
	ops[0].watch.flag = 0;

	ret = rbd_do_request(NULL, rbd_dev, NULL, CEPH_NOSNAP,
			  rbd_dev->header_name, 0, 0, NULL,
			  NULL, 0,
			  CEPH_OSD_FLAG_READ,
			  ops,
			  NULL, 0,
			  rbd_simple_req_cb, 0, NULL);

	rbd_destroy_ops(ops);
	return ret;
}
1293
1294static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
1295{
0ce1a794 1296 struct rbd_device *rbd_dev = (struct rbd_device *)data;
a71b891b 1297 u64 hver;
13143d2d
SW
1298 int rc;
1299
0ce1a794 1300 if (!rbd_dev)
59c2be1e
YS
1301 return;
1302
bd919d45
AE
1303 dout("rbd_watch_cb %s notify_id=%llu opcode=%u\n",
1304 rbd_dev->header_name, (unsigned long long) notify_id,
1305 (unsigned int) opcode);
1fe5e993 1306 rc = rbd_refresh_header(rbd_dev, &hver);
13143d2d 1307 if (rc)
f0f8cef5 1308 pr_warning(RBD_DRV_NAME "%d got notification but failed to "
0ce1a794 1309 " update snaps: %d\n", rbd_dev->major, rc);
59c2be1e 1310
7f0a24d8 1311 rbd_req_sync_notify_ack(rbd_dev, hver, notify_id);
59c2be1e
YS
1312}
1313
/*
 * Request sync osd watch
 *
 * Registers a watch on the image header object so rbd_watch_cb() is
 * invoked when another client updates the header.  The watch request
 * lingers (kept in rbd_dev->watch_request) so it is re-sent across
 * OSD map changes.
 */
static int rbd_req_sync_watch(struct rbd_device *rbd_dev)
{
	struct ceph_osd_req_op *ops;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	ops = rbd_create_rw_ops(1, CEPH_OSD_OP_WATCH, 0);
	if (!ops)
		return -ENOMEM;

	/* event delivers notifications to rbd_watch_cb(rbd_dev) */
	ret = ceph_osdc_create_event(osdc, rbd_watch_cb, 0,
				     (void *)rbd_dev, &rbd_dev->watch_event);
	if (ret < 0)
		goto fail;

	ops[0].watch.ver = cpu_to_le64(rbd_dev->header.obj_version);
	ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
	ops[0].watch.flag = 1;	/* 1 = register the watch */

	ret = rbd_req_sync_op(rbd_dev, NULL,
			      CEPH_NOSNAP,
			      CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
			      ops,
			      rbd_dev->header_name,
			      0, 0, NULL,
			      &rbd_dev->watch_request, NULL);

	if (ret < 0)
		goto fail_event;

	rbd_destroy_ops(ops);
	return 0;

fail_event:
	/* undo the event registration if the watch op failed */
	ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;
fail:
	rbd_destroy_ops(ops);
	return ret;
}
1357
79e3057c
YS
/*
 * Request sync osd unwatch
 *
 * Counterpart of rbd_req_sync_watch(): sends a WATCH op with flag 0
 * to unregister the header watch, then tears down the local event.
 * Assumes rbd_dev->watch_event is valid (set by a prior successful
 * rbd_req_sync_watch()).
 */
static int rbd_req_sync_unwatch(struct rbd_device *rbd_dev)
{
	struct ceph_osd_req_op *ops;
	int ret;

	ops = rbd_create_rw_ops(1, CEPH_OSD_OP_WATCH, 0);
	if (!ops)
		return -ENOMEM;

	ops[0].watch.ver = 0;
	ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
	ops[0].watch.flag = 0;	/* 0 = unregister the watch */

	ret = rbd_req_sync_op(rbd_dev, NULL,
			      CEPH_NOSNAP,
			      CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
			      ops,
			      rbd_dev->header_name,
			      0, 0, NULL, NULL, NULL);


	rbd_destroy_ops(ops);
	/* cancel the event even if the unwatch op failed */
	ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;
	return ret;
}
1387
/*
 * Synchronous osd object method call
 *
 * Invoke class "class_name" method "method_name" on the named object
 * (CEPH_OSD_OP_CALL), sending "outbound"/"outbound_size" as the call
 * payload and copying up to "inbound_size" reply bytes into
 * "inbound".  "ver", if non-NULL, receives the object version.
 */
static int rbd_req_sync_exec(struct rbd_device *rbd_dev,
			     const char *object_name,
			     const char *class_name,
			     const char *method_name,
			     const char *outbound,
			     size_t outbound_size,
			     char *inbound,
			     size_t inbound_size,
			     int flags,
			     u64 *ver)
{
	struct ceph_osd_req_op *ops;
	int class_name_len = strlen(class_name);
	int method_name_len = strlen(method_name);
	int payload_size;
	int ret;

	/*
	 * Any input parameters required by the method we're calling
	 * will be sent along with the class and method names as
	 * part of the message payload. That data and its size are
	 * supplied via the indata and indata_len fields (named from
	 * the perspective of the server side) in the OSD request
	 * operation.
	 */
	payload_size = class_name_len + method_name_len + outbound_size;
	ops = rbd_create_rw_ops(1, CEPH_OSD_OP_CALL, payload_size);
	if (!ops)
		return -ENOMEM;

	/* NOTE(review): lengths are cast to __u8 below — names longer
	 * than 255 bytes would be silently truncated; verify callers */
	ops[0].cls.class_name = class_name;
	ops[0].cls.class_len = (__u8) class_name_len;
	ops[0].cls.method_name = method_name;
	ops[0].cls.method_len = (__u8) method_name_len;
	ops[0].cls.argc = 0;
	ops[0].cls.indata = outbound;
	ops[0].cls.indata_len = outbound_size;

	ret = rbd_req_sync_op(rbd_dev, NULL,
			       CEPH_NOSNAP,
			       flags, ops,
			       object_name, 0, inbound_size, inbound,
			       NULL, ver);

	rbd_destroy_ops(ops);

	dout("cls_exec returned %d\n", ret);
	return ret;
}
1440
1fec7093
YS
1441static struct rbd_req_coll *rbd_alloc_coll(int num_reqs)
1442{
1443 struct rbd_req_coll *coll =
1444 kzalloc(sizeof(struct rbd_req_coll) +
1445 sizeof(struct rbd_req_status) * num_reqs,
1446 GFP_ATOMIC);
1447
1448 if (!coll)
1449 return NULL;
1450 coll->total = num_reqs;
1451 kref_init(&coll->kref);
1452 return coll;
1453}
1454
602adf40
YS
1455/*
1456 * block device queue callback
1457 */
1458static void rbd_rq_fn(struct request_queue *q)
1459{
1460 struct rbd_device *rbd_dev = q->queuedata;
1461 struct request *rq;
1462 struct bio_pair *bp = NULL;
1463
00f1f36f 1464 while ((rq = blk_fetch_request(q))) {
602adf40
YS
1465 struct bio *bio;
1466 struct bio *rq_bio, *next_bio = NULL;
1467 bool do_write;
bd919d45
AE
1468 unsigned int size;
1469 u64 op_size = 0;
602adf40 1470 u64 ofs;
1fec7093
YS
1471 int num_segs, cur_seg = 0;
1472 struct rbd_req_coll *coll;
d1d25646 1473 struct ceph_snap_context *snapc;
602adf40 1474
602adf40
YS
1475 dout("fetched request\n");
1476
1477 /* filter out block requests we don't understand */
1478 if ((rq->cmd_type != REQ_TYPE_FS)) {
1479 __blk_end_request_all(rq, 0);
00f1f36f 1480 continue;
602adf40
YS
1481 }
1482
1483 /* deduce our operation (read, write) */
1484 do_write = (rq_data_dir(rq) == WRITE);
1485
1486 size = blk_rq_bytes(rq);
593a9e7b 1487 ofs = blk_rq_pos(rq) * SECTOR_SIZE;
602adf40 1488 rq_bio = rq->bio;
f84344f3 1489 if (do_write && rbd_dev->mapping.read_only) {
602adf40 1490 __blk_end_request_all(rq, -EROFS);
00f1f36f 1491 continue;
602adf40
YS
1492 }
1493
1494 spin_unlock_irq(q->queue_lock);
1495
d1d25646 1496 down_read(&rbd_dev->header_rwsem);
e88a36ec 1497
f84344f3
AE
1498 if (rbd_dev->mapping.snap_id != CEPH_NOSNAP &&
1499 !rbd_dev->mapping.snap_exists) {
e88a36ec 1500 up_read(&rbd_dev->header_rwsem);
d1d25646
JD
1501 dout("request for non-existent snapshot");
1502 spin_lock_irq(q->queue_lock);
1503 __blk_end_request_all(rq, -ENXIO);
1504 continue;
e88a36ec
JD
1505 }
1506
d1d25646
JD
1507 snapc = ceph_get_snap_context(rbd_dev->header.snapc);
1508
1509 up_read(&rbd_dev->header_rwsem);
1510
602adf40
YS
1511 dout("%s 0x%x bytes at 0x%llx\n",
1512 do_write ? "write" : "read",
bd919d45 1513 size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE);
602adf40 1514
1fec7093 1515 num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
df111be6
AE
1516 if (num_segs <= 0) {
1517 spin_lock_irq(q->queue_lock);
1518 __blk_end_request_all(rq, num_segs);
1519 ceph_put_snap_context(snapc);
1520 continue;
1521 }
1fec7093
YS
1522 coll = rbd_alloc_coll(num_segs);
1523 if (!coll) {
1524 spin_lock_irq(q->queue_lock);
1525 __blk_end_request_all(rq, -ENOMEM);
d1d25646 1526 ceph_put_snap_context(snapc);
00f1f36f 1527 continue;
1fec7093
YS
1528 }
1529
602adf40
YS
1530 do {
1531 /* a bio clone to be passed down to OSD req */
bd919d45 1532 dout("rq->bio->bi_vcnt=%hu\n", rq->bio->bi_vcnt);
65ccfe21 1533 op_size = rbd_segment_length(rbd_dev, ofs, size);
1fec7093 1534 kref_get(&coll->kref);
602adf40
YS
1535 bio = bio_chain_clone(&rq_bio, &next_bio, &bp,
1536 op_size, GFP_ATOMIC);
1537 if (!bio) {
1fec7093
YS
1538 rbd_coll_end_req_index(rq, coll, cur_seg,
1539 -ENOMEM, op_size);
1540 goto next_seg;
602adf40
YS
1541 }
1542
1fec7093 1543
602adf40
YS
1544 /* init OSD command: write or read */
1545 if (do_write)
1546 rbd_req_write(rq, rbd_dev,
d1d25646 1547 snapc,
602adf40 1548 ofs,
1fec7093
YS
1549 op_size, bio,
1550 coll, cur_seg);
602adf40
YS
1551 else
1552 rbd_req_read(rq, rbd_dev,
f84344f3 1553 rbd_dev->mapping.snap_id,
602adf40 1554 ofs,
1fec7093
YS
1555 op_size, bio,
1556 coll, cur_seg);
602adf40 1557
1fec7093 1558next_seg:
602adf40
YS
1559 size -= op_size;
1560 ofs += op_size;
1561
1fec7093 1562 cur_seg++;
602adf40
YS
1563 rq_bio = next_bio;
1564 } while (size > 0);
1fec7093 1565 kref_put(&coll->kref, rbd_coll_release);
602adf40
YS
1566
1567 if (bp)
1568 bio_pair_release(bp);
602adf40 1569 spin_lock_irq(q->queue_lock);
d1d25646
JD
1570
1571 ceph_put_snap_context(snapc);
602adf40
YS
1572 }
1573}
1574
/*
 * a queue callback. Makes sure that we don't create a bio that spans across
 * multiple osd objects. One exception would be with a single page bios,
 * which we handle later at bio_chain_clone
 */
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct rbd_device *rbd_dev = q->queuedata;
	unsigned int chunk_sectors;
	sector_t sector;
	unsigned int bio_sectors;
	int max;

	/* sectors per rbd object: object size is 1 << obj_order bytes */
	chunk_sectors = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
	sector = bmd->bi_sector + get_start_sect(bmd->bi_bdev);
	bio_sectors = bmd->bi_size >> SECTOR_SHIFT;

	/* bytes remaining in the object the bio currently ends in */
	max = (chunk_sectors - ((sector & (chunk_sectors - 1))
				+ bio_sectors)) << SECTOR_SHIFT;
	if (max < 0)
		max = 0; /* bio_add cannot handle a negative return */
	/* empty bio: allow at least one bvec even across a boundary */
	if (max <= bvec->bv_len && bio_sectors == 0)
		return bvec->bv_len;
	return max;
}
1601
1602static void rbd_free_disk(struct rbd_device *rbd_dev)
1603{
1604 struct gendisk *disk = rbd_dev->disk;
1605
1606 if (!disk)
1607 return;
1608
602adf40
YS
1609 if (disk->flags & GENHD_FL_UP)
1610 del_gendisk(disk);
1611 if (disk->queue)
1612 blk_cleanup_queue(disk->queue);
1613 put_disk(disk);
1614}
1615
/*
 * Read the complete header for the given rbd device.
 *
 * Returns a pointer to a dynamically-allocated buffer containing
 * the complete and validated header. Caller can pass the address
 * of a variable that will be filled in with the version of the
 * header object at the time it was read.
 *
 * Returns a pointer-coded errno if a failure occurs.
 */
static struct rbd_image_header_ondisk *
rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings. Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		/* free the previous (too small) attempt, if any */
		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return ERR_PTR(-ENOMEM);

		ret = rbd_req_sync_read(rbd_dev, CEPH_NOSNAP,
				       rbd_dev->header_name,
				       0, size,
				       (char *) ondisk, version);

		if (ret < 0)
			goto out_err;
		if (WARN_ON((size_t) ret < size)) {
			ret = -ENXIO;
			pr_warning("short header read for image %s"
					" (want %zd got %d)\n",
				rbd_dev->image_name, size, ret);
			goto out_err;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			pr_warning("invalid header for image %s\n",
				rbd_dev->image_name);
			goto out_err;
		}

		/* re-read if the snapshot count changed meanwhile */
		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	return ondisk;

out_err:
	kfree(ondisk);

	return ERR_PTR(ret);
}
1687
1688/*
1689 * reload the ondisk the header
1690 */
1691static int rbd_read_header(struct rbd_device *rbd_dev,
1692 struct rbd_image_header *header)
1693{
1694 struct rbd_image_header_ondisk *ondisk;
1695 u64 ver = 0;
1696 int ret;
602adf40 1697
4156d998
AE
1698 ondisk = rbd_dev_v1_header_read(rbd_dev, &ver);
1699 if (IS_ERR(ondisk))
1700 return PTR_ERR(ondisk);
1701 ret = rbd_header_from_disk(header, ondisk);
1702 if (ret >= 0)
1703 header->obj_version = ver;
1704 kfree(ondisk);
1705
1706 return ret;
602adf40
YS
1707}
1708
dfc5606d
YS
/* Tear down every in-core snapshot entry for this device. */
static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
{
	struct rbd_snap *snap;
	struct rbd_snap *next;

	/* _safe variant: __rbd_remove_snap_dev() unlinks each entry */
	list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
		__rbd_remove_snap_dev(snap);
}
1717
602adf40
YS
/*
 * Re-read the image header and swap the freshly read fields into
 * rbd_dev->header under the header rwsem.  Also resizes the block
 * device if the base image grew/shrank, and refreshes the snapshot
 * list.  Caller must hold ctl_mutex (see rbd_refresh_header()).
 */
static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
{
	int ret;
	struct rbd_image_header h;

	ret = rbd_read_header(rbd_dev, &h);
	if (ret < 0)
		return ret;

	/* write-lock: we are about to replace header fields in place */
	down_write(&rbd_dev->header_rwsem);

	/* resized? */
	if (rbd_dev->mapping.snap_id == CEPH_NOSNAP) {
		sector_t size = (sector_t) h.image_size / SECTOR_SIZE;

		if (size != (sector_t) rbd_dev->mapping.size) {
			/* NOTE(review): dout below lacks a trailing \n */
			dout("setting size to %llu sectors",
				(unsigned long long) size);
			rbd_dev->mapping.size = (u64) size;
			set_capacity(rbd_dev->disk, size);
		}
	}

	/* rbd_dev->header.object_prefix shouldn't change */
	kfree(rbd_dev->header.snap_sizes);
	kfree(rbd_dev->header.snap_names);
	/* osd requests may still refer to snapc */
	ceph_put_snap_context(rbd_dev->header.snapc);

	if (hver)
		*hver = h.obj_version;
	rbd_dev->header.obj_version = h.obj_version;
	rbd_dev->header.image_size = h.image_size;
	rbd_dev->header.snapc = h.snapc;
	rbd_dev->header.snap_names = h.snap_names;
	rbd_dev->header.snap_sizes = h.snap_sizes;
	/* Free the extra copy of the object prefix */
	WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
	kfree(h.object_prefix);

	ret = rbd_dev_snaps_update(rbd_dev);
	if (!ret)
		ret = rbd_dev_snaps_register(rbd_dev);

	up_write(&rbd_dev->header_rwsem);

	return ret;
}
1769
1fe5e993
AE
1770static int rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver)
1771{
1772 int ret;
1773
1774 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1775 ret = __rbd_refresh_header(rbd_dev, hver);
1776 mutex_unlock(&ctl_mutex);
1777
1778 return ret;
1779}
1780
602adf40
YS
/*
 * Allocate and configure the gendisk and request queue for this rbd
 * device, tuning I/O limits to the image's object size.  The disk is
 * not added here (no add_disk()); the caller does that.  Returns 0
 * or -ENOMEM.
 */
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	u64 segment_size;

	/* create gendisk info */
	disk = alloc_disk(RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = 0;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	/* init rq */
	q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock);
	if (!q)
		goto out_disk;

	/* We use the default size, but let's be explicit about it. */
	blk_queue_physical_block_size(q, SECTOR_SIZE);

	/* set io sizes to object size */
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);

	/* keep bios from straddling object boundaries */
	blk_queue_merge_bvec(q, rbd_merge_bvec);
	disk->queue = q;

	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);

	return 0;
out_disk:
	put_disk(disk);

	return -ENOMEM;
}
1829
dfc5606d
YS
1830/*
1831 sysfs
1832*/
1833
593a9e7b
AE
/* Map a sysfs struct device back to its containing rbd_device. */
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}
1838
dfc5606d
YS
1839static ssize_t rbd_size_show(struct device *dev,
1840 struct device_attribute *attr, char *buf)
1841{
593a9e7b 1842 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
a51aa0c0
JD
1843 sector_t size;
1844
1845 down_read(&rbd_dev->header_rwsem);
1846 size = get_capacity(rbd_dev->disk);
1847 up_read(&rbd_dev->header_rwsem);
dfc5606d 1848
a51aa0c0 1849 return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE);
dfc5606d
YS
1850}
1851
34b13184
AE
1852/*
1853 * Note this shows the features for whatever's mapped, which is not
1854 * necessarily the base image.
1855 */
1856static ssize_t rbd_features_show(struct device *dev,
1857 struct device_attribute *attr, char *buf)
1858{
1859 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
1860
1861 return sprintf(buf, "0x%016llx\n",
1862 (unsigned long long) rbd_dev->mapping.features);
1863}
1864
dfc5606d
YS
1865static ssize_t rbd_major_show(struct device *dev,
1866 struct device_attribute *attr, char *buf)
1867{
593a9e7b 1868 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
602adf40 1869
dfc5606d
YS
1870 return sprintf(buf, "%d\n", rbd_dev->major);
1871}
1872
1873static ssize_t rbd_client_id_show(struct device *dev,
1874 struct device_attribute *attr, char *buf)
602adf40 1875{
593a9e7b 1876 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 1877
1dbb4399
AE
1878 return sprintf(buf, "client%lld\n",
1879 ceph_client_id(rbd_dev->rbd_client->client));
602adf40
YS
1880}
1881
dfc5606d
YS
1882static ssize_t rbd_pool_show(struct device *dev,
1883 struct device_attribute *attr, char *buf)
602adf40 1884{
593a9e7b 1885 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d
YS
1886
1887 return sprintf(buf, "%s\n", rbd_dev->pool_name);
1888}
1889
9bb2f334
AE
1890static ssize_t rbd_pool_id_show(struct device *dev,
1891 struct device_attribute *attr, char *buf)
1892{
1893 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
1894
1895 return sprintf(buf, "%d\n", rbd_dev->pool_id);
1896}
1897
dfc5606d
YS
1898static ssize_t rbd_name_show(struct device *dev,
1899 struct device_attribute *attr, char *buf)
1900{
593a9e7b 1901 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 1902
0bed54dc 1903 return sprintf(buf, "%s\n", rbd_dev->image_name);
dfc5606d
YS
1904}
1905
589d30e0
AE
1906static ssize_t rbd_image_id_show(struct device *dev,
1907 struct device_attribute *attr, char *buf)
1908{
1909 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
1910
1911 return sprintf(buf, "%s\n", rbd_dev->image_id);
1912}
1913
34b13184
AE
1914/*
1915 * Shows the name of the currently-mapped snapshot (or
1916 * RBD_SNAP_HEAD_NAME for the base image).
1917 */
dfc5606d
YS
1918static ssize_t rbd_snap_show(struct device *dev,
1919 struct device_attribute *attr,
1920 char *buf)
1921{
593a9e7b 1922 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
dfc5606d 1923
f84344f3 1924 return sprintf(buf, "%s\n", rbd_dev->mapping.snap_name);
dfc5606d
YS
1925}
1926
1927static ssize_t rbd_image_refresh(struct device *dev,
1928 struct device_attribute *attr,
1929 const char *buf,
1930 size_t size)
1931{
593a9e7b 1932 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
b813623a 1933 int ret;
602adf40 1934
1fe5e993 1935 ret = rbd_refresh_header(rbd_dev, NULL);
b813623a
AE
1936
1937 return ret < 0 ? ret : size;
dfc5606d 1938}
602adf40 1939
/* Per-device sysfs attributes; each maps to a *_show handler above. */
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
/* "refresh" is write-only: writing triggers a header re-read */
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_client_id.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

/* Empty release: the rbd_device owns the embedded struct device. */
static void rbd_sysfs_dev_release(struct device *dev)
{
}

static struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_sysfs_dev_release,
};
1983
1984
1985/*
1986 sysfs - snapshots
1987*/
1988
1989static ssize_t rbd_snap_size_show(struct device *dev,
1990 struct device_attribute *attr,
1991 char *buf)
1992{
1993 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
1994
3591538f 1995 return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
dfc5606d
YS
1996}
1997
1998static ssize_t rbd_snap_id_show(struct device *dev,
1999 struct device_attribute *attr,
2000 char *buf)
2001{
2002 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2003
3591538f 2004 return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
dfc5606d
YS
2005}
2006
34b13184
AE
2007static ssize_t rbd_snap_features_show(struct device *dev,
2008 struct device_attribute *attr,
2009 char *buf)
2010{
2011 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
2012
2013 return sprintf(buf, "0x%016llx\n",
2014 (unsigned long long) snap->features);
2015}
2016
dfc5606d
YS
/* Per-snapshot sysfs attributes. */
static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL);

static struct attribute *rbd_snap_attrs[] = {
	&dev_attr_snap_size.attr,
	&dev_attr_snap_id.attr,
	&dev_attr_snap_features.attr,
	NULL,
};

static struct attribute_group rbd_snap_attr_group = {
	.attrs = rbd_snap_attrs,
};

/* Device release frees the snap itself: it owns name and struct. */
static void rbd_snap_dev_release(struct device *dev)
{
	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
	kfree(snap->name);
	kfree(snap);
}

static const struct attribute_group *rbd_snap_attr_groups[] = {
	&rbd_snap_attr_group,
	NULL
};

static struct device_type rbd_snap_device_type = {
	.groups		= rbd_snap_attr_groups,
	.release	= rbd_snap_dev_release,
};
2048
304f6808
AE
/*
 * A snapshot is considered registered once its device type has been
 * set to rbd_snap_device_type; this must agree with the device-model
 * registration state.
 */
static bool rbd_snap_registered(struct rbd_snap *snap)
{
	bool ret = snap->dev.type == &rbd_snap_device_type;
	bool reg = device_is_registered(&snap->dev);

	/* either both true or both false (XOR must be 0) */
	rbd_assert(!ret ^ reg);

	return ret;
}
2058
/*
 * Unlink a snapshot from the device's list and unregister its sysfs
 * device (device_unregister drops the ref that frees the snap via
 * rbd_snap_dev_release).
 */
static void __rbd_remove_snap_dev(struct rbd_snap *snap)
{
	list_del(&snap->node);
	if (device_is_registered(&snap->dev))
		device_unregister(&snap->dev);
}
2065
14e7085d 2066static int rbd_register_snap_dev(struct rbd_snap *snap,
dfc5606d
YS
2067 struct device *parent)
2068{
2069 struct device *dev = &snap->dev;
2070 int ret;
2071
2072 dev->type = &rbd_snap_device_type;
2073 dev->parent = parent;
2074 dev->release = rbd_snap_dev_release;
2075 dev_set_name(dev, "snap_%s", snap->name);
304f6808
AE
2076 dout("%s: registering device for snapshot %s\n", __func__, snap->name);
2077
dfc5606d
YS
2078 ret = device_register(dev);
2079
2080 return ret;
2081}
2082
/*
 * Allocate and initialize an in-core snapshot entry.  Does NOT
 * register the sysfs device and does not link it into the device's
 * snapshot list.  Returns the new snap or a pointer-coded errno.
 */
static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
					   const char *snap_name,
					   u64 snap_id, u64 snap_size,
					   u64 snap_features)
{
	struct rbd_snap *snap;
	int ret;

	snap = kzalloc(sizeof (*snap), GFP_KERNEL);
	if (!snap)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	/* the snap owns its own copy of the name (freed on release) */
	snap->name = kstrdup(snap_name, GFP_KERNEL);
	if (!snap->name)
		goto err;

	snap->id = snap_id;
	snap->size = snap_size;
	snap->features = snap_features;

	return snap;

err:
	kfree(snap->name);
	kfree(snap);

	return ERR_PTR(ret);
}
2112
cd892126
AE
/*
 * For a format 1 image, return the name of snapshot number "which"
 * and fill in its size and (always-zero) feature mask.  Snapshot
 * names are stored as consecutive NUL-terminated strings.
 */
static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
				  u64 *snap_size, u64 *snap_features)
{
	char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	*snap_size = rbd_dev->header.snap_sizes[which];
	*snap_features = 0; /* No features for v1 */

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return snap_name;
}
2131
9d475de5
AE
/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				 u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	/* reply layout of the "rbd" class "get_size" method */
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
				"rbd", "get_size",
				(char *) &snapid, sizeof (snapid),
				(char *) &size_buf, sizeof (size_buf),
				CEPH_OSD_FLAG_READ, NULL);
	dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;

	*order = size_buf.order;
	*snap_size = le64_to_cpu(size_buf.size);

	dout("  snap_id 0x%016llx order = %u, snap_size = %llu\n",
		(unsigned long long) snap_id, (unsigned int) *order,
		(unsigned long long) *snap_size);

	return 0;
}
2165
2166static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
2167{
2168 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
2169 &rbd_dev->header.obj_order,
2170 &rbd_dev->header.image_size);
2171}
2172
1e130199
AE
2173static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
2174{
2175 void *reply_buf;
2176 int ret;
2177 void *p;
2178
2179 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
2180 if (!reply_buf)
2181 return -ENOMEM;
2182
2183 ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
2184 "rbd", "get_object_prefix",
2185 NULL, 0,
2186 reply_buf, RBD_OBJ_PREFIX_LEN_MAX,
2187 CEPH_OSD_FLAG_READ, NULL);
2188 dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
2189 if (ret < 0)
2190 goto out;
2191
2192 p = reply_buf;
2193 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
2194 p + RBD_OBJ_PREFIX_LEN_MAX,
2195 NULL, GFP_NOIO);
2196
2197 if (IS_ERR(rbd_dev->header.object_prefix)) {
2198 ret = PTR_ERR(rbd_dev->header.object_prefix);
2199 rbd_dev->header.object_prefix = NULL;
2200 } else {
2201 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
2202 }
2203
2204out:
2205 kfree(reply_buf);
2206
2207 return ret;
2208}
2209
b1b5402a
AE
2210static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
2211 u64 *snap_features)
2212{
2213 __le64 snapid = cpu_to_le64(snap_id);
2214 struct {
2215 __le64 features;
2216 __le64 incompat;
2217 } features_buf = { 0 };
2218 int ret;
2219
2220 ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
2221 "rbd", "get_features",
2222 (char *) &snapid, sizeof (snapid),
2223 (char *) &features_buf, sizeof (features_buf),
2224 CEPH_OSD_FLAG_READ, NULL);
2225 dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
2226 if (ret < 0)
2227 return ret;
2228 *snap_features = le64_to_cpu(features_buf.features);
2229
2230 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
2231 (unsigned long long) snap_id,
2232 (unsigned long long) *snap_features,
2233 (unsigned long long) le64_to_cpu(features_buf.incompat));
2234
2235 return 0;
2236}
2237
2238static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
2239{
2240 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
2241 &rbd_dev->header.features);
2242}
2243
6e14b1a6 2244static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
35d489f9
AE
2245{
2246 size_t size;
2247 int ret;
2248 void *reply_buf;
2249 void *p;
2250 void *end;
2251 u64 seq;
2252 u32 snap_count;
2253 struct ceph_snap_context *snapc;
2254 u32 i;
2255
2256 /*
2257 * We'll need room for the seq value (maximum snapshot id),
2258 * snapshot count, and array of that many snapshot ids.
2259 * For now we have a fixed upper limit on the number we're
2260 * prepared to receive.
2261 */
2262 size = sizeof (__le64) + sizeof (__le32) +
2263 RBD_MAX_SNAP_COUNT * sizeof (__le64);
2264 reply_buf = kzalloc(size, GFP_KERNEL);
2265 if (!reply_buf)
2266 return -ENOMEM;
2267
2268 ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
2269 "rbd", "get_snapcontext",
2270 NULL, 0,
2271 reply_buf, size,
6e14b1a6 2272 CEPH_OSD_FLAG_READ, ver);
35d489f9
AE
2273 dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
2274 if (ret < 0)
2275 goto out;
2276
2277 ret = -ERANGE;
2278 p = reply_buf;
2279 end = (char *) reply_buf + size;
2280 ceph_decode_64_safe(&p, end, seq, out);
2281 ceph_decode_32_safe(&p, end, snap_count, out);
2282
2283 /*
2284 * Make sure the reported number of snapshot ids wouldn't go
2285 * beyond the end of our buffer. But before checking that,
2286 * make sure the computed size of the snapshot context we
2287 * allocate is representable in a size_t.
2288 */
2289 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
2290 / sizeof (u64)) {
2291 ret = -EINVAL;
2292 goto out;
2293 }
2294 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
2295 goto out;
2296
2297 size = sizeof (struct ceph_snap_context) +
2298 snap_count * sizeof (snapc->snaps[0]);
2299 snapc = kmalloc(size, GFP_KERNEL);
2300 if (!snapc) {
2301 ret = -ENOMEM;
2302 goto out;
2303 }
2304
2305 atomic_set(&snapc->nref, 1);
2306 snapc->seq = seq;
2307 snapc->num_snaps = snap_count;
2308 for (i = 0; i < snap_count; i++)
2309 snapc->snaps[i] = ceph_decode_64(&p);
2310
2311 rbd_dev->header.snapc = snapc;
2312
2313 dout(" snap context seq = %llu, snap_count = %u\n",
2314 (unsigned long long) seq, (unsigned int) snap_count);
2315
2316out:
2317 kfree(reply_buf);
2318
2319 return 0;
2320}
2321
b8b1e2db
AE
2322static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
2323{
2324 size_t size;
2325 void *reply_buf;
2326 __le64 snap_id;
2327 int ret;
2328 void *p;
2329 void *end;
2330 size_t snap_name_len;
2331 char *snap_name;
2332
2333 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
2334 reply_buf = kmalloc(size, GFP_KERNEL);
2335 if (!reply_buf)
2336 return ERR_PTR(-ENOMEM);
2337
2338 snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
2339 ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
2340 "rbd", "get_snapshot_name",
2341 (char *) &snap_id, sizeof (snap_id),
2342 reply_buf, size,
2343 CEPH_OSD_FLAG_READ, NULL);
2344 dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
2345 if (ret < 0)
2346 goto out;
2347
2348 p = reply_buf;
2349 end = (char *) reply_buf + size;
2350 snap_name_len = 0;
2351 snap_name = ceph_extract_encoded_string(&p, end, &snap_name_len,
2352 GFP_KERNEL);
2353 if (IS_ERR(snap_name)) {
2354 ret = PTR_ERR(snap_name);
2355 goto out;
2356 } else {
2357 dout(" snap_id 0x%016llx snap_name = %s\n",
2358 (unsigned long long) le64_to_cpu(snap_id), snap_name);
2359 }
2360 kfree(reply_buf);
2361
2362 return snap_name;
2363out:
2364 kfree(reply_buf);
2365
2366 return ERR_PTR(ret);
2367}
2368
2369static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
2370 u64 *snap_size, u64 *snap_features)
2371{
2372 __le64 snap_id;
2373 u8 order;
2374 int ret;
2375
2376 snap_id = rbd_dev->header.snapc->snaps[which];
2377 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size);
2378 if (ret)
2379 return ERR_PTR(ret);
2380 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features);
2381 if (ret)
2382 return ERR_PTR(ret);
2383
2384 return rbd_dev_v2_snap_name(rbd_dev, which);
2385}
2386
2387static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
2388 u64 *snap_size, u64 *snap_features)
2389{
2390 if (rbd_dev->image_format == 1)
2391 return rbd_dev_v1_snap_info(rbd_dev, which,
2392 snap_size, snap_features);
2393 if (rbd_dev->image_format == 2)
2394 return rbd_dev_v2_snap_info(rbd_dev, which,
2395 snap_size, snap_features);
2396 return ERR_PTR(-EINVAL);
2397}
2398
dfc5606d 2399/*
35938150
AE
2400 * Scan the rbd device's current snapshot list and compare it to the
2401 * newly-received snapshot context. Remove any existing snapshots
2402 * not present in the new snapshot context. Add a new snapshot for
2403 * any snaphots in the snapshot context not in the current list.
2404 * And verify there are no changes to snapshots we already know
2405 * about.
2406 *
2407 * Assumes the snapshots in the snapshot context are sorted by
2408 * snapshot id, highest id first. (Snapshots in the rbd_dev's list
2409 * are also maintained in that order.)
dfc5606d 2410 */
304f6808 2411static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
dfc5606d 2412{
35938150
AE
2413 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
2414 const u32 snap_count = snapc->num_snaps;
35938150
AE
2415 struct list_head *head = &rbd_dev->snaps;
2416 struct list_head *links = head->next;
2417 u32 index = 0;
dfc5606d 2418
9fcbb800 2419 dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count);
35938150
AE
2420 while (index < snap_count || links != head) {
2421 u64 snap_id;
2422 struct rbd_snap *snap;
cd892126
AE
2423 char *snap_name;
2424 u64 snap_size = 0;
2425 u64 snap_features = 0;
dfc5606d 2426
35938150
AE
2427 snap_id = index < snap_count ? snapc->snaps[index]
2428 : CEPH_NOSNAP;
2429 snap = links != head ? list_entry(links, struct rbd_snap, node)
2430 : NULL;
aafb230e 2431 rbd_assert(!snap || snap->id != CEPH_NOSNAP);
dfc5606d 2432
35938150
AE
2433 if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
2434 struct list_head *next = links->next;
dfc5606d 2435
35938150 2436 /* Existing snapshot not in the new snap context */
dfc5606d 2437
f84344f3
AE
2438 if (rbd_dev->mapping.snap_id == snap->id)
2439 rbd_dev->mapping.snap_exists = false;
35938150 2440 __rbd_remove_snap_dev(snap);
9fcbb800 2441 dout("%ssnap id %llu has been removed\n",
f84344f3
AE
2442 rbd_dev->mapping.snap_id == snap->id ?
2443 "mapped " : "",
9fcbb800 2444 (unsigned long long) snap->id);
35938150
AE
2445
2446 /* Done with this list entry; advance */
2447
2448 links = next;
dfc5606d
YS
2449 continue;
2450 }
35938150 2451
b8b1e2db
AE
2452 snap_name = rbd_dev_snap_info(rbd_dev, index,
2453 &snap_size, &snap_features);
cd892126
AE
2454 if (IS_ERR(snap_name))
2455 return PTR_ERR(snap_name);
2456
9fcbb800
AE
2457 dout("entry %u: snap_id = %llu\n", (unsigned int) snap_count,
2458 (unsigned long long) snap_id);
35938150
AE
2459 if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
2460 struct rbd_snap *new_snap;
2461
2462 /* We haven't seen this snapshot before */
2463
c8d18425 2464 new_snap = __rbd_add_snap_dev(rbd_dev, snap_name,
cd892126 2465 snap_id, snap_size, snap_features);
9fcbb800
AE
2466 if (IS_ERR(new_snap)) {
2467 int err = PTR_ERR(new_snap);
2468
2469 dout(" failed to add dev, error %d\n", err);
2470
2471 return err;
2472 }
35938150
AE
2473
2474 /* New goes before existing, or at end of list */
2475
9fcbb800 2476 dout(" added dev%s\n", snap ? "" : " at end\n");
35938150
AE
2477 if (snap)
2478 list_add_tail(&new_snap->node, &snap->node);
2479 else
523f3258 2480 list_add_tail(&new_snap->node, head);
35938150
AE
2481 } else {
2482 /* Already have this one */
2483
9fcbb800
AE
2484 dout(" already present\n");
2485
cd892126 2486 rbd_assert(snap->size == snap_size);
aafb230e 2487 rbd_assert(!strcmp(snap->name, snap_name));
cd892126 2488 rbd_assert(snap->features == snap_features);
35938150
AE
2489
2490 /* Done with this list entry; advance */
2491
2492 links = links->next;
dfc5606d 2493 }
35938150
AE
2494
2495 /* Advance to the next entry in the snapshot context */
2496
2497 index++;
dfc5606d 2498 }
9fcbb800 2499 dout("%s: done\n", __func__);
dfc5606d
YS
2500
2501 return 0;
2502}
2503
304f6808
AE
2504/*
2505 * Scan the list of snapshots and register the devices for any that
2506 * have not already been registered.
2507 */
2508static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
2509{
2510 struct rbd_snap *snap;
2511 int ret = 0;
2512
2513 dout("%s called\n", __func__);
86ff77bb
AE
2514 if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
2515 return -EIO;
304f6808
AE
2516
2517 list_for_each_entry(snap, &rbd_dev->snaps, node) {
2518 if (!rbd_snap_registered(snap)) {
2519 ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
2520 if (ret < 0)
2521 break;
2522 }
2523 }
2524 dout("%s: returning %d\n", __func__, ret);
2525
2526 return ret;
2527}
2528
dfc5606d
YS
2529static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
2530{
dfc5606d 2531 struct device *dev;
cd789ab9 2532 int ret;
dfc5606d
YS
2533
2534 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
dfc5606d 2535
cd789ab9 2536 dev = &rbd_dev->dev;
dfc5606d
YS
2537 dev->bus = &rbd_bus_type;
2538 dev->type = &rbd_device_type;
2539 dev->parent = &rbd_root_dev;
2540 dev->release = rbd_dev_release;
de71a297 2541 dev_set_name(dev, "%d", rbd_dev->dev_id);
dfc5606d 2542 ret = device_register(dev);
dfc5606d 2543
dfc5606d 2544 mutex_unlock(&ctl_mutex);
cd789ab9 2545
dfc5606d 2546 return ret;
602adf40
YS
2547}
2548
dfc5606d
YS
2549static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
2550{
2551 device_unregister(&rbd_dev->dev);
2552}
2553
59c2be1e
YS
2554static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
2555{
2556 int ret, rc;
2557
2558 do {
0e6f322d 2559 ret = rbd_req_sync_watch(rbd_dev);
59c2be1e 2560 if (ret == -ERANGE) {
1fe5e993 2561 rc = rbd_refresh_header(rbd_dev, NULL);
59c2be1e
YS
2562 if (rc < 0)
2563 return rc;
2564 }
2565 } while (ret == -ERANGE);
2566
2567 return ret;
2568}
2569
e2839308 2570static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
1ddbe94e
AE
2571
2572/*
499afd5b
AE
2573 * Get a unique rbd identifier for the given new rbd_dev, and add
2574 * the rbd_dev to the global list. The minimum rbd id is 1.
1ddbe94e 2575 */
e2839308 2576static void rbd_dev_id_get(struct rbd_device *rbd_dev)
b7f23c36 2577{
e2839308 2578 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
499afd5b
AE
2579
2580 spin_lock(&rbd_dev_list_lock);
2581 list_add_tail(&rbd_dev->node, &rbd_dev_list);
2582 spin_unlock(&rbd_dev_list_lock);
e2839308
AE
2583 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
2584 (unsigned long long) rbd_dev->dev_id);
1ddbe94e 2585}
b7f23c36 2586
1ddbe94e 2587/*
499afd5b
AE
2588 * Remove an rbd_dev from the global list, and record that its
2589 * identifier is no longer in use.
1ddbe94e 2590 */
e2839308 2591static void rbd_dev_id_put(struct rbd_device *rbd_dev)
1ddbe94e 2592{
d184f6bf 2593 struct list_head *tmp;
de71a297 2594 int rbd_id = rbd_dev->dev_id;
d184f6bf
AE
2595 int max_id;
2596
aafb230e 2597 rbd_assert(rbd_id > 0);
499afd5b 2598
e2839308
AE
2599 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
2600 (unsigned long long) rbd_dev->dev_id);
499afd5b
AE
2601 spin_lock(&rbd_dev_list_lock);
2602 list_del_init(&rbd_dev->node);
d184f6bf
AE
2603
2604 /*
2605 * If the id being "put" is not the current maximum, there
2606 * is nothing special we need to do.
2607 */
e2839308 2608 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
d184f6bf
AE
2609 spin_unlock(&rbd_dev_list_lock);
2610 return;
2611 }
2612
2613 /*
2614 * We need to update the current maximum id. Search the
2615 * list to find out what it is. We're more likely to find
2616 * the maximum at the end, so search the list backward.
2617 */
2618 max_id = 0;
2619 list_for_each_prev(tmp, &rbd_dev_list) {
2620 struct rbd_device *rbd_dev;
2621
2622 rbd_dev = list_entry(tmp, struct rbd_device, node);
2623 if (rbd_id > max_id)
2624 max_id = rbd_id;
2625 }
499afd5b 2626 spin_unlock(&rbd_dev_list_lock);
b7f23c36 2627
1ddbe94e 2628 /*
e2839308 2629 * The max id could have been updated by rbd_dev_id_get(), in
d184f6bf
AE
2630 * which case it now accurately reflects the new maximum.
2631 * Be careful not to overwrite the maximum value in that
2632 * case.
1ddbe94e 2633 */
e2839308
AE
2634 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
2635 dout(" max dev id has been reset\n");
b7f23c36
AE
2636}
2637
e28fff26
AE
2638/*
2639 * Skips over white space at *buf, and updates *buf to point to the
2640 * first found non-space character (if any). Returns the length of
593a9e7b
AE
2641 * the token (string of non-white space characters) found. Note
2642 * that *buf must be terminated with '\0'.
e28fff26
AE
2643 */
2644static inline size_t next_token(const char **buf)
2645{
2646 /*
2647 * These are the characters that produce nonzero for
2648 * isspace() in the "C" and "POSIX" locales.
2649 */
2650 const char *spaces = " \f\n\r\t\v";
2651
2652 *buf += strspn(*buf, spaces); /* Find start of token */
2653
2654 return strcspn(*buf, spaces); /* Return token length */
2655}
2656
2657/*
2658 * Finds the next token in *buf, and if the provided token buffer is
2659 * big enough, copies the found token into it. The result, if
593a9e7b
AE
2660 * copied, is guaranteed to be terminated with '\0'. Note that *buf
2661 * must be terminated with '\0' on entry.
e28fff26
AE
2662 *
2663 * Returns the length of the token found (not including the '\0').
2664 * Return value will be 0 if no token is found, and it will be >=
2665 * token_size if the token would not fit.
2666 *
593a9e7b 2667 * The *buf pointer will be updated to point beyond the end of the
e28fff26
AE
2668 * found token. Note that this occurs even if the token buffer is
2669 * too small to hold it.
2670 */
2671static inline size_t copy_token(const char **buf,
2672 char *token,
2673 size_t token_size)
2674{
2675 size_t len;
2676
2677 len = next_token(buf);
2678 if (len < token_size) {
2679 memcpy(token, *buf, len);
2680 *(token + len) = '\0';
2681 }
2682 *buf += len;
2683
2684 return len;
2685}
2686
ea3352f4
AE
2687/*
2688 * Finds the next token in *buf, dynamically allocates a buffer big
2689 * enough to hold a copy of it, and copies the token into the new
2690 * buffer. The copy is guaranteed to be terminated with '\0'. Note
2691 * that a duplicate buffer is created even for a zero-length token.
2692 *
2693 * Returns a pointer to the newly-allocated duplicate, or a null
2694 * pointer if memory for the duplicate was not available. If
2695 * the lenp argument is a non-null pointer, the length of the token
2696 * (not including the '\0') is returned in *lenp.
2697 *
2698 * If successful, the *buf pointer will be updated to point beyond
2699 * the end of the found token.
2700 *
2701 * Note: uses GFP_KERNEL for allocation.
2702 */
2703static inline char *dup_token(const char **buf, size_t *lenp)
2704{
2705 char *dup;
2706 size_t len;
2707
2708 len = next_token(buf);
2709 dup = kmalloc(len + 1, GFP_KERNEL);
2710 if (!dup)
2711 return NULL;
2712
2713 memcpy(dup, *buf, len);
2714 *(dup + len) = '\0';
2715 *buf += len;
2716
2717 if (lenp)
2718 *lenp = len;
2719
2720 return dup;
2721}
2722
a725f65e 2723/*
3feeb894
AE
2724 * This fills in the pool_name, image_name, image_name_len, rbd_dev,
2725 * rbd_md_name, and name fields of the given rbd_dev, based on the
2726 * list of monitor addresses and other options provided via
2727 * /sys/bus/rbd/add. Returns a pointer to a dynamically-allocated
2728 * copy of the snapshot name to map if successful, or a
2729 * pointer-coded error otherwise.
d22f76e7
AE
2730 *
2731 * Note: rbd_dev is assumed to have been initially zero-filled.
a725f65e 2732 */
3feeb894
AE
2733static char *rbd_add_parse_args(struct rbd_device *rbd_dev,
2734 const char *buf,
2735 const char **mon_addrs,
2736 size_t *mon_addrs_size,
2737 char *options,
2738 size_t options_size)
e28fff26 2739{
d22f76e7 2740 size_t len;
3feeb894
AE
2741 char *err_ptr = ERR_PTR(-EINVAL);
2742 char *snap_name;
e28fff26
AE
2743
2744 /* The first four tokens are required */
2745
7ef3214a
AE
2746 len = next_token(&buf);
2747 if (!len)
3feeb894 2748 return err_ptr;
5214ecc4 2749 *mon_addrs_size = len + 1;
7ef3214a
AE
2750 *mon_addrs = buf;
2751
2752 buf += len;
a725f65e 2753
e28fff26
AE
2754 len = copy_token(&buf, options, options_size);
2755 if (!len || len >= options_size)
3feeb894 2756 return err_ptr;
e28fff26 2757
3feeb894 2758 err_ptr = ERR_PTR(-ENOMEM);
d22f76e7
AE
2759 rbd_dev->pool_name = dup_token(&buf, NULL);
2760 if (!rbd_dev->pool_name)
d22f76e7 2761 goto out_err;
e28fff26 2762
0bed54dc
AE
2763 rbd_dev->image_name = dup_token(&buf, &rbd_dev->image_name_len);
2764 if (!rbd_dev->image_name)
bf3e5ae1 2765 goto out_err;
a725f65e 2766
3feeb894
AE
2767 /* Snapshot name is optional */
2768 len = next_token(&buf);
820a5f3e 2769 if (!len) {
3feeb894
AE
2770 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
2771 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
849b4260 2772 }
3feeb894
AE
2773 snap_name = kmalloc(len + 1, GFP_KERNEL);
2774 if (!snap_name)
2775 goto out_err;
2776 memcpy(snap_name, buf, len);
2777 *(snap_name + len) = '\0';
e28fff26 2778
3feeb894
AE
2779dout(" SNAP_NAME is <%s>, len is %zd\n", snap_name, len);
2780
2781 return snap_name;
d22f76e7
AE
2782
2783out_err:
0bed54dc 2784 kfree(rbd_dev->image_name);
d78fd7ae
AE
2785 rbd_dev->image_name = NULL;
2786 rbd_dev->image_name_len = 0;
d22f76e7
AE
2787 kfree(rbd_dev->pool_name);
2788 rbd_dev->pool_name = NULL;
2789
3feeb894 2790 return err_ptr;
a725f65e
AE
2791}
2792
589d30e0
AE
2793/*
2794 * An rbd format 2 image has a unique identifier, distinct from the
2795 * name given to it by the user. Internally, that identifier is
2796 * what's used to specify the names of objects related to the image.
2797 *
2798 * A special "rbd id" object is used to map an rbd image name to its
2799 * id. If that object doesn't exist, then there is no v2 rbd image
2800 * with the supplied name.
2801 *
2802 * This function will record the given rbd_dev's image_id field if
2803 * it can be determined, and in that case will return 0. If any
2804 * errors occur a negative errno will be returned and the rbd_dev's
2805 * image_id field will be unchanged (and should be NULL).
2806 */
2807static int rbd_dev_image_id(struct rbd_device *rbd_dev)
2808{
2809 int ret;
2810 size_t size;
2811 char *object_name;
2812 void *response;
2813 void *p;
2814
2815 /*
2816 * First, see if the format 2 image id file exists, and if
2817 * so, get the image's persistent id from it.
2818 */
2819 size = sizeof (RBD_ID_PREFIX) + rbd_dev->image_name_len;
2820 object_name = kmalloc(size, GFP_NOIO);
2821 if (!object_name)
2822 return -ENOMEM;
2823 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->image_name);
2824 dout("rbd id object name is %s\n", object_name);
2825
2826 /* Response will be an encoded string, which includes a length */
2827
2828 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
2829 response = kzalloc(size, GFP_NOIO);
2830 if (!response) {
2831 ret = -ENOMEM;
2832 goto out;
2833 }
2834
2835 ret = rbd_req_sync_exec(rbd_dev, object_name,
2836 "rbd", "get_id",
2837 NULL, 0,
2838 response, RBD_IMAGE_ID_LEN_MAX,
2839 CEPH_OSD_FLAG_READ, NULL);
2840 dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
2841 if (ret < 0)
2842 goto out;
2843
2844 p = response;
2845 rbd_dev->image_id = ceph_extract_encoded_string(&p,
2846 p + RBD_IMAGE_ID_LEN_MAX,
2847 &rbd_dev->image_id_len,
2848 GFP_NOIO);
2849 if (IS_ERR(rbd_dev->image_id)) {
2850 ret = PTR_ERR(rbd_dev->image_id);
2851 rbd_dev->image_id = NULL;
2852 } else {
2853 dout("image_id is %s\n", rbd_dev->image_id);
2854 }
2855out:
2856 kfree(response);
2857 kfree(object_name);
2858
2859 return ret;
2860}
2861
a30b71b9
AE
2862static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
2863{
2864 int ret;
2865 size_t size;
2866
2867 /* Version 1 images have no id; empty string is used */
2868
2869 rbd_dev->image_id = kstrdup("", GFP_KERNEL);
2870 if (!rbd_dev->image_id)
2871 return -ENOMEM;
2872 rbd_dev->image_id_len = 0;
2873
2874 /* Record the header object name for this rbd image. */
2875
2876 size = rbd_dev->image_name_len + sizeof (RBD_SUFFIX);
2877 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
2878 if (!rbd_dev->header_name) {
2879 ret = -ENOMEM;
2880 goto out_err;
2881 }
2882 sprintf(rbd_dev->header_name, "%s%s", rbd_dev->image_name, RBD_SUFFIX);
2883
2884 /* Populate rbd image metadata */
2885
2886 ret = rbd_read_header(rbd_dev, &rbd_dev->header);
2887 if (ret < 0)
2888 goto out_err;
2889 rbd_dev->image_format = 1;
2890
2891 dout("discovered version 1 image, header name is %s\n",
2892 rbd_dev->header_name);
2893
2894 return 0;
2895
2896out_err:
2897 kfree(rbd_dev->header_name);
2898 rbd_dev->header_name = NULL;
2899 kfree(rbd_dev->image_id);
2900 rbd_dev->image_id = NULL;
2901
2902 return ret;
2903}
2904
2905static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
2906{
2907 size_t size;
9d475de5 2908 int ret;
6e14b1a6 2909 u64 ver = 0;
a30b71b9
AE
2910
2911 /*
2912 * Image id was filled in by the caller. Record the header
2913 * object name for this rbd image.
2914 */
2915 size = sizeof (RBD_HEADER_PREFIX) + rbd_dev->image_id_len;
2916 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
2917 if (!rbd_dev->header_name)
2918 return -ENOMEM;
2919 sprintf(rbd_dev->header_name, "%s%s",
2920 RBD_HEADER_PREFIX, rbd_dev->image_id);
9d475de5
AE
2921
2922 /* Get the size and object order for the image */
2923
2924 ret = rbd_dev_v2_image_size(rbd_dev);
1e130199
AE
2925 if (ret < 0)
2926 goto out_err;
2927
2928 /* Get the object prefix (a.k.a. block_name) for the image */
2929
2930 ret = rbd_dev_v2_object_prefix(rbd_dev);
b1b5402a
AE
2931 if (ret < 0)
2932 goto out_err;
2933
2934 /* Get the features for the image */
2935
2936 ret = rbd_dev_v2_features(rbd_dev);
9d475de5
AE
2937 if (ret < 0)
2938 goto out_err;
35d489f9 2939
6e14b1a6
AE
2940 /* crypto and compression type aren't (yet) supported for v2 images */
2941
2942 rbd_dev->header.crypt_type = 0;
2943 rbd_dev->header.comp_type = 0;
35d489f9 2944
6e14b1a6
AE
2945 /* Get the snapshot context, plus the header version */
2946
2947 ret = rbd_dev_v2_snap_context(rbd_dev, &ver);
35d489f9
AE
2948 if (ret)
2949 goto out_err;
6e14b1a6
AE
2950 rbd_dev->header.obj_version = ver;
2951
a30b71b9
AE
2952 rbd_dev->image_format = 2;
2953
2954 dout("discovered version 2 image, header name is %s\n",
2955 rbd_dev->header_name);
2956
2957 return -ENOTSUPP;
9d475de5
AE
2958out_err:
2959 kfree(rbd_dev->header_name);
2960 rbd_dev->header_name = NULL;
1e130199
AE
2961 kfree(rbd_dev->header.object_prefix);
2962 rbd_dev->header.object_prefix = NULL;
9d475de5
AE
2963
2964 return ret;
a30b71b9
AE
2965}
2966
2967/*
2968 * Probe for the existence of the header object for the given rbd
2969 * device. For format 2 images this includes determining the image
2970 * id.
2971 */
2972static int rbd_dev_probe(struct rbd_device *rbd_dev)
2973{
2974 int ret;
2975
2976 /*
2977 * Get the id from the image id object. If it's not a
2978 * format 2 image, we'll get ENOENT back, and we'll assume
2979 * it's a format 1 image.
2980 */
2981 ret = rbd_dev_image_id(rbd_dev);
2982 if (ret)
2983 ret = rbd_dev_v1_probe(rbd_dev);
2984 else
2985 ret = rbd_dev_v2_probe(rbd_dev);
2986 if (ret)
2987 dout("probe failed, returning %d\n", ret);
2988
2989 return ret;
2990}
2991
59c2be1e
YS
2992static ssize_t rbd_add(struct bus_type *bus,
2993 const char *buf,
2994 size_t count)
602adf40 2995{
cb8627c7
AE
2996 char *options;
2997 struct rbd_device *rbd_dev = NULL;
7ef3214a
AE
2998 const char *mon_addrs = NULL;
2999 size_t mon_addrs_size = 0;
27cc2594
AE
3000 struct ceph_osd_client *osdc;
3001 int rc = -ENOMEM;
3feeb894 3002 char *snap_name;
602adf40
YS
3003
3004 if (!try_module_get(THIS_MODULE))
3005 return -ENODEV;
3006
60571c7d 3007 options = kmalloc(count, GFP_KERNEL);
602adf40 3008 if (!options)
85ae8926 3009 goto err_out_mem;
cb8627c7
AE
3010 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
3011 if (!rbd_dev)
85ae8926 3012 goto err_out_mem;
602adf40
YS
3013
3014 /* static rbd_device initialization */
3015 spin_lock_init(&rbd_dev->lock);
3016 INIT_LIST_HEAD(&rbd_dev->node);
dfc5606d 3017 INIT_LIST_HEAD(&rbd_dev->snaps);
c666601a 3018 init_rwsem(&rbd_dev->header_rwsem);
602adf40 3019
602adf40 3020 /* parse add command */
3feeb894
AE
3021 snap_name = rbd_add_parse_args(rbd_dev, buf,
3022 &mon_addrs, &mon_addrs_size, options, count);
3023 if (IS_ERR(snap_name)) {
3024 rc = PTR_ERR(snap_name);
85ae8926 3025 goto err_out_mem;
3feeb894 3026 }
e124a82f 3027
f8c38929
AE
3028 rc = rbd_get_client(rbd_dev, mon_addrs, mon_addrs_size - 1, options);
3029 if (rc < 0)
85ae8926 3030 goto err_out_args;
602adf40 3031
602adf40 3032 /* pick the pool */
1dbb4399 3033 osdc = &rbd_dev->rbd_client->client->osdc;
602adf40
YS
3034 rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name);
3035 if (rc < 0)
3036 goto err_out_client;
9bb2f334 3037 rbd_dev->pool_id = rc;
602adf40 3038
a30b71b9
AE
3039 rc = rbd_dev_probe(rbd_dev);
3040 if (rc < 0)
05fd6f6f 3041 goto err_out_client;
a30b71b9 3042 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
05fd6f6f
AE
3043
3044 /* no need to lock here, as rbd_dev is not registered yet */
3045 rc = rbd_dev_snaps_update(rbd_dev);
3046 if (rc)
3047 goto err_out_header;
3048
3049 rc = rbd_dev_set_mapping(rbd_dev, snap_name);
3050 if (rc)
3051 goto err_out_header;
3052
85ae8926
AE
3053 /* generate unique id: find highest unique id, add one */
3054 rbd_dev_id_get(rbd_dev);
3055
3056 /* Fill in the device name, now that we have its id. */
3057 BUILD_BUG_ON(DEV_NAME_LEN
3058 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
3059 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
3060
3061 /* Get our block major device number. */
3062
27cc2594
AE
3063 rc = register_blkdev(0, rbd_dev->name);
3064 if (rc < 0)
85ae8926 3065 goto err_out_id;
27cc2594 3066 rbd_dev->major = rc;
602adf40 3067
0f308a31
AE
3068 /* Set up the blkdev mapping. */
3069
3070 rc = rbd_init_disk(rbd_dev);
dfc5606d 3071 if (rc)
766fc439
YS
3072 goto err_out_blkdev;
3073
0f308a31
AE
3074 rc = rbd_bus_add_dev(rbd_dev);
3075 if (rc)
3076 goto err_out_disk;
3077
32eec68d
AE
3078 /*
3079 * At this point cleanup in the event of an error is the job
3080 * of the sysfs code (initiated by rbd_bus_del_dev()).
32eec68d 3081 */
2ac4e75d 3082
4bb1f1ed 3083 down_write(&rbd_dev->header_rwsem);
5ed16177 3084 rc = rbd_dev_snaps_register(rbd_dev);
4bb1f1ed 3085 up_write(&rbd_dev->header_rwsem);
2ac4e75d
AE
3086 if (rc)
3087 goto err_out_bus;
3088
3ee4001e
AE
3089 rc = rbd_init_watch_dev(rbd_dev);
3090 if (rc)
3091 goto err_out_bus;
3092
2ac4e75d
AE
3093 /* Everything's ready. Announce the disk to the world. */
3094
2ac4e75d 3095 add_disk(rbd_dev->disk);
3ee4001e 3096
2ac4e75d
AE
3097 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
3098 (unsigned long long) rbd_dev->mapping.size);
3099
602adf40
YS
3100 return count;
3101
766fc439 3102err_out_bus:
766fc439
YS
3103 /* this will also clean up rest of rbd_dev stuff */
3104
3105 rbd_bus_del_dev(rbd_dev);
3106 kfree(options);
766fc439
YS
3107 return rc;
3108
0f308a31
AE
3109err_out_disk:
3110 rbd_free_disk(rbd_dev);
602adf40
YS
3111err_out_blkdev:
3112 unregister_blkdev(rbd_dev->major, rbd_dev->name);
85ae8926
AE
3113err_out_id:
3114 rbd_dev_id_put(rbd_dev);
05fd6f6f
AE
3115err_out_header:
3116 rbd_header_free(&rbd_dev->header);
602adf40 3117err_out_client:
3fcf2581 3118 kfree(rbd_dev->header_name);
602adf40 3119 rbd_put_client(rbd_dev);
589d30e0 3120 kfree(rbd_dev->image_id);
85ae8926
AE
3121err_out_args:
3122 kfree(rbd_dev->mapping.snap_name);
3123 kfree(rbd_dev->image_name);
3124 kfree(rbd_dev->pool_name);
3125err_out_mem:
27cc2594 3126 kfree(rbd_dev);
cb8627c7 3127 kfree(options);
27cc2594 3128
602adf40
YS
3129 dout("Error adding device %s\n", buf);
3130 module_put(THIS_MODULE);
27cc2594
AE
3131
3132 return (ssize_t) rc;
602adf40
YS
3133}
3134
de71a297 3135static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
602adf40
YS
3136{
3137 struct list_head *tmp;
3138 struct rbd_device *rbd_dev;
3139
e124a82f 3140 spin_lock(&rbd_dev_list_lock);
602adf40
YS
3141 list_for_each(tmp, &rbd_dev_list) {
3142 rbd_dev = list_entry(tmp, struct rbd_device, node);
de71a297 3143 if (rbd_dev->dev_id == dev_id) {
e124a82f 3144 spin_unlock(&rbd_dev_list_lock);
602adf40 3145 return rbd_dev;
e124a82f 3146 }
602adf40 3147 }
e124a82f 3148 spin_unlock(&rbd_dev_list_lock);
602adf40
YS
3149 return NULL;
3150}
3151
/*
 * Release callback for the rbd device's embedded struct device,
 * invoked by the driver core when the last reference is dropped
 * (e.g. after rbd_bus_del_dev()).  Tears down everything rbd_add()
 * set up, in reverse order of construction.
 *
 * The ordering below matters: watch state is torn down while the
 * ceph client is still valid, and the disk is freed before the
 * block major is unregistered.  Do not reorder without care.
 */
static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	/* Stop the lingering watch request before the client goes away */
	if (rbd_dev->watch_request) {
		struct ceph_client *client = rbd_dev->rbd_client->client;

		ceph_osdc_unregister_linger_request(&client->osdc,
						rbd_dev->watch_request);
	}
	/* Tell the OSD we are no longer watching the header object */
	if (rbd_dev->watch_event)
		rbd_req_sync_unwatch(rbd_dev);

	/* Drop our reference on the shared ceph client */
	rbd_put_client(rbd_dev);

	/* clean up and free blkdev */
	rbd_free_disk(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);

	/* release allocated disk header fields */
	rbd_header_free(&rbd_dev->header);

	/* done with the id, and with the rbd_dev */
	kfree(rbd_dev->mapping.snap_name);
	kfree(rbd_dev->image_id);
	kfree(rbd_dev->header_name);
	kfree(rbd_dev->pool_name);
	kfree(rbd_dev->image_name);
	rbd_dev_id_put(rbd_dev);	/* return the id before freeing the dev */
	kfree(rbd_dev);

	/* release module ref */
	module_put(THIS_MODULE);
}
3186
dfc5606d
YS
3187static ssize_t rbd_remove(struct bus_type *bus,
3188 const char *buf,
3189 size_t count)
602adf40
YS
3190{
3191 struct rbd_device *rbd_dev = NULL;
3192 int target_id, rc;
3193 unsigned long ul;
3194 int ret = count;
3195
3196 rc = strict_strtoul(buf, 10, &ul);
3197 if (rc)
3198 return rc;
3199
3200 /* convert to int; abort if we lost anything in the conversion */
3201 target_id = (int) ul;
3202 if (target_id != ul)
3203 return -EINVAL;
3204
3205 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3206
3207 rbd_dev = __rbd_get_dev(target_id);
3208 if (!rbd_dev) {
3209 ret = -ENOENT;
3210 goto done;
3211 }
3212
dfc5606d
YS
3213 __rbd_remove_all_snaps(rbd_dev);
3214 rbd_bus_del_dev(rbd_dev);
602adf40
YS
3215
3216done:
3217 mutex_unlock(&ctl_mutex);
aafb230e 3218
602adf40
YS
3219 return ret;
3220}
3221
602adf40
YS
3222/*
3223 * create control files in sysfs
dfc5606d 3224 * /sys/bus/rbd/...
602adf40
YS
3225 */
3226static int rbd_sysfs_init(void)
3227{
dfc5606d 3228 int ret;
602adf40 3229
fed4c143 3230 ret = device_register(&rbd_root_dev);
21079786 3231 if (ret < 0)
dfc5606d 3232 return ret;
602adf40 3233
fed4c143
AE
3234 ret = bus_register(&rbd_bus_type);
3235 if (ret < 0)
3236 device_unregister(&rbd_root_dev);
602adf40 3237
602adf40
YS
3238 return ret;
3239}
3240
/*
 * Tear down the sysfs entries created by rbd_sysfs_init(), in the
 * reverse order of their creation: bus first, then the root device.
 */
static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
3246
3247int __init rbd_init(void)
3248{
3249 int rc;
3250
3251 rc = rbd_sysfs_init();
3252 if (rc)
3253 return rc;
f0f8cef5 3254 pr_info("loaded " RBD_DRV_NAME_LONG "\n");
602adf40
YS
3255 return 0;
3256}
3257
/*
 * Module exit point: remove the sysfs hierarchy.  Any mapped devices
 * are expected to have been removed via sysfs before unload.
 */
void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
}
3262
module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");