dmsetup create snap --table "0 2097152 thin /dev/mapper/pool 1"
 
+External snapshots
+------------------
+
+You can use an external _read-only_ device as an origin for a
+thinly-provisioned volume.  Any read to an unprovisioned area of the
+thin device will be passed through to the origin.  Writes trigger
+the allocation of new blocks as usual.
+
+One use case for this is VM hosts that want to run guests on
+thinly-provisioned volumes but have the base image on another device
+(possibly shared between many VMs).
+
+You must not write to the origin device if you use this technique!
+Of course, you may write to the thin device and take internal snapshots
+of the thin volume.
+
+i) Creating a snapshot of an external device
+
+  This is the same as creating a thin device.
+  You don't mention the origin at this stage.
+
+    dmsetup message /dev/mapper/pool 0 "create_thin 0"
+
+ii) Using a snapshot of an external device
+
+  Append an extra parameter to the thin target specifying the origin:
+
+    dmsetup create snap --table "0 2097152 thin /dev/mapper/pool 0 /dev/image"
+
+  N.B. All descendants (internal snapshots) of this snapshot require the
+  same extra origin parameter.
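+
+  For example, an internal snapshot taken from the device above (say it
+  was given dev id 1) must also name the origin when it is activated:
+
+    dmsetup create snap1 --table "0 2097152 thin /dev/mapper/pool 1 /dev/image"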
+
 Deactivation
 ------------
 
 
 i) Constructor
 
-    thin <pool dev> <dev id>
+    thin <pool dev> <dev id> [<external origin dev>]
 
     pool dev:
        the thin-pool device, e.g. /dev/mapper/my_pool or 253:0
 
     dev id:
        the internal device identifier of the device to be
        activated.
 
+    external origin dev:
+       an optional block device outside the pool to be treated as a
+       read-only snapshot origin: reads to unprovisioned areas of the
+       thin target will be mapped to this device.
+
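+    For example, the snapshot from section ii) above, a 1 GiB
+    (2097152-sector) thin device with dev id 0 backed by the read-only
+    image /dev/image, uses the table line:
+
+        0 2097152 thin /dev/mapper/pool 0 /dev/image
+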
 The pool doesn't store any size against the thin devices.  If you
 load a thin target that is smaller than you've been using previously,
 then you'll have no access to blocks mapped beyond the end.  If you
 
  */
 struct thin_c {
        struct dm_dev *pool_dev;
+       struct dm_dev *origin_dev;
        dm_thin_id dev_id;
 
        struct pool *pool;
                (bio->bi_sector & pool->offset_mask);
 }
 
-static void remap_and_issue(struct thin_c *tc, struct bio *bio,
-                           dm_block_t block)
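+/*
+ * Redirect a bio to the external origin device.  Used to serve reads to
+ * blocks of the thin device that have not been provisioned yet.
+ */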
+static void remap_to_origin(struct thin_c *tc, struct bio *bio)
+{
+       bio->bi_bdev = tc->origin_dev->bdev;
+}
+
+static void issue(struct thin_c *tc, struct bio *bio)
 {
        struct pool *pool = tc->pool;
        unsigned long flags;
 
-       remap(tc, bio, block);
-
        /*
         * Batch together any FUA/FLUSH bios we find and then issue
         * a single commit for them in process_deferred_bios().
                generic_make_request(bio);
 }
 
+static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
+{
+       remap_to_origin(tc, bio);
+       issue(tc, bio);
+}
+
+static void remap_and_issue(struct thin_c *tc, struct bio *bio,
+                           dm_block_t block)
+{
+       remap(tc, bio, block);
+       issue(tc, bio);
+}
+
 /*
  * wake_worker() is used when new work is queued and when pool_resume is
  * ready to continue deferred IO processing.
 }
 
 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
-                         dm_block_t data_origin, dm_block_t data_dest,
+                         struct dm_dev *origin, dm_block_t data_origin,
+                         dm_block_t data_dest,
                          struct cell *cell, struct bio *bio)
 {
        int r;
        } else {
                struct dm_io_region from, to;
 
-               from.bdev = tc->pool_dev->bdev;
+               from.bdev = origin->bdev;
                from.sector = data_origin * pool->sectors_per_block;
                from.count = pool->sectors_per_block;
 
        }
 }
 
+static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
+                                  dm_block_t data_origin, dm_block_t data_dest,
+                                  struct cell *cell, struct bio *bio)
+{
+       schedule_copy(tc, virt_block, tc->pool_dev,
+                     data_origin, data_dest, cell, bio);
+}
+
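+/*
+ * Like schedule_internal_copy(), but the new data block is populated from
+ * the external origin device, read at the thin device's virtual block.
+ */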
+static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
+                                  dm_block_t data_dest,
+                                  struct cell *cell, struct bio *bio)
+{
+       schedule_copy(tc, virt_block, tc->origin_dev,
+                     virt_block, data_dest, cell, bio);
+}
+
 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
                          dm_block_t data_block, struct cell *cell,
                          struct bio *bio)
        r = alloc_data_block(tc, &data_block);
        switch (r) {
        case 0:
-               schedule_copy(tc, block, lookup_result->block,
-                             data_block, cell, bio);
+               schedule_internal_copy(tc, block, lookup_result->block,
+                                      data_block, cell, bio);
                break;
 
        case -ENOSPC:
        r = alloc_data_block(tc, &data_block);
        switch (r) {
        case 0:
-               schedule_zero(tc, block, data_block, cell, bio);
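+               /*
+                * With an external origin, a newly provisioned block is
+                * filled by copying from the origin rather than zeroing.
+                */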
+               if (tc->origin_dev)
+                       schedule_external_copy(tc, block, data_block, cell, bio);
+               else
+                       schedule_zero(tc, block, data_block, cell, bio);
                break;
 
        case -ENOSPC:
                break;
 
        case -ENODATA:
-               provision_block(tc, bio, block, cell);
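+               /*
+                * Reads to unprovisioned blocks are served directly from
+                * the external origin, if one was supplied; writes (or the
+                * no-origin case) provision a new block as before.
+                */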
+               if (bio_data_dir(bio) == READ && tc->origin_dev) {
+                       cell_release_singleton(cell, bio);
+                       remap_to_origin_and_issue(tc, bio);
+               } else
+                       provision_block(tc, bio, block, cell);
                break;
 
        default:
        __pool_dec(tc->pool);
        dm_pool_close_thin_device(tc->td);
        dm_put_device(ti, tc->pool_dev);
+       if (tc->origin_dev)
+               dm_put_device(ti, tc->origin_dev);
        kfree(tc);
 
        mutex_unlock(&dm_thin_pool_table.mutex);
 /*
  * Thin target parameters:
  *
- * <pool_dev> <dev_id>
+ * <pool_dev> <dev_id> [origin_dev]
  *
  * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
  * dev_id: the internal device identifier
+ * origin_dev: a device external to the pool that should act as the origin
  */
 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 {
        int r;
        struct thin_c *tc;
-       struct dm_dev *pool_dev;
+       struct dm_dev *pool_dev, *origin_dev;
        struct mapped_device *pool_md;
 
        mutex_lock(&dm_thin_pool_table.mutex);
 
-       if (argc != 2) {
+       if (argc != 2 && argc != 3) {
                ti->error = "Invalid argument count";
                r = -EINVAL;
                goto out_unlock;
                goto out_unlock;
        }
 
+       if (argc == 3) {
+               r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
+               if (r) {
+                       ti->error = "Error opening origin device";
+                       goto bad_origin_dev;
+               }
+               tc->origin_dev = origin_dev;
+       }
+
        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
        if (r) {
                ti->error = "Error opening pool device";
 bad_common:
        dm_put_device(ti, tc->pool_dev);
 bad_pool_dev:
+       if (tc->origin_dev)
+               dm_put_device(ti, tc->origin_dev);
+bad_origin_dev:
        kfree(tc);
 out_unlock:
        mutex_unlock(&dm_thin_pool_table.mutex);
                        DMEMIT("%s %lu",
                               format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
                               (unsigned long) tc->dev_id);
+                       if (tc->origin_dev)
+                               DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
                        break;
                }
        }
 
 static struct target_type thin_target = {
        .name = "thin",
-       .version = {1, 0, 0},
+       .version = {1, 1, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,