1 From 582e26118ab754a3bca9b98351cb874f22b76ffd Mon Sep 17 00:00:00 2001
2 From: Hank Janssen <hjanssen@microsoft.com>
3 Date: Mon, 13 Jul 2009 15:33:02 -0700
4 Subject: Staging: hv: add the Hyper-V virtual block driver
6 From: Hank Janssen <hjanssen@microsoft.com>
8 This is the virtual block driver when running Linux on top of Hyper-V.
10 Signed-off-by: Hank Janssen <hjanssen@microsoft.com>
11 Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
12 Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
14 drivers/staging/hv/BlkVsc.c | 107 ++
15 drivers/staging/hv/blkvsc_drv.c | 1547 ++++++++++++++++++++++++++++++++++++++++
16 2 files changed, 1654 insertions(+)
17 create mode 100644 drivers/staging/hv/BlkVsc.c
20 +++ b/drivers/staging/hv/BlkVsc.c
24 + * Copyright (c) 2009, Microsoft Corporation.
26 + * This program is free software; you can redistribute it and/or modify it
27 + * under the terms and conditions of the GNU General Public License,
28 + * version 2, as published by the Free Software Foundation.
30 + * This program is distributed in the hope it will be useful, but WITHOUT
31 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
32 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
35 + * You should have received a copy of the GNU General Public License along with
36 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
37 + * Place - Suite 330, Boston, MA 02111-1307 USA.
40 + * Hank Janssen <hjanssen@microsoft.com>
45 +#include "../storvsc/StorVsc.c"
47 +static const char* gBlkDriverName="blkvsc";
49 +//{32412632-86cb-44a2-9b5c-50d1417354f5}
50 +static const GUID gBlkVscDeviceType={
51 + .Data = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5}
57 + DEVICE_OBJECT *Device,
58 + void *AdditionalInfo
64 + DRIVER_OBJECT *Driver
67 + STORVSC_DRIVER_OBJECT* storDriver = (STORVSC_DRIVER_OBJECT*)Driver;
70 + DPRINT_ENTER(BLKVSC);
72 + // Make sure we are at least 2 pages since 1 page is used for control
73 + ASSERT(storDriver->RingBufferSize >= (PAGE_SIZE << 1));
75 + Driver->name = gBlkDriverName;
76 + memcpy(&Driver->deviceType, &gBlkVscDeviceType, sizeof(GUID));
78 + storDriver->RequestExtSize = sizeof(STORVSC_REQUEST_EXTENSION);
79 + // Divide the ring buffer data size (which is 1 page less than the ring buffer size since that page is reserved for the ring buffer indices)
80 + // by the max request size (which is VMBUS_CHANNEL_PACKET_MULITPAGE_BUFFER + VSTOR_PACKET + UINT64)
81 + storDriver->MaxOutstandingRequestsPerChannel =
82 + ((storDriver->RingBufferSize - PAGE_SIZE) / ALIGN_UP(MAX_MULTIPAGE_BUFFER_PACKET + sizeof(VSTOR_PACKET) + sizeof(UINT64),sizeof(UINT64)));
84 + DPRINT_INFO(BLKVSC, "max io outstd %u", storDriver->MaxOutstandingRequestsPerChannel);
86 + // Setup the dispatch table
87 + storDriver->Base.OnDeviceAdd = BlkVscOnDeviceAdd;
88 + storDriver->Base.OnDeviceRemove = StorVscOnDeviceRemove;
89 + storDriver->Base.OnCleanup = StorVscOnCleanup;
91 + storDriver->OnIORequest = StorVscOnIORequest;
93 + DPRINT_EXIT(BLKVSC);
100 + DEVICE_OBJECT *Device,
101 + void *AdditionalInfo
105 + STORVSC_DEVICE_INFO *deviceInfo = (STORVSC_DEVICE_INFO*)AdditionalInfo;
107 + DPRINT_ENTER(BLKVSC);
109 + ret = StorVscOnDeviceAdd(Device, AdditionalInfo);
113 + DPRINT_EXIT(BLKVSC);
118 + // We need to use the device instance guid to set the path and target id. For IDE devices, the
119 + // device instance id is formatted as <bus id> - <device id> - 8899 - 000000000000.
120 + deviceInfo->PathId = Device->deviceInstance.Data[3] << 24 | Device->deviceInstance.Data[2] << 16 |
121 + Device->deviceInstance.Data[1] << 8 |Device->deviceInstance.Data[0];
123 + deviceInfo->TargetId = Device->deviceInstance.Data[5] << 8 | Device->deviceInstance.Data[4];
125 + DPRINT_EXIT(BLKVSC);
130 +++ b/drivers/staging/hv/blkvsc_drv.c
134 + * Copyright (c) 2009, Microsoft Corporation.
136 + * This program is free software; you can redistribute it and/or modify it
137 + * under the terms and conditions of the GNU General Public License,
138 + * version 2, as published by the Free Software Foundation.
140 + * This program is distributed in the hope it will be useful, but WITHOUT
141 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
142 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
145 + * You should have received a copy of the GNU General Public License along with
146 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
147 + * Place - Suite 330, Boston, MA 02111-1307 USA.
150 + * Hank Janssen <hjanssen@microsoft.com>
155 +#include <linux/init.h>
156 +#include <linux/module.h>
157 +#include <linux/device.h>
158 +#include <linux/blkdev.h>
159 +#include <linux/major.h>
160 +#include <linux/delay.h>
161 +#include <linux/hdreg.h>
163 +#include <scsi/scsi.h>
164 +#include <scsi/scsi_cmnd.h>
165 +#include <scsi/scsi_eh.h>
166 +#include <scsi/scsi_dbg.h>
168 +#include "logging.h"
171 +#include "StorVscApi.h"
176 +#define BLKVSC_MINORS 64
181 +enum blkvsc_device_type {
187 +// This structure ties the struct request and struct blkvsc_request/STORVSC_REQUEST together
188 +// A struct request may be represented by 1 or more struct blkvsc_request
189 +struct blkvsc_request_group {
193 + struct list_head blkvsc_req_list; // list of blkvsc_requests
197 +struct blkvsc_request {
198 + struct list_head req_entry; // blkvsc_request_group.blkvsc_req_list
200 + struct list_head pend_entry; // block_device_context.pending_list
202 + struct request *req; // This may be null if we generate a request internally
203 + struct block_device_context *dev;
204 + struct blkvsc_request_group *group; // The group this request is part of. Maybe null
206 + wait_queue_head_t wevent;
210 + sector_t sector_start;
211 + unsigned long sector_count;
213 + unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
214 + unsigned char cmd_len;
215 + unsigned char cmnd[MAX_COMMAND_SIZE];
217 + STORVSC_REQUEST request;
218 + // !!!DO NOT ADD ANYTHING BELOW HERE!!! Otherwise, memory can overlap, because -
219 + // The extension buffer falls right here and is pointed to by request.Extension;
222 +// Per device structure
223 +struct block_device_context {
224 + struct device_context *device_ctx; // point back to our device context
225 + struct kmem_cache *request_pool;
227 + struct gendisk *gd;
228 + enum blkvsc_device_type device_type;
229 + struct list_head pending_list;
231 + unsigned char device_id[64];
232 + unsigned int device_id_len;
233 + int num_outstanding_reqs;
235 + int media_not_present;
236 + unsigned int sector_size;
239 + unsigned char path;
240 + unsigned char target;
245 +struct blkvsc_driver_context {
246 + // !! These must be the first 2 fields !!
247 + struct driver_context drv_ctx;
248 + STORVSC_DRIVER_OBJECT drv_obj;
252 +static int blkvsc_probe(struct device *dev);
253 +static int blkvsc_remove(struct device *device);
254 +static void blkvsc_shutdown(struct device *device);
256 +static int blkvsc_open(struct inode *inode, struct file *filep);
257 +static int blkvsc_release(struct inode *inode, struct file *filep);
258 +static int blkvsc_media_changed(struct gendisk *gd);
259 +static int blkvsc_revalidate_disk(struct gendisk *gd);
260 +static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg);
261 +static int blkvsc_ioctl(struct inode *inode, struct file *filep, unsigned cmd, unsigned long arg);
263 +static void blkvsc_request(struct request_queue *queue);
264 +static void blkvsc_request_completion(STORVSC_REQUEST* request);
265 +static int blkvsc_do_request(struct block_device_context *blkdev, struct request *req);
266 +static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req, void (*request_completion)(STORVSC_REQUEST*) );
267 +static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req);
268 +static void blkvsc_cmd_completion(STORVSC_REQUEST* request);
269 +static int blkvsc_do_inquiry(struct block_device_context *blkdev);
270 +static int blkvsc_do_read_capacity(struct block_device_context *blkdev);
271 +static int blkvsc_do_read_capacity16(struct block_device_context *blkdev);
272 +static int blkvsc_do_flush(struct block_device_context *blkdev);
273 +static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev);
274 +static int blkvsc_do_pending_reqs(struct block_device_context *blkdev);
277 +static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
279 +// The one and only one
280 +static struct blkvsc_driver_context g_blkvsc_drv;
283 +static struct block_device_operations block_ops =
285 + .owner = THIS_MODULE,
286 + .open = blkvsc_open,
287 + .release = blkvsc_release,
288 + .media_changed = blkvsc_media_changed,
289 + .revalidate_disk = blkvsc_revalidate_disk,
290 + .getgeo = blkvsc_getgeo,
291 + .ioctl = blkvsc_ioctl,
296 +Name: blkvsc_drv_init()
298 +Desc: BlkVsc driver initialization.
301 +int blkvsc_drv_init(PFN_DRIVERINITIALIZE pfn_drv_init)
304 + STORVSC_DRIVER_OBJECT *storvsc_drv_obj=&g_blkvsc_drv.drv_obj;
305 + struct driver_context *drv_ctx=&g_blkvsc_drv.drv_ctx;
307 + DPRINT_ENTER(BLKVSC_DRV);
309 + vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface);
311 + storvsc_drv_obj->RingBufferSize = blkvsc_ringbuffer_size;
313 + // Callback to client driver to complete the initialization
314 + pfn_drv_init(&storvsc_drv_obj->Base);
316 + drv_ctx->driver.name = storvsc_drv_obj->Base.name;
317 + memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType, sizeof(GUID));
319 +#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
320 + drv_ctx->driver.probe = blkvsc_probe;
321 + drv_ctx->driver.remove = blkvsc_remove;
323 + drv_ctx->probe = blkvsc_probe;
324 + drv_ctx->remove = blkvsc_remove;
325 + drv_ctx->shutdown = blkvsc_shutdown;
328 + // The driver belongs to vmbus
329 + vmbus_child_driver_register(drv_ctx);
331 + DPRINT_EXIT(BLKVSC_DRV);
337 +static int blkvsc_drv_exit_cb(struct device *dev, void *data)
339 + struct device **curr = (struct device **)data;
341 + return 1; // stop iterating
346 +Name: blkvsc_drv_exit()
351 +void blkvsc_drv_exit(void)
353 + STORVSC_DRIVER_OBJECT *storvsc_drv_obj=&g_blkvsc_drv.drv_obj;
354 + struct driver_context *drv_ctx=&g_blkvsc_drv.drv_ctx;
356 + struct device *current_dev=NULL;
358 +#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
359 +#define driver_for_each_device(drv, start, data, fn) \
360 + struct list_head *ptr, *n; \
361 + list_for_each_safe(ptr, n, &((drv)->devices)) {\
362 + struct device *curr_dev;\
363 + curr_dev = list_entry(ptr, struct device, driver_list);\
364 + fn(curr_dev, data);\
366 +#endif // KERNEL_2_6_9
368 + DPRINT_ENTER(BLKVSC_DRV);
372 + current_dev = NULL;
375 + driver_for_each_device(&drv_ctx->driver, NULL, (void*)¤t_dev, blkvsc_drv_exit_cb);
377 + if (current_dev == NULL)
380 + // Initiate removal from the top-down
381 + device_unregister(current_dev);
384 + if (storvsc_drv_obj->Base.OnCleanup)
385 + storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);
387 + vmbus_child_driver_unregister(drv_ctx);
389 + DPRINT_EXIT(BLKVSC_DRV);
396 +Name: blkvsc_probe()
398 +Desc: Add a new device for this driver
401 +static int blkvsc_probe(struct device *device)
405 + struct driver_context *driver_ctx = driver_to_driver_context(device->driver);
406 + struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context*)driver_ctx;
407 + STORVSC_DRIVER_OBJECT* storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;
409 + struct device_context *device_ctx = device_to_device_context(device);
410 + DEVICE_OBJECT* device_obj = &device_ctx->device_obj;
412 + struct block_device_context *blkdev=NULL;
413 + STORVSC_DEVICE_INFO device_info;
417 + static int ide0_registered=0;
418 + static int ide1_registered=0;
420 + DPRINT_ENTER(BLKVSC_DRV);
422 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter");
424 + if (!storvsc_drv_obj->Base.OnDeviceAdd)
426 + DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set");
432 + blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
439 + INIT_LIST_HEAD(&blkdev->pending_list);
441 + // Initialize what we can here
442 + spin_lock_init(&blkdev->lock);
444 + ASSERT(sizeof(struct blkvsc_request_group) <= sizeof(struct blkvsc_request));
446 +#ifdef KERNEL_2_6_27
447 + blkdev->request_pool = kmem_cache_create(device_ctx->device.bus_id,
448 + sizeof(struct blkvsc_request) + storvsc_drv_obj->RequestExtSize, 0,
449 + SLAB_HWCACHE_ALIGN, NULL);
451 + blkdev->request_pool = kmem_cache_create(device_ctx->device.bus_id,
452 + sizeof(struct blkvsc_request) + storvsc_drv_obj->RequestExtSize, 0,
453 + SLAB_HWCACHE_ALIGN, NULL, NULL);
455 + if (!blkdev->request_pool)
462 + // Call to the vsc driver to add the device
463 + ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj, &device_info);
466 + DPRINT_ERR(BLKVSC_DRV, "unable to add blkvsc device");
470 + blkdev->device_ctx = device_ctx;
471 + blkdev->target = device_info.TargetId; // this identifies the device 0 or 1
472 + blkdev->path = device_info.PathId; // this identifies the ide ctrl 0 or 1
474 + device->driver_data = blkdev;
476 + // Calculate the major and device num
477 + if (blkdev->path == 0)
479 + major = IDE0_MAJOR;
480 + devnum = blkdev->path + blkdev->target; // 0 or 1
482 + if (!ide0_registered)
484 + ret = register_blkdev(major, "ide");
487 + DPRINT_ERR(BLKVSC_DRV, "register_blkdev() failed! ret %d", ret);
491 + ide0_registered = 1;
494 + else if (blkdev->path == 1)
496 + major = IDE1_MAJOR;
497 + devnum = blkdev->path + blkdev->target + 1; // 2 or 3
499 + if (!ide1_registered)
501 + ret = register_blkdev(major, "ide");
504 + DPRINT_ERR(BLKVSC_DRV, "register_blkdev() failed! ret %d", ret);
508 + ide1_registered = 1;
514 + DPRINT_ERR(BLKVSC_DRV, "invalid pathid");
519 + DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!", major);
521 + blkdev->gd = alloc_disk(BLKVSC_MINORS);
524 + DPRINT_ERR(BLKVSC_DRV, "register_blkdev() failed! ret %d", ret);
529 + blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);
531 + blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
532 + blk_queue_max_phys_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
533 + blk_queue_max_hw_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
534 + blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
535 + blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
536 + blk_queue_dma_alignment(blkdev->gd->queue, 511);
538 + blkdev->gd->major = major;
539 + if (devnum == 1 || devnum == 3)
540 + blkdev->gd->first_minor = BLKVSC_MINORS;
542 + blkdev->gd->first_minor = 0;
543 + blkdev->gd->fops = &block_ops;
544 + blkdev->gd->private_data = blkdev;
545 + sprintf(blkdev->gd->disk_name, "hd%c", 'a'+ devnum);
547 + blkvsc_do_inquiry(blkdev);
548 + if (blkdev->device_type == DVD_TYPE)
550 + set_disk_ro(blkdev->gd, 1);
551 + blkdev->gd->flags |= GENHD_FL_REMOVABLE;
552 + blkvsc_do_read_capacity(blkdev);
556 + blkvsc_do_read_capacity16(blkdev);
559 + set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
560 + blk_queue_hardsect_size(blkdev->gd->queue, blkdev->sector_size);
562 + add_disk(blkdev->gd);
564 + DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %llu sector_size %d", blkdev->gd->disk_name, blkdev->capacity, blkdev->sector_size);
569 + storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
574 + if (blkdev->request_pool)
576 + kmem_cache_destroy(blkdev->request_pool);
577 + blkdev->request_pool = NULL;
583 + DPRINT_EXIT(BLKVSC_DRV);
588 +static void blkvsc_shutdown(struct device *device)
590 + struct block_device_context *blkdev = (struct block_device_context*)device->driver_data;
591 + unsigned long flags;
596 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_shutdown - users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);
598 + spin_lock_irqsave(&blkdev->lock, flags);
600 + blkdev->shutting_down = 1;
602 + blk_stop_queue(blkdev->gd->queue);
604 + spin_unlock_irqrestore(&blkdev->lock, flags);
606 + while (blkdev->num_outstanding_reqs)
608 + DPRINT_INFO(STORVSC, "waiting for %d requests to complete...", blkdev->num_outstanding_reqs);
613 + blkvsc_do_flush(blkdev);
615 + spin_lock_irqsave(&blkdev->lock, flags);
617 + blkvsc_cancel_pending_reqs(blkdev);
619 + spin_unlock_irqrestore(&blkdev->lock, flags);
622 +static int blkvsc_do_flush(struct block_device_context *blkdev)
624 + struct blkvsc_request *blkvsc_req=NULL;
626 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_flush()\n");
628 + if (blkdev->device_type != HARDDISK_TYPE)
631 + blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
637 + memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
638 + init_waitqueue_head(&blkvsc_req->wevent);
639 + blkvsc_req->dev = blkdev;
640 + blkvsc_req->req = NULL;
641 + blkvsc_req->write = 0;
643 + blkvsc_req->request.DataBuffer.PfnArray[0] = 0;
644 + blkvsc_req->request.DataBuffer.Offset = 0;
645 + blkvsc_req->request.DataBuffer.Length = 0;
647 + blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
648 + blkvsc_req->cmd_len = 10;
650 + // Set this here since the completion routine may be invoked and completed before we return
651 + blkvsc_req->cond =0;
652 + blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
654 + wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
656 + kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
661 +// Do a scsi INQUIRY cmd here to get the device type (ie disk or dvd)
662 +static int blkvsc_do_inquiry(struct block_device_context *blkdev)
664 + struct blkvsc_request *blkvsc_req=NULL;
665 + struct page *page_buf;
666 + unsigned char *buf;
667 + unsigned char device_type;
669 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_inquiry()\n");
671 + blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
677 + memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
678 + page_buf = alloc_page(GFP_KERNEL);
681 + kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
685 + init_waitqueue_head(&blkvsc_req->wevent);
686 + blkvsc_req->dev = blkdev;
687 + blkvsc_req->req = NULL;
688 + blkvsc_req->write = 0;
690 + blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
691 + blkvsc_req->request.DataBuffer.Offset = 0;
692 + blkvsc_req->request.DataBuffer.Length = 64;
694 + blkvsc_req->cmnd[0] = INQUIRY;
695 + blkvsc_req->cmnd[1] = 0x1; // Get product data
696 + blkvsc_req->cmnd[2] = 0x83; // mode page 83
697 + blkvsc_req->cmnd[4] = 64;
698 + blkvsc_req->cmd_len = 6;
700 + // Set this here since the completion routine may be invoked and completed before we return
701 + blkvsc_req->cond =0;
703 + blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
705 + DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond);
707 + wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
709 + buf = kmap(page_buf);
711 + //PrintBytes(buf, 64);
713 + device_type = buf[0] & 0x1F;
715 + if (device_type == 0x0)
717 + blkdev->device_type = HARDDISK_TYPE;
719 + else if (device_type == 0x5)
721 + blkdev->device_type = DVD_TYPE;
725 + // TODO: this is currently unsupported device type
726 + blkdev->device_type = UNKNOWN_DEV_TYPE;
729 + DPRINT_DBG(BLKVSC_DRV, "device type %d \n", device_type);
731 + blkdev->device_id_len = buf[7];
732 + if (blkdev->device_id_len > 64)
733 + blkdev->device_id_len = 64;
735 + memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
736 + //PrintBytes(blkdev->device_id, blkdev->device_id_len);
740 + __free_page(page_buf);
742 + kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
747 +// Do a scsi READ_CAPACITY cmd here to get the size of the disk
748 +static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
750 + struct blkvsc_request *blkvsc_req=NULL;
751 + struct page *page_buf;
752 + unsigned char *buf;
753 + struct scsi_sense_hdr sense_hdr;
755 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity()\n");
757 + blkdev->sector_size = 0;
758 + blkdev->capacity = 0;
759 + blkdev->media_not_present = 0; // assume a disk is present
761 + blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
767 + memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
768 + page_buf = alloc_page(GFP_KERNEL);
771 + kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
775 + init_waitqueue_head(&blkvsc_req->wevent);
776 + blkvsc_req->dev = blkdev;
777 + blkvsc_req->req = NULL;
778 + blkvsc_req->write = 0;
780 + blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
781 + blkvsc_req->request.DataBuffer.Offset = 0;
782 + blkvsc_req->request.DataBuffer.Length = 8;
784 + blkvsc_req->cmnd[0] = READ_CAPACITY;
785 + blkvsc_req->cmd_len = 16;
787 + // Set this here since the completion routine may be invoked and completed before we return
788 + blkvsc_req->cond =0;
790 + blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
792 + DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond);
794 + wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
797 + if (blkvsc_req->request.Status)
799 + scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
801 + if (sense_hdr.asc == 0x3A) // Medium not present
803 + blkdev->media_not_present = 1;
808 + buf = kmap(page_buf);
811 + blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]) + 1;
812 + blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
816 + __free_page(page_buf);
818 + kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
824 +static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
826 + struct blkvsc_request *blkvsc_req=NULL;
827 + struct page *page_buf;
828 + unsigned char *buf;
829 + struct scsi_sense_hdr sense_hdr;
831 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity16()\n");
833 + blkdev->sector_size = 0;
834 + blkdev->capacity = 0;
835 + blkdev->media_not_present = 0; // assume a disk is present
837 + blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
843 + memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
844 + page_buf = alloc_page(GFP_KERNEL);
847 + kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
851 + init_waitqueue_head(&blkvsc_req->wevent);
852 + blkvsc_req->dev = blkdev;
853 + blkvsc_req->req = NULL;
854 + blkvsc_req->write = 0;
856 + blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
857 + blkvsc_req->request.DataBuffer.Offset = 0;
858 + blkvsc_req->request.DataBuffer.Length = 12;
860 + blkvsc_req->cmnd[0] = 0x9E; //READ_CAPACITY16;
861 + blkvsc_req->cmd_len = 16;
863 + // Set this here since the completion routine may be invoked and completed before we return
864 + blkvsc_req->cond =0;
866 + blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
868 + DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond);
870 + wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
873 + if (blkvsc_req->request.Status)
875 + scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
877 + if (sense_hdr.asc == 0x3A) // Medium not present
879 + blkdev->media_not_present = 1;
884 + buf = kmap(page_buf);
887 + blkdev->capacity = be64_to_cpu(*(unsigned long long*) &buf[0]) + 1;
888 + blkdev->sector_size = be32_to_cpu(*(unsigned int*)&buf[8]);
890 + //blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]) + 1;
891 + //blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
895 + __free_page(page_buf);
897 + kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
904 +Name: blkvsc_remove()
906 +Desc: Callback when our device is removed
909 +static int blkvsc_remove(struct device *device)
913 + struct driver_context *driver_ctx = driver_to_driver_context(device->driver);
914 + struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context*)driver_ctx;
915 + STORVSC_DRIVER_OBJECT* storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;
917 + struct device_context *device_ctx = device_to_device_context(device);
918 + DEVICE_OBJECT* device_obj = &device_ctx->device_obj;
919 + struct block_device_context *blkdev = (struct block_device_context*)device->driver_data;
920 + unsigned long flags;
922 + DPRINT_ENTER(BLKVSC_DRV);
924 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n");
926 + if (!storvsc_drv_obj->Base.OnDeviceRemove)
928 + DPRINT_EXIT(BLKVSC_DRV);
932 + // Call to the vsc driver to let it know that the device is being removed
933 + ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
937 + DPRINT_ERR(BLKVSC_DRV, "unable to remove blkvsc device (ret %d)", ret);
940 + // Get to a known state
941 + spin_lock_irqsave(&blkdev->lock, flags);
943 + blkdev->shutting_down = 1;
945 + blk_stop_queue(blkdev->gd->queue);
947 + spin_unlock_irqrestore(&blkdev->lock, flags);
949 + while (blkdev->num_outstanding_reqs)
951 + DPRINT_INFO(STORVSC, "waiting for %d requests to complete...", blkdev->num_outstanding_reqs);
956 + blkvsc_do_flush(blkdev);
958 + spin_lock_irqsave(&blkdev->lock, flags);
960 + blkvsc_cancel_pending_reqs(blkdev);
962 + spin_unlock_irqrestore(&blkdev->lock, flags);
964 + blk_cleanup_queue(blkdev->gd->queue);
966 + del_gendisk(blkdev->gd);
968 + kmem_cache_destroy(blkdev->request_pool);
972 + DPRINT_EXIT(BLKVSC_DRV);
977 +static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
979 + ASSERT(blkvsc_req->req);
980 + ASSERT(blkvsc_req->sector_count <= (MAX_MULTIPAGE_BUFFER_COUNT*8));
982 + blkvsc_req->cmd_len = 16;
984 + if (blkvsc_req->sector_start > 0xffffffff)
986 + if (rq_data_dir(blkvsc_req->req))
988 + blkvsc_req->write = 1;
989 + blkvsc_req->cmnd[0] = WRITE_16;
993 + blkvsc_req->write = 0;
994 + blkvsc_req->cmnd[0] = READ_16;
997 + blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;
999 + *(unsigned long long*)&blkvsc_req->cmnd[2] = cpu_to_be64(blkvsc_req->sector_start);
1000 + *(unsigned int*)&blkvsc_req->cmnd[10] = cpu_to_be32(blkvsc_req->sector_count);
1002 + else if ((blkvsc_req->sector_count > 0xff) || (blkvsc_req->sector_start > 0x1fffff))
1004 + if (rq_data_dir(blkvsc_req->req))
1006 + blkvsc_req->write = 1;
1007 + blkvsc_req->cmnd[0] = WRITE_10;
1011 + blkvsc_req->write = 0;
1012 + blkvsc_req->cmnd[0] = READ_10;
1015 + blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;
1017 + *(unsigned int *)&blkvsc_req->cmnd[2] = cpu_to_be32(blkvsc_req->sector_start);
1018 + *(unsigned short*)&blkvsc_req->cmnd[7] = cpu_to_be16(blkvsc_req->sector_count);
1022 + if (rq_data_dir(blkvsc_req->req))
1024 + blkvsc_req->write = 1;
1025 + blkvsc_req->cmnd[0] = WRITE_6;
1029 + blkvsc_req->write = 0;
1030 + blkvsc_req->cmnd[0] = READ_6;
1033 + *(unsigned int *)&blkvsc_req->cmnd[1] = cpu_to_be32(blkvsc_req->sector_start) >> 8;
1034 + blkvsc_req->cmnd[1] &= 0x1f;
1035 + blkvsc_req->cmnd[4] = (unsigned char) blkvsc_req->sector_count;
1039 +static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req, void (*request_completion)(STORVSC_REQUEST*) )
1041 + struct block_device_context *blkdev = blkvsc_req->dev;
1042 + struct device_context *device_ctx=blkdev->device_ctx;
1043 + struct driver_context *driver_ctx = driver_to_driver_context(device_ctx->device.driver);
1044 + struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context*)driver_ctx;
1045 + STORVSC_DRIVER_OBJECT* storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;
1048 + STORVSC_REQUEST *storvsc_req;
1050 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - req %p type %s start_sector %llu count %d offset %d len %d\n",
1052 + (blkvsc_req->write)?"WRITE":"READ",
1053 + blkvsc_req->sector_start,
1054 + blkvsc_req->sector_count,
1055 + blkvsc_req->request.DataBuffer.Offset,
1056 + blkvsc_req->request.DataBuffer.Length);
1058 + /*for (i=0; i < (blkvsc_req->request.DataBuffer.Length >> 12); i++)
1060 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - req %p pfn[%d] %llx\n",
1063 + blkvsc_req->request.DataBuffer.PfnArray[i]);
1066 + storvsc_req = &blkvsc_req->request;
1067 + storvsc_req->Extension = (void*)((unsigned long)blkvsc_req + sizeof(struct blkvsc_request));
1069 + storvsc_req->Type = blkvsc_req->write? WRITE_TYPE : READ_TYPE;
1071 + storvsc_req->OnIOCompletion = request_completion;
1072 + storvsc_req->Context = blkvsc_req;
1074 + storvsc_req->Host = blkdev->port;
1075 + storvsc_req->Bus = blkdev->path;
1076 + storvsc_req->TargetId = blkdev->target;
1077 + storvsc_req->LunId = 0; // this is not really used at all
1079 + storvsc_req->CdbLen = blkvsc_req->cmd_len;
1080 + storvsc_req->Cdb = blkvsc_req->cmnd;
1082 + storvsc_req->SenseBuffer = blkvsc_req->sense_buffer;
1083 + storvsc_req->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;
1085 + ret = storvsc_drv_obj->OnIORequest(&blkdev->device_ctx->device_obj, &blkvsc_req->request);
1088 + blkdev->num_outstanding_reqs++;
1095 +// We break the request into 1 or more blkvsc_requests and submit them.
1096 +// If we can't submit them all, we put them on the pending_list. The
1097 +// blkvsc_request() will work on the pending_list.
1099 +static int blkvsc_do_request(struct block_device_context *blkdev, struct request *req)
1101 + struct bio *bio=NULL;
1102 + struct bio_vec *bvec=NULL;
1103 + struct bio_vec *prev_bvec=NULL;
1105 + struct blkvsc_request *blkvsc_req=NULL;
1106 + struct blkvsc_request *tmp;
1107 + int databuf_idx=0;
1110 + sector_t start_sector;
1111 + unsigned long num_sectors = 0;
1114 + struct blkvsc_request_group *group=NULL;
1116 + DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %llu \n", blkdev, req, req->sector);
1118 + // Create a group to tie req to list of blkvsc_reqs
1119 + group = (struct blkvsc_request_group*)kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
1125 + INIT_LIST_HEAD(&group->blkvsc_req_list);
1126 + group->outstanding = group->status = 0;
1128 + start_sector = req->sector;
1130 + // foreach bio in the request
1132 + for (bio = req->bio; bio; bio = bio->bi_next)
1134 + // Map this bio into an existing or new storvsc request
1135 + bio_for_each_segment (bvec, bio, seg_idx)
1137 + DPRINT_DBG(BLKVSC_DRV, "bio_for_each_segment() - req %p bio %p bvec %p seg_idx %d databuf_idx %d\n",
1138 + req, bio, bvec, seg_idx, databuf_idx);
1140 + // Get a new storvsc request
1141 + if ( (!blkvsc_req) || // 1st-time
1142 + (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT) ||
1143 + (bvec->bv_offset != 0) || // hole at the begin of page
1144 + (prev_bvec && (prev_bvec->bv_len != PAGE_SIZE)) ) // hole at the end of page
1146 + // submit the prev one
1149 + blkvsc_req->sector_start = start_sector;
1150 + sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));
1152 + blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);
1154 + blkvsc_init_rw(blkvsc_req);
1157 + // Create new blkvsc_req to represent the current bvec
1158 + blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
1161 + // free up everything
1162 + list_for_each_entry_safe(blkvsc_req, tmp, &group->blkvsc_req_list, req_entry)
1164 + list_del(&blkvsc_req->req_entry);
1165 + kmem_cache_free(blkdev->request_pool, blkvsc_req);
1168 + kmem_cache_free(blkdev->request_pool, group);
1172 + memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
1174 + blkvsc_req->dev = blkdev;
1175 + blkvsc_req->req = req;
1176 + blkvsc_req->request.DataBuffer.Offset = bvec->bv_offset;
1177 + blkvsc_req->request.DataBuffer.Length = 0;
1179 + // Add to the group
1180 + blkvsc_req->group = group;
1181 + blkvsc_req->group->outstanding++;
1182 + list_add_tail(&blkvsc_req->req_entry, &blkvsc_req->group->blkvsc_req_list);
1184 + start_sector += num_sectors;
1189 + // Add the curr bvec/segment to the curr blkvsc_req
1190 + blkvsc_req->request.DataBuffer.PfnArray[databuf_idx] = page_to_pfn(bvec->bv_page);
1191 + blkvsc_req->request.DataBuffer.Length += bvec->bv_len;
1196 + num_sectors += bvec->bv_len >> 9;
1198 + } // bio_for_each_segment
1200 + } // rq_for_each_bio
1202 + // Handle the last one
1205 + DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p group %p count %d\n", blkdev, req, blkvsc_req->group, blkvsc_req->group->outstanding);
1207 + blkvsc_req->sector_start = start_sector;
1208 + sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));
1210 + blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);
1212 + blkvsc_init_rw(blkvsc_req);
1215 + list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry)
1219 + DPRINT_DBG(BLKVSC_DRV, "adding blkvsc_req to pending_list - blkvsc_req %p start_sect %llu sect_count %d (%llu %d)\n",
1220 + blkvsc_req, blkvsc_req->sector_start, blkvsc_req->sector_count, start_sector, num_sectors);
1222 + list_add_tail(&blkvsc_req->pend_entry, &blkdev->pending_list);
1226 + ret = blkvsc_submit_request(blkvsc_req, blkvsc_request_completion);
1230 + list_add_tail(&blkvsc_req->pend_entry, &blkdev->pending_list);
1233 + DPRINT_DBG(BLKVSC_DRV, "submitted blkvsc_req %p start_sect %llu sect_count %d (%llu %d) ret %d\n",
1234 + blkvsc_req, blkvsc_req->sector_start, blkvsc_req->sector_count, start_sector, num_sectors, ret);
+ // Completion callback for internally-generated commands (flush, read
+ // capacity, inquiry) submitted via blkvsc_submit_request().  On failure it
+ // decodes the SCSI sense buffer for diagnostics, then wakes the thread that
+ // is sleeping on blkvsc_req->wevent waiting for this command to finish.
+ // NOTE(review): runs in the storvsc completion context; it touches
+ // blkdev->num_outstanding_reqs without taking blkdev->lock — presumably safe
+ // by construction here, but confirm against the submit path.
1241 +static void blkvsc_cmd_completion(STORVSC_REQUEST* request)
1243 +	struct blkvsc_request *blkvsc_req=(struct blkvsc_request*)request->Context;
1244 +	struct block_device_context *blkdev = (struct block_device_context*)blkvsc_req->dev;
1246 +	struct scsi_sense_hdr sense_hdr;
1248 +	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cmd_completion() - req %p\n", blkvsc_req);
1250 +	blkdev->num_outstanding_reqs--;
+ // Non-zero Status means the device reported an error; normalize and print
+ // the sense data so the failure reason shows up in the kernel log.
1252 +	if (blkvsc_req->request.Status)
1254 +		if (scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr))
1256 +			scsi_print_sense_hdr("blkvsc", &sense_hdr);
+ // Signal completion: the waiter checks ->cond after being woken.
1260 +	blkvsc_req->cond =1;
1261 +	wake_up_interruptible(&blkvsc_req->wevent);
+ // Completion callback for normal READ/WRITE I/O.  A single block-layer
+ // struct request may have been split into several blkvsc_requests that share
+ // one "group"; the block-layer request is only completed once every member
+ // of the group has finished, so end_that_request_first() is never called
+ // out of order.
1264 +static void blkvsc_request_completion(STORVSC_REQUEST* request)
1266 +	struct blkvsc_request *blkvsc_req=(struct blkvsc_request*)request->Context;
1267 +	struct block_device_context *blkdev = (struct block_device_context*)blkvsc_req->dev;
1268 +	unsigned long flags;
1269 +	struct blkvsc_request *comp_req, *tmp;
1271 +	ASSERT(blkvsc_req->group);
1273 +	DPRINT_DBG(BLKVSC_DRV, "blkdev %p blkvsc_req %p group %p type %s sect_start %llu sect_count %d len %d group outstd %d total outstd %d\n",
1276 +	blkvsc_req->group,
1277 +	(blkvsc_req->write)?"WRITE":"READ",
1278 +	blkvsc_req->sector_start,
1279 +	blkvsc_req->sector_count,
1280 +	blkvsc_req->request.DataBuffer.Length,
1281 +	blkvsc_req->group->outstanding,
1282 +	blkdev->num_outstanding_reqs);
+ // All completion bookkeeping happens under the device lock.
1284 +	spin_lock_irqsave(&blkdev->lock, flags);
1286 +	blkdev->num_outstanding_reqs--;
1287 +	blkvsc_req->group->outstanding--;
1289 +	// Only start processing when all the blkvsc_reqs are completed. This guarantees no out-of-order
1290 +	// blkvsc_req completion when calling end_that_request_first()
1291 +	if (blkvsc_req->group->outstanding == 0)
1293 +		list_for_each_entry_safe(comp_req, tmp, &blkvsc_req->group->blkvsc_req_list, req_entry)
1295 +			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p sect_start %llu sect_count %d \n",
1297 +			comp_req->sector_start,
1298 +			comp_req->sector_count);
1300 +			list_del(&comp_req->req_entry);
+ // Pre-2.6.27 kernels lack __blk_end_request(); use the older two-step
+ // end_that_request_first()/end_that_request_last() API there.  Both
+ // branches free the group allocation once the request is fully done.
1302 +#ifdef KERNEL_2_6_27
1303 +			if (!__blk_end_request(
1305 +			(!comp_req->request.Status ? 0: -EIO),
1306 +			comp_req->sector_count * blkdev->sector_size))
1308 +				//All the sectors have been xferred ie the request is done
1309 +				DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n", comp_req->req);
1310 +				kmem_cache_free(blkdev->request_pool, comp_req->group);
1313 +			if (!end_that_request_first(comp_req->req, !comp_req->request.Status, (comp_req->sector_count * (blkdev->sector_size >> 9))))
1315 +				//All the sectors have been xferred ie the request is done
1316 +				DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n", comp_req->req);
1318 +				end_that_request_last(comp_req->req, !comp_req->request.Status);
1320 +				kmem_cache_free(blkdev->request_pool, comp_req->group);
1324 +			kmem_cache_free(blkdev->request_pool, comp_req);
+ // With a slot now free, push any deferred work and restart the queue
+ // that blkvsc_request() may have stopped when resources ran out.
1327 +		if (!blkdev->shutting_down)
1329 +			blkvsc_do_pending_reqs(blkdev);
1330 +			blk_start_queue(blkdev->gd->queue);
1331 +			blkvsc_request(blkdev->gd->queue);
1335 +	spin_unlock_irqrestore(&blkdev->lock, flags);
+ // Cancel everything queued on blkdev->pending_list (used on shutdown/remove).
+ // For each pending request: first complete any already-finished siblings in
+ // its group (in order, up to the pending one), then fail the pending request
+ // itself with -EIO and release its memory back to the request pool.
1338 +static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
1340 +	struct blkvsc_request *pend_req, *tmp;
1341 +	struct blkvsc_request *comp_req, *tmp2;
1345 +	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs()");
1347 +	// Flush the pending list first
1348 +	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, pend_entry)
1350 +		// The pend_req could be part of a partially completed request. If so, complete those req first
1351 +		// until we hit the pend_req
1352 +		list_for_each_entry_safe(comp_req, tmp2, &pend_req->group->blkvsc_req_list, req_entry)
1354 +			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p sect_start %llu sect_count %d \n",
1356 +			comp_req->sector_start,
1357 +			comp_req->sector_count);
+ // Stop once we reach the pending request itself; it is handled below.
1359 +			if (comp_req == pend_req)
1362 +			list_del(&comp_req->req_entry);
1364 +			if (comp_req->req)
1366 +#ifdef KERNEL_2_6_27
1367 +				ret = __blk_end_request(
1369 +				(!comp_req->request.Status ? 0 : -EIO),
1370 +				comp_req->sector_count * blkdev->sector_size);
1372 +				ret = end_that_request_first(comp_req->req, !comp_req->request.Status, (comp_req->sector_count * (blkdev->sector_size >> 9)));
1377 +			kmem_cache_free(blkdev->request_pool, comp_req);
1380 +		DPRINT_DBG(BLKVSC_DRV, "cancelling pending request - %p\n", pend_req);
1382 +		list_del(&pend_req->pend_entry);
1384 +		list_del(&pend_req->req_entry);
+ // NOTE(review): testing comp_req->req here works only because the inner
+ // loop above breaks exactly when comp_req == pend_req, so the two pointers
+ // are equal at this point; writing pend_req->req would be clearer. Confirm
+ // the loop always encounters pend_req in its own group's list.
1386 +		if (comp_req->req)
1388 +#ifdef KERNEL_2_6_27
1389 +			if (!__blk_end_request(
1392 +			pend_req->sector_count * blkdev->sector_size))
1394 +				//All the sectors have been xferred ie the request is done
1395 +				DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs() - req %p COMPLETED\n", pend_req->req);
1396 +				kmem_cache_free(blkdev->request_pool, pend_req->group);
+ // Older-kernel path: fail the request (uptodate = 0) and finish it.
1399 +			if (!end_that_request_first(pend_req->req, 0, (pend_req->sector_count * (blkdev->sector_size >> 9))))
1401 +				//All the sectors have been xferred ie the request is done
1402 +				DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs() - req %p COMPLETED\n", pend_req->req);
1404 +				end_that_request_last(pend_req->req, 0);
1406 +				kmem_cache_free(blkdev->request_pool, pend_req->group);
1411 +		kmem_cache_free(blkdev->request_pool, pend_req);
+ // Drain blkdev->pending_list: try to (re)submit each deferred request to the
+ // storvsc layer.  Each successfully submitted entry is removed from the
+ // pending list; completion is then reported via blkvsc_request_completion().
+ // NOTE(review): the handling of a failed blkvsc_submit_request() (lines
+ // elided between 1427 and 1434 in this hunk) is not visible here — confirm
+ // whether submission failure leaves the entry queued and stops the drain.
1417 +static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
1419 +	struct blkvsc_request *pend_req, *tmp;
1422 +	// Flush the pending list first
1423 +	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, pend_entry)
1425 +		DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n", pend_req);
1427 +		ret = blkvsc_submit_request(pend_req, blkvsc_request_completion);
1434 +		list_del(&pend_req->pend_entry);
+ // Block-layer request_fn: pulls requests off the elevator queue and hands
+ // them to blkvsc_do_request().  Called by the block core with the queue lock
+ // held.  When the transport runs out of room or memory, the queue is stopped;
+ // blkvsc_request_completion() restarts it once resources free up.
1441 +static void blkvsc_request(struct request_queue *queue)
1443 +	struct block_device_context *blkdev = NULL;
1444 +	struct request *req;
1447 +	DPRINT_DBG(BLKVSC_DRV, "- enter \n");
1448 +	while ((req = elv_next_request(queue)) != NULL)
1450 +		DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req);
1452 +		blkdev = req->rq_disk->private_data;
+ // Reject non-filesystem requests, and anything arriving while the device
+ // is shutting down or has no media.
1453 +		if (blkdev->shutting_down || !blk_fs_request(req) || blkdev->media_not_present) {
1454 +			end_request(req, 0);
+ // Preserve ordering: previously deferred requests must go out before any
+ // new one; if the backlog cannot be flushed, stop and retry later.
1458 +		ret = blkvsc_do_pending_reqs(blkdev);
1462 +			DPRINT_DBG(BLKVSC_DRV, "- stop queue - pending_list not empty\n");
1463 +			blk_stop_queue(queue);
1467 +		blkdev_dequeue_request(req);
1469 +		ret = blkvsc_do_request(blkdev, req);
1472 +			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no room\n");
1473 +			blk_stop_queue(queue);
+ // Allocation failure: put the request back and stop until memory frees up.
1478 +			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no mem\n");
1479 +			blk_requeue_request(queue, req);
1480 +			blk_stop_queue(queue);
+ // block_device_operations .open: on first open of a DVD device, drop the
+ // lock and run check_disk_change() so newly inserted media is detected
+ // (check_disk_change can sleep, hence the unlock/relock).  The user-count
+ // increment is on a line elided from this hunk (between 1498 and 1503).
1486 +static int blkvsc_open(struct inode *inode, struct file *filep)
1488 +	struct block_device_context *blkdev = inode->i_bdev->bd_disk->private_data;
1490 +	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);
1492 +	spin_lock(&blkdev->lock);
1494 +	if (!blkdev->users && blkdev->device_type == DVD_TYPE)
1496 +		spin_unlock(&blkdev->lock);
1497 +		check_disk_change(inode->i_bdev);
1498 +		spin_lock(&blkdev->lock);
1503 +	spin_unlock(&blkdev->lock);
+ // block_device_operations .release: on last close, flush the device cache.
+ // The lock is dropped around blkvsc_do_flush() because that path sleeps
+ // waiting for the flush command to complete.  The user-count decrement is on
+ // a line elided from this hunk (between 1518 and 1523).
1507 +static int blkvsc_release(struct inode *inode, struct file *filep)
1509 +	struct block_device_context *blkdev = inode->i_bdev->bd_disk->private_data;
1511 +	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);
1513 +	spin_lock(&blkdev->lock);
1514 +	if (blkdev->users == 1)
1516 +		spin_unlock(&blkdev->lock);
1517 +		blkvsc_do_flush(blkdev);
1518 +		spin_lock(&blkdev->lock);
1523 +	spin_unlock(&blkdev->lock);
+ // gendisk .media_changed hook.  NOTE(review): the return statement is on a
+ // line elided from this hunk — cannot confirm from here what it reports.
1527 +static int blkvsc_media_changed(struct gendisk *gd)
1529 +	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
+ // gendisk .revalidate_disk hook: for DVD devices, re-read the capacity from
+ // the host and republish it to the block layer.  set_capacity() takes
+ // 512-byte sectors, hence the sector_size/512 scaling of ->capacity.
1534 +static int blkvsc_revalidate_disk(struct gendisk *gd)
1536 +	struct block_device_context *blkdev = gd->private_data;
1538 +	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
1540 +	if (blkdev->device_type == DVD_TYPE)
1542 +		blkvsc_do_read_capacity(blkdev);
1543 +		set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
1544 +		blk_queue_hardsect_size(gd->queue, blkdev->sector_size);
+ // block_device_operations .getgeo: synthesize a fake CHS geometry for tools
+ // (fdisk etc.) from the disk's total sector count.  Capacity is clamped to
+ // the classic CHS maximum, then sectors-per-track/heads are chosen by
+ // successively larger layouts until cylinders fit in the addressable range.
+ // sector_div() is used throughout because sector_t may be 64-bit.
1549 +int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
1551 +	sector_t total_sectors = get_capacity(bd->bd_disk);
1552 +	sector_t cylinder_times_heads=0;
1555 +	int sectors_per_track=0;
+ // Clamp to the largest capacity CHS can express (65535 x 16 x 255).
1560 +	if (total_sectors > (65535 * 16 * 255)) {
1561 +		total_sectors = (65535 * 16 * 255);
+ // Large disks: jump straight to the maximal 255-sector/track layout.
1564 +	if (total_sectors >= (65535 * 16 * 63)) {
1565 +		sectors_per_track = 255;
1568 +		cylinder_times_heads = total_sectors;
1569 +		rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
+ // Smaller disks: start from 17 sectors/track and grow the layout as needed.
1573 +		sectors_per_track = 17;
1575 +		cylinder_times_heads = total_sectors;
1576 +		rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
1578 +		temp = cylinder_times_heads + 1023;
1579 +		rem = sector_div(temp, 1024); // sector_div stores the quotient in temp
+ // If cylinders would overflow 10 bits with this head count, widen the
+ // track to 31 sectors and recompute.
1587 +		if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
1588 +			sectors_per_track = 31;
1591 +			cylinder_times_heads = total_sectors;
1592 +			rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
+ // Still too many cylinders: fall back to the largest 63-sector layout.
1595 +		if (cylinder_times_heads >= (heads * 1024)) {
1596 +			sectors_per_track = 63;
1599 +			cylinder_times_heads = total_sectors;
1600 +			rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
1604 +	temp = cylinder_times_heads;
1605 +	rem = sector_div(temp, heads); // sector_div stores the quotient in temp
1608 +	hg->heads = heads;
1609 +	hg->sectors = sectors_per_track;
1610 +	hg->cylinders = cylinders;
1612 +	DPRINT_INFO(BLKVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads, sectors_per_track);
+ // block_device_operations .ioctl handler.  Only a stubbed-out
+ // HDIO_GET_IDENTITY case is visible in this hunk; it was disabled pending a
+ // proper struct hd_driveid layout instead of the raw device GUID.
1617 +static int blkvsc_ioctl(struct inode *inode, struct file *filep, unsigned cmd, unsigned long arg)
1619 +	struct block_device *bd = inode->i_bdev;
1620 +	struct block_device_context *blkdev = bd->bd_disk->private_data;
1625 +	// TODO: I think there is certain format for HDIO_GET_IDENTITY rather than just
1626 +	// a GUID. Commented it out for now.
1627 +	/*case HDIO_GET_IDENTITY:
1628 +	DPRINT_INFO(BLKVSC_DRV, "HDIO_GET_IDENTITY\n");
1630 +	if (copy_to_user((void __user *)arg, blkdev->device_id, blkdev->device_id_len))
1645 +MODULE_LICENSE("GPL");
+ // Module entry point: registers the blkvsc driver with the vmbus framework.
+ // Requires a 64-bit sector_t (CONFIG_LBD) so large virtual disks work; the
+ // ASSERT enforces that at load time rather than failing subtly later.
1647 +static int __init blkvsc_init(void)
1651 +	ASSERT(sizeof(sector_t) == 8); // Make sure CONFIG_LBD is set
1653 +	DPRINT_ENTER(BLKVSC_DRV);
1655 +	DPRINT_INFO(BLKVSC_DRV, "Blkvsc initializing....");
1657 +	ret = blkvsc_drv_init(BlkVscInitialize);
1659 +	DPRINT_EXIT(BLKVSC_DRV);
1664 +static void __exit blkvsc_exit(void)
1666 + DPRINT_ENTER(BLKVSC_DRV);
1668 + blkvsc_drv_exit();
1670 + DPRINT_ENTER(BLKVSC_DRV);
+ // Ring-buffer size is settable at module load time; S_IRUGO exposes the
+ // chosen value read-only through sysfs.
1673 +module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
1675 +module_init(blkvsc_init);
1676 +module_exit(blkvsc_exit);