1 From 582e26118ab754a3bca9b98351cb874f22b76ffd Mon Sep 17 00:00:00 2001
2 From: Hank Janssen <hjanssen@microsoft.com>
3 Date: Mon, 13 Jul 2009 15:33:02 -0700
4 Subject: Staging: hv: add the Hyper-V virtual block driver
5
6 From: Hank Janssen <hjanssen@microsoft.com>
7
This is the virtual block driver for running Linux on top of Hyper-V.
9
10 Signed-off-by: Hank Janssen <hjanssen@microsoft.com>
11 Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
12 Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
13 ---
14 drivers/staging/hv/BlkVsc.c | 107 ++
15 drivers/staging/hv/blkvsc_drv.c | 1547 ++++++++++++++++++++++++++++++++++++++++
16 2 files changed, 1654 insertions(+)
create mode 100644 drivers/staging/hv/BlkVsc.c
create mode 100644 drivers/staging/hv/blkvsc_drv.c
18
19 --- /dev/null
20 +++ b/drivers/staging/hv/BlkVsc.c
21 @@ -0,0 +1,107 @@
22 +/*
23 + *
24 + * Copyright (c) 2009, Microsoft Corporation.
25 + *
26 + * This program is free software; you can redistribute it and/or modify it
27 + * under the terms and conditions of the GNU General Public License,
28 + * version 2, as published by the Free Software Foundation.
29 + *
30 + * This program is distributed in the hope it will be useful, but WITHOUT
31 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
32 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
33 + * more details.
34 + *
35 + * You should have received a copy of the GNU General Public License along with
36 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
37 + * Place - Suite 330, Boston, MA 02111-1307 USA.
38 + *
39 + * Authors:
40 + * Hank Janssen <hjanssen@microsoft.com>
41 + *
42 + */
43 +
44 +
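+// blkvsc reuses the storvsc implementation wholesale by including its source
+// file directly; BlkVscInitialize() below only overrides the pieces that differ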
45 +#include "../storvsc/StorVsc.c"
46 +
47 +static const char* gBlkDriverName="blkvsc";
48 +
49 +//{32412632-86cb-44a2-9b5c-50d1417354f5}
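+// (the byte array below is the GUID's on-the-wire encoding: the first three
+//  fields are little-endian, the last 8 bytes are kept in order)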
50 +static const GUID gBlkVscDeviceType={
51 + .Data = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5}
52 +};
53 +
54 +// Static routines
55 +static int
56 +BlkVscOnDeviceAdd(
57 + DEVICE_OBJECT *Device,
58 + void *AdditionalInfo
59 + );
60 +
61 +
62 +int
63 +BlkVscInitialize(
64 + DRIVER_OBJECT *Driver
65 + )
66 +{
67 + STORVSC_DRIVER_OBJECT* storDriver = (STORVSC_DRIVER_OBJECT*)Driver;
68 + int ret=0;
69 +
70 + DPRINT_ENTER(BLKVSC);
71 +
72 + // Make sure we are at least 2 pages since 1 page is used for control
73 + ASSERT(storDriver->RingBufferSize >= (PAGE_SIZE << 1));
74 +
75 + Driver->name = gBlkDriverName;
76 + memcpy(&Driver->deviceType, &gBlkVscDeviceType, sizeof(GUID));
77 +
78 + storDriver->RequestExtSize = sizeof(STORVSC_REQUEST_EXTENSION);
79 + // Divide the ring buffer data size (which is 1 page less than the ring buffer size since that page is reserved for the ring buffer indices)
+	// by the max request size (which is VMBUS_CHANNEL_PACKET_MULTIPAGE_BUFFER + VSTOR_PACKET + UINT64)
81 + storDriver->MaxOutstandingRequestsPerChannel =
82 + ((storDriver->RingBufferSize - PAGE_SIZE) / ALIGN_UP(MAX_MULTIPAGE_BUFFER_PACKET + sizeof(VSTOR_PACKET) + sizeof(UINT64),sizeof(UINT64)));
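+	// e.g. assuming a 4K PAGE_SIZE: a ring buffer of N pages leaves
+	// (N - 1) * 4096 bytes of data area, so an aligned per-request footprint
+	// of R bytes allows ((N - 1) * 4096) / R outstanding requests per channel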
83 +
84 + DPRINT_INFO(BLKVSC, "max io outstd %u", storDriver->MaxOutstandingRequestsPerChannel);
85 +
86 + // Setup the dispatch table
87 + storDriver->Base.OnDeviceAdd = BlkVscOnDeviceAdd;
88 + storDriver->Base.OnDeviceRemove = StorVscOnDeviceRemove;
89 + storDriver->Base.OnCleanup = StorVscOnCleanup;
90 +
91 + storDriver->OnIORequest = StorVscOnIORequest;
92 +
93 + DPRINT_EXIT(BLKVSC);
94 +
95 + return ret;
96 +}
97 +
98 +int
99 +BlkVscOnDeviceAdd(
100 + DEVICE_OBJECT *Device,
101 + void *AdditionalInfo
102 + )
103 +{
104 + int ret=0;
105 + STORVSC_DEVICE_INFO *deviceInfo = (STORVSC_DEVICE_INFO*)AdditionalInfo;
106 +
107 + DPRINT_ENTER(BLKVSC);
108 +
109 + ret = StorVscOnDeviceAdd(Device, AdditionalInfo);
110 +
111 + if (ret != 0)
112 + {
113 + DPRINT_EXIT(BLKVSC);
114 +
115 + return ret;
116 + }
117 +
118 + // We need to use the device instance guid to set the path and target id. For IDE devices, the
119 + // device instance id is formatted as <bus id> - <device id> - 8899 - 000000000000.
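+	// (Data[0..3] below is the little-endian Data1 field of the instance GUID,
+	// i.e. the bus id; Data[4..5] is the little-endian Data2 field, i.e. the
+	// device id)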
120 + deviceInfo->PathId = Device->deviceInstance.Data[3] << 24 | Device->deviceInstance.Data[2] << 16 |
121 + Device->deviceInstance.Data[1] << 8 |Device->deviceInstance.Data[0];
122 +
123 + deviceInfo->TargetId = Device->deviceInstance.Data[5] << 8 | Device->deviceInstance.Data[4];
124 +
125 + DPRINT_EXIT(BLKVSC);
126 +
127 + return ret;
128 +}
129 --- /dev/null
130 +++ b/drivers/staging/hv/blkvsc_drv.c
131 @@ -0,0 +1,1547 @@
132 +/*
133 + *
134 + * Copyright (c) 2009, Microsoft Corporation.
135 + *
136 + * This program is free software; you can redistribute it and/or modify it
137 + * under the terms and conditions of the GNU General Public License,
138 + * version 2, as published by the Free Software Foundation.
139 + *
140 + * This program is distributed in the hope it will be useful, but WITHOUT
141 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
142 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
143 + * more details.
144 + *
145 + * You should have received a copy of the GNU General Public License along with
146 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
147 + * Place - Suite 330, Boston, MA 02111-1307 USA.
148 + *
149 + * Authors:
150 + * Hank Janssen <hjanssen@microsoft.com>
151 + *
152 + */
153 +
154 +
155 +#include <linux/init.h>
156 +#include <linux/module.h>
157 +#include <linux/device.h>
158 +#include <linux/blkdev.h>
159 +#include <linux/major.h>
160 +#include <linux/delay.h>
161 +#include <linux/hdreg.h>
162 +
163 +#include <scsi/scsi.h>
164 +#include <scsi/scsi_cmnd.h>
165 +#include <scsi/scsi_eh.h>
166 +#include <scsi/scsi_dbg.h>
167 +
168 +#include "logging.h"
169 +#include "vmbus.h"
170 +
171 +#include "StorVscApi.h"
172 +
173 +//
174 +// #defines
175 +//
176 +#define BLKVSC_MINORS 64
177 +
178 +//
179 +// Data types
180 +//
181 +enum blkvsc_device_type {
182 + UNKNOWN_DEV_TYPE,
183 + HARDDISK_TYPE,
184 + DVD_TYPE,
185 +};
186 +
+// This group ties a struct request and its struct blkvsc_request/STORVSC_REQUESTs together.
+// A struct request may be represented by one or more struct blkvsc_requests.
189 +struct blkvsc_request_group {
190 + int outstanding;
191 + int status;
192 +
193 + struct list_head blkvsc_req_list; // list of blkvsc_requests
194 +};
195 +
196 +
197 +struct blkvsc_request {
198 + struct list_head req_entry; // blkvsc_request_group.blkvsc_req_list
199 +
200 + struct list_head pend_entry; // block_device_context.pending_list
201 +
202 + struct request *req; // This may be null if we generate a request internally
203 + struct block_device_context *dev;
+	struct blkvsc_request_group *group;	// The group this request is part of. May be NULL
205 +
206 + wait_queue_head_t wevent;
207 + int cond;
208 +
209 + int write;
210 + sector_t sector_start;
211 + unsigned long sector_count;
212 +
213 + unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
214 + unsigned char cmd_len;
215 + unsigned char cmnd[MAX_COMMAND_SIZE];
216 +
217 + STORVSC_REQUEST request;
+	// !!!DO NOT ADD ANYTHING BELOW HERE!!! Otherwise memory would overlap,
+	// because the extension buffer is allocated immediately after this struct
+	// and is pointed to by request.Extension.
220 +};
221 +
222 +// Per device structure
223 +struct block_device_context {
224 + struct device_context *device_ctx; // point back to our device context
225 + struct kmem_cache *request_pool;
226 + spinlock_t lock;
227 + struct gendisk *gd;
228 + enum blkvsc_device_type device_type;
229 + struct list_head pending_list;
230 +
231 + unsigned char device_id[64];
232 + unsigned int device_id_len;
233 + int num_outstanding_reqs;
234 + int shutting_down;
235 + int media_not_present;
236 + unsigned int sector_size;
237 + sector_t capacity;
238 + unsigned int port;
239 + unsigned char path;
240 + unsigned char target;
241 + int users;
242 +};
243 +
244 +// Per driver
245 +struct blkvsc_driver_context {
246 + // !! These must be the first 2 fields !!
247 + struct driver_context drv_ctx;
248 + STORVSC_DRIVER_OBJECT drv_obj;
249 +};
250 +
251 +// Static decl
252 +static int blkvsc_probe(struct device *dev);
253 +static int blkvsc_remove(struct device *device);
254 +static void blkvsc_shutdown(struct device *device);
255 +
256 +static int blkvsc_open(struct inode *inode, struct file *filep);
257 +static int blkvsc_release(struct inode *inode, struct file *filep);
258 +static int blkvsc_media_changed(struct gendisk *gd);
259 +static int blkvsc_revalidate_disk(struct gendisk *gd);
260 +static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg);
261 +static int blkvsc_ioctl(struct inode *inode, struct file *filep, unsigned cmd, unsigned long arg);
262 +
263 +static void blkvsc_request(struct request_queue *queue);
264 +static void blkvsc_request_completion(STORVSC_REQUEST* request);
265 +static int blkvsc_do_request(struct block_device_context *blkdev, struct request *req);
266 +static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req, void (*request_completion)(STORVSC_REQUEST*) );
267 +static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req);
268 +static void blkvsc_cmd_completion(STORVSC_REQUEST* request);
269 +static int blkvsc_do_inquiry(struct block_device_context *blkdev);
270 +static int blkvsc_do_read_capacity(struct block_device_context *blkdev);
271 +static int blkvsc_do_read_capacity16(struct block_device_context *blkdev);
272 +static int blkvsc_do_flush(struct block_device_context *blkdev);
273 +static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev);
274 +static int blkvsc_do_pending_reqs(struct block_device_context *blkdev);
275 +
276 +
277 +static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
278 +
+// The one and only instance of this driver
280 +static struct blkvsc_driver_context g_blkvsc_drv;
281 +
282 +
283 +static struct block_device_operations block_ops =
284 +{
285 + .owner = THIS_MODULE,
286 + .open = blkvsc_open,
287 + .release = blkvsc_release,
288 + .media_changed = blkvsc_media_changed,
289 + .revalidate_disk = blkvsc_revalidate_disk,
290 + .getgeo = blkvsc_getgeo,
291 + .ioctl = blkvsc_ioctl,
292 +};
293 +
294 +/*++
295 +
296 +Name: blkvsc_drv_init()
297 +
298 +Desc: BlkVsc driver initialization.
299 +
300 +--*/
301 +int blkvsc_drv_init(PFN_DRIVERINITIALIZE pfn_drv_init)
302 +{
303 + int ret=0;
304 + STORVSC_DRIVER_OBJECT *storvsc_drv_obj=&g_blkvsc_drv.drv_obj;
305 + struct driver_context *drv_ctx=&g_blkvsc_drv.drv_ctx;
306 +
307 + DPRINT_ENTER(BLKVSC_DRV);
308 +
309 + vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface);
310 +
311 + storvsc_drv_obj->RingBufferSize = blkvsc_ringbuffer_size;
312 +
313 + // Callback to client driver to complete the initialization
314 + pfn_drv_init(&storvsc_drv_obj->Base);
315 +
316 + drv_ctx->driver.name = storvsc_drv_obj->Base.name;
317 + memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType, sizeof(GUID));
318 +
319 +#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
320 + drv_ctx->driver.probe = blkvsc_probe;
321 + drv_ctx->driver.remove = blkvsc_remove;
322 +#else
323 + drv_ctx->probe = blkvsc_probe;
324 + drv_ctx->remove = blkvsc_remove;
325 + drv_ctx->shutdown = blkvsc_shutdown;
326 +#endif
327 +
328 + // The driver belongs to vmbus
329 + vmbus_child_driver_register(drv_ctx);
330 +
331 + DPRINT_EXIT(BLKVSC_DRV);
332 +
333 + return ret;
334 +}
335 +
336 +
337 +static int blkvsc_drv_exit_cb(struct device *dev, void *data)
338 +{
339 + struct device **curr = (struct device **)data;
340 + *curr = dev;
341 + return 1; // stop iterating
342 +}
343 +
344 +/*++
345 +
346 +Name: blkvsc_drv_exit()
347 +
+Desc: BlkVsc driver cleanup. Unregisters all devices and the driver itself.
349 +
350 +--*/
351 +void blkvsc_drv_exit(void)
352 +{
353 + STORVSC_DRIVER_OBJECT *storvsc_drv_obj=&g_blkvsc_drv.drv_obj;
354 + struct driver_context *drv_ctx=&g_blkvsc_drv.drv_ctx;
355 +
356 + struct device *current_dev=NULL;
357 +
358 +#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
359 +#define driver_for_each_device(drv, start, data, fn) \
360 + struct list_head *ptr, *n; \
361 + list_for_each_safe(ptr, n, &((drv)->devices)) {\
362 + struct device *curr_dev;\
363 + curr_dev = list_entry(ptr, struct device, driver_list);\
364 + fn(curr_dev, data);\
365 + }
366 +#endif // KERNEL_2_6_9
367 +
368 + DPRINT_ENTER(BLKVSC_DRV);
369 +
370 + while (1)
371 + {
372 + current_dev = NULL;
373 +
374 + // Get the device
375 + driver_for_each_device(&drv_ctx->driver, NULL, (void*)&current_dev, blkvsc_drv_exit_cb);
376 +
377 + if (current_dev == NULL)
378 + break;
379 +
380 + // Initiate removal from the top-down
381 + device_unregister(current_dev);
382 + }
383 +
384 + if (storvsc_drv_obj->Base.OnCleanup)
385 + storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);
386 +
387 + vmbus_child_driver_unregister(drv_ctx);
388 +
389 + DPRINT_EXIT(BLKVSC_DRV);
390 +
391 + return;
392 +}
393 +
394 +/*++
395 +
396 +Name: blkvsc_probe()
397 +
398 +Desc: Add a new device for this driver
399 +
400 +--*/
401 +static int blkvsc_probe(struct device *device)
402 +{
403 + int ret=0;
404 +
405 + struct driver_context *driver_ctx = driver_to_driver_context(device->driver);
406 + struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context*)driver_ctx;
407 + STORVSC_DRIVER_OBJECT* storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;
408 +
409 + struct device_context *device_ctx = device_to_device_context(device);
410 + DEVICE_OBJECT* device_obj = &device_ctx->device_obj;
411 +
412 + struct block_device_context *blkdev=NULL;
413 + STORVSC_DEVICE_INFO device_info;
414 + int major=0;
415 + int devnum=0;
416 +
417 + static int ide0_registered=0;
418 + static int ide1_registered=0;
419 +
420 + DPRINT_ENTER(BLKVSC_DRV);
421 +
422 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter");
423 +
424 + if (!storvsc_drv_obj->Base.OnDeviceAdd)
425 + {
426 + DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set");
427 +
428 + ret = -1;
429 + goto Cleanup;
430 + }
431 +
432 + blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
433 + if (!blkdev)
434 + {
435 + ret = -ENOMEM;
436 + goto Cleanup;
437 + }
438 +
439 + INIT_LIST_HEAD(&blkdev->pending_list);
440 +
441 + // Initialize what we can here
442 + spin_lock_init(&blkdev->lock);
443 +
444 + ASSERT(sizeof(struct blkvsc_request_group) <= sizeof(struct blkvsc_request));
445 +
446 +#ifdef KERNEL_2_6_27
447 + blkdev->request_pool = kmem_cache_create(device_ctx->device.bus_id,
448 + sizeof(struct blkvsc_request) + storvsc_drv_obj->RequestExtSize, 0,
449 + SLAB_HWCACHE_ALIGN, NULL);
450 +#else
451 + blkdev->request_pool = kmem_cache_create(device_ctx->device.bus_id,
452 + sizeof(struct blkvsc_request) + storvsc_drv_obj->RequestExtSize, 0,
453 + SLAB_HWCACHE_ALIGN, NULL, NULL);
454 +#endif
455 + if (!blkdev->request_pool)
456 + {
457 + ret = -ENOMEM;
458 + goto Cleanup;
459 + }
460 +
461 +
462 + // Call to the vsc driver to add the device
463 + ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj, &device_info);
464 + if (ret != 0)
465 + {
466 + DPRINT_ERR(BLKVSC_DRV, "unable to add blkvsc device");
467 + goto Cleanup;
468 + }
469 +
470 + blkdev->device_ctx = device_ctx;
+	blkdev->target = device_info.TargetId;	// this identifies the device, 0 or 1
+	blkdev->path = device_info.PathId;	// this identifies the IDE controller, 0 or 1
473 +
474 + device->driver_data = blkdev;
475 +
476 + // Calculate the major and device num
477 + if (blkdev->path == 0)
478 + {
479 + major = IDE0_MAJOR;
480 + devnum = blkdev->path + blkdev->target; // 0 or 1
481 +
482 + if (!ide0_registered)
483 + {
484 + ret = register_blkdev(major, "ide");
485 + if (ret != 0)
486 + {
487 + DPRINT_ERR(BLKVSC_DRV, "register_blkdev() failed! ret %d", ret);
488 + goto Remove;
489 + }
490 +
491 + ide0_registered = 1;
492 + }
493 + }
494 + else if (blkdev->path == 1)
495 + {
496 + major = IDE1_MAJOR;
497 + devnum = blkdev->path + blkdev->target + 1; // 2 or 3
498 +
499 + if (!ide1_registered)
500 + {
501 + ret = register_blkdev(major, "ide");
502 + if (ret != 0)
503 + {
504 + DPRINT_ERR(BLKVSC_DRV, "register_blkdev() failed! ret %d", ret);
505 + goto Remove;
506 + }
507 +
508 + ide1_registered = 1;
509 + }
510 +
511 + }
512 + else
513 + {
514 + DPRINT_ERR(BLKVSC_DRV, "invalid pathid");
515 + ret = -1;
516 + goto Cleanup;
517 + }
518 +
519 + DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!", major);
520 +
521 + blkdev->gd = alloc_disk(BLKVSC_MINORS);
522 + if (!blkdev->gd)
523 + {
+		DPRINT_ERR(BLKVSC_DRV, "alloc_disk() failed");
525 + ret = -1;
526 + goto Cleanup;
527 + }
528 +
529 + blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);
530 +
531 + blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
532 + blk_queue_max_phys_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
533 + blk_queue_max_hw_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
534 + blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
535 + blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
536 + blk_queue_dma_alignment(blkdev->gd->queue, 511);
537 +
538 + blkdev->gd->major = major;
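+	// devnum 1 and 3 are the slave devices on their controller, so they take
+	// the second block of BLKVSC_MINORS minor numbers on the major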
539 + if (devnum == 1 || devnum == 3)
540 + blkdev->gd->first_minor = BLKVSC_MINORS;
541 + else
542 + blkdev->gd->first_minor = 0;
543 + blkdev->gd->fops = &block_ops;
544 + blkdev->gd->private_data = blkdev;
545 + sprintf(blkdev->gd->disk_name, "hd%c", 'a'+ devnum);
546 +
547 + blkvsc_do_inquiry(blkdev);
548 + if (blkdev->device_type == DVD_TYPE)
549 + {
550 + set_disk_ro(blkdev->gd, 1);
551 + blkdev->gd->flags |= GENHD_FL_REMOVABLE;
552 + blkvsc_do_read_capacity(blkdev);
553 + }
554 + else
555 + {
556 + blkvsc_do_read_capacity16(blkdev);
557 + }
558 +
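+	// blkdev->capacity is in device-sized sectors; the gendisk capacity is
+	// always in 512-byte units, hence the scaling by sector_size/512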
559 + set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
560 + blk_queue_hardsect_size(blkdev->gd->queue, blkdev->sector_size);
561 + // go!
562 + add_disk(blkdev->gd);
563 +
564 + DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %llu sector_size %d", blkdev->gd->disk_name, blkdev->capacity, blkdev->sector_size);
565 +
566 + return ret;
567 +
568 +Remove:
569 + storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
570 +
571 +Cleanup:
572 + if (blkdev)
573 + {
574 + if (blkdev->request_pool)
575 + {
576 + kmem_cache_destroy(blkdev->request_pool);
577 + blkdev->request_pool = NULL;
578 + }
579 + kfree(blkdev);
580 + blkdev = NULL;
581 + }
582 +
583 + DPRINT_EXIT(BLKVSC_DRV);
584 +
585 + return ret;
586 +}
587 +
588 +static void blkvsc_shutdown(struct device *device)
589 +{
590 + struct block_device_context *blkdev = (struct block_device_context*)device->driver_data;
591 + unsigned long flags;
592 +
593 + if (!blkdev)
594 + return;
595 +
596 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_shutdown - users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);
597 +
598 + spin_lock_irqsave(&blkdev->lock, flags);
599 +
600 + blkdev->shutting_down = 1;
601 +
602 + blk_stop_queue(blkdev->gd->queue);
603 +
604 + spin_unlock_irqrestore(&blkdev->lock, flags);
605 +
606 + while (blkdev->num_outstanding_reqs)
607 + {
608 + DPRINT_INFO(STORVSC, "waiting for %d requests to complete...", blkdev->num_outstanding_reqs);
609 +
610 + udelay(100);
611 + }
612 +
613 + blkvsc_do_flush(blkdev);
614 +
615 + spin_lock_irqsave(&blkdev->lock, flags);
616 +
617 + blkvsc_cancel_pending_reqs(blkdev);
618 +
619 + spin_unlock_irqrestore(&blkdev->lock, flags);
620 +}
621 +
622 +static int blkvsc_do_flush(struct block_device_context *blkdev)
623 +{
624 + struct blkvsc_request *blkvsc_req=NULL;
625 +
626 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_flush()\n");
627 +
628 + if (blkdev->device_type != HARDDISK_TYPE)
629 + return 0;
630 +
631 + blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
632 + if (!blkvsc_req)
633 + {
634 + return -ENOMEM;
635 + }
636 +
637 + memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
638 + init_waitqueue_head(&blkvsc_req->wevent);
639 + blkvsc_req->dev = blkdev;
640 + blkvsc_req->req = NULL;
641 + blkvsc_req->write = 0;
642 +
643 + blkvsc_req->request.DataBuffer.PfnArray[0] = 0;
644 + blkvsc_req->request.DataBuffer.Offset = 0;
645 + blkvsc_req->request.DataBuffer.Length = 0;
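+	// SYNCHRONIZE CACHE transfers no data, so the data buffer is left empty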
646 +
647 + blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
648 + blkvsc_req->cmd_len = 10;
649 +
650 + // Set this here since the completion routine may be invoked and completed before we return
651 + blkvsc_req->cond =0;
652 + blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
653 +
654 + wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
655 +
656 + kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
657 +
658 + return 0;
659 +}
660 +
+// Do a SCSI INQUIRY cmd here to get the device type (i.e. disk or DVD)
662 +static int blkvsc_do_inquiry(struct block_device_context *blkdev)
663 +{
664 + struct blkvsc_request *blkvsc_req=NULL;
665 + struct page *page_buf;
666 + unsigned char *buf;
667 + unsigned char device_type;
668 +
669 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_inquiry()\n");
670 +
671 + blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
672 + if (!blkvsc_req)
673 + {
674 + return -ENOMEM;
675 + }
676 +
677 + memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
678 + page_buf = alloc_page(GFP_KERNEL);
679 + if (!page_buf)
680 + {
681 + kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
682 + return -ENOMEM;
683 + }
684 +
685 + init_waitqueue_head(&blkvsc_req->wevent);
686 + blkvsc_req->dev = blkdev;
687 + blkvsc_req->req = NULL;
688 + blkvsc_req->write = 0;
689 +
690 + blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
691 + blkvsc_req->request.DataBuffer.Offset = 0;
692 + blkvsc_req->request.DataBuffer.Length = 64;
693 +
694 + blkvsc_req->cmnd[0] = INQUIRY;
+	blkvsc_req->cmnd[1] = 0x1;	// EVPD bit - request vital product data
+	blkvsc_req->cmnd[2] = 0x83;	// VPD page 0x83 - device identification
697 + blkvsc_req->cmnd[4] = 64;
698 + blkvsc_req->cmd_len = 6;
699 +
700 + // Set this here since the completion routine may be invoked and completed before we return
701 + blkvsc_req->cond =0;
702 +
703 + blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
704 +
705 + DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond);
706 +
707 + wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
708 +
709 + buf = kmap(page_buf);
710 +
711 + //PrintBytes(buf, 64);
+	// the peripheral device type is in the low 5 bits of byte 0
713 + device_type = buf[0] & 0x1F;
714 +
715 + if (device_type == 0x0)
716 + {
717 + blkdev->device_type = HARDDISK_TYPE;
718 + }
719 + else if (device_type == 0x5)
720 + {
721 + blkdev->device_type = DVD_TYPE;
722 + }
723 + else
724 + {
+		// TODO: this is currently an unsupported device type
726 + blkdev->device_type = UNKNOWN_DEV_TYPE;
727 + }
728 +
729 + DPRINT_DBG(BLKVSC_DRV, "device type %d \n", device_type);
730 +
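+	// first designation descriptor of the 0x83 VPD page: byte 7 holds the
+	// designator length and the designator itself starts at byte 8
+	// (assuming the host returns at least one descriptor)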
731 + blkdev->device_id_len = buf[7];
732 + if (blkdev->device_id_len > 64)
733 + blkdev->device_id_len = 64;
734 +
735 + memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
736 + //PrintBytes(blkdev->device_id, blkdev->device_id_len);
737 +
738 + kunmap(page_buf);
739 +
740 + __free_page(page_buf);
741 +
742 + kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
743 +
744 + return 0;
745 +}
746 +
747 +// Do a scsi READ_CAPACITY cmd here to get the size of the disk
748 +static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
749 +{
750 + struct blkvsc_request *blkvsc_req=NULL;
751 + struct page *page_buf;
752 + unsigned char *buf;
753 + struct scsi_sense_hdr sense_hdr;
754 +
755 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity()\n");
756 +
757 + blkdev->sector_size = 0;
758 + blkdev->capacity = 0;
759 + blkdev->media_not_present = 0; // assume a disk is present
760 +
761 + blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
762 + if (!blkvsc_req)
763 + {
764 + return -ENOMEM;
765 + }
766 +
767 + memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
768 + page_buf = alloc_page(GFP_KERNEL);
769 + if (!page_buf)
770 + {
771 + kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
772 + return -ENOMEM;
773 + }
774 +
775 + init_waitqueue_head(&blkvsc_req->wevent);
776 + blkvsc_req->dev = blkdev;
777 + blkvsc_req->req = NULL;
778 + blkvsc_req->write = 0;
779 +
780 + blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
781 + blkvsc_req->request.DataBuffer.Offset = 0;
782 + blkvsc_req->request.DataBuffer.Length = 8;
783 +
+	blkvsc_req->cmnd[0] = READ_CAPACITY;
+	blkvsc_req->cmd_len = 10;	// READ CAPACITY(10) uses a 10-byte CDB
786 +
787 + // Set this here since the completion routine may be invoked and completed before we return
788 + blkvsc_req->cond =0;
789 +
790 + blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
791 +
792 + DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond);
793 +
794 + wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
795 +
+	// check error
+	if (blkvsc_req->request.Status)
+	{
+		scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
+
+		if (sense_hdr.asc == 0x3A) // Medium not present
+		{
+			blkdev->media_not_present = 1;
+		}
+
+		// free the page and the request here as well - the frees below are
+		// not reached on this early return
+		__free_page(page_buf);
+		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
+
+		return 0;
+	}
808 + buf = kmap(page_buf);
809 +
810 + // be to le
811 + blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]) + 1;
812 + blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
813 +
814 + kunmap(page_buf);
815 +
816 + __free_page(page_buf);
817 +
818 + kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
819 +
820 + return 0;
821 +}
822 +
823 +
824 +static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
825 +{
826 + struct blkvsc_request *blkvsc_req=NULL;
827 + struct page *page_buf;
828 + unsigned char *buf;
829 + struct scsi_sense_hdr sense_hdr;
830 +
831 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity16()\n");
832 +
833 + blkdev->sector_size = 0;
834 + blkdev->capacity = 0;
835 + blkdev->media_not_present = 0; // assume a disk is present
836 +
837 + blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
838 + if (!blkvsc_req)
839 + {
840 + return -ENOMEM;
841 + }
842 +
843 + memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
844 + page_buf = alloc_page(GFP_KERNEL);
845 + if (!page_buf)
846 + {
847 + kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
848 + return -ENOMEM;
849 + }
850 +
851 + init_waitqueue_head(&blkvsc_req->wevent);
852 + blkvsc_req->dev = blkdev;
853 + blkvsc_req->req = NULL;
854 + blkvsc_req->write = 0;
855 +
856 + blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
857 + blkvsc_req->request.DataBuffer.Offset = 0;
858 + blkvsc_req->request.DataBuffer.Length = 12;
859 +
+	blkvsc_req->cmnd[0] = 0x9E; // SERVICE ACTION IN(16), i.e. READ_CAPACITY16
861 + blkvsc_req->cmd_len = 16;
862 +
863 + // Set this here since the completion routine may be invoked and completed before we return
864 + blkvsc_req->cond =0;
865 +
866 + blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);
867 +
868 + DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond);
869 +
870 + wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);
871 +
+	// check error
+	if (blkvsc_req->request.Status)
+	{
+		scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
+
+		if (sense_hdr.asc == 0x3A) // Medium not present
+		{
+			blkdev->media_not_present = 1;
+		}
+
+		// free the page and the request here as well - the frees below are
+		// not reached on this early return
+		__free_page(page_buf);
+		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
+
+		return 0;
+	}
884 + buf = kmap(page_buf);
885 +
886 + // be to le
887 + blkdev->capacity = be64_to_cpu(*(unsigned long long*) &buf[0]) + 1;
888 + blkdev->sector_size = be32_to_cpu(*(unsigned int*)&buf[8]);
889 +
890 + //blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]) + 1;
891 + //blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
892 +
893 + kunmap(page_buf);
894 +
895 + __free_page(page_buf);
896 +
897 + kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
898 +
899 + return 0;
900 +}
901 +
902 +/*++
903 +
904 +Name: blkvsc_remove()
905 +
906 +Desc: Callback when our device is removed
907 +
908 +--*/
909 +static int blkvsc_remove(struct device *device)
910 +{
911 + int ret=0;
912 +
913 + struct driver_context *driver_ctx = driver_to_driver_context(device->driver);
914 + struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context*)driver_ctx;
915 + STORVSC_DRIVER_OBJECT* storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;
916 +
917 + struct device_context *device_ctx = device_to_device_context(device);
918 + DEVICE_OBJECT* device_obj = &device_ctx->device_obj;
919 + struct block_device_context *blkdev = (struct block_device_context*)device->driver_data;
920 + unsigned long flags;
921 +
922 + DPRINT_ENTER(BLKVSC_DRV);
923 +
924 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n");
925 +
926 + if (!storvsc_drv_obj->Base.OnDeviceRemove)
927 + {
928 + DPRINT_EXIT(BLKVSC_DRV);
929 + return -1;
930 + }
931 +
932 + // Call to the vsc driver to let it know that the device is being removed
933 + ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
934 + if (ret != 0)
935 + {
936 + // TODO:
937 + DPRINT_ERR(BLKVSC_DRV, "unable to remove blkvsc device (ret %d)", ret);
938 + }
939 +
940 + // Get to a known state
941 + spin_lock_irqsave(&blkdev->lock, flags);
942 +
943 + blkdev->shutting_down = 1;
944 +
945 + blk_stop_queue(blkdev->gd->queue);
946 +
947 + spin_unlock_irqrestore(&blkdev->lock, flags);
948 +
949 + while (blkdev->num_outstanding_reqs)
950 + {
951 + DPRINT_INFO(STORVSC, "waiting for %d requests to complete...", blkdev->num_outstanding_reqs);
952 +
953 + udelay(100);
954 + }
955 +
956 + blkvsc_do_flush(blkdev);
957 +
958 + spin_lock_irqsave(&blkdev->lock, flags);
959 +
960 + blkvsc_cancel_pending_reqs(blkdev);
961 +
962 + spin_unlock_irqrestore(&blkdev->lock, flags);
963 +
964 + blk_cleanup_queue(blkdev->gd->queue);
965 +
966 + del_gendisk(blkdev->gd);
967 +
968 + kmem_cache_destroy(blkdev->request_pool);
969 +
970 + kfree(blkdev);
971 +
972 + DPRINT_EXIT(BLKVSC_DRV);
973 +
974 + return ret;
975 +}
976 +
977 +static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
978 +{
979 + ASSERT(blkvsc_req->req);
980 + ASSERT(blkvsc_req->sector_count <= (MAX_MULTIPAGE_BUFFER_COUNT*8));
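+	// (the ASSERT above assumes a 4K PAGE_SIZE - each page of the multipage
+	// buffer holds 8 512-byte sectors, hence the factor of 8)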
981 +
982 + blkvsc_req->cmd_len = 16;
983 +
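+	// Pick the smallest CDB that can address the transfer: READ/WRITE(6)
+	// handles a 21-bit LBA and an 8-bit sector count, READ/WRITE(10) a 32-bit
+	// LBA and a 16-bit count, and anything beyond that needs READ/WRITE(16)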
984 + if (blkvsc_req->sector_start > 0xffffffff)
985 + {
986 + if (rq_data_dir(blkvsc_req->req))
987 + {
988 + blkvsc_req->write = 1;
989 + blkvsc_req->cmnd[0] = WRITE_16;
990 + }
991 + else
992 + {
993 + blkvsc_req->write = 0;
994 + blkvsc_req->cmnd[0] = READ_16;
995 + }
996 +
997 + blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;
998 +
999 + *(unsigned long long*)&blkvsc_req->cmnd[2] = cpu_to_be64(blkvsc_req->sector_start);
1000 + *(unsigned int*)&blkvsc_req->cmnd[10] = cpu_to_be32(blkvsc_req->sector_count);
1001 + }
1002 + else if ((blkvsc_req->sector_count > 0xff) || (blkvsc_req->sector_start > 0x1fffff))
1003 + {
1004 + if (rq_data_dir(blkvsc_req->req))
1005 + {
1006 + blkvsc_req->write = 1;
1007 + blkvsc_req->cmnd[0] = WRITE_10;
1008 + }
1009 + else
1010 + {
1011 + blkvsc_req->write = 0;
1012 + blkvsc_req->cmnd[0] = READ_10;
1013 + }
1014 +
1015 + blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;
1016 +
1017 + *(unsigned int *)&blkvsc_req->cmnd[2] = cpu_to_be32(blkvsc_req->sector_start);
1018 + *(unsigned short*)&blkvsc_req->cmnd[7] = cpu_to_be16(blkvsc_req->sector_count);
1019 + }
1020 + else
1021 + {
1022 + if (rq_data_dir(blkvsc_req->req))
1023 + {
1024 + blkvsc_req->write = 1;
1025 + blkvsc_req->cmnd[0] = WRITE_6;
1026 + }
1027 + else
1028 + {
1029 + blkvsc_req->write = 0;
1030 + blkvsc_req->cmnd[0] = READ_6;
1031 + }
1032 +
1033 + *(unsigned int *)&blkvsc_req->cmnd[1] = cpu_to_be32(blkvsc_req->sector_start) >> 8;
1034 + blkvsc_req->cmnd[1] &= 0x1f;
1035 + blkvsc_req->cmnd[4] = (unsigned char) blkvsc_req->sector_count;
1036 + }
1037 +}
1038 +
1039 +static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req, void (*request_completion)(STORVSC_REQUEST*) )
1040 +{
1041 + struct block_device_context *blkdev = blkvsc_req->dev;
1042 + struct device_context *device_ctx=blkdev->device_ctx;
1043 + struct driver_context *driver_ctx = driver_to_driver_context(device_ctx->device.driver);
1044 + struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context*)driver_ctx;
1045 + STORVSC_DRIVER_OBJECT* storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;
1046 + int ret =0;
1047 +
1048 + STORVSC_REQUEST *storvsc_req;
1049 +
1050 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - req %p type %s start_sector %llu count %d offset %d len %d\n",
1051 + blkvsc_req,
1052 + (blkvsc_req->write)?"WRITE":"READ",
1053 + blkvsc_req->sector_start,
1054 + blkvsc_req->sector_count,
1055 + blkvsc_req->request.DataBuffer.Offset,
1056 + blkvsc_req->request.DataBuffer.Length);
1057 +
1058 + /*for (i=0; i < (blkvsc_req->request.DataBuffer.Length >> 12); i++)
1059 + {
1060 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - req %p pfn[%d] %llx\n",
1061 + blkvsc_req,
1062 + i,
1063 + blkvsc_req->request.DataBuffer.PfnArray[i]);
1064 + }*/
1065 +
1066 + storvsc_req = &blkvsc_req->request;
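+	// the extension area was allocated together with blkvsc_req (the slab
+	// object size is sizeof(struct blkvsc_request) + RequestExtSize), so it
+	// sits immediately past the end of the struct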
1067 + storvsc_req->Extension = (void*)((unsigned long)blkvsc_req + sizeof(struct blkvsc_request));
1068 +
1069 + storvsc_req->Type = blkvsc_req->write? WRITE_TYPE : READ_TYPE;
1070 +
1071 + storvsc_req->OnIOCompletion = request_completion;
1072 + storvsc_req->Context = blkvsc_req;
1073 +
1074 + storvsc_req->Host = blkdev->port;
1075 + storvsc_req->Bus = blkdev->path;
1076 + storvsc_req->TargetId = blkdev->target;
1077 + storvsc_req->LunId = 0; // this is not really used at all
1078 +
1079 + storvsc_req->CdbLen = blkvsc_req->cmd_len;
1080 + storvsc_req->Cdb = blkvsc_req->cmnd;
1081 +
1082 + storvsc_req->SenseBuffer = blkvsc_req->sense_buffer;
1083 + storvsc_req->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;
1084 +
1085 + ret = storvsc_drv_obj->OnIORequest(&blkdev->device_ctx->device_obj, &blkvsc_req->request);
1086 + if (ret == 0)
1087 + {
1088 + blkdev->num_outstanding_reqs++;
1089 + }
1090 +
1091 + return ret;
1092 +}
1093 +
1094 +//
+// We break the request into one or more blkvsc_requests and submit them.
+// If we can't submit them all, we put them on the pending_list;
+// blkvsc_request() will work off the pending_list.
1098 +//
1099 +static int blkvsc_do_request(struct block_device_context *blkdev, struct request *req)
1100 +{
1101 + struct bio *bio=NULL;
1102 + struct bio_vec *bvec=NULL;
1103 + struct bio_vec *prev_bvec=NULL;
1104 +
1105 + struct blkvsc_request *blkvsc_req=NULL;
1106 + struct blkvsc_request *tmp;
1107 + int databuf_idx=0;
1108 + int seg_idx=0;
1109 +
1110 + sector_t start_sector;
1111 + unsigned long num_sectors = 0;
1112 + int ret=0;
1113 + int pending=0;
1114 + struct blkvsc_request_group *group=NULL;
1115 +
1116 + DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %llu \n", blkdev, req, req->sector);
1117 +
1118 + // Create a group to tie req to list of blkvsc_reqs
1119 + group = (struct blkvsc_request_group*)kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
1120 + if (!group)
1121 + {
1122 + return -ENOMEM;
1123 + }
1124 +
1125 + INIT_LIST_HEAD(&group->blkvsc_req_list);
1126 + group->outstanding = group->status = 0;
1127 +
1128 + start_sector = req->sector;
1129 +
1130 + // foreach bio in the request
1131 + if (req->bio)
1132 + for (bio = req->bio; bio; bio = bio->bi_next)
1133 + {
1134 + // Map this bio into an existing or new storvsc request
1135 + bio_for_each_segment (bvec, bio, seg_idx)
1136 + {
1137 + DPRINT_DBG(BLKVSC_DRV, "bio_for_each_segment() - req %p bio %p bvec %p seg_idx %d databuf_idx %d\n",
1138 + req, bio, bvec, seg_idx, databuf_idx);
1139 +
1140 + // Get a new storvsc request
1141 + if ( (!blkvsc_req) || // 1st-time
1142 + (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT) ||
+				(bvec->bv_offset != 0) ||	// hole at the beginning of the page
+				(prev_bvec && (prev_bvec->bv_len != PAGE_SIZE)) )	// hole at the end of the previous page
1145 + {
1146 + // submit the prev one
1147 + if (blkvsc_req)
1148 + {
1149 + blkvsc_req->sector_start = start_sector;
1150 + sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));
1151 +
1152 + blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);
1153 +
1154 + blkvsc_init_rw(blkvsc_req);
1155 + }
1156 +
1157 + // Create new blkvsc_req to represent the current bvec
1158 + blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
1159 + if (!blkvsc_req)
1160 + {
1161 + // free up everything
1162 + list_for_each_entry_safe(blkvsc_req, tmp, &group->blkvsc_req_list, req_entry)
1163 + {
1164 + list_del(&blkvsc_req->req_entry);
1165 + kmem_cache_free(blkdev->request_pool, blkvsc_req);
1166 + }
1167 +
1168 + kmem_cache_free(blkdev->request_pool, group);
1169 + return -ENOMEM;
1170 + }
1171 +
1172 + memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
1173 +
1174 + blkvsc_req->dev = blkdev;
1175 + blkvsc_req->req = req;
1176 + blkvsc_req->request.DataBuffer.Offset = bvec->bv_offset;
1177 + blkvsc_req->request.DataBuffer.Length = 0;
1178 +
1179 + // Add to the group
1180 + blkvsc_req->group = group;
1181 + blkvsc_req->group->outstanding++;
1182 + list_add_tail(&blkvsc_req->req_entry, &blkvsc_req->group->blkvsc_req_list);
1183 +
1184 + start_sector += num_sectors;
1185 + num_sectors = 0;
1186 + databuf_idx = 0;
1187 + }
1188 +
1189 + // Add the curr bvec/segment to the curr blkvsc_req
1190 + blkvsc_req->request.DataBuffer.PfnArray[databuf_idx] = page_to_pfn(bvec->bv_page);
1191 + blkvsc_req->request.DataBuffer.Length += bvec->bv_len;
1192 +
1193 + prev_bvec = bvec;
1194 +
1195 + databuf_idx++;
1196 + num_sectors += bvec->bv_len >> 9;
1197 +
1198 + } // bio_for_each_segment
1199 +
+	} // for each bio in the request
1201 +
1202 + // Handle the last one
1203 + if (blkvsc_req)
1204 + {
1205 + DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p group %p count %d\n", blkdev, req, blkvsc_req->group, blkvsc_req->group->outstanding);
1206 +
1207 + blkvsc_req->sector_start = start_sector;
1208 + sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));
1209 +
1210 + blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);
1211 +
1212 + blkvsc_init_rw(blkvsc_req);
1213 + }
1214 +
1215 + list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry)
1216 + {
1217 + if (pending)
1218 + {
1219 + DPRINT_DBG(BLKVSC_DRV, "adding blkvsc_req to pending_list - blkvsc_req %p start_sect %llu sect_count %d (%llu %d)\n",
1220 + blkvsc_req, blkvsc_req->sector_start, blkvsc_req->sector_count, start_sector, num_sectors);
1221 +
1222 + list_add_tail(&blkvsc_req->pend_entry, &blkdev->pending_list);
1223 + }
1224 + else
1225 + {
1226 + ret = blkvsc_submit_request(blkvsc_req, blkvsc_request_completion);
1227 + if (ret == -1)
1228 + {
1229 + pending = 1;
1230 + list_add_tail(&blkvsc_req->pend_entry, &blkdev->pending_list);
1231 + }
1232 +
1233 + DPRINT_DBG(BLKVSC_DRV, "submitted blkvsc_req %p start_sect %llu sect_count %d (%llu %d) ret %d\n",
1234 + blkvsc_req, blkvsc_req->sector_start, blkvsc_req->sector_count, start_sector, num_sectors, ret);
1235 + }
1236 + }
1237 +
1238 + return pending;
1239 +}
1240 +
1241 +static void blkvsc_cmd_completion(STORVSC_REQUEST* request)
1242 +{
1243 + struct blkvsc_request *blkvsc_req=(struct blkvsc_request*)request->Context;
1244 + struct block_device_context *blkdev = (struct block_device_context*)blkvsc_req->dev;
1245 +
1246 + struct scsi_sense_hdr sense_hdr;
1247 +
1248 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_cmd_completion() - req %p\n", blkvsc_req);
1249 +
1250 + blkdev->num_outstanding_reqs--;
1251 +
1252 + if (blkvsc_req->request.Status)
1253 + {
1254 + if (scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr))
1255 + {
1256 + scsi_print_sense_hdr("blkvsc", &sense_hdr);
1257 + }
1258 + }
1259 +
1260 + blkvsc_req->cond =1;
1261 + wake_up_interruptible(&blkvsc_req->wevent);
1262 +}
1263 +
1264 +static void blkvsc_request_completion(STORVSC_REQUEST* request)
1265 +{
1266 + struct blkvsc_request *blkvsc_req=(struct blkvsc_request*)request->Context;
1267 + struct block_device_context *blkdev = (struct block_device_context*)blkvsc_req->dev;
1268 + unsigned long flags;
1269 + struct blkvsc_request *comp_req, *tmp;
1270 +
1271 + ASSERT(blkvsc_req->group);
1272 +
1273 + DPRINT_DBG(BLKVSC_DRV, "blkdev %p blkvsc_req %p group %p type %s sect_start %llu sect_count %d len %d group outstd %d total outstd %d\n",
1274 + blkdev,
1275 + blkvsc_req,
1276 + blkvsc_req->group,
1277 + (blkvsc_req->write)?"WRITE":"READ",
1278 + blkvsc_req->sector_start,
1279 + blkvsc_req->sector_count,
1280 + blkvsc_req->request.DataBuffer.Length,
1281 + blkvsc_req->group->outstanding,
1282 + blkdev->num_outstanding_reqs);
1283 +
1284 + spin_lock_irqsave(&blkdev->lock, flags);
1285 +
1286 + blkdev->num_outstanding_reqs--;
1287 + blkvsc_req->group->outstanding--;
1288 +
1289 + // Only start processing when all the blkvsc_reqs are completed. This guarantees no out-of-order
1290 + // blkvsc_req completion when calling end_that_request_first()
1291 + if (blkvsc_req->group->outstanding == 0)
1292 + {
1293 + list_for_each_entry_safe(comp_req, tmp, &blkvsc_req->group->blkvsc_req_list, req_entry)
1294 + {
1295 + DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p sect_start %llu sect_count %d \n",
1296 + comp_req,
1297 + comp_req->sector_start,
1298 + comp_req->sector_count);
1299 +
1300 + list_del(&comp_req->req_entry);
1301 +
1302 +#ifdef KERNEL_2_6_27
1303 + if (!__blk_end_request(
1304 + comp_req->req,
1305 + (!comp_req->request.Status ? 0: -EIO),
1306 + comp_req->sector_count * blkdev->sector_size))
1307 + {
+				// All the sectors have been transferred, i.e. the request is done
1309 + DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n", comp_req->req);
1310 + kmem_cache_free(blkdev->request_pool, comp_req->group);
1311 + }
1312 +#else
1313 + if (!end_that_request_first(comp_req->req, !comp_req->request.Status, (comp_req->sector_count * (blkdev->sector_size >> 9))))
1314 + {
+				// All the sectors have been transferred, i.e. the request is done
1316 + DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n", comp_req->req);
1317 +
1318 + end_that_request_last(comp_req->req, !comp_req->request.Status);
1319 +
1320 + kmem_cache_free(blkdev->request_pool, comp_req->group);
1321 + }
1322 +#endif
1323 +
1324 + kmem_cache_free(blkdev->request_pool, comp_req);
1325 + }
1326 +
1327 + if (!blkdev->shutting_down)
1328 + {
1329 + blkvsc_do_pending_reqs(blkdev);
1330 + blk_start_queue(blkdev->gd->queue);
1331 + blkvsc_request(blkdev->gd->queue);
1332 + }
1333 + }
1334 +
1335 + spin_unlock_irqrestore(&blkdev->lock, flags);
1336 +}
1337 +
1338 +static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
1339 +{
1340 + struct blkvsc_request *pend_req, *tmp;
1341 + struct blkvsc_request *comp_req, *tmp2;
1342 +
1343 + int ret=0;
1344 +
1345 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs()");
1346 +
1347 + // Flush the pending list first
1348 + list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, pend_entry)
1349 + {
+		// The pend_req could be part of a partially completed request. If so,
+		// complete those reqs first until we hit the pend_req
1352 + list_for_each_entry_safe(comp_req, tmp2, &pend_req->group->blkvsc_req_list, req_entry)
1353 + {
1354 + DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p sect_start %llu sect_count %d \n",
1355 + comp_req,
1356 + comp_req->sector_start,
1357 + comp_req->sector_count);
1358 +
1359 + if (comp_req == pend_req)
1360 + break;
1361 +
1362 + list_del(&comp_req->req_entry);
1363 +
1364 + if (comp_req->req)
1365 + {
1366 +#ifdef KERNEL_2_6_27
1367 + ret = __blk_end_request(
1368 + comp_req->req,
1369 + (!comp_req->request.Status ? 0 : -EIO),
1370 + comp_req->sector_count * blkdev->sector_size);
1371 +#else
1372 + ret = end_that_request_first(comp_req->req, !comp_req->request.Status, (comp_req->sector_count * (blkdev->sector_size >> 9)));
1373 +#endif
1374 + ASSERT(ret != 0);
1375 + }
1376 +
1377 + kmem_cache_free(blkdev->request_pool, comp_req);
1378 + }
1379 +
1380 + DPRINT_DBG(BLKVSC_DRV, "cancelling pending request - %p\n", pend_req);
1381 +
1382 + list_del(&pend_req->pend_entry);
1383 +
1384 + list_del(&pend_req->req_entry);
1385 +
+		if (pend_req->req)
1387 + {
1388 +#ifdef KERNEL_2_6_27
1389 + if (!__blk_end_request(
1390 + pend_req->req,
1391 + -EIO,
1392 + pend_req->sector_count * blkdev->sector_size))
1393 + {
+				// All the sectors have been transferred, i.e. the request is done
1395 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs() - req %p COMPLETED\n", pend_req->req);
1396 + kmem_cache_free(blkdev->request_pool, pend_req->group);
1397 + }
1398 +#else
1399 + if (!end_that_request_first(pend_req->req, 0, (pend_req->sector_count * (blkdev->sector_size >> 9))))
1400 + {
+				// All the sectors have been transferred, i.e. the request is done
1402 + DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs() - req %p COMPLETED\n", pend_req->req);
1403 +
1404 + end_that_request_last(pend_req->req, 0);
1405 +
1406 + kmem_cache_free(blkdev->request_pool, pend_req->group);
1407 + }
1408 +#endif
1409 + }
1410 +
1411 + kmem_cache_free(blkdev->request_pool, pend_req);
1412 + }
1413 +
1414 + return ret;
1415 +}
1416 +
1417 +static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
1418 +{
1419 + struct blkvsc_request *pend_req, *tmp;
1420 + int ret=0;
1421 +
1422 + // Flush the pending list first
1423 + list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, pend_entry)
1424 + {
1425 + DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n", pend_req);
1426 +
1427 + ret = blkvsc_submit_request(pend_req, blkvsc_request_completion);
1428 + if (ret != 0)
1429 + {
1430 + break;
1431 + }
1432 + else
1433 + {
1434 + list_del(&pend_req->pend_entry);
1435 + }
1436 + }
1437 +
1438 + return ret;
1439 +}
1440 +
1441 +static void blkvsc_request(struct request_queue *queue)
1442 +{
1443 + struct block_device_context *blkdev = NULL;
1444 + struct request *req;
1445 + int ret=0;
1446 +
1447 + DPRINT_DBG(BLKVSC_DRV, "- enter \n");
1448 + while ((req = elv_next_request(queue)) != NULL)
1449 + {
1450 + DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req);
1451 +
1452 + blkdev = req->rq_disk->private_data;
1453 + if (blkdev->shutting_down || !blk_fs_request(req) || blkdev->media_not_present) {
1454 + end_request(req, 0);
1455 + continue;
1456 + }
1457 +
1458 + ret = blkvsc_do_pending_reqs(blkdev);
1459 +
1460 + if (ret != 0)
1461 + {
1462 + DPRINT_DBG(BLKVSC_DRV, "- stop queue - pending_list not empty\n");
1463 + blk_stop_queue(queue);
1464 + break;
1465 + }
1466 +
1467 + blkdev_dequeue_request(req);
1468 +
1469 + ret = blkvsc_do_request(blkdev, req);
1470 + if (ret > 0)
1471 + {
1472 + DPRINT_DBG(BLKVSC_DRV, "- stop queue - no room\n");
1473 + blk_stop_queue(queue);
1474 + break;
1475 + }
1476 + else if (ret < 0)
1477 + {
1478 + DPRINT_DBG(BLKVSC_DRV, "- stop queue - no mem\n");
1479 + blk_requeue_request(queue, req);
1480 + blk_stop_queue(queue);
1481 + break;
1482 + }
1483 + }
1484 +}
1485 +
1486 +static int blkvsc_open(struct inode *inode, struct file *filep)
1487 +{
1488 + struct block_device_context *blkdev = inode->i_bdev->bd_disk->private_data;
1489 +
1490 + DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);
1491 +
1492 + spin_lock(&blkdev->lock);
1493 +
1494 + if (!blkdev->users && blkdev->device_type == DVD_TYPE)
1495 + {
1496 + spin_unlock(&blkdev->lock);
1497 + check_disk_change(inode->i_bdev);
1498 + spin_lock(&blkdev->lock);
1499 + }
1500 +
1501 + blkdev->users++;
1502 +
1503 + spin_unlock(&blkdev->lock);
1504 + return 0;
1505 +}
1506 +
1507 +static int blkvsc_release(struct inode *inode, struct file *filep)
1508 +{
1509 + struct block_device_context *blkdev = inode->i_bdev->bd_disk->private_data;
1510 +
1511 + DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);
1512 +
1513 + spin_lock(&blkdev->lock);
1514 + if (blkdev->users == 1)
1515 + {
1516 + spin_unlock(&blkdev->lock);
1517 + blkvsc_do_flush(blkdev);
1518 + spin_lock(&blkdev->lock);
1519 + }
1520 +
1521 + blkdev->users--;
1522 +
1523 + spin_unlock(&blkdev->lock);
1524 + return 0;
1525 +}
1526 +
1527 +static int blkvsc_media_changed(struct gendisk *gd)
1528 +{
1529 + DPRINT_DBG(BLKVSC_DRV, "- enter\n");
1530 +
1531 + return 1;
1532 +}
1533 +
1534 +static int blkvsc_revalidate_disk(struct gendisk *gd)
1535 +{
1536 + struct block_device_context *blkdev = gd->private_data;
1537 +
1538 + DPRINT_DBG(BLKVSC_DRV, "- enter\n");
1539 +
1540 + if (blkdev->device_type == DVD_TYPE)
1541 + {
1542 + blkvsc_do_read_capacity(blkdev);
1543 + set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
1544 + blk_queue_hardsect_size(gd->queue, blkdev->sector_size);
1545 + }
1546 + return 0;
1547 +}
1548 +
1549 +int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
1550 +{
1551 + sector_t total_sectors = get_capacity(bd->bd_disk);
1552 + sector_t cylinder_times_heads=0;
1553 + sector_t temp=0;
1554 +
1555 + int sectors_per_track=0;
1556 + int heads=0;
1557 + int cylinders=0;
1558 + int rem=0;
1559 +
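+	// Fabricate a CHS geometry: clamp to the CHS maximum of 65535 cylinders x
+	// 16 heads x 255 sectors, use 16/255 for big disks, and otherwise step
+	// sectors-per-track through 17/31/63 until the cylinder count fits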
1560 + if (total_sectors > (65535 * 16 * 255)) {
1561 + total_sectors = (65535 * 16 * 255);
1562 + }
1563 +
1564 + if (total_sectors >= (65535 * 16 * 63)) {
1565 + sectors_per_track = 255;
1566 + heads = 16;
1567 +
1568 + cylinder_times_heads = total_sectors;
1569 + rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
1570 + }
1571 + else
1572 + {
1573 + sectors_per_track = 17;
1574 +
1575 + cylinder_times_heads = total_sectors;
1576 + rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
1577 +
1578 + temp = cylinder_times_heads + 1023;
1579 + rem = sector_div(temp, 1024); // sector_div stores the quotient in temp
1580 +
1581 + heads = temp;
1582 +
1583 + if (heads < 4) {
1584 + heads = 4;
1585 + }
1586 +
1587 + if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
1588 + sectors_per_track = 31;
1589 + heads = 16;
1590 +
1591 + cylinder_times_heads = total_sectors;
1592 + rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
1593 + }
1594 +
1595 + if (cylinder_times_heads >= (heads * 1024)) {
1596 + sectors_per_track = 63;
1597 + heads = 16;
1598 +
1599 + cylinder_times_heads = total_sectors;
1600 + rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
1601 + }
1602 + }
1603 +
1604 + temp = cylinder_times_heads;
1605 + rem = sector_div(temp, heads); // sector_div stores the quotient in temp
1606 + cylinders = temp;
1607 +
1608 + hg->heads = heads;
1609 + hg->sectors = sectors_per_track;
1610 + hg->cylinders = cylinders;
1611 +
1612 + DPRINT_INFO(BLKVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads, sectors_per_track);
1613 +
1614 + return 0;
1615 +}
1616 +
1617 +static int blkvsc_ioctl(struct inode *inode, struct file *filep, unsigned cmd, unsigned long arg)
1618 +{
1619 + struct block_device *bd = inode->i_bdev;
1620 + struct block_device_context *blkdev = bd->bd_disk->private_data;
1621 + int ret=0;
1622 +
1623 + switch (cmd)
1624 + {
+		// TODO: I think there is a certain format for HDIO_GET_IDENTITY rather
+		// than just a GUID. Commented it out for now.
1627 + /*case HDIO_GET_IDENTITY:
1628 + DPRINT_INFO(BLKVSC_DRV, "HDIO_GET_IDENTITY\n");
1629 +
1630 + if (copy_to_user((void __user *)arg, blkdev->device_id, blkdev->device_id_len))
1631 + {
1632 + ret = -EFAULT;
1633 + }
1634 +
1635 + break;*/
1636 + default:
1637 + ret = -EINVAL;
1638 + break;
1639 + }
1640 +
1641 + return ret;
1642 +}
1643 +
1644 +
1645 +MODULE_LICENSE("GPL");
1646 +
1647 +static int __init blkvsc_init(void)
1648 +{
1649 + int ret;
1650 +
1651 + ASSERT(sizeof(sector_t) == 8); // Make sure CONFIG_LBD is set
1652 +
1653 + DPRINT_ENTER(BLKVSC_DRV);
1654 +
1655 + DPRINT_INFO(BLKVSC_DRV, "Blkvsc initializing....");
1656 +
1657 + ret = blkvsc_drv_init(BlkVscInitialize);
1658 +
1659 + DPRINT_EXIT(BLKVSC_DRV);
1660 +
1661 + return ret;
1662 +}
1663 +
1664 +static void __exit blkvsc_exit(void)
1665 +{
1666 + DPRINT_ENTER(BLKVSC_DRV);
1667 +
1668 + blkvsc_drv_exit();
1669 +
+	DPRINT_EXIT(BLKVSC_DRV);
1671 +}
1672 +
1673 +module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
1674 +
1675 +module_init(blkvsc_init);
1676 +module_exit(blkvsc_exit);
1677 +
1678 +// eof