]>
Commit | Line | Data |
---|---|---|
f82bd046 | 1 | /* |
f82bd046 HJ |
2 | * Copyright (c) 2009, Microsoft Corporation. |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms and conditions of the GNU General Public License, | |
6 | * version 2, as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope it will be useful, but WITHOUT | |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
11 | * more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public License along with | |
14 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | |
15 | * Place - Suite 330, Boston, MA 02111-1307 USA. | |
16 | * | |
17 | * Authors: | |
d0e94d17 | 18 | * Haiyang Zhang <haiyangz@microsoft.com> |
f82bd046 | 19 | * Hank Janssen <hjanssen@microsoft.com> |
f82bd046 | 20 | */ |
f82bd046 HJ |
21 | #include <linux/init.h> |
22 | #include <linux/module.h> | |
23 | #include <linux/device.h> | |
24 | #include <linux/blkdev.h> | |
25 | #include <linux/major.h> | |
26 | #include <linux/delay.h> | |
27 | #include <linux/hdreg.h> | |
2a48fc0a | 28 | #include <linux/mutex.h> |
5a0e3ad6 | 29 | #include <linux/slab.h> |
f82bd046 HJ |
30 | #include <scsi/scsi.h> |
31 | #include <scsi/scsi_cmnd.h> | |
32 | #include <scsi/scsi_eh.h> | |
33 | #include <scsi/scsi_dbg.h> | |
e3fe0bb6 | 34 | #include "hv_api.h" |
645954c5 | 35 | #include "logging.h" |
2d82f6c7 | 36 | #include "version_info.h" |
870cde80 | 37 | #include "vmbus.h" |
bb969793 | 38 | #include "storvsc_api.h" |
f82bd046 | 39 | |
454f18a9 | 40 | |
f82bd046 HJ |
41 | #define BLKVSC_MINORS 64 |
42 | ||
f82bd046 HJ |
43 | enum blkvsc_device_type { |
44 | UNKNOWN_DEV_TYPE, | |
45 | HARDDISK_TYPE, | |
46 | DVD_TYPE, | |
47 | }; | |
48 | ||
454f18a9 BP |
49 | /* |
50 | * This request ties the struct request and struct | |
0b3f6834 | 51 | * blkvsc_request/hv_storvsc_request together A struct request may be |
454f18a9 BP |
52 | * represented by 1 or more struct blkvsc_request |
53 | */ | |
f82bd046 | 54 | struct blkvsc_request_group { |
8a280399 GKH |
55 | int outstanding; |
56 | int status; | |
57 | struct list_head blkvsc_req_list; /* list of blkvsc_requests */ | |
f82bd046 HJ |
58 | }; |
59 | ||
f82bd046 | 60 | struct blkvsc_request { |
8a280399 GKH |
61 | /* blkvsc_request_group.blkvsc_req_list */ |
62 | struct list_head req_entry; | |
63 | ||
64 | /* block_device_context.pending_list */ | |
65 | struct list_head pend_entry; | |
66 | ||
67 | /* This may be null if we generate a request internally */ | |
68 | struct request *req; | |
f82bd046 | 69 | |
8a280399 | 70 | struct block_device_context *dev; |
f82bd046 | 71 | |
8a280399 GKH |
72 | /* The group this request is part of. Maybe null */ |
73 | struct blkvsc_request_group *group; | |
f82bd046 | 74 | |
8a280399 | 75 | wait_queue_head_t wevent; |
f82bd046 HJ |
76 | int cond; |
77 | ||
8a280399 GKH |
78 | int write; |
79 | sector_t sector_start; | |
80 | unsigned long sector_count; | |
f82bd046 HJ |
81 | |
82 | unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE]; | |
83 | unsigned char cmd_len; | |
84 | unsigned char cmnd[MAX_COMMAND_SIZE]; | |
85 | ||
0b3f6834 | 86 | struct hv_storvsc_request request; |
8a280399 GKH |
87 | /* |
88 | * !!!DO NOT ADD ANYTHING BELOW HERE!!! Otherwise, memory can overlap, | |
89 | * because - The extension buffer falls right here and is pointed to by | |
90 | * request.Extension; | |
91 | * Which sounds like a horrible idea, who designed this? | |
92 | */ | |
f82bd046 HJ |
93 | }; |
94 | ||
454f18a9 | 95 | /* Per device structure */ |
f82bd046 | 96 | struct block_device_context { |
8a280399 | 97 | /* point back to our device context */ |
6bad88da | 98 | struct hv_device *device_ctx; |
8a280399 GKH |
99 | struct kmem_cache *request_pool; |
100 | spinlock_t lock; | |
101 | struct gendisk *gd; | |
f82bd046 | 102 | enum blkvsc_device_type device_type; |
8a280399 GKH |
103 | struct list_head pending_list; |
104 | ||
105 | unsigned char device_id[64]; | |
106 | unsigned int device_id_len; | |
107 | int num_outstanding_reqs; | |
108 | int shutting_down; | |
109 | int media_not_present; | |
110 | unsigned int sector_size; | |
111 | sector_t capacity; | |
112 | unsigned int port; | |
113 | unsigned char path; | |
114 | unsigned char target; | |
115 | int users; | |
f82bd046 HJ |
116 | }; |
117 | ||
f82bd046 | 118 | |
454f18a9 | 119 | /* Static decl */ |
2a48fc0a | 120 | static DEFINE_MUTEX(blkvsc_mutex); |
f82bd046 HJ |
121 | static int blkvsc_probe(struct device *dev); |
122 | static int blkvsc_remove(struct device *device); | |
123 | static void blkvsc_shutdown(struct device *device); | |
124 | ||
39635f7d | 125 | static int blkvsc_open(struct block_device *bdev, fmode_t mode); |
77d2d9da | 126 | static int blkvsc_release(struct gendisk *disk, fmode_t mode); |
cafb0bfc TH |
127 | static unsigned int blkvsc_check_events(struct gendisk *gd, |
128 | unsigned int clearing); | |
f82bd046 HJ |
129 | static int blkvsc_revalidate_disk(struct gendisk *gd); |
130 | static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg); | |
dfe8b2d9 BP |
131 | static int blkvsc_ioctl(struct block_device *bd, fmode_t mode, |
132 | unsigned cmd, unsigned long argument); | |
f82bd046 | 133 | static void blkvsc_request(struct request_queue *queue); |
0b3f6834 | 134 | static void blkvsc_request_completion(struct hv_storvsc_request *request); |
8a280399 GKH |
135 | static int blkvsc_do_request(struct block_device_context *blkdev, |
136 | struct request *req); | |
137 | static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req, | |
138 | void (*request_completion)(struct hv_storvsc_request *)); | |
f82bd046 | 139 | static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req); |
0b3f6834 | 140 | static void blkvsc_cmd_completion(struct hv_storvsc_request *request); |
f82bd046 HJ |
141 | static int blkvsc_do_inquiry(struct block_device_context *blkdev); |
142 | static int blkvsc_do_read_capacity(struct block_device_context *blkdev); | |
143 | static int blkvsc_do_read_capacity16(struct block_device_context *blkdev); | |
144 | static int blkvsc_do_flush(struct block_device_context *blkdev); | |
145 | static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev); | |
146 | static int blkvsc_do_pending_reqs(struct block_device_context *blkdev); | |
147 | ||
f82bd046 | 148 | static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE; |
1ec28abb SH |
149 | module_param(blkvsc_ringbuffer_size, int, S_IRUGO); |
150 | MODULE_PARM_DESC(ring_size, "Ring buffer size (in bytes)"); | |
f82bd046 | 151 | |
454f18a9 | 152 | /* The one and only one */ |
67a5ee2d | 153 | static struct storvsc_driver_object g_blkvsc_drv; |
f82bd046 | 154 | |
48c9f7c3 | 155 | static const struct block_device_operations block_ops = { |
f82bd046 HJ |
156 | .owner = THIS_MODULE, |
157 | .open = blkvsc_open, | |
158 | .release = blkvsc_release, | |
cafb0bfc | 159 | .check_events = blkvsc_check_events, |
f82bd046 HJ |
160 | .revalidate_disk = blkvsc_revalidate_disk, |
161 | .getgeo = blkvsc_getgeo, | |
162 | .ioctl = blkvsc_ioctl, | |
163 | }; | |
164 | ||
3e189519 | 165 | /* |
8a280399 GKH |
166 | * blkvsc_drv_init - BlkVsc driver initialization. |
167 | */ | |
21707bed | 168 | static int blkvsc_drv_init(int (*drv_init)(struct hv_driver *drv)) |
f82bd046 | 169 | { |
67a5ee2d S |
170 | struct storvsc_driver_object *storvsc_drv_obj = &g_blkvsc_drv; |
171 | struct hv_driver *drv = &g_blkvsc_drv.base; | |
8a280399 | 172 | int ret; |
f82bd046 | 173 | |
8a046024 | 174 | storvsc_drv_obj->ring_buffer_size = blkvsc_ringbuffer_size; |
f82bd046 | 175 | |
150f9398 S |
176 | drv->priv = storvsc_drv_obj; |
177 | ||
454f18a9 | 178 | /* Callback to client driver to complete the initialization */ |
8a046024 | 179 | drv_init(&storvsc_drv_obj->base); |
f82bd046 | 180 | |
150f9398 | 181 | drv->driver.name = storvsc_drv_obj->base.name; |
f82bd046 | 182 | |
150f9398 S |
183 | drv->driver.probe = blkvsc_probe; |
184 | drv->driver.remove = blkvsc_remove; | |
185 | drv->driver.shutdown = blkvsc_shutdown; | |
f82bd046 | 186 | |
454f18a9 | 187 | /* The driver belongs to vmbus */ |
150f9398 | 188 | ret = vmbus_child_driver_register(&drv->driver); |
f82bd046 | 189 | |
f82bd046 HJ |
190 | return ret; |
191 | } | |
192 | ||
f82bd046 HJ |
193 | static int blkvsc_drv_exit_cb(struct device *dev, void *data) |
194 | { | |
195 | struct device **curr = (struct device **)data; | |
196 | *curr = dev; | |
454f18a9 | 197 | return 1; /* stop iterating */ |
f82bd046 HJ |
198 | } |
199 | ||
bd1de709 | 200 | static void blkvsc_drv_exit(void) |
f82bd046 | 201 | { |
67a5ee2d S |
202 | struct storvsc_driver_object *storvsc_drv_obj = &g_blkvsc_drv; |
203 | struct hv_driver *drv = &g_blkvsc_drv.base; | |
8a280399 | 204 | struct device *current_dev; |
2295ba2e | 205 | int ret; |
f82bd046 | 206 | |
8a280399 | 207 | while (1) { |
f82bd046 HJ |
208 | current_dev = NULL; |
209 | ||
454f18a9 | 210 | /* Get the device */ |
150f9398 | 211 | ret = driver_for_each_device(&drv->driver, NULL, |
2295ba2e BP |
212 | (void *) ¤t_dev, |
213 | blkvsc_drv_exit_cb); | |
214 | ||
215 | if (ret) | |
216 | DPRINT_WARN(BLKVSC_DRV, | |
217 | "driver_for_each_device returned %d", ret); | |
218 | ||
f82bd046 HJ |
219 | |
220 | if (current_dev == NULL) | |
221 | break; | |
222 | ||
454f18a9 | 223 | /* Initiate removal from the top-down */ |
f82bd046 HJ |
224 | device_unregister(current_dev); |
225 | } | |
226 | ||
ca623ad3 HZ |
227 | if (storvsc_drv_obj->base.cleanup) |
228 | storvsc_drv_obj->base.cleanup(&storvsc_drv_obj->base); | |
f82bd046 | 229 | |
150f9398 | 230 | vmbus_child_driver_unregister(&drv->driver); |
f82bd046 | 231 | |
f82bd046 HJ |
232 | return; |
233 | } | |
234 | ||
3e189519 | 235 | /* |
8a280399 GKH |
236 | * blkvsc_probe - Add a new device for this driver |
237 | */ | |
f82bd046 HJ |
238 | static int blkvsc_probe(struct device *device) |
239 | { | |
150f9398 S |
240 | struct hv_driver *drv = |
241 | drv_to_hv_drv(device->driver); | |
8a280399 | 242 | struct storvsc_driver_object *storvsc_drv_obj = |
67a5ee2d | 243 | drv->priv; |
6bad88da | 244 | struct hv_device *device_obj = device_to_hv_device(device); |
f82bd046 | 245 | |
8a280399 | 246 | struct block_device_context *blkdev = NULL; |
9f0c7d2c | 247 | struct storvsc_device_info device_info; |
8a280399 GKH |
248 | int major = 0; |
249 | int devnum = 0; | |
250 | int ret = 0; | |
251 | static int ide0_registered; | |
252 | static int ide1_registered; | |
f82bd046 | 253 | |
f82bd046 HJ |
254 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter"); |
255 | ||
ca623ad3 | 256 | if (!storvsc_drv_obj->base.dev_add) { |
f82bd046 | 257 | DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set"); |
f82bd046 HJ |
258 | ret = -1; |
259 | goto Cleanup; | |
260 | } | |
261 | ||
262 | blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL); | |
8a280399 | 263 | if (!blkdev) { |
f82bd046 HJ |
264 | ret = -ENOMEM; |
265 | goto Cleanup; | |
266 | } | |
267 | ||
268 | INIT_LIST_HEAD(&blkdev->pending_list); | |
269 | ||
454f18a9 | 270 | /* Initialize what we can here */ |
f82bd046 HJ |
271 | spin_lock_init(&blkdev->lock); |
272 | ||
4e5166b5 BP |
273 | /* ASSERT(sizeof(struct blkvsc_request_group) <= */ |
274 | /* sizeof(struct blkvsc_request)); */ | |
f82bd046 | 275 | |
6bad88da | 276 | blkdev->request_pool = kmem_cache_create(dev_name(&device_obj->device), |
8a280399 | 277 | sizeof(struct blkvsc_request) + |
8a046024 | 278 | storvsc_drv_obj->request_ext_size, 0, |
8a280399 GKH |
279 | SLAB_HWCACHE_ALIGN, NULL); |
280 | if (!blkdev->request_pool) { | |
f82bd046 HJ |
281 | ret = -ENOMEM; |
282 | goto Cleanup; | |
283 | } | |
284 | ||
285 | ||
454f18a9 | 286 | /* Call to the vsc driver to add the device */ |
ca623ad3 | 287 | ret = storvsc_drv_obj->base.dev_add(device_obj, &device_info); |
8a280399 | 288 | if (ret != 0) { |
f82bd046 HJ |
289 | DPRINT_ERR(BLKVSC_DRV, "unable to add blkvsc device"); |
290 | goto Cleanup; | |
291 | } | |
292 | ||
6bad88da | 293 | blkdev->device_ctx = device_obj; |
8a280399 | 294 | /* this identified the device 0 or 1 */ |
8a046024 | 295 | blkdev->target = device_info.target_id; |
8a280399 | 296 | /* this identified the ide ctrl 0 or 1 */ |
8a046024 | 297 | blkdev->path = device_info.path_id; |
f82bd046 | 298 | |
b57a68dc | 299 | dev_set_drvdata(device, blkdev); |
f82bd046 | 300 | |
454f18a9 | 301 | /* Calculate the major and device num */ |
8a280399 | 302 | if (blkdev->path == 0) { |
f82bd046 | 303 | major = IDE0_MAJOR; |
454f18a9 | 304 | devnum = blkdev->path + blkdev->target; /* 0 or 1 */ |
f82bd046 | 305 | |
8a280399 | 306 | if (!ide0_registered) { |
f82bd046 | 307 | ret = register_blkdev(major, "ide"); |
8a280399 GKH |
308 | if (ret != 0) { |
309 | DPRINT_ERR(BLKVSC_DRV, | |
310 | "register_blkdev() failed! ret %d", | |
311 | ret); | |
f82bd046 HJ |
312 | goto Remove; |
313 | } | |
314 | ||
315 | ide0_registered = 1; | |
316 | } | |
8a280399 | 317 | } else if (blkdev->path == 1) { |
f82bd046 | 318 | major = IDE1_MAJOR; |
454f18a9 | 319 | devnum = blkdev->path + blkdev->target + 1; /* 2 or 3 */ |
f82bd046 | 320 | |
8a280399 | 321 | if (!ide1_registered) { |
f82bd046 | 322 | ret = register_blkdev(major, "ide"); |
8a280399 GKH |
323 | if (ret != 0) { |
324 | DPRINT_ERR(BLKVSC_DRV, | |
325 | "register_blkdev() failed! ret %d", | |
326 | ret); | |
f82bd046 HJ |
327 | goto Remove; |
328 | } | |
329 | ||
330 | ide1_registered = 1; | |
331 | } | |
8a280399 | 332 | } else { |
f82bd046 HJ |
333 | DPRINT_ERR(BLKVSC_DRV, "invalid pathid"); |
334 | ret = -1; | |
335 | goto Cleanup; | |
336 | } | |
337 | ||
338 | DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!", major); | |
339 | ||
340 | blkdev->gd = alloc_disk(BLKVSC_MINORS); | |
8a280399 | 341 | if (!blkdev->gd) { |
f82bd046 HJ |
342 | DPRINT_ERR(BLKVSC_DRV, "register_blkdev() failed! ret %d", ret); |
343 | ret = -1; | |
344 | goto Cleanup; | |
345 | } | |
346 | ||
347 | blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock); | |
348 | ||
349 | blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE); | |
8a78362c | 350 | blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT); |
f82bd046 HJ |
351 | blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1); |
352 | blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY); | |
353 | blk_queue_dma_alignment(blkdev->gd->queue, 511); | |
354 | ||
355 | blkdev->gd->major = major; | |
356 | if (devnum == 1 || devnum == 3) | |
357 | blkdev->gd->first_minor = BLKVSC_MINORS; | |
358 | else | |
359 | blkdev->gd->first_minor = 0; | |
360 | blkdev->gd->fops = &block_ops; | |
cafb0bfc | 361 | blkdev->gd->events = DISK_EVENT_MEDIA_CHANGE; |
f82bd046 | 362 | blkdev->gd->private_data = blkdev; |
268eff90 | 363 | blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device); |
8a280399 | 364 | sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum); |
f82bd046 HJ |
365 | |
366 | blkvsc_do_inquiry(blkdev); | |
8a280399 | 367 | if (blkdev->device_type == DVD_TYPE) { |
f82bd046 HJ |
368 | set_disk_ro(blkdev->gd, 1); |
369 | blkdev->gd->flags |= GENHD_FL_REMOVABLE; | |
370 | blkvsc_do_read_capacity(blkdev); | |
8a280399 | 371 | } else { |
f82bd046 HJ |
372 | blkvsc_do_read_capacity16(blkdev); |
373 | } | |
374 | ||
375 | set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512)); | |
0fce4c2f | 376 | blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size); |
454f18a9 | 377 | /* go! */ |
f82bd046 HJ |
378 | add_disk(blkdev->gd); |
379 | ||
8a280399 GKH |
380 | DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %lu sector_size %d", |
381 | blkdev->gd->disk_name, (unsigned long)blkdev->capacity, | |
382 | blkdev->sector_size); | |
f82bd046 HJ |
383 | |
384 | return ret; | |
385 | ||
386 | Remove: | |
ca623ad3 | 387 | storvsc_drv_obj->base.dev_rm(device_obj); |
f82bd046 HJ |
388 | |
389 | Cleanup: | |
8a280399 GKH |
390 | if (blkdev) { |
391 | if (blkdev->request_pool) { | |
f82bd046 HJ |
392 | kmem_cache_destroy(blkdev->request_pool); |
393 | blkdev->request_pool = NULL; | |
394 | } | |
395 | kfree(blkdev); | |
396 | blkdev = NULL; | |
397 | } | |
398 | ||
f82bd046 HJ |
399 | return ret; |
400 | } | |
401 | ||
402 | static void blkvsc_shutdown(struct device *device) | |
403 | { | |
b57a68dc | 404 | struct block_device_context *blkdev = dev_get_drvdata(device); |
f82bd046 HJ |
405 | unsigned long flags; |
406 | ||
407 | if (!blkdev) | |
408 | return; | |
409 | ||
8a280399 GKH |
410 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_shutdown - users %d disk %s\n", |
411 | blkdev->users, blkdev->gd->disk_name); | |
f82bd046 HJ |
412 | |
413 | spin_lock_irqsave(&blkdev->lock, flags); | |
414 | ||
415 | blkdev->shutting_down = 1; | |
416 | ||
417 | blk_stop_queue(blkdev->gd->queue); | |
418 | ||
419 | spin_unlock_irqrestore(&blkdev->lock, flags); | |
420 | ||
8a280399 GKH |
421 | while (blkdev->num_outstanding_reqs) { |
422 | DPRINT_INFO(STORVSC, "waiting for %d requests to complete...", | |
423 | blkdev->num_outstanding_reqs); | |
f82bd046 HJ |
424 | udelay(100); |
425 | } | |
426 | ||
427 | blkvsc_do_flush(blkdev); | |
428 | ||
429 | spin_lock_irqsave(&blkdev->lock, flags); | |
430 | ||
431 | blkvsc_cancel_pending_reqs(blkdev); | |
432 | ||
433 | spin_unlock_irqrestore(&blkdev->lock, flags); | |
434 | } | |
435 | ||
436 | static int blkvsc_do_flush(struct block_device_context *blkdev) | |
437 | { | |
8a280399 | 438 | struct blkvsc_request *blkvsc_req; |
f82bd046 HJ |
439 | |
440 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_flush()\n"); | |
441 | ||
442 | if (blkdev->device_type != HARDDISK_TYPE) | |
443 | return 0; | |
444 | ||
445 | blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL); | |
446 | if (!blkvsc_req) | |
f82bd046 | 447 | return -ENOMEM; |
f82bd046 HJ |
448 | |
449 | memset(blkvsc_req, 0, sizeof(struct blkvsc_request)); | |
450 | init_waitqueue_head(&blkvsc_req->wevent); | |
451 | blkvsc_req->dev = blkdev; | |
452 | blkvsc_req->req = NULL; | |
453 | blkvsc_req->write = 0; | |
454 | ||
ca623ad3 HZ |
455 | blkvsc_req->request.data_buffer.pfn_array[0] = 0; |
456 | blkvsc_req->request.data_buffer.offset = 0; | |
457 | blkvsc_req->request.data_buffer.len = 0; | |
f82bd046 HJ |
458 | |
459 | blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE; | |
460 | blkvsc_req->cmd_len = 10; | |
461 | ||
8a280399 GKH |
462 | /* |
463 | * Set this here since the completion routine may be invoked and | |
464 | * completed before we return | |
465 | */ | |
466 | blkvsc_req->cond = 0; | |
f82bd046 HJ |
467 | blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion); |
468 | ||
469 | wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond); | |
470 | ||
471 | kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); | |
472 | ||
473 | return 0; | |
474 | } | |
475 | ||
454f18a9 | 476 | /* Do a scsi INQUIRY cmd here to get the device type (ie disk or dvd) */ |
f82bd046 HJ |
477 | static int blkvsc_do_inquiry(struct block_device_context *blkdev) |
478 | { | |
8a280399 | 479 | struct blkvsc_request *blkvsc_req; |
f82bd046 HJ |
480 | struct page *page_buf; |
481 | unsigned char *buf; | |
482 | unsigned char device_type; | |
483 | ||
484 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_inquiry()\n"); | |
485 | ||
486 | blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL); | |
487 | if (!blkvsc_req) | |
f82bd046 | 488 | return -ENOMEM; |
f82bd046 HJ |
489 | |
490 | memset(blkvsc_req, 0, sizeof(struct blkvsc_request)); | |
491 | page_buf = alloc_page(GFP_KERNEL); | |
8a280399 | 492 | if (!page_buf) { |
f82bd046 HJ |
493 | kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); |
494 | return -ENOMEM; | |
495 | } | |
496 | ||
497 | init_waitqueue_head(&blkvsc_req->wevent); | |
498 | blkvsc_req->dev = blkdev; | |
499 | blkvsc_req->req = NULL; | |
500 | blkvsc_req->write = 0; | |
501 | ||
ca623ad3 HZ |
502 | blkvsc_req->request.data_buffer.pfn_array[0] = page_to_pfn(page_buf); |
503 | blkvsc_req->request.data_buffer.offset = 0; | |
504 | blkvsc_req->request.data_buffer.len = 64; | |
f82bd046 HJ |
505 | |
506 | blkvsc_req->cmnd[0] = INQUIRY; | |
454f18a9 BP |
507 | blkvsc_req->cmnd[1] = 0x1; /* Get product data */ |
508 | blkvsc_req->cmnd[2] = 0x83; /* mode page 83 */ | |
f82bd046 HJ |
509 | blkvsc_req->cmnd[4] = 64; |
510 | blkvsc_req->cmd_len = 6; | |
511 | ||
8a280399 GKH |
512 | /* |
513 | * Set this here since the completion routine may be invoked and | |
514 | * completed before we return | |
515 | */ | |
516 | blkvsc_req->cond = 0; | |
f82bd046 HJ |
517 | |
518 | blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion); | |
519 | ||
8a280399 GKH |
520 | DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", |
521 | blkvsc_req, blkvsc_req->cond); | |
f82bd046 HJ |
522 | |
523 | wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond); | |
524 | ||
525 | buf = kmap(page_buf); | |
526 | ||
04f50c4d | 527 | /* print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, 64); */ |
454f18a9 | 528 | /* be to le */ |
f82bd046 HJ |
529 | device_type = buf[0] & 0x1F; |
530 | ||
8a280399 | 531 | if (device_type == 0x0) { |
f82bd046 | 532 | blkdev->device_type = HARDDISK_TYPE; |
8a280399 | 533 | } else if (device_type == 0x5) { |
f82bd046 | 534 | blkdev->device_type = DVD_TYPE; |
8a280399 | 535 | } else { |
454f18a9 | 536 | /* TODO: this is currently unsupported device type */ |
f82bd046 HJ |
537 | blkdev->device_type = UNKNOWN_DEV_TYPE; |
538 | } | |
539 | ||
0686e4f4 | 540 | DPRINT_DBG(BLKVSC_DRV, "device type %d\n", device_type); |
f82bd046 HJ |
541 | |
542 | blkdev->device_id_len = buf[7]; | |
543 | if (blkdev->device_id_len > 64) | |
544 | blkdev->device_id_len = 64; | |
545 | ||
546 | memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len); | |
04f50c4d | 547 | /* printk_hex_dump_bytes("", DUMP_PREFIX_NONE, blkdev->device_id, |
454f18a9 | 548 | * blkdev->device_id_len); */ |
f82bd046 HJ |
549 | |
550 | kunmap(page_buf); | |
551 | ||
552 | __free_page(page_buf); | |
553 | ||
554 | kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); | |
555 | ||
556 | return 0; | |
557 | } | |
558 | ||
454f18a9 | 559 | /* Do a scsi READ_CAPACITY cmd here to get the size of the disk */ |
f82bd046 HJ |
560 | static int blkvsc_do_read_capacity(struct block_device_context *blkdev) |
561 | { | |
8a280399 | 562 | struct blkvsc_request *blkvsc_req; |
f82bd046 HJ |
563 | struct page *page_buf; |
564 | unsigned char *buf; | |
565 | struct scsi_sense_hdr sense_hdr; | |
566 | ||
567 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity()\n"); | |
568 | ||
569 | blkdev->sector_size = 0; | |
570 | blkdev->capacity = 0; | |
454f18a9 | 571 | blkdev->media_not_present = 0; /* assume a disk is present */ |
f82bd046 HJ |
572 | |
573 | blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL); | |
574 | if (!blkvsc_req) | |
f82bd046 | 575 | return -ENOMEM; |
f82bd046 HJ |
576 | |
577 | memset(blkvsc_req, 0, sizeof(struct blkvsc_request)); | |
578 | page_buf = alloc_page(GFP_KERNEL); | |
8a280399 | 579 | if (!page_buf) { |
f82bd046 HJ |
580 | kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); |
581 | return -ENOMEM; | |
582 | } | |
583 | ||
584 | init_waitqueue_head(&blkvsc_req->wevent); | |
585 | blkvsc_req->dev = blkdev; | |
586 | blkvsc_req->req = NULL; | |
587 | blkvsc_req->write = 0; | |
588 | ||
ca623ad3 HZ |
589 | blkvsc_req->request.data_buffer.pfn_array[0] = page_to_pfn(page_buf); |
590 | blkvsc_req->request.data_buffer.offset = 0; | |
591 | blkvsc_req->request.data_buffer.len = 8; | |
f82bd046 HJ |
592 | |
593 | blkvsc_req->cmnd[0] = READ_CAPACITY; | |
594 | blkvsc_req->cmd_len = 16; | |
595 | ||
454f18a9 BP |
596 | /* |
597 | * Set this here since the completion routine may be invoked | |
598 | * and completed before we return | |
599 | */ | |
8a280399 | 600 | blkvsc_req->cond = 0; |
f82bd046 HJ |
601 | |
602 | blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion); | |
603 | ||
8a280399 GKH |
604 | DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", |
605 | blkvsc_req, blkvsc_req->cond); | |
f82bd046 HJ |
606 | |
607 | wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond); | |
608 | ||
454f18a9 | 609 | /* check error */ |
8a046024 | 610 | if (blkvsc_req->request.status) { |
8a280399 GKH |
611 | scsi_normalize_sense(blkvsc_req->sense_buffer, |
612 | SCSI_SENSE_BUFFERSIZE, &sense_hdr); | |
f82bd046 | 613 | |
8a280399 GKH |
614 | if (sense_hdr.asc == 0x3A) { |
615 | /* Medium not present */ | |
f82bd046 HJ |
616 | blkdev->media_not_present = 1; |
617 | } | |
f82bd046 HJ |
618 | return 0; |
619 | } | |
620 | buf = kmap(page_buf); | |
621 | ||
454f18a9 | 622 | /* be to le */ |
8a280399 GKH |
623 | blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) | |
624 | (buf[2] << 8) | buf[3]) + 1; | |
625 | blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) | | |
626 | (buf[6] << 8) | buf[7]; | |
f82bd046 HJ |
627 | |
628 | kunmap(page_buf); | |
629 | ||
630 | __free_page(page_buf); | |
631 | ||
632 | kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); | |
633 | ||
634 | return 0; | |
635 | } | |
636 | ||
f82bd046 HJ |
637 | static int blkvsc_do_read_capacity16(struct block_device_context *blkdev) |
638 | { | |
8a280399 | 639 | struct blkvsc_request *blkvsc_req; |
f82bd046 HJ |
640 | struct page *page_buf; |
641 | unsigned char *buf; | |
642 | struct scsi_sense_hdr sense_hdr; | |
643 | ||
644 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity16()\n"); | |
645 | ||
646 | blkdev->sector_size = 0; | |
647 | blkdev->capacity = 0; | |
454f18a9 | 648 | blkdev->media_not_present = 0; /* assume a disk is present */ |
f82bd046 HJ |
649 | |
650 | blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL); | |
651 | if (!blkvsc_req) | |
f82bd046 | 652 | return -ENOMEM; |
f82bd046 HJ |
653 | |
654 | memset(blkvsc_req, 0, sizeof(struct blkvsc_request)); | |
655 | page_buf = alloc_page(GFP_KERNEL); | |
8a280399 | 656 | if (!page_buf) { |
f82bd046 HJ |
657 | kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); |
658 | return -ENOMEM; | |
659 | } | |
660 | ||
661 | init_waitqueue_head(&blkvsc_req->wevent); | |
662 | blkvsc_req->dev = blkdev; | |
663 | blkvsc_req->req = NULL; | |
664 | blkvsc_req->write = 0; | |
665 | ||
ca623ad3 HZ |
666 | blkvsc_req->request.data_buffer.pfn_array[0] = page_to_pfn(page_buf); |
667 | blkvsc_req->request.data_buffer.offset = 0; | |
668 | blkvsc_req->request.data_buffer.len = 12; | |
f82bd046 | 669 | |
454f18a9 | 670 | blkvsc_req->cmnd[0] = 0x9E; /* READ_CAPACITY16; */ |
f82bd046 HJ |
671 | blkvsc_req->cmd_len = 16; |
672 | ||
454f18a9 BP |
673 | /* |
674 | * Set this here since the completion routine may be invoked | |
675 | * and completed before we return | |
676 | */ | |
8a280399 | 677 | blkvsc_req->cond = 0; |
f82bd046 HJ |
678 | |
679 | blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion); | |
680 | ||
8a280399 GKH |
681 | DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", |
682 | blkvsc_req, blkvsc_req->cond); | |
f82bd046 HJ |
683 | |
684 | wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond); | |
685 | ||
454f18a9 | 686 | /* check error */ |
8a046024 | 687 | if (blkvsc_req->request.status) { |
8a280399 GKH |
688 | scsi_normalize_sense(blkvsc_req->sense_buffer, |
689 | SCSI_SENSE_BUFFERSIZE, &sense_hdr); | |
690 | if (sense_hdr.asc == 0x3A) { | |
691 | /* Medium not present */ | |
f82bd046 HJ |
692 | blkdev->media_not_present = 1; |
693 | } | |
f82bd046 HJ |
694 | return 0; |
695 | } | |
696 | buf = kmap(page_buf); | |
697 | ||
454f18a9 | 698 | /* be to le */ |
8a280399 GKH |
699 | blkdev->capacity = be64_to_cpu(*(unsigned long long *) &buf[0]) + 1; |
700 | blkdev->sector_size = be32_to_cpu(*(unsigned int *)&buf[8]); | |
f82bd046 | 701 | |
8a280399 GKH |
702 | #if 0 |
703 | blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) | | |
704 | (buf[2] << 8) | buf[3]) + 1; | |
705 | blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) | | |
706 | (buf[6] << 8) | buf[7]; | |
707 | #endif | |
f82bd046 HJ |
708 | |
709 | kunmap(page_buf); | |
710 | ||
711 | __free_page(page_buf); | |
712 | ||
713 | kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); | |
714 | ||
715 | return 0; | |
716 | } | |
717 | ||
3e189519 | 718 | /* |
8a280399 GKH |
719 | * blkvsc_remove() - Callback when our device is removed |
720 | */ | |
f82bd046 HJ |
721 | static int blkvsc_remove(struct device *device) |
722 | { | |
150f9398 S |
723 | struct hv_driver *drv = |
724 | drv_to_hv_drv(device->driver); | |
8a280399 | 725 | struct storvsc_driver_object *storvsc_drv_obj = |
67a5ee2d | 726 | drv->priv; |
6bad88da | 727 | struct hv_device *device_obj = device_to_hv_device(device); |
b57a68dc | 728 | struct block_device_context *blkdev = dev_get_drvdata(device); |
f82bd046 | 729 | unsigned long flags; |
8a280399 | 730 | int ret; |
f82bd046 | 731 | |
f82bd046 HJ |
732 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n"); |
733 | ||
ca623ad3 | 734 | if (!storvsc_drv_obj->base.dev_rm) |
f82bd046 | 735 | return -1; |
f82bd046 | 736 | |
8a280399 GKH |
737 | /* |
738 | * Call to the vsc driver to let it know that the device is being | |
739 | * removed | |
740 | */ | |
ca623ad3 | 741 | ret = storvsc_drv_obj->base.dev_rm(device_obj); |
8a280399 | 742 | if (ret != 0) { |
454f18a9 | 743 | /* TODO: */ |
8a280399 GKH |
744 | DPRINT_ERR(BLKVSC_DRV, |
745 | "unable to remove blkvsc device (ret %d)", ret); | |
f82bd046 HJ |
746 | } |
747 | ||
454f18a9 | 748 | /* Get to a known state */ |
f82bd046 HJ |
749 | spin_lock_irqsave(&blkdev->lock, flags); |
750 | ||
751 | blkdev->shutting_down = 1; | |
752 | ||
753 | blk_stop_queue(blkdev->gd->queue); | |
754 | ||
755 | spin_unlock_irqrestore(&blkdev->lock, flags); | |
756 | ||
8a280399 GKH |
757 | while (blkdev->num_outstanding_reqs) { |
758 | DPRINT_INFO(STORVSC, "waiting for %d requests to complete...", | |
759 | blkdev->num_outstanding_reqs); | |
f82bd046 HJ |
760 | udelay(100); |
761 | } | |
762 | ||
763 | blkvsc_do_flush(blkdev); | |
764 | ||
765 | spin_lock_irqsave(&blkdev->lock, flags); | |
766 | ||
767 | blkvsc_cancel_pending_reqs(blkdev); | |
768 | ||
769 | spin_unlock_irqrestore(&blkdev->lock, flags); | |
770 | ||
771 | blk_cleanup_queue(blkdev->gd->queue); | |
772 | ||
773 | del_gendisk(blkdev->gd); | |
774 | ||
775 | kmem_cache_destroy(blkdev->request_pool); | |
776 | ||
777 | kfree(blkdev); | |
778 | ||
f82bd046 HJ |
779 | return ret; |
780 | } | |
781 | ||
782 | static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req) | |
783 | { | |
4e5166b5 BP |
784 | /* ASSERT(blkvsc_req->req); */ |
785 | /* ASSERT(blkvsc_req->sector_count <= (MAX_MULTIPAGE_BUFFER_COUNT*8)); */ | |
f82bd046 HJ |
786 | |
787 | blkvsc_req->cmd_len = 16; | |
788 | ||
8a280399 GKH |
789 | if (blkvsc_req->sector_start > 0xffffffff) { |
790 | if (rq_data_dir(blkvsc_req->req)) { | |
f82bd046 HJ |
791 | blkvsc_req->write = 1; |
792 | blkvsc_req->cmnd[0] = WRITE_16; | |
8a280399 | 793 | } else { |
f82bd046 HJ |
794 | blkvsc_req->write = 0; |
795 | blkvsc_req->cmnd[0] = READ_16; | |
796 | } | |
797 | ||
33659ebb CH |
798 | blkvsc_req->cmnd[1] |= |
799 | (blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0; | |
f82bd046 | 800 | |
8a280399 GKH |
801 | *(unsigned long long *)&blkvsc_req->cmnd[2] = |
802 | cpu_to_be64(blkvsc_req->sector_start); | |
803 | *(unsigned int *)&blkvsc_req->cmnd[10] = | |
804 | cpu_to_be32(blkvsc_req->sector_count); | |
805 | } else if ((blkvsc_req->sector_count > 0xff) || | |
806 | (blkvsc_req->sector_start > 0x1fffff)) { | |
807 | if (rq_data_dir(blkvsc_req->req)) { | |
f82bd046 HJ |
808 | blkvsc_req->write = 1; |
809 | blkvsc_req->cmnd[0] = WRITE_10; | |
8a280399 | 810 | } else { |
f82bd046 HJ |
811 | blkvsc_req->write = 0; |
812 | blkvsc_req->cmnd[0] = READ_10; | |
813 | } | |
814 | ||
33659ebb CH |
815 | blkvsc_req->cmnd[1] |= |
816 | (blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0; | |
f82bd046 | 817 | |
8a280399 GKH |
818 | *(unsigned int *)&blkvsc_req->cmnd[2] = |
819 | cpu_to_be32(blkvsc_req->sector_start); | |
820 | *(unsigned short *)&blkvsc_req->cmnd[7] = | |
821 | cpu_to_be16(blkvsc_req->sector_count); | |
822 | } else { | |
823 | if (rq_data_dir(blkvsc_req->req)) { | |
f82bd046 HJ |
824 | blkvsc_req->write = 1; |
825 | blkvsc_req->cmnd[0] = WRITE_6; | |
8a280399 | 826 | } else { |
f82bd046 HJ |
827 | blkvsc_req->write = 0; |
828 | blkvsc_req->cmnd[0] = READ_6; | |
829 | } | |
830 | ||
8a280399 GKH |
831 | *(unsigned int *)&blkvsc_req->cmnd[1] = |
832 | cpu_to_be32(blkvsc_req->sector_start) >> 8; | |
f82bd046 | 833 | blkvsc_req->cmnd[1] &= 0x1f; |
8a280399 | 834 | blkvsc_req->cmnd[4] = (unsigned char)blkvsc_req->sector_count; |
f82bd046 HJ |
835 | } |
836 | } | |
837 | ||
8a280399 GKH |
838 | static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req, |
839 | void (*request_completion)(struct hv_storvsc_request *)) | |
f82bd046 HJ |
840 | { |
841 | struct block_device_context *blkdev = blkvsc_req->dev; | |
6bad88da | 842 | struct hv_device *device_ctx = blkdev->device_ctx; |
150f9398 S |
843 | struct hv_driver *drv = |
844 | drv_to_hv_drv(device_ctx->device.driver); | |
8a280399 | 845 | struct storvsc_driver_object *storvsc_drv_obj = |
67a5ee2d | 846 | drv->priv; |
0b3f6834 | 847 | struct hv_storvsc_request *storvsc_req; |
8a280399 | 848 | int ret; |
f82bd046 | 849 | |
8a280399 GKH |
850 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - " |
851 | "req %p type %s start_sector %lu count %ld offset %d " | |
852 | "len %d\n", blkvsc_req, | |
853 | (blkvsc_req->write) ? "WRITE" : "READ", | |
854 | (unsigned long) blkvsc_req->sector_start, | |
855 | blkvsc_req->sector_count, | |
ca623ad3 HZ |
856 | blkvsc_req->request.data_buffer.offset, |
857 | blkvsc_req->request.data_buffer.len); | |
8a280399 | 858 | #if 0 |
ca623ad3 | 859 | for (i = 0; i < (blkvsc_req->request.data_buffer.len >> 12); i++) { |
8a280399 GKH |
860 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - " |
861 | "req %p pfn[%d] %llx\n", | |
862 | blkvsc_req, i, | |
415f2287 | 863 | blkvsc_req->request.data_buffer.pfn_array[i]); |
8a280399 GKH |
864 | } |
865 | #endif | |
f82bd046 HJ |
866 | |
867 | storvsc_req = &blkvsc_req->request; | |
8a046024 | 868 | storvsc_req->extension = (void *)((unsigned long)blkvsc_req + |
8a280399 | 869 | sizeof(struct blkvsc_request)); |
f82bd046 | 870 | |
8a046024 | 871 | storvsc_req->type = blkvsc_req->write ? WRITE_TYPE : READ_TYPE; |
f82bd046 | 872 | |
8a046024 HJ |
873 | storvsc_req->on_io_completion = request_completion; |
874 | storvsc_req->context = blkvsc_req; | |
f82bd046 | 875 | |
8a046024 HJ |
876 | storvsc_req->host = blkdev->port; |
877 | storvsc_req->bus = blkdev->path; | |
878 | storvsc_req->target_id = blkdev->target; | |
879 | storvsc_req->lun_id = 0; /* this is not really used at all */ | |
f82bd046 | 880 | |
8a046024 HJ |
881 | storvsc_req->cdb_len = blkvsc_req->cmd_len; |
882 | storvsc_req->cdb = blkvsc_req->cmnd; | |
f82bd046 | 883 | |
8a046024 HJ |
884 | storvsc_req->sense_buffer = blkvsc_req->sense_buffer; |
885 | storvsc_req->sense_buffer_size = SCSI_SENSE_BUFFERSIZE; | |
f82bd046 | 886 | |
6bad88da | 887 | ret = storvsc_drv_obj->on_io_request(blkdev->device_ctx, |
8a280399 | 888 | &blkvsc_req->request); |
f82bd046 | 889 | if (ret == 0) |
f82bd046 | 890 | blkdev->num_outstanding_reqs++; |
f82bd046 HJ |
891 | |
892 | return ret; | |
893 | } | |
894 | ||
454f18a9 BP |
895 | /* |
896 | * We break the request into 1 or more blkvsc_requests and submit | |
25985edc | 897 | * them. If we can't submit them all, we put them on the |
454f18a9 BP |
898 | * pending_list. The blkvsc_request() will work on the pending_list. |
899 | */ | |
8a280399 GKH |
static int blkvsc_do_request(struct block_device_context *blkdev,
			     struct request *req)
{
	struct bio *bio = NULL;
	struct bio_vec *bvec = NULL;
	struct bio_vec *prev_bvec = NULL;
	struct blkvsc_request *blkvsc_req = NULL;
	struct blkvsc_request *tmp;
	int databuf_idx = 0;		/* index into the current pfn_array */
	int seg_idx = 0;
	sector_t start_sector;		/* in 512-byte units until converted */
	unsigned long num_sectors = 0;	/* 512-byte sectors in current sub-req */
	int ret = 0;
	int pending = 0;		/* set once any sub-request is parked */
	struct blkvsc_request_group *group = NULL;

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %lu\n", blkdev, req,
		  (unsigned long)blk_rq_pos(req));

	/* Create a group to tie req to list of blkvsc_reqs */
	group = kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
	if (!group)
		return -ENOMEM;

	INIT_LIST_HEAD(&group->blkvsc_req_list);
	group->outstanding = group->status = 0;

	start_sector = blk_rq_pos(req);

	/* foreach bio in the request */
	if (req->bio) {
		for (bio = req->bio; bio; bio = bio->bi_next) {
			/*
			 * Map this bio into an existing or new storvsc request
			 */
			bio_for_each_segment(bvec, bio, seg_idx) {
				DPRINT_DBG(BLKVSC_DRV, "bio_for_each_segment() "
					   "- req %p bio %p bvec %p seg_idx %d "
					   "databuf_idx %d\n", req, bio, bvec,
					   seg_idx, databuf_idx);

				/*
				 * Start a new sub-request when we have none
				 * yet, the pfn array is full, or the segment
				 * cannot be coalesced into a contiguous
				 * multipage buffer (offset hole at the start,
				 * or the previous segment did not fill its
				 * page to the end).
				 */
				/* Get a new storvsc request */
				/* 1st-time */
				if ((!blkvsc_req) ||
				    (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT)
				    /* hole at the begin of page */
				    || (bvec->bv_offset != 0) ||
				    /* hold at the end of page */
				    (prev_bvec &&
				     (prev_bvec->bv_len != PAGE_SIZE))) {
					/*
					 * Finalize the previous sub-request:
					 * convert 512-byte units into device
					 * sector units and build its CDB.
					 */
					if (blkvsc_req) {
						blkvsc_req->sector_start = start_sector;
						sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));

						blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);
						blkvsc_init_rw(blkvsc_req);
					}

					/*
					 * Create new blkvsc_req to represent
					 * the current bvec
					 */
					blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
					if (!blkvsc_req) {
						/*
						 * Allocation failure: unwind
						 * every sub-request built so
						 * far, then the group itself.
						 */
						/* free up everything */
						list_for_each_entry_safe(
							blkvsc_req, tmp,
							&group->blkvsc_req_list,
							req_entry) {
							list_del(&blkvsc_req->req_entry);
							kmem_cache_free(blkdev->request_pool, blkvsc_req);
						}

						kmem_cache_free(blkdev->request_pool, group);
						return -ENOMEM;
					}

					memset(blkvsc_req, 0,
					       sizeof(struct blkvsc_request));

					blkvsc_req->dev = blkdev;
					blkvsc_req->req = req;
					blkvsc_req->request.data_buffer.offset
						= bvec->bv_offset;
					blkvsc_req->request.data_buffer.len
						= 0;

					/* Add to the group */
					blkvsc_req->group = group;
					blkvsc_req->group->outstanding++;
					list_add_tail(&blkvsc_req->req_entry,
						&blkvsc_req->group->blkvsc_req_list);

					start_sector += num_sectors;
					num_sectors = 0;
					databuf_idx = 0;
				}

				/* Add the curr bvec/segment to the curr blkvsc_req */
				blkvsc_req->request.data_buffer.
					pfn_array[databuf_idx]
						= page_to_pfn(bvec->bv_page);
				blkvsc_req->request.data_buffer.len
					+= bvec->bv_len;

				prev_bvec = bvec;

				databuf_idx++;
				num_sectors += bvec->bv_len >> 9;

			} /* bio_for_each_segment */

		} /* rq_for_each_bio */
	}

	/* Handle the last one */
	if (blkvsc_req) {
		DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p group %p count %d\n",
			   blkdev, req, blkvsc_req->group,
			   blkvsc_req->group->outstanding);

		blkvsc_req->sector_start = start_sector;
		sector_div(blkvsc_req->sector_start,
			   (blkdev->sector_size >> 9));

		blkvsc_req->sector_count = num_sectors /
					   (blkdev->sector_size >> 9);

		blkvsc_init_rw(blkvsc_req);
	}

	/*
	 * Submit every sub-request in order.  Once one submission is
	 * refused (presumably the host ring is full -- the -1 return
	 * comes from on_io_request; TODO confirm), park it and all
	 * later ones on the pending list to preserve ordering.
	 */
	list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry) {
		if (pending) {
			DPRINT_DBG(BLKVSC_DRV, "adding blkvsc_req to "
				   "pending_list - blkvsc_req %p start_sect %lu"
				   " sect_count %ld (%lu %ld)\n", blkvsc_req,
				   (unsigned long)blkvsc_req->sector_start,
				   blkvsc_req->sector_count,
				   (unsigned long)start_sector,
				   (unsigned long)num_sectors);

			list_add_tail(&blkvsc_req->pend_entry,
				      &blkdev->pending_list);
		} else {
			ret = blkvsc_submit_request(blkvsc_req,
						    blkvsc_request_completion);
			if (ret == -1) {
				pending = 1;
				list_add_tail(&blkvsc_req->pend_entry,
					      &blkdev->pending_list);
			}

			DPRINT_DBG(BLKVSC_DRV, "submitted blkvsc_req %p "
				   "start_sect %lu sect_count %ld (%lu %ld) "
				   "ret %d\n", blkvsc_req,
				   (unsigned long)blkvsc_req->sector_start,
				   blkvsc_req->sector_count,
				   (unsigned long)start_sector,
				   num_sectors, ret);
		}
	}

	/* Non-zero return tells the caller to stop the queue */
	return pending;
}
1065 | ||
0b3f6834 | 1066 | static void blkvsc_cmd_completion(struct hv_storvsc_request *request) |
f82bd046 | 1067 | { |
8a280399 | 1068 | struct blkvsc_request *blkvsc_req = |
8a046024 | 1069 | (struct blkvsc_request *)request->context; |
8a280399 GKH |
1070 | struct block_device_context *blkdev = |
1071 | (struct block_device_context *)blkvsc_req->dev; | |
f82bd046 HJ |
1072 | struct scsi_sense_hdr sense_hdr; |
1073 | ||
8a280399 GKH |
1074 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_cmd_completion() - req %p\n", |
1075 | blkvsc_req); | |
f82bd046 HJ |
1076 | |
1077 | blkdev->num_outstanding_reqs--; | |
1078 | ||
8a046024 | 1079 | if (blkvsc_req->request.status) |
8a280399 GKH |
1080 | if (scsi_normalize_sense(blkvsc_req->sense_buffer, |
1081 | SCSI_SENSE_BUFFERSIZE, &sense_hdr)) | |
f82bd046 | 1082 | scsi_print_sense_hdr("blkvsc", &sense_hdr); |
f82bd046 | 1083 | |
8a280399 | 1084 | blkvsc_req->cond = 1; |
f82bd046 HJ |
1085 | wake_up_interruptible(&blkvsc_req->wevent); |
1086 | } | |
1087 | ||
/*
 * Completion callback for block-layer I/O sub-requests.  Accounts the
 * completion under blkdev->lock; once the whole group has completed,
 * finishes the originating struct request in order, then restarts the
 * queue to drain anything that was pended while the host was busy.
 */
static void blkvsc_request_completion(struct hv_storvsc_request *request)
{
	struct blkvsc_request *blkvsc_req =
		(struct blkvsc_request *)request->context;
	struct block_device_context *blkdev =
		(struct block_device_context *)blkvsc_req->dev;
	unsigned long flags;
	struct blkvsc_request *comp_req, *tmp;

	/* ASSERT(blkvsc_req->group); */

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p blkvsc_req %p group %p type %s "
		   "sect_start %lu sect_count %ld len %d group outstd %d "
		   "total outstd %d\n",
		   blkdev, blkvsc_req, blkvsc_req->group,
		   (blkvsc_req->write) ? "WRITE" : "READ",
		   (unsigned long)blkvsc_req->sector_start,
		   blkvsc_req->sector_count,
		   blkvsc_req->request.data_buffer.len,
		   blkvsc_req->group->outstanding,
		   blkdev->num_outstanding_reqs);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->num_outstanding_reqs--;
	blkvsc_req->group->outstanding--;

	/*
	 * Only start processing when all the blkvsc_reqs are
	 * completed. This guarantees no out-of-order blkvsc_req
	 * completion when calling end_that_request_first()
	 */
	if (blkvsc_req->group->outstanding == 0) {
		list_for_each_entry_safe(comp_req, tmp,
					 &blkvsc_req->group->blkvsc_req_list,
					 req_entry) {
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p "
				   "sect_start %lu sect_count %ld\n",
				   comp_req,
				   (unsigned long)comp_req->sector_start,
				   comp_req->sector_count);

			list_del(&comp_req->req_entry);

			/* Map the storvsc status onto 0 / -EIO for the block layer */
			if (!__blk_end_request(comp_req->req,
				(!comp_req->request.status ? 0 : -EIO),
				comp_req->sector_count * blkdev->sector_size)) {
				/*
				 * All the sectors have been xferred ie the
				 * request is done
				 */
				DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n",
					   comp_req->req);
				/* The group is only freed with its last sub-request */
				kmem_cache_free(blkdev->request_pool,
						comp_req->group);
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		/* Resume the queue unless we are tearing the device down */
		if (!blkdev->shutting_down) {
			blkvsc_do_pending_reqs(blkdev);
			blk_start_queue(blkdev->gd->queue);
			blkvsc_request(blkdev->gd->queue);
		}
	}

	spin_unlock_irqrestore(&blkdev->lock, flags);
}
1157 | ||
1158 | static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev) | |
1159 | { | |
1160 | struct blkvsc_request *pend_req, *tmp; | |
1161 | struct blkvsc_request *comp_req, *tmp2; | |
1162 | ||
8a280399 | 1163 | int ret = 0; |
f82bd046 HJ |
1164 | |
1165 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs()"); | |
1166 | ||
454f18a9 | 1167 | /* Flush the pending list first */ |
8a280399 GKH |
1168 | list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, |
1169 | pend_entry) { | |
454f18a9 BP |
1170 | /* |
1171 | * The pend_req could be part of a partially completed | |
1172 | * request. If so, complete those req first until we | |
1173 | * hit the pend_req | |
1174 | */ | |
8a280399 GKH |
1175 | list_for_each_entry_safe(comp_req, tmp2, |
1176 | &pend_req->group->blkvsc_req_list, | |
1177 | req_entry) { | |
1178 | DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p " | |
0686e4f4 | 1179 | "sect_start %lu sect_count %ld\n", |
8a280399 GKH |
1180 | comp_req, |
1181 | (unsigned long) comp_req->sector_start, | |
1182 | comp_req->sector_count); | |
f82bd046 HJ |
1183 | |
1184 | if (comp_req == pend_req) | |
1185 | break; | |
1186 | ||
1187 | list_del(&comp_req->req_entry); | |
1188 | ||
8a280399 GKH |
1189 | if (comp_req->req) { |
1190 | ret = __blk_end_request(comp_req->req, | |
8a046024 | 1191 | (!comp_req->request.status ? 0 : -EIO), |
8a280399 GKH |
1192 | comp_req->sector_count * |
1193 | blkdev->sector_size); | |
ee350376 BP |
1194 | |
1195 | /* FIXME: shouldn't this do more than return? */ | |
1196 | if (ret) | |
1197 | goto out; | |
f82bd046 HJ |
1198 | } |
1199 | ||
1200 | kmem_cache_free(blkdev->request_pool, comp_req); | |
1201 | } | |
1202 | ||
8a280399 GKH |
1203 | DPRINT_DBG(BLKVSC_DRV, "cancelling pending request - %p\n", |
1204 | pend_req); | |
f82bd046 HJ |
1205 | |
1206 | list_del(&pend_req->pend_entry); | |
1207 | ||
1208 | list_del(&pend_req->req_entry); | |
1209 | ||
8a280399 GKH |
1210 | if (comp_req->req) { |
1211 | if (!__blk_end_request(pend_req->req, -EIO, | |
1212 | pend_req->sector_count * | |
1213 | blkdev->sector_size)) { | |
1214 | /* | |
1215 | * All the sectors have been xferred ie the | |
1216 | * request is done | |
1217 | */ | |
1218 | DPRINT_DBG(BLKVSC_DRV, | |
1219 | "blkvsc_cancel_pending_reqs() - " | |
1220 | "req %p COMPLETED\n", pend_req->req); | |
1221 | kmem_cache_free(blkdev->request_pool, | |
1222 | pend_req->group); | |
1223 | } | |
f82bd046 HJ |
1224 | } |
1225 | ||
1226 | kmem_cache_free(blkdev->request_pool, pend_req); | |
1227 | } | |
1228 | ||
ee350376 | 1229 | out: |
f82bd046 HJ |
1230 | return ret; |
1231 | } | |
1232 | ||
1233 | static int blkvsc_do_pending_reqs(struct block_device_context *blkdev) | |
1234 | { | |
1235 | struct blkvsc_request *pend_req, *tmp; | |
8a280399 | 1236 | int ret = 0; |
f82bd046 | 1237 | |
454f18a9 | 1238 | /* Flush the pending list first */ |
8a280399 GKH |
1239 | list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, |
1240 | pend_entry) { | |
1241 | DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n", | |
1242 | pend_req); | |
f82bd046 | 1243 | |
8a280399 GKH |
1244 | ret = blkvsc_submit_request(pend_req, |
1245 | blkvsc_request_completion); | |
f82bd046 | 1246 | if (ret != 0) |
f82bd046 | 1247 | break; |
f82bd046 | 1248 | else |
f82bd046 | 1249 | list_del(&pend_req->pend_entry); |
f82bd046 HJ |
1250 | } |
1251 | ||
1252 | return ret; | |
1253 | } | |
1254 | ||
1255 | static void blkvsc_request(struct request_queue *queue) | |
1256 | { | |
1257 | struct block_device_context *blkdev = NULL; | |
1258 | struct request *req; | |
8a280399 | 1259 | int ret = 0; |
f82bd046 | 1260 | |
0686e4f4 | 1261 | DPRINT_DBG(BLKVSC_DRV, "- enter\n"); |
8a280399 | 1262 | while ((req = blk_peek_request(queue)) != NULL) { |
f82bd046 HJ |
1263 | DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req); |
1264 | ||
1265 | blkdev = req->rq_disk->private_data; | |
33659ebb | 1266 | if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS || |
8a280399 | 1267 | blkdev->media_not_present) { |
0fce4c2f | 1268 | __blk_end_request_cur(req, 0); |
f82bd046 HJ |
1269 | continue; |
1270 | } | |
1271 | ||
1272 | ret = blkvsc_do_pending_reqs(blkdev); | |
1273 | ||
8a280399 GKH |
1274 | if (ret != 0) { |
1275 | DPRINT_DBG(BLKVSC_DRV, | |
1276 | "- stop queue - pending_list not empty\n"); | |
f82bd046 HJ |
1277 | blk_stop_queue(queue); |
1278 | break; | |
1279 | } | |
1280 | ||
0fce4c2f | 1281 | blk_start_request(req); |
f82bd046 HJ |
1282 | |
1283 | ret = blkvsc_do_request(blkdev, req); | |
8a280399 | 1284 | if (ret > 0) { |
f82bd046 HJ |
1285 | DPRINT_DBG(BLKVSC_DRV, "- stop queue - no room\n"); |
1286 | blk_stop_queue(queue); | |
1287 | break; | |
8a280399 | 1288 | } else if (ret < 0) { |
f82bd046 HJ |
1289 | DPRINT_DBG(BLKVSC_DRV, "- stop queue - no mem\n"); |
1290 | blk_requeue_request(queue, req); | |
1291 | blk_stop_queue(queue); | |
1292 | break; | |
1293 | } | |
1294 | } | |
1295 | } | |
1296 | ||
/*
 * blkvsc_open - block_device_operations .open handler.
 *
 * On the first open of a DVD device, polls for a media change before
 * bumping the user count.  blkvsc_mutex serialises open/release
 * against each other; blkdev->lock protects the users count.
 */
static int blkvsc_open(struct block_device *bdev, fmode_t mode)
{
	struct block_device_context *blkdev = bdev->bd_disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
		   blkdev->gd->disk_name);

	mutex_lock(&blkvsc_mutex);
	spin_lock(&blkdev->lock);

	if (!blkdev->users && blkdev->device_type == DVD_TYPE) {
		/*
		 * check_disk_change() may block, so the spinlock is
		 * dropped around the call (the mutex is still held).
		 */
		spin_unlock(&blkdev->lock);
		check_disk_change(bdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users++;

	spin_unlock(&blkdev->lock);
	mutex_unlock(&blkvsc_mutex);
	return 0;
}
1319 | ||
/*
 * blkvsc_release - block_device_operations .release handler.
 *
 * On the last close, flushes the device's write cache before dropping
 * the user count.  Locking mirrors blkvsc_open().
 */
static int blkvsc_release(struct gendisk *disk, fmode_t mode)
{
	struct block_device_context *blkdev = disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
		   blkdev->gd->disk_name);

	mutex_lock(&blkvsc_mutex);
	spin_lock(&blkdev->lock);
	if (blkdev->users == 1) {
		/* Last close: flush outside the spinlock (it may block) */
		spin_unlock(&blkdev->lock);
		blkvsc_do_flush(blkdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users--;

	spin_unlock(&blkdev->lock);
	mutex_unlock(&blkvsc_mutex);
	return 0;
}
1341 | ||
cafb0bfc TH |
1342 | static unsigned int blkvsc_check_events(struct gendisk *gd, |
1343 | unsigned int clearing) | |
f82bd046 HJ |
1344 | { |
1345 | DPRINT_DBG(BLKVSC_DRV, "- enter\n"); | |
cafb0bfc | 1346 | return DISK_EVENT_MEDIA_CHANGE; |
f82bd046 HJ |
1347 | } |
1348 | ||
1349 | static int blkvsc_revalidate_disk(struct gendisk *gd) | |
1350 | { | |
1351 | struct block_device_context *blkdev = gd->private_data; | |
1352 | ||
1353 | DPRINT_DBG(BLKVSC_DRV, "- enter\n"); | |
1354 | ||
8a280399 | 1355 | if (blkdev->device_type == DVD_TYPE) { |
f82bd046 | 1356 | blkvsc_do_read_capacity(blkdev); |
8a280399 GKH |
1357 | set_capacity(blkdev->gd, blkdev->capacity * |
1358 | (blkdev->sector_size/512)); | |
0fce4c2f | 1359 | blk_queue_logical_block_size(gd->queue, blkdev->sector_size); |
f82bd046 HJ |
1360 | } |
1361 | return 0; | |
1362 | } | |
1363 | ||
bd1de709 | 1364 | static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg) |
f82bd046 HJ |
1365 | { |
1366 | sector_t total_sectors = get_capacity(bd->bd_disk); | |
8a280399 GKH |
1367 | sector_t cylinder_times_heads = 0; |
1368 | sector_t temp = 0; | |
f82bd046 | 1369 | |
8a280399 GKH |
1370 | int sectors_per_track = 0; |
1371 | int heads = 0; | |
1372 | int cylinders = 0; | |
1373 | int rem = 0; | |
f82bd046 | 1374 | |
8a280399 GKH |
1375 | if (total_sectors > (65535 * 16 * 255)) |
1376 | total_sectors = (65535 * 16 * 255); | |
f82bd046 | 1377 | |
8a280399 GKH |
1378 | if (total_sectors >= (65535 * 16 * 63)) { |
1379 | sectors_per_track = 255; | |
1380 | heads = 16; | |
f82bd046 HJ |
1381 | |
1382 | cylinder_times_heads = total_sectors; | |
8a280399 GKH |
1383 | /* sector_div stores the quotient in cylinder_times_heads */ |
1384 | rem = sector_div(cylinder_times_heads, sectors_per_track); | |
1385 | } else { | |
1386 | sectors_per_track = 17; | |
f82bd046 HJ |
1387 | |
1388 | cylinder_times_heads = total_sectors; | |
8a280399 GKH |
1389 | /* sector_div stores the quotient in cylinder_times_heads */ |
1390 | rem = sector_div(cylinder_times_heads, sectors_per_track); | |
f82bd046 HJ |
1391 | |
1392 | temp = cylinder_times_heads + 1023; | |
8a280399 GKH |
1393 | /* sector_div stores the quotient in temp */ |
1394 | rem = sector_div(temp, 1024); | |
f82bd046 HJ |
1395 | |
1396 | heads = temp; | |
1397 | ||
8a280399 GKH |
1398 | if (heads < 4) |
1399 | heads = 4; | |
1400 | ||
f82bd046 | 1401 | |
8a280399 GKH |
1402 | if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) { |
1403 | sectors_per_track = 31; | |
1404 | heads = 16; | |
f82bd046 HJ |
1405 | |
1406 | cylinder_times_heads = total_sectors; | |
8a280399 GKH |
1407 | /* |
1408 | * sector_div stores the quotient in | |
1409 | * cylinder_times_heads | |
1410 | */ | |
1411 | rem = sector_div(cylinder_times_heads, | |
1412 | sectors_per_track); | |
1413 | } | |
f82bd046 | 1414 | |
8a280399 GKH |
1415 | if (cylinder_times_heads >= (heads * 1024)) { |
1416 | sectors_per_track = 63; | |
1417 | heads = 16; | |
f82bd046 HJ |
1418 | |
1419 | cylinder_times_heads = total_sectors; | |
8a280399 GKH |
1420 | /* |
1421 | * sector_div stores the quotient in | |
1422 | * cylinder_times_heads | |
1423 | */ | |
1424 | rem = sector_div(cylinder_times_heads, | |
1425 | sectors_per_track); | |
1426 | } | |
454f18a9 | 1427 | } |
f82bd046 HJ |
1428 | |
1429 | temp = cylinder_times_heads; | |
8a280399 GKH |
1430 | /* sector_div stores the quotient in temp */ |
1431 | rem = sector_div(temp, heads); | |
f82bd046 HJ |
1432 | cylinders = temp; |
1433 | ||
1434 | hg->heads = heads; | |
8a280399 GKH |
1435 | hg->sectors = sectors_per_track; |
1436 | hg->cylinders = cylinders; | |
f82bd046 | 1437 | |
8a280399 GKH |
1438 | DPRINT_INFO(BLKVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads, |
1439 | sectors_per_track); | |
f82bd046 HJ |
1440 | |
1441 | return 0; | |
1442 | } | |
1443 | ||
dfe8b2d9 BP |
1444 | static int blkvsc_ioctl(struct block_device *bd, fmode_t mode, |
1445 | unsigned cmd, unsigned long argument) | |
f82bd046 | 1446 | { |
b5788529 | 1447 | /* struct block_device_context *blkdev = bd->bd_disk->private_data; */ |
8a280399 | 1448 | int ret; |
f82bd046 | 1449 | |
8a280399 GKH |
1450 | switch (cmd) { |
1451 | /* | |
1452 | * TODO: I think there is certain format for HDIO_GET_IDENTITY rather | |
1453 | * than just a GUID. Commented it out for now. | |
1454 | */ | |
1455 | #if 0 | |
1456 | case HDIO_GET_IDENTITY: | |
f82bd046 | 1457 | DPRINT_INFO(BLKVSC_DRV, "HDIO_GET_IDENTITY\n"); |
8a280399 GKH |
1458 | if (copy_to_user((void __user *)arg, blkdev->device_id, |
1459 | blkdev->device_id_len)) | |
f82bd046 | 1460 | ret = -EFAULT; |
8a280399 GKH |
1461 | break; |
1462 | #endif | |
f82bd046 HJ |
1463 | default: |
1464 | ret = -EINVAL; | |
1465 | break; | |
1466 | } | |
1467 | ||
1468 | return ret; | |
1469 | } | |
1470 | ||
f82bd046 HJ |
1471 | static int __init blkvsc_init(void) |
1472 | { | |
1473 | int ret; | |
1474 | ||
5afd06cc | 1475 | BUILD_BUG_ON(sizeof(sector_t) != 8); |
f82bd046 | 1476 | |
f82bd046 HJ |
1477 | DPRINT_INFO(BLKVSC_DRV, "Blkvsc initializing...."); |
1478 | ||
eb4f3e0a | 1479 | ret = blkvsc_drv_init(blk_vsc_initialize); |
f82bd046 | 1480 | |
f82bd046 HJ |
1481 | return ret; |
1482 | } | |
1483 | ||
1484 | static void __exit blkvsc_exit(void) | |
1485 | { | |
f82bd046 | 1486 | blkvsc_drv_exit(); |
f82bd046 HJ |
1487 | } |
1488 | ||
8a280399 | 1489 | MODULE_LICENSE("GPL"); |
26c14cc1 | 1490 | MODULE_VERSION(HV_DRV_VERSION); |
1ec28abb | 1491 | MODULE_DESCRIPTION("Microsoft Hyper-V virtual block driver"); |
f82bd046 HJ |
1492 | module_init(blkvsc_init); |
1493 | module_exit(blkvsc_exit); |