1 Subject: block: unify request timeout handling
2 From: Jens Axboe <jens.axboe@oracle.com>
3 Date: Thu Oct 9 08:56:13 2008 +0200:
4 Git: 242f9dcb8ba6f68fcd217a119a7648a4f69290e9
5 References: FATE#304151,bnc#417544
7 Right now SCSI and others do their own command timeout handling.
8 Move those bits to the block layer.
10 Instead of having a timer per command, we try to be a bit more clever
11 and simply have one per-queue. This avoids the overhead of having to
12 tear down and setup a timer for each command, so it will result in a lot
15 Signed-off-by: Mike Anderson <andmike@linux.vnet.ibm.com>
16 Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
17 Signed-off-by: Hannes Reinecke <hare@suse.de>
20 block/blk-core.c | 7 +
21 block/blk-settings.c | 12 ++
22 block/blk-softirq.c | 30 ++++--
23 block/blk-timeout.c | 155 +++++++++++++++++++++++++++++++++++
24 block/blk.h | 24 +++++
25 block/elevator.c | 8 +
26 drivers/ata/libata-eh.c | 13 +-
27 drivers/ata/libata.h | 2
28 drivers/scsi/aacraid/aachba.c | 2
29 drivers/scsi/gdth.c | 60 ++++++++-----
30 drivers/scsi/gdth.h | 2
31 drivers/scsi/gdth_proc.c | 66 --------------
32 drivers/scsi/gdth_proc.h | 3
33 drivers/scsi/ibmvscsi/ibmvscsi.c | 2
34 drivers/scsi/ide-scsi.c | 2
35 drivers/scsi/ipr.c | 3
36 drivers/scsi/ips.c | 2
37 drivers/scsi/libiscsi.c | 17 ++-
38 drivers/scsi/libsas/sas_ata.c | 2
39 drivers/scsi/libsas/sas_internal.h | 2
40 drivers/scsi/libsas/sas_scsi_host.c | 30 +++---
41 drivers/scsi/megaraid/megaraid_sas.c | 6 -
42 drivers/scsi/ncr53c8xx.c | 4
43 drivers/scsi/qla1280.c | 4
44 drivers/scsi/qla4xxx/ql4_os.c | 4
45 drivers/scsi/scsi.c | 92 +++-----------------
46 drivers/scsi/scsi_error.c | 90 ++------------------
47 drivers/scsi/scsi_lib.c | 17 ++-
48 drivers/scsi/scsi_priv.h | 7 -
49 drivers/scsi/scsi_sysfs.c | 7 +
50 drivers/scsi/scsi_transport_fc.c | 6 -
51 drivers/scsi/sd.c | 9 --
52 drivers/scsi/sr.c | 5 -
53 drivers/scsi/sym53c8xx_2/sym_glue.c | 4
54 include/linux/blkdev.h | 20 ++++
55 include/scsi/scsi_cmnd.h | 3
56 include/scsi/scsi_host.h | 9 --
57 include/scsi/scsi_transport.h | 3
58 39 files changed, 399 insertions(+), 339 deletions(-)
59 create mode 100644 block/blk-timeout.c
61 --- a/block/blk-core.c
62 +++ b/block/blk-core.c
63 @@ -109,6 +109,7 @@ void blk_rq_init(struct request_queue *q
64 memset(rq, 0, sizeof(*rq));
66 INIT_LIST_HEAD(&rq->queuelist);
67 + INIT_LIST_HEAD(&rq->timeout_list);
70 rq->sector = rq->hard_sector = (sector_t) -1;
71 @@ -489,6 +490,8 @@ struct request_queue *blk_alloc_queue_no
74 init_timer(&q->unplug_timer);
75 + setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
76 + INIT_LIST_HEAD(&q->timeout_list);
78 kobject_init(&q->kobj, &blk_queue_ktype);
80 @@ -896,6 +899,8 @@ EXPORT_SYMBOL(blk_start_queueing);
82 void blk_requeue_request(struct request_queue *q, struct request *rq)
84 + blk_delete_timer(rq);
85 + blk_clear_rq_complete(rq);
86 blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
88 if (blk_rq_tagged(rq))
89 @@ -1652,6 +1657,8 @@ static void end_that_request_last(struct
91 struct gendisk *disk = req->rq_disk;
93 + blk_delete_timer(req);
95 if (blk_rq_tagged(req))
96 blk_queue_end_tag(req->q, req);
100 @@ -17,6 +17,30 @@ void __blk_queue_free_tags(struct reques
102 void blk_unplug_work(struct work_struct *work);
103 void blk_unplug_timeout(unsigned long data);
104 +void blk_rq_timed_out_timer(unsigned long data);
105 +void blk_delete_timer(struct request *);
106 +void blk_add_timer(struct request *);
109 + * Internal atomic flags for request handling
111 +enum rq_atomic_flags {
112 + REQ_ATOM_COMPLETE = 0,
116 + * EH timer and IO completion will both attempt to 'grab' the request, make
117 + * sure that only one of them succeeds
119 +static inline int blk_mark_rq_complete(struct request *rq)
121 + return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
124 +static inline void blk_clear_rq_complete(struct request *rq)
126 + clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
129 struct io_context *current_io_context(gfp_t gfp_flags, int node);
131 --- a/block/blk-settings.c
132 +++ b/block/blk-settings.c
133 @@ -77,6 +77,18 @@ void blk_queue_softirq_done(struct reque
135 EXPORT_SYMBOL(blk_queue_softirq_done);
137 +void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
139 + q->rq_timeout = timeout;
141 +EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
143 +void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
145 + q->rq_timed_out_fn = fn;
147 +EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
150 * blk_queue_make_request - define an alternate make_request function for a device
151 * @q: the request queue for the device to be affected
152 --- a/block/blk-softirq.c
153 +++ b/block/blk-softirq.c
154 @@ -101,18 +101,7 @@ static struct notifier_block __cpuinitda
155 .notifier_call = blk_cpu_notify,
159 - * blk_complete_request - end I/O on a request
160 - * @req: the request being processed
163 - * Ends all I/O on a request. It does not handle partial completions,
164 - * unless the driver actually implements this in its completion callback
165 - * through requeueing. The actual completion happens out-of-order,
166 - * through a softirq handler. The user must have registered a completion
167 - * callback through blk_queue_softirq_done().
169 -void blk_complete_request(struct request *req)
170 +void __blk_complete_request(struct request *req)
172 struct request_queue *q = req->q;
174 @@ -151,6 +140,23 @@ do_local:
176 local_irq_restore(flags);
180 + * blk_complete_request - end I/O on a request
181 + * @req: the request being processed
184 + * Ends all I/O on a request. It does not handle partial completions,
185 + * unless the driver actually implements this in its completion callback
186 + * through requeueing. The actual completion happens out-of-order,
187 + * through a softirq handler. The user must have registered a completion
188 + * callback through blk_queue_softirq_done().
190 +void blk_complete_request(struct request *req)
192 + if (!blk_mark_rq_complete(req))
193 + __blk_complete_request(req);
195 EXPORT_SYMBOL(blk_complete_request);
197 __init int blk_softirq_init(void)
199 +++ b/block/blk-timeout.c
202 + * Functions related to generic timeout handling of requests.
204 +#include <linux/kernel.h>
205 +#include <linux/module.h>
206 +#include <linux/blkdev.h>
211 + * blk_delete_timer - Delete/cancel timer for a given function.
212 + * @req: request that we are canceling timer for
215 +void blk_delete_timer(struct request *req)
217 + struct request_queue *q = req->q;
220 + * Nothing to detach
222 + if (!q->rq_timed_out_fn || !req->deadline)
225 + list_del_init(&req->timeout_list);
227 + if (list_empty(&q->timeout_list))
228 + del_timer(&q->timeout);
231 +static void blk_rq_timed_out(struct request *req)
233 + struct request_queue *q = req->q;
234 + enum blk_eh_timer_return ret;
236 + ret = q->rq_timed_out_fn(req);
238 + case BLK_EH_HANDLED:
239 + __blk_complete_request(req);
241 + case BLK_EH_RESET_TIMER:
242 + blk_clear_rq_complete(req);
243 + blk_add_timer(req);
245 + case BLK_EH_NOT_HANDLED:
247 + * LLD handles this for now but in the future
248 + * we can send a request msg to abort the command
249 + * and we can move more of the generic scsi eh code to
254 + printk(KERN_ERR "block: bad eh return: %d\n", ret);
259 +void blk_rq_timed_out_timer(unsigned long data)
261 + struct request_queue *q = (struct request_queue *) data;
262 + unsigned long flags, uninitialized_var(next), next_set = 0;
263 + struct request *rq, *tmp;
265 + spin_lock_irqsave(q->queue_lock, flags);
267 + list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
268 + if (time_after_eq(jiffies, rq->deadline)) {
269 + list_del_init(&rq->timeout_list);
272 + * Check if we raced with end io completion
274 + if (blk_mark_rq_complete(rq))
276 + blk_rq_timed_out(rq);
279 + next = rq->deadline;
281 + } else if (time_after(next, rq->deadline))
282 + next = rq->deadline;
285 + if (next_set && !list_empty(&q->timeout_list))
286 + mod_timer(&q->timeout, round_jiffies(next));
288 + spin_unlock_irqrestore(q->queue_lock, flags);
292 + * blk_abort_request -- Request recovery for the specified command
293 + * @req: pointer to the request of interest
295 + * This function requests that the block layer start recovery for the
296 + * request by deleting the timer and calling the q's timeout function.
297 + * LLDDs who implement their own error recovery MAY ignore the timeout
298 + * event if they generated blk_abort_request. Must hold queue lock.
300 +void blk_abort_request(struct request *req)
302 + blk_delete_timer(req);
303 + blk_rq_timed_out(req);
305 +EXPORT_SYMBOL_GPL(blk_abort_request);
308 + * blk_add_timer - Start timeout timer for a single request
309 + * @req: request that is about to start running.
312 + * Each request has its own timer, and as it is added to the queue, we
313 + * set up the timer. When the request completes, we cancel the timer.
315 +void blk_add_timer(struct request *req)
317 + struct request_queue *q = req->q;
318 + unsigned long expiry;
320 + if (!q->rq_timed_out_fn)
323 + BUG_ON(!list_empty(&req->timeout_list));
324 + BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
327 + req->deadline = jiffies + req->timeout;
329 + req->deadline = jiffies + q->rq_timeout;
331 + * Some LLDs, like scsi, peek at the timeout to prevent
332 + * a command from being retried forever.
334 + req->timeout = q->rq_timeout;
336 + list_add_tail(&req->timeout_list, &q->timeout_list);
339 + * If the timer isn't already pending or this timeout is earlier
340 + * than an existing one, modify the timer. Round to next nearest
343 + expiry = round_jiffies(req->deadline);
346 + * We use ->deadline == 0 to detect whether a timer was added or
347 + * not, so just increase to next jiffy for that specific case
349 + if (unlikely(!req->deadline))
352 + if (!timer_pending(&q->timeout) ||
353 + time_before(expiry, q->timeout.expires))
354 + mod_timer(&q->timeout, expiry);
356 --- a/block/elevator.c
357 +++ b/block/elevator.c
359 #include <linux/hash.h>
360 #include <linux/uaccess.h>
364 static DEFINE_SPINLOCK(elv_list_lock);
365 static LIST_HEAD(elv_list);
367 @@ -771,6 +773,12 @@ struct request *elv_next_request(struct
369 rq->cmd_flags |= REQ_STARTED;
370 blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
373 + * We are now handing the request to the hardware,
374 + * add the timeout handler
379 if (!q->boundary_rq || q->boundary_rq == rq) {
384 obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
385 blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
386 - blk-exec.o blk-merge.o blk-softirq.o ioctl.o genhd.o \
387 - scsi_ioctl.o cmd-filter.o
388 + blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
389 + ioctl.o genhd.o scsi_ioctl.o cmd-filter.o
391 obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
392 obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
393 --- a/drivers/ata/libata-eh.c
394 +++ b/drivers/ata/libata-eh.c
398 #include <linux/kernel.h>
399 +#include <linux/blkdev.h>
400 #include <linux/pci.h>
401 #include <scsi/scsi.h>
402 #include <scsi/scsi_host.h>
403 @@ -457,29 +458,29 @@ static void ata_eh_clear_action(struct a
405 * EH_HANDLED or EH_NOT_HANDLED
407 -enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
408 +enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
410 struct Scsi_Host *host = cmd->device->host;
411 struct ata_port *ap = ata_shost_to_port(host);
413 struct ata_queued_cmd *qc;
414 - enum scsi_eh_timer_return ret;
415 + enum blk_eh_timer_return ret;
419 if (ap->ops->error_handler) {
420 - ret = EH_NOT_HANDLED;
421 + ret = BLK_EH_NOT_HANDLED;
426 + ret = BLK_EH_HANDLED;
427 spin_lock_irqsave(ap->lock, flags);
428 qc = ata_qc_from_tag(ap, ap->link.active_tag);
430 WARN_ON(qc->scsicmd != cmd);
431 qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
432 qc->err_mask |= AC_ERR_TIMEOUT;
433 - ret = EH_NOT_HANDLED;
434 + ret = BLK_EH_NOT_HANDLED;
436 spin_unlock_irqrestore(ap->lock, flags);
438 @@ -828,7 +829,7 @@ void ata_qc_schedule_eh(struct ata_queue
439 * Note that ATA_QCFLAG_FAILED is unconditionally set after
440 * this function completes.
442 - scsi_req_abort_cmd(qc->scsicmd);
443 + blk_abort_request(qc->scsicmd->request);
447 --- a/drivers/ata/libata.h
448 +++ b/drivers/ata/libata.h
449 @@ -155,7 +155,7 @@ extern int ata_bus_probe(struct ata_port
451 extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
452 extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
453 -extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
454 +extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
455 extern void ata_scsi_error(struct Scsi_Host *host);
456 extern void ata_port_wait_eh(struct ata_port *ap);
457 extern void ata_eh_fastdrain_timerfn(unsigned long arg);
458 --- a/drivers/scsi/aacraid/aachba.c
459 +++ b/drivers/scsi/aacraid/aachba.c
460 @@ -1139,7 +1139,7 @@ static struct aac_srb * aac_scsi_common(
461 srbcmd->id = cpu_to_le32(scmd_id(cmd));
462 srbcmd->lun = cpu_to_le32(cmd->device->lun);
463 srbcmd->flags = cpu_to_le32(flag);
464 - timeout = cmd->timeout_per_command/HZ;
465 + timeout = cmd->request->timeout/HZ;
468 srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
469 --- a/drivers/scsi/gdth.c
470 +++ b/drivers/scsi/gdth.c
471 @@ -464,7 +464,6 @@ int __gdth_execute(struct scsi_device *s
473 /* use request field to save the ptr. to completion struct. */
474 scp->request = (struct request *)&wait;
475 - scp->timeout_per_command = timeout*HZ;
478 cmndinfo.priority = IOCTL_PRI;
479 @@ -1995,23 +1994,12 @@ static void gdth_putq(gdth_ha_str *ha, S
480 register Scsi_Cmnd *pscp;
481 register Scsi_Cmnd *nscp;
485 TRACE(("gdth_putq() priority %d\n",priority));
486 spin_lock_irqsave(&ha->smp_lock, flags);
488 - if (!cmndinfo->internal_command) {
489 + if (!cmndinfo->internal_command)
490 cmndinfo->priority = priority;
491 - b = scp->device->channel;
492 - t = scp->device->id;
493 - if (priority >= DEFAULT_PRI) {
494 - if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
495 - (b==ha->virt_bus && t<MAX_HDRIVES && ha->hdr[t].lock)) {
496 - TRACE2(("gdth_putq(): locked IO ->update_timeout()\n"));
497 - cmndinfo->timeout = gdth_update_timeout(scp, 0);
502 if (ha->req_first==NULL) {
503 ha->req_first = scp; /* queue was empty */
504 @@ -3899,6 +3887,39 @@ static const char *gdth_info(struct Scsi
505 return ((const char *)ha->binfo.type_string);
508 +static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
510 + gdth_ha_str *ha = shost_priv(scp->device->host);
511 + struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
514 + enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
516 + TRACE(("%s() cmd 0x%x\n", __func__, scp->cmnd[0]));
517 + b = scp->device->channel;
518 + t = scp->device->id;
521 + * We don't really honor the command timeout, but we try to
522 + * honor 6 times of the actual command timeout! So reset the
523 + * timer if this is less than 6th timeout on this command!
525 + if (++cmndinfo->timeout_count < 6)
526 + retval = BLK_EH_RESET_TIMER;
528 + /* Reset the timeout if it is locked IO */
529 + spin_lock_irqsave(&ha->smp_lock, flags);
530 + if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
531 + (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
532 + TRACE2(("%s(): locked IO, reset timeout\n", __func__));
533 + retval = BLK_EH_RESET_TIMER;
535 + spin_unlock_irqrestore(&ha->smp_lock, flags);
541 static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
543 gdth_ha_str *ha = shost_priv(scp->device->host);
544 @@ -3992,7 +4013,7 @@ static int gdth_queuecommand(struct scsi
547 scp->scsi_done = done;
548 - gdth_update_timeout(scp, scp->timeout_per_command * 6);
549 + cmndinfo->timeout_count = 0;
550 cmndinfo->priority = DEFAULT_PRI;
552 return __gdth_queuecommand(ha, scp, cmndinfo);
553 @@ -4096,12 +4117,10 @@ static int ioc_lockdrv(void __user *arg)
555 spin_unlock_irqrestore(&ha->smp_lock, flags);
556 gdth_wait_completion(ha, ha->bus_cnt, j);
557 - gdth_stop_timeout(ha, ha->bus_cnt, j);
559 spin_lock_irqsave(&ha->smp_lock, flags);
561 spin_unlock_irqrestore(&ha->smp_lock, flags);
562 - gdth_start_timeout(ha, ha->bus_cnt, j);
566 @@ -4539,18 +4558,14 @@ static int gdth_ioctl(struct inode *inod
567 spin_lock_irqsave(&ha->smp_lock, flags);
569 spin_unlock_irqrestore(&ha->smp_lock, flags);
570 - for (j = 0; j < ha->tid_cnt; ++j) {
571 + for (j = 0; j < ha->tid_cnt; ++j)
572 gdth_wait_completion(ha, i, j);
573 - gdth_stop_timeout(ha, i, j);
576 spin_lock_irqsave(&ha->smp_lock, flags);
578 spin_unlock_irqrestore(&ha->smp_lock, flags);
579 - for (j = 0; j < ha->tid_cnt; ++j) {
580 - gdth_start_timeout(ha, i, j);
581 + for (j = 0; j < ha->tid_cnt; ++j)
587 @@ -4644,6 +4659,7 @@ static struct scsi_host_template gdth_te
588 .slave_configure = gdth_slave_configure,
589 .bios_param = gdth_bios_param,
590 .proc_info = gdth_proc_info,
591 + .eh_timed_out = gdth_timed_out,
593 .can_queue = GDTH_MAXCMDS,
595 --- a/drivers/scsi/gdth.h
596 +++ b/drivers/scsi/gdth.h
597 @@ -916,7 +916,7 @@ typedef struct {
598 gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/
599 dma_addr_t sense_paddr; /* sense dma-addr */
602 + int timeout_count; /* # of timeout calls */
603 volatile int wait_for_completion;
606 --- a/drivers/scsi/gdth_proc.c
607 +++ b/drivers/scsi/gdth_proc.c
608 @@ -748,69 +748,3 @@ static void gdth_wait_completion(gdth_ha
610 spin_unlock_irqrestore(&ha->smp_lock, flags);
613 -static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id)
619 - spin_lock_irqsave(&ha->smp_lock, flags);
621 - for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
622 - struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
623 - if (!cmndinfo->internal_command) {
624 - b = scp->device->channel;
625 - t = scp->device->id;
626 - if (t == (unchar)id && b == (unchar)busnum) {
627 - TRACE2(("gdth_stop_timeout(): update_timeout()\n"));
628 - cmndinfo->timeout = gdth_update_timeout(scp, 0);
632 - spin_unlock_irqrestore(&ha->smp_lock, flags);
635 -static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id)
641 - spin_lock_irqsave(&ha->smp_lock, flags);
643 - for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
644 - struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
645 - if (!cmndinfo->internal_command) {
646 - b = scp->device->channel;
647 - t = scp->device->id;
648 - if (t == (unchar)id && b == (unchar)busnum) {
649 - TRACE2(("gdth_start_timeout(): update_timeout()\n"));
650 - gdth_update_timeout(scp, cmndinfo->timeout);
654 - spin_unlock_irqrestore(&ha->smp_lock, flags);
657 -static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout)
661 - oldto = scp->timeout_per_command;
662 - scp->timeout_per_command = timeout;
664 - if (timeout == 0) {
665 - del_timer(&scp->eh_timeout);
666 - scp->eh_timeout.data = (unsigned long) NULL;
667 - scp->eh_timeout.expires = 0;
669 - if (scp->eh_timeout.data != (unsigned long) NULL)
670 - del_timer(&scp->eh_timeout);
671 - scp->eh_timeout.data = (unsigned long) scp;
672 - scp->eh_timeout.expires = jiffies + timeout;
673 - add_timer(&scp->eh_timeout);
678 --- a/drivers/scsi/gdth_proc.h
679 +++ b/drivers/scsi/gdth_proc.h
680 @@ -20,9 +20,6 @@ static char *gdth_ioctl_alloc(gdth_ha_st
682 static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr);
683 static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
684 -static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id);
685 -static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id);
686 -static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout);
690 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c
691 +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
692 @@ -756,7 +756,7 @@ static int ibmvscsi_queuecommand(struct
693 init_event_struct(evt_struct,
696 - cmnd->timeout_per_command/HZ);
697 + cmnd->request->timeout/HZ);
699 evt_struct->cmnd = cmnd;
700 evt_struct->cmnd_done = done;
701 --- a/drivers/scsi/ide-scsi.c
702 +++ b/drivers/scsi/ide-scsi.c
703 @@ -612,7 +612,7 @@ static int idescsi_queue (struct scsi_cm
704 pc->req_xfer = pc->buf_size = scsi_bufflen(cmd);
707 - pc->timeout = jiffies + cmd->timeout_per_command;
708 + pc->timeout = jiffies + cmd->request->timeout;
710 if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
711 printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
712 --- a/drivers/scsi/ipr.c
713 +++ b/drivers/scsi/ipr.c
714 @@ -3670,7 +3670,8 @@ static int ipr_slave_configure(struct sc
715 sdev->no_uld_attach = 1;
717 if (ipr_is_vset_device(res)) {
718 - sdev->timeout = IPR_VSET_RW_TIMEOUT;
719 + blk_queue_rq_timeout(sdev->request_queue,
720 + IPR_VSET_RW_TIMEOUT);
721 blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
723 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
724 --- a/drivers/scsi/ips.c
725 +++ b/drivers/scsi/ips.c
726 @@ -3818,7 +3818,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t *
727 scb->cmd.dcdb.segment_4G = 0;
728 scb->cmd.dcdb.enhanced_sg = 0;
730 - TimeOut = scb->scsi_cmd->timeout_per_command;
731 + TimeOut = scb->scsi_cmd->request->timeout;
733 if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */
735 --- a/drivers/scsi/libiscsi.c
736 +++ b/drivers/scsi/libiscsi.c
737 @@ -1476,12 +1476,12 @@ static void iscsi_start_tx(struct iscsi_
738 scsi_queue_work(conn->session->host, &conn->xmitwork);
741 -static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
742 +static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
744 struct iscsi_cls_session *cls_session;
745 struct iscsi_session *session;
746 struct iscsi_conn *conn;
747 - enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
748 + enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
750 cls_session = starget_to_session(scsi_target(scmd->device));
751 session = cls_session->dd_data;
752 @@ -1494,14 +1494,14 @@ static enum scsi_eh_timer_return iscsi_e
753 * We are probably in the middle of iscsi recovery so let
754 * that complete and handle the error.
756 - rc = EH_RESET_TIMER;
757 + rc = BLK_EH_RESET_TIMER;
761 conn = session->leadconn;
763 /* In the middle of shuting down */
764 - rc = EH_RESET_TIMER;
765 + rc = BLK_EH_RESET_TIMER;
769 @@ -1513,20 +1513,21 @@ static enum scsi_eh_timer_return iscsi_e
771 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
772 (conn->ping_timeout * HZ), jiffies))
773 - rc = EH_RESET_TIMER;
774 + rc = BLK_EH_RESET_TIMER;
776 * if we are about to check the transport then give the command
779 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
781 - rc = EH_RESET_TIMER;
782 + rc = BLK_EH_RESET_TIMER;
783 /* if in the middle of checking the transport then give us more time */
785 - rc = EH_RESET_TIMER;
786 + rc = BLK_EH_RESET_TIMER;
788 spin_unlock(&session->lock);
789 - debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
790 + debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ?
791 + "timer reset" : "nh");
795 --- a/drivers/scsi/libsas/sas_ata.c
796 +++ b/drivers/scsi/libsas/sas_ata.c
797 @@ -398,7 +398,7 @@ void sas_ata_task_abort(struct sas_task
799 /* Bounce SCSI-initiated commands to the SCSI EH */
801 - scsi_req_abort_cmd(qc->scsicmd);
802 + blk_abort_request(qc->scsicmd->request);
803 scsi_schedule_eh(qc->scsicmd->device->host);
806 --- a/drivers/scsi/libsas/sas_internal.h
807 +++ b/drivers/scsi/libsas/sas_internal.h
808 @@ -55,7 +55,7 @@ void sas_unregister_phys(struct sas_ha_s
809 int sas_register_ports(struct sas_ha_struct *sas_ha);
810 void sas_unregister_ports(struct sas_ha_struct *sas_ha);
812 -enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
813 +enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
815 int sas_init_queue(struct sas_ha_struct *sas_ha);
816 int sas_init_events(struct sas_ha_struct *sas_ha);
817 --- a/drivers/scsi/libsas/sas_scsi_host.c
818 +++ b/drivers/scsi/libsas/sas_scsi_host.c
819 @@ -673,43 +673,43 @@ out:
823 -enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
824 +enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
826 struct sas_task *task = TO_SAS_TASK(cmd);
830 - cmd->timeout_per_command /= 2;
831 + cmd->request->timeout /= 2;
832 SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
833 - cmd, task, (cmd->timeout_per_command ?
834 - "EH_RESET_TIMER" : "EH_NOT_HANDLED"));
835 - if (!cmd->timeout_per_command)
836 - return EH_NOT_HANDLED;
837 - return EH_RESET_TIMER;
838 + cmd, task, (cmd->request->timeout ?
839 + "BLK_EH_RESET_TIMER" : "BLK_EH_NOT_HANDLED"));
840 + if (!cmd->request->timeout)
841 + return BLK_EH_NOT_HANDLED;
842 + return BLK_EH_RESET_TIMER;
845 spin_lock_irqsave(&task->task_state_lock, flags);
846 BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
847 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
848 spin_unlock_irqrestore(&task->task_state_lock, flags);
849 - SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
852 + SAS_DPRINTK("command 0x%p, task 0x%p, timed out: "
853 + "BLK_EH_HANDLED\n", cmd, task);
854 + return BLK_EH_HANDLED;
856 if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
857 spin_unlock_irqrestore(&task->task_state_lock, flags);
858 SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
859 - "EH_RESET_TIMER\n",
860 + "BLK_EH_RESET_TIMER\n",
862 - return EH_RESET_TIMER;
863 + return BLK_EH_RESET_TIMER;
865 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
866 spin_unlock_irqrestore(&task->task_state_lock, flags);
868 - SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n",
869 + SAS_DPRINTK("command 0x%p, task 0x%p, timed out: BLK_EH_NOT_HANDLED\n",
872 - return EH_NOT_HANDLED;
873 + return BLK_EH_NOT_HANDLED;
876 int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
877 @@ -1039,7 +1039,7 @@ void sas_task_abort(struct sas_task *tas
881 - scsi_req_abort_cmd(sc);
882 + blk_abort_request(sc->request);
883 scsi_schedule_eh(sc->device->host);
886 --- a/drivers/scsi/megaraid/megaraid_sas.c
887 +++ b/drivers/scsi/megaraid/megaraid_sas.c
888 @@ -1167,7 +1167,7 @@ static int megasas_generic_reset(struct
889 * cmd has not been completed within the timeout period.
892 -scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
893 +blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
895 struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
896 struct megasas_instance *instance;
897 @@ -1175,7 +1175,7 @@ scsi_eh_timer_return megasas_reset_timer
899 if (time_after(jiffies, scmd->jiffies_at_alloc +
900 (MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
901 - return EH_NOT_HANDLED;
902 + return BLK_EH_NOT_HANDLED;
905 instance = cmd->instance;
906 @@ -1189,7 +1189,7 @@ scsi_eh_timer_return megasas_reset_timer
908 spin_unlock_irqrestore(instance->host->host_lock, flags);
910 - return EH_RESET_TIMER;
911 + return BLK_EH_RESET_TIMER;
915 --- a/drivers/scsi/ncr53c8xx.c
916 +++ b/drivers/scsi/ncr53c8xx.c
917 @@ -4170,8 +4170,8 @@ static int ncr_queue_command (struct ncb
919 **----------------------------------------------------
921 - if (np->settle_time && cmd->timeout_per_command >= HZ) {
922 - u_long tlimit = jiffies + cmd->timeout_per_command - HZ;
923 + if (np->settle_time && cmd->request->timeout >= HZ) {
924 + u_long tlimit = jiffies + cmd->request->timeout - HZ;
925 if (time_after(np->settle_time, tlimit))
926 np->settle_time = tlimit;
928 --- a/drivers/scsi/qla1280.c
929 +++ b/drivers/scsi/qla1280.c
930 @@ -2845,7 +2845,7 @@ qla1280_64bit_start_scsi(struct scsi_qla
931 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
933 /* Set ISP command timeout. */
934 - pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
935 + pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
937 /* Set device target ID and LUN */
938 pkt->lun = SCSI_LUN_32(cmd);
939 @@ -3114,7 +3114,7 @@ qla1280_32bit_start_scsi(struct scsi_qla
940 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
942 /* Set ISP command timeout. */
943 - pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
944 + pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
946 /* Set device target ID and LUN */
947 pkt->lun = SCSI_LUN_32(cmd);
948 --- a/drivers/scsi/qla4xxx/ql4_os.c
949 +++ b/drivers/scsi/qla4xxx/ql4_os.c
950 @@ -1542,7 +1542,7 @@ static int qla4xxx_eh_device_reset(struc
951 DEBUG2(printk(KERN_INFO
952 "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
953 "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
954 - cmd, jiffies, cmd->timeout_per_command / HZ,
955 + cmd, jiffies, cmd->request->timeout / HZ,
956 ha->dpc_flags, cmd->result, cmd->allowed));
958 /* FIXME: wait for hba to go online */
959 @@ -1598,7 +1598,7 @@ static int qla4xxx_eh_target_reset(struc
960 DEBUG2(printk(KERN_INFO
961 "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
962 "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
963 - ha->host_no, cmd, jiffies, cmd->timeout_per_command / HZ,
964 + ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
965 ha->dpc_flags, cmd->result, cmd->allowed));
967 stat = qla4xxx_reset_target(ha, ddb_entry);
968 --- a/drivers/scsi/scsi.c
969 +++ b/drivers/scsi/scsi.c
970 @@ -291,7 +291,6 @@ struct scsi_cmnd *scsi_get_command(struc
974 - init_timer(&cmd->eh_timeout);
975 INIT_LIST_HEAD(&cmd->list);
976 spin_lock_irqsave(&dev->list_lock, flags);
977 list_add_tail(&cmd->list, &dev->cmd_list);
978 @@ -652,14 +651,19 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
979 unsigned long timeout;
983 + * We will use a queued command if possible, otherwise we will
984 + * emulate the queuing and calling of completion function ourselves.
986 + atomic_inc(&cmd->device->iorequest_cnt);
988 /* check if the device is still usable */
989 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
990 /* in SDEV_DEL we error all commands. DID_NO_CONNECT
991 * returns an immediate error upwards, and signals
992 * that the device is no longer present */
993 cmd->result = DID_NO_CONNECT << 16;
994 - atomic_inc(&cmd->device->iorequest_cnt);
997 /* return 0 (because the command has been processed) */
1000 @@ -672,6 +676,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
1001 * future requests should not occur until the device
1002 * transitions out of the suspend state.
1005 scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1007 SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
1008 @@ -714,21 +719,9 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
1009 host->resetting = 0;
1013 - * AK: unlikely race here: for some reason the timer could
1014 - * expire before the serial number is set up below.
1016 - scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);
1021 - * We will use a queued command if possible, otherwise we will
1022 - * emulate the queuing and calling of completion function ourselves.
1024 - atomic_inc(&cmd->device->iorequest_cnt);
1027 * Before we queue this command, check if the command
1028 * length exceeds what the host adapter can handle.
1030 @@ -744,6 +737,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
1033 spin_lock_irqsave(host->host_lock, flags);
1035 + * AK: unlikely race here: for some reason the timer could
1036 + * expire before the serial number is set up below.
1038 + * TODO: kill serial or move to blk layer
1040 scsi_cmd_get_serial(host, cmd);
1042 if (unlikely(host->shost_state == SHOST_DEL)) {
1043 @@ -754,12 +753,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
1045 spin_unlock_irqrestore(host->host_lock, flags);
1047 - if (scsi_delete_timer(cmd)) {
1048 - atomic_inc(&cmd->device->iodone_cnt);
1049 - scsi_queue_insert(cmd,
1050 - (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
1051 - rtn : SCSI_MLQUEUE_HOST_BUSY);
1053 + scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
1054 + rtn : SCSI_MLQUEUE_HOST_BUSY);
1056 printk("queuecommand : request rejected\n"));
1058 @@ -770,24 +765,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
1062 - * scsi_req_abort_cmd -- Request command recovery for the specified command
1063 - * @cmd: pointer to the SCSI command of interest
1065 - * This function requests that SCSI Core start recovery for the
1066 - * command by deleting the timer and adding the command to the eh
1067 - * queue. It can be called by either LLDDs or SCSI Core. LLDDs who
1068 - * implement their own error recovery MAY ignore the timeout event if
1069 - * they generated scsi_req_abort_cmd.
1071 -void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
1073 - if (!scsi_delete_timer(cmd))
1075 - scsi_times_out(cmd);
1077 -EXPORT_SYMBOL(scsi_req_abort_cmd);
1080 * scsi_done - Enqueue the finished SCSI command into the done queue.
1081 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
1082 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
1083 @@ -802,42 +779,7 @@ EXPORT_SYMBOL(scsi_req_abort_cmd);
1085 static void scsi_done(struct scsi_cmnd *cmd)
1088 - * We don't have to worry about this one timing out anymore.
1089 - * If we are unable to remove the timer, then the command
1090 - * has already timed out. In which case, we have no choice but to
1091 - * let the timeout function run, as we have no idea where in fact
1092 - * that function could really be. It might be on another processor,
1095 - if (!scsi_delete_timer(cmd))
1100 -/* Private entry to scsi_done() to complete a command when the timer
1101 - * isn't running --- used by scsi_times_out */
1102 -void __scsi_done(struct scsi_cmnd *cmd)
1104 - struct request *rq = cmd->request;
1107 - * Set the serial numbers back to zero
1109 - cmd->serial_number = 0;
1111 - atomic_inc(&cmd->device->iodone_cnt);
1113 - atomic_inc(&cmd->device->ioerr_cnt);
1118 - * The uptodate/nbytes values don't matter, as we allow partial
1119 - * completes and thus will check this in the softirq callback
1121 - rq->completion_data = cmd;
1122 - blk_complete_request(rq);
1123 + blk_complete_request(cmd->request);
1126 /* Move this to a header if it becomes more generally useful */
1127 --- a/drivers/scsi/scsi_error.c
1128 +++ b/drivers/scsi/scsi_error.c
1129 @@ -112,69 +112,8 @@ int scsi_eh_scmd_add(struct scsi_cmnd *s
1133 - * scsi_add_timer - Start timeout timer for a single scsi command.
1134 - * @scmd: scsi command that is about to start running.
1135 - * @timeout: amount of time to allow this command to run.
1136 - * @complete: timeout function to call if timer isn't canceled.
1139 - * This should be turned into an inline function. Each scsi command
1140 - * has its own timer, and as it is added to the queue, we set up the
1141 - * timer. When the command completes, we cancel the timer.
1143 -void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
1144 - void (*complete)(struct scsi_cmnd *))
1148 - * If the clock was already running for this command, then
1149 - * first delete the timer. The timer handling code gets rather
1150 - * confused if we don't do this.
1152 - if (scmd->eh_timeout.function)
1153 - del_timer(&scmd->eh_timeout);
1155 - scmd->eh_timeout.data = (unsigned long)scmd;
1156 - scmd->eh_timeout.expires = jiffies + timeout;
1157 - scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
1159 - SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
1160 - " %d, (%p)\n", __func__,
1161 - scmd, timeout, complete));
1163 - add_timer(&scmd->eh_timeout);
1167 - * scsi_delete_timer - Delete/cancel timer for a given function.
1168 - * @scmd: Cmd that we are canceling timer for
1171 - * This should be turned into an inline function.
1174 - * 1 if we were able to detach the timer. 0 if we blew it, and the
1175 - * timer function has already started to run.
1177 -int scsi_delete_timer(struct scsi_cmnd *scmd)
1181 - rtn = del_timer(&scmd->eh_timeout);
1183 - SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
1184 - " rtn: %d\n", __func__,
1187 - scmd->eh_timeout.data = (unsigned long)NULL;
1188 - scmd->eh_timeout.function = NULL;
1194 * scsi_times_out - Timeout function for normal scsi commands.
1195 - * @scmd: Cmd that is timing out.
1196 + * @req: request that is timing out.
1199 * We do not need to lock this. There is the potential for a race
1200 @@ -182,9 +121,11 @@ int scsi_delete_timer(struct scsi_cmnd *
1201 * normal completion function determines that the timer has already
1202 * fired, then it mustn't do anything.
1204 -void scsi_times_out(struct scsi_cmnd *scmd)
1205 +enum blk_eh_timer_return scsi_times_out(struct request *req)
1207 - enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
1208 + struct scsi_cmnd *scmd = req->special;
1209 + enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
1210 + enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
1212 scsi_log_completion(scmd, TIMEOUT_ERROR);
1214 @@ -196,22 +137,20 @@ void scsi_times_out(struct scsi_cmnd *sc
1215 eh_timed_out = NULL;
1218 - switch (eh_timed_out(scmd)) {
1220 - __scsi_done(scmd);
1222 - case EH_RESET_TIMER:
1223 - scsi_add_timer(scmd, scmd->timeout_per_command,
1226 - case EH_NOT_HANDLED:
1227 + rtn = eh_timed_out(scmd);
1229 + case BLK_EH_NOT_HANDLED:
1235 if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
1236 scmd->result |= DID_TIME_OUT << 16;
1237 - __scsi_done(scmd);
1238 + return BLK_EH_HANDLED;
1241 + return BLK_EH_NOT_HANDLED;
1245 @@ -1793,7 +1732,6 @@ scsi_reset_provider(struct scsi_device *
1247 blk_rq_init(NULL, &req);
1248 scmd->request = &req;
1249 - memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
1251 scmd->cmnd = req.cmd;
1253 @@ -1804,8 +1742,6 @@ scsi_reset_provider(struct scsi_device *
1255 scmd->sc_data_direction = DMA_BIDIRECTIONAL;
1257 - init_timer(&scmd->eh_timeout);
1259 spin_lock_irqsave(shost->host_lock, flags);
1260 shost->tmf_in_progress = 1;
1261 spin_unlock_irqrestore(shost->host_lock, flags);
1262 --- a/drivers/scsi/scsi_lib.c
1263 +++ b/drivers/scsi/scsi_lib.c
1264 @@ -1181,7 +1181,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_d
1266 cmd->transfersize = req->data_len;
1267 cmd->allowed = req->retries;
1268 - cmd->timeout_per_command = req->timeout;
1271 EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
1272 @@ -1416,17 +1415,26 @@ static void scsi_kill_request(struct req
1273 spin_unlock(shost->host_lock);
1274 spin_lock(sdev->request_queue->queue_lock);
1277 + blk_complete_request(req);
1280 static void scsi_softirq_done(struct request *rq)
1282 - struct scsi_cmnd *cmd = rq->completion_data;
1283 - unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
1284 + struct scsi_cmnd *cmd = rq->special;
1285 + unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1288 INIT_LIST_HEAD(&cmd->eh_entry);
1291 + * Set the serial numbers back to zero
1293 + cmd->serial_number = 0;
1295 + atomic_inc(&cmd->device->iodone_cnt);
1297 + atomic_inc(&cmd->device->ioerr_cnt);
1299 disposition = scsi_decide_disposition(cmd);
1300 if (disposition != SUCCESS &&
1301 time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1302 @@ -1675,6 +1683,7 @@ struct request_queue *scsi_alloc_queue(s
1304 blk_queue_prep_rq(q, scsi_prep_fn);
1305 blk_queue_softirq_done(q, scsi_softirq_done);
1306 + blk_queue_rq_timed_out(q, scsi_times_out);
1310 --- a/drivers/scsi/scsi_priv.h
1311 +++ b/drivers/scsi/scsi_priv.h
1313 #include <linux/device.h>
1315 struct request_queue;
1319 struct scsi_host_template;
1320 @@ -27,7 +28,6 @@ extern void scsi_exit_hosts(void);
1321 extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd);
1322 extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
1323 extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
1324 -extern void __scsi_done(struct scsi_cmnd *cmd);
1325 #ifdef CONFIG_SCSI_LOGGING
1326 void scsi_log_send(struct scsi_cmnd *cmd);
1327 void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
1328 @@ -49,10 +49,7 @@ extern int __init scsi_init_devinfo(void
1329 extern void scsi_exit_devinfo(void);
1332 -extern void scsi_add_timer(struct scsi_cmnd *, int,
1333 - void (*)(struct scsi_cmnd *));
1334 -extern int scsi_delete_timer(struct scsi_cmnd *);
1335 -extern void scsi_times_out(struct scsi_cmnd *cmd);
1336 +extern enum blk_eh_timer_return scsi_times_out(struct request *req);
1337 extern int scsi_error_handler(void *host);
1338 extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
1339 extern void scsi_eh_wakeup(struct Scsi_Host *shost);
1340 --- a/drivers/scsi/scsi_sysfs.c
1341 +++ b/drivers/scsi/scsi_sysfs.c
1342 @@ -560,12 +560,15 @@ sdev_rd_attr (vendor, "%.8s\n");
1343 sdev_rd_attr (model, "%.16s\n");
1344 sdev_rd_attr (rev, "%.4s\n");
1347 + * TODO: can we make these symlinks to the block layer ones?
1350 sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
1352 struct scsi_device *sdev;
1353 sdev = to_scsi_device(dev);
1354 - return snprintf (buf, 20, "%d\n", sdev->timeout / HZ);
1355 + return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
1359 @@ -576,7 +579,7 @@ sdev_store_timeout (struct device *dev,
1361 sdev = to_scsi_device(dev);
1362 sscanf (buf, "%d\n", &timeout);
1363 - sdev->timeout = timeout * HZ;
1364 + blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
1367 static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
1368 --- a/drivers/scsi/scsi_transport_fc.c
1369 +++ b/drivers/scsi/scsi_transport_fc.c
1370 @@ -1950,15 +1950,15 @@ static int fc_vport_match(struct attribu
1372 * This routine assumes no locks are held on entry.
1374 -static enum scsi_eh_timer_return
1375 +static enum blk_eh_timer_return
1376 fc_timed_out(struct scsi_cmnd *scmd)
1378 struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
1380 if (rport->port_state == FC_PORTSTATE_BLOCKED)
1381 - return EH_RESET_TIMER;
1382 + return BLK_EH_RESET_TIMER;
1384 - return EH_NOT_HANDLED;
1385 + return BLK_EH_NOT_HANDLED;
1389 --- a/drivers/scsi/sd.c
1390 +++ b/drivers/scsi/sd.c
1391 @@ -378,7 +378,6 @@ static int sd_prep_fn(struct request_que
1392 sector_t block = rq->sector;
1394 unsigned int this_count = rq->nr_sectors;
1395 - unsigned int timeout = sdp->timeout;
1398 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
1399 @@ -579,7 +578,6 @@ static int sd_prep_fn(struct request_que
1400 SCpnt->transfersize = sdp->sector_size;
1401 SCpnt->underflow = this_count << 9;
1402 SCpnt->allowed = SD_MAX_RETRIES;
1403 - SCpnt->timeout_per_command = timeout;
1406 * This indicates that the command is ready from our end to be
1407 @@ -1837,11 +1835,12 @@ static int sd_probe(struct device *dev)
1409 sdkp->previous_state = 1;
1411 - if (!sdp->timeout) {
1412 + if (!sdp->request_queue->rq_timeout) {
1413 if (sdp->type != TYPE_MOD)
1414 - sdp->timeout = SD_TIMEOUT;
1415 + blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
1417 - sdp->timeout = SD_MOD_TIMEOUT;
1418 + blk_queue_rq_timeout(sdp->request_queue,
1422 device_initialize(&sdkp->dev);
1423 --- a/drivers/scsi/sr.c
1424 +++ b/drivers/scsi/sr.c
1425 @@ -331,7 +331,7 @@ static int sr_done(struct scsi_cmnd *SCp
1427 static int sr_prep_fn(struct request_queue *q, struct request *rq)
1429 - int block=0, this_count, s_size, timeout = SR_TIMEOUT;
1430 + int block = 0, this_count, s_size;
1432 struct scsi_cmnd *SCpnt;
1433 struct scsi_device *sdp = q->queuedata;
1434 @@ -461,7 +461,6 @@ static int sr_prep_fn(struct request_que
1435 SCpnt->transfersize = cd->device->sector_size;
1436 SCpnt->underflow = this_count << 9;
1437 SCpnt->allowed = MAX_RETRIES;
1438 - SCpnt->timeout_per_command = timeout;
1441 * This indicates that the command is ready from our end to be
1442 @@ -620,6 +619,8 @@ static int sr_probe(struct device *dev)
1443 disk->fops = &sr_bdops;
1444 disk->flags = GENHD_FL_CD;
1446 + blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
1450 cd->driver = &sr_template;
1451 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c
1452 +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
1453 @@ -519,8 +519,8 @@ static int sym53c8xx_queue_command(struc
1454 * Shorten our settle_time if needed for
1455 * this command not to time out.
1457 - if (np->s.settle_time_valid && cmd->timeout_per_command) {
1458 - unsigned long tlimit = jiffies + cmd->timeout_per_command;
1459 + if (np->s.settle_time_valid && cmd->request->timeout) {
1460 + unsigned long tlimit = jiffies + cmd->request->timeout;
1461 tlimit -= SYM_CONF_TIMER_INTERVAL*2;
1462 if (time_after(np->s.settle_time, tlimit)) {
1463 np->s.settle_time = tlimit;
1464 --- a/include/linux/blkdev.h
1465 +++ b/include/linux/blkdev.h
1466 @@ -147,6 +147,7 @@ struct request {
1468 unsigned int cmd_flags;
1469 enum rq_cmd_type_bits cmd_type;
1470 + unsigned long atomic_flags;
1472 /* Maintain bio traversal state for part by part I/O submission.
1473 * hard_* are block layer internals, no driver should touch them!
1474 @@ -214,6 +215,8 @@ struct request {
1478 + unsigned long deadline;
1479 + struct list_head timeout_list;
1480 unsigned int timeout;
1483 @@ -266,6 +269,14 @@ typedef void (prepare_flush_fn) (struct
1484 typedef void (softirq_done_fn)(struct request *);
1485 typedef int (dma_drain_needed_fn)(struct request *);
1487 +enum blk_eh_timer_return {
1488 + BLK_EH_NOT_HANDLED,
1490 + BLK_EH_RESET_TIMER,
1493 +typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
1495 enum blk_queue_state {
1498 @@ -311,6 +322,7 @@ struct request_queue
1499 merge_bvec_fn *merge_bvec_fn;
1500 prepare_flush_fn *prepare_flush_fn;
1501 softirq_done_fn *softirq_done_fn;
1502 + rq_timed_out_fn *rq_timed_out_fn;
1503 dma_drain_needed_fn *dma_drain_needed;
1506 @@ -386,6 +398,10 @@ struct request_queue
1507 unsigned int nr_sorted;
1508 unsigned int in_flight;
1510 + unsigned int rq_timeout;
1511 + struct timer_list timeout;
1512 + struct list_head timeout_list;
1517 @@ -762,6 +778,8 @@ extern int blk_end_request_callback(stru
1518 unsigned int nr_bytes,
1519 int (drv_callback)(struct request *));
1520 extern void blk_complete_request(struct request *);
1521 +extern void __blk_complete_request(struct request *);
1522 +extern void blk_abort_request(struct request *);
1525 * blk_end_request() takes bytes instead of sectors as a complete size.
1526 @@ -803,6 +821,8 @@ extern void blk_queue_dma_alignment(stru
1527 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
1528 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
1529 extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
1530 +extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
1531 +extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
1532 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
1533 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
1534 extern int blk_do_ordered(struct request_queue *, struct request **);
1535 --- a/include/scsi/scsi_cmnd.h
1536 +++ b/include/scsi/scsi_cmnd.h
1537 @@ -75,7 +75,6 @@ struct scsi_cmnd {
1541 - int timeout_per_command;
1543 unsigned char prot_op;
1544 unsigned char prot_type;
1545 @@ -86,7 +85,6 @@ struct scsi_cmnd {
1546 /* These elements define the operation we are about to perform */
1547 unsigned char *cmnd;
1549 - struct timer_list eh_timeout; /* Used to time out the command. */
1551 /* These elements define the operation we ultimately want to perform */
1552 struct scsi_data_buffer sdb;
1553 @@ -139,7 +137,6 @@ extern void scsi_put_command(struct scsi
1554 extern void __scsi_put_command(struct Scsi_Host *, struct scsi_cmnd *,
1556 extern void scsi_finish_command(struct scsi_cmnd *cmd);
1557 -extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd);
1559 extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
1560 size_t *offset, size_t *len);
1561 --- a/include/scsi/scsi_host.h
1562 +++ b/include/scsi/scsi_host.h
1563 @@ -43,13 +43,6 @@ struct blk_queue_tags;
1564 #define DISABLE_CLUSTERING 0
1565 #define ENABLE_CLUSTERING 1
1567 -enum scsi_eh_timer_return {
1574 struct scsi_host_template {
1575 struct module *module;
1577 @@ -347,7 +340,7 @@ struct scsi_host_template {
1581 - enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
1582 + enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
1585 * Name of proc directory
1586 --- a/include/scsi/scsi_transport.h
1587 +++ b/include/scsi/scsi_transport.h
1589 #define SCSI_TRANSPORT_H
1591 #include <linux/transport_class.h>
1592 +#include <linux/blkdev.h>
1593 #include <scsi/scsi_host.h>
1594 #include <scsi/scsi_device.h>
1596 @@ -64,7 +65,7 @@ struct scsi_transport_template {
1597 * begin counting again
1598 * EH_NOT_HANDLED Begin normal error recovery
1600 - enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
1601 + enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
1604 * Used as callback for the completion of i_t_nexus request