Subject: block: unify request timeout handling
From: Jens Axboe <jens.axboe@oracle.com>
Date: Thu Oct 9 08:56:13 2008 +0200
Git: 242f9dcb8ba6f68fcd217a119a7648a4f69290e9
References: FATE#304151,bnc#417544

Right now SCSI and others do their own command timeout handling.
Move those bits to the block layer.

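As a rough sketch of the resulting driver-side API (illustrative only:
mydrv_timed_out() and mydrv_init_queue() are made-up names, not part of
this patch), a driver now registers a per-queue timeout handler and a
default timeout instead of managing its own per-command timers:

	#include <linux/blkdev.h>

	/* decide what the block layer should do with a timed-out request */
	static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
	{
		/*
		 * BLK_EH_HANDLED:     block layer completes the request
		 * BLK_EH_RESET_TIMER: re-arm the deadline and keep waiting
		 * BLK_EH_NOT_HANDLED: driver error handling takes over
		 */
		return BLK_EH_RESET_TIMER;
	}

	static void mydrv_init_queue(struct request_queue *q)
	{
		blk_queue_rq_timed_out(q, mydrv_timed_out);
		blk_queue_rq_timeout(q, 30 * HZ);  /* default per-request timeout */
	}

SCSI is converted below to do exactly this from scsi_alloc_queue(),
pointing the queue at scsi_times_out(), which in turn dispatches to the
existing eh_timed_out hooks.
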
Instead of having a timer per command, we try to be a bit more clever
and simply have one per-queue. This avoids the overhead of having to
tear down and setup a timer for each command, so it will result in a lot
less timer fiddling.
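
In condensed form (the real implementation is block/blk-timeout.c below;
locking and the atomic completion marking are left out of this sketch),
the per-queue scheme amounts to:

	/* dispatch: stamp a deadline, queue the request, and make sure
	 * the single per-queue timer fires no later than that deadline */
	req->deadline = jiffies + (req->timeout ? req->timeout : q->rq_timeout);
	list_add_tail(&req->timeout_list, &q->timeout_list);
	if (!timer_pending(&q->timeout) ||
	    time_before(round_jiffies(req->deadline), q->timeout.expires))
		mod_timer(&q->timeout, round_jiffies(req->deadline));

	/* timer: expire everything past its deadline, then re-arm the
	 * timer for the earliest deadline still on the list */

Completion then only has to unlink the request and, when the list goes
empty, delete the timer, instead of tearing down a timer per command.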

Signed-off-by: Mike Anderson <andmike@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Hannes Reinecke <hare@suse.de>
---
 block/Makefile                       |    4 
 block/blk-core.c                     |    7 +
 block/blk-settings.c                 |   12 ++
 block/blk-softirq.c                  |   30 ++++--
 block/blk-timeout.c                  |  155 +++++++++++++++++++++++++++++++++++
 block/blk.h                          |   24 +++++
 block/elevator.c                     |    8 +
 drivers/ata/libata-eh.c              |   13 +-
 drivers/ata/libata.h                 |    2 
 drivers/scsi/aacraid/aachba.c        |    2 
 drivers/scsi/gdth.c                  |   60 ++++++-----
 drivers/scsi/gdth.h                  |    2 
 drivers/scsi/gdth_proc.c             |   66 --------------
 drivers/scsi/gdth_proc.h             |    3 
 drivers/scsi/ibmvscsi/ibmvscsi.c     |    2 
 drivers/scsi/ide-scsi.c              |    2 
 drivers/scsi/ipr.c                   |    3 
 drivers/scsi/ips.c                   |    2 
 drivers/scsi/libiscsi.c              |   17 ++-
 drivers/scsi/libsas/sas_ata.c        |    2 
 drivers/scsi/libsas/sas_internal.h   |    2 
 drivers/scsi/libsas/sas_scsi_host.c  |   30 +++---
 drivers/scsi/megaraid/megaraid_sas.c |    6 -
 drivers/scsi/ncr53c8xx.c             |    4 
 drivers/scsi/qla1280.c               |    4 
 drivers/scsi/qla4xxx/ql4_os.c        |    4 
 drivers/scsi/scsi.c                  |   92 +++-----------------
 drivers/scsi/scsi_error.c            |   90 ++------------------
 drivers/scsi/scsi_lib.c              |   17 ++-
 drivers/scsi/scsi_priv.h             |    7 -
 drivers/scsi/scsi_sysfs.c            |    7 +
 drivers/scsi/scsi_transport_fc.c     |    6 -
 drivers/scsi/sd.c                    |    9 --
 drivers/scsi/sr.c                    |    5 -
 drivers/scsi/sym53c8xx_2/sym_glue.c  |    4 
 include/linux/blkdev.h               |   20 ++++
 include/scsi/scsi_cmnd.h             |    3 
 include/scsi/scsi_host.h             |    9 --
 include/scsi/scsi_transport.h        |    3 
 39 files changed, 399 insertions(+), 339 deletions(-)
 create mode 100644 block/blk-timeout.c

--- a/block/Makefile
+++ b/block/Makefile
@@ -4,8 +4,8 @@
 
 obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
 			blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
-			blk-exec.o blk-merge.o blk-softirq.o ioctl.o genhd.o \
-			scsi_ioctl.o cmd-filter.o
+			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
+			ioctl.o genhd.o scsi_ioctl.o cmd-filter.o
 
 obj-$(CONFIG_BLK_DEV_BSG)	+= bsg.o
 obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -109,6 +109,7 @@ void blk_rq_init(struct request_queue *q
 	memset(rq, 0, sizeof(*rq));
 
 	INIT_LIST_HEAD(&rq->queuelist);
+	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->cpu = -1;
 	rq->q = q;
 	rq->sector = rq->hard_sector = (sector_t) -1;
@@ -489,6 +490,8 @@ struct request_queue *blk_alloc_queue_no
 	}
 
 	init_timer(&q->unplug_timer);
+	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+	INIT_LIST_HEAD(&q->timeout_list);
 
 	kobject_init(&q->kobj, &blk_queue_ktype);
 
@@ -896,6 +899,8 @@ EXPORT_SYMBOL(blk_start_queueing);
  */
 void blk_requeue_request(struct request_queue *q, struct request *rq)
 {
+	blk_delete_timer(rq);
+	blk_clear_rq_complete(rq);
 	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
 
 	if (blk_rq_tagged(rq))
@@ -1652,6 +1657,8 @@ static void end_that_request_last(struct
 {
 	struct gendisk *disk = req->rq_disk;
 
+	blk_delete_timer(req);
+
 	if (blk_rq_tagged(req))
 		blk_queue_end_tag(req->q, req);
 
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -77,6 +77,18 @@ void blk_queue_softirq_done(struct reque
 }
 EXPORT_SYMBOL(blk_queue_softirq_done);
 
+void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
+{
+	q->rq_timeout = timeout;
+}
+EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
+
+void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
+{
+	q->rq_timed_out_fn = fn;
+}
+EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
+
 /**
  * blk_queue_make_request - define an alternate make_request function for a device
  * @q:  the request queue for the device to be affected
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -101,18 +101,7 @@ static struct notifier_block __cpuinitda
 	.notifier_call	= blk_cpu_notify,
 };
 
-/**
- * blk_complete_request - end I/O on a request
- * @req:      the request being processed
- *
- * Description:
- *     Ends all I/O on a request. It does not handle partial completions,
- *     unless the driver actually implements this in its completion callback
- *     through requeueing. The actual completion happens out-of-order,
- *     through a softirq handler. The user must have registered a completion
- *     callback through blk_queue_softirq_done().
- **/
-void blk_complete_request(struct request *req)
+void __blk_complete_request(struct request *req)
 {
 	struct request_queue *q = req->q;
 	unsigned long flags;
@@ -151,6 +140,23 @@ do_local:
 
 	local_irq_restore(flags);
 }
+
+/**
+ * blk_complete_request - end I/O on a request
+ * @req:      the request being processed
+ *
+ * Description:
+ *     Ends all I/O on a request. It does not handle partial completions,
+ *     unless the driver actually implements this in its completion callback
+ *     through requeueing. The actual completion happens out-of-order,
+ *     through a softirq handler. The user must have registered a completion
+ *     callback through blk_queue_softirq_done().
+ **/
+void blk_complete_request(struct request *req)
+{
+	if (!blk_mark_rq_complete(req))
+		__blk_complete_request(req);
+}
 EXPORT_SYMBOL(blk_complete_request);
 
 __init int blk_softirq_init(void)
--- /dev/null
+++ b/block/blk-timeout.c
@@ -0,0 +1,155 @@
+/*
+ * Functions related to generic timeout handling of requests.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+
+#include "blk.h"
+
+/*
+ * blk_delete_timer - Delete/cancel timer for a given request.
+ * @req:	request that we are canceling timer for
+ *
+ */
+void blk_delete_timer(struct request *req)
+{
+	struct request_queue *q = req->q;
+
+	/*
+	 * Nothing to detach
+	 */
+	if (!q->rq_timed_out_fn || !req->deadline)
+		return;
+
+	list_del_init(&req->timeout_list);
+
+	if (list_empty(&q->timeout_list))
+		del_timer(&q->timeout);
+}
+
+static void blk_rq_timed_out(struct request *req)
+{
+	struct request_queue *q = req->q;
+	enum blk_eh_timer_return ret;
+
+	ret = q->rq_timed_out_fn(req);
+	switch (ret) {
+	case BLK_EH_HANDLED:
+		__blk_complete_request(req);
+		break;
+	case BLK_EH_RESET_TIMER:
+		blk_clear_rq_complete(req);
+		blk_add_timer(req);
+		break;
+	case BLK_EH_NOT_HANDLED:
+		/*
+		 * LLD handles this for now but in the future
+		 * we can send a request msg to abort the command
+		 * and we can move more of the generic scsi eh code to
+		 * the blk layer.
+		 */
+		break;
+	default:
+		printk(KERN_ERR "block: bad eh return: %d\n", ret);
+		break;
+	}
+}
+
+void blk_rq_timed_out_timer(unsigned long data)
+{
+	struct request_queue *q = (struct request_queue *) data;
+	unsigned long flags, uninitialized_var(next), next_set = 0;
+	struct request *rq, *tmp;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+
+	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
+		if (time_after_eq(jiffies, rq->deadline)) {
+			list_del_init(&rq->timeout_list);
+
+			/*
+			 * Check if we raced with end io completion
+			 */
+			if (blk_mark_rq_complete(rq))
+				continue;
+			blk_rq_timed_out(rq);
+		}
+		if (!next_set) {
+			next = rq->deadline;
+			next_set = 1;
+		} else if (time_after(next, rq->deadline))
+			next = rq->deadline;
+	}
+
+	if (next_set && !list_empty(&q->timeout_list))
+		mod_timer(&q->timeout, round_jiffies(next));
+
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/**
+ * blk_abort_request -- Request request recovery for the specified command
+ * @req:	pointer to the request of interest
+ *
+ * This function requests that the block layer start recovery for the
+ * request by deleting the timer and calling the q's timeout function.
+ * LLDDs who implement their own error recovery MAY ignore the timeout
+ * event if they generated blk_abort_request. Must hold queue lock.
+ */
+void blk_abort_request(struct request *req)
+{
+	blk_delete_timer(req);
+	blk_rq_timed_out(req);
+}
+EXPORT_SYMBOL_GPL(blk_abort_request);
+
+/**
+ * blk_add_timer - Start timeout timer for a single request
+ * @req:	request that is about to start running.
+ *
+ * Notes:
+ *    Each request has its own timer, and as it is added to the queue, we
+ *    set up the timer. When the request completes, we cancel the timer.
+ */
+void blk_add_timer(struct request *req)
+{
+	struct request_queue *q = req->q;
+	unsigned long expiry;
+
+	if (!q->rq_timed_out_fn)
+		return;
+
+	BUG_ON(!list_empty(&req->timeout_list));
+	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
+
+	if (req->timeout)
+		req->deadline = jiffies + req->timeout;
+	else {
+		req->deadline = jiffies + q->rq_timeout;
+		/*
+		 * Some LLDs, like scsi, peek at the timeout to prevent
+		 * a command from being retried forever.
+		 */
+		req->timeout = q->rq_timeout;
+	}
+	list_add_tail(&req->timeout_list, &q->timeout_list);
+
+	/*
+	 * If the timer isn't already pending or this timeout is earlier
+	 * than an existing one, modify the timer. Round to next nearest
+	 * second.
+	 */
+	expiry = round_jiffies(req->deadline);
+
+	/*
+	 * We use ->deadline == 0 to detect whether a timer was added or
+	 * not, so just increase to next jiffy for that specific case
+	 */
+	if (unlikely(!req->deadline))
+		req->deadline = 1;
+
+	if (!timer_pending(&q->timeout) ||
+	    time_before(expiry, q->timeout.expires))
+		mod_timer(&q->timeout, expiry);
+}
--- a/block/blk.h
+++ b/block/blk.h
@@ -17,6 +17,30 @@ void __blk_queue_free_tags(struct reques
 
 void blk_unplug_work(struct work_struct *work);
 void blk_unplug_timeout(unsigned long data);
+void blk_rq_timed_out_timer(unsigned long data);
+void blk_delete_timer(struct request *);
+void blk_add_timer(struct request *);
+
+/*
+ * Internal atomic flags for request handling
+ */
+enum rq_atomic_flags {
+	REQ_ATOM_COMPLETE = 0,
+};
+
+/*
+ * EH timer and IO completion will both attempt to 'grab' the request, make
+ * sure that only one of them succeeds
+ */
+static inline int blk_mark_rq_complete(struct request *rq)
+{
+	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+}
+
+static inline void blk_clear_rq_complete(struct request *rq)
+{
+	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+}
 
 struct io_context *current_io_context(gfp_t gfp_flags, int node);
 
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -36,6 +36,8 @@
 #include <linux/hash.h>
 #include <linux/uaccess.h>
 
+#include "blk.h"
+
 static DEFINE_SPINLOCK(elv_list_lock);
 static LIST_HEAD(elv_list);
 
@@ -779,6 +781,12 @@ struct request *elv_next_request(struct
 		 */
 		rq->cmd_flags |= REQ_STARTED;
 		blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+
+		/*
+		 * We are now handing the request to the hardware,
+		 * add the timeout handler
+		 */
+		blk_add_timer(rq);
 	}
 
 	if (!q->boundary_rq || q->boundary_rq == rq) {
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -33,6 +33,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/blkdev.h>
 #include <linux/pci.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -457,29 +458,29 @@ static void ata_eh_clear_action(struct a
  *	RETURNS:
  *	EH_HANDLED or EH_NOT_HANDLED
  */
-enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
+enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct ata_port *ap = ata_shost_to_port(host);
 	unsigned long flags;
 	struct ata_queued_cmd *qc;
-	enum scsi_eh_timer_return ret;
+	enum blk_eh_timer_return ret;
 
 	DPRINTK("ENTER\n");
 
 	if (ap->ops->error_handler) {
-		ret = EH_NOT_HANDLED;
+		ret = BLK_EH_NOT_HANDLED;
 		goto out;
 	}
 
-	ret = EH_HANDLED;
+	ret = BLK_EH_HANDLED;
 	spin_lock_irqsave(ap->lock, flags);
 	qc = ata_qc_from_tag(ap, ap->link.active_tag);
 	if (qc) {
 		WARN_ON(qc->scsicmd != cmd);
 		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
 		qc->err_mask |= AC_ERR_TIMEOUT;
-		ret = EH_NOT_HANDLED;
+		ret = BLK_EH_NOT_HANDLED;
 	}
 	spin_unlock_irqrestore(ap->lock, flags);
 
@@ -828,7 +829,7 @@ void ata_qc_schedule_eh(struct ata_queue
 	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
 	 * this function completes.
 	 */
-	scsi_req_abort_cmd(qc->scsicmd);
+	blk_abort_request(qc->scsicmd->request);
 }
 
 /**
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -155,7 +155,7 @@ extern int ata_bus_probe(struct ata_port
 /* libata-eh.c */
 extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
 extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
-extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
+extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
 extern void ata_scsi_error(struct Scsi_Host *host);
 extern void ata_port_wait_eh(struct ata_port *ap);
 extern void ata_eh_fastdrain_timerfn(unsigned long arg);
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1139,7 +1139,7 @@ static struct aac_srb * aac_scsi_common(
 	srbcmd->id = cpu_to_le32(scmd_id(cmd));
 	srbcmd->lun = cpu_to_le32(cmd->device->lun);
 	srbcmd->flags = cpu_to_le32(flag);
-	timeout = cmd->timeout_per_command/HZ;
+	timeout = cmd->request->timeout/HZ;
 	if (timeout == 0)
 		timeout = 1;
 	srbcmd->timeout = cpu_to_le32(timeout);  // timeout in seconds
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -464,7 +464,6 @@ int __gdth_execute(struct scsi_device *s
 
 	/* use request field to save the ptr. to completion struct. */
 	scp->request = (struct request *)&wait;
-	scp->timeout_per_command = timeout*HZ;
 	scp->cmd_len = 12;
 	scp->cmnd = cmnd;
 	cmndinfo.priority = IOCTL_PRI;
@@ -1995,23 +1994,12 @@ static void gdth_putq(gdth_ha_str *ha, S
 	register Scsi_Cmnd *pscp;
 	register Scsi_Cmnd *nscp;
 	ulong flags;
-	unchar b, t;
 
 	TRACE(("gdth_putq() priority %d\n",priority));
 	spin_lock_irqsave(&ha->smp_lock, flags);
 
-	if (!cmndinfo->internal_command) {
+	if (!cmndinfo->internal_command)
 		cmndinfo->priority = priority;
-		b = scp->device->channel;
-		t = scp->device->id;
-		if (priority >= DEFAULT_PRI) {
-			if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
-			    (b==ha->virt_bus && t<MAX_HDRIVES && ha->hdr[t].lock)) {
-				TRACE2(("gdth_putq(): locked IO ->update_timeout()\n"));
-				cmndinfo->timeout = gdth_update_timeout(scp, 0);
-			}
-		}
-	}
 
 	if (ha->req_first==NULL) {
 		ha->req_first = scp;	/* queue was empty */
@@ -3899,6 +3887,39 @@ static const char *gdth_info(struct Scsi
 	return ((const char *)ha->binfo.type_string);
 }
 
+static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
+{
+	gdth_ha_str *ha = shost_priv(scp->device->host);
+	struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
+	unchar b, t;
+	ulong flags;
+	enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
+
+	TRACE(("%s() cmd 0x%x\n", __func__, scp->cmnd[0]));
+	b = scp->device->channel;
+	t = scp->device->id;
+
+	/*
+	 * We don't really honor the command timeout, but we allow the
+	 * command up to six times its actual timeout: reset the timer
+	 * unless this is already the sixth timeout on this command!
+	 */
+	if (++cmndinfo->timeout_count < 6)
+		retval = BLK_EH_RESET_TIMER;
+
+	/* Reset the timeout if it is locked IO */
+	spin_lock_irqsave(&ha->smp_lock, flags);
+	if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
+	    (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
+		TRACE2(("%s(): locked IO, reset timeout\n", __func__));
+		retval = BLK_EH_RESET_TIMER;
+	}
+	spin_unlock_irqrestore(&ha->smp_lock, flags);
+
+	return retval;
+}
+
+
 static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
 {
 	gdth_ha_str *ha = shost_priv(scp->device->host);
@@ -3992,7 +4013,7 @@ static int gdth_queuecommand(struct scsi
 	BUG_ON(!cmndinfo);
 
 	scp->scsi_done = done;
-	gdth_update_timeout(scp, scp->timeout_per_command * 6);
+	cmndinfo->timeout_count = 0;
 	cmndinfo->priority = DEFAULT_PRI;
 
 	return __gdth_queuecommand(ha, scp, cmndinfo);
@@ -4096,12 +4117,10 @@ static int ioc_lockdrv(void __user *arg)
 			ha->hdr[j].lock = 1;
 			spin_unlock_irqrestore(&ha->smp_lock, flags);
 			gdth_wait_completion(ha, ha->bus_cnt, j);
-			gdth_stop_timeout(ha, ha->bus_cnt, j);
 		} else {
 			spin_lock_irqsave(&ha->smp_lock, flags);
 			ha->hdr[j].lock = 0;
 			spin_unlock_irqrestore(&ha->smp_lock, flags);
-			gdth_start_timeout(ha, ha->bus_cnt, j);
 			gdth_next(ha);
 		}
 	}
@@ -4539,18 +4558,14 @@ static int gdth_ioctl(struct inode *inod
 				spin_lock_irqsave(&ha->smp_lock, flags);
 				ha->raw[i].lock = 1;
 				spin_unlock_irqrestore(&ha->smp_lock, flags);
-				for (j = 0; j < ha->tid_cnt; ++j) {
+				for (j = 0; j < ha->tid_cnt; ++j)
 					gdth_wait_completion(ha, i, j);
-					gdth_stop_timeout(ha, i, j);
-				}
 			} else {
 				spin_lock_irqsave(&ha->smp_lock, flags);
 				ha->raw[i].lock = 0;
 				spin_unlock_irqrestore(&ha->smp_lock, flags);
-				for (j = 0; j < ha->tid_cnt; ++j) {
-					gdth_start_timeout(ha, i, j);
+				for (j = 0; j < ha->tid_cnt; ++j)
 					gdth_next(ha);
-				}
 			}
 		}
 		break;
@@ -4644,6 +4659,7 @@ static struct scsi_host_template gdth_te
 	.slave_configure	= gdth_slave_configure,
 	.bios_param		= gdth_bios_param,
 	.proc_info		= gdth_proc_info,
+	.eh_timed_out		= gdth_timed_out,
 	.proc_name		= "gdth",
 	.can_queue		= GDTH_MAXCMDS,
 	.this_id		= -1,
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -916,7 +916,7 @@ typedef struct {
 	gdth_cmd_str *internal_cmd_str;	/* crier for internal messages*/
 	dma_addr_t sense_paddr;		/* sense dma-addr */
 	unchar priority;
-	int timeout;
+	int timeout_count;		/* # of timeout calls */
 	volatile int wait_for_completion;
 	ushort status;
 	ulong32 info;
--- a/drivers/scsi/gdth_proc.c
+++ b/drivers/scsi/gdth_proc.c
@@ -748,69 +748,3 @@ static void gdth_wait_completion(gdth_ha
 	}
 	spin_unlock_irqrestore(&ha->smp_lock, flags);
 }
-
-static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id)
-{
-	ulong flags;
-	Scsi_Cmnd *scp;
-	unchar b, t;
-
-	spin_lock_irqsave(&ha->smp_lock, flags);
-
-	for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
-		struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
-		if (!cmndinfo->internal_command) {
-			b = scp->device->channel;
-			t = scp->device->id;
-			if (t == (unchar)id && b == (unchar)busnum) {
-				TRACE2(("gdth_stop_timeout(): update_timeout()\n"));
-				cmndinfo->timeout = gdth_update_timeout(scp, 0);
-			}
-		}
-	}
-	spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id)
-{
-	ulong flags;
-	Scsi_Cmnd *scp;
-	unchar b, t;
-
-	spin_lock_irqsave(&ha->smp_lock, flags);
-
-	for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
-		struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
-		if (!cmndinfo->internal_command) {
-			b = scp->device->channel;
-			t = scp->device->id;
-			if (t == (unchar)id && b == (unchar)busnum) {
-				TRACE2(("gdth_start_timeout(): update_timeout()\n"));
-				gdth_update_timeout(scp, cmndinfo->timeout);
-			}
-		}
-	}
-	spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout)
-{
-	int oldto;
-
-	oldto = scp->timeout_per_command;
-	scp->timeout_per_command = timeout;
-
-	if (timeout == 0) {
-		del_timer(&scp->eh_timeout);
-		scp->eh_timeout.data = (unsigned long) NULL;
-		scp->eh_timeout.expires = 0;
-	} else {
-		if (scp->eh_timeout.data != (unsigned long) NULL)
-			del_timer(&scp->eh_timeout);
-		scp->eh_timeout.data = (unsigned long) scp;
-		scp->eh_timeout.expires = jiffies + timeout;
-		add_timer(&scp->eh_timeout);
-	}
-
-	return oldto;
-}
--- a/drivers/scsi/gdth_proc.h
+++ b/drivers/scsi/gdth_proc.h
@@ -20,9 +20,6 @@ static char *gdth_ioctl_alloc(gdth_ha_st
 			      ulong64 *paddr);
 static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr);
 static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
-static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id);
-static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id);
-static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout);
 
 #endif
 
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -756,7 +756,7 @@ static int ibmvscsi_queuecommand(struct
 	init_event_struct(evt_struct,
 			  handle_cmd_rsp,
 			  VIOSRP_SRP_FORMAT,
-			  cmnd->timeout_per_command/HZ);
+			  cmnd->request->timeout/HZ);
 
 	evt_struct->cmnd = cmnd;
 	evt_struct->cmnd_done = done;
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -612,7 +612,7 @@ static int idescsi_queue (struct scsi_cm
 	pc->req_xfer = pc->buf_size = scsi_bufflen(cmd);
 	pc->scsi_cmd = cmd;
 	pc->done = done;
-	pc->timeout = jiffies + cmd->timeout_per_command;
+	pc->timeout = jiffies + cmd->request->timeout;
 
 	if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
 		printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3670,7 +3670,8 @@ static int ipr_slave_configure(struct sc
 			sdev->no_uld_attach = 1;
 		}
 		if (ipr_is_vset_device(res)) {
-			sdev->timeout = IPR_VSET_RW_TIMEOUT;
+			blk_queue_rq_timeout(sdev->request_queue,
+					     IPR_VSET_RW_TIMEOUT);
 			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
 		}
 		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -3818,7 +3818,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t *
 		scb->cmd.dcdb.segment_4G = 0;
 		scb->cmd.dcdb.enhanced_sg = 0;
 
-		TimeOut = scb->scsi_cmd->timeout_per_command;
+		TimeOut = scb->scsi_cmd->request->timeout;
 
 		if (ha->subsys->param[4] & 0x00100000) {	/* If NEW Tape DCDB is Supported */
 			if (!scb->sg_len) {
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1476,12 +1476,12 @@ static void iscsi_start_tx(struct iscsi_
 		scsi_queue_work(conn->session->host, &conn->xmitwork);
 }
 
-static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
+static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
 	struct iscsi_conn *conn;
-	enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
+	enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
 
 	cls_session = starget_to_session(scsi_target(scmd->device));
 	session = cls_session->dd_data;
@@ -1494,14 +1494,14 @@ static enum scsi_eh_timer_return iscsi_e
 		 * We are probably in the middle of iscsi recovery so let
 		 * that complete and handle the error.
 		 */
-		rc = EH_RESET_TIMER;
+		rc = BLK_EH_RESET_TIMER;
 		goto done;
 	}
 
 	conn = session->leadconn;
 	if (!conn) {
 		/* In the middle of shuting down */
-		rc = EH_RESET_TIMER;
+		rc = BLK_EH_RESET_TIMER;
 		goto done;
 	}
 
@@ -1513,20 +1513,21 @@ static enum scsi_eh_timer_return iscsi_e
 	 */
 	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
 			    (conn->ping_timeout * HZ), jiffies))
-		rc = EH_RESET_TIMER;
+		rc = BLK_EH_RESET_TIMER;
 	/*
 	 * if we are about to check the transport then give the command
 	 * more time
 	 */
 	if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
 			   jiffies))
-		rc = EH_RESET_TIMER;
+		rc = BLK_EH_RESET_TIMER;
 	/* if in the middle of checking the transport then give us more time */
 	if (conn->ping_task)
-		rc = EH_RESET_TIMER;
+		rc = BLK_EH_RESET_TIMER;
 done:
 	spin_unlock(&session->lock);
-	debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
+	debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ?
+					"timer reset" : "nh");
 	return rc;
 }
 
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -398,7 +398,7 @@ void sas_ata_task_abort(struct sas_task
 
 	/* Bounce SCSI-initiated commands to the SCSI EH */
 	if (qc->scsicmd) {
-		scsi_req_abort_cmd(qc->scsicmd);
+		blk_abort_request(qc->scsicmd->request);
 		scsi_schedule_eh(qc->scsicmd->device->host);
 		return;
 	}
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -55,7 +55,7 @@ void sas_unregister_phys(struct sas_ha_s
 int sas_register_ports(struct sas_ha_struct *sas_ha);
 void sas_unregister_ports(struct sas_ha_struct *sas_ha);
 
-enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
+enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
 
 int sas_init_queue(struct sas_ha_struct *sas_ha);
 int sas_init_events(struct sas_ha_struct *sas_ha);
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -673,43 +673,43 @@ out:
 	return;
 }
 
-enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
+enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
 {
 	struct sas_task *task = TO_SAS_TASK(cmd);
 	unsigned long flags;
 
 	if (!task) {
-		cmd->timeout_per_command /= 2;
+		cmd->request->timeout /= 2;
 		SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
-			    cmd, task, (cmd->timeout_per_command ?
-			    "EH_RESET_TIMER" : "EH_NOT_HANDLED"));
-		if (!cmd->timeout_per_command)
-			return EH_NOT_HANDLED;
-		return EH_RESET_TIMER;
+			    cmd, task, (cmd->request->timeout ?
+			    "BLK_EH_RESET_TIMER" : "BLK_EH_NOT_HANDLED"));
+		if (!cmd->request->timeout)
+			return BLK_EH_NOT_HANDLED;
+		return BLK_EH_RESET_TIMER;
 	}
 
 	spin_lock_irqsave(&task->task_state_lock, flags);
 	BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
 	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
-		SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
-			    cmd, task);
-		return EH_HANDLED;
+		SAS_DPRINTK("command 0x%p, task 0x%p, timed out: "
+			    "BLK_EH_HANDLED\n", cmd, task);
+		return BLK_EH_HANDLED;
 	}
 	if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
 		spin_unlock_irqrestore(&task->task_state_lock, flags);
 		SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
-			    "EH_RESET_TIMER\n",
+			    "BLK_EH_RESET_TIMER\n",
 			    cmd, task);
-		return EH_RESET_TIMER;
+		return BLK_EH_RESET_TIMER;
 	}
 	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
 	spin_unlock_irqrestore(&task->task_state_lock, flags);
 
-	SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n",
+	SAS_DPRINTK("command 0x%p, task 0x%p, timed out: BLK_EH_NOT_HANDLED\n",
 		    cmd, task);
 
-	return EH_NOT_HANDLED;
+	return BLK_EH_NOT_HANDLED;
 }
 
 int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
@@ -1039,7 +1039,7 @@ void sas_task_abort(struct sas_task *tas
 		return;
 	}
 
-	scsi_req_abort_cmd(sc);
+	blk_abort_request(sc->request);
 	scsi_schedule_eh(sc->device->host);
 }
 
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -1167,7 +1167,7 @@ static int megasas_generic_reset(struct
 * cmd has not been completed within the timeout period.
 */
 static enum
-scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
+blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
 {
 	struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
 	struct megasas_instance *instance;
@@ -1175,7 +1175,7 @@ scsi_eh_timer_return megasas_reset_timer
 
 	if (time_after(jiffies, scmd->jiffies_at_alloc +
 				(MEGASAS_DEFAULT_CMD_TIMEOUT * 2) * HZ)) {
-		return EH_NOT_HANDLED;
+		return BLK_EH_NOT_HANDLED;
 	}
 
 	instance = cmd->instance;
@@ -1189,7 +1189,7 @@ scsi_eh_timer_return megasas_reset_timer
 
 		spin_unlock_irqrestore(instance->host->host_lock, flags);
 	}
-	return EH_RESET_TIMER;
+	return BLK_EH_RESET_TIMER;
 }
 
 /**
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -4170,8 +4170,8 @@ static int ncr_queue_command (struct ncb
 	**
 	**----------------------------------------------------
 	*/
-	if (np->settle_time && cmd->timeout_per_command >= HZ) {
-		u_long tlimit = jiffies + cmd->timeout_per_command - HZ;
+	if (np->settle_time && cmd->request->timeout >= HZ) {
+		u_long tlimit = jiffies + cmd->request->timeout - HZ;
 		if (time_after(np->settle_time, tlimit))
 			np->settle_time = tlimit;
 	}
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2845,7 +2845,7 @@ qla1280_64bit_start_scsi(struct scsi_qla
 	memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
 
 	/* Set ISP command timeout. */
-	pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
+	pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
 
 	/* Set device target ID and LUN */
 	pkt->lun = SCSI_LUN_32(cmd);
@@ -3114,7 +3114,7 @@ qla1280_32bit_start_scsi(struct scsi_qla
 	memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
 
 	/* Set ISP command timeout. */
-	pkt->timeout = cpu_to_le16(cmd->timeout_per_command/HZ);
+	pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
 
 	/* Set device target ID and LUN */
 	pkt->lun = SCSI_LUN_32(cmd);
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1542,7 +1542,7 @@ static int qla4xxx_eh_device_reset(struc
 	DEBUG2(printk(KERN_INFO
 		      "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,"
 		      "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no,
-		      cmd, jiffies, cmd->timeout_per_command / HZ,
+		      cmd, jiffies, cmd->request->timeout / HZ,
 		      ha->dpc_flags, cmd->result, cmd->allowed));
 
 	/* FIXME: wait for hba to go online */
@@ -1598,7 +1598,7 @@ static int qla4xxx_eh_target_reset(struc
 	DEBUG2(printk(KERN_INFO
 		      "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, "
 		      "to=%x,dpc_flags=%lx, status=%x allowed=%d\n",
-		      ha->host_no, cmd, jiffies, cmd->timeout_per_command / HZ,
+		      ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
 		      ha->dpc_flags, cmd->result, cmd->allowed));
 
 	stat = qla4xxx_reset_target(ha, ddb_entry);
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -291,7 +291,6 @@ struct scsi_cmnd *scsi_get_command(struc
 		unsigned long flags;
 
 		cmd->device = dev;
-		init_timer(&cmd->eh_timeout);
 		INIT_LIST_HEAD(&cmd->list);
 		spin_lock_irqsave(&dev->list_lock, flags);
 		list_add_tail(&cmd->list, &dev->cmd_list);
@@ -652,14 +651,19 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
 	unsigned long timeout;
 	int rtn = 0;
 
+	/*
+	 * We will use a queued command if possible, otherwise we will
+	 * emulate the queuing and calling of completion function ourselves.
+	 */
+	atomic_inc(&cmd->device->iorequest_cnt);
+
 	/* check if the device is still usable */
 	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
 		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
 		 * returns an immediate error upwards, and signals
 		 * that the device is no longer present */
 		cmd->result = DID_NO_CONNECT << 16;
-		atomic_inc(&cmd->device->iorequest_cnt);
-		__scsi_done(cmd);
+		scsi_done(cmd);
 		/* return 0 (because the command has been processed) */
 		goto out;
 	}
@@ -672,6 +676,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
 		 * future requests should not occur until the device
 		 * transitions out of the suspend state.
 		 */
+
 		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
 
 		SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
@@ -714,21 +719,9 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
 		host->resetting = 0;
 	}
 
-	/*
-	 * AK: unlikely race here: for some reason the timer could
-	 * expire before the serial number is set up below.
-	 */
-	scsi_add_timer(cmd, cmd->timeout_per_command, scsi_times_out);
-
 	scsi_log_send(cmd);
 
 	/*
-	 * We will use a queued command if possible, otherwise we will
-	 * emulate the queuing and calling of completion function ourselves.
-	 */
-	atomic_inc(&cmd->device->iorequest_cnt);
-
-	/*
 	 * Before we queue this command, check if the command
 	 * length exceeds what the host adapter can handle.
 	 */
@@ -744,6 +737,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
 	}
 
 	spin_lock_irqsave(host->host_lock, flags);
+	/*
+	 * AK: unlikely race here: for some reason the timer could
+	 * expire before the serial number is set up below.
+	 *
+	 * TODO: kill serial or move to blk layer
+	 */
 	scsi_cmd_get_serial(host, cmd);
 
 	if (unlikely(host->shost_state == SHOST_DEL)) {
@@ -754,12 +753,8 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
 	}
 	spin_unlock_irqrestore(host->host_lock, flags);
 	if (rtn) {
-		if (scsi_delete_timer(cmd)) {
-			atomic_inc(&cmd->device->iodone_cnt);
-			scsi_queue_insert(cmd,
-					  (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
-					  rtn : SCSI_MLQUEUE_HOST_BUSY);
-		}
+		scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
+						rtn : SCSI_MLQUEUE_HOST_BUSY);
 		SCSI_LOG_MLQUEUE(3,
 				 printk("queuecommand : request rejected\n"));
 	}
@@ -770,24 +765,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
 }
 
 /**
- * scsi_req_abort_cmd -- Request command recovery for the specified command
- * @cmd: pointer to the SCSI command of interest
- *
- * This function requests that SCSI Core start recovery for the
- * command by deleting the timer and adding the command to the eh
- * queue.  It can be called by either LLDDs or SCSI Core.  LLDDs who
- * implement their own error recovery MAY ignore the timeout event if
- * they generated scsi_req_abort_cmd.
- */
-void scsi_req_abort_cmd(struct scsi_cmnd *cmd)
-{
-	if (!scsi_delete_timer(cmd))
-		return;
-	scsi_times_out(cmd);
-}
-EXPORT_SYMBOL(scsi_req_abort_cmd);
-
-/**
  * scsi_done - Enqueue the finished SCSI command into the done queue.
  * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
  *       ownership back to SCSI Core -- i.e. the LLDD has finished with it.
@@ -802,42 +779,7 @@ EXPORT_SYMBOL(scsi_req_abort_cmd);
  */
 static void scsi_done(struct scsi_cmnd *cmd)
 {
-	/*
-	 * We don't have to worry about this one timing out anymore.
-	 * If we are unable to remove the timer, then the command
-	 * has already timed out.  In which case, we have no choice but to
-	 * let the timeout function run, as we have no idea where in fact
-	 * that function could really be.  It might be on another processor,
-	 * etc, etc.
-	 */
-	if (!scsi_delete_timer(cmd))
-		return;
-	__scsi_done(cmd);
-}
-
-/* Private entry to scsi_done() to complete a command when the timer
- * isn't running --- used by scsi_times_out */
-void __scsi_done(struct scsi_cmnd *cmd)
-{
-	struct request *rq = cmd->request;
-
-	/*
-	 * Set the serial numbers back to zero
-	 */
-	cmd->serial_number = 0;
-
-	atomic_inc(&cmd->device->iodone_cnt);
-	if (cmd->result)
-		atomic_inc(&cmd->device->ioerr_cnt);
-
-	BUG_ON(!rq);
-
-	/*
-	 * The uptodate/nbytes values don't matter, as we allow partial
-	 * completes and thus will check this in the softirq callback
-	 */
-	rq->completion_data = cmd;
-	blk_complete_request(rq);
+	blk_complete_request(cmd->request);
 }
 
 /* Move this to a header if it becomes more generally useful */
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -112,69 +112,8 @@ int scsi_eh_scmd_add(struct scsi_cmnd *s
 }
 
 /**
- * scsi_add_timer - Start timeout timer for a single scsi command.
- * @scmd:	scsi command that is about to start running.
- * @timeout:	amount of time to allow this command to run.
- * @complete:	timeout function to call if timer isn't canceled.
- *
- * Notes:
- *    This should be turned into an inline function.  Each scsi command
- *    has its own timer, and as it is added to the queue, we set up the
- *    timer.  When the command completes, we cancel the timer.
- */
-void scsi_add_timer(struct scsi_cmnd *scmd, int timeout,
-		    void (*complete)(struct scsi_cmnd *))
-{
-
-	/*
-	 * If the clock was already running for this command, then
-	 * first delete the timer.  The timer handling code gets rather
-	 * confused if we don't do this.
-	 */
-	if (scmd->eh_timeout.function)
-		del_timer(&scmd->eh_timeout);
-
-	scmd->eh_timeout.data = (unsigned long)scmd;
-	scmd->eh_timeout.expires = jiffies + timeout;
-	scmd->eh_timeout.function = (void (*)(unsigned long)) complete;
-
-	SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:"
-					  " %d, (%p)\n", __func__,
-					  scmd, timeout, complete));
-
-	add_timer(&scmd->eh_timeout);
-}
-
-/**
- * scsi_delete_timer - Delete/cancel timer for a given function.
- * @scmd:	Cmd that we are canceling timer for
- *
- * Notes:
- *     This should be turned into an inline function.
- *
- * Return value:
- *     1 if we were able to detach the timer.  0 if we blew it, and the
- *     timer function has already started to run.
- */
-int scsi_delete_timer(struct scsi_cmnd *scmd)
-{
-	int rtn;
-
-	rtn = del_timer(&scmd->eh_timeout);
-
-	SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p,"
-					 " rtn: %d\n", __func__,
-					 scmd, rtn));
-
-	scmd->eh_timeout.data = (unsigned long)NULL;
-	scmd->eh_timeout.function = NULL;
-
-	return rtn;
-}
-
-/**
  * scsi_times_out - Timeout function for normal scsi commands.
- * @scmd:	Cmd that is timing out.
+ * @req:	request that is timing out.
  *
  * Notes:
  *     We do not need to lock this.  There is the potential for a race
@@ -182,9 +121,11 @@ int scsi_delete_timer(struct scsi_cmnd *
  *     normal completion function determines that the timer has already
  *     fired, then it mustn't do anything.
  */
-void scsi_times_out(struct scsi_cmnd *scmd)
+enum blk_eh_timer_return scsi_times_out(struct request *req)
 {
-	enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
+	struct scsi_cmnd *scmd = req->special;
+	enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
+	enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
 
 	scsi_log_completion(scmd, TIMEOUT_ERROR);
 
@@ -196,22 +137,20 @@ void scsi_times_out(struct scsi_cmnd *sc
 		eh_timed_out = NULL;
 
 	if (eh_timed_out)
-		switch (eh_timed_out(scmd)) {
-		case EH_HANDLED:
-			__scsi_done(scmd);
-			return;
-		case EH_RESET_TIMER:
-			scsi_add_timer(scmd, scmd->timeout_per_command,
-				scsi_times_out);
-			return;
-		case EH_NOT_HANDLED:
+		rtn = eh_timed_out(scmd);
+	switch (rtn) {
+	case BLK_EH_NOT_HANDLED:
 			break;
+	default:
+		return rtn;
 	}
 
 	if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
 		scmd->result |= DID_TIME_OUT << 16;
-		__scsi_done(scmd);
+		return BLK_EH_HANDLED;
 	}
+
+	return BLK_EH_NOT_HANDLED;
 }
 
 /**
@@ -1793,7 +1732,6 @@ scsi_reset_provider(struct scsi_device *
 
 	blk_rq_init(NULL, &req);
 	scmd->request = &req;
-	memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
 
 	scmd->cmnd = req.cmd;
 
@@ -1804,8 +1742,6 @@ scsi_reset_provider(struct scsi_device *
 
 	scmd->sc_data_direction = DMA_BIDIRECTIONAL;
 
-	init_timer(&scmd->eh_timeout);
-
 	spin_lock_irqsave(shost->host_lock, flags);
 	shost->tmf_in_progress = 1;
 	spin_unlock_irqrestore(shost->host_lock, flags);
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1181,7 +1181,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_d
 
 	cmd->transfersize = req->data_len;
 	cmd->allowed = req->retries;
-	cmd->timeout_per_command = req->timeout;
 	return BLKPREP_OK;
 }
 EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
@@ -1416,17 +1415,26 @@ static void scsi_kill_request(struct req
 	spin_unlock(shost->host_lock);
 	spin_lock(sdev->request_queue->queue_lock);
 
-	__scsi_done(cmd);
+	blk_complete_request(req);
 }
 
 static void scsi_softirq_done(struct request *rq)
 {
-	struct scsi_cmnd *cmd = rq->completion_data;
-	unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
+	struct scsi_cmnd *cmd = rq->special;
+	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
 	int disposition;
 
 	INIT_LIST_HEAD(&cmd->eh_entry);
 
+	/*
+	 * Set the serial numbers back to zero
+	 */
+	cmd->serial_number = 0;
+
+	atomic_inc(&cmd->device->iodone_cnt);
+	if (cmd->result)
+		atomic_inc(&cmd->device->ioerr_cnt);
+
 	disposition = scsi_decide_disposition(cmd);
 	if (disposition != SUCCESS &&
 	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
@@ -1675,6 +1683,7 @@ struct request_queue *scsi_alloc_queue(s
 
 	blk_queue_prep_rq(q, scsi_prep_fn);
 	blk_queue_softirq_done(q, scsi_softirq_done);
+	blk_queue_rq_timed_out(q, scsi_times_out);
 	return q;
 }
 
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -4,6 +4,7 @@
 #include <linux/device.h>
 
 struct request_queue;
+struct request;
 struct scsi_cmnd;
 struct scsi_device;
 struct scsi_host_template;
@@ -27,7 +28,6 @@ extern void scsi_exit_hosts(void);
 extern int scsi_dispatch_cmd(struct scsi_cmnd *cmd);
 extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
 extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
-extern void __scsi_done(struct scsi_cmnd *cmd);
 #ifdef CONFIG_SCSI_LOGGING
 void scsi_log_send(struct scsi_cmnd *cmd);
 void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
@@ -49,10 +49,7 @@ extern int __init scsi_init_devinfo(void
 extern void scsi_exit_devinfo(void);
 
 /* scsi_error.c */
-extern void scsi_add_timer(struct scsi_cmnd *, int,
-			   void (*)(struct scsi_cmnd *));
-extern int scsi_delete_timer(struct scsi_cmnd *);
-extern void scsi_times_out(struct scsi_cmnd *cmd);
+extern enum blk_eh_timer_return scsi_times_out(struct request *req);
 extern int scsi_error_handler(void *host);
 extern int scsi_decide_disposition(struct scsi_cmnd *cmd);
 extern void scsi_eh_wakeup(struct Scsi_Host *shost);
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -560,12 +560,15 @@ sdev_rd_attr (vendor, "%.8s\n");
 sdev_rd_attr (model, "%.16s\n");
 sdev_rd_attr (rev, "%.4s\n");
 
+/*
+ * TODO: can we make these symlinks to the block layer ones?
+ */
 static ssize_t
 sdev_show_timeout (struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct scsi_device *sdev;
 	sdev = to_scsi_device(dev);
-	return snprintf (buf, 20, "%d\n", sdev->timeout / HZ);
+	return snprintf(buf, 20, "%d\n", sdev->request_queue->rq_timeout / HZ);
 }
 
 static ssize_t
@@ -576,7 +579,7 @@ sdev_store_timeout (struct device *dev,
 	int timeout;
 	sdev = to_scsi_device(dev);
 	sscanf (buf, "%d\n", &timeout);
-	sdev->timeout = timeout * HZ;
+	blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
 	return count;
 }
 static DEVICE_ATTR(timeout, S_IRUGO | S_IWUSR, sdev_show_timeout, sdev_store_timeout);
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1950,15 +1950,15 @@ static int fc_vport_match(struct attribu
  * Notes:
  *	This routine assumes no locks are held on entry.
  */
-static enum scsi_eh_timer_return
+static enum blk_eh_timer_return
 fc_timed_out(struct scsi_cmnd *scmd)
 {
 	struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
 
 	if (rport->port_state == FC_PORTSTATE_BLOCKED)
-		return EH_RESET_TIMER;
+		return BLK_EH_RESET_TIMER;
 
-	return EH_NOT_HANDLED;
+	return BLK_EH_NOT_HANDLED;
 }
 
 /*
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -378,7 +378,6 @@ static int sd_prep_fn(struct request_que
 	sector_t block = rq->sector;
 	sector_t threshold;
 	unsigned int this_count = rq->nr_sectors;
-	unsigned int timeout = sdp->timeout;
 	int ret;
 
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -579,7 +578,6 @@ static int sd_prep_fn(struct request_que
 	SCpnt->transfersize = sdp->sector_size;
 	SCpnt->underflow = this_count << 9;
 	SCpnt->allowed = SD_MAX_RETRIES;
-	SCpnt->timeout_per_command = timeout;
 
 	/*
 	 * This indicates that the command is ready from our end to be
@@ -1837,11 +1835,12 @@ static int sd_probe(struct device *dev)
 	sdkp->openers = 0;
 	sdkp->previous_state = 1;
 
-	if (!sdp->timeout) {
+	if (!sdp->request_queue->rq_timeout) {
 		if (sdp->type != TYPE_MOD)
-			sdp->timeout = SD_TIMEOUT;
+			blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
 		else
-			sdp->timeout = SD_MOD_TIMEOUT;
+			blk_queue_rq_timeout(sdp->request_queue,
+					     SD_MOD_TIMEOUT);
 	}
 
 	device_initialize(&sdkp->dev);
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -331,7 +331,7 @@ static int sr_done(struct scsi_cmnd *SCp
 
 static int sr_prep_fn(struct request_queue *q, struct request *rq)
 {
-	int block=0, this_count, s_size, timeout = SR_TIMEOUT;
+	int block = 0, this_count, s_size;
 	struct scsi_cd *cd;
 	struct scsi_cmnd *SCpnt;
 	struct scsi_device *sdp = q->queuedata;
@@ -461,7 +461,6 @@ static int sr_prep_fn(struct request_que
 	SCpnt->transfersize = cd->device->sector_size;
 	SCpnt->underflow = this_count << 9;
 	SCpnt->allowed = MAX_RETRIES;
-	SCpnt->timeout_per_command = timeout;
 
 	/*
 	 * This indicates that the command is ready from our end to be
@@ -620,6 +619,8 @@ static int sr_probe(struct device *dev)
 	disk->fops = &sr_bdops;
 	disk->flags = GENHD_FL_CD;
 
+	blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
+
 	cd->device = sdev;
 	cd->disk = disk;
 	cd->driver = &sr_template;
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -519,8 +519,8 @@ static int sym53c8xx_queue_command(struc
 	 *  Shorten our settle_time if needed for
 	 *  this command not to time out.
 	 */
-	if (np->s.settle_time_valid && cmd->timeout_per_command) {
-		unsigned long tlimit = jiffies + cmd->timeout_per_command;
+	if (np->s.settle_time_valid && cmd->request->timeout) {
+		unsigned long tlimit = jiffies + cmd->request->timeout;
 		tlimit -= SYM_CONF_TIMER_INTERVAL*2;
 		if (time_after(np->s.settle_time, tlimit)) {
 			np->s.settle_time = tlimit;
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -147,6 +147,7 @@ struct request {
 
 	unsigned int cmd_flags;
 	enum rq_cmd_type_bits cmd_type;
+	unsigned long atomic_flags;
 
 	/* Maintain bio traversal state for part by part I/O submission.
 	 * hard_* are block layer internals, no driver should touch them!
@@ -214,6 +215,8 @@ struct request {
 	void *data;
 	void *sense;
 
+	unsigned long deadline;
+	struct list_head timeout_list;
 	unsigned int timeout;
 	int retries;
 
@@ -266,6 +269,14 @@ typedef void (prepare_flush_fn) (struct
 typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
 
+enum blk_eh_timer_return {
+	BLK_EH_NOT_HANDLED,
+	BLK_EH_HANDLED,
+	BLK_EH_RESET_TIMER,
+};
+
+typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
+
 enum blk_queue_state {
 	Queue_down,
 	Queue_up,
@@ -311,6 +322,7 @@ struct request_queue
 	merge_bvec_fn		*merge_bvec_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
+	rq_timed_out_fn		*rq_timed_out_fn;
 	dma_drain_needed_fn	*dma_drain_needed;
 
 	/*
@@ -386,6 +398,10 @@ struct request_queue
 	unsigned int		nr_sorted;
 	unsigned int		in_flight;
 
+	unsigned int		rq_timeout;
+	struct timer_list	timeout;
+	struct list_head	timeout_list;
+
 	/*
 	 * sg stuff
 	 */
@@ -762,6 +778,8 @@ extern int blk_end_request_callback(stru
 				    unsigned int nr_bytes,
 				    int (drv_callback)(struct request *));
 extern void blk_complete_request(struct request *);
+extern void __blk_complete_request(struct request *);
+extern void blk_abort_request(struct request *);
 
 /*
 * blk_end_request() takes bytes instead of sectors as a complete size.
@@ -803,6 +821,8 @@ extern void blk_queue_dma_alignment(stru
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
+extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
+extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
 extern int blk_do_ordered(struct request_queue *, struct request **);
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -75,7 +75,6 @@ struct scsi_cmnd {
 
 	int retries;
 	int allowed;
-	int timeout_per_command;
 
 	unsigned char prot_op;
 	unsigned char prot_type;
@@ -86,7 +85,6 @@ struct scsi_cmnd {
 	/* These elements define the operation we are about to perform */
 	unsigned char *cmnd;
 
-	struct timer_list eh_timeout;	/* Used to time out the command. */
 
 	/* These elements define the operation we ultimately want to perform */
 	struct scsi_data_buffer sdb;
@@ -139,7 +137,6 @@ extern void scsi_put_command(struct scsi
 extern void __scsi_put_command(struct Scsi_Host *, struct scsi_cmnd *,
 			       struct device *);
 extern void scsi_finish_command(struct scsi_cmnd *cmd);
-extern void scsi_req_abort_cmd(struct scsi_cmnd *cmd);
 
 extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
 				 size_t *offset, size_t *len);
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -43,13 +43,6 @@ struct blk_queue_tags;
 #define DISABLE_CLUSTERING 0
 #define ENABLE_CLUSTERING 1
 
-enum scsi_eh_timer_return {
-	EH_NOT_HANDLED,
-	EH_HANDLED,
-	EH_RESET_TIMER,
-};
-
-
 struct scsi_host_template {
 	struct module *module;
 	const char *name;
@@ -347,7 +340,7 @@ struct scsi_host_template {
 	 *
 	 * Status: OPTIONAL
 	 */
-	enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
+	enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
 
 	/*
 	 * Name of proc directory
--- a/include/scsi/scsi_transport.h
+++ b/include/scsi/scsi_transport.h
@@ -21,6 +21,7 @@
 #define SCSI_TRANSPORT_H
 
 #include <linux/transport_class.h>
+#include <linux/blkdev.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_device.h>
 
@@ -64,7 +65,7 @@ struct scsi_transport_template {
 	 *			begin counting again
 	 *	EH_NOT_HANDLED	Begin normal error recovery
 	 */
-	enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
+	enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
 
 	/*
 	 * Used as callback for the completion of i_t_nexus request