From: Mike Christie <michaelc@cs.wisc.edu>
Subject: Separate failfast into multiple bits
References: FATE#303485,FATE#303484

This is a combined patch from linux-2.6.git. Commit IDs:

f0c0a376d0fcd4c5579ecf5e95f88387cba85211
c5e98e912c5423a0ec2eed7aa1064578d44f8a8e
7b594131c4f38edeb13d8c6c0147949173c47013
d6d13ee19da6d291c99f980dcb76f6b7dc676804
a93ce0244f2e94dd48e0b4a2742a4e3bf196ab53
fff9d40ce0eb4b46f3e186823ceab6bc02c3e5d3
9cc328f502eacfcc52ab1c1bf9a7729cf12f14be
a4dfaa6f2e55b736adf2719133996f7e7dc309bc
56d7fcfa815564b40a1b0ec7a30ea8cb3bc0713e
f46e307da925a7b71a0018c0510cdc6e588b87fc
056a44834950ffa51fafa6c76a720fa32e86851a
6000a368cd8e6da1caf101411bdb494cd6fb8b09
4a27446f3e39b06c28d1c8e31d33a5340826ed5c

Signed-off-by: Hannes Reinecke <hare@suse.de>

---
 block/blk-core.c                            |   11 ++
 drivers/md/dm-mpath.c                       |    2
 drivers/md/multipath.c                      |    4 -
 drivers/s390/block/dasd_diag.c              |    2
 drivers/s390/block/dasd_eckd.c              |    2
 drivers/s390/block/dasd_fba.c               |    2
 drivers/scsi/constants.c                    |    3
 drivers/scsi/device_handler/scsi_dh_alua.c  |    3
 drivers/scsi/device_handler/scsi_dh_emc.c   |    3
 drivers/scsi/device_handler/scsi_dh_hp_sw.c |    6 +
 drivers/scsi/device_handler/scsi_dh_rdac.c  |    3
 drivers/scsi/ibmvscsi/ibmvfc.c              |    2
 drivers/scsi/libiscsi.c                     |   16 ++--
 drivers/scsi/lpfc/lpfc_hbadisc.c            |    8 --
 drivers/scsi/lpfc/lpfc_scsi.c               |    9 +-
 drivers/scsi/qla2xxx/qla_attr.c             |    1
 drivers/scsi/qla2xxx/qla_isr.c              |   14 +++
 drivers/scsi/qla2xxx/qla_os.c               |   26 +++---
 drivers/scsi/qla4xxx/ql4_isr.c              |    4 -
 drivers/scsi/qla4xxx/ql4_os.c               |    2
 drivers/scsi/scsi.c                         |   10 ++
 drivers/scsi/scsi_error.c                   |   53 +++++++++++++-
 drivers/scsi/scsi_lib.c                     |  106 +++++++++++++++++++++++-----
 drivers/scsi/scsi_priv.h                    |    1
 drivers/scsi/scsi_scan.c                    |    1
 drivers/scsi/scsi_transport_fc.c            |   47 +++++++-----
 drivers/scsi/scsi_transport_iscsi.c         |    4 -
 drivers/scsi/scsi_transport_spi.c           |    4 -
 include/linux/bio.h                         |   38 +++++++---
 include/linux/blkdev.h                      |   15 +++
 include/scsi/scsi.h                         |    6 +
 include/scsi/scsi_device.h                  |   10 ++
 include/scsi/scsi_transport_fc.h            |    8 +-
 33 files changed, 311 insertions(+), 115 deletions(-)

--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1100,8 +1100,15 @@ void init_request_from_bio(struct reques
 	/*
 	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
 	 */
-	if (bio_rw_ahead(bio) || bio_failfast(bio))
-		req->cmd_flags |= REQ_FAILFAST;
+	if (bio_rw_ahead(bio))
+		req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+				   REQ_FAILFAST_DRIVER);
+	if (bio_failfast_dev(bio))
+		req->cmd_flags |= REQ_FAILFAST_DEV;
+	if (bio_failfast_transport(bio))
+		req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+	if (bio_failfast_driver(bio))
+		req->cmd_flags |= REQ_FAILFAST_DRIVER;

 	/*
 	 * REQ_BARRIER implies no merging, but lets make it explicit
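
To see the new bio-to-request mapping in isolation, here is a compilable userspace sketch (not part of the patch). The bit positions and the helper name init_request_flags() are stand-ins for illustration, not the kernel's:

#include <stdio.h>

/* stand-in bit positions, mirroring the shape (not the values) of the flags */
enum { BIO_RW_AHEAD, BIO_RW_FAILFAST_DEV, BIO_RW_FAILFAST_TRANSPORT,
       BIO_RW_FAILFAST_DRIVER };
enum { REQ_FAILFAST_DEV = 1 << 0, REQ_FAILFAST_TRANSPORT = 1 << 1,
       REQ_FAILFAST_DRIVER = 1 << 2 };

static unsigned int init_request_flags(unsigned long bi_rw)
{
	unsigned int cmd_flags = 0;

	/* read-ahead is speculative, so every error class fails fast */
	if (bi_rw & (1UL << BIO_RW_AHEAD))
		cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			     REQ_FAILFAST_DRIVER;
	/* otherwise each bio bit maps to exactly one request bit */
	if (bi_rw & (1UL << BIO_RW_FAILFAST_DEV))
		cmd_flags |= REQ_FAILFAST_DEV;
	if (bi_rw & (1UL << BIO_RW_FAILFAST_TRANSPORT))
		cmd_flags |= REQ_FAILFAST_TRANSPORT;
	if (bi_rw & (1UL << BIO_RW_FAILFAST_DRIVER))
		cmd_flags |= REQ_FAILFAST_DRIVER;
	return cmd_flags;
}

int main(void)
{
	/* a dm-multipath style bio: only transport errors fail fast */
	printf("%#x\n", init_request_flags(1UL << BIO_RW_FAILFAST_TRANSPORT));
	return 0;
}
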
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -857,7 +857,7 @@ static int multipath_map(struct dm_targe
 	dm_bio_record(&mpio->details, bio);

 	map_context->ptr = mpio;
-	bio->bi_rw |= (1 << BIO_RW_FAILFAST);
+	bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
 	r = map_io(m, bio, mpio, 0);
 	if (r < 0 || r == DM_MAPIO_REQUEUE)
 		mempool_free(mpio, m->mpio_pool);
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -172,7 +172,7 @@ static int multipath_make_request (struc
 	mp_bh->bio = *bio;
 	mp_bh->bio.bi_sector += multipath->rdev->data_offset;
 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
-	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST);
+	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
 	mp_bh->bio.bi_end_io = multipath_end_request;
 	mp_bh->bio.bi_private = mp_bh;
 	generic_make_request(&mp_bh->bio);
@@ -398,7 +398,7 @@ static void multipathd (mddev_t *mddev)
 			*bio = *(mp_bh->master_bio);
 			bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
 			bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
-			bio->bi_rw |= (1 << BIO_RW_FAILFAST);
+			bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
 			bio->bi_end_io = multipath_end_request;
 			bio->bi_private = mp_bh;
 			generic_make_request(bio);
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -544,7 +544,7 @@ static struct dasd_ccw_req *dasd_diag_bu
 	}
 	cqr->retries = DIAG_MAX_RETRIES;
 	cqr->buildclk = get_clock();
-	if (req->cmd_flags & REQ_FAILFAST)
+	if (blk_noretry_request(req))
 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->startdev = memdev;
 	cqr->memdev = memdev;
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1700,7 +1700,7 @@ static struct dasd_ccw_req *dasd_eckd_bu
 			recid++;
 		}
 	}
-	if (req->cmd_flags & REQ_FAILFAST)
+	if (blk_noretry_request(req))
 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->startdev = startdev;
 	cqr->memdev = startdev;
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -355,7 +355,7 @@ static struct dasd_ccw_req *dasd_fba_bui
 			recid++;
 		}
 	}
-	if (req->cmd_flags & REQ_FAILFAST)
+	if (blk_noretry_request(req))
 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 	cqr->startdev = memdev;
 	cqr->memdev = memdev;
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -1364,7 +1364,8 @@ EXPORT_SYMBOL(scsi_print_sense);
 static const char * const hostbyte_table[]={
 "DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
 "DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
-"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE"};
+"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
+"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST" };
 #define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)

 static const char * const driverbyte_table[]={
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -109,7 +109,8 @@ static struct request *get_alua_req(stru
 	}

 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+			 REQ_FAILFAST_DRIVER | REQ_NOMERGE;
 	rq->retries = ALUA_FAILOVER_RETRIES;
 	rq->timeout = ALUA_FAILOVER_TIMEOUT;

--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -303,7 +303,8 @@ static struct request *get_req(struct sc
 	rq->cmd[4] = len;
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	rq->cmd_flags |= REQ_FAILFAST;
+	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+			 REQ_FAILFAST_DRIVER;
 	rq->timeout = CLARIION_TIMEOUT;
 	rq->retries = CLARIION_RETRIES;

--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -112,7 +112,8 @@ static int hp_sw_tur(struct scsi_device
 		return SCSI_DH_RES_TEMP_UNAVAIL;

 	req->cmd_type = REQ_TYPE_BLOCK_PC;
-	req->cmd_flags |= REQ_FAILFAST;
+	req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+			  REQ_FAILFAST_DRIVER;
 	req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
 	req->cmd[0] = TEST_UNIT_READY;
 	req->timeout = HP_SW_TIMEOUT;
@@ -204,7 +205,8 @@ static int hp_sw_start_stop(struct scsi_
 		return SCSI_DH_RES_TEMP_UNAVAIL;

 	req->cmd_type = REQ_TYPE_BLOCK_PC;
-	req->cmd_flags |= REQ_FAILFAST;
+	req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+			  REQ_FAILFAST_DRIVER;
 	req->cmd_len = COMMAND_SIZE(START_STOP);
 	req->cmd[0] = START_STOP;
 	req->cmd[4] = 1;	/* Start spin cycle */
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -226,7 +226,8 @@ static struct request *get_rdac_req(stru
 	}

 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
+	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+			 REQ_FAILFAST_DRIVER;
 	rq->retries = RDAC_RETRIES;
 	rq->timeout = RDAC_TIMEOUT;

--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2032,8 +2032,6 @@ static void ibmvfc_terminate_rport_io(st
 		spin_unlock_irqrestore(shost->host_lock, flags);
 	} else
 		ibmvfc_issue_fc_host_lip(shost);
-
-	scsi_target_unblock(&rport->dev);
 	LEAVE;
 }

--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1194,15 +1192,13 @@ int iscsi_queuecommand(struct scsi_cmnd
 	switch (session->state) {
 	case ISCSI_STATE_IN_RECOVERY:
 		reason = FAILURE_SESSION_IN_RECOVERY;
-		sc->result = DID_IMM_RETRY << 16;
-		break;
+		goto reject;
 	case ISCSI_STATE_LOGGING_OUT:
 		reason = FAILURE_SESSION_LOGGING_OUT;
-		sc->result = DID_IMM_RETRY << 16;
-		break;
+		goto reject;
 	case ISCSI_STATE_RECOVERY_FAILED:
 		reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
-		sc->result = DID_NO_CONNECT << 16;
+		sc->result = DID_TRANSPORT_FAILFAST << 16;
 		break;
 	case ISCSI_STATE_TERMINATE:
 		reason = FAILURE_SESSION_TERMINATE;
@@ -1267,7 +1265,7 @@ reject:
 	spin_unlock(&session->lock);
 	debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
 	spin_lock(host->host_lock);
-	return SCSI_MLQUEUE_HOST_BUSY;
+	return SCSI_MLQUEUE_TARGET_BUSY;

 fault:
 	spin_unlock(&session->lock);
@@ -2337,8 +2335,10 @@ static void iscsi_start_session_recovery
 	 * flush queues.
 	 */
 	spin_lock_bh(&session->lock);
-	fail_all_commands(conn, -1,
-			  flag == STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR);
+	if (flag == STOP_CONN_RECOVER)
+		fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED);
+	else
+		fail_all_commands(conn, -1, DID_ERROR);
 	flush_control_queues(session, conn);
 	spin_unlock_bh(&session->lock);
 	mutex_unlock(&session->eh_mutex);
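
The libiscsi hunks above change two things at once: sessions that are still recovering now requeue the command (per-target busy) instead of completing it with DID_IMM_RETRY, and a session whose recovery has already failed completes IO with the new fast-fail code. A minimal model of that decision (dispose() and the enums are invented names for illustration only):

#include <stdio.h>

enum session_state { IN_RECOVERY, LOGGING_OUT, RECOVERY_FAILED };
enum outcome { REQUEUE_TARGET_BUSY, COMPLETE_TRANSPORT_FAILFAST };

static enum outcome dispose(enum session_state s)
{
	switch (s) {
	case IN_RECOVERY:	/* was: complete with DID_IMM_RETRY */
	case LOGGING_OUT:	/* was: complete with DID_IMM_RETRY */
		return REQUEUE_TARGET_BUSY;
	case RECOVERY_FAILED:	/* was: DID_NO_CONNECT */
		return COMPLETE_TRANSPORT_FAILFAST;
	}
	return REQUEUE_TARGET_BUSY;
}

int main(void)
{
	printf("%d %d\n", dispose(IN_RECOVERY), dispose(RECOVERY_FAILED));
	return 0;
}
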
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -88,14 +88,6 @@ lpfc_terminate_rport_io(struct fc_rport
 				    &phba->sli.ring[phba->sli.fcp_ring],
 				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
 	}
-
-	/*
-	 * A device is normally blocked for rediscovery and unblocked when
-	 * devloss timeout happens. In case a vport is removed or driver
-	 * unloaded before devloss timeout happens, we need to unblock here.
-	 */
-	scsi_target_unblock(&rport->dev);
-	return;
 }

 /*
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -966,10 +966,9 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd
 	 * Catch race where our node has transitioned, but the
 	 * transport is still transitioning.
 	 */
-	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
-		cmnd->result = ScsiResult(DID_BUS_BUSY, 0);
-		goto out_fail_command;
-	}
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		goto out_target_busy;
+
 	lpfc_cmd = lpfc_get_scsi_buf(phba);
 	if (lpfc_cmd == NULL) {
 		lpfc_adjust_queue_depth(phba);
@@ -1014,6 +1013,8 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd
 	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
 	return SCSI_MLQUEUE_HOST_BUSY;
+out_target_busy:
+	return SCSI_MLQUEUE_TARGET_BUSY;

 out_fail_command:
 	done(cmnd);
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1005,7 +1005,6 @@ qla2x00_terminate_rport_io(struct fc_rpo
 	}

 	qla2x00_abort_fcport_cmds(fcport);
-	scsi_target_unblock(&rport->dev);
 }

 static int
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1184,7 +1184,12 @@ qla2x00_status_entry(scsi_qla_host_t *ha
 		    cp->serial_number, comp_status,
 		    atomic_read(&fcport->state)));

-		cp->result = DID_BUS_BUSY << 16;
+		/*
+		 * We are going to have the fc class block the rport
+		 * while we try to recover so instruct the mid layer
+		 * to requeue until the class decides how to handle this.
+		 */
+		cp->result = DID_TRANSPORT_DISRUPTED << 16;
 		if (atomic_read(&fcport->state) == FCS_ONLINE)
 			qla2x00_mark_device_lost(fcport->ha, fcport, 1, 1);
 		break;
@@ -1211,7 +1216,12 @@ qla2x00_status_entry(scsi_qla_host_t *ha
 		break;

 	case CS_TIMEOUT:
-		cp->result = DID_BUS_BUSY << 16;
+		/*
+		 * We are going to have the fc class block the rport
+		 * while we try to recover so instruct the mid layer
+		 * to requeue until the class decides how to handle this.
+		 */
+		cp->result = DID_TRANSPORT_DISRUPTED << 16;

 		if (IS_FWI2_CAPABLE(ha)) {
 			DEBUG2(printk(KERN_INFO
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -394,10 +394,8 @@ qla2x00_queuecommand(struct scsi_cmnd *c
 	}

 	/* Close window on fcport/rport state-transitioning. */
-	if (fcport->drport) {
-		cmd->result = DID_IMM_RETRY << 16;
-		goto qc_fail_command;
-	}
+	if (fcport->drport)
+		goto qc_target_busy;

 	if (atomic_read(&fcport->state) != FCS_ONLINE) {
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
@@ -405,7 +403,7 @@ qla2x00_queuecommand(struct scsi_cmnd *c
 			cmd->result = DID_NO_CONNECT << 16;
 			goto qc_fail_command;
 		}
-		goto qc_host_busy;
+		goto qc_target_busy;
 	}

 	spin_unlock_irq(ha->host->host_lock);
@@ -428,10 +426,11 @@ qc_host_busy_free_sp:

 qc_host_busy_lock:
 	spin_lock_irq(ha->host->host_lock);
-
-qc_host_busy:
 	return SCSI_MLQUEUE_HOST_BUSY;

+qc_target_busy:
+	return SCSI_MLQUEUE_TARGET_BUSY;
+
 qc_fail_command:
 	done(cmd);

@@ -461,10 +460,8 @@ qla24xx_queuecommand(struct scsi_cmnd *c
 	}

 	/* Close window on fcport/rport state-transitioning. */
-	if (fcport->drport) {
-		cmd->result = DID_IMM_RETRY << 16;
-		goto qc24_fail_command;
-	}
+	if (fcport->drport)
+		goto qc24_target_busy;

 	if (atomic_read(&fcport->state) != FCS_ONLINE) {
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
@@ -472,7 +469,7 @@ qla24xx_queuecommand(struct scsi_cmnd *c
 			cmd->result = DID_NO_CONNECT << 16;
 			goto qc24_fail_command;
 		}
-		goto qc24_host_busy;
+		goto qc24_target_busy;
 	}

 	spin_unlock_irq(ha->host->host_lock);
@@ -495,10 +492,11 @@ qc24_host_busy_free_sp:

 qc24_host_busy_lock:
 	spin_lock_irq(ha->host->host_lock);
-
-qc24_host_busy:
 	return SCSI_MLQUEUE_HOST_BUSY;

+qc24_target_busy:
+	return SCSI_MLQUEUE_TARGET_BUSY;
+
 qc24_fail_command:
 	done(cmd);

--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -139,7 +139,7 @@ static void qla4xxx_status_entry(struct
 			      ha->host_no, cmd->device->channel,
 			      cmd->device->id, cmd->device->lun));

-		cmd->result = DID_BUS_BUSY << 16;
+		cmd->result = DID_TRANSPORT_DISRUPTED << 16;

 		/*
 		 * Mark device missing so that we won't continue to send
@@ -243,7 +243,7 @@ static void qla4xxx_status_entry(struct
 		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
 			qla4xxx_mark_device_missing(ha, ddb_entry);

-		cmd->result = DID_BUS_BUSY << 16;
+		cmd->result = DID_TRANSPORT_DISRUPTED << 16;
 		break;

 	case SCS_QUEUE_FULL:
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -439,7 +439,7 @@ static int qla4xxx_queuecommand(struct s
 			cmd->result = DID_NO_CONNECT << 16;
 			goto qc_fail_command;
 		}
-		goto qc_host_busy;
+		return SCSI_MLQUEUE_TARGET_BUSY;
 	}

 	if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags))
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -754,8 +754,12 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
 	}
 	spin_unlock_irqrestore(host->host_lock, flags);
 	if (rtn) {
-		scsi_queue_insert(cmd, (rtn == SCSI_MLQUEUE_DEVICE_BUSY) ?
-				  rtn : SCSI_MLQUEUE_HOST_BUSY);
+		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
+		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
+			rtn = SCSI_MLQUEUE_HOST_BUSY;
+
+		scsi_queue_insert(cmd, rtn);
+
 		SCSI_LOG_MLQUEUE(3,
 			printk("queuecommand : request rejected\n"));
 	}
@@ -800,6 +804,7 @@ static struct scsi_driver *scsi_cmd_to_d
 void scsi_finish_command(struct scsi_cmnd *cmd)
 {
 	struct scsi_device *sdev = cmd->device;
+	struct scsi_target *starget = scsi_target(sdev);
 	struct Scsi_Host *shost = sdev->host;
 	struct scsi_driver *drv;
 	unsigned int good_bytes;
@@ -815,6 +820,7 @@ void scsi_finish_command(struct scsi_cmn
 	 * XXX(hch): What about locking?
 	 */
 	shost->host_blocked = 0;
+	starget->target_blocked = 0;
 	sdev->device_blocked = 0;

 	/*
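
The scsi.c hunk gives the mid-layer a simple rule for unknown queuecommand return values. A userspace rendering (normalize_busy() is an invented name; the 0x105x codes are the ones defined in the scsi.h hunk near the end of this patch):

#include <stdio.h>

#define SCSI_MLQUEUE_HOST_BUSY		0x1055
#define SCSI_MLQUEUE_DEVICE_BUSY	0x1056
#define SCSI_MLQUEUE_TARGET_BUSY	0x1058

static int normalize_busy(int rtn)
{
	/* anything that isn't an explicit device/target busy is treated
	 * as host busy, exactly as scsi_dispatch_cmd() now does */
	if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && rtn != SCSI_MLQUEUE_TARGET_BUSY)
		rtn = SCSI_MLQUEUE_HOST_BUSY;
	return rtn;
}

int main(void)
{
	printf("%#x\n", normalize_busy(1));			   /* host busy */
	printf("%#x\n", normalize_busy(SCSI_MLQUEUE_TARGET_BUSY)); /* unchanged */
	return 0;
}
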
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1219,6 +1219,40 @@ static void scsi_eh_offline_sdevs(struct
 }

 /**
+ * scsi_noretry_cmd - determine if command should be failed fast
+ * @scmd: SCSI cmd to examine.
+ */
+int scsi_noretry_cmd(struct scsi_cmnd *scmd)
+{
+	switch (host_byte(scmd->result)) {
+	case DID_OK:
+		break;
+	case DID_BUS_BUSY:
+		return blk_failfast_transport(scmd->request);
+	case DID_PARITY:
+		return blk_failfast_dev(scmd->request);
+	case DID_ERROR:
+		if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
+		    status_byte(scmd->result) == RESERVATION_CONFLICT)
+			return 0;
+		/* fall through */
+	case DID_SOFT_ERROR:
+		return blk_failfast_driver(scmd->request);
+	}
+
+	switch (status_byte(scmd->result)) {
+	case CHECK_CONDITION:
+		/*
+		 * assume caller has checked sense and determined
+		 * the check condition was retryable.
+		 */
+		return blk_failfast_dev(scmd->request);
+	}
+
+	return 0;
+}
+
+/**
 * scsi_decide_disposition - Disposition a cmd on return from LLD.
 * @scmd: SCSI cmd to examine.
 *
@@ -1290,7 +1324,20 @@ int scsi_decide_disposition(struct scsi_

 	case DID_REQUEUE:
 		return ADD_TO_MLQUEUE;
-
+	case DID_TRANSPORT_DISRUPTED:
+		/*
+		 * LLD/transport was disrupted during processing of the IO.
+		 * The transport class is now blocked/blocking,
+		 * and the transport will decide what to do with the IO
+		 * based on its timers and recovery capabilities.
+		 */
+		return ADD_TO_MLQUEUE;
+	case DID_TRANSPORT_FAILFAST:
+		/*
+		 * The transport decided to failfast the IO (most likely
+		 * the fast io fail tmo fired), so send IO directly upwards.
+		 */
+		return SUCCESS;
 	case DID_ERROR:
 		if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
 		    status_byte(scmd->result) == RESERVATION_CONFLICT)
@@ -1383,7 +1430,7 @@ int scsi_decide_disposition(struct scsi_
 	 * even if the request is marked fast fail, we still requeue
 	 * for queue congestion conditions (QUEUE_FULL or BUSY) */
 	if ((++scmd->retries) <= scmd->allowed
-	    && !blk_noretry_request(scmd->request)) {
+	    && !scsi_noretry_cmd(scmd)) {
 		return NEEDS_RETRY;
 	} else {
 		/*
@@ -1508,7 +1555,7 @@ void scsi_eh_flush_done_q(struct list_he
 	list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
 		list_del_init(&scmd->eh_entry);
 		if (scsi_device_online(scmd->device) &&
-		    !blk_noretry_request(scmd->request) &&
+		    !scsi_noretry_cmd(scmd) &&
 		    (++scmd->retries <= scmd->allowed)) {
 			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
 					" retry cmd: %p\n",
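
scsi_noretry_cmd() is the heart of the patch: each error class consults only its own failfast bit. The table can be exercised outside the kernel with this sketch (the struct and helper are stand-ins; the real code reads the bits off scmd->request):

#include <stdio.h>

enum host_byte { DID_OK, DID_BUS_BUSY, DID_PARITY, DID_ERROR, DID_SOFT_ERROR };
struct req_flags { int ff_dev, ff_transport, ff_driver; };

static int noretry(enum host_byte hb, const struct req_flags *rq,
		   int reservation_conflict, int retryable_check_condition)
{
	switch (hb) {
	case DID_OK:
		break;
	case DID_BUS_BUSY:
		return rq->ff_transport;	/* transport-class error */
	case DID_PARITY:
		return rq->ff_dev;		/* device-class error */
	case DID_ERROR:
		if (reservation_conflict)
			return 0;		/* never fail these fast */
		/* fall through */
	case DID_SOFT_ERROR:
		return rq->ff_driver;		/* driver-class error */
	}
	/* a retryable check condition counts as a device error */
	if (retryable_check_condition)
		return rq->ff_dev;
	return 0;
}

int main(void)
{
	struct req_flags rq = { .ff_transport = 1 };

	printf("%d\n", noretry(DID_BUS_BUSY, &rq, 0, 0));	/* 1 */
	printf("%d\n", noretry(DID_PARITY, &rq, 0, 0));		/* 0 */
	return 0;
}
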
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -114,6 +114,7 @@ int scsi_queue_insert(struct scsi_cmnd *
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct scsi_device *device = cmd->device;
+	struct scsi_target *starget = scsi_target(device);
 	struct request_queue *q = device->request_queue;
 	unsigned long flags;

@@ -133,10 +134,17 @@ int scsi_queue_insert(struct scsi_cmnd *
 	 * if a command is requeued with no other commands outstanding
 	 * either for the device or for the host.
 	 */
-	if (reason == SCSI_MLQUEUE_HOST_BUSY)
+	switch (reason) {
+	case SCSI_MLQUEUE_HOST_BUSY:
 		host->host_blocked = host->max_host_blocked;
-	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
+		break;
+	case SCSI_MLQUEUE_DEVICE_BUSY:
 		device->device_blocked = device->max_device_blocked;
+		break;
+	case SCSI_MLQUEUE_TARGET_BUSY:
+		starget->target_blocked = starget->max_target_blocked;
+		break;
+	}

 	/*
 	 * Decrement the counters, since these commands are no longer
@@ -460,10 +468,12 @@ static void scsi_init_cmd_errh(struct sc
 void scsi_device_unbusy(struct scsi_device *sdev)
 {
 	struct Scsi_Host *shost = sdev->host;
+	struct scsi_target *starget = scsi_target(sdev);
 	unsigned long flags;

 	spin_lock_irqsave(shost->host_lock, flags);
 	shost->host_busy--;
+	starget->target_busy--;
 	if (unlikely(scsi_host_in_recovery(shost) &&
 		     (shost->host_failed || shost->host_eh_scheduled)))
 		scsi_eh_wakeup(shost);
@@ -519,6 +529,13 @@ static void scsi_single_lun_run(struct s
 	spin_unlock_irqrestore(shost->host_lock, flags);
 }

+static inline int scsi_target_is_busy(struct scsi_target *starget)
+{
+	return ((starget->can_queue > 0 &&
+		 starget->target_busy >= starget->can_queue) ||
+		 starget->target_blocked);
+}
+
 /*
 * Function:	scsi_run_queue()
 *
@@ -533,7 +550,7 @@ static void scsi_single_lun_run(struct s
 */
 static void scsi_run_queue(struct request_queue *q)
 {
-	struct scsi_device *sdev = q->queuedata;
+	struct scsi_device *starved_head = NULL, *sdev = q->queuedata;
 	struct Scsi_Host *shost = sdev->host;
 	unsigned long flags;

@@ -560,6 +577,21 @@ static void scsi_run_queue(struct reques
 		 */
 		sdev = list_entry(shost->starved_list.next,
				  struct scsi_device, starved_entry);
+		/*
+		 * The *queue_ready functions can add a device back onto the
+		 * starved list's tail, so we must check for an infinite loop.
+		 */
+		if (sdev == starved_head)
+			break;
+		if (!starved_head)
+			starved_head = sdev;
+
+		if (scsi_target_is_busy(scsi_target(sdev))) {
+			list_move_tail(&sdev->starved_entry,
+				       &shost->starved_list);
+			continue;
+		}
+
 		list_del_init(&sdev->starved_entry);
 		spin_unlock(shost->host_lock);

@@ -575,13 +607,6 @@ static void scsi_run_queue(struct reques
 		spin_unlock(sdev->request_queue->queue_lock);

 		spin_lock(shost->host_lock);
-		if (unlikely(!list_empty(&sdev->starved_entry)))
-			/*
-			 * sdev lost a race, and was put back on the
-			 * starved list. This is unlikely but without this
-			 * in theory we could loop forever.
-			 */
-			break;
 	}
 	spin_unlock_irqrestore(shost->host_lock, flags);

@@ -681,7 +706,7 @@ static struct scsi_cmnd *scsi_end_reques
 			leftover = req->data_len;

 		/* kill remainder if no retrys */
-		if (error && blk_noretry_request(req))
+		if (error && scsi_noretry_cmd(cmd))
 			blk_end_request(req, error, leftover);
 		else {
 			if (requeue) {
@@ -1344,6 +1369,52 @@ static inline int scsi_dev_queue_ready(s
 	return 1;
 }

+
+/*
+ * scsi_target_queue_ready: checks if we can send commands to the target
+ * @sdev: scsi device on starget to check.
+ *
+ * Called with the host lock held.
+ */
+static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
+					  struct scsi_device *sdev)
+{
+	struct scsi_target *starget = scsi_target(sdev);
+
+	if (starget->single_lun) {
+		if (starget->starget_sdev_user &&
+		    starget->starget_sdev_user != sdev)
+			return 0;
+		starget->starget_sdev_user = sdev;
+	}
+
+	if (starget->target_busy == 0 && starget->target_blocked) {
+		/*
+		 * unblock after target_blocked iterates to zero
+		 */
+		if (--starget->target_blocked == 0) {
+			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
+					 "unblocking target at zero depth\n"));
+		} else {
+			blk_plug_device(sdev->request_queue);
+			return 0;
+		}
+	}
+
+	if (scsi_target_is_busy(starget)) {
+		if (list_empty(&sdev->starved_entry)) {
+			list_add_tail(&sdev->starved_entry,
+				      &shost->starved_list);
+			return 0;
+		}
+	}
+
+	/* We're OK to process the command, so we can't be starved */
+	if (!list_empty(&sdev->starved_entry))
+		list_del_init(&sdev->starved_entry);
+	return 1;
+}
+
 /*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
@@ -1390,6 +1461,7 @@ static void scsi_kill_request(struct req
 {
 	struct scsi_cmnd *cmd = req->special;
 	struct scsi_device *sdev = cmd->device;
+	struct scsi_target *starget = scsi_target(sdev);
 	struct Scsi_Host *shost = sdev->host;

 	blkdev_dequeue_request(req);
@@ -1413,6 +1485,7 @@ static void scsi_kill_request(struct req
 	spin_unlock(sdev->request_queue->queue_lock);
 	spin_lock(shost->host_lock);
 	shost->host_busy++;
+	starget->target_busy++;
 	spin_unlock(shost->host_lock);
 	spin_lock(sdev->request_queue->queue_lock);

@@ -1550,14 +1623,13 @@ static void scsi_request_fn(struct reque
 			goto not_ready;
 		}

+		if (!scsi_target_queue_ready(shost, sdev))
+			goto not_ready;
+
 		if (!scsi_host_queue_ready(q, shost, sdev))
 			goto not_ready;
-		if (scsi_target(sdev)->single_lun) {
-			if (scsi_target(sdev)->starget_sdev_user &&
-			    scsi_target(sdev)->starget_sdev_user != sdev)
-				goto not_ready;
-			scsi_target(sdev)->starget_sdev_user = sdev;
-		}
+
+		scsi_target(sdev)->target_busy++;
 		shost->host_busy++;

 		/*
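
The per-target throttling added to scsi_lib.c mirrors the existing host/device counters: target_blocked is loaded with max_target_blocked on a TARGET_BUSY requeue and then decays, one dispatch attempt at a time, once the target has drained. A small model of scsi_target_is_busy()/scsi_target_queue_ready() (field and function names shortened; no locking, which the real code does under the host lock):

#include <stdio.h>

struct target { int can_queue, busy, blocked; };

static int target_is_busy(const struct target *t)
{
	/* can_queue == 0 means "no limit", as the new comment in
	 * scsi_device.h spells out */
	return (t->can_queue > 0 && t->busy >= t->can_queue) || t->blocked;
}

static int target_queue_ready(struct target *t)
{
	if (t->busy == 0 && t->blocked) {
		if (--t->blocked != 0)	/* still cooling down: plug queue */
			return 0;
	}
	return !target_is_busy(t);
}

int main(void)
{
	struct target t = { .can_queue = 2, .blocked = 2 };

	for (int i = 0; i < 3; i++)	/* ready only once blocked decays */
		printf("attempt %d: ready=%d\n", i, target_queue_ready(&t));
	return 0;
}
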
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -59,6 +59,7 @@ void scsi_eh_ready_devs(struct Scsi_Host
 			struct list_head *done_q);
 int scsi_eh_get_sense(struct list_head *work_q,
 		      struct list_head *done_q);
+int scsi_noretry_cmd(struct scsi_cmnd *scmd);

 /* scsi_lib.c */
 extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -419,6 +419,7 @@ static struct scsi_target *scsi_alloc_ta
 	dev->type = &scsi_target_type;
 	starget->id = id;
 	starget->channel = channel;
+	starget->can_queue = 0;
 	INIT_LIST_HEAD(&starget->siblings);
 	INIT_LIST_HEAD(&starget->devices);
 	starget->state = STARGET_CREATED;
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2133,8 +2133,7 @@ fc_attach_transport(struct fc_function_t
 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state);
 	SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
-	if (ft->terminate_rport_io)
-		SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
+	SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);

 	BUG_ON(count > FC_RPORT_NUM_ATTRS);

@@ -2328,6 +2327,22 @@ fc_remove_host(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL(fc_remove_host);

+static void fc_terminate_rport_io(struct fc_rport *rport)
+{
+	struct Scsi_Host *shost = rport_to_shost(rport);
+	struct fc_internal *i = to_fc_internal(shost->transportt);
+
+	/* Involve the LLDD if possible to terminate all io on the rport. */
+	if (i->f->terminate_rport_io)
+		i->f->terminate_rport_io(rport);
+
+	/*
+	 * must unblock to flush queued IO. The caller will have set
+	 * the port_state or flags, so that fc_remote_port_chkready will
+	 * fail IO.
+	 */
+	scsi_target_unblock(&rport->dev);
+}

 /**
 * fc_starget_delete - called to delete the scsi decendents of an rport
@@ -2340,13 +2355,8 @@ fc_starget_delete(struct work_struct *wo
 {
 	struct fc_rport *rport =
 		container_of(work, struct fc_rport, stgt_delete_work);
-	struct Scsi_Host *shost = rport_to_shost(rport);
-	struct fc_internal *i = to_fc_internal(shost->transportt);
-
-	/* Involve the LLDD if possible to terminate all io on the rport. */
-	if (i->f->terminate_rport_io)
-		i->f->terminate_rport_io(rport);

+	fc_terminate_rport_io(rport);
 	scsi_remove_target(&rport->dev);
 }

@@ -2372,10 +2382,7 @@ fc_rport_final_delete(struct work_struct
 	if (rport->flags & FC_RPORT_SCAN_PENDING)
 		scsi_flush_work(shost);

-	/* involve the LLDD to terminate all pending i/o */
-	if (i->f->terminate_rport_io)
-		i->f->terminate_rport_io(rport);
-
+	fc_terminate_rport_io(rport);
 	/*
 	 * Cancel any outstanding timers. These should really exist
 	 * only when rmmod'ing the LLDD and we're asking for
@@ -2639,7 +2646,8 @@ fc_remote_port_add(struct Scsi_Host *sho

 				spin_lock_irqsave(shost->host_lock, flags);

-				rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
+				rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
+						  FC_RPORT_DEVLOSS_PENDING);

 				/* if target, initiate a scan */
 				if (rport->scsi_target_id != -1) {
@@ -2702,6 +2710,7 @@ fc_remote_port_add(struct Scsi_Host *sho
 	rport->port_id = ids->port_id;
 	rport->roles = ids->roles;
 	rport->port_state = FC_PORTSTATE_ONLINE;
+	rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;

 	if (fci->f->dd_fcrport_size)
 		memset(rport->dd_data, 0,
@@ -2784,7 +2793,6 @@ void
 fc_remote_port_delete(struct fc_rport *rport)
 {
 	struct Scsi_Host *shost = rport_to_shost(rport);
-	struct fc_internal *i = to_fc_internal(shost->transportt);
 	int timeout = rport->dev_loss_tmo;
 	unsigned long flags;

@@ -2830,7 +2838,7 @@ fc_remote_port_delete(struct fc_rport *

 	/* see if we need to kill io faster than waiting for device loss */
 	if ((rport->fast_io_fail_tmo != -1) &&
-	    (rport->fast_io_fail_tmo < timeout) && (i->f->terminate_rport_io))
+	    (rport->fast_io_fail_tmo < timeout))
 		fc_queue_devloss_work(shost, &rport->fail_io_work,
 					rport->fast_io_fail_tmo * HZ);

@@ -2906,7 +2914,8 @@ fc_remote_port_rolechg(struct fc_rport
 			fc_flush_devloss(shost);

 		spin_lock_irqsave(shost->host_lock, flags);
-		rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
+		rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
+				  FC_RPORT_DEVLOSS_PENDING);
 		spin_unlock_irqrestore(shost->host_lock, flags);

 		/* ensure any stgt delete functions are done */
@@ -3001,6 +3010,7 @@ fc_timeout_deleted_rport(struct work_str
 	rport->supported_classes = FC_COS_UNSPECIFIED;
 	rport->roles = FC_PORT_ROLE_UNKNOWN;
 	rport->port_state = FC_PORTSTATE_NOTPRESENT;
+	rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;

 	/* remove the identifiers that aren't used in the consisting binding */
 	switch (fc_host->tgtid_bind_type) {
@@ -3043,13 +3053,12 @@ fc_timeout_fail_rport_io(struct work_str
 {
 	struct fc_rport *rport =
 		container_of(work, struct fc_rport, fail_io_work.work);
-	struct Scsi_Host *shost = rport_to_shost(rport);
-	struct fc_internal *i = to_fc_internal(shost->transportt);

 	if (rport->port_state != FC_PORTSTATE_BLOCKED)
 		return;

-	i->f->terminate_rport_io(rport);
+	rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT;
+	fc_terminate_rport_io(rport);
 }

 /**
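
All three call sites that used to invoke the LLDD's terminate_rport_io() directly now go through fc_terminate_rport_io(), which also unblocks the target so queued IO gets flushed through fc_remote_port_chkready(). The fast-fail timer path can be sketched like this (stand-in types and print stubs; the real work runs off fail_io_work in the transport class):

#include <stdio.h>

#define FC_RPORT_FAST_FAIL_TIMEDOUT	0x04
enum port_state { FC_PORTSTATE_ONLINE, FC_PORTSTATE_BLOCKED };
struct rport { enum port_state port_state; unsigned int flags; };

static void terminate_rport_io(struct rport *rport)
{
	(void)rport;
	/* optional LLDD callout would abort outstanding commands here */
	printf("aborting outstanding IO\n");
	/* unblock: requeued IO is re-driven and chkready now fails it */
	printf("unblocking target\n");
}

static void fast_io_fail_timer_fired(struct rport *rport)
{
	if (rport->port_state != FC_PORTSTATE_BLOCKED)
		return;			/* port came back; nothing to do */
	rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT;
	terminate_rport_io(rport);
}

int main(void)
{
	struct rport rport = { FC_PORTSTATE_BLOCKED, 0 };

	fast_io_fail_timer_fired(&rport);
	return 0;
}
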
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -374,10 +374,10 @@ int iscsi_session_chkready(struct iscsi_
 		err = 0;
 		break;
 	case ISCSI_SESSION_FAILED:
-		err = DID_IMM_RETRY << 16;
+		err = DID_TRANSPORT_DISRUPTED << 16;
 		break;
 	case ISCSI_SESSION_FREE:
-		err = DID_NO_CONNECT << 16;
+		err = DID_TRANSPORT_FAILFAST << 16;
 		break;
 	default:
 		err = DID_NO_CONNECT << 16;
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -109,7 +109,9 @@ static int spi_execute(struct scsi_devic
 	for(i = 0; i < DV_RETRIES; i++) {
 		result = scsi_execute(sdev, cmd, dir, buffer, bufflen,
 				      sense, DV_TIMEOUT, /* retries */ 1,
-				      REQ_FAILFAST);
+				      REQ_FAILFAST_DEV |
+				      REQ_FAILFAST_TRANSPORT |
+				      REQ_FAILFAST_DRIVER);
 		if (result & DRIVER_SENSE) {
 			struct scsi_sense_hdr sshdr_tmp;
 			if (!sshdr)
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -130,21 +130,36 @@ struct bio {
 /*
 * bio bi_rw flags
 *
- * bit 0 -- read (not set) or write (set)
+ * bit 0 -- data direction
+ *	If not set, bio is a read from device. If set, it's a write to device.
 * bit 1 -- rw-ahead when set
 * bit 2 -- barrier
- * bit 3 -- fail fast, don't want low level driver retries
- * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
- * bit 5 -- metadata request
- * bit 6 -- discard sectors
+ *	Insert a serialization point in the IO queue, forcing previously
+ *	submitted IO to be completed before this one is issued.
+ * bit 3 -- synchronous I/O hint: the block layer will unplug immediately
+ *	Note that this does NOT indicate that the IO itself is sync, just
+ *	that the block layer will not postpone issue of this IO by plugging.
+ * bit 4 -- metadata request
+ *	Used for tracing to differentiate metadata and data IO. May also
+ *	get some preferential treatment in the IO scheduler
+ * bit 5 -- discard sectors
+ *	Informs the lower level device that this range of sectors is no longer
+ *	used by the file system and may thus be freed by the device. Used
+ *	for flash based storage.
+ * bit 6 -- fail fast device errors
+ * bit 7 -- fail fast transport errors
+ * bit 8 -- fail fast driver errors
+ *	Don't want driver retries for any fast fail, whatever the reason.
 */
 #define BIO_RW		0	/* Must match RW in req flags (blkdev.h) */
 #define BIO_RW_AHEAD	1	/* Must match FAILFAST in req flags */
 #define BIO_RW_BARRIER	2
-#define BIO_RW_FAILFAST	3
-#define BIO_RW_SYNC	4
-#define BIO_RW_META	5
-#define BIO_RW_DISCARD	6
+#define BIO_RW_SYNC	3
+#define BIO_RW_META	4
+#define BIO_RW_DISCARD	5
+#define BIO_RW_FAILFAST_DEV	6
+#define BIO_RW_FAILFAST_TRANSPORT	7
+#define BIO_RW_FAILFAST_DRIVER	8

 /*
 * upper 16 bits of bi_rw define the io priority of this bio
@@ -171,7 +186,10 @@ struct bio {
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
 #define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))
 #define bio_sync(bio)		((bio)->bi_rw & (1 << BIO_RW_SYNC))
-#define bio_failfast(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
+#define bio_failfast_dev(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DEV))
+#define bio_failfast_transport(bio)	\
+	((bio)->bi_rw & (1 << BIO_RW_FAILFAST_TRANSPORT))
+#define bio_failfast_driver(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DRIVER))
 #define bio_rw_ahead(bio)	((bio)->bi_rw & (1 << BIO_RW_AHEAD))
 #define bio_rw_meta(bio)	((bio)->bi_rw & (1 << BIO_RW_META))
 #define bio_discard(bio)	((bio)->bi_rw & (1 << BIO_RW_DISCARD))
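
Because the old single failfast bit sat at position 3, the sync/meta/discard bits all shift down and the three new failfast bits land at positions 6-8. A quick self-contained check using the values defined in this hunk:

#include <stdio.h>

#define BIO_RW_SYNC			3
#define BIO_RW_META			4
#define BIO_RW_DISCARD			5
#define BIO_RW_FAILFAST_DEV		6
#define BIO_RW_FAILFAST_TRANSPORT	7
#define BIO_RW_FAILFAST_DRIVER		8

#define bio_failfast_transport(rw) ((rw) & (1UL << BIO_RW_FAILFAST_TRANSPORT))

int main(void)
{
	unsigned long bi_rw = (1UL << BIO_RW_SYNC) |
			      (1UL << BIO_RW_FAILFAST_TRANSPORT);

	printf("transport failfast: %d\n", !!bio_failfast_transport(bi_rw));
	return 0;
}
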
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -86,7 +86,9 @@ enum {
 */
 enum rq_flag_bits {
 	__REQ_RW,		/* not set, read. set, write */
-	__REQ_FAILFAST,		/* no low level driver retries */
+	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
+	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
+	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
 	__REQ_DISCARD,		/* request to discard sectors */
 	__REQ_SORTED,		/* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
@@ -110,8 +112,10 @@ enum rq_flag_bits {
 };

 #define REQ_RW		(1 << __REQ_RW)
+#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
 #define REQ_DISCARD	(1 << __REQ_DISCARD)
-#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
 #define REQ_SORTED	(1 << __REQ_SORTED)
 #define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
 #define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
@@ -551,7 +555,12 @@ enum {
 #define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
 #define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)

-#define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
+#define blk_failfast_dev(rq)	((rq)->cmd_flags & REQ_FAILFAST_DEV)
+#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
+#define blk_failfast_driver(rq)	((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
+#define blk_noretry_request(rq)	(blk_failfast_dev(rq) ||	\
+				 blk_failfast_transport(rq) ||	\
+				 blk_failfast_driver(rq))
 #define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)

 #define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
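
blk_noretry_request() keeps its old name but now answers "is any failfast bit set?", which is why callers like the DASD drivers earlier in the patch keep working unmodified. A condensed, self-contained check (bit positions follow the enum order in the hunk above, with __REQ_RW at bit 0; the mask form here is truth-equivalent to the kernel's OR of three macros):

#include <stdio.h>

#define REQ_FAILFAST_DEV	(1 << 1)
#define REQ_FAILFAST_TRANSPORT	(1 << 2)
#define REQ_FAILFAST_DRIVER	(1 << 3)

#define blk_noretry_request(flags) \
	((flags) & (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
		    REQ_FAILFAST_DRIVER))

int main(void)
{
	printf("%d\n", !!blk_noretry_request(REQ_FAILFAST_TRANSPORT)); /* 1 */
	printf("%d\n", !!blk_noretry_request(0));		       /* 0 */
	return 0;
}
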
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -381,6 +381,11 @@ static inline int scsi_is_wlun(unsigned
 #define DID_IMM_RETRY	0x0c	/* Retry without decrementing retry count */
 #define DID_REQUEUE	0x0d	/* Requeue command (no immediate retry) also
				 * without decrementing the retry count */
+#define DID_TRANSPORT_DISRUPTED 0x0e /* Transport error disrupted execution
+				      * and the driver blocked the port to
+				      * recover the link. Transport class will
+				      * retry or fail IO */
+#define DID_TRANSPORT_FAILFAST	0x0f /* Transport class fast-failed the IO */
 #define DRIVER_OK	0x00	/* Driver status */

 /*
@@ -426,6 +431,7 @@ static inline int scsi_is_wlun(unsigned
 #define SCSI_MLQUEUE_HOST_BUSY	 0x1055
 #define SCSI_MLQUEUE_DEVICE_BUSY 0x1056
 #define SCSI_MLQUEUE_EH_RETRY	 0x1057
+#define SCSI_MLQUEUE_TARGET_BUSY 0x1058

 /*
 * Use these to separate status msg and our bytes
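
The two new host bytes extend the table printed by constants.c and are extracted from scmd->result the usual way. A tiny demo (the host_byte() shift mirrors the kernel's macro; the values are the ones defined in this hunk):

#include <stdio.h>

#define DID_TRANSPORT_DISRUPTED	0x0e	/* requeue; transport recovering */
#define DID_TRANSPORT_FAILFAST	0x0f	/* fail upward immediately */

#define host_byte(result)	(((result) >> 16) & 0xff)

int main(void)
{
	int result = DID_TRANSPORT_DISRUPTED << 16;

	printf("host byte %#x\n", host_byte(result));	/* 0xe */
	return 0;
}
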
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -238,6 +238,16 @@ struct scsi_target {
					 * for the device at a time. */
 	unsigned int		pdt_1f_for_no_lun;	/* PDT = 0x1f */
						/* means no lun present */
+	/* commands actually active on LLD. protected by host lock. */
+	unsigned int		target_busy;
+	/*
+	 * LLDs should set this in the slave_alloc host template callout.
+	 * If set to zero then there is no limit.
+	 */
+	unsigned int		can_queue;
+	unsigned int		target_blocked;
+	unsigned int		max_target_blocked;
+#define SCSI_DEFAULT_TARGET_BLOCKED	3

 	char			scsi_level;
 	struct execute_work	ew;
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -357,6 +357,7 @@ struct fc_rport {	/* aka fc_starget_attr
 /* bit field values for struct fc_rport "flags" field: */
 #define FC_RPORT_DEVLOSS_PENDING	0x01
 #define FC_RPORT_SCAN_PENDING		0x02
+#define FC_RPORT_FAST_FAIL_TIMEDOUT	0x04

 #define	dev_to_rport(d)				\
	container_of(d, struct fc_rport, dev)
@@ -678,12 +679,15 @@ fc_remote_port_chkready(struct fc_rport
		if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
			result = 0;
		else if (rport->flags & FC_RPORT_DEVLOSS_PENDING)
-			result = DID_IMM_RETRY << 16;
+			result = DID_TRANSPORT_DISRUPTED << 16;
		else
			result = DID_NO_CONNECT << 16;
		break;
	case FC_PORTSTATE_BLOCKED:
-		result = DID_IMM_RETRY << 16;
+		if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
+			result = DID_TRANSPORT_FAILFAST << 16;
+		else
+			result = DID_TRANSPORT_DISRUPTED << 16;
		break;
	default:
		result = DID_NO_CONNECT << 16;
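
Putting the fc transport pieces together: while an rport is blocked, chkready reports "disrupted" so commands are requeued, and once the fast-fail timer has marked the rport, the same check fails them upward. A compilable model of just the BLOCKED arm (the real fc_remote_port_chkready() also handles the online/role and devloss cases; FC_RPORT_FAST_FAIL_TIMEDOUT is corrected to 0x04 above so it does not overlap the other two flag bits):

#include <stdio.h>

#define FC_RPORT_FAST_FAIL_TIMEDOUT	0x04
#define DID_TRANSPORT_DISRUPTED		0x0e
#define DID_TRANSPORT_FAILFAST		0x0f

struct rport { unsigned int flags; };

static int chkready_blocked(const struct rport *rport)
{
	if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
		return DID_TRANSPORT_FAILFAST << 16;	/* fail IO upward */
	return DID_TRANSPORT_DISRUPTED << 16;		/* requeue IO */
}

int main(void)
{
	struct rport rport = { 0 };

	printf("blocked: %#x\n", chkready_blocked(&rport));
	rport.flags |= FC_RPORT_FAST_FAIL_TIMEDOUT;
	printf("after fast_io_fail_tmo: %#x\n", chkready_blocked(&rport));
	return 0;
}
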