*/
void blk_steal_bios(struct bio_list *list, struct request *rq)
{
+ struct bio *bio;
+
+ for (bio = rq->bio; bio; bio = bio->bi_next) {
+ if (bio->bi_opf & REQ_POLLED) {
+ bio->bi_opf &= ~REQ_POLLED;
+ bio->bi_cookie = BLK_QC_T_NONE;
+ }
+ /*
+ * The alternate request queue that we may end up submitting
+ * the bio to may be frozen temporarily, in this case REQ_NOWAIT
+ * will fail the I/O immediately with EAGAIN to the issuer.
+ * We are not in the issuer context, which is the context that
+ * cannot block, so it is safe to clear the flag here and avoid
+ * spurious EAGAIN I/O failures.
+ */
+ bio->bi_opf &= ~REQ_NOWAIT;
+ }
+
if (rq->bio) {
if (list->tail)
list->tail->bi_next = rq->bio;
}
spin_lock_irqsave(&ns->head->requeue_lock, flags);
- for (bio = req->bio; bio; bio = bio->bi_next) {
+ for (bio = req->bio; bio; bio = bio->bi_next)
bio_set_dev(bio, ns->head->disk->part0);
- if (bio->bi_opf & REQ_POLLED) {
- bio->bi_opf &= ~REQ_POLLED;
- bio->bi_cookie = BLK_QC_T_NONE;
- }
- /*
- * The alternate request queue that we may end up submitting
- * the bio to may be frozen temporarily, in this case REQ_NOWAIT
- * will fail the I/O immediately with EAGAIN to the issuer.
- * We are not in the issuer context which cannot block. Clear
- * the flag to avoid spurious EAGAIN I/O failures.
- */
- bio->bi_opf &= ~REQ_NOWAIT;
- }
blk_steal_bios(&ns->head->requeue_list, req);
spin_unlock_irqrestore(&ns->head->requeue_lock, flags);