Merge tag 'for-linus-20191212' of git://git.kernel.dk/linux-block
Author:     Linus Torvalds <torvalds@linux-foundation.org>
AuthorDate: Fri, 13 Dec 2019 22:27:19 +0000 (14:27 -0800)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Fri, 13 Dec 2019 22:27:19 +0000 (14:27 -0800)
Pull block fixes from Jens Axboe:

 - stable fix for the bi_size overflow. Not a corruption issue, but a
   case where we could have merged but disallowed it (Andreas)

 - NVMe pull request via Keith, with various fixes.

 - MD pull request from Song.

 - Merge window regression fix for the rq passthrough stats (Logan)

 - Remove unused blkcg_drain_queue() function (Guoqing)

* tag 'for-linus-20191212' of git://git.kernel.dk/linux-block:
  blk-cgroup: remove blkcg_drain_queue
  block: fix NULL pointer dereference in account statistics with IDE
  md: make sure desc_nr less than MD_SB_DISKS
  md: raid1: check rdev before reference in raid1_sync_request func
  raid5: need to set STRIPE_HANDLE for batch head
  block: fix "check bi_size overflow before merge"
  nvme/pci: Fix read queue count
  nvme/pci: Limit write queue sizes to possible cpus
  nvme/pci: Fix write and poll queue types
  nvme/pci: Remove last_cq_head
  nvme: Namespace identification descriptor list is optional
  nvme-fc: fix double-free scenarios on hw queues
  nvme: else following return is not needed
  nvme: add error message on mismatching controller ids
  nvme_fc: add module to ops template to allow module references
  nvmet-loop: Avoid preallocating big SGL for data
  nvme-fc: Avoid preallocating big SGL for data
  nvme-rdma: Avoid preallocating big SGL for data

17 files changed:
block/bio.c
block/blk-cgroup.c
block/blk-core.c
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid5.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/fcloop.c
drivers/nvme/target/loop.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/qla2xxx/qla_nvme.c
include/linux/blk-cgroup.h
include/linux/nvme-fc-driver.h

diff --git a/block/bio.c b/block/bio.c
index 9d54aa37ce6c7074be56e99a14bc60d0bc058edd..a5d75f6bf4c7eedc96454bade72dbc9fe88af74d 100644
@@ -754,10 +754,12 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
        if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
                return false;
 
-       if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
+       if (bio->bi_vcnt > 0) {
                struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 
                if (page_is_mergeable(bv, page, len, off, same_page)) {
+                       if (bio->bi_iter.bi_size > UINT_MAX - len)
+                               return false;
                        bv->bv_len += len;
                        bio->bi_iter.bi_size += len;
                        return true;
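
This hunk is the "fix bi_size overflow check" change: the guard moves inside the merge attempt, so a merge is refused only when bi_size would actually wrap past UINT_MAX, instead of being disallowed up front. A minimal userspace sketch of the wraparound-safe idiom (hypothetical helper, not kernel code):

#include <assert.h>
#include <limits.h>
#include <stdbool.h>

static bool try_grow(unsigned int *total, unsigned int add)
{
        /* checking before the addition detects wraparound; a post-hoc
         * "total + add > UINT_MAX" never can, since the sum has wrapped */
        if (*total > UINT_MAX - add)
                return false;
        *total += add;
        return true;
}

int main(void)
{
        unsigned int size = UINT_MAX - 4;

        assert(try_grow(&size, 4));     /* exactly fits */
        assert(!try_grow(&size, 1));    /* would wrap: refused */
        return 0;
}
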
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 708dea92dac8c6037dd1716e3d1bccadf346abac..a229b94d53908aa35ff3f2d8d1fa3532df4c2899 100644
@@ -1061,26 +1061,6 @@ err_unlock:
        return PTR_ERR(blkg);
 }
 
-/**
- * blkcg_drain_queue - drain blkcg part of request_queue
- * @q: request_queue to drain
- *
- * Called from blk_drain_queue().  Responsible for draining blkcg part.
- */
-void blkcg_drain_queue(struct request_queue *q)
-{
-       lockdep_assert_held(&q->queue_lock);
-
-       /*
-        * @q could be exiting and already have destroyed all blkgs as
-        * indicated by NULL root_blkg.  If so, don't confuse policies.
-        */
-       if (!q->root_blkg)
-               return;
-
-       blk_throtl_drain(q);
-}
-
 /**
  * blkcg_exit_queue - exit and release blkcg part of request_queue
  * @q: request_queue being released
diff --git a/block/blk-core.c b/block/blk-core.c
index e4b27f7e9f51d7a0d31fa530b9b484ac55eefff2..e0a094fddee5f3e16b660c8cab189987663cb7f7 100644
@@ -1310,7 +1310,7 @@ EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
 
 void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
-       if (blk_do_io_stat(req)) {
+       if (req->part && blk_do_io_stat(req)) {
                const int sgrp = op_stat_group(req_op(req));
                struct hd_struct *part;
 
@@ -1328,7 +1328,8 @@ void blk_account_io_done(struct request *req, u64 now)
         * normal IO on queueing nor completion.  Accounting the
         * containing request is enough.
         */
-       if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
+       if (req->part && blk_do_io_stat(req) &&
+           !(req->rq_flags & RQF_FLUSH_SEQ)) {
                const int sgrp = op_stat_group(req_op(req));
                struct hd_struct *part;
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 805b33e274967f7320eb2b05e1f5958512981500..4e7c9f398bc66b39b420c400769b496291338442 100644
@@ -1159,6 +1159,7 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
        /* not spare disk, or LEVEL_MULTIPATH */
        if (sb->level == LEVEL_MULTIPATH ||
                (rdev->desc_nr >= 0 &&
+                rdev->desc_nr < MD_SB_DISKS &&
                 sb->disks[rdev->desc_nr].state &
                 ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))))
                spare_disk = false;
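
The new line bounds-checks desc_nr against MD_SB_DISKS before it indexes sb->disks[]. A generic sketch of the guard, with a hypothetical array type and a stand-in bound:

#include <assert.h>
#include <stdbool.h>

#define NDISKS 27       /* stand-in bound; the real limit is MD_SB_DISKS */

struct disk_info { unsigned int state; };

static bool disk_is_active(const struct disk_info *disks, int nr)
{
        if (nr < 0 || nr >= NDISKS)     /* range check before disks[nr] */
                return false;
        return disks[nr].state != 0;
}

int main(void)
{
        struct disk_info disks[NDISKS] = { [0] = { .state = 1 } };

        assert(disk_is_active(disks, 0));
        assert(!disk_is_active(disks, NDISKS));  /* out of range: rejected */
        return 0;
}
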
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a409ab6f30bc33375561d4cebc20c5e20f9435ba..201fd8aec59aca67842611d8524f205d2c9b63d4 100644
@@ -2782,7 +2782,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                                write_targets++;
                        }
                }
-               if (bio->bi_end_io) {
+               if (rdev && bio->bi_end_io) {
                        atomic_inc(&rdev->nr_pending);
                        bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
                        bio_set_dev(bio, rdev->bdev);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f0fc538bfe597f7a1179b31eeaa8a8bfa837101a..d4d3b67ffbba7c7b57b142efb5f9441d8511c9a1 100644
@@ -5726,7 +5726,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
                                do_flush = false;
                        }
 
-                       if (!sh->batch_head)
+                       if (!sh->batch_head || sh == sh->batch_head)
                                set_bit(STRIPE_HANDLE, &sh->state);
                        clear_bit(STRIPE_DELAYED, &sh->state);
                        if ((!sh->batch_head || sh == sh->batch_head) &&
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index dfe37a525f3aff78433229932afa28854332eb90..667f18f465be1c6d35e52685f29095665efba00e 100644
@@ -1735,6 +1735,8 @@ static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
                if (ret)
                        dev_warn(ctrl->device,
                                 "Identify Descriptors failed (%d)\n", ret);
+               if (ret > 0)
+                       ret = 0;
        }
        return ret;
 }
@@ -2852,6 +2854,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
                 * admin connect
                 */
                if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
+                       dev_err(ctrl->device,
+                               "Mismatching cntlid: Connect %u vs Identify "
+                               "%u, rejecting\n",
+                               ctrl->cntlid, le16_to_cpu(id->cntlid));
                        ret = -EINVAL;
                        goto out_free;
                }
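
The first core.c hunk leans on the driver's return convention: negative values are kernel errnos, positive values are NVMe status codes from the device. Since the identification descriptor list is optional, a device-reported status is warned about and then squashed, while a real errno still propagates. A sketch of that filtering (hypothetical helper name):

#include <assert.h>
#include <errno.h>

static int filter_optional_status(int ret)
{
        if (ret > 0)            /* NVMe status code: the list is optional, ignore */
                return 0;
        return ret;             /* 0 on success, negative errno stays fatal */
}

int main(void)
{
        assert(filter_optional_status(0x0b) == 0);      /* device status squashed */
        assert(filter_optional_status(-EIO) == -EIO);   /* transport error kept */
        assert(filter_optional_status(0) == 0);
        return 0;
}
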
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 679a721ae229aaaf8432dc7206f4adbd3f305ec6..5a70ac395d53a0f724f3f29431c4c32afa235619 100644
@@ -95,7 +95,7 @@ struct nvme_fc_fcp_op {
 
 struct nvme_fcp_op_w_sgl {
        struct nvme_fc_fcp_op   op;
-       struct scatterlist      sgl[SG_CHUNK_SIZE];
+       struct scatterlist      sgl[NVME_INLINE_SG_CNT];
        uint8_t                 priv[0];
 };
 
@@ -342,7 +342,8 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
            !template->ls_req || !template->fcp_io ||
            !template->ls_abort || !template->fcp_abort ||
            !template->max_hw_queues || !template->max_sgl_segments ||
-           !template->max_dif_sgl_segments || !template->dma_boundary) {
+           !template->max_dif_sgl_segments || !template->dma_boundary ||
+           !template->module) {
                ret = -EINVAL;
                goto out_reghost_failed;
        }
@@ -2015,6 +2016,7 @@ nvme_fc_ctrl_free(struct kref *ref)
 {
        struct nvme_fc_ctrl *ctrl =
                container_of(ref, struct nvme_fc_ctrl, ref);
+       struct nvme_fc_lport *lport = ctrl->lport;
        unsigned long flags;
 
        if (ctrl->ctrl.tagset) {
@@ -2041,6 +2043,7 @@ nvme_fc_ctrl_free(struct kref *ref)
        if (ctrl->ctrl.opts)
                nvmf_free_options(ctrl->ctrl.opts);
        kfree(ctrl);
+       module_put(lport->ops->module);
 }
 
 static void
@@ -2141,7 +2144,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
        freq->sg_table.sgl = freq->first_sgl;
        ret = sg_alloc_table_chained(&freq->sg_table,
                        blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
-                       SG_CHUNK_SIZE);
+                       NVME_INLINE_SG_CNT);
        if (ret)
                return -ENOMEM;
 
@@ -2150,7 +2153,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
        freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
                                op->nents, rq_dma_dir(rq));
        if (unlikely(freq->sg_cnt <= 0)) {
-               sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
+               sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
                freq->sg_cnt = 0;
                return -EFAULT;
        }
@@ -2173,7 +2176,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
        fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
                        rq_dma_dir(rq));
 
-       sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
+       sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
 
        freq->sg_cnt = 0;
 }
@@ -2910,10 +2913,22 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 static void
 __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
 {
-       nvme_stop_keep_alive(&ctrl->ctrl);
+       /*
+        * if state is connecting - the error occurred as part of a
+        * reconnect attempt. The create_association error paths will
+        * clean up any outstanding io.
+        *
+        * if it's a different state - ensure all pending io is
+        * terminated. Given this can delay while waiting for the
+        * aborted io to return, we recheck adapter state below
+        * before changing state.
+        */
+       if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
+               nvme_stop_keep_alive(&ctrl->ctrl);
 
-       /* will block while waiting for io to terminate */
-       nvme_fc_delete_association(ctrl);
+               /* will block while waiting for io to terminate */
+               nvme_fc_delete_association(ctrl);
+       }
 
        if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
            !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
@@ -3059,10 +3074,15 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
                goto out_fail;
        }
 
+       if (!try_module_get(lport->ops->module)) {
+               ret = -EUNATCH;
+               goto out_free_ctrl;
+       }
+
        idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
        if (idx < 0) {
                ret = -ENOSPC;
-               goto out_free_ctrl;
+               goto out_mod_put;
        }
 
        ctrl->ctrl.opts = opts;
@@ -3215,6 +3235,8 @@ out_free_queues:
 out_free_ida:
        put_device(ctrl->dev);
        ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+out_mod_put:
+       module_put(lport->ops->module);
 out_free_ctrl:
        kfree(ctrl);
 out_fail:
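
Taken together, the fc.c hunks implement the module-pinning pattern from "nvme_fc: add module to ops template": try_module_get() pins the LLDD's module while a controller references its ops, and the final kref release drops the pin, saving lport before kfree(ctrl) so the put never touches freed memory. A userspace analogue of that lifetime discipline (hypothetical names throughout):

#include <stdlib.h>

struct provider { int refcnt; };                /* stands in for the LLDD module */

static int provider_get(struct provider *p) { p->refcnt++; return 1; }
static void provider_put(struct provider *p) { p->refcnt--; }

struct ctrl { struct provider *prov; };

static struct ctrl *ctrl_create(struct provider *p)
{
        struct ctrl *c;

        if (!provider_get(p))                   /* mirrors try_module_get() */
                return NULL;
        c = malloc(sizeof(*c));
        if (!c) {
                provider_put(p);                /* mirrors the new out_mod_put label */
                return NULL;
        }
        c->prov = p;
        return c;
}

static void ctrl_free(struct ctrl *c)
{
        struct provider *p = c->prov;           /* save before free, as the hunk does */

        free(c);
        provider_put(p);                        /* mirrors module_put(lport->ops->module) */
}

int main(void)
{
        struct provider lldd = { 0 };
        struct ctrl *c = ctrl_create(&lldd);    /* refcnt is 1 while the ctrl lives */

        if (c)
                ctrl_free(c);                   /* refcnt back to 0 */
        return 0;
}
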
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 3b9cbe0668fa488523f59aa0a12978c0c24fce18..1024fec7914c41b50d8e7087e39a5090131dd5e8 100644
@@ -28,6 +28,12 @@ extern unsigned int admin_timeout;
 #define NVME_DEFAULT_KATO      5
 #define NVME_KATO_GRACE                10
 
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define  NVME_INLINE_SG_CNT  0
+#else
+#define  NVME_INLINE_SG_CNT  2
+#endif
+
 extern struct workqueue_struct *nvme_wq;
 extern struct workqueue_struct *nvme_reset_wq;
 extern struct workqueue_struct *nvme_delete_wq;
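
This constant drives the three "Avoid preallocating big SGL" patches: rather than embedding SG_CHUNK_SIZE (128) scatterlist entries in every request, two are kept inline (none where the architecture cannot chain) and sg_alloc_table_chained() chains further tables on demand. A back-of-the-envelope sketch of the per-request saving, assuming a roughly 32-byte scatterlist entry on 64-bit:

#include <stdio.h>

int main(void)
{
        const unsigned int sg_entry = 32;       /* assumed sizeof(struct scatterlist) */
        const unsigned int old_cnt = 128;       /* SG_CHUNK_SIZE */
        const unsigned int new_cnt = 2;         /* NVME_INLINE_SG_CNT */

        /* preallocated per request, for every request in every tag set */
        printf("before: %u bytes\n", old_cnt * sg_entry);       /* 4096 */
        printf("after:  %u bytes\n", new_cnt * sg_entry);       /* 64 */
        return 0;
}
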
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index dcaad5831cee7aaef48758a332ae049d06ef9991..365a2ddbeaa762f84a51106163cc915e2c2919ef 100644
@@ -68,14 +68,14 @@ static int io_queue_depth = 1024;
 module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
 MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
 
-static int write_queues;
-module_param(write_queues, int, 0644);
+static unsigned int write_queues;
+module_param(write_queues, uint, 0644);
 MODULE_PARM_DESC(write_queues,
        "Number of queues to use for writes. If not set, reads and writes "
        "will share a queue set.");
 
-static int poll_queues;
-module_param(poll_queues, int, 0644);
+static unsigned int poll_queues;
+module_param(poll_queues, uint, 0644);
 MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
 
 struct nvme_dev;
@@ -176,7 +176,6 @@ struct nvme_queue {
        u16 sq_tail;
        u16 last_sq_tail;
        u16 cq_head;
-       u16 last_cq_head;
        u16 qid;
        u8 cq_phase;
        u8 sqes;
@@ -1026,10 +1025,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
         * the irq handler, even if that was on another CPU.
         */
        rmb();
-       if (nvmeq->cq_head != nvmeq->last_cq_head)
-               ret = IRQ_HANDLED;
        nvme_process_cq(nvmeq, &start, &end, -1);
-       nvmeq->last_cq_head = nvmeq->cq_head;
        wmb();
 
        if (start != end) {
@@ -1549,7 +1545,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
        result = adapter_alloc_sq(dev, qid, nvmeq);
        if (result < 0)
                return result;
-       else if (result)
+       if (result)
                goto release_cq;
 
        nvmeq->cq_vector = vector;
@@ -2058,7 +2054,6 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
                .priv           = dev,
        };
        unsigned int irq_queues, this_p_queues;
-       unsigned int nr_cpus = num_possible_cpus();
 
        /*
         * Poll queues don't need interrupts, but we need at least one IO
@@ -2069,10 +2064,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
                this_p_queues = nr_io_queues - 1;
                irq_queues = 1;
        } else {
-               if (nr_cpus < nr_io_queues - this_p_queues)
-                       irq_queues = nr_cpus + 1;
-               else
-                       irq_queues = nr_io_queues - this_p_queues + 1;
+               irq_queues = nr_io_queues - this_p_queues + 1;
        }
        dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
 
@@ -3142,6 +3134,9 @@ static int __init nvme_init(void)
        BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
        BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
+
+       write_queues = min(write_queues, num_possible_cpus());
+       poll_queues = min(poll_queues, num_possible_cpus());
        return pci_register_driver(&nvme_driver);
 }
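
Two queue-count fixes meet here: write_queues and poll_queues become unsigned, so negative values are rejected when the module parameter is parsed, and nvme_init() clamps both to num_possible_cpus() once, which lets nvme_setup_irqs() drop its per-call CPU comparison. A minimal sketch of clamp-once-at-init (hypothetical values):

#include <stdio.h>

static unsigned int clamp_queues(unsigned int requested, unsigned int ncpus)
{
        return requested < ncpus ? requested : ncpus;   /* min(), as in nvme_init() */
}

int main(void)
{
        unsigned int ncpus = 8;                         /* assumed num_possible_cpus() */

        printf("%u\n", clamp_queues(128, ncpus));       /* capped to 8 */
        printf("%u\n", clamp_queues(4, ncpus));         /* left at 4 */
        return 0;
}
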
 
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index dce59459ed41b9867b3645ae2688d62c57c1d52c..2a47c6c5007e1280a320f9776afe10005e23b98a 100644
@@ -731,7 +731,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
                set->reserved_tags = 2; /* connect + keep-alive */
                set->numa_node = nctrl->numa_node;
                set->cmd_size = sizeof(struct nvme_rdma_request) +
-                       SG_CHUNK_SIZE * sizeof(struct scatterlist);
+                       NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
                set->driver_data = ctrl;
                set->nr_hw_queues = 1;
                set->timeout = ADMIN_TIMEOUT;
@@ -745,7 +745,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
                set->numa_node = nctrl->numa_node;
                set->flags = BLK_MQ_F_SHOULD_MERGE;
                set->cmd_size = sizeof(struct nvme_rdma_request) +
-                       SG_CHUNK_SIZE * sizeof(struct scatterlist);
+                       NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
                set->driver_data = ctrl;
                set->nr_hw_queues = nctrl->queue_count - 1;
                set->timeout = NVME_IO_TIMEOUT;
@@ -1160,7 +1160,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
        }
 
        ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
-       sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
+       sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
 }
 
 static int nvme_rdma_set_sg_null(struct nvme_command *c)
@@ -1276,7 +1276,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
        req->sg_table.sgl = req->first_sgl;
        ret = sg_alloc_table_chained(&req->sg_table,
                        blk_rq_nr_phys_segments(rq), req->sg_table.sgl,
-                       SG_CHUNK_SIZE);
+                       NVME_INLINE_SG_CNT);
        if (ret)
                return -ENOMEM;
 
@@ -1314,7 +1314,7 @@ out:
 out_unmap_sg:
        ib_dma_unmap_sg(ibdev, req->sg_table.sgl, req->nents, rq_dma_dir(rq));
 out_free_table:
-       sg_free_table_chained(&req->sg_table, SG_CHUNK_SIZE);
+       sg_free_table_chained(&req->sg_table, NVME_INLINE_SG_CNT);
        return ret;
 }
 
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index b50b53db37462499cafc46c40a0fe77f7a52095b..1c50af6219f321360b05b729a9f382842b6fb46b 100644
@@ -850,6 +850,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
 #define FCLOOP_DMABOUND_4G             0xFFFFFFFF
 
 static struct nvme_fc_port_template fctemplate = {
+       .module                 = THIS_MODULE,
        .localport_delete       = fcloop_localport_delete,
        .remoteport_delete      = fcloop_remoteport_delete,
        .create_queue           = fcloop_create_queue,
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index a758bb3d5dd49fbfaee6e79001726a904b644261..4df4ebde208a0465dac1e975304fe4a6fd358c2f 100644
@@ -76,7 +76,7 @@ static void nvme_loop_complete_rq(struct request *req)
 {
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
 
-       sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
+       sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
        nvme_complete_rq(req);
 }
 
@@ -156,7 +156,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                iod->sg_table.sgl = iod->first_sgl;
                if (sg_alloc_table_chained(&iod->sg_table,
                                blk_rq_nr_phys_segments(req),
-                               iod->sg_table.sgl, SG_CHUNK_SIZE)) {
+                               iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
                        nvme_cleanup_cmd(req);
                        return BLK_STS_RESOURCE;
                }
@@ -342,7 +342,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
        ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
        ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
        ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
-               SG_CHUNK_SIZE * sizeof(struct scatterlist);
+               NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
        ctrl->admin_tag_set.driver_data = ctrl;
        ctrl->admin_tag_set.nr_hw_queues = 1;
        ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
@@ -516,7 +516,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
        ctrl->tag_set.numa_node = NUMA_NO_NODE;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
-               SG_CHUNK_SIZE * sizeof(struct scatterlist);
+               NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
        ctrl->tag_set.driver_data = ctrl;
        ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
        ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index db4a04a207ecee98a15e702f18cc20d59d52bcfd..f6c8963c915d4aeda456299cddc62d4f7b03c811 100644
@@ -1985,6 +1985,8 @@ out_unlock:
 
 /* Declare and initialize an instance of the FC NVME template. */
 static struct nvme_fc_port_template lpfc_nvme_template = {
+       .module = THIS_MODULE,
+
        /* initiator-based functions */
        .localport_delete  = lpfc_nvme_localport_delete,
        .remoteport_delete = lpfc_nvme_remoteport_delete,
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 941aa53363f564a23a15cc306abff534283659ee..bfcd02fdf2b8915df121562303e0b9358b836f89 100644
@@ -610,6 +610,7 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
 }
 
 static struct nvme_fc_port_template qla_nvme_fc_transport = {
+       .module = THIS_MODULE,
        .localport_delete = qla_nvme_localport_delete,
        .remoteport_delete = qla_nvme_remoteport_delete,
        .create_queue   = qla_nvme_alloc_queue,
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 19394c77ed9955ac0b949f8a536f9ed4fbbace08..e4a6949fd17165979616c4f361d2cf4948703007 100644
@@ -188,7 +188,6 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q);
 int blkcg_init_queue(struct request_queue *q);
-void blkcg_drain_queue(struct request_queue *q);
 void blkcg_exit_queue(struct request_queue *q);
 
 /* Blkio controller policy registration */
@@ -720,7 +719,6 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { ret
 static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
 { return NULL; }
 static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
-static inline void blkcg_drain_queue(struct request_queue *q) { }
 static inline void blkcg_exit_queue(struct request_queue *q) { }
 static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
 static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 10f81629b9cecc71fbf3bb0d906f6389b2d4b963..6d0d70f3219c5b80fccb2314acf0d2bc8de7f129 100644
@@ -270,6 +270,8 @@ struct nvme_fc_remote_port {
  *
  * Host/Initiator Transport Entrypoints/Parameters:
  *
+ * @module:  The LLDD module using the interface
+ *
  * @localport_delete:  The LLDD initiates deletion of a localport via
  *       nvme_fc_deregister_localport(). However, the teardown is
  *       asynchronous. This routine is called upon the completion of the
@@ -383,6 +385,8 @@ struct nvme_fc_remote_port {
  *       Value is Mandatory. Allowed to be zero.
  */
 struct nvme_fc_port_template {
+       struct module   *module;
+
        /* initiator-based functions */
        void    (*localport_delete)(struct nvme_fc_local_port *);
        void    (*remoteport_delete)(struct nvme_fc_remote_port *);