.reg_read64 = apple_nvme_reg_read64,
.free_ctrl = apple_nvme_free_ctrl,
.get_address = apple_nvme_get_address,
+ .get_virt_boundary = nvme_get_virt_boundary,
};
static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
}
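+/*
+ * @is_admin is passed through to the transport's ->get_virt_boundary()
+ * callback so that admin queues can keep the PRP page boundary even when
+ * SGL support removes it for I/O queues.
+ */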
static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl,
- struct queue_limits *lim)
+ struct queue_limits *lim, bool is_admin)
{
lim->max_hw_sectors = ctrl->max_hw_sectors;
lim->max_segments = min_t(u32, USHRT_MAX,
min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments));
lim->max_integrity_segments = ctrl->max_integrity_segments;
- lim->virt_boundary_mask = NVME_CTRL_PAGE_SIZE - 1;
+ lim->virt_boundary_mask = ctrl->ops->get_virt_boundary(ctrl, is_admin);
lim->max_segment_size = UINT_MAX;
lim->dma_alignment = 3;
}
int ret;
lim = queue_limits_start_update(ns->disk->queue);
- nvme_set_ctrl_limits(ns->ctrl, &lim);
+ nvme_set_ctrl_limits(ns->ctrl, &lim, false);
memflags = blk_mq_freeze_queue(ns->disk->queue);
ret = queue_limits_commit_update(ns->disk->queue, &lim);
ns->head->lba_shift = id->lbaf[lbaf].ds;
ns->head->nuse = le64_to_cpu(id->nuse);
capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
- nvme_set_ctrl_limits(ns->ctrl, &lim);
+ nvme_set_ctrl_limits(ns->ctrl, &lim, false);
nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info);
nvme_set_chunk_sectors(ns, id, &lim);
if (!nvme_update_disk_info(ns, id, &lim))
min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
lim = queue_limits_start_update(ctrl->admin_q);
- nvme_set_ctrl_limits(ctrl, &lim);
+ nvme_set_ctrl_limits(ctrl, &lim, true);
ret = queue_limits_commit_update(ctrl->admin_q, &lim);
if (ret)
goto out_free;
min(opts->nr_poll_queues, num_online_cpus());
}
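+/*
+ * Fabrics helper for transports with no transport-imposed virtual
+ * boundary (used by FC and TCP): returning 0 leaves virt_boundary_mask
+ * unset.
+ */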
+static inline unsigned long nvmf_get_virt_boundary(struct nvme_ctrl *ctrl,
+ bool is_admin)
+{
+ return 0;
+}
+
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
.submit_async_event = nvme_fc_submit_async_event,
.delete_ctrl = nvme_fc_delete_ctrl,
.get_address = nvmf_get_address,
+ .get_virt_boundary = nvmf_get_virt_boundary,
};
static void
return head->pi_type && head->ms == head->pi_size;
}
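+/*
+ * Default ->get_virt_boundary() implementation preserving the historical
+ * NVME_CTRL_PAGE_SIZE virtual boundary (used by apple, rdma and loop).
+ */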
+static inline unsigned long nvme_get_virt_boundary(struct nvme_ctrl *ctrl,
+ bool is_admin)
+{
+ return NVME_CTRL_PAGE_SIZE - 1;
+}
+
struct nvme_ctrl_ops {
const char *name;
struct module *module;
int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
void (*print_device_info)(struct nvme_ctrl *ctrl);
bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
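+ /*
+ * Return the virt_boundary_mask for the transport's queues; 0 means no
+ * boundary is required. @is_admin is true when setting admin queue limits.
+ */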
+ unsigned long (*get_virt_boundary)(struct nvme_ctrl *ctrl, bool is_admin);
};
/*
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
if (nvmeq->qid && nvme_ctrl_sgl_supported(&dev->ctrl)) {
- if (nvme_req(req)->flags & NVME_REQ_USERCMD)
- return SGL_FORCED;
- if (req->nr_integrity_segments > 1)
+ /*
+ * When the controller is capable of using SGLs, there are several
+ * cases in which we force their use:
+ *
+ * 1. A request whose segments leave gaps within the controller's
+ * page mask cannot be described with PRPs.
+ *
+ * 2. User commands use SGLs so that the device can validate the
+ * requested transfer lengths.
+ *
+ * 3. Multiple integrity segments must use SGLs, as that is the
+ * only way to describe such a command in NVMe.
+ */
+ if ((req_phys_gap_mask(req) & (NVME_CTRL_PAGE_SIZE - 1)) ||
+ nvme_req(req)->flags & NVME_REQ_USERCMD ||
+ req->nr_integrity_segments > 1)
return SGL_FORCED;
return SGL_SUPPORTED;
}
return dma_pci_p2pdma_supported(dev->dev);
}
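+/*
+ * SGL-capable controllers have no segment alignment requirement for I/O
+ * queues, so no virt boundary is needed there.  Admin commands and
+ * PRP-only controllers still need NVME_CTRL_PAGE_SIZE aligned segments.
+ */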
+static unsigned long nvme_pci_get_virt_boundary(struct nvme_ctrl *ctrl,
+ bool is_admin)
+{
+ if (!nvme_ctrl_sgl_supported(ctrl) || is_admin)
+ return NVME_CTRL_PAGE_SIZE - 1;
+ return 0;
+}
+
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.name = "pcie",
.module = THIS_MODULE,
.get_address = nvme_pci_get_address,
.print_device_info = nvme_pci_print_device_info,
.supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma,
+ .get_virt_boundary = nvme_pci_get_virt_boundary,
};
static int nvme_dev_map(struct nvme_dev *dev)
.delete_ctrl = nvme_rdma_delete_ctrl,
.get_address = nvmf_get_address,
.stop_ctrl = nvme_rdma_stop_ctrl,
+ .get_virt_boundary = nvme_get_virt_boundary,
};
/*
.delete_ctrl = nvme_tcp_delete_ctrl,
.get_address = nvme_tcp_get_address,
.stop_ctrl = nvme_tcp_stop_ctrl,
+ .get_virt_boundary = nvmf_get_virt_boundary,
};
static bool
.submit_async_event = nvme_loop_submit_async_event,
.delete_ctrl = nvme_loop_delete_ctrl_host,
.get_address = nvmf_get_address,
+ .get_virt_boundary = nvme_get_virt_boundary,
};
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)