Fix various spelling errors in comments.
Signed-off-by: Yi Zhang <yi.zhang@redhat.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
* @c1: Value of challenge C1
* @c2: Value of challenge C2
* @hash_len: Hash length of the hash algorithm
- * @ret_psk: Pointer too the resulting generated PSK
+ * @ret_psk: Pointer to the resulting generated PSK
* @ret_len: length of @ret_psk
*
* Generate a PSK for TLS as specified in NVMe base specification, section
goto out_free_prk;
/*
- * 2 addtional bytes for the length field from HDKF-Expand-Label,
- * 2 addtional bytes for the HMAC ID, and one byte for the space
+ * 2 additional bytes for the length field from HKDF-Expand-Label,
+ * 2 additional bytes for the HMAC ID, and one byte for the space
* separator.
*/
info_len = strlen(psk_digest) + strlen(psk_prefix) + 5;
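As a sketch of that byte accounting (the helper is illustrative, not a kernel function; psk_prefix and psk_digest are the strings from the surrounding code):

static size_t tls_psk_info_len(const char *psk_prefix,
			       const char *psk_digest)
{
	return strlen(psk_prefix) + strlen(psk_digest)
		+ 2	/* length field from HKDF-Expand-Label */
		+ 2	/* HMAC identifier */
		+ 1;	/* space separator */
}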
help
Enables TLS encryption for NVMe TCP using the netlink handshake API.
- The TLS handshake daemon is availble at
+ The TLS handshake daemon is available at
https://github.com/oracle/ktls-utils.
If unsure, say N.
if (ns->head->ms) {
/*
- * If formated with metadata, the block layer always provides a
+ * If formatted with metadata, the block layer always provides a
* metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else
* we enable the PRACT bit for protection information or set the
* namespace capacity to zero to prevent any I/O.
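The three outcomes can be sketched as follows (branch shape illustrative; pi_supported is a hypothetical stand-in for the driver's PI capability check):

if (ns->head->ms) {
	if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
		;	/* block layer supplies the metadata buffer */
	} else if (pi_supported) {
		/* let the controller generate/strip PI itself */
		control |= NVME_RW_PRINFO_PRACT;
	} else {
		/* metadata cannot be handled at all: block any I/O */
		set_capacity(ns->disk, 0);
	}
}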
* Do not retry when:
*
* - the DNR bit is set and the specification states no further connect
- * attempts with the same set of paramenters should be attempted.
+ * attempts with the same set of parameters should be attempted.
*
* - when the authentication attempt fails, because the key was invalid.
* This error code is set on the host side.
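Reduced to the first rule, the retry gate looks like this (illustrative helper; NVME_SC_DNR is the kernel's name for the Do Not Retry bit, 0x4000 in the status word):

static bool nvmf_connect_should_retry(u16 status)
{
	if (status & NVME_SC_DNR)	/* spec: no further attempts */
		return false;
	return true;			/* other checks elided */
}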
* @transport: Holds the fabric transport "technology name" (for a lack of
* better description) that will be used by an NVMe controller
* being added.
- * @subsysnqn: Hold the fully qualified NQN subystem name (format defined
+ * @subsysnqn: Holds the fully qualified NQN subsystem name (format defined
* in the NVMe specification, "NVMe Qualified Names").
* @traddr: The transport-specific TRADDR field for a port on the
* subsystem which is adding a controller.
* @create_ctrl(): function pointer that points to a non-NVMe
* implementation-specific fabric technology
* that would go into starting up that fabric
- * for the purpose of conneciton to an NVMe controller
+ * for the purpose of connection to an NVMe controller
* using that fabric technology.
*
* Notes:
* 2. create_ctrl() must be defined (even if it does nothing)
* 3. struct nvmf_transport_ops must be statically allocated in the
* module's .bss section so that a pure module_get on @module
- * prevents the memory from beeing freed.
+ * prevents the memory from being freed.
*/
struct nvmf_transport_ops {
struct list_head entry;
}
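A transport honoring note 3 would register itself roughly like this (all myfabric_* names are hypothetical):

/* static, so a pure module_get on .module keeps this memory alive */
static struct nvmf_transport_ops myfabric_transport_ops = {
	.name		= "myfabric",
	.module		= THIS_MODULE,
	.create_ctrl	= myfabric_create_ctrl,
};

static int __init myfabric_init(void)
{
	return nvmf_register_transport(&myfabric_transport_ops);
}

static void __exit myfabric_exit(void)
{
	nvmf_unregister_transport(&myfabric_transport_ops);
}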
/*
- * For the linux implementation, if we have an unsuccesful
+ * For the linux implementation, if we have an unsuccessful
* status, the blk-mq layer can typically be called with the
* non-zero status and the content of the cqe isn't important.
*/
* writing the registers for shutdown and polling (call
* nvme_disable_ctrl()). Given a bunch of i/o was potentially
* just aborted and we will wait on those contexts, and given
- * there was no indication of how live the controlelr is on the
+ * there was no indication of how live the controller is on the
* link, don't send more io to create more contexts for the
* shutdown. Let the controller fail via keepalive failure if
* it's still present.
/*
* Handle ioctls that apply to the controller instead of the namespace
- * seperately and drop the ns SRCU reference early. This avoids a
+ * separately and drop the ns SRCU reference early. This avoids a
* deadlock when deleting namespaces using the passthrough interface.
*/
if (is_ctrl_ioctl(cmd))
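Filled out, the branch has roughly this shape (simplified; handle_ctrl_ioctl is an illustrative stand-in for the controller-level dispatch):

if (is_ctrl_ioctl(cmd)) {
	struct nvme_ctrl *ctrl = ns->ctrl;

	/* hold the controller so the ns SRCU lock can go early */
	nvme_get_ctrl(ctrl);
	srcu_read_unlock(&head->srcu, srcu_idx);
	ret = handle_ctrl_ioctl(ctrl, cmd, argp);
	nvme_put_ctrl(ctrl);
	return ret;
}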
* controller's scan_work context. If a path error occurs here, the IO
* will wait until a path becomes available or all paths are torn down,
* but that action also occurs within scan_work, so it would deadlock.
- * Defer the partion scan to a different context that does not block
+ * Defer the partition scan to a different context that does not block
* scan_work.
*/
set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state);
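The deferred side then rescans from its own work item; sketched from the multipath code, with details recalled from memory rather than quoted:

static void nvme_partition_scan_work(struct work_struct *work)
{
	struct nvme_ns_head *head = container_of(work,
			struct nvme_ns_head, partition_scan_work);

	if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN,
					     &head->disk->state)))
		return;

	mutex_lock(&head->disk->open_mutex);
	bdev_disk_changed(head->disk, false);
	mutex_unlock(&head->disk->open_mutex);
}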
enum nvme_ns_features {
NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
- NVME_NS_DEAC = 1 << 2, /* DEAC bit in Write Zeores supported */
+ NVME_NS_DEAC = 1 << 2, /* DEAC bit in Write Zeroes supported */
};
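The DEAC flag is consumed when building a Write Zeroes command, along these lines (cf. nvme_setup_write_zeroes; field placement from memory):

	if (ns->head->features & NVME_NS_DEAC)
		cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC);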
struct nvme_ns {
goto out;
/*
- * Freeze and update the number of I/O queues as thos might have
+ * Freeze and update the number of I/O queues as those might have
* changed. If there are no I/O queues left after this reset, keep the
* controller around but remove all namespaces.
*/
/*
* Exclude some Kingston NV1 and A2000 devices from
* NVME_QUIRK_SIMPLE_SUSPEND. Do a full suspend to save a
- * lot fo energy with s2idle sleep on some TUXEDO platforms.
+ * lot of energy with s2idle sleep on some TUXEDO platforms.
*/
if (dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") ||
dmi_match(DMI_BOARD_NAME, "NS5x_7xAU") ||
/*
* Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue
- * lifetime. It's safe, since any chage in the underlying RDMA device
+ * lifetime. It's safe, since any change in the underlying RDMA device
* will issue error recovery and queue re-creation.
*/
for (i = 0; i < ib_queue_size; i++) {
/*
* Bind the async event SQE DMA mapping to the admin queue lifetime.
- * It's safe, since any chage in the underlying RDMA device will issue
+ * It's safe, since any change in the underlying RDMA device will issue
* error recovery and queue re-creation.
*/
error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
* A "minimum viable" abort implementation: the command is mandatory in the
* spec, but we are not required to do any useful work. We couldn't really
* do a useful abort, so don't bother even with waiting for the command
- * to be exectuted and return immediately telling the command to abort
+ * to be executed and return immediately telling the command to abort
* wasn't found.
*/
static void nvmet_execute_abort(struct nvmet_req *req)
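The body that follows is correspondingly minimal, roughly (from memory; result bit 0 set to 1 reports that the command to abort was not found):

{
	if (!nvmet_check_transfer_len(req, 0))
		return;
	nvmet_set_result(req, 1);	/* command was not aborted */
	nvmet_req_complete(req, 0);
}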
* Now that we removed the namespaces from the lookup list, we
* can kill the per_cpu ref and wait for any remaining references
* to be dropped, as well as an RCU grace period for anyone only
- * using the namepace under rcu_read_lock(). Note that we can't
+ * using the namespace under rcu_read_lock(). Note that we can't
* use call_rcu here as we need to ensure the namespaces have
* been fully destroyed before unloading the module.
*/
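Schematically, the teardown ordering is (calls illustrative, assuming a percpu_ref protected namespace):

	percpu_ref_kill(&ns->ref);		/* no new references */
	wait_for_completion(&ns->disable_done);	/* existing references gone */
	synchronize_rcu();			/* rcu_read_lock() users done */

synchronize_rcu() blocks rather than deferring, which is why call_rcu() is not an option here.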
/**
* nvmet_fc_register_targetport - transport entry point called by an
* LLDD to register the existence of a local
- * NVME subystem FC port.
+ * NVME subsystem FC port.
* @pinfo: pointer to information about the port to be registered
* @template: LLDD entrypoints and operational parameters for the port
* @dev: physical hardware device node port corresponds to. Will be
* Right now there exists an M : 1 mapping between block layer errors
* and the NVMe status codes (see nvme_error_status()). For consistency,
* when we reverse map we use the most appropriate NVMe status code from
- * the group of the NVMe staus codes used in the nvme_error_status().
+ * the group of the NVMe status codes used in the nvme_error_status().
*/
switch (blk_sts) {
case BLK_STS_NOSPC:
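A few representative arms of that switch (abbreviated rendering, recalled rather than quoted; DNR is ORed in because these failures are not retryable):

switch (blk_sts) {
case BLK_STS_NOSPC:
	status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
	break;
case BLK_STS_NOTSUPP:
	status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	break;
default:
	status = NVME_SC_INTERNAL | NVME_SC_DNR;
	break;
}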
/*
* The passthru NVMe driver may have a limit on the number of segments
- * which depends on the host's memory fragementation. To solve this,
+ * which depends on the host's memory fragmentation. To solve this,
* ensure mdts is limited to the pages equal to the number of segments.
*/
max_hw_sectors = min_not_zero(pctrl->max_segments << PAGE_SECTORS_SHIFT,
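Worked example of the cap above: with 4 KiB pages, PAGE_SECTORS_SHIFT is 3 (PAGE_SHIFT 12 minus SECTOR_SHIFT 9, i.e. eight 512-byte sectors per page), so a passthru controller limited to 127 segments is clamped to 127 << 3 = 1016 sectors per command, guaranteeing one page per segment even for a maximally fragmented host buffer.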