static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct idpf_queue_id_reg_info *chunks;
if (!test_bit(IDPF_VPORT_UP, np->state))
return;
netif_carrier_off(vport->netdev);
netif_tx_disable(vport->netdev);
+ chunks = &vport->adapter->vport_config[vport->idx]->qid_reg_info;
+
idpf_send_disable_vport_msg(vport);
idpf_send_disable_queues_msg(vport);
idpf_send_map_unmap_queue_vector_msg(vport, false);
* instead of deleting and reallocating the vport.
*/
if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags))
- idpf_send_delete_queues_msg(vport);
+ idpf_send_delete_queues_msg(vport, chunks);
idpf_remove_features(vport);
kfree(vport->q_vector_idxs);
vport->q_vector_idxs = NULL;
+ idpf_vport_deinit_queue_reg_chunks(vport_config);
+
kfree(adapter->vport_params_recvd[idx]);
adapter->vport_params_recvd[idx] = NULL;
kfree(adapter->vport_params_reqd[idx]);
adapter->vport_params_reqd[idx] = NULL;
- if (adapter->vport_config[idx]) {
- kfree(adapter->vport_config[idx]->req_qs_chunks);
- adapter->vport_config[idx]->req_qs_chunks = NULL;
- }
kfree(vport->rx_ptype_lkup);
vport->rx_ptype_lkup = NULL;
+
kfree(vport);
adapter->num_alloc_vports--;
}
if (!vport->q_vector_idxs)
goto free_vport;
- idpf_vport_init(vport, max_q);
+ err = idpf_vport_init(vport, max_q);
+ if (err)
+ goto free_vector_idxs;
/* LUT and key are both initialized here. Key is not strictly dependent
* on how many queues we have. If we change number of queues and soft
rss_data = &adapter->vport_config[idx]->user_config.rss_data;
rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
if (!rss_data->rss_key)
- goto free_vector_idxs;
+ goto free_qreg_chunks;
/* Initialize default rss key */
netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
free_rss_key:
kfree(rss_data->rss_key);
+free_qreg_chunks:
+ idpf_vport_deinit_queue_reg_chunks(adapter->vport_config[idx]);
free_vector_idxs:
kfree(vport->q_vector_idxs);
free_vport:
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport_config *vport_config;
+ struct idpf_queue_id_reg_info *chunks;
int err;
if (test_bit(IDPF_VPORT_UP, np->state))
if (err)
goto intr_rel;
- err = idpf_vport_queue_ids_init(vport);
+ vport_config = adapter->vport_config[vport->idx];
+ chunks = &vport_config->qid_reg_info;
+
+ err = idpf_vport_queue_ids_init(vport, chunks);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
- err = idpf_queue_reg_init(vport);
+ err = idpf_queue_reg_init(vport, chunks);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
vport->vport_id, err);
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
bool vport_is_up = test_bit(IDPF_VPORT_UP, np->state);
struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport_config *vport_config;
struct idpf_vport *new_vport;
- int err;
+ int err, tmp_err = 0;
/* If the system is low on memory, we can end up in bad state if we
* free all the memory for queue resources and try to allocate them
goto free_vport;
}
+ vport_config = adapter->vport_config[vport->idx];
+
if (!vport_is_up) {
- idpf_send_delete_queues_msg(vport);
+ idpf_send_delete_queues_msg(vport, &vport_config->qid_reg_info);
} else {
set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
idpf_vport_stop(vport, false);
goto free_vport;
err_reset:
- idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq,
- vport->num_rxq, vport->num_bufq);
+ tmp_err = idpf_send_add_queues_msg(vport, vport->num_txq,
+ vport->num_complq, vport->num_rxq,
+ vport->num_bufq);
err_open:
- if (vport_is_up)
+ if (!tmp_err && vport_is_up)
idpf_vport_open(vport, false);
free_vport:
avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq);
}
+/**
+ * idpf_vport_init_queue_reg_chunks - initialize queue register chunks
+ * @vport_config: persistent vport structure to store the queue register info
+ * @schunks: source chunks to copy data from
+ *
+ * Make a persistent CPU-endian copy of the queue register chunks received
+ * over the mailbox, so the info remains valid after the virtchnl message
+ * buffer is released.
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int
+idpf_vport_init_queue_reg_chunks(struct idpf_vport_config *vport_config,
+				 struct virtchnl2_queue_reg_chunks *schunks)
+{
+	struct idpf_queue_id_reg_info *q_info = &vport_config->qid_reg_info;
+	u16 num_chunks = le16_to_cpu(schunks->num_chunks);
+
+	/* Drop any previously stored copy; kfree(NULL) is a no-op */
+	kfree(q_info->queue_chunks);
+
+	q_info->queue_chunks = kcalloc(num_chunks, sizeof(*q_info->queue_chunks),
+				       GFP_KERNEL);
+	if (!q_info->queue_chunks) {
+		/* Keep the count consistent with the now-NULL chunk array */
+		q_info->num_chunks = 0;
+		return -ENOMEM;
+	}
+
+	q_info->num_chunks = num_chunks;
+
+	/* Convert each chunk from little-endian wire format to CPU order */
+	for (u16 i = 0; i < num_chunks; i++) {
+		struct idpf_queue_id_reg_chunk *dchunk = &q_info->queue_chunks[i];
+		struct virtchnl2_queue_reg_chunk *schunk = &schunks->chunks[i];
+
+		dchunk->qtail_reg_start = le64_to_cpu(schunk->qtail_reg_start);
+		dchunk->qtail_reg_spacing = le32_to_cpu(schunk->qtail_reg_spacing);
+		dchunk->type = le32_to_cpu(schunk->type);
+		dchunk->start_queue_id = le32_to_cpu(schunk->start_queue_id);
+		dchunk->num_queues = le32_to_cpu(schunk->num_queues);
+	}
+
+	return 0;
+}
+
/**
* idpf_get_reg_intr_vecs - Get vector queue register offset
* @vport: virtual port structure
* are filled.
*/
static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
- struct virtchnl2_queue_reg_chunks *chunks)
+ struct idpf_queue_id_reg_info *chunks)
{
- u16 num_chunks = le16_to_cpu(chunks->num_chunks);
+ u16 num_chunks = chunks->num_chunks;
int reg_filled = 0, i;
u32 reg_val;
while (num_chunks--) {
- struct virtchnl2_queue_reg_chunk *chunk;
+ struct idpf_queue_id_reg_chunk *chunk;
u16 num_q;
- chunk = &chunks->chunks[num_chunks];
- if (le32_to_cpu(chunk->type) != q_type)
+ chunk = &chunks->queue_chunks[num_chunks];
+ if (chunk->type != q_type)
continue;
- num_q = le32_to_cpu(chunk->num_queues);
- reg_val = le64_to_cpu(chunk->qtail_reg_start);
+ num_q = chunk->num_queues;
+ reg_val = chunk->qtail_reg_start;
for (i = 0; i < num_q && reg_filled < num_regs ; i++) {
reg_vals[reg_filled++] = reg_val;
- reg_val += le32_to_cpu(chunk->qtail_reg_spacing);
+ reg_val += chunk->qtail_reg_spacing;
}
}
/**
* idpf_queue_reg_init - initialize queue registers
* @vport: virtual port structure
+ * @chunks: queue registers received over mailbox
*
- * Return 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
*/
-int idpf_queue_reg_init(struct idpf_vport *vport)
+int idpf_queue_reg_init(struct idpf_vport *vport,
+ struct idpf_queue_id_reg_info *chunks)
{
- struct virtchnl2_create_vport *vport_params;
- struct virtchnl2_queue_reg_chunks *chunks;
- struct idpf_vport_config *vport_config;
- u16 vport_idx = vport->idx;
int num_regs, ret = 0;
u32 *reg_vals;
if (!reg_vals)
return -ENOMEM;
- vport_config = vport->adapter->vport_config[vport_idx];
- if (vport_config->req_qs_chunks) {
- struct virtchnl2_add_queues *vc_aq =
- (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
- chunks = &vc_aq->chunks;
- } else {
- vport_params = vport->adapter->vport_params_recvd[vport_idx];
- chunks = &vport_params->chunks;
- }
-
/* Initialize Tx queue tail register address */
num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
VIRTCHNL2_QUEUE_TYPE_TX,
* @num_chunks: number of chunks to copy
*/
static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks,
-					     struct virtchnl2_queue_reg_chunk *schunks,
+					     struct idpf_queue_id_reg_chunk *schunks,
 u16 num_chunks)
{
u16 i;
+	/* Source chunks are now CPU-endian copies (idpf_queue_id_reg_chunk),
+	 * so convert back to little-endian wire format for the mailbox.
+	 */
for (i = 0; i < num_chunks; i++) {
-	dchunks[i].type = schunks[i].type;
-	dchunks[i].start_queue_id = schunks[i].start_queue_id;
-	dchunks[i].num_queues = schunks[i].num_queues;
+	dchunks[i].type = cpu_to_le32(schunks[i].type);
+	dchunks[i].start_queue_id = cpu_to_le32(schunks[i].start_queue_id);
+	dchunks[i].num_queues = cpu_to_le32(schunks[i].num_queues);
}
}
/**
* idpf_send_delete_queues_msg - send delete queues virtchnl message
- * @vport: Virtual port private data structure
+ * @vport: virtual port private data structure
+ * @chunks: queue ids received over mailbox
*
- * Will send delete queues virtchnl message. Return 0 on success, negative on
- * failure.
+ * Return: 0 on success, negative on failure.
*/
-int idpf_send_delete_queues_msg(struct idpf_vport *vport)
+int idpf_send_delete_queues_msg(struct idpf_vport *vport,
+ struct idpf_queue_id_reg_info *chunks)
{
struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
- struct virtchnl2_create_vport *vport_params;
- struct virtchnl2_queue_reg_chunks *chunks;
struct idpf_vc_xn_params xn_params = {};
- struct idpf_vport_config *vport_config;
- u16 vport_idx = vport->idx;
ssize_t reply_sz;
u16 num_chunks;
int buf_size;
- vport_config = vport->adapter->vport_config[vport_idx];
- if (vport_config->req_qs_chunks) {
- chunks = &vport_config->req_qs_chunks->chunks;
- } else {
- vport_params = vport->adapter->vport_params_recvd[vport_idx];
- chunks = &vport_params->chunks;
- }
-
- num_chunks = le16_to_cpu(chunks->num_chunks);
+ num_chunks = chunks->num_chunks;
buf_size = struct_size(eq, chunks.chunks, num_chunks);
eq = kzalloc(buf_size, GFP_KERNEL);
eq->vport_id = cpu_to_le32(vport->vport_id);
eq->chunks.num_chunks = cpu_to_le16(num_chunks);
- idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
+ idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->queue_chunks,
num_chunks);
xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
* @num_rx_q: number of receive queues
* @num_rx_bufq: number of receive buffer queues
*
- * Returns 0 on success, negative on failure. vport _MUST_ be const here as
- * we should not change any fields within vport itself in this function.
+ * Return: 0 on success, negative on failure.
*/
int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
return -ENOMEM;
vport_config = vport->adapter->vport_config[vport_idx];
- kfree(vport_config->req_qs_chunks);
- vport_config->req_qs_chunks = NULL;
aq.vport_id = cpu_to_le32(vport->vport_id);
aq.num_tx_q = cpu_to_le16(num_tx_q);
if (reply_sz < size)
return -EIO;
- vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
- if (!vport_config->req_qs_chunks)
- return -ENOMEM;
-
- return 0;
+ return idpf_vport_init_queue_reg_chunks(vport_config, &vc_msg->chunks);
}
/**
* @max_q: vport max queue info
*
* Will initialize vport with the info received through MB earlier
+ *
+ * Return: 0 on success, negative on failure.
*/
-void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
+int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_create_vport *vport_msg;
rss_data = &vport_config->user_config.rss_data;
vport_msg = adapter->vport_params_recvd[idx];
+ err = idpf_vport_init_queue_reg_chunks(vport_config,
+ &vport_msg->chunks);
+ if (err)
+ return err;
+
vport_config->max_q.max_txq = max_q->max_txq;
vport_config->max_q.max_rxq = max_q->max_rxq;
vport_config->max_q.max_complq = max_q->max_complq;
if (!(vport_msg->vport_flags &
cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT)))
- return;
+ return 0;
err = idpf_ptp_get_vport_tstamps_caps(vport);
if (err) {
+ /* Do not error on timestamp failure */
pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n");
- return;
+ return 0;
}
INIT_WORK(&vport->tstamp_task, idpf_tstamp_task);
+
+ return 0;
}
/**
* Returns number of ids filled
*/
static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
- struct virtchnl2_queue_reg_chunks *chunks)
+ struct idpf_queue_id_reg_info *chunks)
{
- u16 num_chunks = le16_to_cpu(chunks->num_chunks);
+ u16 num_chunks = chunks->num_chunks;
u32 num_q_id_filled = 0, i;
u32 start_q_id, num_q;
while (num_chunks--) {
- struct virtchnl2_queue_reg_chunk *chunk;
+ struct idpf_queue_id_reg_chunk *chunk;
- chunk = &chunks->chunks[num_chunks];
- if (le32_to_cpu(chunk->type) != q_type)
+ chunk = &chunks->queue_chunks[num_chunks];
+ if (chunk->type != q_type)
continue;
- num_q = le32_to_cpu(chunk->num_queues);
- start_q_id = le32_to_cpu(chunk->start_queue_id);
+ num_q = chunk->num_queues;
+ start_q_id = chunk->start_queue_id;
for (i = 0; i < num_q; i++) {
if ((num_q_id_filled + i) < num_qids) {
/**
* idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
* @vport: virtual port for which the queues ids are initialized
+ * @chunks: queue ids received over mailbox
*
* Will initialize all queue ids with ids received as mailbox parameters.
- * Returns 0 on success, negative if all the queues are not initialized.
+ *
+ * Return: 0 on success, negative if all the queues are not initialized.
*/
-int idpf_vport_queue_ids_init(struct idpf_vport *vport)
+int idpf_vport_queue_ids_init(struct idpf_vport *vport,
+ struct idpf_queue_id_reg_info *chunks)
{
- struct virtchnl2_create_vport *vport_params;
- struct virtchnl2_queue_reg_chunks *chunks;
- struct idpf_vport_config *vport_config;
- u16 vport_idx = vport->idx;
int num_ids, err = 0;
u16 q_type;
u32 *qids;
- vport_config = vport->adapter->vport_config[vport_idx];
- if (vport_config->req_qs_chunks) {
- struct virtchnl2_add_queues *vc_aq =
- (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
- chunks = &vc_aq->chunks;
- } else {
- vport_params = vport->adapter->vport_params_recvd[vport_idx];
- chunks = &vport_params->chunks;
- }
-
qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL);
if (!qids)
return -ENOMEM;