return err;
}
-static int idpf_init_queue_set(const struct idpf_queue_set *qs)
+static int idpf_init_queue_set(const struct idpf_vport *vport,
+ const struct idpf_queue_set *qs)
{
- const struct idpf_vport *vport = qs->vport;
bool splitq;
int err;
static struct idpf_queue_set *
idpf_vector_to_queue_set(struct idpf_q_vector *qv)
{
- bool xdp = qv->vport->xdp_txq_offset && !qv->num_xsksq;
+ u32 xdp_txq_offset = qv->vport->dflt_qv_rsrc.xdp_txq_offset;
+ bool xdp = xdp_txq_offset && !qv->num_xsksq;
struct idpf_vport *vport = qv->vport;
struct idpf_queue_set *qs;
u32 num;
if (!num)
return NULL;
- qs = idpf_alloc_queue_set(vport, &vport->dflt_qv_rsrc, num);
+ qs = idpf_alloc_queue_set(vport->adapter, &vport->dflt_qv_rsrc,
+ vport->vport_id, num);
if (!qs)
return NULL;
qs->qs[num++].complq = qv->complq[i];
}
- if (!vport->xdp_txq_offset)
+ if (!xdp_txq_offset)
goto finalize;
if (xdp) {
for (u32 i = 0; i < qv->num_rxq; i++) {
- u32 idx = vport->xdp_txq_offset + qv->rx[i]->idx;
+ u32 idx = xdp_txq_offset + qv->rx[i]->idx;
qs->qs[num].type = VIRTCHNL2_QUEUE_TYPE_TX;
qs->qs[num++].txq = vport->txqs[idx];
return qs;
}
-static int idpf_qp_enable(const struct idpf_queue_set *qs, u32 qid)
+static int idpf_qp_enable(const struct idpf_vport *vport,
+ const struct idpf_queue_set *qs, u32 qid)
{
- struct idpf_q_vec_rsrc *rsrc = qs->qv_rsrc;
- struct idpf_vport *vport = qs->vport;
+ const struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_q_vector *q_vector;
int err;
q_vector = idpf_find_rxq_vec(vport, qid);
- err = idpf_init_queue_set(qs);
+ err = idpf_init_queue_set(vport, qs);
if (err) {
netdev_err(vport->netdev, "Could not initialize queues in pair %u: %pe\n",
qid, ERR_PTR(err));
return err;
}
- if (!vport->xdp_txq_offset)
+ if (!rsrc->xdp_txq_offset)
goto config;
q_vector->xsksq = kcalloc(DIV_ROUND_UP(rsrc->num_rxq_grp,
return 0;
}
-static int idpf_qp_disable(const struct idpf_queue_set *qs, u32 qid)
+static int idpf_qp_disable(const struct idpf_vport *vport,
+ const struct idpf_queue_set *qs, u32 qid)
{
- struct idpf_vport *vport = qs->vport;
struct idpf_q_vector *q_vector;
int err;
if (!qs)
return -ENOMEM;
- return en ? idpf_qp_enable(qs, qid) : idpf_qp_disable(qs, qid);
+ return en ? idpf_qp_enable(vport, qs, qid) :
+ idpf_qp_disable(vport, qs, qid);
}
/**
vport->xdp_prog = config_data->xdp_prog;
if (idpf_xdp_enabled(vport)) {
- vport->xdp_txq_offset = config_data->num_req_tx_qs;
+ rsrc->xdp_txq_offset = config_data->num_req_tx_qs;
vport->num_xdp_txq = le16_to_cpu(vport_msg->num_tx_q) -
- vport->xdp_txq_offset;
+ rsrc->xdp_txq_offset;
vport->xdpsq_share = libeth_xdpsq_shared(vport->num_xdp_txq);
} else {
- vport->xdp_txq_offset = 0;
+ rsrc->xdp_txq_offset = 0;
vport->num_xdp_txq = 0;
vport->xdpsq_share = false;
}
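For context on the arithmetic above: the Tx queues the stack requested stay regular queues, and every queue the device granted beyond that becomes an XDPSQ, starting at the offset. A worked sketch with hypothetical numbers (illustrative fragment, not driver code):

/* Hypothetical numbers: device granted 16 Tx queues, stack asked for 8. */
u32 num_tx_q = 16;			/* le16_to_cpu(vport_msg->num_tx_q) */
u32 num_req_tx_qs = 8;			/* config_data->num_req_tx_qs */
u32 xdp_txq_offset = num_req_tx_qs;	/* first XDPSQ sits at index 8 */
u32 num_xdp_txq = num_tx_q - xdp_txq_offset;	/* queues 8..15 are XDPSQs */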
struct idpf_tx_queue *xdpsq;
struct idpf_q_vector *qv;
- xdpsq = vport->txqs[vport->xdp_txq_offset + i];
+ xdpsq = vport->txqs[rsrc->xdp_txq_offset + i];
if (!idpf_queue_has(XSK, xdpsq))
continue;
if (!q_vector->complq)
goto error;
- if (!vport->xdp_txq_offset)
+ if (!rsrc->xdp_txq_offset)
continue;
q_vector->xsksq = kcalloc(rxqs_per_vector,
}
struct idpf_chunked_msg_params {
- u32 (*prepare_msg)(const struct idpf_vport *vport,
- void *buf, const void *pos,
- u32 num);
+ u32 (*prepare_msg)(u32 vport_id, void *buf,
+ const void *pos, u32 num);
const void *chunks;
u32 num_chunks;
u32 config_sz;
u32 vc_op;
+ u32 vport_id;
};
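With this change, a prepare_msg callback consumes a plain vport ID instead of the vport pointer. A minimal sketch of a conforming callback, modeled on the enable/disable helper later in this patch (the function name here is hypothetical):

/* Hypothetical name; body mirrors idpf_prepare_ena_dis_qs_msg() below. */
static u32 example_prepare_msg(u32 vport_id, void *buf, const void *pos,
			       u32 num_chunks)
{
	struct virtchnl2_del_ena_dis_queues *eq = buf;

	eq->vport_id = cpu_to_le32(vport_id);
	eq->chunks.num_chunks = cpu_to_le16(num_chunks);
	memcpy(eq->chunks.chunks, pos,
	       num_chunks * sizeof(*eq->chunks.chunks));

	/* idpf_send_chunked_msg() checks this against the expected size. */
	return struct_size(eq, chunks.chunks, num_chunks);
}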
-struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport,
+struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_adapter *adapter,
struct idpf_q_vec_rsrc *qv_rsrc,
- u32 num)
+ u32 vport_id, u32 num)
{
struct idpf_queue_set *qp;
if (!qp)
return NULL;
- qp->vport = vport;
+ qp->adapter = adapter;
qp->qv_rsrc = qv_rsrc;
+ qp->vport_id = vport_id;
qp->num = num;
return qp;
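The assignments above imply the reworked queue-set header: the set now carries the adapter pointer plus a vport ID rather than a struct idpf_vport pointer. A sketch of the resulting layout (member order and the entry type name are assumptions; the definition is outside this diff):

/* Assumed shape, reconstructed from the assignments above. */
struct idpf_queue_set {
	struct idpf_adapter	*adapter;	/* was: struct idpf_vport *vport */
	struct idpf_q_vec_rsrc	*qv_rsrc;
	u32			vport_id;
	u32			num;
	struct idpf_queue_ptr	qs[];		/* typed per-queue entries */
};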
/**
* idpf_send_chunked_msg - send VC message consisting of chunks
- * @vport: virtual port data structure
+ * @adapter: Driver specific private structure
* @params: message params
*
- * Helper function for preparing a message describing queues to be enabled
+ * Helper function for sending a VC message split into multiple chunks.
 *
- * Return: the total size of the prepared message.
+ * Return: 0 on success, -errno on failure.
*/
-static int idpf_send_chunked_msg(struct idpf_vport *vport,
+static int idpf_send_chunked_msg(struct idpf_adapter *adapter,
const struct idpf_chunked_msg_params *params)
{
struct idpf_vc_xn_params xn_params = {
u32 num_chunks, num_msgs, buf_sz;
void *buf __free(kfree) = NULL;
u32 totqs = params->num_chunks;
+ u32 vid = params->vport_id;
num_chunks = min(IDPF_NUM_CHUNKS_PER_MSG(params->config_sz,
params->chunk_sz), totqs);
memset(buf, 0, buf_sz);
xn_params.send_buf.iov_len = buf_sz;
- if (params->prepare_msg(vport, buf, pos, num_chunks) != buf_sz)
+ if (params->prepare_msg(vid, buf, pos, num_chunks) != buf_sz)
return -EINVAL;
- reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
if (reply_sz < 0)
return reply_sz;
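For reference, the chunking math: IDPF_NUM_CHUNKS_PER_MSG() bounds how many chunks fit into one buffer next to the fixed config header, and totqs is consumed in slices of that size. A sketch with hypothetical sizes, assuming the macro divides the remaining buffer space by the chunk size:

u32 buf_len = 4096, config_sz = 40, chunk_sz = 32;	/* hypothetical sizes */
u32 per_msg = (buf_len - config_sz) / chunk_sz;		/* 126 chunks fit */
u32 totqs = 200;					/* queues to describe */
u32 num_chunks = min(per_msg, totqs);			/* 126 per message */
u32 num_msgs = DIV_ROUND_UP(totqs, num_chunks);		/* 2 msgs; 2nd has 74 */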
*/
static int idpf_wait_for_marker_event_set(const struct idpf_queue_set *qs)
{
+ struct net_device *netdev;
struct idpf_tx_queue *txq;
bool markers_rcvd = true;
case VIRTCHNL2_QUEUE_TYPE_TX:
txq = qs->qs[i].txq;
+ netdev = txq->netdev;
+
idpf_queue_set(SW_MARKER, txq);
idpf_wait_for_sw_marker_completion(txq);
markers_rcvd &= !idpf_queue_has(SW_MARKER, txq);
}
if (!markers_rcvd) {
- netdev_warn(qs->vport->netdev,
- "Failed to receive marker packets\n");
+ netdev_warn(netdev, "Failed to receive marker packets\n");
return -ETIMEDOUT;
}
{
struct idpf_queue_set *qs __free(kfree) = NULL;
- qs = idpf_alloc_queue_set(vport, &vport->dflt_qv_rsrc, vport->num_txq);
+ qs = idpf_alloc_queue_set(vport->adapter, &vport->dflt_qv_rsrc,
+ vport->vport_id, vport->num_txq);
if (!qs)
return -ENOMEM;
/**
* idpf_prepare_cfg_txqs_msg - prepare message to configure selected Tx queues
- * @vport: virtual port data structure
+ * @vport_id: ID of the virtual port the queues are associated with
* @buf: buffer containing the message
* @pos: pointer to the first chunk describing the tx queue
* @num_chunks: number of chunks in the message
*
* Return: the total size of the prepared message.
*/
-static u32 idpf_prepare_cfg_txqs_msg(const struct idpf_vport *vport,
- void *buf, const void *pos,
+static u32 idpf_prepare_cfg_txqs_msg(u32 vport_id, void *buf, const void *pos,
u32 num_chunks)
{
struct virtchnl2_config_tx_queues *ctq = buf;
- ctq->vport_id = cpu_to_le32(vport->vport_id);
+ ctq->vport_id = cpu_to_le32(vport_id);
ctq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(ctq->qinfo, pos, num_chunks * sizeof(*ctq->qinfo));
{
struct virtchnl2_txq_info *qi __free(kfree) = NULL;
struct idpf_chunked_msg_params params = {
+ .vport_id = qs->vport_id,
.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES,
.prepare_msg = idpf_prepare_cfg_txqs_msg,
.config_sz = sizeof(struct virtchnl2_config_tx_queues),
&qi[params.num_chunks++]);
}
- return idpf_send_chunked_msg(qs->vport, &params);
+ return idpf_send_chunked_msg(qs->adapter, &params);
}
/**
u32 totqs = rsrc->num_txq + rsrc->num_complq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, rsrc, totqs);
+ qs = idpf_alloc_queue_set(vport->adapter, rsrc, vport->vport_id, totqs);
if (!qs)
return -ENOMEM;
/**
* idpf_prepare_cfg_rxqs_msg - prepare message to configure selected Rx queues
- * @vport: virtual port data structure
+ * @vport_id: ID of the virtual port the queues are associated with
* @buf: buffer containing the message
* @pos: pointer to the first chunk describing the rx queue
* @num_chunks: number of chunks in the message
*
* Return: the total size of the prepared message.
*/
-static u32 idpf_prepare_cfg_rxqs_msg(const struct idpf_vport *vport,
- void *buf, const void *pos,
+static u32 idpf_prepare_cfg_rxqs_msg(u32 vport_id, void *buf, const void *pos,
u32 num_chunks)
{
struct virtchnl2_config_rx_queues *crq = buf;
- crq->vport_id = cpu_to_le32(vport->vport_id);
+ crq->vport_id = cpu_to_le32(vport_id);
crq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(crq->qinfo, pos, num_chunks * sizeof(*crq->qinfo));
{
struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
struct idpf_chunked_msg_params params = {
+ .vport_id = qs->vport_id,
.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES,
.prepare_msg = idpf_prepare_cfg_rxqs_msg,
.config_sz = sizeof(struct virtchnl2_config_rx_queues),
&qi[params.num_chunks++]);
}
- return idpf_send_chunked_msg(qs->vport, &params);
+ return idpf_send_chunked_msg(qs->adapter, &params);
}
/**
u32 totqs = rsrc->num_rxq + rsrc->num_bufq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, rsrc, totqs);
+ qs = idpf_alloc_queue_set(vport->adapter, rsrc, vport->vport_id, totqs);
if (!qs)
return -ENOMEM;
/**
* idpf_prepare_ena_dis_qs_msg - prepare message to enable/disable selected
* queues
- * @vport: virtual port data structure
+ * @vport_id: ID of the virtual port the queues are associated with
* @buf: buffer containing the message
* @pos: pointer to the first chunk describing the queue
* @num_chunks: number of chunks in the message
*
* Return: the total size of the prepared message.
*/
-static u32 idpf_prepare_ena_dis_qs_msg(const struct idpf_vport *vport,
- void *buf, const void *pos,
+static u32 idpf_prepare_ena_dis_qs_msg(u32 vport_id, void *buf, const void *pos,
u32 num_chunks)
{
struct virtchnl2_del_ena_dis_queues *eq = buf;
- eq->vport_id = cpu_to_le32(vport->vport_id);
+ eq->vport_id = cpu_to_le32(vport_id);
eq->chunks.num_chunks = cpu_to_le16(num_chunks);
memcpy(eq->chunks.chunks, pos,
num_chunks * sizeof(*eq->chunks.chunks));
{
struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
struct idpf_chunked_msg_params params = {
+ .vport_id = qs->vport_id,
.vc_op = en ? VIRTCHNL2_OP_ENABLE_QUEUES :
VIRTCHNL2_OP_DISABLE_QUEUES,
.prepare_msg = idpf_prepare_ena_dis_qs_msg,
qc[i].start_queue_id = cpu_to_le32(qid);
}
- return idpf_send_chunked_msg(qs->vport, &params);
+ return idpf_send_chunked_msg(qs->adapter, &params);
}
/**
num_txq = rsrc->num_txq + rsrc->num_complq;
num_q = num_txq + rsrc->num_rxq + rsrc->num_bufq;
- qs = idpf_alloc_queue_set(vport, rsrc, num_q);
+ qs = idpf_alloc_queue_set(vport->adapter, rsrc, vport->vport_id, num_q);
if (!qs)
return -ENOMEM;
/**
* idpf_prep_map_unmap_queue_set_vector_msg - prepare message to map or unmap
* queue set to the interrupt vector
- * @vport: virtual port data structure
+ * @vport_id: ID of the virtual port the queues are associated with
* @buf: buffer containing the message
* @pos: pointer to the first chunk describing the vector mapping
* @num_chunks: number of chunks in the message
* Return: the total size of the prepared message.
*/
static u32
-idpf_prep_map_unmap_queue_set_vector_msg(const struct idpf_vport *vport,
- void *buf, const void *pos,
- u32 num_chunks)
+idpf_prep_map_unmap_queue_set_vector_msg(u32 vport_id, void *buf,
+ const void *pos, u32 num_chunks)
{
struct virtchnl2_queue_vector_maps *vqvm = buf;
- vqvm->vport_id = cpu_to_le32(vport->vport_id);
+ vqvm->vport_id = cpu_to_le32(vport_id);
vqvm->num_qv_maps = cpu_to_le16(num_chunks);
memcpy(vqvm->qv_maps, pos, num_chunks * sizeof(*vqvm->qv_maps));
{
struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
struct idpf_chunked_msg_params params = {
+ .vport_id = qs->vport_id,
.vc_op = map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :
VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
.prepare_msg = idpf_prep_map_unmap_queue_set_vector_msg,
vqv[i].itr_idx = cpu_to_le32(itr_idx);
}
- return idpf_send_chunked_msg(qs->vport, &params);
+ return idpf_send_chunked_msg(qs->adapter, &params);
}
/**
u32 num_q = rsrc->num_txq + rsrc->num_rxq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, rsrc, num_q);
+ qs = idpf_alloc_queue_set(vport->adapter, rsrc, vport->vport_id, num_q);
if (!qs)
return -ENOMEM;