struct idpf_adapter;
struct idpf_vport;
struct idpf_vport_max_q;
+struct idpf_q_vec_rsrc;
#include <net/pkt_sched.h>
#include <linux/aer.h>
struct idpf_reg_ops {
void (*ctlq_reg_init)(struct idpf_adapter *adapter,
struct idpf_ctlq_create_info *cq);
- int (*intr_reg_init)(struct idpf_vport *vport);
+ int (*intr_reg_init)(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
void (*reset_reg_init)(struct idpf_adapter *adapter);
void (*trigger_reset)(struct idpf_adapter *adapter,
struct ethtool_rx_flow_spec fs;
};
+/**
+ * struct idpf_q_vec_rsrc - Handle for queue and vector resources
+ * @q_vectors: array of queue vectors
+ * @q_vector_idxs: starting index of queue vectors
+ * @num_q_vectors: number of IRQ vectors allocated
+ * @noirq_v_idx: ID of the NOIRQ vector
+ * @noirq_dyn_ctl_ena: value to write to @noirq_dyn_ctl to enable it
+ * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
+ */
+struct idpf_q_vec_rsrc {
+ struct idpf_q_vector *q_vectors;
+ u16 *q_vector_idxs;
+ u16 num_q_vectors;
+ u16 noirq_v_idx;
+ u32 noirq_dyn_ctl_ena;
+ void __iomem *noirq_dyn_ctl;
+};
+
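For orientation, a minimal hedged sketch (not part of the patch) of how callers reach the new handle once it is embedded in the vport as @dflt_qv_rsrc; the helper name is hypothetical, the fields are the ones declared above:

        /* Hypothetical helper, for illustration only: look up the q_vector
         * that services a given relative index in the default resources.
         */
        static struct idpf_q_vector *
        example_find_q_vector(struct idpf_vport *vport, u16 v_idx)
        {
                struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;

                if (v_idx >= rsrc->num_q_vectors)
                        return NULL;

                return &rsrc->q_vectors[v_idx];
        }
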
/**
* struct idpf_vport - Handle for netdevices and queue resources
+ * @dflt_qv_rsrc: contains default queue and vector resources
* @num_txq: Number of allocated TX queues
* @num_complq: Number of allocated completion queues
* @txq_desc_count: TX queue descriptor count
* @idx: Software index in adapter vports struct
* @default_vport: Use this vport if one isn't specified
* @base_rxd: True if the driver should use base descriptors instead of flex
- * @num_q_vectors: Number of IRQ vectors allocated
- * @q_vectors: Array of queue vectors
- * @q_vector_idxs: Starting index of queue vectors
- * @noirq_dyn_ctl: register to enable/disable the vector for NOIRQ queues
- * @noirq_dyn_ctl_ena: value to write to the above to enable it
- * @noirq_v_idx: ID of the NOIRQ vector
* @max_mtu: device given max possible MTU
* @default_mac_addr: device will give a default MAC to use
* @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
* @tstamp_stats: Tx timestamping statistics
*/
struct idpf_vport {
+ struct idpf_q_vec_rsrc dflt_qv_rsrc;
u16 num_txq;
u16 num_complq;
u32 txq_desc_count;
bool default_vport;
bool base_rxd;
- u16 num_q_vectors;
- struct idpf_q_vector *q_vectors;
- u16 *q_vector_idxs;
-
- void __iomem *noirq_dyn_ctl;
- u32 noirq_dyn_ctl_ena;
- u16 noirq_v_idx;
-
u16 max_mtu;
u8 default_mac_addr[ETH_ALEN];
u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
/**
* idpf_intr_reg_init - Initialize interrupt registers
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*/
-static int idpf_intr_reg_init(struct idpf_vport *vport)
+static int idpf_intr_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
- int num_vecs = vport->num_q_vectors;
+ u16 num_vecs = rsrc->num_q_vectors;
struct idpf_vec_regs *reg_vals;
int num_regs, i, err = 0;
u32 rx_itr, tx_itr, val;
}
for (i = 0; i < num_vecs; i++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[i];
- u16 vec_id = vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[i];
+ u16 vec_id = rsrc->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
struct idpf_intr_reg *intr = &q_vector->intr_reg;
u32 spacing;
/* Data vector for NOIRQ queues */
- val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
- vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+ val = reg_vals[rsrc->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+ rsrc->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
val = PF_GLINT_DYN_CTL_WB_ON_ITR_M | PF_GLINT_DYN_CTL_INTENA_MSK_M |
FIELD_PREP(PF_GLINT_DYN_CTL_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
- vport->noirq_dyn_ctl_ena = val;
+ rsrc->noirq_dyn_ctl_ena = val;
free_reg_vals:
kfree(reg_vals);
static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_queue_id_reg_info *chunks;
if (!test_bit(IDPF_VPORT_UP, np->state))
idpf_remove_features(vport);
vport->link_up = false;
- idpf_vport_intr_deinit(vport);
+ idpf_vport_intr_deinit(vport, rsrc);
idpf_xdp_rxq_info_deinit_all(vport);
idpf_vport_queues_rel(vport);
- idpf_vport_intr_rel(vport);
+ idpf_vport_intr_rel(rsrc);
clear_bit(IDPF_VPORT_UP, np->state);
if (rtnl)
*/
static void idpf_vport_rel(struct idpf_vport *vport)
{
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
struct idpf_vector_info vec_info;
/* Release all the allocated vectors on the stack */
vec_info.num_req_vecs = 0;
- vec_info.num_curr_vecs = vport->num_q_vectors;
+ vec_info.num_curr_vecs = rsrc->num_q_vectors;
vec_info.default_vport = vport->default_vport;
- idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs, &vec_info);
+ idpf_req_rel_vector_indexes(adapter, rsrc->q_vector_idxs, &vec_info);
- kfree(vport->q_vector_idxs);
- vport->q_vector_idxs = NULL;
+ kfree(rsrc->q_vector_idxs);
+ rsrc->q_vector_idxs = NULL;
idpf_vport_deinit_queue_reg_chunks(vport_config);
{
struct idpf_rss_data *rss_data;
u16 idx = adapter->next_vport;
+ struct idpf_q_vec_rsrc *rsrc;
struct idpf_vport *vport;
u16 num_max_q;
int err;
vport->default_vport = adapter->num_alloc_vports <
idpf_get_default_vports(adapter);
- vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
- if (!vport->q_vector_idxs)
+ rsrc = &vport->dflt_qv_rsrc;
+ rsrc->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
+ if (!rsrc->q_vector_idxs)
goto free_vport;
err = idpf_vport_init(vport, max_q);
free_qreg_chunks:
idpf_vport_deinit_queue_reg_chunks(adapter->vport_config[idx]);
free_vector_idxs:
- kfree(vport->q_vector_idxs);
+ kfree(rsrc->q_vector_idxs);
free_vport:
kfree(vport);
static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport_config *vport_config;
struct idpf_queue_id_reg_info *chunks;
/* we do not allow interface up just yet */
netif_carrier_off(vport->netdev);
- err = idpf_vport_intr_alloc(vport);
+ err = idpf_vport_intr_alloc(vport, rsrc);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
vport->vport_id, err);
goto queues_rel;
}
- err = idpf_vport_intr_init(vport);
+ err = idpf_vport_intr_init(vport, rsrc);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n",
vport->vport_id, err);
goto intr_deinit;
}
- idpf_vport_intr_ena(vport);
+ idpf_vport_intr_ena(vport, rsrc);
err = idpf_send_config_queues_msg(vport);
if (err) {
rxq_deinit:
idpf_xdp_rxq_info_deinit_all(vport);
intr_deinit:
- idpf_vport_intr_deinit(vport);
+ idpf_vport_intr_deinit(vport, rsrc);
queues_rel:
idpf_vport_queues_rel(vport);
intr_rel:
- idpf_vport_intr_rel(vport);
+ idpf_vport_intr_rel(rsrc);
err_rtnl_unlock:
if (rtnl)
memcpy(vport, new_vport, offsetof(struct idpf_vport, link_up));
if (reset_cause == IDPF_SR_Q_CHANGE)
- idpf_vport_alloc_vec_indexes(vport);
+ idpf_vport_alloc_vec_indexes(vport, &vport->dflt_qv_rsrc);
err = idpf_set_real_num_queues(vport);
if (err)
if (!num)
return NULL;
- qs = idpf_alloc_queue_set(vport, num);
+ qs = idpf_alloc_queue_set(vport, &vport->dflt_qv_rsrc, num);
if (!qs)
return NULL;
goto config;
q_vector->xsksq = kcalloc(DIV_ROUND_UP(vport->num_rxq_grp,
- vport->num_q_vectors),
+ qs->qv_rsrc->num_q_vectors),
sizeof(*q_vector->xsksq), GFP_KERNEL);
if (!q_vector->xsksq)
return -ENOMEM;
/**
* idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport
- * @vport: virtual port structure
- *
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport)
+static void idpf_vport_intr_napi_del_all(struct idpf_q_vec_rsrc *rsrc)
{
- u16 v_idx;
-
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
- netif_napi_del(&vport->q_vectors[v_idx].napi);
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++)
+ netif_napi_del(&rsrc->q_vectors[v_idx].napi);
}
/**
* idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
- * @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport)
+static void idpf_vport_intr_napi_dis_all(struct idpf_q_vec_rsrc *rsrc)
{
- int v_idx;
-
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
- napi_disable(&vport->q_vectors[v_idx].napi);
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++)
+ napi_disable(&rsrc->q_vectors[v_idx].napi);
}
/**
* idpf_vport_intr_rel - Free memory allocated for interrupt vectors
- * @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Free the memory allocated for interrupt vectors associated to a vport
*/
-void idpf_vport_intr_rel(struct idpf_vport *vport)
+void idpf_vport_intr_rel(struct idpf_q_vec_rsrc *rsrc)
{
- for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[v_idx];
kfree(q_vector->xsksq);
q_vector->xsksq = NULL;
q_vector->rx = NULL;
}
- kfree(vport->q_vectors);
- vport->q_vectors = NULL;
+ kfree(rsrc->q_vectors);
+ rsrc->q_vectors = NULL;
}
static void idpf_q_vector_set_napi(struct idpf_q_vector *q_vector, bool link)
/**
* idpf_vport_intr_rel_irq - Free the IRQ association with the OS
* @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
+static void idpf_vport_intr_rel_irq(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
- int vector;
- for (vector = 0; vector < vport->num_q_vectors; vector++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
+ for (u16 vector = 0; vector < rsrc->num_q_vectors; vector++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[vector];
int irq_num, vidx;
/* free only the irqs that were actually requested */
if (!q_vector)
continue;
- vidx = vport->q_vector_idxs[vector];
+ vidx = rsrc->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
idpf_q_vector_set_napi(q_vector, false);
/**
* idpf_vport_intr_dis_irq_all - Disable all interrupt
- * @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
+static void idpf_vport_intr_dis_irq_all(struct idpf_q_vec_rsrc *rsrc)
{
- struct idpf_q_vector *q_vector = vport->q_vectors;
- int q_idx;
+ struct idpf_q_vector *q_vector = rsrc->q_vectors;
- writel(0, vport->noirq_dyn_ctl);
+ writel(0, rsrc->noirq_dyn_ctl);
- for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
+ for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++)
writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
}
/**
* idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport
* @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*
* Return: 0 on success, negative on failure
*/
-static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
+static int idpf_vport_intr_req_irq(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
const char *drv_name, *if_name, *vec_name;
drv_name = dev_driver_string(&adapter->pdev->dev);
if_name = netdev_name(vport->netdev);
- for (vector = 0; vector < vport->num_q_vectors; vector++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
+ for (vector = 0; vector < rsrc->num_q_vectors; vector++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[vector];
char *name;
- vidx = vport->q_vector_idxs[vector];
+ vidx = rsrc->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
if (q_vector->num_rxq && q_vector->num_txq)
free_q_irqs:
while (--vector >= 0) {
- vidx = vport->q_vector_idxs[vector];
+ vidx = rsrc->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
- kfree(free_irq(irq_num, &vport->q_vectors[vector]));
+ kfree(free_irq(irq_num, &rsrc->q_vectors[vector]));
}
return err;
/**
* idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
* @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
+static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
bool dynamic;
- int q_idx;
u16 itr;
- for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
- struct idpf_q_vector *qv = &vport->q_vectors[q_idx];
+ for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++) {
+ struct idpf_q_vector *qv = &rsrc->q_vectors[q_idx];
/* Set the initial ITR values */
if (qv->num_txq) {
idpf_vport_intr_update_itr_ena_irq(qv);
}
- writel(vport->noirq_dyn_ctl_ena, vport->noirq_dyn_ctl);
+ writel(rsrc->noirq_dyn_ctl_ena, rsrc->noirq_dyn_ctl);
}
/**
* idpf_vport_intr_deinit - Release all vector associations for the vport
* @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-void idpf_vport_intr_deinit(struct idpf_vport *vport)
+void idpf_vport_intr_deinit(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
- idpf_vport_intr_dis_irq_all(vport);
- idpf_vport_intr_napi_dis_all(vport);
- idpf_vport_intr_napi_del_all(vport);
- idpf_vport_intr_rel_irq(vport);
+ idpf_vport_intr_dis_irq_all(rsrc);
+ idpf_vport_intr_napi_dis_all(rsrc);
+ idpf_vport_intr_napi_del_all(rsrc);
+ idpf_vport_intr_rel_irq(vport, rsrc);
}
/**
/**
* idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
- * @vport: main vport structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
+static void idpf_vport_intr_napi_ena_all(struct idpf_q_vec_rsrc *rsrc)
{
- int q_idx;
-
- for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx];
+ for (u16 q_idx = 0; q_idx < rsrc->num_q_vectors; q_idx++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[q_idx];
idpf_init_dim(q_vector);
napi_enable(&q_vector->napi);
/**
* idpf_vport_intr_map_vector_to_qs - Map vectors to queues
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Mapping for vectors to queues
*/
-static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
+static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
u16 num_txq_grp = vport->num_txq_grp - vport->num_xdp_txq;
bool split = idpf_is_queue_model_split(vport->rxq_model);
for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
u16 num_rxq;
- if (qv_idx >= vport->num_q_vectors)
+ if (qv_idx >= rsrc->num_q_vectors)
qv_idx = 0;
rx_qgrp = &vport->rxq_grps[i];
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
- q->q_vector = &vport->q_vectors[qv_idx];
+ q->q_vector = &rsrc->q_vectors[qv_idx];
q_index = q->q_vector->num_rxq;
q->q_vector->rx[q_index] = q;
q->q_vector->num_rxq++;
struct idpf_buf_queue *bufq;
bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
- bufq->q_vector = &vport->q_vectors[qv_idx];
+ bufq->q_vector = &rsrc->q_vectors[qv_idx];
q_index = bufq->q_vector->num_bufq;
bufq->q_vector->bufq[q_index] = bufq;
bufq->q_vector->num_bufq++;
for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
u16 num_txq;
- if (qv_idx >= vport->num_q_vectors)
+ if (qv_idx >= rsrc->num_q_vectors)
qv_idx = 0;
tx_qgrp = &vport->txq_grps[i];
struct idpf_tx_queue *q;
q = tx_qgrp->txqs[j];
- q->q_vector = &vport->q_vectors[qv_idx];
+ q->q_vector = &rsrc->q_vectors[qv_idx];
q->q_vector->tx[q->q_vector->num_txq++] = q;
}
if (split) {
struct idpf_compl_queue *q = tx_qgrp->complq;
- q->q_vector = &vport->q_vectors[qv_idx];
+ q->q_vector = &rsrc->q_vectors[qv_idx];
q->q_vector->complq[q->q_vector->num_complq++] = q;
}
/**
* idpf_vport_intr_init_vec_idx - Initialize the vector indexes
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Initialize vector indexes with values returned over mailbox.
*
* Return: 0 on success, negative on failure
*/
-static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
+static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_alloc_vectors *ac;
ac = adapter->req_vec_chunks;
if (!ac) {
- for (i = 0; i < vport->num_q_vectors; i++)
- vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
+ for (i = 0; i < rsrc->num_q_vectors; i++)
+ rsrc->q_vectors[i].v_idx = rsrc->q_vector_idxs[i];
- vport->noirq_v_idx = vport->q_vector_idxs[i];
+ rsrc->noirq_v_idx = rsrc->q_vector_idxs[i];
return 0;
}
idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);
- for (i = 0; i < vport->num_q_vectors; i++)
- vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
+ for (i = 0; i < rsrc->num_q_vectors; i++)
+ rsrc->q_vectors[i].v_idx = vecids[rsrc->q_vector_idxs[i]];
- vport->noirq_v_idx = vecids[vport->q_vector_idxs[i]];
+ rsrc->noirq_v_idx = vecids[rsrc->q_vector_idxs[i]];
kfree(vecids);
/**
* idpf_vport_intr_napi_add_all- Register napi handler for all qvectors
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*/
-static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
+static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
int (*napi_poll)(struct napi_struct *napi, int budget);
- u16 v_idx, qv_idx;
int irq_num;
+ u16 qv_idx;
if (idpf_is_queue_model_split(vport->txq_model))
napi_poll = idpf_vport_splitq_napi_poll;
else
napi_poll = idpf_vport_singleq_napi_poll;
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
- qv_idx = vport->q_vector_idxs[v_idx];
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[v_idx];
+
+ qv_idx = rsrc->q_vector_idxs[v_idx];
irq_num = vport->adapter->msix_entries[qv_idx].vector;
netif_napi_add_config(vport->netdev, &q_vector->napi,
/**
* idpf_vport_intr_alloc - Allocate memory for interrupt vectors
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Allocate one q_vector per queue interrupt.
*
* Return: 0 on success, if allocation fails we return -ENOMEM.
*/
-int idpf_vport_intr_alloc(struct idpf_vport *vport)
+int idpf_vport_intr_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
struct idpf_vport_user_config_data *user_config;
struct idpf_q_vector *q_vector;
struct idpf_q_coalesce *q_coal;
- u32 complqs_per_vector, v_idx;
+ u32 complqs_per_vector;
u16 idx = vport->idx;
user_config = &vport->adapter->vport_config[idx]->user_config;
- vport->q_vectors = kcalloc(vport->num_q_vectors,
- sizeof(struct idpf_q_vector), GFP_KERNEL);
- if (!vport->q_vectors)
+
+ rsrc->q_vectors = kcalloc(rsrc->num_q_vectors,
+ sizeof(struct idpf_q_vector), GFP_KERNEL);
+ if (!rsrc->q_vectors)
return -ENOMEM;
txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
- vport->num_q_vectors);
+ rsrc->num_q_vectors);
rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp,
- vport->num_q_vectors);
+ rsrc->num_q_vectors);
bufqs_per_vector = vport->num_bufqs_per_qgrp *
DIV_ROUND_UP(vport->num_rxq_grp,
- vport->num_q_vectors);
+ rsrc->num_q_vectors);
complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
- vport->num_q_vectors);
+ rsrc->num_q_vectors);
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
- q_vector = &vport->q_vectors[v_idx];
+ for (u16 v_idx = 0; v_idx < rsrc->num_q_vectors; v_idx++) {
+ q_vector = &rsrc->q_vectors[v_idx];
q_coal = &user_config->q_coalesce[v_idx];
q_vector->vport = vport;
return 0;
error:
- idpf_vport_intr_rel(vport);
+ idpf_vport_intr_rel(rsrc);
return -ENOMEM;
}
/**
* idpf_vport_intr_init - Setup all vectors for the given vport
* @vport: virtual port
+ * @rsrc: pointer to queue and vector resources
*
* Return: 0 on success or negative on failure
*/
-int idpf_vport_intr_init(struct idpf_vport *vport)
+int idpf_vport_intr_init(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
{
int err;
- err = idpf_vport_intr_init_vec_idx(vport);
+ err = idpf_vport_intr_init_vec_idx(vport, rsrc);
if (err)
return err;
- idpf_vport_intr_map_vector_to_qs(vport);
- idpf_vport_intr_napi_add_all(vport);
+ idpf_vport_intr_map_vector_to_qs(vport, rsrc);
+ idpf_vport_intr_napi_add_all(vport, rsrc);
- err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
+ err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport, rsrc);
if (err)
goto unroll_vectors_alloc;
- err = idpf_vport_intr_req_irq(vport);
+ err = idpf_vport_intr_req_irq(vport, rsrc);
if (err)
goto unroll_vectors_alloc;
return 0;
unroll_vectors_alloc:
- idpf_vport_intr_napi_del_all(vport);
+ idpf_vport_intr_napi_del_all(rsrc);
return err;
}
-void idpf_vport_intr_ena(struct idpf_vport *vport)
+void idpf_vport_intr_ena(struct idpf_vport *vport, struct idpf_q_vec_rsrc *rsrc)
{
- idpf_vport_intr_napi_ena_all(vport);
- idpf_vport_intr_ena_irq_all(vport);
+ idpf_vport_intr_napi_ena_all(rsrc);
+ idpf_vport_intr_ena_irq_all(vport, rsrc);
}
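
Taken together, the interrupt path now threads the resource handle through every stage. A condensed, hypothetical sketch of the order used by the idpf_vport_open()/idpf_vport_stop() hunks above (wrapper name made up, error handling trimmed to the essentials):

        static int example_intr_bringup(struct idpf_vport *vport)
        {
                struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
                int err;

                err = idpf_vport_intr_alloc(vport, rsrc);
                if (err)
                        return err;

                err = idpf_vport_intr_init(vport, rsrc);
                if (err) {
                        idpf_vport_intr_rel(rsrc);
                        return err;
                }

                idpf_vport_intr_ena(vport, rsrc);

                return 0;
        }

On teardown the same handle is passed back through idpf_vport_intr_deinit(vport, rsrc) followed by idpf_vport_intr_rel(rsrc), as in idpf_vport_stop() above.
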
/**
void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
int idpf_vport_queues_alloc(struct idpf_vport *vport);
void idpf_vport_queues_rel(struct idpf_vport *vport);
-void idpf_vport_intr_rel(struct idpf_vport *vport);
-int idpf_vport_intr_alloc(struct idpf_vport *vport);
+void idpf_vport_intr_rel(struct idpf_q_vec_rsrc *rsrc);
+int idpf_vport_intr_alloc(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
-void idpf_vport_intr_deinit(struct idpf_vport *vport);
-int idpf_vport_intr_init(struct idpf_vport *vport);
-void idpf_vport_intr_ena(struct idpf_vport *vport);
+void idpf_vport_intr_deinit(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+int idpf_vport_intr_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
+void idpf_vport_intr_ena(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
void idpf_fill_dflt_rss_lut(struct idpf_vport *vport);
int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss_lut(struct idpf_vport *vport);
/**
* idpf_vf_intr_reg_init - Initialize interrupt registers
* @vport: virtual port structure
+ * @rsrc: pointer to queue and vector resources
*/
-static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
+static int idpf_vf_intr_reg_init(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_adapter *adapter = vport->adapter;
- int num_vecs = vport->num_q_vectors;
+ u16 num_vecs = rsrc->num_q_vectors;
struct idpf_vec_regs *reg_vals;
int num_regs, i, err = 0;
u32 rx_itr, tx_itr, val;
}
for (i = 0; i < num_vecs; i++) {
- struct idpf_q_vector *q_vector = &vport->q_vectors[i];
- u16 vec_id = vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
+ struct idpf_q_vector *q_vector = &rsrc->q_vectors[i];
+ u16 vec_id = rsrc->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
struct idpf_intr_reg *intr = &q_vector->intr_reg;
u32 spacing;
/* Data vector for NOIRQ queues */
- val = reg_vals[vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
- vport->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
+ val = reg_vals[rsrc->q_vector_idxs[i] - IDPF_MBX_Q_VEC].dyn_ctl_reg;
+ rsrc->noirq_dyn_ctl = idpf_get_reg_addr(adapter, val);
val = VF_INT_DYN_CTLN_WB_ON_ITR_M | VF_INT_DYN_CTLN_INTENA_MSK_M |
FIELD_PREP(VF_INT_DYN_CTLN_ITR_INDX_M, IDPF_NO_ITR_UPDATE_IDX);
- vport->noirq_dyn_ctl_ena = val;
+ rsrc->noirq_dyn_ctl_ena = val;
free_reg_vals:
kfree(reg_vals);
u32 vc_op;
};
-struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num)
+struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *qv_rsrc,
+ u32 num)
{
struct idpf_queue_set *qp;
return NULL;
qp->vport = vport;
+ qp->qv_rsrc = qv_rsrc;
qp->num = num;
return qp;
{
struct idpf_queue_set *qs __free(kfree) = NULL;
- qs = idpf_alloc_queue_set(vport, vport->num_txq);
+ qs = idpf_alloc_queue_set(vport, &vport->dflt_qv_rsrc, vport->num_txq);
if (!qs)
return -ENOMEM;
u32 totqs = vport->num_txq + vport->num_complq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, totqs);
+ qs = idpf_alloc_queue_set(vport, &vport->dflt_qv_rsrc, totqs);
if (!qs)
return -ENOMEM;
u32 totqs = vport->num_rxq + vport->num_bufq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, totqs);
+ qs = idpf_alloc_queue_set(vport, &vport->dflt_qv_rsrc, totqs);
if (!qs)
return -ENOMEM;
num_txq = vport->num_txq + vport->num_complq;
num_q = num_txq + vport->num_rxq + vport->num_bufq;
- qs = idpf_alloc_queue_set(vport, num_q);
+ qs = idpf_alloc_queue_set(vport, &vport->dflt_qv_rsrc, num_q);
if (!qs)
return -ENOMEM;
v_idx = vec->v_idx;
itr_idx = vec->rx_itr_idx;
} else {
- v_idx = qs->vport->noirq_v_idx;
+ v_idx = qs->qv_rsrc->noirq_v_idx;
itr_idx = VIRTCHNL2_ITR_IDX_0;
}
break;
v_idx = vec->v_idx;
itr_idx = vec->tx_itr_idx;
} else {
- v_idx = qs->vport->noirq_v_idx;
+ v_idx = qs->qv_rsrc->noirq_v_idx;
itr_idx = VIRTCHNL2_ITR_IDX_1;
}
break;
u32 num_q = vport->num_txq + vport->num_rxq;
u32 k = 0;
- qs = idpf_alloc_queue_set(vport, num_q);
+ qs = idpf_alloc_queue_set(vport, &vport->dflt_qv_rsrc, num_q);
if (!qs)
return -ENOMEM;
/**
* idpf_vport_alloc_vec_indexes - Get relative vector indexes
* @vport: virtual port data struct
+ * @rsrc: pointer to queue and vector resources
*
* This function requests the vector information required for the vport and
* stores the vector indexes received from the 'global vector distribution'
* in the vport's queue vectors array.
*
- * Return 0 on success, error on failure
+ * Return: 0 on success, error on failure
*/
-int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc)
{
struct idpf_vector_info vec_info;
int num_alloc_vecs;
u32 req;
- vec_info.num_curr_vecs = vport->num_q_vectors;
+ vec_info.num_curr_vecs = rsrc->num_q_vectors;
if (vec_info.num_curr_vecs)
vec_info.num_curr_vecs += IDPF_RESERVED_VECS;
vec_info.index = vport->idx;
num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter,
- vport->q_vector_idxs,
+ rsrc->q_vector_idxs,
&vec_info);
if (num_alloc_vecs <= 0) {
dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n",
return -EINVAL;
}
- vport->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS;
+ rsrc->num_q_vectors = num_alloc_vecs - IDPF_RESERVED_VECS;
return 0;
}
*/
int idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
{
+ struct idpf_q_vec_rsrc *rsrc = &vport->dflt_qv_rsrc;
struct idpf_adapter *adapter = vport->adapter;
struct virtchnl2_create_vport *vport_msg;
struct idpf_vport_config *vport_config;
idpf_vport_init_num_qs(vport, vport_msg);
idpf_vport_calc_num_q_desc(vport);
idpf_vport_calc_num_q_groups(vport);
- idpf_vport_alloc_vec_indexes(vport);
+ idpf_vport_alloc_vec_indexes(vport, rsrc);
vport->crc_enable = adapter->crc_enable;
struct idpf_queue_set {
struct idpf_vport *vport;
+ struct idpf_q_vec_rsrc *qv_rsrc;
u32 num;
struct idpf_queue_ptr qs[] __counted_by(num);
};
-struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport, u32 num);
+struct idpf_queue_set *idpf_alloc_queue_set(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc,
+ u32 num);
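
For completeness, a hedged usage sketch of the new allocation signature; the wrapper name is invented here, and the existing call sites in the hunks above do the same thing inline with the default resources:

        /* Hypothetical caller: allocate a queue set tied to the vport's
         * default queue and vector resources.
         */
        static struct idpf_queue_set *
        example_alloc_dflt_queue_set(struct idpf_vport *vport, u32 num)
        {
                return idpf_alloc_queue_set(vport, &vport->dflt_qv_rsrc, num);
        }
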
int idpf_send_enable_queue_set_msg(const struct idpf_queue_set *qs);
int idpf_send_disable_queue_set_msg(const struct idpf_queue_set *qs);
int idpf_send_delete_queues_msg(struct idpf_vport *vport,
struct idpf_queue_id_reg_info *chunks);
-int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport,
+ struct idpf_q_vec_rsrc *rsrc);
int idpf_get_vec_ids(struct idpf_adapter *adapter,
u16 *vecids, int num_vecids,
struct virtchnl2_vector_chunks *chunks);