#endif
}
+/* Return the zero-based position of @node in the TC flow list,
+ * or -1 when the node is not present.
+ */
+static int cn20k_tc_get_entry_index(struct otx2_flow_config *flow_cfg,
+				    struct otx2_tc_flow *node)
+{
+	struct otx2_tc_flow *iter;
+	int idx = 0;
+
+	list_for_each_entry(iter, &flow_cfg->flow_list_tc, list) {
+		if (iter == node)
+			return idx;
+		idx++;
+	}
+
+	return -1;
+}
+
+/* Ask the AF, over the mailbox, to free the given MCAM entry.
+ *
+ * Returns 0 on success, -ENOMEM if the mailbox message cannot be
+ * allocated, or the mailbox error code from otx2_sync_mbox_msg().
+ */
+int cn20k_tc_free_mcam_entry(struct otx2_nic *nic, u16 entry)
+{
+	struct npc_mcam_free_entry_req *req;
+	int err;
+
+	mutex_lock(&nic->mbox.lock);
+	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&nic->mbox);
+	if (!req) {
+		err = -ENOMEM;
+		goto unlock;
+	}
+
+	req->entry = entry;
+	/* Send message to AF to free MCAM entries */
+	err = otx2_sync_mbox_msg(&nic->mbox);
+unlock:
+	/* Single unlock path instead of unlocking on every return */
+	mutex_unlock(&nic->mbox.lock);
+
+	return err;
+}
+
+/* Check whether all rules from the head of the TC flow list up to and
+ * including @index share the same MCAM key type (X2 or X4).  Entries can
+ * only be shifted to adjacent MCAM slots when every key type matches;
+ * a mix of X2 and X4 entries cannot be compacted.
+ *
+ * @error selects whether a mismatch is reported via dev_err().
+ * Returns true when shifting is possible, false otherwise.
+ */
+static bool cn20k_tc_check_entry_shiftable(struct otx2_nic *nic,
+					   struct otx2_flow_config *flow_cfg,
+					   struct otx2_tc_flow *node, int index,
+					   bool error)
+{
+	struct otx2_tc_flow *first, *tmp;
+	u32 prio = 0;
+	int i = 0;
+	u8 type;
+
+	first = list_first_entry(&flow_cfg->flow_list_tc, struct otx2_tc_flow,
+				 list);
+	type = first->kw_type;
+
+	/* Check all the nodes from start to given index (including index) has
+	 * same type i.e, either X2 or X4.  Nothing is removed while walking,
+	 * so the plain (non-_safe) iterator is sufficient.
+	 */
+	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
+		if (i > index)
+			break;
+
+		if (type != tmp->kw_type) {
+			/* List has both X2 and X4 entries so entries cannot be
+			 * shifted to save MCAM space.
+			 */
+			if (error)
+				dev_err(nic->dev, "Rule %d cannot be shifted to %d\n",
+					tmp->prio, prio);
+			return false;
+		}
+
+		type = tmp->kw_type;
+		prio = tmp->prio;
+		i++;
+	}
+
+	return true;
+}
+
+/* Remove @node from the TC flow list on cn20k and compact the MCAM slots.
+ *
+ * When the entries up to @node cannot be shifted (mixed X2/X4 key types),
+ * @node is simply unlinked and its own MCAM entry is freed to the AF.
+ * Otherwise each rule ahead of the deleted one inherits its successor's
+ * MCAM slot and is re-installed, so the slot that becomes free is the
+ * list head's original entry, which is then returned to the AF.
+ */
+void cn20k_tc_update_mcam_table_del_req(struct otx2_nic *nic,
+ struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node)
+{
+ struct otx2_tc_flow *first, *tmp, *n;
+ int i = 0, index;
+ u16 cntr_val = 0;
+ u16 entry;
+
+ index = cn20k_tc_get_entry_index(flow_cfg, node);
+ if (index < 0) {
+ netdev_dbg(nic->netdev, "Could not find node\n");
+ return;
+ }
+
+ first = list_first_entry(&flow_cfg->flow_list_tc, struct otx2_tc_flow,
+ list);
+ /* Default slot to free: the head's entry vacated by the shift below */
+ entry = first->entry;
+
+ /* If entries cannot be shifted then delete given entry
+ * and free it to AF too.
+ */
+ if (!cn20k_tc_check_entry_shiftable(nic, flow_cfg, node,
+ index, false)) {
+ list_del(&node->list);
+ entry = node->entry;
+ goto free_mcam_entry;
+ }
+
+ /* Find and delete the entry from the list and re-install
+ * all the entries from beginning to the index of the
+ * deleted entry to higher mcam indexes.
+ */
+ list_for_each_entry_safe(tmp, n, &flow_cfg->flow_list_tc, list) {
+ if (node == tmp) {
+ list_del(&tmp->list);
+ break;
+ }
+
+ /* Each rule before @node takes over its successor's MCAM slot;
+ * cntr_val carries the counter value of the entry just removed.
+ */
+ otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
+ tmp->entry = (list_next_entry(tmp, list))->entry;
+ tmp->req.entry = tmp->entry;
+ tmp->req.cntr_val = cntr_val;
+ }
+
+ /* Re-install the first @index rules at their new slots */
+ list_for_each_entry_safe(tmp, n, &flow_cfg->flow_list_tc, list) {
+ if (i == index)
+ break;
+
+ otx2_add_mcam_flow_entry(nic, &tmp->req);
+ i++;
+ }
+
+free_mcam_entry:
+ if (cn20k_tc_free_mcam_entry(nic, entry))
+ netdev_err(nic->netdev, "Freeing entry %d to AF failed\n",
+ entry);
+}
+
+/* Insert @node (whose MCAM entry was already allocated) at its priority
+ * position and shift higher-priority rules into higher MCAM slots.
+ *
+ * Returns the MCAM entry index the caller should install the new rule at
+ * (>= 0), or a negative errno when shifting is impossible / lookup fails.
+ */
+int cn20k_tc_update_mcam_table_add_req(struct otx2_nic *nic,
+ struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node)
+{
+ struct otx2_tc_flow *tmp;
+ u16 cntr_val = 0;
+ int list_idx, i;
+ int entry, prev;
+
+ /* Find the index of the entry(list_idx) whose priority
+ * is greater than the new entry and re-install all
+ * the entries from beginning to list_idx to higher
+ * mcam indexes.
+ */
+ list_idx = otx2_tc_add_to_flow_list(flow_cfg, node);
+ entry = node->entry;
+ if (!cn20k_tc_check_entry_shiftable(nic, flow_cfg, node,
+ list_idx, true))
+ /* Due to mix of X2 and X4, entries cannot be shifted.
+ * In this case free the entry allocated for this rule.
+ * NOTE(review): the actual free happens in the caller's
+ * error path — confirm against the add-flow error handling.
+ */
+ return -EINVAL;
+
+ for (i = 0; i < list_idx; i++) {
+ tmp = otx2_tc_get_entry_by_index(flow_cfg, i);
+ if (!tmp)
+ /* NOTE(review): -ENOENT may describe a missing list
+ * node better than -ENOMEM — confirm intent.
+ */
+ return -ENOMEM;
+
+ /* Rotate: rule i moves into @entry; its old slot becomes the
+ * @entry for the next iteration, so the last freed slot is
+ * returned for the new rule.
+ */
+ otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
+ prev = tmp->entry;
+ tmp->entry = entry;
+ tmp->req.entry = tmp->entry;
+ tmp->req.cntr_val = cntr_val;
+ otx2_add_mcam_flow_entry(nic, &tmp->req);
+ entry = prev;
+ }
+
+ return entry;
+}
+
+#define MAX_TC_HW_PRIORITY 125
+#define MAX_TC_VF_PRIORITY 126
+#define MAX_TC_PF_PRIORITY 127
+
+/* Allocate an MCAM entry from the AF for a new TC rule.
+ *
+ * Sends @flow_req with alloc_entry set; on success the allocated entry
+ * index and its key type are returned through @entry and @type.
+ * Caller must hold nic->mbox.lock.
+ *
+ * NOTE(review): @tc_priority is currently unused here — confirm whether
+ * it is reserved for future use or can be dropped.
+ */
+static int __cn20k_tc_alloc_entry(struct otx2_nic *nic,
+ struct npc_install_flow_req *flow_req,
+ u16 *entry, u8 *type,
+ u32 tc_priority, bool hw_priority)
+{
+ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+ struct npc_install_flow_req *req;
+ struct npc_install_flow_rsp *rsp;
+ struct otx2_tc_flow *tmp;
+ int ret = 0;
+
+ req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ /* Preserve the freshly initialized mbox header: copy it into the
+ * caller's template first, then copy the whole template over @req.
+ */
+ memcpy(&flow_req->hdr, &req->hdr, sizeof(struct mbox_msghdr));
+ memcpy(req, flow_req, sizeof(struct npc_install_flow_req));
+ req->alloc_entry = 1;
+
+ /* Allocate very least priority for first rule */
+ if (hw_priority || list_empty(&flow_cfg->flow_list_tc)) {
+ req->ref_prio = NPC_MCAM_LEAST_PRIO;
+ } else {
+ /* Subsequent rules: allocate at higher priority than the
+ * current head of the TC flow list.
+ */
+ req->ref_prio = NPC_MCAM_HIGHER_PRIO;
+ tmp = list_first_entry(&flow_cfg->flow_list_tc,
+ struct otx2_tc_flow, list);
+ req->ref_entry = tmp->entry;
+ }
+
+ ret = otx2_sync_mbox_msg(&nic->mbox);
+ if (ret)
+ return ret;
+
+ rsp = (struct npc_install_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp))
+ return -EFAULT;
+
+ if (entry)
+ *entry = rsp->entry;
+ if (type)
+ *type = rsp->kw_type;
+
+ return ret;
+}
+
+/* Allocate an MCAM entry for a new TC rule and record the entry index
+ * and key type in @new_node.  Returns 0 on success or a negative errno.
+ */
+int cn20k_tc_alloc_entry(struct otx2_nic *nic,
+			 struct flow_cls_offload *tc_flow_cmd,
+			 struct otx2_tc_flow *new_node,
+			 struct npc_install_flow_req *flow_req)
+{
+	bool hw_priority;
+	u16 entry_from_af;
+	u8 entry_type;
+	int ret;
+
+	/* Rules within the HW priority range keep their TC priority in
+	 * hardware; everything else gets the PF/VF default priority.
+	 */
+	hw_priority = new_node->prio <= MAX_TC_HW_PRIORITY;
+	if (hw_priority)
+		flow_req->hw_prio = new_node->prio;
+	else if (is_otx2_vf(nic->pcifunc))
+		flow_req->hw_prio = MAX_TC_VF_PRIORITY;
+	else
+		flow_req->hw_prio = MAX_TC_PF_PRIORITY;
+
+	mutex_lock(&nic->mbox.lock);
+	ret = __cn20k_tc_alloc_entry(nic, flow_req, &entry_from_af,
+				     &entry_type, new_node->prio,
+				     hw_priority);
+	if (!ret) {
+		new_node->kw_type = entry_type;
+		new_node->entry = entry_from_af;
+	}
+	mutex_unlock(&nic->mbox.lock);
+
+	return ret;
+}
+
static int cn20k_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
int pool_id, int numptrs)
{
#define MCAST_INVALID_GRP (-1U)
-struct otx2_tc_flow_stats {
- u64 bytes;
- u64 pkts;
- u64 used;
-};
-
-struct otx2_tc_flow {
- struct list_head list;
- unsigned long cookie;
- struct rcu_head rcu;
- struct otx2_tc_flow_stats stats;
- spinlock_t lock; /* lock for stats */
- u16 rq;
- u16 entry;
- u16 leaf_profile;
- bool is_act_police;
- u32 prio;
- struct npc_install_flow_req req;
- u32 mcast_grp_idx;
- u64 rate;
- u32 burst;
- bool is_pps;
-};
-
static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
u32 *burst_exp, u32 *burst_mantissa)
{
}
}
-static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
- unsigned long cookie)
+static struct otx2_tc_flow *
+otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
+ unsigned long cookie)
{
struct otx2_tc_flow *tmp;
return NULL;
}
-static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
- int index)
+struct otx2_tc_flow *
+otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg, int index)
{
struct otx2_tc_flow *tmp;
int i = 0;
}
}
-static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
- struct otx2_tc_flow *node)
+int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
+ struct otx2_tc_flow *node)
{
struct list_head *pos, *n;
struct otx2_tc_flow *tmp;
return index;
}
-static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req)
+int otx2_add_mcam_flow_entry(struct otx2_nic *nic,
+ struct npc_install_flow_req *req)
{
struct npc_install_flow_req *tmp_req;
int err;
return 0;
}
-static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
+int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
{
struct npc_delete_flow_rsp *rsp;
struct npc_delete_flow_req *req;
int i = 0, index = 0;
u16 cntr_val = 0;
+ if (is_cn20k(nic->pdev)) {
+ cn20k_tc_update_mcam_table_del_req(nic, flow_cfg, node);
+ return 0;
+ }
+
/* Find and delete the entry from the list and re-install
* all the entries from beginning to the index of the
* deleted entry to higher mcam indexes.
int list_idx, i;
u16 cntr_val = 0;
+ if (is_cn20k(nic->pdev))
+ return cn20k_tc_update_mcam_table_add_req(nic, flow_cfg, node);
+
/* Find the index of the entry(list_idx) whose priority
* is greater than the new entry and re-install all
* the entries from beginning to list_idx to higher
mcam_idx++;
}
- return mcam_idx;
+ return flow_cfg->flow_ent[mcam_idx];
}
static int otx2_tc_update_mcam_table(struct otx2_nic *nic,
mutex_unlock(&nic->mbox.lock);
}
-
free_mcam_flow:
otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
struct otx2_flow_config *flow_cfg = nic->flow_cfg;
struct otx2_tc_flow *new_node, *old_node;
struct npc_install_flow_req *req, dummy;
- int rc, err, mcam_idx;
+ int rc, err, entry;
if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
return -ENOMEM;
return -EINVAL;
}
- if (flow_cfg->nr_flows == flow_cfg->max_flows) {
+ if (!is_cn20k(nic->pdev) && flow_cfg->nr_flows == flow_cfg->max_flows) {
NL_SET_ERR_MSG_MOD(extack,
"Free MCAM entry not available to add the flow");
return -ENOMEM;
if (old_node)
otx2_tc_del_flow(nic, tc_flow_cmd);
- mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true);
+ if (is_cn20k(nic->pdev)) {
+ rc = cn20k_tc_alloc_entry(nic, tc_flow_cmd, new_node, &dummy);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "MCAM rule allocation failed");
+ kfree_rcu(new_node, rcu);
+ return rc;
+ }
+ }
+
+ entry = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true);
+ if (entry < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Adding rule failed");
+ rc = entry;
+ goto free_leaf;
+ }
+
mutex_lock(&nic->mbox.lock);
req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
if (!req) {
memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
req->channel = nic->hw.rx_chan_base;
- req->entry = flow_cfg->flow_ent[mcam_idx];
+ req->entry = (u16)entry;
req->intf = NIX_INTF_RX;
req->vf = nic->pcifunc;
req->set_cntr = 1;
return 0;
free_leaf:
+ if (is_cn20k(nic->pdev))
+ cn20k_tc_free_mcam_entry(nic, new_node->entry);
otx2_tc_del_from_flow_list(flow_cfg, new_node);
if (new_node->is_act_police) {
mutex_lock(&nic->mbox.lock);