drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "en/tc_priv.h"
#include "en_tc.h"
#include "post_act.h"
#include "mlx5_core.h"
#include "fs_core.h"

struct mlx5e_post_act {
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5_fs_chains *chains;
	struct mlx5_flow_table *ft;
	struct mlx5e_priv *priv;
	struct xarray ids;
};

struct mlx5e_post_act_handle {
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5_flow_attr *attr;
	struct mlx5_flow_handle *rule;
	u32 id;
};

#define MLX5_POST_ACTION_BITS MLX5_REG_MAPPING_MBITS(FTEID_TO_REG)
#define MLX5_POST_ACTION_MASK MLX5_REG_MAPPING_MASK(FTEID_TO_REG)
#define MLX5_POST_ACTION_MAX MLX5_POST_ACTION_MASK
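
/*
 * Editorial note on the defines above: post action handle ids travel in
 * the FTEID_TO_REG metadata register, and MLX5_POST_ACTION_MASK is that
 * register mapping's value mask. xa_alloc() below therefore hands out
 * ids in [1, MLX5_POST_ACTION_MAX], so every allocated id fits the
 * register and can be matched back verbatim. The MLX5_REG_MAPPING_*
 * helpers are the generic tc register-mapping macros (see en_tc.h).
 */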

struct mlx5e_post_act *
mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
		       enum mlx5_flow_namespace_type ns_type)
{
	enum fs_flow_table_type table_type = ns_type == MLX5_FLOW_NAMESPACE_FDB ?
					     FS_FT_FDB : FS_FT_NIC_RX;
	struct mlx5e_post_act *post_act;
	int err;

	if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
		if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
			mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
		err = -EOPNOTSUPP;
		goto err_check;
	}

	post_act = kzalloc(sizeof(*post_act), GFP_KERNEL);
	if (!post_act) {
		err = -ENOMEM;
		goto err_check;
	}
	post_act->ft = mlx5_chains_create_global_table(chains);
	if (IS_ERR(post_act->ft)) {
		err = PTR_ERR(post_act->ft);
		mlx5_core_warn(priv->mdev, "failed to create post action table, err: %d\n", err);
		goto err_ft;
	}
	post_act->chains = chains;
	post_act->ns_type = ns_type;
	post_act->priv = priv;
	xa_init_flags(&post_act->ids, XA_FLAGS_ALLOC1);
	return post_act;

err_ft:
	kfree(post_act);
err_check:
	return ERR_PTR(err);
}
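
/*
 * Illustrative sketch only (the caller and variable names here are
 * hypothetical, not taken from a real call site): one instance is
 * created per namespace, e.g.
 *
 *	post_act = mlx5e_tc_post_act_init(priv, chains,
 *					  MLX5_FLOW_NAMESPACE_FDB);
 *	if (IS_ERR(post_act))
 *		return PTR_ERR(post_act);
 *
 * and torn down with mlx5e_tc_post_act_destroy(), which tolerates an
 * ERR_PTR or NULL argument, so unwind paths need no extra checks.
 */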

void
mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act)
{
	if (IS_ERR_OR_NULL(post_act))
		return;

	xa_destroy(&post_act->ids);
	mlx5_chains_destroy_global_table(post_act->chains, post_act->ft);
	kfree(post_act);
}

int
mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
			  struct mlx5e_post_act_handle *handle)
{
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* The post action rule matches on the fte_id and executes the original rule's actions */
	mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG, handle->id, MLX5_POST_ACTION_MASK);

	handle->rule = mlx5e_tc_rule_offload(post_act->priv, spec, handle->attr);
	if (IS_ERR(handle->rule)) {
		err = PTR_ERR(handle->rule);
		netdev_warn(post_act->priv->netdev, "Failed to add post action rule\n");
		goto err_rule;
	}

	kvfree(spec);
	return 0;

err_rule:
	kvfree(spec);
	return err;
}
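
/*
 * Editorial note: the match programmed above only fires if the original
 * rule's modify-header actions wrote handle->id into FTEID_TO_REG,
 * which is what mlx5e_tc_post_act_set_handle() at the bottom of this
 * file arranges.
 */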

struct mlx5e_post_act_handle *
mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *post_attr)
{
	struct mlx5e_post_act_handle *handle;
	int err;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	post_attr->chain = 0;
	post_attr->prio = 0;
	post_attr->ft = post_act->ft;
	post_attr->inner_match_level = MLX5_MATCH_NONE;
	post_attr->outer_match_level = MLX5_MATCH_NONE;
	post_attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_DECAP;
	post_attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;

	handle->ns_type = post_act->ns_type;
	/* Splits were handled before post action */
	if (handle->ns_type == MLX5_FLOW_NAMESPACE_FDB)
		post_attr->esw_attr->split_count = 0;

	err = xa_alloc(&post_act->ids, &handle->id, post_attr,
		       XA_LIMIT(1, MLX5_POST_ACTION_MAX), GFP_KERNEL);
	if (err)
		goto err_xarray;

	handle->attr = post_attr;

	return handle;

err_xarray:
	kfree(handle);
	return ERR_PTR(err);
}

void
mlx5e_tc_post_act_unoffload(struct mlx5e_post_act *post_act,
			    struct mlx5e_post_act_handle *handle)
{
	mlx5e_tc_rule_unoffload(post_act->priv, handle->rule, handle->attr);
	handle->rule = NULL;
}

void
mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle)
{
	if (!IS_ERR_OR_NULL(handle->rule))
		mlx5e_tc_post_act_unoffload(post_act, handle);
	xa_erase(&post_act->ids, handle->id);
	kfree(handle);
}
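
/*
 * Teardown note: mlx5e_tc_post_act_del() removes the hardware rule (if
 * one was offloaded) before xa_erase() returns the id to the allocator,
 * so an id is never recycled while a rule still matches on it.
 */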

struct mlx5_flow_table *
mlx5e_tc_post_act_get_ft(struct mlx5e_post_act *post_act)
{
	return post_act->ft;
}

/* Allocate a header modify action to write the post action handle fte id to a register. */
int
mlx5e_tc_post_act_set_handle(struct mlx5_core_dev *dev,
			     struct mlx5e_post_act_handle *handle,
			     struct mlx5e_tc_mod_hdr_acts *acts)
{
	return mlx5e_tc_match_to_reg_set(dev, acts, handle->ns_type, FTEID_TO_REG, handle->id);
}
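
/*
 * Putting it together, a minimal sketch of the expected lifecycle of a
 * handle (variable names are hypothetical, not from a real caller):
 *
 *	struct mlx5e_post_act_handle *ph;
 *
 *	ph = mlx5e_tc_post_act_add(post_act, post_attr);
 *	if (IS_ERR(ph))
 *		return PTR_ERR(ph);
 *
 *	// Make the original rule write ph's fte id into FTEID_TO_REG...
 *	err = mlx5e_tc_post_act_set_handle(mdev, ph, mod_hdr_acts);
 *
 *	// ...then install the rule that matches on that id.
 *	err = mlx5e_tc_post_act_offload(post_act, ph);
 *
 *	// On removal, del unoffloads the rule itself if still offloaded.
 *	mlx5e_tc_post_act_del(post_act, ph);
 */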