]>
Commit | Line | Data |
---|---|---|
073bb189 SM |
1 | /* |
2 | * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | ||
33 | #ifndef __MLX5_ESWITCH_H__ | |
34 | #define __MLX5_ESWITCH_H__ | |
35 | ||
77256579 SM |
36 | #include <linux/if_ether.h> |
37 | #include <linux/if_link.h> | |
525e84be | 38 | #include <linux/atomic.h> |
84ae9c1f | 39 | #include <linux/xarray.h> |
feae9087 | 40 | #include <net/devlink.h> |
073bb189 | 41 | #include <linux/mlx5/device.h> |
57cbd893 | 42 | #include <linux/mlx5/eswitch.h> |
a1b3839a | 43 | #include <linux/mlx5/vport.h> |
cc495188 | 44 | #include <linux/mlx5/fs.h> |
eeb66cdb | 45 | #include "lib/mpfs.h" |
ae430332 | 46 | #include "lib/fs_chains.h" |
d7f33a45 | 47 | #include "sf/sf.h" |
4c3844d9 | 48 | #include "en/tc_ct.h" |
0027d70c | 49 | #include "en/tc/sample.h" |
073bb189 | 50 | |
a91d98a0 CM |
/* Kind of object recovered from a packet's mapped (reg_c0) metadata;
 * selects the valid member of struct mlx5_mapped_obj's union.
 */
enum mlx5_mapped_obj_type {
	MLX5_MAPPED_OBJ_CHAIN,
	MLX5_MAPPED_OBJ_SAMPLE,
	MLX5_MAPPED_OBJ_INT_PORT_METADATA,
	MLX5_MAPPED_OBJ_ACT_MISS,
};
57 | ||
/* Object restored from mapped packet metadata; 'type' discriminates the
 * union below.
 */
struct mlx5_mapped_obj {
	enum mlx5_mapped_obj_type type;
	union {
		u32 chain;		/* MLX5_MAPPED_OBJ_CHAIN */
		u64 act_miss_cookie;	/* MLX5_MAPPED_OBJ_ACT_MISS */
		struct {		/* MLX5_MAPPED_OBJ_SAMPLE */
			u32 group_id;
			u32 rate;
			u32 trunc_size;
			u32 tunnel_id;
		} sample;
		u32 int_port_metadata;	/* MLX5_MAPPED_OBJ_INT_PORT_METADATA */
	};
};
72 | ||
e80541ec SM |
73 | #ifdef CONFIG_MLX5_ESWITCH |
74 | ||
87dac697 JL |
75 | #define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15 |
76 | ||
073bb189 SM |
77 | #define MLX5_MAX_UC_PER_VPORT(dev) \ |
78 | (1 << MLX5_CAP_GEN(dev, log_max_current_uc_list)) | |
79 | ||
80 | #define MLX5_MAX_MC_PER_VPORT(dev) \ | |
81 | (1 << MLX5_CAP_GEN(dev, log_max_current_mc_list)) | |
82 | ||
a842dd04 CM |
83 | #define mlx5_esw_has_fwd_fdb(dev) \ |
84 | MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table) | |
85 | ||
ae430332 AL |
86 | #define esw_chains(esw) \ |
87 | ((esw)->fdb_table.offloads.esw_chains_priv) | |
88 | ||
2198b932 RD |
/* Identifiers for the distinct mapping contexts used by eswitch offloads
 * (chain ids, tunnel ids, tunnel encap options, ct labels, ct zones,
 * internal ports).
 */
enum {
	MAPPING_TYPE_CHAIN,
	MAPPING_TYPE_TUNNEL,
	MAPPING_TYPE_TUNNEL_ENC_OPTS,
	MAPPING_TYPE_LABELS,
	MAPPING_TYPE_ZONE,
	MAPPING_TYPE_INT_PORT,
};
97 | ||
5742df0f MHY |
/* Per-vport ingress ACL state. 'legacy' and 'offloads' hold the flow
 * groups/rules used in the respective eswitch modes.
 */
struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add default match-all FTE entry to tag ingress
		 * packet with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		/* Optional group to add a drop all rule */
		struct mlx5_flow_group *drop_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
		struct mlx5_flow_handle *drop_rule;
	} offloads;
};
125 | ||
5e0202eb SD |
/* Flavor of the egress ACL programmed for a vport. */
enum vport_egress_acl_type {
	VPORT_EGRESS_ACL_TYPE_DEFAULT,
	VPORT_EGRESS_ACL_TYPE_SHARED_FDB,
};
130 | ||
5742df0f MHY |
/* Per-vport egress ACL state; the union member in use depends on the
 * current eswitch mode (legacy vs. switchdev offloads).
 */
struct vport_egress {
	struct mlx5_flow_table *acl;
	enum vport_egress_acl_type type;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
			/* bounce rules indexed in an xarray; see bounce_grp */
			struct xarray bounce_rules;
			struct mlx5_flow_group *bounce_grp;
		} offloads;
	};
};
150 | ||
/* Packet drop counters reported for a vport. */
struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};
155 | ||
1ab2068a MHY |
/* Administrative (user-configured) per-vport settings. */
struct mlx5_vport_info {
	u8 mac[ETH_ALEN];
	u16 vlan;
	u64 node_guid;
	int link_state;
	u8 qos;
	/* single-bit feature toggles packed into one byte */
	u8 spoofchk: 1;
	u8 trusted: 1;
	u8 roce_enabled: 1;
	u8 mig_enabled: 1;
	u8 ipsec_crypto_enabled: 1;
	u8 ipsec_packet_enabled: 1;
};
169 | ||
5019833d PP |
/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	/* NOTE(review): BIT(2) is skipped here — presumably reserved by the
	 * firmware event mask; confirm before reusing it.
	 */
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};
176 | ||
7d833520 JP |
177 | struct mlx5_vport; |
178 | ||
2c5f33f6 JP |
/* Wrapper tying a devlink_port to the eswitch vport that backs it. */
struct mlx5_devlink_port {
	struct devlink_port dl_port;
	struct mlx5_vport *vport;
};
183 | ||
7d833520 JP |
184 | static inline void mlx5_devlink_port_init(struct mlx5_devlink_port *dl_port, |
185 | struct mlx5_vport *vport) | |
186 | { | |
187 | dl_port->vport = vport; | |
188 | } | |
189 | ||
190 | static inline struct mlx5_devlink_port *mlx5_devlink_port_get(struct devlink_port *dl_port) | |
191 | { | |
192 | return container_of(dl_port, struct mlx5_devlink_port, dl_port); | |
193 | } | |
194 | ||
195 | static inline struct mlx5_vport *mlx5_devlink_port_vport_get(struct devlink_port *dl_port) | |
196 | { | |
197 | return mlx5_devlink_port_get(dl_port)->vport; | |
198 | } | |
199 | ||
073bb189 SM |
/* Runtime state of a single eswitch vport: L2 address tables, ACLs,
 * metadata, QoS settings and the admin-configured 'info'.
 */
struct mlx5_vport {
	struct mlx5_core_dev *dev;
	/* hashed unicast/multicast L2 addresses seen on this vport */
	struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct vport_change_handler;

	struct vport_ingress ingress;
	struct vport_egress egress;
	u32 default_metadata;
	u32 metadata;

	struct mlx5_vport_info info;

	struct {
		bool enabled;
		u32 esw_tsar_ix;
		u32 bw_share;
		u32 min_rate;
		u32 max_rate;
		struct mlx5_esw_rate_group *group;
	} qos;

	u16 vport;	/* vport number */
	bool enabled;
	enum mlx5_eswitch_vport_event enabled_events;
	int index;
	struct mlx5_devlink_port *dl_port;
};
230 | ||
34ca6535 VB |
231 | struct mlx5_esw_indir_table; |
232 | ||
/* FDB tables of the eswitch. The union reflects that only one of the two
 * modes (legacy SR-IOV vs. switchdev offloads) is active at a time.
 */
struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *tc_miss_table;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules[MLX5_MAX_PORTS];
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;

		} offloads;
	};
	u32 flags;	/* MLX5_ESW_FDB_CREATED et al. */
};
271 | ||
/* State owned by the eswitch when running in switchdev offloads mode:
 * restore/RX tables, representor bookkeeping, encap/decap and termtbl
 * hashes, and devlink-configured policy (inline mode, encap mode).
 */
struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;
	struct mapping_ctx *reg_c0_obj_pool;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_flow_group *vport_rx_drop_group;
	struct mlx5_flow_handle *vport_rx_drop_rule;
	struct mlx5_flow_table *ft_ipsec_tx_pol;
	struct xarray vport_reps;
	struct list_head peer_flows[MLX5_MAX_PORTS];
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map;
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	u64 num_block_encap;
	u64 num_block_mode;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};
303 | ||
0a0ab1d2 EC |
/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32 refcnt;	/* number of vports referencing this MC address */
};
310 | ||
a3888f33 BW |
/* Deferred-work wrapper carrying the owning eswitch to the workqueue. */
struct mlx5_host_work {
	struct work_struct work;
	struct mlx5_eswitch *esw;
};
315 | ||
/* Notifier state tracking how many VFs (host and embedded-CPU) the
 * eswitch currently serves.
 */
struct mlx5_esw_functions {
	struct mlx5_nb nb;
	u16 num_vfs;
	u16 num_ec_vfs;
};
321 | ||
7445cfb1 JL |
/* Bits for mlx5_eswitch.flags. */
enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
	MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2),
};
327 | ||
19e9bfa0 VB |
328 | struct mlx5_esw_bridge_offloads; |
329 | ||
fbd43b72 CM |
/* Bits for mlx5_eswitch.fdb_table.flags. */
enum {
	MLX5_ESW_FDB_CREATED = BIT(0),
};
333 | ||
f405787a VB |
334 | struct dentry; |
335 | ||
073bb189 SM |
/* Top-level eswitch instance for one mlx5 device: FDB tables, vport
 * xarray, mode/locking state, QoS root, and the offloads substate.
 */
struct mlx5_eswitch {
	struct mlx5_core_dev *dev;
	struct mlx5_nb nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct dentry *debugfs_root;
	struct workqueue_struct *work_queue;
	struct xarray vports;
	u32 flags;	/* MLX5_ESWITCH_* bits */
	int total_vports;
	int enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct rw_semaphore mode_lock;
	atomic64_t user_count;

	struct {
		u32 root_tsar_ix;
		struct mlx5_esw_rate_group *group0;
		struct list_head groups; /* Protected by esw->state_lock */

		/* Protected by esw->state_lock.
		 * Initially 0, meaning no QoS users and QoS is disabled.
		 */
		refcount_t refcnt;
	} qos;

	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_esw_offload offloads;
	int mode;
	u16 manager_vport;
	u16 first_host_vport;
	u8 num_peers;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32 large_group_num;
	} params;
	struct blocking_notifier_head n_head;
	struct xarray paired;
	struct mlx5_devcom_comp_dev *devcom;
	u16 enabled_ipsec_vf_count;
	bool eswitch_operation_in_progress;
};
388 | ||
5896b972 PP |
389 | void esw_offloads_disable(struct mlx5_eswitch *esw); |
390 | int esw_offloads_enable(struct mlx5_eswitch *esw); | |
d2a651ef JP |
391 | void esw_offloads_cleanup(struct mlx5_eswitch *esw); |
392 | int esw_offloads_init(struct mlx5_eswitch *esw); | |
430e2d5e RD |
393 | |
394 | struct mlx5_flow_handle * | |
395 | mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num); | |
396 | void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule); | |
ea651a86 | 397 | |
7bf481d7 | 398 | bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw); |
133dcfc5 VP |
399 | u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw); |
400 | void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata); | |
401 | ||
2d116e3e | 402 | int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps); |
766a0e97 | 403 | |
073bb189 SM |
404 | /* E-Switch API */ |
405 | int mlx5_eswitch_init(struct mlx5_core_dev *dev); | |
406 | void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw); | |
ebf77bb8 PP |
407 | |
408 | #define MLX5_ESWITCH_IGNORE_NUM_VFS (-1) | |
b6f2846a | 409 | int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs); |
8e0aa4bc | 410 | int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs); |
f019679e CM |
411 | void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf); |
412 | void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw); | |
413 | void mlx5_eswitch_disable(struct mlx5_eswitch *esw); | |
1161d22d | 414 | void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key); |
2be5bd42 | 415 | void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw); |
88d162b4 | 416 | bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw); |
77256579 | 417 | int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, |
fa997825 | 418 | u16 vport, const u8 *mac); |
77256579 | 419 | int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, |
02f3afd9 | 420 | u16 vport, int link_state); |
9e7ea352 | 421 | int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, |
02f3afd9 | 422 | u16 vport, u16 vlan, u8 qos); |
f942380c | 423 | int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, |
02f3afd9 | 424 | u16 vport, bool spoofchk); |
1edc57e2 | 425 | int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, |
02f3afd9 PP |
426 | u16 vport_num, bool setting); |
427 | int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, | |
c9497c98 | 428 | u32 max_rate, u32 min_rate); |
0fe132ea DL |
429 | int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw, |
430 | struct mlx5_vport *vport, | |
431 | struct mlx5_esw_rate_group *group, | |
432 | struct netlink_ext_ack *extack); | |
8da202b2 HN |
433 | int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting); |
434 | int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting); | |
77256579 | 435 | int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, |
02f3afd9 | 436 | u16 vport, struct ifla_vf_info *ivi); |
3b751a2a | 437 | int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, |
02f3afd9 | 438 | u16 vport, |
3b751a2a | 439 | struct ifla_vf_stats *vf_stats); |
159fe639 | 440 | void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule); |
073bb189 | 441 | |
238302fa | 442 | int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport, |
e08a6832 | 443 | bool other_vport, void *in); |
57843868 | 444 | |
3d80d1a2 | 445 | struct mlx5_flow_spec; |
776b12b6 | 446 | struct mlx5_esw_flow_attr; |
10caabda OS |
447 | struct mlx5_termtbl_handle; |
448 | ||
449 | bool | |
450 | mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw, | |
c620b772 | 451 | struct mlx5_flow_attr *attr, |
10caabda OS |
452 | struct mlx5_flow_act *flow_act, |
453 | struct mlx5_flow_spec *spec); | |
454 | ||
455 | struct mlx5_flow_handle * | |
456 | mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw, | |
457 | struct mlx5_flow_table *ft, | |
458 | struct mlx5_flow_spec *spec, | |
459 | struct mlx5_esw_flow_attr *attr, | |
460 | struct mlx5_flow_act *flow_act, | |
461 | struct mlx5_flow_destination *dest, | |
462 | int num_dest); | |
463 | ||
464 | void | |
465 | mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw, | |
466 | struct mlx5_termtbl_handle *tt); | |
3d80d1a2 | 467 | |
f94d6389 CM |
468 | void |
469 | mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec); | |
470 | ||
74491de9 | 471 | struct mlx5_flow_handle * |
3d80d1a2 OG |
472 | mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, |
473 | struct mlx5_flow_spec *spec, | |
c620b772 | 474 | struct mlx5_flow_attr *attr); |
e4ad91f2 CM |
475 | struct mlx5_flow_handle * |
476 | mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, | |
477 | struct mlx5_flow_spec *spec, | |
c620b772 | 478 | struct mlx5_flow_attr *attr); |
d85cdccb OG |
479 | void |
480 | mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, | |
481 | struct mlx5_flow_handle *rule, | |
c620b772 | 482 | struct mlx5_flow_attr *attr); |
48265006 OG |
483 | void |
484 | mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw, | |
485 | struct mlx5_flow_handle *rule, | |
c620b772 | 486 | struct mlx5_flow_attr *attr); |
d85cdccb | 487 | |
74491de9 | 488 | struct mlx5_flow_handle * |
02f3afd9 | 489 | mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, |
c966f7d5 | 490 | struct mlx5_flow_destination *dest); |
fed9ce22 | 491 | |
e33dfe31 OG |
/* Flags selecting which VLAN actions to program (see set_flags users). */
enum {
	SET_VLAN_STRIP = BIT(0),
	SET_VLAN_INSERT = BIT(1)
};
496 | ||
d708f902 OG |
/* Header depth a flow match may inspect; values alias the device's
 * inline-mode capabilities.
 */
enum mlx5_flow_match_level {
	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
};
503 | ||
592d3651 | 504 | /* current maximum for flow based vport multicasting */ |
d40bfedd | 505 | #define MLX5_MAX_FLOW_FWD_VPORTS 32 |
592d3651 | 506 | |
f493f155 EB |
/* Per-destination flags in mlx5_esw_flow_attr.dests[].flags. */
enum {
	MLX5_ESW_DEST_ENCAP = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE = BIT(2),
};
512 | ||
776b12b6 OG |
/* Eswitch-specific attributes of a TC flow being offloaded: source rep,
 * VLAN push/pop parameters and the forwarding destinations.
 */
struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev *in_mdev;
	struct mlx5_core_dev *counter_dev;
	struct mlx5e_tc_int_port *dest_int_port;
	struct mlx5e_tc_int_port *int_port;

	int split_count;
	int out_count;

	__be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8 total_vlan;
	struct {
		u32 flags;	/* MLX5_ESW_DEST_* bits */
		struct mlx5_eswitch_rep *rep;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct ethhdr eth;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};
539 | ||
db7ff19e EB |
540 | int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, |
541 | struct netlink_ext_ack *extack); | |
feae9087 | 542 | int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode); |
db7ff19e EB |
543 | int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, |
544 | struct netlink_ext_ack *extack); | |
bffaa916 | 545 | int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode); |
98fdbea5 LR |
546 | int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, |
547 | enum devlink_eswitch_encap_mode encap, | |
db7ff19e | 548 | struct netlink_ext_ack *extack); |
98fdbea5 LR |
549 | int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, |
550 | enum devlink_eswitch_encap_mode *encap); | |
71c93e37 JP |
551 | int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port, |
552 | u8 *hw_addr, int *hw_addr_len, | |
553 | struct netlink_ext_ack *extack); | |
554 | int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port, | |
555 | const u8 *hw_addr, int hw_addr_len, | |
556 | struct netlink_ext_ack *extack); | |
7db98396 YH |
557 | int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled, |
558 | struct netlink_ext_ack *extack); | |
559 | int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable, | |
560 | struct netlink_ext_ack *extack); | |
e5b9642a SD |
561 | int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled, |
562 | struct netlink_ext_ack *extack); | |
563 | int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable, | |
564 | struct netlink_ext_ack *extack); | |
06bab696 DC |
565 | #ifdef CONFIG_XFRM_OFFLOAD |
566 | int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled, | |
567 | struct netlink_ext_ack *extack); | |
568 | int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable, | |
569 | struct netlink_ext_ack *extack); | |
b691b111 DC |
570 | int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled, |
571 | struct netlink_ext_ack *extack); | |
572 | int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable, | |
573 | struct netlink_ext_ack *extack); | |
06bab696 | 574 | #endif /* CONFIG_XFRM_OFFLOAD */ |
a4b97ab4 | 575 | void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type); |
feae9087 | 576 | |
f5f82476 | 577 | int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, |
02f3afd9 | 578 | u16 vport, u16 vlan, u8 qos, u8 set_flags); |
f5f82476 | 579 | |
1f0ae22a MS |
580 | static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw) |
581 | { | |
582 | return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) && | |
583 | MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan)); | |
584 | } | |
585 | ||
cc495188 JL |
586 | static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev, |
587 | u8 vlan_depth) | |
6acfbf38 | 588 | { |
cc495188 JL |
589 | bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) && |
590 | MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan); | |
591 | ||
592 | if (vlan_depth == 1) | |
593 | return ret; | |
594 | ||
595 | return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) && | |
596 | MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2); | |
6acfbf38 OG |
597 | } |
598 | ||
544fe7c2 RD |
599 | bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0, |
600 | struct mlx5_core_dev *dev1); | |
eff849b2 | 601 | |
dd28087c | 602 | const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev); |
cd56f929 | 603 | |
69697b6e OG |
604 | #define MLX5_DEBUG_ESWITCH_MASK BIT(3) |
605 | ||
27b942fb PP |
606 | #define esw_info(__dev, format, ...) \ |
607 | dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__) | |
69697b6e | 608 | |
27b942fb PP |
609 | #define esw_warn(__dev, format, ...) \ |
610 | dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__) | |
69697b6e OG |
611 | |
612 | #define esw_debug(dev, format, ...) \ | |
613 | mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__) | |
a1b3839a | 614 | |
b16f2bb6 PP |
615 | static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw) |
616 | { | |
617 | return esw && MLX5_ESWITCH_MANAGER(esw->dev); | |
618 | } | |
619 | ||
a1b3839a BW |
620 | /* The returned number is valid only when the dev is eswitch manager. */ |
621 | static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev) | |
622 | { | |
623 | return mlx5_core_is_ecpf_esw_manager(dev) ? | |
624 | MLX5_VPORT_ECPF : MLX5_VPORT_PF; | |
625 | } | |
626 | ||
ea2300e0 PP |
627 | static inline bool |
628 | mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num) | |
629 | { | |
630 | return esw->manager_vport == vport_num; | |
631 | } | |
632 | ||
90ca127c SM |
633 | static inline bool mlx5_esw_is_owner(struct mlx5_eswitch *esw, u16 vport_num, |
634 | u16 esw_owner_vhca_id) | |
635 | { | |
636 | return esw_owner_vhca_id == MLX5_CAP_GEN(esw->dev, vhca_id) || | |
637 | (vport_num == MLX5_VPORT_UPLINK && mlx5_lag_is_master(esw->dev)); | |
638 | } | |
639 | ||
411ec9e0 BW |
640 | static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev) |
641 | { | |
642 | return mlx5_core_is_ecpf_esw_manager(dev) ? | |
643 | MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF; | |
644 | } | |
645 | ||
3d5f41ca | 646 | static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev) |
6706a3b9 | 647 | { |
3d5f41ca | 648 | return mlx5_core_is_ecpf_esw_manager(dev); |
6706a3b9 VP |
649 | } |
650 | ||
443bf36e PP |
651 | static inline unsigned int |
652 | mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev, | |
653 | u16 vport_num) | |
654 | { | |
655 | return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num; | |
656 | } | |
657 | ||
f099fde1 PP |
658 | static inline u16 |
659 | mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index) | |
660 | { | |
661 | return dl_port_index & 0xffff; | |
662 | } | |
663 | ||
fbd43b72 CM |
664 | static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw) |
665 | { | |
666 | return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED; | |
667 | } | |
668 | ||
ee576ec1 SM |
669 | /* TODO: This mlx5e_tc function shouldn't be called by eswitch */ |
670 | void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); | |
671 | ||
47dd7e60 PP |
672 | /* Each mark identifies eswitch vport type. |
673 | * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using | |
674 | * a single mark. | |
675 | * MLX5_ESW_VPT_VF identifies a SRIOV VF vport. | |
676 | * MLX5_ESW_VPT_SF identifies SF vport. | |
786ef904 | 677 | */ |
47dd7e60 PP |
678 | #define MLX5_ESW_VPT_HOST_FN XA_MARK_0 |
679 | #define MLX5_ESW_VPT_VF XA_MARK_1 | |
680 | #define MLX5_ESW_VPT_SF XA_MARK_2 | |
681 | ||
682 | /* The vport iterator is valid only after vport are initialized in mlx5_eswitch_init. | |
683 | * Borrowed the idea from xa_for_each_marked() but with support for desired last element. | |
786ef904 | 684 | */ |
47dd7e60 PP |
685 | |
686 | #define mlx5_esw_for_each_vport(esw, index, vport) \ | |
687 | xa_for_each(&((esw)->vports), index, vport) | |
688 | ||
689 | #define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter) \ | |
690 | for (index = 0, entry = xa_find(xa, &index, last, filter); \ | |
691 | entry; entry = xa_find_after(xa, &index, last, filter)) | |
692 | ||
693 | #define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter) \ | |
694 | mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter) | |
695 | ||
696 | #define mlx5_esw_for_each_vf_vport(esw, index, vport, last) \ | |
697 | mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF) | |
698 | ||
699 | #define mlx5_esw_for_each_host_func_vport(esw, index, vport, last) \ | |
700 | mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN) | |
d7f33a45 | 701 | |
a7719b29 DJ |
702 | /* This macro should only be used if EC SRIOV is enabled. |
703 | * | |
704 | * Because there were no more marks available on the xarray this uses a | |
705 | * for_each_range approach. The range is only valid when EC SRIOV is enabled | |
706 | */ | |
707 | #define mlx5_esw_for_each_ec_vf_vport(esw, index, vport, last) \ | |
708 | xa_for_each_range(&((esw)->vports), \ | |
709 | index, \ | |
710 | vport, \ | |
711 | MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base), \ | |
b3bd6892 | 712 | MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\ |
a7719b29 DJ |
713 | (last) - 1) |
714 | ||
/* Resolve the eswitch from a devlink instance; __must_check variant may
 * fail (callers must test the result).
 */
struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);

struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink);

/* Look up a vport by number; result must be checked by the caller. */
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

/* Vport-classification predicates. */
bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

/* Bring up / tear down the PF and VF vports as a group. */
int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

/* Offloads-mode ingress/egress ACL table lifetime for a single vport. */
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);
/* Sizing/flags for a per-vport flow-table namespace. */
struct esw_vport_tbl_namespace {
	int max_fte;		/* capacity in flow-table entries */
	int max_num_groups;	/* max flow groups in the table */
	u32 flags;
};

/* Key identifying (and parameters for creating) a per-vport table. */
struct mlx5_vport_tbl_attr {
	u32 chain;
	u16 prio;
	u16 vport;
	struct esw_vport_tbl_namespace *vport_ns;
};

/* Reference-counted get/put of the per-vport flow table described by attr. */
struct mlx5_flow_table *
mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

/* Populate source-port match criteria in a flow group / flow spec. */
void mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					 u32 *flow_group_in,
					 int match_params);

void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
				   u16 vport,
				   struct mlx5_flow_spec *spec);

/* Representor init/cleanup for PF/VF and SF vports. */
int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
				  struct mlx5_devlink_port *dl_port,
				  u32 controller, u32 sfnum);
void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

/* Load/unload a single SF vport (by number) or a batch of VF vports. */
int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
			       enum mlx5_eswitch_vport_event enabled_events,
			       struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum);
void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

/* devlink port object lifetime for PF/VF and SF vports. */
int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport);
void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
						  struct mlx5_vport *vport);

int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					   struct mlx5_devlink_port *dl_port,
					   u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

/* vhca_id <-> vport number mapping maintenance and lookup. */
int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);

/**
 * mlx5_esw_event_info - Indicates eswitch mode changed/changing.
 *
 * @new_mode: New mode of eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};

/* Subscribe/unsubscribe to eswitch mode-change notifications. */
int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
/* Reference/lock helpers guarding eswitch mode transitions.
 * mlx5_esw_hold() may fail (returns false); try_lock/lock return status.
 */
bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
int mlx5_esw_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);

/* Caller must already hold the relevant lock ("_locked" suffix). */
void esw_vport_change_handle_locked(struct mlx5_vport *vport);

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

/* Shared-FDB (LAG) pairing between a master and a slave eswitch. */
int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					     struct mlx5_eswitch *slave_esw, int max_slaves);
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);

/* Block/unblock encap and mode changes; block_* may refuse (see return). */
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);

int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev);
850 | static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw) |
851 | { | |
852 | if (mlx5_esw_allowed(esw)) | |
853 | return esw->esw_funcs.num_vfs; | |
854 | ||
855 | return 0; | |
856 | } | |
857 | ||
4c103aea SD |
858 | static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) |
859 | { | |
860 | if (mlx5_esw_allowed(esw)) | |
861 | return esw->num_peers; | |
862 | return 0; | |
863 | } | |
864 | ||
dcf19b9c MD |
865 | static inline struct mlx5_flow_table * |
866 | mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw) | |
867 | { | |
868 | return esw->fdb_table.offloads.slow_fdb; | |
869 | } | |
acc10929 | 870 | |
int mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
				    struct mlx5_esw_flow_attr *esw_attr, int attr_idx);
/* Block/unblock IPsec configuration changes; block may refuse (false). */
bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev);
/* Per-VF IPsec offload capability queries and crypto/packet mode toggles. */
bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev);
int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev,
				  struct mlx5_vport *vport);
int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
void mlx5_esw_vport_ipsec_offload_enable(struct mlx5_eswitch *esw);
void mlx5_esw_vport_ipsec_offload_disable(struct mlx5_eswitch *esw);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs: no-op implementations used when the eswitch is
 * compiled out, so callers need no #ifdefs of their own.
 */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
/* Stubs that can "fail" report -EOPNOTSUPP via ERR_PTR. */
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* Without an eswitch the devlink port index is just the vport number. */
static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return vport_num;
}

static inline int
mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw, int max_slaves)
{
	return 0;
}

static inline void
mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw) {}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }

static inline int
mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
	return 0;
}

/* Nothing to block, so "blocking" encap trivially succeeds ... */
static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
{
	return true;
}

static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
}

static inline int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) {}
/* ... while blocking IPsec reports false (not blocked). */
static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
{
	return false;
}

static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */
957 | ||
073bb189 | 958 | #endif /* __MLX5_ESWITCH_H__ */ |