Add support for devlink port function state get/set operations for the
host physical function (PF). Until now, mlx5 allowed state get/set only
for subfunction (SF) ports. This change enables an administrator with
eSwitch manager privileges to query or modify the host PF's function
state, allowing it to be explicitly activated or deactivated. While the
function is inactive, the administrator can modify its attributes, such
as enabling or disabling RoCE.
$ devlink port show pci/0000:03:00.0/196608
pci/0000:03:00.0/196608: type eth netdev eth1 flavour pcipf controller 1 pfnum 0 external true splittable false
function:
hw_addr a0:88:c2:45:17:7c state active opstate attached roce enable max_io_eqs 120
$ devlink port function set pci/0000:03:00.0/196608 state inactive
$ devlink port show pci/0000:03:00.0/196608
pci/0000:03:00.0/196608: type eth netdev eth1 flavour pcipf controller 1 pfnum 0 external true splittable false
function:
hw_addr a0:88:c2:45:17:7c state inactive opstate detached roce enable max_io_eqs 120
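While the PF is inactive, its attributes can be changed before it is
activated again; an illustrative continuation of the session above (not
captured output):
$ devlink port function set pci/0000:03:00.0/196608 roce disable
$ devlink port function set pci/0000:03:00.0/196608 state active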
Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20260203102402.1712218-1-tariqt@nvidia.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
/* Copyright (c) 2019 Mellanox Technologies. */
#include "ecpf.h"
+#include "eswitch.h"
bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev)
{
/* ECPF shall enable HCA for host PF in the same way a PF
* does this for its VFs when ECPF is not an eswitch manager.
*/
- err = mlx5_cmd_host_pf_enable_hca(dev);
+ err = mlx5_esw_host_pf_enable_hca(dev);
if (err)
mlx5_core_err(dev, "Failed to enable external host PF HCA err(%d)\n", err);
if (mlx5_ecpf_esw_admins_host_pf(dev))
return;
- err = mlx5_cmd_host_pf_disable_hca(dev);
+ err = mlx5_esw_host_pf_disable_hca(dev);
if (err) {
mlx5_core_err(dev, "Failed to disable external host PF HCA err(%d)\n", err);
return;
.port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
.port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
.port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
+ .port_fn_state_get = mlx5_devlink_pf_port_fn_state_get,
+ .port_fn_state_set = mlx5_devlink_pf_port_fn_state_set,
#ifdef CONFIG_XFRM_OFFLOAD
.port_fn_ipsec_crypto_get = mlx5_devlink_port_fn_ipsec_crypto_get,
.port_fn_ipsec_crypto_set = mlx5_devlink_port_fn_ipsec_crypto_set,
return err;
}
-static int host_pf_enable_hca(struct mlx5_core_dev *dev)
+int mlx5_esw_host_pf_enable_hca(struct mlx5_core_dev *dev)
{
- if (!mlx5_core_is_ecpf(dev))
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_vport *vport;
+ int err;
+
+ if (!mlx5_core_is_ecpf(dev) || !mlx5_esw_allowed(esw))
return 0;
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
/* Once vport and representor are ready, take the external host PF
* out of the initializing state. Enabling HCA clears the iser->initializing
* bit and host PF driver loading can progress.
*/
- return mlx5_cmd_host_pf_enable_hca(dev);
+ err = mlx5_cmd_host_pf_enable_hca(dev);
+ if (err)
+ return err;
+
+ vport->pf_activated = true;
+
+ return 0;
}
-static void host_pf_disable_hca(struct mlx5_core_dev *dev)
+int mlx5_esw_host_pf_disable_hca(struct mlx5_core_dev *dev)
{
- if (!mlx5_core_is_ecpf(dev))
- return;
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_vport *vport;
+ int err;
- mlx5_cmd_host_pf_disable_hca(dev);
+ if (!mlx5_core_is_ecpf(dev) || !mlx5_esw_allowed(esw))
+ return 0;
+
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ err = mlx5_cmd_host_pf_disable_hca(dev);
+ if (err)
+ return err;
+
+ vport->pf_activated = false;
+
+ return 0;
}
/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
if (mlx5_esw_host_functions_enabled(esw->dev)) {
/* Enable external host PF HCA */
- ret = host_pf_enable_hca(esw->dev);
+ ret = mlx5_esw_host_pf_enable_hca(esw->dev);
if (ret)
goto pf_hca_err;
}
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
if (mlx5_esw_host_functions_enabled(esw->dev))
- host_pf_disable_hca(esw->dev);
+ mlx5_esw_host_pf_disable_hca(esw->dev);
pf_hca_err:
if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev))
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
}
if (mlx5_esw_host_functions_enabled(esw->dev))
- host_pf_disable_hca(esw->dev);
+ mlx5_esw_host_pf_disable_hca(esw->dev);
if ((mlx5_core_is_ecpf_esw_manager(esw->dev) ||
esw->mode == MLX5_ESWITCH_LEGACY) &&
u16 vport;
bool enabled;
bool max_eqs_set;
+ bool pf_activated; /* host PF HCA enabled (devlink function state active) */
enum mlx5_eswitch_vport_event enabled_events;
int index;
struct mlx5_devlink_port *dl_port;
struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack);
+int mlx5_devlink_pf_port_fn_state_get(struct devlink_port *port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_pf_port_fn_state_set(struct devlink_port *port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack);
#ifdef CONFIG_XFRM_OFFLOAD
int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
struct netlink_ext_ack *extack);
struct mlx5_core_dev *dev1);
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);
+int mlx5_esw_host_pf_enable_hca(struct mlx5_core_dev *dev);
+int mlx5_esw_host_pf_disable_hca(struct mlx5_core_dev *dev);
void mlx5_esw_adjacent_vhcas_setup(struct mlx5_eswitch *esw);
void mlx5_esw_adjacent_vhcas_cleanup(struct mlx5_eswitch *esw);
return err;
}
+int mlx5_devlink_pf_port_fn_state_get(struct devlink_port *port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
+ const u32 *query_out;
+ bool pf_disabled;
+
+ if (vport->vport != MLX5_VPORT_PF) {
+ NL_SET_ERR_MSG_MOD(extack, "State get is not supported for VF");
+ return -EOPNOTSUPP;
+ }
+
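+ /* Admin state is tracked locally in pf_activated; opstate is queried from FW below. */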
+ *state = vport->pf_activated ?
+ DEVLINK_PORT_FN_STATE_ACTIVE : DEVLINK_PORT_FN_STATE_INACTIVE;
+
+ query_out = mlx5_esw_query_functions(vport->dev);
+ if (IS_ERR(query_out))
+ return PTR_ERR(query_out);
+
+ pf_disabled = MLX5_GET(query_esw_functions_out, query_out,
+ host_params_context.host_pf_disabled);
+
+ *opstate = pf_disabled ? DEVLINK_PORT_FN_OPSTATE_DETACHED :
+ DEVLINK_PORT_FN_OPSTATE_ATTACHED;
+
+ kvfree(query_out);
+ return 0;
+}
+
+int mlx5_devlink_pf_port_fn_state_set(struct devlink_port *port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
+ struct mlx5_core_dev *dev;
+
+ if (vport->vport != MLX5_VPORT_PF) {
+ NL_SET_ERR_MSG_MOD(extack, "State set is not supported for VF");
+ return -EOPNOTSUPP;
+ }
+
+ dev = vport->dev;
+
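+ /* Map the devlink function state to enabling/disabling the host PF HCA. */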
+ switch (state) {
+ case DEVLINK_PORT_FN_STATE_ACTIVE:
+ return mlx5_esw_host_pf_enable_hca(dev);
+ case DEVLINK_PORT_FN_STATE_INACTIVE:
+ return mlx5_esw_host_pf_disable_hca(dev);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
int
mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *esw_attr, int attr_idx)