/* Minimum supported BW share value by the HW is 1 Mbit/sec */
#define MLX5_MIN_BW_SHARE 1
-/* Holds rate groups associated with an E-Switch. */
+/* Holds rate nodes associated with an E-Switch. */
struct mlx5_qos_domain {
/* Serializes access to all qos changes in the qos domain. */
struct mutex lock;
- /* List of all mlx5_esw_rate_groups. */
- struct list_head groups;
+ /* List of all mlx5_esw_sched_nodes. */
+ struct list_head nodes;
};
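For orientation: nodes allocated without a parent land on the domain's flat list (see __esw_qos_alloc_node below), everything else hangs off its parent's 'children' list, and all traversals take the domain lock. A hypothetical helper (not part of this patch) illustrating that discipline:

/* Hypothetical helper: count the top-level (parentless) nodes of a
 * QoS domain. Child nodes and vports are reached through their
 * parent's 'children' list instead.
 */
static u32 esw_qos_domain_count_nodes(struct mlx5_qos_domain *qos_domain)
{
	struct mlx5_esw_sched_node *node;
	u32 count = 0;

	mutex_lock(&qos_domain->lock);
	list_for_each_entry(node, &qos_domain->nodes, entry)
		count++;
	mutex_unlock(&qos_domain->lock);

	return count;
}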
static void esw_qos_lock(struct mlx5_eswitch *esw)
return NULL;
mutex_init(&qos_domain->lock);
- INIT_LIST_HEAD(&qos_domain->groups);
+ INIT_LIST_HEAD(&qos_domain->nodes);
return qos_domain;
}
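Pieced together from this hunk, the allocator presumably reads in full as below (a hedged reconstruction; the function name and the kzalloc lines are elided from the hunk and assumed here):

/* Hedged reconstruction, not verbatim from the tree. */
static struct mlx5_qos_domain *esw_qos_domain_alloc(void)
{
	struct mlx5_qos_domain *qos_domain;

	qos_domain = kzalloc(sizeof(*qos_domain), GFP_KERNEL);
	if (!qos_domain)
		return NULL;

	mutex_init(&qos_domain->lock);
	INIT_LIST_HEAD(&qos_domain->nodes);

	return qos_domain;
}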
SCHED_NODE_TYPE_VPORTS_TSAR,
};
-struct mlx5_esw_rate_group {
- u32 tsar_ix;
+struct mlx5_esw_sched_node {
+ u32 ix;
/* Bandwidth parameters. */
u32 max_rate;
u32 min_rate;
- /* A computed value indicating relative min_rate between group members. */
+ /* A computed value indicating relative min_rate among the node's children. */
u32 bw_share;
- /* The parent group of this group. */
- struct mlx5_esw_rate_group *parent;
- /* Membership in the parent list. */
- struct list_head parent_entry;
- /* The type of this group node in the rate hierarchy. */
+ /* The parent node in the rate hierarchy. */
+ struct mlx5_esw_sched_node *parent;
+ /* Entry in the parent node's children list. */
+ struct list_head entry;
+ /* The type of this node in the rate hierarchy. */
enum sched_node_type type;
- /* The eswitch this group belongs to. */
+ /* The eswitch this node belongs to. */
struct mlx5_eswitch *esw;
- /* Members of this group.*/
- struct list_head members;
+ /* The child nodes of this node; an empty list for leaf nodes. */
+ struct list_head children;
};
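A minimal sketch (not part of the patch) of consuming the new fields: for a SCHED_NODE_TYPE_VPORTS_TSAR node, 'children' links vports through vport->qos.parent_entry, exactly as the loops later in this patch do.

/* Hypothetical debug helper: dump the vport children of a vports TSAR node. */
static void esw_qos_node_dump_children(struct mlx5_esw_sched_node *node)
{
	struct mlx5_vport *vport;

	list_for_each_entry(vport, &node->children, qos.parent_entry)
		esw_warn(node->esw->dev, "node ix %u: vport %d bw_share %u\n",
			 node->ix, vport->vport, vport->qos.bw_share);
}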
-static void esw_qos_vport_set_parent(struct mlx5_vport *vport, struct mlx5_esw_rate_group *parent)
+static void esw_qos_vport_set_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *parent)
{
list_del_init(&vport->qos.parent_entry);
vport->qos.parent = parent;
- list_add_tail(&vport->qos.parent_entry, &parent->members);
+ list_add_tail(&vport->qos.parent_entry, &parent->children);
}
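The list_del_init() above is safe on first attach only because the entry is initialized beforehand; the vport enable path further down in this patch does exactly that:

/* First attach, mirroring the enable path shown later in this patch: */
INIT_LIST_HEAD(&vport->qos.parent_entry);
esw_qos_vport_set_parent(vport, esw->qos.node0);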
static int esw_qos_sched_elem_config(struct mlx5_core_dev *dev, u32 sched_elem_ix,
bitmask);
}
-static int esw_qos_group_config(struct mlx5_esw_rate_group *group,
- u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack)
+static int esw_qos_node_config(struct mlx5_esw_sched_node *node,
+ u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack)
{
- struct mlx5_core_dev *dev = group->esw->dev;
+ struct mlx5_core_dev *dev = node->esw->dev;
int err;
- err = esw_qos_sched_elem_config(dev, group->tsar_ix, max_rate, bw_share);
+ err = esw_qos_sched_elem_config(dev, node->ix, max_rate, bw_share);
if (err)
- NL_SET_ERR_MSG_MOD(extack, "E-Switch modify group TSAR element failed");
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch modify node TSAR element failed");
- trace_mlx5_esw_group_qos_config(dev, group, group->tsar_ix, bw_share, max_rate);
+ trace_mlx5_esw_node_qos_config(dev, node, node->ix, bw_share, max_rate);
return err;
}
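esw_qos_sched_elem_config() itself is untouched by this patch; for reference, it roughly wraps the MODIFY_SCHEDULING_ELEMENT firmware command with a bandwidth bitmask (an abridged sketch, not the verbatim helper):

/* Abridged sketch of the existing helper (hedged, capability checks omitted). */
static int esw_qos_sched_elem_config(struct mlx5_core_dev *dev, u32 sched_elem_ix,
				     u32 max_rate, u32 bw_share)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
	u32 bitmask = 0;

	MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;

	return mlx5_modify_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH,
						  sched_ctx, sched_elem_ix, bitmask);
}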
return 0;
}
-static u32 esw_qos_calculate_group_min_rate_divider(struct mlx5_esw_rate_group *group)
+static u32 esw_qos_calculate_node_min_rate_divider(struct mlx5_esw_sched_node *node)
{
- u32 fw_max_bw_share = MLX5_CAP_QOS(group->esw->dev, max_tsar_bw_share);
+ u32 fw_max_bw_share = MLX5_CAP_QOS(node->esw->dev, max_tsar_bw_share);
struct mlx5_vport *vport;
u32 max_guarantee = 0;
- /* Find max min_rate across all vports in this group.
+ /* Find max min_rate across all vports in this node.
* This will correspond to fw_max_bw_share in the final bw_share calculation.
*/
- list_for_each_entry(vport, &group->members, qos.parent_entry) {
+ list_for_each_entry(vport, &node->children, qos.parent_entry) {
if (vport->qos.min_rate > max_guarantee)
max_guarantee = vport->qos.min_rate;
}
if (max_guarantee)
return max_t(u32, max_guarantee / fw_max_bw_share, 1);
- /* If vports max min_rate divider is 0 but their group has bw_share
+ /* If vports max min_rate divider is 0 but their node has bw_share
* configured, then set bw_share for vports to minimal value.
*/
- if (group->bw_share)
+ if (node->bw_share)
return 1;
- /* A divider of 0 sets bw_share for all group vports to 0,
+ /* A divider of 0 sets bw_share for all the node's vports to 0,
* effectively disabling min guarantees.
*/
return 0;
static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw)
{
u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
- struct mlx5_esw_rate_group *group;
+ struct mlx5_esw_sched_node *node;
u32 max_guarantee = 0;
- /* Find max min_rate across all esw groups.
+ /* Find max min_rate across all esw nodes.
* This will correspond to fw_max_bw_share in the final bw_share calculation.
*/
- list_for_each_entry(group, &esw->qos.domain->groups, parent_entry) {
- if (group->esw == esw && group->tsar_ix != esw->qos.root_tsar_ix &&
- group->min_rate > max_guarantee)
- max_guarantee = group->min_rate;
+ list_for_each_entry(node, &esw->qos.domain->nodes, entry) {
+ if (node->esw == esw && node->ix != esw->qos.root_tsar_ix &&
+ node->min_rate > max_guarantee)
+ max_guarantee = node->min_rate;
}
if (max_guarantee)
return max_t(u32, max_guarantee / fw_max_bw_share, 1);
- /* If no group has min_rate configured, a divider of 0 sets all
- * groups' bw_share to 0, effectively disabling min guarantees.
+ /* If no node has min_rate configured, a divider of 0 sets all
+ * nodes' bw_share to 0, effectively disabling min guarantees.
*/
return 0;
}
return min_t(u32, max_t(u32, DIV_ROUND_UP(min_rate, divider), MLX5_MIN_BW_SHARE), fw_max);
}
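Worked example of the normalization math (illustrative numbers; the zero-divider guard at the top of the helper is elided from this hunk and assumed):

/* Userspace-compilable check of the bw_share arithmetic above. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MLX5_MIN_BW_SHARE	1

static unsigned int calc_bw_share(unsigned int min_rate, unsigned int divider,
				  unsigned int fw_max)
{
	unsigned int share;

	if (!divider)	/* assumed guard, not shown in the hunk */
		return 0;
	share = DIV_ROUND_UP(min_rate, divider);
	share = share > MLX5_MIN_BW_SHARE ? share : MLX5_MIN_BW_SHARE;
	return share < fw_max ? share : fw_max;
}

int main(void)
{
	/* With fw_max_bw_share = 100 and vport min_rates of 10000/5000/0:
	 * divider = max(10000 / 100, 1) = 100.
	 */
	printf("%u %u %u\n",
	       calc_bw_share(10000, 100, 100),	/* 100 */
	       calc_bw_share(5000, 100, 100),	/* 50 */
	       calc_bw_share(0, 100, 100));	/* 1, the enforced minimum */
	return 0;
}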
-static int esw_qos_normalize_group_min_rate(struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack)
+static int esw_qos_normalize_node_min_rate(struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
{
- u32 fw_max_bw_share = MLX5_CAP_QOS(group->esw->dev, max_tsar_bw_share);
- u32 divider = esw_qos_calculate_group_min_rate_divider(group);
+ u32 fw_max_bw_share = MLX5_CAP_QOS(node->esw->dev, max_tsar_bw_share);
+ u32 divider = esw_qos_calculate_node_min_rate_divider(node);
struct mlx5_vport *vport;
u32 bw_share;
int err;
- list_for_each_entry(vport, &group->members, qos.parent_entry) {
+ list_for_each_entry(vport, &node->children, qos.parent_entry) {
bw_share = esw_qos_calc_bw_share(vport->qos.min_rate, divider, fw_max_bw_share);
if (bw_share == vport->qos.bw_share)
{
u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
u32 divider = esw_qos_calculate_min_rate_divider(esw);
- struct mlx5_esw_rate_group *group;
+ struct mlx5_esw_sched_node *node;
u32 bw_share;
int err;
- list_for_each_entry(group, &esw->qos.domain->groups, parent_entry) {
- if (group->esw != esw || group->tsar_ix == esw->qos.root_tsar_ix)
+ list_for_each_entry(node, &esw->qos.domain->nodes, entry) {
+ if (node->esw != esw || node->ix == esw->qos.root_tsar_ix)
continue;
- bw_share = esw_qos_calc_bw_share(group->min_rate, divider, fw_max_bw_share);
+ bw_share = esw_qos_calc_bw_share(node->min_rate, divider, fw_max_bw_share);
- if (bw_share == group->bw_share)
+ if (bw_share == node->bw_share)
continue;
- err = esw_qos_group_config(group, group->max_rate, bw_share, extack);
+ err = esw_qos_node_config(node, node->max_rate, bw_share, extack);
if (err)
return err;
- group->bw_share = bw_share;
+ node->bw_share = bw_share;
- /* All the group's vports need to be set with default bw_share
+ /* All the node's vports need to be set with default bw_share
* to enable them with QOS
*/
- err = esw_qos_normalize_group_min_rate(group, extack);
+ err = esw_qos_normalize_node_min_rate(node, extack);
if (err)
return err;
previous_min_rate = vport->qos.min_rate;
vport->qos.min_rate = min_rate;
- err = esw_qos_normalize_group_min_rate(vport->qos.parent, extack);
+ err = esw_qos_normalize_node_min_rate(vport->qos.parent, extack);
if (err)
vport->qos.min_rate = previous_min_rate;
if (max_rate == vport->qos.max_rate)
return 0;
- /* Use parent group limit if new max rate is 0. */
+ /* Use parent node limit if new max rate is 0. */
if (!max_rate)
act_max_rate = vport->qos.parent->max_rate;
return err;
}
-static int esw_qos_set_group_min_rate(struct mlx5_esw_rate_group *group,
- u32 min_rate, struct netlink_ext_ack *extack)
+static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node,
+ u32 min_rate, struct netlink_ext_ack *extack)
{
- struct mlx5_eswitch *esw = group->esw;
+ struct mlx5_eswitch *esw = node->esw;
u32 previous_min_rate;
int err;
MLX5_CAP_QOS(esw->dev, max_tsar_bw_share) < MLX5_MIN_BW_SHARE)
return -EOPNOTSUPP;
- if (min_rate == group->min_rate)
+ if (min_rate == node->min_rate)
return 0;
- previous_min_rate = group->min_rate;
- group->min_rate = min_rate;
+ previous_min_rate = node->min_rate;
+ node->min_rate = min_rate;
err = esw_qos_normalize_min_rate(esw, extack);
if (err) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch group min rate setting failed");
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch node min rate setting failed");
/* Attempt restoring previous configuration */
- group->min_rate = previous_min_rate;
+ node->min_rate = previous_min_rate;
if (esw_qos_normalize_min_rate(esw, extack))
NL_SET_ERR_MSG_MOD(extack, "E-Switch BW share restore failed");
}
return err;
}
-static int esw_qos_set_group_max_rate(struct mlx5_esw_rate_group *group,
- u32 max_rate, struct netlink_ext_ack *extack)
+static int esw_qos_set_node_max_rate(struct mlx5_esw_sched_node *node,
+ u32 max_rate, struct netlink_ext_ack *extack)
{
struct mlx5_vport *vport;
int err;
- if (group->max_rate == max_rate)
+ if (node->max_rate == max_rate)
return 0;
- err = esw_qos_group_config(group, max_rate, group->bw_share, extack);
+ err = esw_qos_node_config(node, max_rate, node->bw_share, extack);
if (err)
return err;
- group->max_rate = max_rate;
+ node->max_rate = max_rate;
- /* Any unlimited vports in the group should be set with the value of the group. */
- list_for_each_entry(vport, &group->members, qos.parent_entry) {
+ /* Any unlimited vports in the node should be updated with the node's max rate. */
+ list_for_each_entry(vport, &node->children, qos.parent_entry) {
if (vport->qos.max_rate)
continue;
return err;
}
-static int esw_qos_create_group_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id,
- u32 *tsar_ix)
+static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id,
+ u32 *tsar_ix)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
void *attr;
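The elided body builds a DWRR TSAR scheduling context under parent_element_id; roughly (a hedged, abridged sketch of the unchanged code):

/* Hedged sketch of the elided body: create a DWRR TSAR element. */
MLX5_SET(scheduling_context, tsar_ctx, element_type,
	 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
MLX5_SET(scheduling_context, tsar_ctx, parent_element_id, parent_element_id);
attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);

return mlx5_create_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH,
					  tsar_ctx, tsar_ix);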
static int esw_qos_vport_create_sched_element(struct mlx5_vport *vport,
u32 max_rate, u32 bw_share)
{
- struct mlx5_esw_rate_group *parent = vport->qos.parent;
+ struct mlx5_esw_sched_node *parent = vport->qos.parent;
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_core_dev *dev = parent->esw->dev;
void *attr;
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
MLX5_SET(vport_element, attr, vport_number, vport->vport);
- MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent->tsar_ix);
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent->ix);
MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
return 0;
}
-static int esw_qos_update_group_scheduling_element(struct mlx5_vport *vport,
- struct mlx5_esw_rate_group *curr_group,
- struct mlx5_esw_rate_group *new_group,
- struct netlink_ext_ack *extack)
+static int esw_qos_update_node_scheduling_element(struct mlx5_vport *vport,
+ struct mlx5_esw_sched_node *curr_node,
+ struct mlx5_esw_sched_node *new_node,
+ struct netlink_ext_ack *extack)
{
u32 max_rate;
int err;
- err = mlx5_destroy_scheduling_element_cmd(curr_group->esw->dev,
+ err = mlx5_destroy_scheduling_element_cmd(curr_node->esw->dev,
SCHEDULING_HIERARCHY_E_SWITCH,
vport->qos.esw_sched_elem_ix);
if (err) {
return err;
}
- esw_qos_vport_set_parent(vport, new_group);
- /* Use new group max rate if vport max rate is unlimited. */
- max_rate = vport->qos.max_rate ? vport->qos.max_rate : new_group->max_rate;
+ esw_qos_vport_set_parent(vport, new_node);
+ /* Use new node max rate if vport max rate is unlimited. */
+ max_rate = vport->qos.max_rate ? vport->qos.max_rate : new_node->max_rate;
err = esw_qos_vport_create_sched_element(vport, max_rate, vport->qos.bw_share);
if (err) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch vport group set failed.");
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch vport node set failed.");
goto err_sched;
}
return 0;
err_sched:
- esw_qos_vport_set_parent(vport, curr_group);
- max_rate = vport->qos.max_rate ? vport->qos.max_rate : curr_group->max_rate;
+ esw_qos_vport_set_parent(vport, curr_node);
+ max_rate = vport->qos.max_rate ? vport->qos.max_rate : curr_node->max_rate;
if (esw_qos_vport_create_sched_element(vport, max_rate, vport->qos.bw_share))
- esw_warn(curr_group->esw->dev, "E-Switch vport group restore failed (vport=%d)\n",
+ esw_warn(curr_node->esw->dev, "E-Switch vport node restore failed (vport=%d)\n",
vport->vport);
return err;
}
-static int esw_qos_vport_update_group(struct mlx5_vport *vport,
- struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack)
+static int esw_qos_vport_update_node(struct mlx5_vport *vport,
+ struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
- struct mlx5_esw_rate_group *new_group, *curr_group;
+ struct mlx5_esw_sched_node *new_node, *curr_node;
int err;
esw_assert_qos_lock_held(esw);
- curr_group = vport->qos.parent;
- new_group = group ?: esw->qos.group0;
- if (curr_group == new_group)
+ curr_node = vport->qos.parent;
+ new_node = node ?: esw->qos.node0;
+ if (curr_node == new_node)
return 0;
- err = esw_qos_update_group_scheduling_element(vport, curr_group, new_group, extack);
+ err = esw_qos_update_node_scheduling_element(vport, curr_node, new_node, extack);
if (err)
return err;
- /* Recalculate bw share weights of old and new groups */
- if (vport->qos.bw_share || new_group->bw_share) {
- esw_qos_normalize_group_min_rate(curr_group, extack);
- esw_qos_normalize_group_min_rate(new_group, extack);
+ /* Recalculate bw share weights of old and new nodes */
+ if (vport->qos.bw_share || new_node->bw_share) {
+ esw_qos_normalize_node_min_rate(curr_node, extack);
+ esw_qos_normalize_node_min_rate(new_node, extack);
}
return 0;
}
-static struct mlx5_esw_rate_group *
-__esw_qos_alloc_rate_group(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type type,
- struct mlx5_esw_rate_group *parent)
+static struct mlx5_esw_sched_node *
+__esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type type,
+ struct mlx5_esw_sched_node *parent)
{
- struct mlx5_esw_rate_group *group;
- struct list_head *parent_list;
+ struct list_head *parent_children;
+ struct mlx5_esw_sched_node *node;
- group = kzalloc(sizeof(*group), GFP_KERNEL);
- if (!group)
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
return NULL;
- group->esw = esw;
- group->tsar_ix = tsar_ix;
- group->type = type;
- group->parent = parent;
- INIT_LIST_HEAD(&group->members);
- parent_list = parent ? &parent->members : &esw->qos.domain->groups;
- list_add_tail(&group->parent_entry, parent_list);
+ node->esw = esw;
+ node->ix = tsar_ix;
+ node->type = type;
+ node->parent = parent;
+ INIT_LIST_HEAD(&node->children);
+ parent_children = parent ? &parent->children : &esw->qos.domain->nodes;
+ list_add_tail(&node->entry, parent_children);
- return group;
+ return node;
}
-static void __esw_qos_free_rate_group(struct mlx5_esw_rate_group *group)
+static void __esw_qos_free_node(struct mlx5_esw_sched_node *node)
{
- list_del(&group->parent_entry);
- kfree(group);
+ list_del(&node->entry);
+ kfree(node);
}
-static struct mlx5_esw_rate_group *
-__esw_qos_create_vports_rate_group(struct mlx5_eswitch *esw, struct mlx5_esw_rate_group *parent,
+static struct mlx5_esw_sched_node *
+__esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sched_node *parent,
struct netlink_ext_ack *extack)
{
- struct mlx5_esw_rate_group *group;
+ struct mlx5_esw_sched_node *node;
u32 tsar_ix;
int err;
- err = esw_qos_create_group_sched_elem(esw->dev, esw->qos.root_tsar_ix, &tsar_ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, &tsar_ix);
if (err) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for group failed");
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for node failed");
return ERR_PTR(err);
}
- group = __esw_qos_alloc_rate_group(esw, tsar_ix, SCHED_NODE_TYPE_VPORTS_TSAR, parent);
- if (!group) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch alloc group failed");
+ node = __esw_qos_alloc_node(esw, tsar_ix, SCHED_NODE_TYPE_VPORTS_TSAR, parent);
+ if (!node) {
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch alloc node failed");
err = -ENOMEM;
- goto err_alloc_group;
+ goto err_alloc_node;
}
err = esw_qos_normalize_min_rate(esw, extack);
if (err) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch groups normalization failed");
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch nodes normalization failed");
goto err_min_rate;
}
- trace_mlx5_esw_group_qos_create(esw->dev, group, group->tsar_ix);
+ trace_mlx5_esw_node_qos_create(esw->dev, node, node->ix);
- return group;
+ return node;
err_min_rate:
- __esw_qos_free_rate_group(group);
-err_alloc_group:
+ __esw_qos_free_node(node);
+err_alloc_node:
if (mlx5_destroy_scheduling_element_cmd(esw->dev,
SCHEDULING_HIERARCHY_E_SWITCH,
tsar_ix))
- NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed");
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for node failed");
return ERR_PTR(err);
}
static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack);
static void esw_qos_put(struct mlx5_eswitch *esw);
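These manage the QoS refcount set at the end of the create path below; a hedged sketch of the get side (the create routine's name is assumed):

/* Hedged sketch: take a QoS reference, creating the hierarchy on 0 -> 1. */
static int esw_qos_get(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
	int err = 0;

	esw_assert_qos_lock_held(esw);
	if (!refcount_inc_not_zero(&esw->qos.refcnt))
		err = esw_qos_create(esw, extack);	/* name assumed */

	return err;
}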
-static struct mlx5_esw_rate_group *
-esw_qos_create_vports_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+static struct mlx5_esw_sched_node *
+esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
{
- struct mlx5_esw_rate_group *group;
+ struct mlx5_esw_sched_node *node;
int err;
esw_assert_qos_lock_held(esw);
if (err)
return ERR_PTR(err);
- group = __esw_qos_create_vports_rate_group(esw, NULL, extack);
- if (IS_ERR(group))
+ node = __esw_qos_create_vports_sched_node(esw, NULL, extack);
+ if (IS_ERR(node))
esw_qos_put(esw);
- return group;
+ return node;
}
-static int __esw_qos_destroy_rate_group(struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack)
+static int __esw_qos_destroy_node(struct mlx5_esw_sched_node *node, struct netlink_ext_ack *extack)
{
- struct mlx5_eswitch *esw = group->esw;
+ struct mlx5_eswitch *esw = node->esw;
int err;
- trace_mlx5_esw_group_qos_destroy(esw->dev, group, group->tsar_ix);
+ trace_mlx5_esw_node_qos_destroy(esw->dev, node, node->ix);
err = mlx5_destroy_scheduling_element_cmd(esw->dev,
SCHEDULING_HIERARCHY_E_SWITCH,
- group->tsar_ix);
+ node->ix);
if (err)
NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed");
- __esw_qos_free_rate_group(group);
+ __esw_qos_free_node(node);
err = esw_qos_normalize_min_rate(esw, extack);
if (err)
- NL_SET_ERR_MSG_MOD(extack, "E-Switch groups normalization failed");
+ NL_SET_ERR_MSG_MOD(extack, "E-Switch nodes normalization failed");
return err;
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
return -EOPNOTSUPP;
- err = esw_qos_create_group_sched_elem(esw->dev, 0, &esw->qos.root_tsar_ix);
+ err = esw_qos_create_node_sched_elem(esw->dev, 0, &esw->qos.root_tsar_ix);
if (err) {
esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err);
return err;
}
if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) {
- esw->qos.group0 = __esw_qos_create_vports_rate_group(esw, NULL, extack);
+ esw->qos.node0 = __esw_qos_create_vports_sched_node(esw, NULL, extack);
} else {
- /* The eswitch doesn't support scheduling groups.
- * Create a software-only group0 using the root TSAR to attach vport QoS to.
+ /* The eswitch doesn't support scheduling nodes.
+ * Create a software-only node0 using the root TSAR to attach vport QoS to.
*/
- if (!__esw_qos_alloc_rate_group(esw, esw->qos.root_tsar_ix,
- SCHED_NODE_TYPE_VPORTS_TSAR, NULL))
- esw->qos.group0 = ERR_PTR(-ENOMEM);
+ if (!__esw_qos_alloc_node(esw, esw->qos.root_tsar_ix,
+ SCHED_NODE_TYPE_VPORTS_TSAR, NULL))
+ esw->qos.node0 = ERR_PTR(-ENOMEM);
}
- if (IS_ERR(esw->qos.group0)) {
- err = PTR_ERR(esw->qos.group0);
- esw_warn(dev, "E-Switch create rate group 0 failed (%d)\n", err);
- goto err_group0;
+ if (IS_ERR(esw->qos.node0)) {
+ err = PTR_ERR(esw->qos.node0);
+ esw_warn(dev, "E-Switch create rate node 0 failed (%d)\n", err);
+ goto err_node0;
}
refcount_set(&esw->qos.refcnt, 1);
return 0;
-err_group0:
+err_node0:
if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH,
esw->qos.root_tsar_ix))
esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n");
{
int err;
- if (esw->qos.group0->tsar_ix != esw->qos.root_tsar_ix)
- __esw_qos_destroy_rate_group(esw->qos.group0, NULL);
+ if (esw->qos.node0->ix != esw->qos.root_tsar_ix)
+ __esw_qos_destroy_node(esw->qos.node0, NULL);
else
- __esw_qos_free_rate_group(esw->qos.group0);
- esw->qos.group0 = NULL;
+ __esw_qos_free_node(esw->qos.node0);
+ esw->qos.node0 = NULL;
err = mlx5_destroy_scheduling_element_cmd(esw->dev,
SCHEDULING_HIERARCHY_E_SWITCH,
return err;
INIT_LIST_HEAD(&vport->qos.parent_entry);
- esw_qos_vport_set_parent(vport, esw->qos.group0);
+ esw_qos_vport_set_parent(vport, esw->qos.node0);
err = esw_qos_vport_create_sched_element(vport, max_rate, bw_share);
if (err)
esw_qos_lock(esw);
if (!vport->qos.enabled)
goto unlock;
- WARN(vport->qos.parent != esw->qos.group0,
- "Disabling QoS on port before detaching it from group");
+ WARN(vport->qos.parent != esw->qos.node0,
+ "Disabling QoS on port before detaching it from node");
dev = vport->qos.parent->esw->dev;
err = mlx5_destroy_scheduling_element_cmd(dev,
int mlx5_esw_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
u64 tx_share, struct netlink_ext_ack *extack)
{
- struct mlx5_esw_rate_group *group = priv;
- struct mlx5_eswitch *esw = group->esw;
+ struct mlx5_esw_sched_node *node = priv;
+ struct mlx5_eswitch *esw = node->esw;
int err;
err = esw_qos_devlink_rate_to_mbps(esw->dev, "tx_share", &tx_share, extack);
return err;
esw_qos_lock(esw);
- err = esw_qos_set_group_min_rate(group, tx_share, extack);
+ err = esw_qos_set_node_min_rate(node, tx_share, extack);
esw_qos_unlock(esw);
return err;
}
int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
u64 tx_max, struct netlink_ext_ack *extack)
{
- struct mlx5_esw_rate_group *group = priv;
- struct mlx5_eswitch *esw = group->esw;
+ struct mlx5_esw_sched_node *node = priv;
+ struct mlx5_eswitch *esw = node->esw;
int err;
err = esw_qos_devlink_rate_to_mbps(esw->dev, "tx_max", &tx_max, extack);
return err;
esw_qos_lock(esw);
- err = esw_qos_set_group_max_rate(group, tx_max, extack);
+ err = esw_qos_set_node_max_rate(node, tx_max, extack);
esw_qos_unlock(esw);
return err;
}
int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
struct netlink_ext_ack *extack)
{
- struct mlx5_esw_rate_group *group;
+ struct mlx5_esw_sched_node *node;
struct mlx5_eswitch *esw;
int err = 0;
goto unlock;
}
- group = esw_qos_create_vports_rate_group(esw, extack);
- if (IS_ERR(group)) {
- err = PTR_ERR(group);
+ node = esw_qos_create_vports_sched_node(esw, extack);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
goto unlock;
}
- *priv = group;
+ *priv = node;
unlock:
esw_qos_unlock(esw);
return err;
int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
struct netlink_ext_ack *extack)
{
- struct mlx5_esw_rate_group *group = priv;
- struct mlx5_eswitch *esw = group->esw;
+ struct mlx5_esw_sched_node *node = priv;
+ struct mlx5_eswitch *esw = node->esw;
int err;
esw_qos_lock(esw);
- err = __esw_qos_destroy_rate_group(group, extack);
+ err = __esw_qos_destroy_node(node, extack);
esw_qos_put(esw);
esw_qos_unlock(esw);
return err;
}
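For orientation (hedged): the node callbacks above, plus the parent_set callback below, are wired into the driver's devlink_ops rate table. Field names follow the upstream devlink_ops definition; the leaf parent_set handler name is assumed:

/* Illustrative wiring; other mlx5 devlink_ops fields omitted. */
static const struct devlink_ops mlx5_devlink_ops_sketch = {
	.rate_node_tx_share_set = mlx5_esw_devlink_rate_node_tx_share_set,
	.rate_node_tx_max_set = mlx5_esw_devlink_rate_node_tx_max_set,
	.rate_node_new = mlx5_esw_devlink_rate_node_new,
	.rate_node_del = mlx5_esw_devlink_rate_node_del,
	.rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set,	/* name assumed */
};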
-int mlx5_esw_qos_vport_update_group(struct mlx5_vport *vport,
- struct mlx5_esw_rate_group *group,
- struct netlink_ext_ack *extack)
+int mlx5_esw_qos_vport_update_node(struct mlx5_vport *vport,
+ struct mlx5_esw_sched_node *node,
+ struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
int err = 0;
- if (group && group->esw != esw) {
+ if (node && node->esw != esw) {
NL_SET_ERR_MSG_MOD(extack, "Cross E-Switch scheduling is not supported");
return -EOPNOTSUPP;
}
esw_qos_lock(esw);
- if (!vport->qos.enabled && !group)
+ if (!vport->qos.enabled && !node)
goto unlock;
err = esw_qos_vport_enable(vport, 0, 0, extack);
if (!err)
- err = esw_qos_vport_update_group(vport, group, extack);
+ err = esw_qos_vport_update_node(vport, node, extack);
unlock:
esw_qos_unlock(esw);
return err;
void *priv, void *parent_priv,
struct netlink_ext_ack *extack)
{
- struct mlx5_esw_rate_group *group;
+ struct mlx5_esw_sched_node *node;
struct mlx5_vport *vport = priv;
if (!parent)
- return mlx5_esw_qos_vport_update_group(vport, NULL, extack);
+ return mlx5_esw_qos_vport_update_node(vport, NULL, extack);
- group = parent_priv;
- return mlx5_esw_qos_vport_update_group(vport, group, extack);
+ node = parent_priv;
+ return mlx5_esw_qos_vport_update_node(vport, node, extack);
}
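From userspace these callbacks map onto the devlink rate API; an illustrative session (device name hypothetical):

# rate_node_new, then rate_node_tx_share_set / rate_node_tx_max_set:
$ devlink port function rate add pci/0000:03:00.0/mygroup
$ devlink port function rate set pci/0000:03:00.0/mygroup tx_share 10mbit tx_max 100mbit
# rate_leaf_parent_set, moving a vport leaf under the node:
$ devlink port function rate set pci/0000:03:00.0/1 parent mygroup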