u8 flow_mode;
u8 atm_mode;
u8 ack_filter;
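+	/* set when this config is embedded in a cake_mq parent and shared
+	 * between all of its sub-qdiscs
+	 */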
+ u8 is_shared;
};
struct cake_sched_data {
q->buffer_config_limit));
}
-static int cake_change(struct Qdisc *sch, struct nlattr *opt,
- struct netlink_ext_ack *extack)
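+/* Apply a netlink parameter change to @q. Common to cake and cake_mq;
+ * *overhead_changed is set when the overhead/raw settings were touched so
+ * that the caller can reset its per-instance packet-size tracking.
+ */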
+static int cake_config_change(struct cake_sched_config *q, struct nlattr *opt,
+ struct netlink_ext_ack *extack, bool *overhead_changed)
{
- struct cake_sched_data *qd = qdisc_priv(sch);
- struct cake_sched_config *q = qd->config;
struct nlattr *tb[TCA_CAKE_MAX + 1];
- u16 rate_flags;
- u8 flow_mode;
+ u16 rate_flags = q->rate_flags;
+ u8 flow_mode = q->flow_mode;
int err;
err = nla_parse_nested_deprecated(tb, TCA_CAKE_MAX, opt, cake_policy,
if (err < 0)
return err;
- flow_mode = q->flow_mode;
if (tb[TCA_CAKE_NAT]) {
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
flow_mode &= ~CAKE_FLOW_NAT_FLAG;
#endif
}
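+	/* Autorate-ingress tunes the shaper rate from per-instance
+	 * measurements, so it can't operate on a config that is shared
+	 * between all cake_mq sub-qdiscs.
+	 */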
+ if (tb[TCA_CAKE_AUTORATE]) {
+ if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE])) {
+ if (q->is_shared) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_AUTORATE],
+ "Can't use autorate-ingress with cake_mq");
+ return -EOPNOTSUPP;
+ }
+ rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
+ } else {
+ rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
+ }
+ }
+
if (tb[TCA_CAKE_BASE_RATE64])
WRITE_ONCE(q->rate_bps,
nla_get_u64(tb[TCA_CAKE_BASE_RATE64]));
WRITE_ONCE(q->tin_mode,
nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]));
- rate_flags = q->rate_flags;
if (tb[TCA_CAKE_WASH]) {
if (!!nla_get_u32(tb[TCA_CAKE_WASH]))
rate_flags |= CAKE_FLAG_WASH;
WRITE_ONCE(q->rate_overhead,
nla_get_s32(tb[TCA_CAKE_OVERHEAD]));
rate_flags |= CAKE_FLAG_OVERHEAD;
-
- qd->max_netlen = 0;
- qd->max_adjlen = 0;
- qd->min_netlen = ~0;
- qd->min_adjlen = ~0;
+ *overhead_changed = true;
}
if (tb[TCA_CAKE_RAW]) {
rate_flags &= ~CAKE_FLAG_OVERHEAD;
-
- qd->max_netlen = 0;
- qd->max_adjlen = 0;
- qd->min_netlen = ~0;
- qd->min_adjlen = ~0;
+ *overhead_changed = true;
}
if (tb[TCA_CAKE_MPU])
WRITE_ONCE(q->target, max(target, 1U));
}
- if (tb[TCA_CAKE_AUTORATE]) {
- if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
- rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
- else
- rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
- }
-
if (tb[TCA_CAKE_INGRESS]) {
if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
rate_flags |= CAKE_FLAG_INGRESS;
WRITE_ONCE(q->rate_flags, rate_flags);
WRITE_ONCE(q->flow_mode, flow_mode);
+
+ return 0;
+}
+
+static int cake_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+{
+ struct cake_sched_data *qd = qdisc_priv(sch);
+ struct cake_sched_config *q = qd->config;
+ bool overhead_changed = false;
+ int ret;
+
+ if (q->is_shared) {
+		NL_SET_ERR_MSG(extack, "Can't reconfigure cake_mq sub-qdiscs");
+ return -EOPNOTSUPP;
+ }
+
+ ret = cake_config_change(q, opt, extack, &overhead_changed);
+ if (ret)
+ return ret;
+
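+	/* an overhead change invalidates the recorded packet-size bounds */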
+ if (overhead_changed) {
+ qd->max_netlen = 0;
+ qd->max_adjlen = 0;
+ qd->min_netlen = ~0;
+ qd->min_adjlen = ~0;
+ }
+
if (qd->tins) {
sch_tree_lock(sch);
cake_reconfigure(sch);
qdisc_watchdog_cancel(&q->watchdog);
tcf_block_put(q->block);
kvfree(q->tins);
- kvfree(q->config);
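+	/* a shared config is embedded in the parent cake_mq and freed with it */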
+ if (q->config && !q->config->is_shared)
+ kvfree(q->config);
+}
+
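+/* Default parameters, common to cake_init() and cake_mq_init() */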
+static void cake_config_init(struct cake_sched_config *q, bool is_shared)
+{
+ q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
+ q->flow_mode = CAKE_FLOW_TRIPLE;
+
+ q->rate_bps = 0; /* unlimited by default */
+
+ q->interval = 100000; /* 100ms default */
+ q->target = 5000; /* 5ms: codel RFC argues
+ * for 5 to 10% of interval
+ */
+ q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
+ q->is_shared = is_shared;
}
static int cake_init(struct Qdisc *sch, struct nlattr *opt,
if (!q)
return -ENOMEM;
+ cake_config_init(q, false);
+
sch->limit = 10240;
sch->flags |= TCQ_F_DEQUEUE_DROPS;
- q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
- q->flow_mode = CAKE_FLOW_TRIPLE;
-
- q->rate_bps = 0; /* unlimited by default */
-
- q->interval = 100000; /* 100ms default */
- q->target = 5000; /* 5ms: codel RFC argues
- * for 5 to 10% of interval
- */
- q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
qd->cur_tin = 0;
qd->cur_flow = 0;
qd->config = q;
return err;
}
-static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
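+/* Swap a sub-qdisc's private config for @cfg (the parent's shared copy),
+ * freeing the private config allocated in cake_init().
+ */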
+static void cake_config_replace(struct Qdisc *sch, struct cake_sched_config *cfg)
{
struct cake_sched_data *qd = qdisc_priv(sch);
struct cake_sched_config *q = qd->config;
+
+ qd->config = cfg;
+
+ if (!q->is_shared)
+ kvfree(q);
+
+ cake_reconfigure(sch);
+}
+
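+/* Netlink dump of a config, common to cake_dump() and cake_mq_dump() */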
+static int cake_config_dump(struct cake_sched_config *q, struct sk_buff *skb)
+{
struct nlattr *opts;
u16 rate_flags;
u8 flow_mode;
return -1;
}
+static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct cake_sched_data *qd = qdisc_priv(sch);
+
+ return cake_config_dump(qd->config, skb);
+}
+
static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct nlattr *stats = nla_nest_start_noflag(d->skb, TCA_STATS_APP);
struct cake_mq_sched {
struct mq_sched mq_priv; /* must be first */
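+	/* one config shared by every sub-qdisc */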
+ struct cake_sched_config cake_config;
};
static void cake_mq_destroy(struct Qdisc *sch)
static int cake_mq_init(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
- int ret;
+ struct cake_mq_sched *priv = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+	unsigned int ntx;
+	bool _unused;
+	int ret;
+
+ cake_config_init(&priv->cake_config, true);
+ if (opt) {
+ ret = cake_config_change(&priv->cake_config, opt, extack, &_unused);
+ if (ret)
+ return ret;
+ }
ret = mq_init_common(sch, opt, extack, &cake_qdisc_ops);
if (ret)
return ret;
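+
+	/* hand each sub-qdisc the shared config in place of its private one */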
+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++)
+ cake_config_replace(priv->mq_priv.qdiscs[ntx], &priv->cake_config);
+
return 0;
}
static int cake_mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
+ struct cake_mq_sched *priv = qdisc_priv(sch);
+
mq_dump_common(sch, skb);
- return 0;
+ return cake_config_dump(&priv->cake_config, skb);
}
static int cake_mq_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
- return -EOPNOTSUPP;
+ struct cake_mq_sched *priv = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ bool overhead_changed = false;
+ unsigned int ntx;
+ int ret;
+
+ ret = cake_config_change(&priv->cake_config, opt, extack, &overhead_changed);
+ if (ret)
+ return ret;
+
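+	/* the shared config is already updated; reconfigure each sub-qdisc */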
+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, ntx);
+		struct Qdisc *chld = rtnl_dereference(txq->qdisc_sleeping);
+ struct cake_sched_data *qd = qdisc_priv(chld);
+
+ if (overhead_changed) {
+ qd->max_netlen = 0;
+ qd->max_adjlen = 0;
+ qd->min_netlen = ~0;
+ qd->min_adjlen = ~0;
+ }
+
+ if (qd->tins) {
+ sch_tree_lock(chld);
+ cake_reconfigure(chld);
+ sch_tree_unlock(chld);
+ }
+ }
+
+ return 0;
}
static int cake_mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,