net: airoha: Add sched ETS offload support
author    Lorenzo Bianconi <lorenzo@kernel.org>
          Fri, 3 Jan 2025 12:17:04 +0000 (13:17 +0100)
committer Paolo Abeni <pabeni@redhat.com>
          Tue, 7 Jan 2025 11:32:50 +0000 (12:32 +0100)
Introduce support for the ETS Qdisc offload available on the Airoha
EN7581 ethernet controller. In order to be effective, the ETS Qdisc must
be configured as a leaf of an HTB Qdisc (HTB Qdisc offload will be added
in the following patch). The ETS Qdisc available on the EN7581 ethernet
controller supports at most 8 concurrent bands (QoS queues). We can
enable an ETS Qdisc for each available QoS channel.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
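
To make the scheduler-mode selection easier to follow, here is a standalone
sketch (not part of the patch) that replays the band-to-mode mapping
implemented by airoha_qdma_set_tx_ets_sched() in the diff below. A strict
band is an ETS band with a zero quantum; the value 8 mirrors
AIROHA_NUM_QOS_QUEUES, and the strict/WRR split noted next to each mode is
inferred from the mapping rather than stated explicitly in the patch.

/* Standalone sketch, not part of the patch: replay of the band-to-mode
 * mapping used by airoha_qdma_set_tx_ets_sched() below.
 */
#include <stdio.h>

enum tx_sched_mode {
        TC_SCH_WRR8,    /* no strict bands, WRR over all 8 queues */
        TC_SCH_SP,      /* all bands strict priority */
        TC_SCH_WRR7,    /* 1 strict band, WRR on the remaining 7 */
        TC_SCH_WRR6,    /* 2 strict bands, WRR on the remaining 6 */
        TC_SCH_WRR5,
        TC_SCH_WRR4,
        TC_SCH_WRR3,
        TC_SCH_WRR2,    /* 6 strict bands, WRR on the remaining 2 */
};

#define NUM_QOS_QUEUES  8       /* AIROHA_NUM_QOS_QUEUES in the driver */

/* nstrict is the number of ETS bands with a zero quantum. Returns -1 for
 * the one layout the driver rejects: 7 strict bands plus a single WRR band.
 */
static int ets_nstrict_to_mode(int nstrict)
{
        if (nstrict == NUM_QOS_QUEUES - 1)
                return -1;
        if (!nstrict)
                return TC_SCH_WRR8;
        if (nstrict < NUM_QOS_QUEUES - 1)
                return nstrict + 1;     /* TC_SCH_WRR7 .. TC_SCH_WRR2 */
        return TC_SCH_SP;               /* all 8 bands strict */
}

int main(void)
{
        int nstrict;

        for (nstrict = 0; nstrict <= NUM_QOS_QUEUES; nstrict++)
                printf("%d strict band(s) -> hw mode %d\n",
                       nstrict, ets_nstrict_to_mode(nstrict));
        return 0;
}

Built as a normal user-space program, it prints the hardware mode chosen for
every possible number of strict bands, including the rejected 7-strict layout.
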
drivers/net/ethernet/mediatek/airoha_eth.c

index 4b361f30829f4f5c40b1d8a97d43fd3a206e4206..76cdab2499043adc96e058f8f562676455b6d36d 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/u64_stats_sync.h>
 #include <net/dsa.h>
 #include <net/page_pool/helpers.h>
+#include <net/pkt_cls.h>
 #include <uapi/linux/ppp_defs.h>
 
 #define AIROHA_MAX_NUM_GDM_PORTS       1
 #define INGRESS_SLOW_TICK_RATIO_MASK   GENMASK(29, 16)
 #define INGRESS_FAST_TICK_MASK         GENMASK(15, 0)
 
+#define REG_QUEUE_CLOSE_CFG(_n)                (0x00a0 + ((_n) & 0xfc))
+#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m)    BIT((_m) + (((_n) & 0x3) << 3))
+
 #define REG_TXQ_DIS_CFG_BASE(_n)       ((_n) ? 0x20a0 : 0x00a0)
 #define REG_TXQ_DIS_CFG(_n, _m)                (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2)
 
+#define REG_CNTR_CFG(_n)               (0x0400 + ((_n) << 3))
+#define CNTR_EN_MASK                   BIT(31)
+#define CNTR_ALL_CHAN_EN_MASK          BIT(30)
+#define CNTR_ALL_QUEUE_EN_MASK         BIT(29)
+#define CNTR_ALL_DSCP_RING_EN_MASK     BIT(28)
+#define CNTR_SRC_MASK                  GENMASK(27, 24)
+#define CNTR_DSCP_RING_MASK            GENMASK(20, 16)
+#define CNTR_CHAN_MASK                 GENMASK(7, 3)
+#define CNTR_QUEUE_MASK                        GENMASK(2, 0)
+
+#define REG_CNTR_VAL(_n)               (0x0404 + ((_n) << 3))
+
 #define REG_LMGR_INIT_CFG              0x1000
 #define LMGR_INIT_START                        BIT(31)
 #define LMGR_SRAM_MODE_MASK            BIT(30)
 #define TWRR_WEIGHT_SCALE_MASK         BIT(31)
 #define TWRR_WEIGHT_BASE_MASK          BIT(3)
 
+#define REG_TXWRR_WEIGHT_CFG           0x1024
+#define TWRR_RW_CMD_MASK               BIT(31)
+#define TWRR_RW_CMD_DONE               BIT(30)
+#define TWRR_CHAN_IDX_MASK             GENMASK(23, 19)
+#define TWRR_QUEUE_IDX_MASK            GENMASK(18, 16)
+#define TWRR_VALUE_MASK                        GENMASK(15, 0)
+
 #define REG_PSE_BUF_USAGE_CFG          0x1028
 #define PSE_BUF_ESTIMATE_EN_MASK       BIT(29)
 
+#define REG_CHAN_QOS_MODE(_n)          (0x1040 + ((_n) << 2))
+#define CHAN_QOS_MODE_MASK(_n)         GENMASK(2 + ((_n) << 2), (_n) << 2)
+
 #define REG_GLB_TRTCM_CFG              0x1080
 #define GLB_TRTCM_EN_MASK              BIT(31)
 #define GLB_TRTCM_MODE_MASK            BIT(30)
@@ -722,6 +748,17 @@ enum {
        FE_PSE_PORT_DROP = 0xf,
 };
 
+enum tx_sched_mode {
+       TC_SCH_WRR8,
+       TC_SCH_SP,
+       TC_SCH_WRR7,
+       TC_SCH_WRR6,
+       TC_SCH_WRR5,
+       TC_SCH_WRR4,
+       TC_SCH_WRR3,
+       TC_SCH_WRR2,
+};
+
 struct airoha_queue_entry {
        union {
                void *buf;
@@ -812,6 +849,10 @@ struct airoha_gdm_port {
        int id;
 
        struct airoha_hw_stats stats;
+
+       /* qos stats counters */
+       u64 cpu_tx_packets;
+       u64 fwd_tx_packets;
 };
 
 struct airoha_eth {
@@ -1961,6 +2002,27 @@ static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
                        FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
 }
 
+static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
+{
+       int i;
+
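+       /* Each QoS channel uses a pair of hw counters: counter 2i counts
+        * Tx-cpu transferred packets, counter 2i + 1 (CNTR_SRC set) counts
+        * Tx-fwd transferred packets.
+        */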
+       for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
+               /* Tx-cpu transferred count */
+               airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
+               airoha_qdma_wr(qdma, REG_CNTR_CFG(i << 1),
+                              CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
+                              CNTR_ALL_DSCP_RING_EN_MASK |
+                              FIELD_PREP(CNTR_CHAN_MASK, i));
+               /* Tx-fwd transferred count */
+               airoha_qdma_wr(qdma, REG_CNTR_VAL((i << 1) + 1), 0);
+               airoha_qdma_wr(qdma, REG_CNTR_CFG((i << 1) + 1),
+                              CNTR_EN_MASK | CNTR_ALL_QUEUE_EN_MASK |
+                              CNTR_ALL_DSCP_RING_EN_MASK |
+                              FIELD_PREP(CNTR_SRC_MASK, 1) |
+                              FIELD_PREP(CNTR_CHAN_MASK, i));
+       }
+}
+
 static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
 {
        int i;
@@ -2011,6 +2073,7 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
 
        airoha_qdma_set(qdma, REG_TXQ_CNGST_CFG,
                        TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
+       airoha_qdma_init_qos_stats(qdma);
 
        return 0;
 }
@@ -2638,6 +2701,135 @@ airoha_ethtool_get_rmon_stats(struct net_device *dev,
        } while (u64_stats_fetch_retry(&port->stats.syncp, start));
 }
 
+static int airoha_qdma_set_chan_tx_sched(struct airoha_gdm_port *port,
+                                        int channel, enum tx_sched_mode mode,
+                                        const u16 *weights, u8 n_weights)
+{
+       int i;
+
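+       /* Re-enable all hw queues of this QoS channel before updating the
+        * scheduler configuration.
+        */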
+       for (i = 0; i < AIROHA_NUM_TX_RING; i++)
+               airoha_qdma_clear(port->qdma, REG_QUEUE_CLOSE_CFG(channel),
+                                 TXQ_DISABLE_CHAN_QUEUE_MASK(channel, i));
+
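+       /* Program the per-queue weights through the indirect
+        * TXWRR_WEIGHT_CFG interface, waiting for each write to complete.
+        */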
+       for (i = 0; i < n_weights; i++) {
+               u32 status;
+               int err;
+
+               airoha_qdma_wr(port->qdma, REG_TXWRR_WEIGHT_CFG,
+                              TWRR_RW_CMD_MASK |
+                              FIELD_PREP(TWRR_CHAN_IDX_MASK, channel) |
+                              FIELD_PREP(TWRR_QUEUE_IDX_MASK, i) |
+                              FIELD_PREP(TWRR_VALUE_MASK, weights[i]));
+               err = read_poll_timeout(airoha_qdma_rr, status,
+                                       status & TWRR_RW_CMD_DONE,
+                                       USEC_PER_MSEC, 10 * USEC_PER_MSEC,
+                                       true, port->qdma,
+                                       REG_TXWRR_WEIGHT_CFG);
+               if (err)
+                       return err;
+       }
+
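+       /* Select the scheduling mode for this channel. */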
+       airoha_qdma_rmw(port->qdma, REG_CHAN_QOS_MODE(channel >> 3),
+                       CHAN_QOS_MODE_MASK(channel),
+                       mode << __ffs(CHAN_QOS_MODE_MASK(channel)));
+
+       return 0;
+}
+
+static int airoha_qdma_set_tx_prio_sched(struct airoha_gdm_port *port,
+                                        int channel)
+{
+       static const u16 w[AIROHA_NUM_QOS_QUEUES] = {};
+
+       return airoha_qdma_set_chan_tx_sched(port, channel, TC_SCH_SP, w,
+                                            ARRAY_SIZE(w));
+}
+
+static int airoha_qdma_set_tx_ets_sched(struct airoha_gdm_port *port,
+                                       int channel,
+                                       struct tc_ets_qopt_offload *opt)
+{
+       struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
+       enum tx_sched_mode mode = TC_SCH_SP;
+       u16 w[AIROHA_NUM_QOS_QUEUES] = {};
+       int i, nstrict = 0;
+
+       if (p->bands > AIROHA_NUM_QOS_QUEUES)
+               return -EINVAL;
+
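+       /* Bands with a zero quantum are strict-priority bands. */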
+       for (i = 0; i < p->bands; i++) {
+               if (!p->quanta[i])
+                       nstrict++;
+       }
+
+       /* this configuration is not supported by the hw */
+       if (nstrict == AIROHA_NUM_QOS_QUEUES - 1)
+               return -EINVAL;
+
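+       /* Strict-priority bands come first; copy the weights of the
+        * remaining (WRR) bands into the per-queue weight table.
+        */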
+       for (i = 0; i < p->bands - nstrict; i++)
+               w[i] = p->weights[nstrict + i];
+
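+       /* No strict bands: WRR over all queues (TC_SCH_WRR8). 1-6 strict
+        * bands map to TC_SCH_WRR7..TC_SCH_WRR2. With all bands strict the
+        * default TC_SCH_SP is kept.
+        */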
+       if (!nstrict)
+               mode = TC_SCH_WRR8;
+       else if (nstrict < AIROHA_NUM_QOS_QUEUES - 1)
+               mode = nstrict + 1;
+
+       return airoha_qdma_set_chan_tx_sched(port, channel, mode, w,
+                                            ARRAY_SIZE(w));
+}
+
+static int airoha_qdma_get_tx_ets_stats(struct airoha_gdm_port *port,
+                                       int channel,
+                                       struct tc_ets_qopt_offload *opt)
+{
+       u64 cpu_tx_packets = airoha_qdma_rr(port->qdma,
+                                           REG_CNTR_VAL(channel << 1));
+       u64 fwd_tx_packets = airoha_qdma_rr(port->qdma,
+                                           REG_CNTR_VAL((channel << 1) + 1));
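+       /* The hw counters are free-running: report the delta since the
+        * previous TC_ETS_STATS request and cache the current values.
+        */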
+       u64 tx_packets = (cpu_tx_packets - port->cpu_tx_packets) +
+                        (fwd_tx_packets - port->fwd_tx_packets);
+
+       _bstats_update(opt->stats.bstats, 0, tx_packets);
+
+       port->cpu_tx_packets = cpu_tx_packets;
+       port->fwd_tx_packets = fwd_tx_packets;
+
+       return 0;
+}
+
+static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
+                                    struct tc_ets_qopt_offload *opt)
+{
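+       /* The major number of the qdisc handle identifies the QoS channel. */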
+       int channel = TC_H_MAJ(opt->handle) >> 16;
+
+       if (opt->parent == TC_H_ROOT)
+               return -EINVAL;
+
+       switch (opt->command) {
+       case TC_ETS_REPLACE:
+               return airoha_qdma_set_tx_ets_sched(port, channel, opt);
+       case TC_ETS_DESTROY:
+               /* PRIO is default qdisc scheduler */
+               return airoha_qdma_set_tx_prio_sched(port, channel);
+       case TC_ETS_STATS:
+               return airoha_qdma_get_tx_ets_stats(port, channel, opt);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
+                              void *type_data)
+{
+       struct airoha_gdm_port *port = netdev_priv(dev);
+
+       switch (type) {
+       case TC_SETUP_QDISC_ETS:
+               return airoha_tc_setup_qdisc_ets(port, type_data);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 static const struct net_device_ops airoha_netdev_ops = {
        .ndo_init               = airoha_dev_init,
        .ndo_open               = airoha_dev_open,
@@ -2646,6 +2838,7 @@ static const struct net_device_ops airoha_netdev_ops = {
        .ndo_start_xmit         = airoha_dev_xmit,
        .ndo_get_stats64        = airoha_dev_get_stats64,
        .ndo_set_mac_address    = airoha_dev_set_macaddr,
+       .ndo_setup_tc           = airoha_dev_tc_setup,
 };
 
 static const struct ethtool_ops airoha_ethtool_ops = {
@@ -2695,7 +2888,8 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
        dev->watchdog_timeo = 5 * HZ;
        dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
                           NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
-                          NETIF_F_SG | NETIF_F_TSO;
+                          NETIF_F_SG | NETIF_F_TSO |
+                          NETIF_F_HW_TC;
        dev->features |= dev->hw_features;
        dev->dev.of_node = np;
        dev->irq = qdma->irq;