]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
ixgbe: add MDD support
authorPaul Greenwalt <paul.greenwalt@intel.com>
Mon, 17 Feb 2025 09:06:33 +0000 (10:06 +0100)
committerTony Nguyen <anthony.l.nguyen@intel.com>
Thu, 3 Jul 2025 16:39:03 +0000 (09:39 -0700)
Add malicious driver detection to ixgbe driver. The supported devices
are E610 and X550.

MDD event handling is enabled when VFs are created and turned off
when they are disabled. There is no runtime command to enable or
disable MDD independently.

An MDD event is logged when a malicious VF driver is detected. For example,
a VF can try to send an incorrect Tx descriptor (TSO on, but the length
field not correct). This can be reproduced by manipulating the driver, or
by using a driver with incorrect descriptor values.

Example log:
"Malicious event on VF 0 tx:128 rx:128"

Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Reviewed-by: Jedrzej Jagielski <jedrzej.jagielski@intel.com>
Reviewed-by: Marcin Szycik <marcin.szycik@linux.intel.com>
Signed-off-by: Paul Greenwalt <paul.greenwalt@intel.com>
Signed-off-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h

index 71ea25de1bac7af36399833250d7e6a6a9a2919d..87b03c1992a8f60e271b3a9443ee18aa017fc2e6 100644 (file)
@@ -3965,6 +3965,10 @@ static const struct ixgbe_mac_operations mac_ops_e610 = {
        .prot_autoc_write               = prot_autoc_write_generic,
        .setup_fc                       = ixgbe_setup_fc_e610,
        .fc_autoneg                     = ixgbe_fc_autoneg_e610,
+       .enable_mdd                     = ixgbe_enable_mdd_x550,
+       .disable_mdd                    = ixgbe_disable_mdd_x550,
+       .restore_mdd_vf                 = ixgbe_restore_mdd_vf_x550,
+       .handle_mdd                     = ixgbe_handle_mdd_x550,
 };
 
 static const struct ixgbe_phy_operations phy_ops_e610 = {
index 892fa6c1f87974de99094c6e3b26ac56bd2727fa..2a25abc0b17a03e88a8f09c11b3ba0d4787cecac 100644 (file)
@@ -2746,6 +2746,28 @@ enum ixgbe_fdir_pballoc_type {
 #define FW_PHY_INFO_ID_HI_MASK         0xFFFF0000u
 #define FW_PHY_INFO_ID_LO_MASK         0x0000FFFFu
 
+/* There are only 3 options for VFs creation on this device:
+ * 16 VFs pool with 8 queues each
+ * 32 VFs pool with 4 queues each
+ * 64 VFs pool with 2 queues each
+ *
+ * Reading the VF registers that map a VF to its queues therefore
+ * depends on the chosen option. Define values that help deal with
+ * each scenario.
+ */
+/* Number of queues based on VFs pool */
+#define IXGBE_16VFS_QUEUES             8
+#define IXGBE_32VFS_QUEUES             4
+#define IXGBE_64VFS_QUEUES             2
+/* Mask for getting queues bits based on VFs pool */
+#define IXGBE_16VFS_BITMASK            GENMASK(IXGBE_16VFS_QUEUES - 1, 0)
+#define IXGBE_32VFS_BITMASK            GENMASK(IXGBE_32VFS_QUEUES - 1, 0)
+#define IXGBE_64VFS_BITMASK            GENMASK(IXGBE_64VFS_QUEUES - 1, 0)
+/* Convert queue index to register number.
+ * We have 4 registers with 32 queues in each.
+ */
+#define IXGBE_QUEUES_PER_REG           32
+#define IXGBE_QUEUES_REG_AMOUNT                4
+
 /* Host Interface Command Structures */
 struct ixgbe_hic_hdr {
        u8 cmd;
@@ -3539,6 +3561,12 @@ struct ixgbe_mac_operations {
        int (*dmac_config_tcs)(struct ixgbe_hw *hw);
        int (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
        int (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
+
+       /* MDD events */
+       void (*enable_mdd)(struct ixgbe_hw *hw);
+       void (*disable_mdd)(struct ixgbe_hw *hw);
+       void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
+       void (*handle_mdd)(struct ixgbe_hw *hw, unsigned long *vf_bitmap);
 };
 
 struct ixgbe_phy_operations {
index 7461367a18682c176cc50c2d80f416128e73727d..a8263f59ebba023e5d16bb9b3ca9e586490fbfa9 100644 (file)
@@ -3800,6 +3800,122 @@ static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
        return status;
 }
 
+/**
+ * ixgbe_set_mdd_x550 - enable or disable malicious driver detection
+ * @hw: pointer to hardware structure
+ * @ena: true to enable MDD, false to disable it
+ *
+ * Read-modify-write the MDD packet drop (MDP_EN) and mailbox interrupt
+ * (MBINTEN) bits in both the Tx (DMATXCTL) and Rx (RDRXCTL) control
+ * registers so unrelated bits in those registers are preserved.
+ */
+static void ixgbe_set_mdd_x550(struct ixgbe_hw *hw, bool ena)
+{
+       u32 reg_dma, reg_rdr;
+
+       reg_dma = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+       reg_rdr = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+
+       if (ena) {
+               reg_dma |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+               reg_rdr |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+       } else {
+               reg_dma &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN);
+               reg_rdr &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN);
+       }
+
+       IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_dma);
+       IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_rdr);
+}
+
+/**
+ * ixgbe_enable_mdd_x550 - enable malicious driver detection
+ * @hw: pointer to hardware structure
+ *
+ * Thin wrapper around ixgbe_set_mdd_x550(); per the commit description,
+ * MDD is turned on when SR-IOV VFs are created.
+ */
+void ixgbe_enable_mdd_x550(struct ixgbe_hw *hw)
+{
+       ixgbe_set_mdd_x550(hw, true);
+}
+
+/**
+ * ixgbe_disable_mdd_x550 - disable malicious driver detection
+ * @hw: pointer to hardware structure
+ *
+ * Thin wrapper around ixgbe_set_mdd_x550(); per the commit description,
+ * MDD is turned off when SR-IOV VFs are disabled.
+ */
+void ixgbe_disable_mdd_x550(struct ixgbe_hw *hw)
+{
+       ixgbe_set_mdd_x550(hw, false);
+}
+
+/**
+ * ixgbe_restore_mdd_vf_x550 - restore VF that was disabled during MDD event
+ * @hw: pointer to hardware structure
+ * @vf: vf index
+ *
+ * Re-enable the Tx/Rx queues owned by @vf after hardware blocked them in
+ * response to a malicious driver detection event.
+ */
+void ixgbe_restore_mdd_vf_x550(struct ixgbe_hw *hw, u32 vf)
+{
+       u32 idx, reg, val, num_qs, start_q, bitmask;
+
+       /* Map VF to queues: the MRQC mode selects one of the three VF
+        * pool layouts (16/32/64 VFs), which fixes the number of queues
+        * per VF and the per-VF queue bitmask.
+        */
+       reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+       switch (reg & IXGBE_MRQC_MRQE_MASK) {
+       case IXGBE_MRQC_VMDQRT8TCEN:
+               num_qs = IXGBE_16VFS_QUEUES;
+               bitmask = IXGBE_16VFS_BITMASK;
+               break;
+       case IXGBE_MRQC_VMDQRSS32EN:
+       case IXGBE_MRQC_VMDQRT4TCEN:
+               num_qs = IXGBE_32VFS_QUEUES;
+               bitmask = IXGBE_32VFS_BITMASK;
+               break;
+       default:
+               /* Remaining modes: 64 VF pools with 2 queues each */
+               num_qs = IXGBE_64VFS_QUEUES;
+               bitmask = IXGBE_64VFS_BITMASK;
+               break;
+       }
+       /* First queue index owned by this VF */
+       start_q = vf * num_qs;
+
+       /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */
+       idx = start_q / IXGBE_QUEUES_PER_REG;
+       val = bitmask << (start_q % IXGBE_QUEUES_PER_REG);
+       IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), val);
+       IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), val);
+}
+
+/**
+ * ixgbe_handle_mdd_x550 - handle malicious driver detection event
+ * @hw: pointer to hardware structure
+ * @vf_bitmap: output vf bitmap of malicious vfs
+ *
+ * Scan the WQBR_TX/WQBR_RX queue-blocked registers and set a bit in
+ * @vf_bitmap for each VF owning a blocked queue.
+ * NOTE(review): only set_bit() is called here — assumes the caller has
+ * zeroed @vf_bitmap beforehand; confirm at the call site.
+ */
+void ixgbe_handle_mdd_x550(struct ixgbe_hw *hw, unsigned long *vf_bitmap)
+{
+       u32 i, j, reg, q, div, vf;
+       unsigned long wqbr;
+
+       /* figure out pool size for mapping to vf's; same MRQC decode as
+        * in ixgbe_restore_mdd_vf_x550()
+        */
+       reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+       switch (reg & IXGBE_MRQC_MRQE_MASK) {
+       case IXGBE_MRQC_VMDQRT8TCEN:
+               div = IXGBE_16VFS_QUEUES;
+               break;
+       case IXGBE_MRQC_VMDQRSS32EN:
+       case IXGBE_MRQC_VMDQRT4TCEN:
+               div = IXGBE_32VFS_QUEUES;
+               break;
+       default:
+               div = IXGBE_64VFS_QUEUES;
+               break;
+       }
+
+       /* Read WQBR_TX and WQBR_RX and check for malicious queues */
+       for (i = 0; i < IXGBE_QUEUES_REG_AMOUNT; i++) {
+               /* A queue blocked in either direction flags its VF */
+               wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i)) |
+                      IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i));
+               if (!wqbr)
+                       continue;
+
+               /* Get malicious queue */
+               for_each_set_bit(j, (unsigned long *)&wqbr,
+                                IXGBE_QUEUES_PER_REG) {
+                       /* Get queue from bitmask */
+                       q = j + (i * IXGBE_QUEUES_PER_REG);
+                       /* Map queue to vf */
+                       vf = q / div;
+                       set_bit(vf, vf_bitmap);
+               }
+       }
+}
+
 #define X550_COMMON_MAC \
        .init_hw                        = &ixgbe_init_hw_generic, \
        .start_hw                       = &ixgbe_start_hw_X540, \
@@ -3863,6 +3979,10 @@ static const struct ixgbe_mac_operations mac_ops_X550 = {
        .prot_autoc_write       = prot_autoc_write_generic,
        .setup_fc               = ixgbe_setup_fc_generic,
        .fc_autoneg             = ixgbe_fc_autoneg,
+       .enable_mdd             = ixgbe_enable_mdd_x550,
+       .disable_mdd            = ixgbe_disable_mdd_x550,
+       .restore_mdd_vf         = ixgbe_restore_mdd_vf_x550,
+       .handle_mdd             = ixgbe_handle_mdd_x550,
 };
 
 static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
index 3e4092f8da3eb5c16201f47445524a10444aef27..2a11147fb1bcc5aa604f0c4465bf70de01acdef6 100644 (file)
@@ -17,4 +17,9 @@ void ixgbe_set_source_address_pruning_x550(struct ixgbe_hw *hw,
 void ixgbe_set_ethertype_anti_spoofing_x550(struct ixgbe_hw *hw,
                                            bool enable, int vf);
 
+/* Malicious driver detection (MDD) helpers; also reused by E610 mac_ops */
+void ixgbe_enable_mdd_x550(struct ixgbe_hw *hw);
+void ixgbe_disable_mdd_x550(struct ixgbe_hw *hw);
+void ixgbe_restore_mdd_vf_x550(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_handle_mdd_x550(struct ixgbe_hw *hw, unsigned long *vf_bitmap);
+
 #endif /* _IXGBE_X550_H_ */