idpf: implement get LAN MMIO memory regions
author     Joshua Hay <joshua.a.hay@intel.com>
           Tue, 8 Jul 2025 21:05:54 +0000 (16:05 -0500)
committer  Tony Nguyen <anthony.l.nguyen@intel.com>
           Mon, 14 Jul 2025 17:57:51 +0000 (10:57 -0700)
The RDMA driver needs to map its own MMIO regions for the sake of
performance, meaning the IDPF needs to avoid mapping those portions of the
BAR space. However, to be HW agnostic, the IDPF cannot assume where these
regions are and must avoid mapping hard-coded regions as much as possible.

The IDPF maps the bare minimum to load and communicate with the
control plane, i.e., the mailbox registers and the reset state
registers. Because of how and when mailbox register offsets are
initialized, it is easier to adjust the existing defines to be relative
to the mailbox region starting address. Use a specific mailbox register
write function that uses these relative offsets. The reset state
register addresses are calculated the same way as for other registers,
described below.
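
For illustration (the numeric values here are hypothetical, not taken from
the register headers): if the PF mailbox region starts at BAR0 offset
0x08400000 and PF_FW_ATQH is defined as 0x08422300, the control queue now
stores the relative offset 0x22300, and a head update through the new
helper resolves as:

    /* idpf_mbx_wr32(hw, cq->reg.head, 0) boils down to roughly this */
    writel(0, hw->mbx.vaddr + 0x22300);  /* hypothetical relative offset */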

The IDPF then calls a new virtchnl op to fetch a list of MMIO regions
that it should map. The addresses for the registers in these regions are
calculated by determining what region the register resides in, adjusting
the offset to be relative to that region, and then adding the
register's offset to that region's mapped address.
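
A minimal sketch of that lookup, mirroring the idpf_get_reg_addr() change
further down in this diff (the helper name example_region_lookup is only
for illustration and is not part of the patch):

    static void __iomem *example_region_lookup(struct idpf_mmio_reg *regs,
                                               int num_regs,
                                               resource_size_t off)
    {
            for (int i = 0; i < num_regs; i++) {
                    if (off >= regs[i].addr_start &&
                        off < regs[i].addr_start + regs[i].addr_len)
                            /* Make the offset region-relative, then add
                             * the region's mapped base address.
                             */
                            return regs[i].vaddr + (off - regs[i].addr_start);
            }

            return NULL; /* offset not covered by any reported region */
    }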

If the new virtchnl op is not supported, the IDPF falls back to mapping
the whole BAR. However, it still maps the space as separate regions,
excluding the mailbox and reset state registers. This way we can use the
same logic in both cases to access the MMIO space.
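
In the fallback case this works out to three LAN regions that together
cover BAR0 minus the two statically mapped areas (the exact boundaries
come from the per-device mailbox/rstat defines; the sketch below only
shows the shape of the split):

    lan_regs[0]: [0, mbx.start)
    lan_regs[1]: [mbx.end + 1, rstat.start)
    lan_regs[2]: [rstat.end + 1, pci_resource_len(pdev, 0))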

Reviewed-by: Madhu Chittim <madhu.chittim@intel.com>
Signed-off-by: Joshua Hay <joshua.a.hay@intel.com>
Signed-off-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
drivers/net/ethernet/intel/idpf/idpf.h
drivers/net/ethernet/intel/idpf/idpf_controlq.c
drivers/net/ethernet/intel/idpf/idpf_controlq.h
drivers/net/ethernet/intel/idpf/idpf_dev.c
drivers/net/ethernet/intel/idpf/idpf_idc.c
drivers/net/ethernet/intel/idpf/idpf_main.c
drivers/net/ethernet/intel/idpf/idpf_mem.h
drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
drivers/net/ethernet/intel/idpf/virtchnl2.h
include/linux/net/intel/iidc_rdma_idpf.h

index 79379d6db015570bc894099085234d20be2185bd..0cf9120d1f97ce0c99352b45290ff31ebaa71694 100644 (file)
@@ -12,6 +12,7 @@ struct idpf_vport_max_q;
 #include <net/pkt_sched.h>
 #include <linux/aer.h>
 #include <linux/etherdevice.h>
+#include <linux/ioport.h>
 #include <linux/pci.h>
 #include <linux/bitfield.h>
 #include <linux/sctp.h>
@@ -197,7 +198,8 @@ struct idpf_vport_max_q {
  * @ptp_reg_init: PTP register initialization
  */
 struct idpf_reg_ops {
-       void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq);
+       void (*ctlq_reg_init)(struct idpf_adapter *adapter,
+                             struct idpf_ctlq_create_info *cq);
        int (*intr_reg_init)(struct idpf_vport *vport);
        void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
        void (*reset_reg_init)(struct idpf_adapter *adapter);
@@ -206,15 +208,25 @@ struct idpf_reg_ops {
        void (*ptp_reg_init)(const struct idpf_adapter *adapter);
 };
 
+#define IDPF_MMIO_REG_NUM_STATIC       2
+#define IDPF_PF_MBX_REGION_SZ          4096
+#define IDPF_PF_RSTAT_REGION_SZ                2048
+#define IDPF_VF_MBX_REGION_SZ          10240
+#define IDPF_VF_RSTAT_REGION_SZ                2048
+
 /**
  * struct idpf_dev_ops - Device specific operations
  * @reg_ops: Register operations
  * @idc_init: IDC initialization
+ * @static_reg_info: array of mailbox and rstat register info
  */
 struct idpf_dev_ops {
        struct idpf_reg_ops reg_ops;
 
        int (*idc_init)(struct idpf_adapter *adapter);
+
+       /* static_reg_info[0] is mailbox region, static_reg_info[1] is rstat */
+       struct resource static_reg_info[IDPF_MMIO_REG_NUM_STATIC];
 };
 
 /**
@@ -755,6 +767,34 @@ static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
        return pkt_len ? pkt_len : IDPF_TX_MIN_PKT_LEN;
 }
 
+/**
+ * idpf_get_mbx_reg_addr - Get BAR0 mailbox register address
+ * @adapter: private data struct
+ * @reg_offset: register offset value
+ *
+ * Return: BAR0 mailbox register address based on register offset.
+ */
+static inline void __iomem *idpf_get_mbx_reg_addr(struct idpf_adapter *adapter,
+                                                 resource_size_t reg_offset)
+{
+       return adapter->hw.mbx.vaddr + reg_offset;
+}
+
+/**
+ * idpf_get_rstat_reg_addr - Get BAR0 rstat register address
+ * @adapter: private data struct
+ * @reg_offset: register offset value
+ *
+ * Return: BAR0 rstat register address based on register offset.
+ */
+static inline void __iomem *idpf_get_rstat_reg_addr(struct idpf_adapter *adapter,
+                                                   resource_size_t reg_offset)
+{
+       reg_offset -= adapter->dev_ops.static_reg_info[1].start;
+
+       return adapter->hw.rstat.vaddr + reg_offset;
+}
+
 /**
  * idpf_get_reg_addr - Get BAR0 register address
  * @adapter: private data struct
@@ -765,7 +805,30 @@ static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
 static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter,
                                              resource_size_t reg_offset)
 {
-       return (void __iomem *)(adapter->hw.hw_addr + reg_offset);
+       struct idpf_hw *hw = &adapter->hw;
+
+       for (int i = 0; i < hw->num_lan_regs; i++) {
+               struct idpf_mmio_reg *region = &hw->lan_regs[i];
+
+               if (reg_offset >= region->addr_start &&
+                   reg_offset < (region->addr_start + region->addr_len)) {
+                       /* Convert the offset so that it is relative to the
+                        * start of the region.  Then add the base address of
+                        * the region to get the final address.
+                        */
+                       reg_offset -= region->addr_start;
+
+                       return region->vaddr + reg_offset;
+               }
+       }
+
+       /* It's impossible to hit this case with offsets from the CP. But if we
+        * do for any other reason, the kernel will panic on that register
+        * access. Might as well do it here to make it clear what's happening.
+        */
+       BUG();
+
+       return NULL;
 }
 
 /**
@@ -779,7 +842,7 @@ static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter)
        if (!adapter->hw.arq)
                return true;
 
-       return !(readl(idpf_get_reg_addr(adapter, adapter->hw.arq->reg.len)) &
+       return !(readl(idpf_get_mbx_reg_addr(adapter, adapter->hw.arq->reg.len)) &
                 adapter->hw.arq->reg.len_mask);
 }
 
index b28991dd18703664f4bf5189eb3fb42b4acc2c1d..9c5c628eb469d7432f63b6e84f42cbf911f5da13 100644 (file)
@@ -36,19 +36,19 @@ static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 {
        /* Update tail to post pre-allocated buffers for rx queues */
        if (is_rxq)
-               wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
+               idpf_mbx_wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
 
        /* For non-Mailbox control queues only TAIL need to be set */
        if (cq->q_id != -1)
                return;
 
        /* Clear Head for both send or receive */
-       wr32(hw, cq->reg.head, 0);
+       idpf_mbx_wr32(hw, cq->reg.head, 0);
 
        /* set starting point */
-       wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
-       wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
-       wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+       idpf_mbx_wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
+       idpf_mbx_wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
+       idpf_mbx_wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
 }
 
 /**
@@ -329,7 +329,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
         */
        dma_wmb();
 
-       wr32(hw, cq->reg.tail, cq->next_to_use);
+       idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_use);
 
 err_unlock:
        mutex_unlock(&cq->cq_lock);
@@ -521,7 +521,7 @@ post_buffs_out:
 
                dma_wmb();
 
-               wr32(hw, cq->reg.tail, cq->next_to_post);
+               idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_post);
        }
 
        mutex_unlock(&cq->cq_lock);
index c1aba09e98562493fdde55f341fa94f590be939d..de4ece40c2ff34369ce9e7659d6801e479ebc6a6 100644 (file)
@@ -94,12 +94,26 @@ struct idpf_mbxq_desc {
        u32 pf_vf_id;           /* used by CP when sending to PF */
 };
 
+/* Max number of MMIO regions not including the mailbox and rstat regions in
+ * the fallback case when the whole bar is mapped.
+ */
+#define IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING           3
+
+struct idpf_mmio_reg {
+       void __iomem *vaddr;
+       resource_size_t addr_start;
+       resource_size_t addr_len;
+};
+
 /* Define the driver hardware struct to replace other control structs as needed
  * Align to ctlq_hw_info
  */
 struct idpf_hw {
-       void __iomem *hw_addr;
-       resource_size_t hw_addr_len;
+       struct idpf_mmio_reg mbx;
+       struct idpf_mmio_reg rstat;
+       /* Array of remaining LAN BAR regions */
+       int num_lan_regs;
+       struct idpf_mmio_reg *lan_regs;
 
        struct idpf_adapter *back;
 
index dd227a4368fb3166cbc6dfe7335a2797e78df0f1..bfa60f7d43de3f03523f345241919f3a82c7245e 100644 (file)
 
 /**
  * idpf_ctlq_reg_init - initialize default mailbox registers
+ * @adapter: adapter structure
  * @cq: pointer to the array of create control queues
  */
-static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
+static void idpf_ctlq_reg_init(struct idpf_adapter *adapter,
+                              struct idpf_ctlq_create_info *cq)
 {
+       resource_size_t mbx_start = adapter->dev_ops.static_reg_info[0].start;
        int i;
 
        for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) {
@@ -22,22 +25,22 @@ static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
                switch (ccq->type) {
                case IDPF_CTLQ_TYPE_MAILBOX_TX:
                        /* set head and tail registers in our local struct */
-                       ccq->reg.head = PF_FW_ATQH;
-                       ccq->reg.tail = PF_FW_ATQT;
-                       ccq->reg.len = PF_FW_ATQLEN;
-                       ccq->reg.bah = PF_FW_ATQBAH;
-                       ccq->reg.bal = PF_FW_ATQBAL;
+                       ccq->reg.head = PF_FW_ATQH - mbx_start;
+                       ccq->reg.tail = PF_FW_ATQT - mbx_start;
+                       ccq->reg.len = PF_FW_ATQLEN - mbx_start;
+                       ccq->reg.bah = PF_FW_ATQBAH - mbx_start;
+                       ccq->reg.bal = PF_FW_ATQBAL - mbx_start;
                        ccq->reg.len_mask = PF_FW_ATQLEN_ATQLEN_M;
                        ccq->reg.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
                        ccq->reg.head_mask = PF_FW_ATQH_ATQH_M;
                        break;
                case IDPF_CTLQ_TYPE_MAILBOX_RX:
                        /* set head and tail registers in our local struct */
-                       ccq->reg.head = PF_FW_ARQH;
-                       ccq->reg.tail = PF_FW_ARQT;
-                       ccq->reg.len = PF_FW_ARQLEN;
-                       ccq->reg.bah = PF_FW_ARQBAH;
-                       ccq->reg.bal = PF_FW_ARQBAL;
+                       ccq->reg.head = PF_FW_ARQH - mbx_start;
+                       ccq->reg.tail = PF_FW_ARQT - mbx_start;
+                       ccq->reg.len = PF_FW_ARQLEN - mbx_start;
+                       ccq->reg.bah = PF_FW_ARQBAH - mbx_start;
+                       ccq->reg.bal = PF_FW_ARQBAL - mbx_start;
                        ccq->reg.len_mask = PF_FW_ARQLEN_ARQLEN_M;
                        ccq->reg.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
                        ccq->reg.head_mask = PF_FW_ARQH_ARQH_M;
@@ -130,7 +133,7 @@ free_reg_vals:
  */
 static void idpf_reset_reg_init(struct idpf_adapter *adapter)
 {
-       adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, PFGEN_RSTAT);
+       adapter->reset_reg.rstat = idpf_get_rstat_reg_addr(adapter, PFGEN_RSTAT);
        adapter->reset_reg.rstat_m = PFGEN_RSTAT_PFR_STATE_M;
 }
 
@@ -144,9 +147,9 @@ static void idpf_trigger_reset(struct idpf_adapter *adapter,
 {
        u32 reset_reg;
 
-       reset_reg = readl(idpf_get_reg_addr(adapter, PFGEN_CTRL));
+       reset_reg = readl(idpf_get_rstat_reg_addr(adapter, PFGEN_CTRL));
        writel(reset_reg | PFGEN_CTRL_PFSWR,
-              idpf_get_reg_addr(adapter, PFGEN_CTRL));
+              idpf_get_rstat_reg_addr(adapter, PFGEN_CTRL));
 }
 
 /**
@@ -195,4 +198,9 @@ void idpf_dev_ops_init(struct idpf_adapter *adapter)
        idpf_reg_ops_init(adapter);
 
        adapter->dev_ops.idc_init = idpf_idc_register;
+
+       resource_set_range(&adapter->dev_ops.static_reg_info[0],
+                          PF_FW_BASE, IDPF_PF_MBX_REGION_SZ);
+       resource_set_range(&adapter->dev_ops.static_reg_info[1],
+                          PFGEN_RTRIG, IDPF_PF_RSTAT_REGION_SZ);
 }
index 2443337c83de0f471c12f48e39ea18751665fd4f..4d29051032153facba5186d22ea185f8b6c66fbd 100644 (file)
@@ -412,7 +412,7 @@ int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter,
 {
        struct iidc_rdma_core_dev_info *cdev_info;
        struct iidc_rdma_priv_dev_info *privd;
-       int err;
+       int err, i;
 
        adapter->cdev_info = kzalloc(sizeof(*cdev_info), GFP_KERNEL);
        if (!adapter->cdev_info)
@@ -430,14 +430,36 @@ int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter,
        cdev_info->rdma_protocol = IIDC_RDMA_PROTOCOL_ROCEV2;
        privd->ftype = ftype;
 
+       privd->mapped_mem_regions =
+               kcalloc(adapter->hw.num_lan_regs,
+                       sizeof(struct iidc_rdma_lan_mapped_mem_region),
+                       GFP_KERNEL);
+       if (!privd->mapped_mem_regions) {
+               err = -ENOMEM;
+               goto err_plug_aux_dev;
+       }
+
+       privd->num_memory_regions = cpu_to_le16(adapter->hw.num_lan_regs);
+       for (i = 0; i < adapter->hw.num_lan_regs; i++) {
+               privd->mapped_mem_regions[i].region_addr =
+                       adapter->hw.lan_regs[i].vaddr;
+               privd->mapped_mem_regions[i].size =
+                       cpu_to_le64(adapter->hw.lan_regs[i].addr_len);
+               privd->mapped_mem_regions[i].start_offset =
+                       cpu_to_le64(adapter->hw.lan_regs[i].addr_start);
+       }
+
        idpf_idc_init_msix_data(adapter);
 
        err = idpf_plug_core_aux_dev(cdev_info);
        if (err)
-               goto err_plug_aux_dev;
+               goto err_free_mem_regions;
 
        return 0;
 
+err_free_mem_regions:
+       kfree(privd->mapped_mem_regions);
+       privd->mapped_mem_regions = NULL;
 err_plug_aux_dev:
        kfree(privd);
 err_privd_alloc:
@@ -453,12 +475,16 @@ err_privd_alloc:
  */
 void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info)
 {
+       struct iidc_rdma_priv_dev_info *privd;
+
        if (!cdev_info)
                return;
 
        idpf_unplug_aux_dev(cdev_info->adev);
 
-       kfree(cdev_info->iidc_priv);
+       privd = cdev_info->iidc_priv;
+       kfree(privd->mapped_mem_regions);
+       kfree(privd);
        kfree(cdev_info);
 }
 
index 0efd9c0c7a90fe207f2af4f3e9e06f54a64eb4ad..b7422be3e967c240183552d84a211dbab4caf5f5 100644 (file)
@@ -106,15 +106,37 @@ static void idpf_shutdown(struct pci_dev *pdev)
  */
 static int idpf_cfg_hw(struct idpf_adapter *adapter)
 {
+       resource_size_t res_start, mbx_start, rstat_start;
        struct pci_dev *pdev = adapter->pdev;
        struct idpf_hw *hw = &adapter->hw;
+       struct device *dev = &pdev->dev;
+       long len;
+
+       res_start = pci_resource_start(pdev, 0);
+
+       /* Map mailbox space for virtchnl communication */
+       mbx_start = res_start + adapter->dev_ops.static_reg_info[0].start;
+       len = resource_size(&adapter->dev_ops.static_reg_info[0]);
+       hw->mbx.vaddr = devm_ioremap(dev, mbx_start, len);
+       if (!hw->mbx.vaddr) {
+               pci_err(pdev, "failed to allocate BAR0 mbx region\n");
+
+               return -ENOMEM;
+       }
+       hw->mbx.addr_start = adapter->dev_ops.static_reg_info[0].start;
+       hw->mbx.addr_len = len;
 
-       hw->hw_addr = pcim_iomap_table(pdev)[0];
-       if (!hw->hw_addr) {
-               pci_err(pdev, "failed to allocate PCI iomap table\n");
+       /* Map rstat space for resets */
+       rstat_start = res_start + adapter->dev_ops.static_reg_info[1].start;
+       len = resource_size(&adapter->dev_ops.static_reg_info[1]);
+       hw->rstat.vaddr = devm_ioremap(dev, rstat_start, len);
+       if (!hw->rstat.vaddr) {
+               pci_err(pdev, "failed to allocate BAR0 rstat region\n");
 
                return -ENOMEM;
        }
+       hw->rstat.addr_start = adapter->dev_ops.static_reg_info[1].start;
+       hw->rstat.addr_len = len;
 
        hw->back = adapter;
 
@@ -161,9 +183,9 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto err_free;
 
-       err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+       err = pcim_request_region(pdev, 0, pci_name(pdev));
        if (err) {
-               pci_err(pdev, "pcim_iomap_regions failed %pe\n", ERR_PTR(err));
+               pci_err(pdev, "pcim_request_region failed %pe\n", ERR_PTR(err));
 
                goto err_free;
        }
index b21a04fccf0f0dd3b70b7fbf988297720601599a..2aaabdc02dd2422aea9bdf6ece025ed5a9764715 100644 (file)
@@ -12,9 +12,9 @@ struct idpf_dma_mem {
        size_t size;
 };
 
-#define wr32(a, reg, value)    writel((value), ((a)->hw_addr + (reg)))
-#define rd32(a, reg)           readl((a)->hw_addr + (reg))
-#define wr64(a, reg, value)    writeq((value), ((a)->hw_addr + (reg)))
-#define rd64(a, reg)           readq((a)->hw_addr + (reg))
+#define idpf_mbx_wr32(a, reg, value)   writel((value), ((a)->mbx.vaddr + (reg)))
+#define idpf_mbx_rd32(a, reg)          readl((a)->mbx.vaddr + (reg))
+#define idpf_mbx_wr64(a, reg, value)   writeq((value), ((a)->mbx.vaddr + (reg)))
+#define idpf_mbx_rd64(a, reg)          readq((a)->mbx.vaddr + (reg))
 
 #endif /* _IDPF_MEM_H_ */
index 2f84bd596ae49d7d7951de9dd952303c0f7cbf95..259d50fded67b11674f8c0fd0f2235f4a9697699 100644 (file)
@@ -9,10 +9,13 @@
 
 /**
  * idpf_vf_ctlq_reg_init - initialize default mailbox registers
+ * @adapter: adapter structure
  * @cq: pointer to the array of create control queues
  */
-static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
+static void idpf_vf_ctlq_reg_init(struct idpf_adapter *adapter,
+                                 struct idpf_ctlq_create_info *cq)
 {
+       resource_size_t mbx_start = adapter->dev_ops.static_reg_info[0].start;
        int i;
 
        for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) {
@@ -21,22 +24,22 @@ static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
                switch (ccq->type) {
                case IDPF_CTLQ_TYPE_MAILBOX_TX:
                        /* set head and tail registers in our local struct */
-                       ccq->reg.head = VF_ATQH;
-                       ccq->reg.tail = VF_ATQT;
-                       ccq->reg.len = VF_ATQLEN;
-                       ccq->reg.bah = VF_ATQBAH;
-                       ccq->reg.bal = VF_ATQBAL;
+                       ccq->reg.head = VF_ATQH - mbx_start;
+                       ccq->reg.tail = VF_ATQT - mbx_start;
+                       ccq->reg.len = VF_ATQLEN - mbx_start;
+                       ccq->reg.bah = VF_ATQBAH - mbx_start;
+                       ccq->reg.bal = VF_ATQBAL - mbx_start;
                        ccq->reg.len_mask = VF_ATQLEN_ATQLEN_M;
                        ccq->reg.len_ena_mask = VF_ATQLEN_ATQENABLE_M;
                        ccq->reg.head_mask = VF_ATQH_ATQH_M;
                        break;
                case IDPF_CTLQ_TYPE_MAILBOX_RX:
                        /* set head and tail registers in our local struct */
-                       ccq->reg.head = VF_ARQH;
-                       ccq->reg.tail = VF_ARQT;
-                       ccq->reg.len = VF_ARQLEN;
-                       ccq->reg.bah = VF_ARQBAH;
-                       ccq->reg.bal = VF_ARQBAL;
+                       ccq->reg.head = VF_ARQH - mbx_start;
+                       ccq->reg.tail = VF_ARQT - mbx_start;
+                       ccq->reg.len = VF_ARQLEN - mbx_start;
+                       ccq->reg.bah = VF_ARQBAH - mbx_start;
+                       ccq->reg.bal = VF_ARQBAL - mbx_start;
                        ccq->reg.len_mask = VF_ARQLEN_ARQLEN_M;
                        ccq->reg.len_ena_mask = VF_ARQLEN_ARQENABLE_M;
                        ccq->reg.head_mask = VF_ARQH_ARQH_M;
@@ -129,7 +132,7 @@ free_reg_vals:
  */
 static void idpf_vf_reset_reg_init(struct idpf_adapter *adapter)
 {
-       adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, VFGEN_RSTAT);
+       adapter->reset_reg.rstat = idpf_get_rstat_reg_addr(adapter, VFGEN_RSTAT);
        adapter->reset_reg.rstat_m = VFGEN_RSTAT_VFR_STATE_M;
 }
 
@@ -180,4 +183,9 @@ void idpf_vf_dev_ops_init(struct idpf_adapter *adapter)
        idpf_vf_reg_ops_init(adapter);
 
        adapter->dev_ops.idc_init = idpf_idc_vf_register;
+
+       resource_set_range(&adapter->dev_ops.static_reg_info[0],
+                          VF_BASE, IDPF_VF_MBX_REGION_SZ);
+       resource_set_range(&adapter->dev_ops.static_reg_info[1],
+                          VFGEN_RSTAT, IDPF_VF_RSTAT_REGION_SZ);
 }
index 957b3b77700a7c94bc48e346789cfbe94dc55c63..0d2199ac5c3e4f7b597e31a4b954c4807f46df16 100644 (file)
@@ -870,6 +870,7 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
        caps.other_caps =
                cpu_to_le64(VIRTCHNL2_CAP_SRIOV                 |
                            VIRTCHNL2_CAP_RDMA                  |
+                           VIRTCHNL2_CAP_LAN_MEMORY_REGIONS    |
                            VIRTCHNL2_CAP_MACFILTER             |
                            VIRTCHNL2_CAP_SPLITQ_QSCHED         |
                            VIRTCHNL2_CAP_PROMISC               |
@@ -892,6 +893,128 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
        return 0;
 }
 
+/**
+ * idpf_send_get_lan_memory_regions - Send virtchnl get LAN memory regions msg
+ * @adapter: Driver specific private struct
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter)
+{
+       struct virtchnl2_get_lan_memory_regions *rcvd_regions __free(kfree);
+       struct idpf_vc_xn_params xn_params = {
+               .vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS,
+               .recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
+               .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+       };
+       int num_regions, size;
+       struct idpf_hw *hw;
+       ssize_t reply_sz;
+       int err = 0;
+
+       rcvd_regions = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+       if (!rcvd_regions)
+               return -ENOMEM;
+
+       xn_params.recv_buf.iov_base = rcvd_regions;
+       reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+       if (reply_sz < 0)
+               return reply_sz;
+
+       num_regions = le16_to_cpu(rcvd_regions->num_memory_regions);
+       size = struct_size(rcvd_regions, mem_reg, num_regions);
+       if (reply_sz < size)
+               return -EIO;
+
+       if (size > IDPF_CTLQ_MAX_BUF_LEN)
+               return -EINVAL;
+
+       hw = &adapter->hw;
+       hw->lan_regs = kcalloc(num_regions, sizeof(*hw->lan_regs), GFP_KERNEL);
+       if (!hw->lan_regs)
+               return -ENOMEM;
+
+       for (int i = 0; i < num_regions; i++) {
+               hw->lan_regs[i].addr_len =
+                       le64_to_cpu(rcvd_regions->mem_reg[i].size);
+               hw->lan_regs[i].addr_start =
+                       le64_to_cpu(rcvd_regions->mem_reg[i].start_offset);
+       }
+       hw->num_lan_regs = num_regions;
+
+       return err;
+}
+
+/**
+ * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat
+ * @adapter: Driver specific private structure
+ *
+ * Called when idpf_send_get_lan_memory_regions is not supported. This will
+ * calculate the offsets and sizes for the regions before, in between, and
+ * after the mailbox and rstat MMIO mappings.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_calc_remaining_mmio_regs(struct idpf_adapter *adapter)
+{
+       struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1];
+       struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0];
+       struct idpf_hw *hw = &adapter->hw;
+
+       hw->num_lan_regs = IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING;
+       hw->lan_regs = kcalloc(hw->num_lan_regs, sizeof(*hw->lan_regs),
+                              GFP_KERNEL);
+       if (!hw->lan_regs)
+               return -ENOMEM;
+
+       /* Region preceding mailbox */
+       hw->lan_regs[0].addr_start = 0;
+       hw->lan_regs[0].addr_len = mbx_reg->start;
+       /* Region between mailbox and rstat */
+       hw->lan_regs[1].addr_start = mbx_reg->end + 1;
+       hw->lan_regs[1].addr_len = rstat_reg->start -
+                                       hw->lan_regs[1].addr_start;
+       /* Region after rstat */
+       hw->lan_regs[2].addr_start = rstat_reg->end + 1;
+       hw->lan_regs[2].addr_len = pci_resource_len(adapter->pdev, 0) -
+                                       hw->lan_regs[2].addr_start;
+
+       return 0;
+}
+
+/**
+ * idpf_map_lan_mmio_regs - map remaining LAN BAR regions
+ * @adapter: Driver specific private structure
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter)
+{
+       struct pci_dev *pdev = adapter->pdev;
+       struct idpf_hw *hw = &adapter->hw;
+       resource_size_t res_start;
+
+       res_start = pci_resource_start(pdev, 0);
+
+       for (int i = 0; i < hw->num_lan_regs; i++) {
+               resource_size_t start;
+               long len;
+
+               len = hw->lan_regs[i].addr_len;
+               if (!len)
+                       continue;
+               start = hw->lan_regs[i].addr_start + res_start;
+
+               hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len);
+               if (!hw->lan_regs[i].vaddr) {
+                       pci_err(pdev, "failed to allocate BAR0 region\n");
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
 /**
  * idpf_vport_alloc_max_qs - Allocate max queues for a vport
  * @adapter: Driver specific private structure
@@ -2803,7 +2926,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
        struct idpf_hw *hw = &adapter->hw;
        int err;
 
-       adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info);
+       adapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info);
 
        err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);
        if (err)
@@ -2963,6 +3086,30 @@ restart:
                msleep(task_delay);
        }
 
+       if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LAN_MEMORY_REGIONS)) {
+               err = idpf_send_get_lan_memory_regions(adapter);
+               if (err) {
+                       dev_err(&adapter->pdev->dev, "Failed to get LAN memory regions: %d\n",
+                               err);
+                       return -EINVAL;
+               }
+       } else {
+               /* Fallback to mapping the remaining regions of the entire BAR */
+               err = idpf_calc_remaining_mmio_regs(adapter);
+               if (err) {
+                       dev_err(&adapter->pdev->dev, "Failed to allocate BAR0 region(s): %d\n",
+                               err);
+                       return -ENOMEM;
+               }
+       }
+
+       err = idpf_map_lan_mmio_regs(adapter);
+       if (err) {
+               dev_err(&adapter->pdev->dev, "Failed to map BAR0 region(s): %d\n",
+                       err);
+               return -ENOMEM;
+       }
+
        pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
        num_max_vports = idpf_get_max_vports(adapter);
        adapter->max_vports = num_max_vports;
index b82218d20909a27f50e29cdad5797bb14a67183d..48d3cc9236a46dba5d4cde98642febb027fcac51 100644 (file)
@@ -79,6 +79,7 @@ enum virtchnl2_op {
        VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE               = 546,
        VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME               = 547,
        VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS       = 548,
+       VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS             = 549,
 };
 
 /**
@@ -212,7 +213,8 @@ enum virtchnl2_cap_other {
        VIRTCHNL2_CAP_RX_FLEX_DESC              = BIT_ULL(17),
        VIRTCHNL2_CAP_PTYPE                     = BIT_ULL(18),
        VIRTCHNL2_CAP_LOOPBACK                  = BIT_ULL(19),
-       /* Other capability 20 is reserved */
+       /* Other capability 20-21 is reserved */
+       VIRTCHNL2_CAP_LAN_MEMORY_REGIONS        = BIT_ULL(22),
 
        /* this must be the last capability */
        VIRTCHNL2_CAP_OEM                       = BIT_ULL(63),
@@ -1587,4 +1589,30 @@ struct virtchnl2_ptp_adj_dev_clk_time {
 };
 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_time);
 
+/**
+ * struct virtchnl2_mem_region - MMIO memory region
+ * @start_offset: starting offset of the MMIO memory region
+ * @size: size of the MMIO memory region
+ */
+struct virtchnl2_mem_region {
+       __le64 start_offset;
+       __le64 size;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_mem_region);
+
+/**
+ * struct virtchnl2_get_lan_memory_regions - List of LAN MMIO memory regions
+ * @num_memory_regions: number of memory regions
+ * @pad: Padding
+ * @mem_reg: List with memory region info
+ *
+ * PF/VF sends this message to learn what LAN MMIO memory regions it should map.
+ */
+struct virtchnl2_get_lan_memory_regions {
+       __le16 num_memory_regions;
+       u8 pad[6];
+       struct virtchnl2_mem_region mem_reg[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_lan_memory_regions);
+
 #endif /* _VIRTCHNL_2_H_ */
index 16c970dd4c6e02e9f02b2f3cd90683e52c428b3b..bab697e18fd6cba1d01589b2bd0d364c1e813e8e 100644 (file)
@@ -31,10 +31,18 @@ enum iidc_function_type {
        IIDC_FUNCTION_TYPE_VF,
 };
 
+struct iidc_rdma_lan_mapped_mem_region {
+       u8 __iomem *region_addr;
+       __le64 size;
+       __le64 start_offset;
+};
+
 struct iidc_rdma_priv_dev_info {
        struct msix_entry *msix_entries;
        u16 msix_count; /* How many vectors are reserved for this device */
        enum iidc_function_type ftype;
+       __le16 num_memory_regions;
+       struct iidc_rdma_lan_mapped_mem_region *mapped_mem_regions;
 };
 
 int idpf_idc_vport_dev_ctrl(struct iidc_rdma_core_dev_info *cdev_info, bool up);