#include <net/pkt_sched.h>
#include <linux/aer.h>
#include <linux/etherdevice.h>
+#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/bitfield.h>
#include <linux/sctp.h>
* @ptp_reg_init: PTP register initialization
*/
struct idpf_reg_ops {
- void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq);
+ void (*ctlq_reg_init)(struct idpf_adapter *adapter,
+ struct idpf_ctlq_create_info *cq);
int (*intr_reg_init)(struct idpf_vport *vport);
void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
void (*reset_reg_init)(struct idpf_adapter *adapter);
void (*ptp_reg_init)(const struct idpf_adapter *adapter);
};
+#define IDPF_MMIO_REG_NUM_STATIC 2
+#define IDPF_PF_MBX_REGION_SZ 4096
+#define IDPF_PF_RSTAT_REGION_SZ 2048
+#define IDPF_VF_MBX_REGION_SZ 10240
+#define IDPF_VF_RSTAT_REGION_SZ 2048
+
/**
* struct idpf_dev_ops - Device specific operations
* @reg_ops: Register operations
* @idc_init: IDC initialization
+ * @static_reg_info: array of mailbox and rstat register info
*/
struct idpf_dev_ops {
struct idpf_reg_ops reg_ops;
int (*idc_init)(struct idpf_adapter *adapter);
+
+ /* static_reg_info[0] is mailbox region, static_reg_info[1] is rstat */
+ struct resource static_reg_info[IDPF_MMIO_REG_NUM_STATIC];
};
/**
return pkt_len ? pkt_len : IDPF_TX_MIN_PKT_LEN;
}
+/**
+ * idpf_get_mbx_reg_addr - Get BAR0 mailbox register address
+ * @adapter: private data struct
+ * @reg_offset: register offset value
+ *
+ * Return: BAR0 mailbox register address based on register offset.
+ */
+static inline void __iomem *idpf_get_mbx_reg_addr(struct idpf_adapter *adapter,
+ resource_size_t reg_offset)
+{
+ return adapter->hw.mbx.vaddr + reg_offset;
+}
+
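A minimal usage sketch (hypothetical caller; len_reg is invented): control-queue register offsets stored in cq->reg are already mailbox-relative after ctlq_reg_init() runs, so callers pass them straight through, mirroring the idpf_is_reset_detected() hunk further down:

	u32 len_reg;

	/* reg.len was rebased against the mailbox region start at init */
	len_reg = readl(idpf_get_mbx_reg_addr(adapter, adapter->hw.arq->reg.len));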
+/**
+ * idpf_get_rstat_reg_addr - Get BAR0 rstat register address
+ * @adapter: private data struct
+ * @reg_offset: register offset value
+ *
+ * Return: BAR0 rstat register address based on register offset.
+ */
+static inline
+void __iomem *idpf_get_rstat_reg_addr(struct idpf_adapter *adapter,
+				      resource_size_t reg_offset)
+{
+ reg_offset -= adapter->dev_ops.static_reg_info[1].start;
+
+ return adapter->hw.rstat.vaddr + reg_offset;
+}
+
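Note the asymmetry between the two helpers: rstat callers keep passing absolute BAR0 offsets such as PFGEN_RSTAT, so the subtraction against static_reg_info[1].start happens here, whereas mailbox offsets are rebased once in ctlq_reg_init(). A sketch of the equivalent address math:

	void __iomem *rstat;

	/* absolute BAR0 offset in, mapped address out */
	rstat = adapter->hw.rstat.vaddr +
		(PFGEN_RSTAT - adapter->dev_ops.static_reg_info[1].start);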
/**
* idpf_get_reg_addr - Get BAR0 register address
* @adapter: private data struct
static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter,
resource_size_t reg_offset)
{
- return (void __iomem *)(adapter->hw.hw_addr + reg_offset);
+ struct idpf_hw *hw = &adapter->hw;
+
+ for (int i = 0; i < hw->num_lan_regs; i++) {
+ struct idpf_mmio_reg *region = &hw->lan_regs[i];
+
+ if (reg_offset >= region->addr_start &&
+ reg_offset < (region->addr_start + region->addr_len)) {
+ /* Convert the offset so that it is relative to the
+ * start of the region. Then add the base address of
+ * the region to get the final address.
+ */
+ reg_offset -= region->addr_start;
+
+ return region->vaddr + reg_offset;
+ }
+ }
+
+ /* It's impossible to hit this case with offsets from the CP. But if we
+ * do for any other reason, the kernel will panic on that register
+ * access. Might as well do it here to make it clear what's happening.
+ */
+ BUG();
+
+ return NULL;
}
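A worked example with illustrative numbers (not from any device spec; base stands for an assumed ioremap()'ed pointer):

	/* Region covering BAR0 bytes [0x2000, 0x3000) */
	struct idpf_mmio_reg r = {
		.vaddr = base,
		.addr_start = 0x2000,
		.addr_len = 0x1000,
	};

	/* idpf_get_reg_addr(adapter, 0x2400) returns base + 0x400; an
	 * offset no region covers, say 0x5000, lands on the BUG().
	 */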
/**
if (!adapter->hw.arq)
return true;
- return !(readl(idpf_get_reg_addr(adapter, adapter->hw.arq->reg.len)) &
+ return !(readl(idpf_get_mbx_reg_addr(adapter, adapter->hw.arq->reg.len)) &
adapter->hw.arq->reg.len_mask);
}
{
/* Update tail to post pre-allocated buffers for rx queues */
if (is_rxq)
- wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
+ idpf_mbx_wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
/* For non-Mailbox control queues only TAIL need to be set */
if (cq->q_id != -1)
return;
/* Clear Head for both send or receive */
- wr32(hw, cq->reg.head, 0);
+ idpf_mbx_wr32(hw, cq->reg.head, 0);
/* set starting point */
- wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
- wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
- wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+ idpf_mbx_wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
+ idpf_mbx_wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
+ idpf_mbx_wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
}
/**
*/
dma_wmb();
- wr32(hw, cq->reg.tail, cq->next_to_use);
+ idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_use);
err_unlock:
mutex_unlock(&cq->cq_lock);
dma_wmb();
- wr32(hw, cq->reg.tail, cq->next_to_post);
+ idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_post);
}
mutex_unlock(&cq->cq_lock);
u32 pf_vf_id; /* used by CP when sending to PF */
};
+/* Max number of MMIO regions, not including the mailbox and rstat regions,
+ * in the fallback case when the whole BAR is mapped.
+ */
+#define IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING 3
+
+struct idpf_mmio_reg {
+ void __iomem *vaddr;
+ resource_size_t addr_start;
+ resource_size_t addr_len;
+};
+
/* Define the driver hardware struct to replace other control structs as needed
* Align to ctlq_hw_info
*/
struct idpf_hw {
- void __iomem *hw_addr;
- resource_size_t hw_addr_len;
+ struct idpf_mmio_reg mbx;
+ struct idpf_mmio_reg rstat;
+	/* Count and array of remaining LAN BAR regions */
+	int num_lan_regs;
+	struct idpf_mmio_reg *lan_regs;
struct idpf_adapter *back;
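For orientation, the resulting BAR0 view in the three-region fallback case (offsets illustrative; when VIRTCHNL2_CAP_LAN_MEMORY_REGIONS is negotiated, the CP supplies the lan_regs list instead):

	/*
	 *  0                 lan_regs[0]: BAR0 start up to the mailbox
	 *  mbx.addr_start    mbx:         IDPF_{PF,VF}_MBX_REGION_SZ bytes
	 *                    lan_regs[1]: between mailbox and rstat
	 *  rstat.addr_start  rstat:       IDPF_{PF,VF}_RSTAT_REGION_SZ bytes
	 *                    lan_regs[2]: remainder of the BAR
	 */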
/**
* idpf_ctlq_reg_init - initialize default mailbox registers
+ * @adapter: adapter structure
* @cq: pointer to the array of create control queues
*/
-static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
+static void idpf_ctlq_reg_init(struct idpf_adapter *adapter,
+ struct idpf_ctlq_create_info *cq)
{
+ resource_size_t mbx_start = adapter->dev_ops.static_reg_info[0].start;
int i;
for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) {
switch (ccq->type) {
case IDPF_CTLQ_TYPE_MAILBOX_TX:
/* set head and tail registers in our local struct */
- ccq->reg.head = PF_FW_ATQH;
- ccq->reg.tail = PF_FW_ATQT;
- ccq->reg.len = PF_FW_ATQLEN;
- ccq->reg.bah = PF_FW_ATQBAH;
- ccq->reg.bal = PF_FW_ATQBAL;
+ ccq->reg.head = PF_FW_ATQH - mbx_start;
+ ccq->reg.tail = PF_FW_ATQT - mbx_start;
+ ccq->reg.len = PF_FW_ATQLEN - mbx_start;
+ ccq->reg.bah = PF_FW_ATQBAH - mbx_start;
+ ccq->reg.bal = PF_FW_ATQBAL - mbx_start;
ccq->reg.len_mask = PF_FW_ATQLEN_ATQLEN_M;
ccq->reg.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
ccq->reg.head_mask = PF_FW_ATQH_ATQH_M;
break;
case IDPF_CTLQ_TYPE_MAILBOX_RX:
/* set head and tail registers in our local struct */
- ccq->reg.head = PF_FW_ARQH;
- ccq->reg.tail = PF_FW_ARQT;
- ccq->reg.len = PF_FW_ARQLEN;
- ccq->reg.bah = PF_FW_ARQBAH;
- ccq->reg.bal = PF_FW_ARQBAL;
+ ccq->reg.head = PF_FW_ARQH - mbx_start;
+ ccq->reg.tail = PF_FW_ARQT - mbx_start;
+ ccq->reg.len = PF_FW_ARQLEN - mbx_start;
+ ccq->reg.bah = PF_FW_ARQBAH - mbx_start;
+ ccq->reg.bal = PF_FW_ARQBAL - mbx_start;
ccq->reg.len_mask = PF_FW_ARQLEN_ARQLEN_M;
ccq->reg.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
ccq->reg.head_mask = PF_FW_ARQH_ARQH_M;
*/
static void idpf_reset_reg_init(struct idpf_adapter *adapter)
{
- adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, PFGEN_RSTAT);
+ adapter->reset_reg.rstat = idpf_get_rstat_reg_addr(adapter, PFGEN_RSTAT);
adapter->reset_reg.rstat_m = PFGEN_RSTAT_PFR_STATE_M;
}
{
u32 reset_reg;
- reset_reg = readl(idpf_get_reg_addr(adapter, PFGEN_CTRL));
+ reset_reg = readl(idpf_get_rstat_reg_addr(adapter, PFGEN_CTRL));
writel(reset_reg | PFGEN_CTRL_PFSWR,
- idpf_get_reg_addr(adapter, PFGEN_CTRL));
+ idpf_get_rstat_reg_addr(adapter, PFGEN_CTRL));
}
/**
idpf_reg_ops_init(adapter);
adapter->dev_ops.idc_init = idpf_idc_register;
+
+ resource_set_range(&adapter->dev_ops.static_reg_info[0],
+ PF_FW_BASE, IDPF_PF_MBX_REGION_SZ);
+ resource_set_range(&adapter->dev_ops.static_reg_info[1],
+ PFGEN_RTRIG, IDPF_PF_RSTAT_REGION_SZ);
}
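For reference, resource_set_range() from <linux/ioport.h> (the new include above) reduces to the assignments below, so static_reg_info[0] ends up spanning [PF_FW_BASE, PF_FW_BASE + IDPF_PF_MBX_REGION_SZ - 1]:

	res->start = start;
	res->end = start + size - 1;	/* via resource_set_size() */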
{
struct iidc_rdma_core_dev_info *cdev_info;
struct iidc_rdma_priv_dev_info *privd;
- int err;
+ int err, i;
adapter->cdev_info = kzalloc(sizeof(*cdev_info), GFP_KERNEL);
if (!adapter->cdev_info)
cdev_info->rdma_protocol = IIDC_RDMA_PROTOCOL_ROCEV2;
privd->ftype = ftype;
+ privd->mapped_mem_regions =
+ kcalloc(adapter->hw.num_lan_regs,
+ sizeof(struct iidc_rdma_lan_mapped_mem_region),
+ GFP_KERNEL);
+ if (!privd->mapped_mem_regions) {
+ err = -ENOMEM;
+ goto err_plug_aux_dev;
+ }
+
+ privd->num_memory_regions = cpu_to_le16(adapter->hw.num_lan_regs);
+ for (i = 0; i < adapter->hw.num_lan_regs; i++) {
+ privd->mapped_mem_regions[i].region_addr =
+ adapter->hw.lan_regs[i].vaddr;
+ privd->mapped_mem_regions[i].size =
+ cpu_to_le64(adapter->hw.lan_regs[i].addr_len);
+ privd->mapped_mem_regions[i].start_offset =
+ cpu_to_le64(adapter->hw.lan_regs[i].addr_start);
+ }
+
idpf_idc_init_msix_data(adapter);
err = idpf_plug_core_aux_dev(cdev_info);
if (err)
- goto err_plug_aux_dev;
+ goto err_free_mem_regions;
return 0;
+err_free_mem_regions:
+ kfree(privd->mapped_mem_regions);
+ privd->mapped_mem_regions = NULL;
err_plug_aux_dev:
kfree(privd);
err_privd_alloc:
*/
void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info)
{
+ struct iidc_rdma_priv_dev_info *privd;
+
if (!cdev_info)
return;
idpf_unplug_aux_dev(cdev_info->adev);
- kfree(cdev_info->iidc_priv);
+ privd = cdev_info->iidc_priv;
+ kfree(privd->mapped_mem_regions);
+ kfree(privd);
kfree(cdev_info);
}
*/
static int idpf_cfg_hw(struct idpf_adapter *adapter)
{
+ resource_size_t res_start, mbx_start, rstat_start;
struct pci_dev *pdev = adapter->pdev;
struct idpf_hw *hw = &adapter->hw;
+ struct device *dev = &pdev->dev;
+ long len;
+
+ res_start = pci_resource_start(pdev, 0);
+
+ /* Map mailbox space for virtchnl communication */
+ mbx_start = res_start + adapter->dev_ops.static_reg_info[0].start;
+ len = resource_size(&adapter->dev_ops.static_reg_info[0]);
+ hw->mbx.vaddr = devm_ioremap(dev, mbx_start, len);
+ if (!hw->mbx.vaddr) {
+		pci_err(pdev, "failed to map BAR0 mbx region\n");
+
+ return -ENOMEM;
+ }
+ hw->mbx.addr_start = adapter->dev_ops.static_reg_info[0].start;
+ hw->mbx.addr_len = len;
- hw->hw_addr = pcim_iomap_table(pdev)[0];
- if (!hw->hw_addr) {
- pci_err(pdev, "failed to allocate PCI iomap table\n");
+ /* Map rstat space for resets */
+ rstat_start = res_start + adapter->dev_ops.static_reg_info[1].start;
+ len = resource_size(&adapter->dev_ops.static_reg_info[1]);
+ hw->rstat.vaddr = devm_ioremap(dev, rstat_start, len);
+ if (!hw->rstat.vaddr) {
+		pci_err(pdev, "failed to map BAR0 rstat region\n");
return -ENOMEM;
}
+ hw->rstat.addr_start = adapter->dev_ops.static_reg_info[1].start;
+ hw->rstat.addr_len = len;
hw->back = adapter;
if (err)
goto err_free;
- err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ err = pcim_request_region(pdev, 0, pci_name(pdev));
if (err) {
- pci_err(pdev, "pcim_iomap_regions failed %pe\n", ERR_PTR(err));
+ pci_err(pdev, "pcim_request_region failed %pe\n", ERR_PTR(err));
goto err_free;
}
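The pcim_iomap_regions() to pcim_request_region() swap is what permits the sparse layout: BAR0 is still reserved for exclusive use, but nothing is mapped up front. Both calls are device-managed, so the error paths above need no manual unmap or release. A sketch of the resulting pattern (bar_start, off, len and vaddr are placeholders):

	err = pcim_request_region(pdev, 0, pci_name(pdev));	/* reserve only */
	if (err)
		return err;

	vaddr = devm_ioremap(&pdev->dev, bar_start + off, len);	/* map pieces */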
size_t size;
};
-#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
-#define rd32(a, reg) readl((a)->hw_addr + (reg))
-#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
-#define rd64(a, reg) readq((a)->hw_addr + (reg))
+#define idpf_mbx_wr32(a, reg, value) writel((value), ((a)->mbx.vaddr + (reg)))
+#define idpf_mbx_rd32(a, reg) readl((a)->mbx.vaddr + (reg))
+#define idpf_mbx_wr64(a, reg, value) writeq((value), ((a)->mbx.vaddr + (reg)))
+#define idpf_mbx_rd64(a, reg) readq((a)->mbx.vaddr + (reg))
#endif /* _IDPF_MEM_H_ */
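The new accessors keep the old wr32()/rd32() shape, only the base pointer changes; e.g. the tail bump from the control-queue hunk above reads:

	dma_wmb();
	idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_post);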
/**
* idpf_vf_ctlq_reg_init - initialize default mailbox registers
+ * @adapter: adapter structure
* @cq: pointer to the array of create control queues
*/
-static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
+static void idpf_vf_ctlq_reg_init(struct idpf_adapter *adapter,
+ struct idpf_ctlq_create_info *cq)
{
+ resource_size_t mbx_start = adapter->dev_ops.static_reg_info[0].start;
int i;
for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) {
switch (ccq->type) {
case IDPF_CTLQ_TYPE_MAILBOX_TX:
/* set head and tail registers in our local struct */
- ccq->reg.head = VF_ATQH;
- ccq->reg.tail = VF_ATQT;
- ccq->reg.len = VF_ATQLEN;
- ccq->reg.bah = VF_ATQBAH;
- ccq->reg.bal = VF_ATQBAL;
+ ccq->reg.head = VF_ATQH - mbx_start;
+ ccq->reg.tail = VF_ATQT - mbx_start;
+ ccq->reg.len = VF_ATQLEN - mbx_start;
+ ccq->reg.bah = VF_ATQBAH - mbx_start;
+ ccq->reg.bal = VF_ATQBAL - mbx_start;
ccq->reg.len_mask = VF_ATQLEN_ATQLEN_M;
ccq->reg.len_ena_mask = VF_ATQLEN_ATQENABLE_M;
ccq->reg.head_mask = VF_ATQH_ATQH_M;
break;
case IDPF_CTLQ_TYPE_MAILBOX_RX:
/* set head and tail registers in our local struct */
- ccq->reg.head = VF_ARQH;
- ccq->reg.tail = VF_ARQT;
- ccq->reg.len = VF_ARQLEN;
- ccq->reg.bah = VF_ARQBAH;
- ccq->reg.bal = VF_ARQBAL;
+ ccq->reg.head = VF_ARQH - mbx_start;
+ ccq->reg.tail = VF_ARQT - mbx_start;
+ ccq->reg.len = VF_ARQLEN - mbx_start;
+ ccq->reg.bah = VF_ARQBAH - mbx_start;
+ ccq->reg.bal = VF_ARQBAL - mbx_start;
ccq->reg.len_mask = VF_ARQLEN_ARQLEN_M;
ccq->reg.len_ena_mask = VF_ARQLEN_ARQENABLE_M;
ccq->reg.head_mask = VF_ARQH_ARQH_M;
*/
static void idpf_vf_reset_reg_init(struct idpf_adapter *adapter)
{
- adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, VFGEN_RSTAT);
+ adapter->reset_reg.rstat = idpf_get_rstat_reg_addr(adapter, VFGEN_RSTAT);
adapter->reset_reg.rstat_m = VFGEN_RSTAT_VFR_STATE_M;
}
idpf_vf_reg_ops_init(adapter);
adapter->dev_ops.idc_init = idpf_idc_vf_register;
+
+ resource_set_range(&adapter->dev_ops.static_reg_info[0],
+ VF_BASE, IDPF_VF_MBX_REGION_SZ);
+ resource_set_range(&adapter->dev_ops.static_reg_info[1],
+ VFGEN_RSTAT, IDPF_VF_RSTAT_REGION_SZ);
}
caps.other_caps =
cpu_to_le64(VIRTCHNL2_CAP_SRIOV |
VIRTCHNL2_CAP_RDMA |
+ VIRTCHNL2_CAP_LAN_MEMORY_REGIONS |
VIRTCHNL2_CAP_MACFILTER |
VIRTCHNL2_CAP_SPLITQ_QSCHED |
VIRTCHNL2_CAP_PROMISC |
return 0;
}
+/**
+ * idpf_send_get_lan_memory_regions - Send virtchnl get LAN memory regions msg
+ * @adapter: Driver specific private struct
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter)
+{
+	struct virtchnl2_get_lan_memory_regions *rcvd_regions __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {
+ .vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS,
+ .recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
+ .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+ };
+ int num_regions, size;
+ struct idpf_hw *hw;
+ ssize_t reply_sz;
+ int err = 0;
+
+ rcvd_regions = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!rcvd_regions)
+ return -ENOMEM;
+
+ xn_params.recv_buf.iov_base = rcvd_regions;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+
+ num_regions = le16_to_cpu(rcvd_regions->num_memory_regions);
+ size = struct_size(rcvd_regions, mem_reg, num_regions);
+ if (reply_sz < size)
+ return -EIO;
+
+ if (size > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
+
+ hw = &adapter->hw;
+ hw->lan_regs = kcalloc(num_regions, sizeof(*hw->lan_regs), GFP_KERNEL);
+ if (!hw->lan_regs)
+ return -ENOMEM;
+
+ for (int i = 0; i < num_regions; i++) {
+ hw->lan_regs[i].addr_len =
+ le64_to_cpu(rcvd_regions->mem_reg[i].size);
+ hw->lan_regs[i].addr_start =
+ le64_to_cpu(rcvd_regions->mem_reg[i].start_offset);
+ }
+ hw->num_lan_regs = num_regions;
+
+ return err;
+}
+
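Sanity math for the validation above, assuming a hypothetical reply carrying two regions: the fixed header is 8 bytes and each virtchnl2_mem_region is 16, so anything shorter than 40 bytes fails the -EIO check, and a num_memory_regions large enough to push size past IDPF_CTLQ_MAX_BUF_LEN fails the -EINVAL check:

	/* hypothetical two-region reply */
	size = struct_size(rcvd_regions, mem_reg, 2);	/* 8 + 2 * 16 = 40 */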
+/**
+ * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat
+ * @adapter: Driver specific private structure
+ *
+ * Called when idpf_send_get_lan_memory_regions is not supported. This will
+ * calculate the offsets and sizes for the regions before, in between, and
+ * after the mailbox and rstat MMIO mappings.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_calc_remaining_mmio_regs(struct idpf_adapter *adapter)
+{
+ struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1];
+ struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0];
+ struct idpf_hw *hw = &adapter->hw;
+
+ hw->num_lan_regs = IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING;
+ hw->lan_regs = kcalloc(hw->num_lan_regs, sizeof(*hw->lan_regs),
+ GFP_KERNEL);
+ if (!hw->lan_regs)
+ return -ENOMEM;
+
+ /* Region preceding mailbox */
+ hw->lan_regs[0].addr_start = 0;
+ hw->lan_regs[0].addr_len = mbx_reg->start;
+ /* Region between mailbox and rstat */
+ hw->lan_regs[1].addr_start = mbx_reg->end + 1;
+ hw->lan_regs[1].addr_len = rstat_reg->start -
+ hw->lan_regs[1].addr_start;
+ /* Region after rstat */
+ hw->lan_regs[2].addr_start = rstat_reg->end + 1;
+ hw->lan_regs[2].addr_len = pci_resource_len(adapter->pdev, 0) -
+ hw->lan_regs[2].addr_start;
+
+ return 0;
+}
+
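A worked example with made-up numbers: for a 16 MiB BAR0 with the mailbox region at [0x8000, 0x8fff] and rstat at [0x10000, 0x107ff], the fallback yields:

	/* lan_regs[0]: addr_start = 0x0,     addr_len = 0x8000
	 * lan_regs[1]: addr_start = 0x9000,  addr_len = 0x7000
	 * lan_regs[2]: addr_start = 0x10800, addr_len = 0xfef800
	 */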
+/**
+ * idpf_map_lan_mmio_regs - map remaining LAN BAR regions
+ * @adapter: Driver specific private structure
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct idpf_hw *hw = &adapter->hw;
+ resource_size_t res_start;
+
+ res_start = pci_resource_start(pdev, 0);
+
+ for (int i = 0; i < hw->num_lan_regs; i++) {
+ resource_size_t start;
+ long len;
+
+ len = hw->lan_regs[i].addr_len;
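+		/* A zero-length hole (e.g. lan_regs[0] when the mailbox
+		 * region starts at BAR0 offset 0) has nothing to map.
+		 */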
+ if (!len)
+ continue;
+ start = hw->lan_regs[i].addr_start + res_start;
+
+ hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len);
+ if (!hw->lan_regs[i].vaddr) {
+			pci_err(pdev, "failed to map BAR0 region\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
/**
* idpf_vport_alloc_max_qs - Allocate max queues for a vport
* @adapter: Driver specific private structure
struct idpf_hw *hw = &adapter->hw;
int err;
- adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info);
+ adapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info);
err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);
if (err)
msleep(task_delay);
}
+ if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LAN_MEMORY_REGIONS)) {
+ err = idpf_send_get_lan_memory_regions(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to get LAN memory regions: %d\n",
+ err);
+ return -EINVAL;
+ }
+ } else {
+ /* Fallback to mapping the remaining regions of the entire BAR */
+ err = idpf_calc_remaining_mmio_regs(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to allocate BAR0 region(s): %d\n",
+ err);
+ return -ENOMEM;
+ }
+ }
+
+ err = idpf_map_lan_mmio_regs(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to map BAR0 region(s): %d\n",
+ err);
+ return -ENOMEM;
+ }
+
pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
num_max_vports = idpf_get_max_vports(adapter);
adapter->max_vports = num_max_vports;
VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE = 546,
VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME = 547,
VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS = 548,
+ VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS = 549,
};
/**
VIRTCHNL2_CAP_RX_FLEX_DESC = BIT_ULL(17),
VIRTCHNL2_CAP_PTYPE = BIT_ULL(18),
VIRTCHNL2_CAP_LOOPBACK = BIT_ULL(19),
- /* Other capability 20 is reserved */
+	/* Other capabilities 20-21 are reserved */
+ VIRTCHNL2_CAP_LAN_MEMORY_REGIONS = BIT_ULL(22),
/* this must be the last capability */
VIRTCHNL2_CAP_OEM = BIT_ULL(63),
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_time);
+/**
+ * struct virtchnl2_mem_region - MMIO memory region
+ * @start_offset: starting offset of the MMIO memory region
+ * @size: size of the MMIO memory region
+ */
+struct virtchnl2_mem_region {
+ __le64 start_offset;
+ __le64 size;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_mem_region);
+
+/**
+ * struct virtchnl2_get_lan_memory_regions - List of LAN MMIO memory regions
+ * @num_memory_regions: number of memory regions
+ * @pad: Padding
+ * @mem_reg: List with memory region info
+ *
+ * PF/VF sends this message to learn what LAN MMIO memory regions it should map.
+ */
+struct virtchnl2_get_lan_memory_regions {
+ __le16 num_memory_regions;
+ u8 pad[6];
+ struct virtchnl2_mem_region mem_reg[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_lan_memory_regions);
+
#endif /* _VIRTCHNL_2_H_ */
IIDC_FUNCTION_TYPE_VF,
};
+struct iidc_rdma_lan_mapped_mem_region {
+ u8 __iomem *region_addr;
+ __le64 size;
+ __le64 start_offset;
+};
+
struct iidc_rdma_priv_dev_info {
struct msix_entry *msix_entries;
u16 msix_count; /* How many vectors are reserved for this device */
enum iidc_function_type ftype;
+ __le16 num_memory_regions;
+ struct iidc_rdma_lan_mapped_mem_region *mapped_mem_regions;
};
int idpf_idc_vport_dev_ctrl(struct iidc_rdma_core_dev_info *cdev_info, bool up);
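A hypothetical consumer on the RDMA side (nothing here implements this; rdma_get_reg_addr and its body are invented for illustration) would translate an absolute BAR0 offset through the exported regions much like idpf_get_reg_addr() does:

	static void __iomem *
	rdma_get_reg_addr(struct iidc_rdma_priv_dev_info *privd, u64 offset)
	{
		u16 n = le16_to_cpu(privd->num_memory_regions);

		for (u16 i = 0; i < n; i++) {
			struct iidc_rdma_lan_mapped_mem_region *r =
				&privd->mapped_mem_regions[i];
			u64 start = le64_to_cpu(r->start_offset);

			if (offset >= start &&
			    offset - start < le64_to_cpu(r->size))
				return r->region_addr + (offset - start);
		}

		return NULL;
	}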