hmc_fpm_misc->loc_mem_pages = (u32)FIELD_GET(IRDMA_QUERY_FPM_LOC_MEM_PAGES, temp);
if (!hmc_fpm_misc->loc_mem_pages)
return -EINVAL;
+
+ get_64bit_val(buf, 184, &temp);
+ if (temp) {
+ hmc_fpm_misc->fw_scratch_buf0.size = temp;
+ hmc_fpm_misc->fw_scratch_buf0.va =
+ dma_alloc_coherent(dev->hw->device,
+ hmc_fpm_misc->fw_scratch_buf0.size,
+ &hmc_fpm_misc->fw_scratch_buf0.pa,
+ GFP_KERNEL);
+
+ if (!hmc_fpm_misc->fw_scratch_buf0.va) {
+ hmc_fpm_misc->fw_scratch_buf0.size = 0;
+ return -ENOMEM;
+ }
+ }
+ get_64bit_val(buf, 192, &temp);
+ if (temp) {
+ hmc_fpm_misc->fw_scratch_buf1.size = temp;
+ hmc_fpm_misc->fw_scratch_buf1.va =
+ dma_alloc_coherent(dev->hw->device,
+ hmc_fpm_misc->fw_scratch_buf1.size,
+ &hmc_fpm_misc->fw_scratch_buf1.pa,
+ GFP_KERNEL);
+
+ if (!hmc_fpm_misc->fw_scratch_buf1.va) {
+ hmc_fpm_misc->fw_scratch_buf1.size = 0;
+ dma_free_coherent(dev->hw->device,
+ hmc_fpm_misc->fw_scratch_buf0.size,
+ hmc_fpm_misc->fw_scratch_buf0.va,
+ hmc_fpm_misc->fw_scratch_buf0.pa);
+ hmc_fpm_misc->fw_scratch_buf0.va = NULL;
+ hmc_fpm_misc->fw_scratch_buf0.size = 0;
+ return -ENOMEM;
+ }
+ }
}
return 0;
hdr = FIELD_PREP(IRDMA_CQPSQ_BUFSIZE, IRDMA_COMMIT_FPM_BUF_SIZE) |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_COMMIT_FPM_VAL) |
+ FIELD_PREP(IRDMA_CQPSQ_CFPM_FW_SCRATCH_BUF_PRESENT,
+ cqp->dev->hmc_fpm_misc.fw_scratch_buf0.va != NULL) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
for (offset = 0; offset < IRDMA_COMMIT_FPM_BUF_SIZE;
offset += sizeof(__le64)) {
- if (offset == IRDMA_PBLE_COMMIT_OFFSET)
+ if (offset == IRDMA_PBLE_COMMIT_OFFSET ||
+ offset == IRDMA_SCRATCH_BUF0_COMMIT_OFFSET ||
+ offset == IRDMA_SCRATCH_BUF1_COMMIT_OFFSET)
continue;
get_64bit_val(buf, offset, &temp);
if (temp)
(u64)obj_info[IRDMA_HMC_IW_OOISC].cnt);
set_64bit_val(buf, 168,
(u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt);
+ set_64bit_val(buf, 192, dev->hmc_fpm_misc.fw_scratch_buf0.pa);
+ set_64bit_val(buf, 200, dev->hmc_fpm_misc.fw_scratch_buf1.pa);
if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3 &&
dev->hmc_fpm_misc.loc_mem_pages)
irdma_set_loc_mem(buf);
#define MAX_MR_PER_SD 0x8000
#define MAX_MR_SD_PER_FCN 0x80
/* Byte offsets in the commit-FPM buffer that hold addresses rather than
 * object counts; the commit copy loop skips them (see the
 * IRDMA_PBLE_COMMIT_OFFSET check in the offset loop). */
#define IRDMA_PBLE_COMMIT_OFFSET 112
+/* Offsets 192/200 carry the FW scratch buffer 0/1 DMA addresses. */
+#define IRDMA_SCRATCH_BUF0_COMMIT_OFFSET 192
+#define IRDMA_SCRATCH_BUF1_COMMIT_OFFSET 200
#define IRDMA_MAX_QUANTA_PER_WR 8
#define IRDMA_QP_SW_MAX_WQ_QUANTA 32768
#define IRDMA_COMMIT_FPM_QPCNT GENMASK_ULL(20, 0)
#define IRDMA_COMMIT_FPM_BASE_S 32
#define IRDMA_CQPSQ_CFPM_HMCFNID GENMASK_ULL(15, 0)
+/* WQE header flag set when a FW scratch buffer address is supplied
+ * (see the FIELD_PREP on fw_scratch_buf0.va in the commit path).
+ * The _S variant is the raw shift for the same bit. */
+#define IRDMA_CQPSQ_CFPM_FW_SCRATCH_BUF_PRESENT_S 38
+#define IRDMA_CQPSQ_CFPM_FW_SCRATCH_BUF_PRESENT BIT_ULL(38)
#define IRDMA_CQPSQ_FWQE_AECODE GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_FWQE_AESOURCE GENMASK_ULL(19, 16)
/**
 * irdma_del_init_mem - free memory allocated during driver init
 * @rf: RDMA PCI function
 *
 * Releases the HMC function handle for non-privileged functions, frees
 * the HMC info memory, and frees the two FW scratch DMA buffers if they
 * were allocated during the FPM query.
 */
static void irdma_del_init_mem(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
+	struct irdma_dma_mem *fw_scratch_buf0;
+	struct irdma_dma_mem *fw_scratch_buf1;

	/* NOTE(review): presumably returns the HMC function id obtained
	 * over the virtual channel — confirm against the vchnl API. */
	if (!rf->sc_dev.privileged)
		irdma_vchnl_req_put_hmc_fcn(&rf->sc_dev);
	rf->iw_msixtbl = NULL;
	kfree(rf->hmc_info_mem);
	rf->hmc_info_mem = NULL;
+
+	/* Free the FW scratch buffers only if they were allocated; .va is
+	 * left NULL when the FPM query reported a zero size or when the
+	 * allocation failure path already released and cleared them. */
+	fw_scratch_buf0 = &dev->hmc_fpm_misc.fw_scratch_buf0;
+	fw_scratch_buf1 = &dev->hmc_fpm_misc.fw_scratch_buf1;
+	if (fw_scratch_buf0->va)
+		dma_free_coherent(dev->hw->device, fw_scratch_buf0->size,
+				  fw_scratch_buf0->va, fw_scratch_buf0->pa);
+	if (fw_scratch_buf1->va)
+		dma_free_coherent(dev->hw->device, fw_scratch_buf1->size,
+				  fw_scratch_buf1->va, fw_scratch_buf1->pa);
}
/**