return 0;
}
-/**
- * irdma_sc_cq_ack - acknowledge completion q
- * @cq: cq struct
- */
-static inline void irdma_sc_cq_ack(struct irdma_sc_cq *cq)
-{
- writel(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
-}
-
/**
* irdma_sc_cq_init - initialize completion q
* @cq: cq struct
return -ENOMEM;
set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
- set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
+ set_64bit_val(wqe, 8, cq->cq_uk.cq_id);
set_64bit_val(wqe, 16,
FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
return -ENOMEM;
set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
- set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
+ set_64bit_val(wqe, 8, cq->cq_uk.cq_id);
set_64bit_val(wqe, 40, cq->shadow_area_pa);
set_64bit_val(wqe, 48,
(cq->virtual_map ? cq->first_pm_pbl_idx : 0));
return -ENOMEM;
set_64bit_val(wqe, 0, info->cq_size);
- set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
+ set_64bit_val(wqe, 8, cq->cq_uk.cq_id);
set_64bit_val(wqe, 16,
FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, info->shadow_read_threshold));
set_64bit_val(wqe, 32, info->cq_pa);
* irdma_sc_process_ceq - process ceq
* @dev: sc device struct
* @ceq: ceq sc structure
+ * @cq_idx: pointer to a CQ ID, populated when a valid CEQE is found
*
* It is expected caller serializes this function with cleanup_ceqes()
* because these functions manipulate the same ceq
+ *
+ * Return: true if @cq_idx has been populated with a CQ ID, false if no
+ * valid CEQE is available.
*/
-void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
+bool irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq,
+ u32 *cq_idx)
{
u64 temp;
__le64 *ceqe;
- struct irdma_sc_cq *cq = NULL;
- struct irdma_sc_cq *temp_cq;
u8 polarity;
- u32 cq_idx;
do {
- cq_idx = 0;
ceqe = IRDMA_GET_CURRENT_CEQ_ELEM(ceq);
get_64bit_val(ceqe, 0, &temp);
polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
if (polarity != ceq->polarity)
- return NULL;
+ return false;
- temp_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
- if (!temp_cq) {
- cq_idx = IRDMA_INVALID_CQ_IDX;
- IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
-
- if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
- ceq->polarity ^= 1;
- continue;
- }
-
- cq = temp_cq;
+ /* Truncation to 32 bits discards the valid bit, which is the MSb of temp. */
+ *cq_idx = temp;
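+ /*
+ * Out-of-range IDs (including entries scrubbed by
+ * irdma_sc_cleanup_ceqes()) are marked invalid and skipped below.
+ */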
+ if (*cq_idx >= dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt)
+ *cq_idx = IRDMA_INVALID_CQ_IDX;
IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
ceq->polarity ^= 1;
- } while (cq_idx == IRDMA_INVALID_CQ_IDX);
+ } while (*cq_idx == IRDMA_INVALID_CQ_IDX);
- if (cq)
- irdma_sc_cq_ack(cq);
- return cq;
+ return true;
}
/**
*/
void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq)
{
- struct irdma_sc_cq *next_cq;
u8 ceq_polarity = ceq->polarity;
__le64 *ceqe;
u8 polarity;
+ u32 cq_idx;
u64 temp;
int next;
u32 i;
if (polarity != ceq_polarity)
return;
- next_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
- if (cq == next_cq)
- set_64bit_val(ceqe, 0, temp & IRDMA_CEQE_VALID);
+ cq_idx = temp;
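+ /*
+ * Invalidate CEQEs for this CQ: keep the valid bit but overwrite
+ * the CQ ID so irdma_sc_process_ceq() skips the entry.
+ */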
+ if (cq_idx == cq->cq_uk.cq_id)
+ set_64bit_val(ceqe, 0, (temp & IRDMA_CEQE_VALID) |
+ IRDMA_INVALID_CQ_IDX);
next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, i);
if (!next)
return -ENOMEM;
set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
- set_64bit_val(wqe, 8, (uintptr_t)ccq >> 1);
+ set_64bit_val(wqe, 8, ccq->cq_uk.cq_id);
set_64bit_val(wqe, 40, ccq->shadow_area_pa);
hdr = ccq->cq_uk.cq_id |
int ret_code = 0;
u8 db_size;
+ spin_lock_init(&dev->puda_cq_lock);
+ dev->ilq_cq = NULL;
+ dev->ieq_cq = NULL;
INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for CQP command backlog */
mutex_init(&dev->ws_mutex);
dev->hmc_fn_id = info->hmc_fn_id;
irdma_sc_ccq_arm(cq);
}
+/**
+ * irdma_process_normal_ceqe - Handle a CEQE for a normal CQ.
+ * @rf: RDMA PCI function.
+ * @dev: iWARP device.
+ * @cq_idx: CQ ID. Must be in table bounds.
+ *
+ * Context: Atomic (CEQ lock must be held)
+ */
+static void irdma_process_normal_ceqe(struct irdma_pci_f *rf,
+ struct irdma_sc_dev *dev, u32 cq_idx)
+{
+ /* cq_idx bounds validated in irdma_sc_process_ceq. */
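+ /* Pairs with smp_store_release() publishing the entry in cq_table. */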
+ struct irdma_cq *icq = READ_ONCE(rf->cq_table[cq_idx]);
+ struct irdma_sc_cq *cq;
+
+ if (unlikely(!icq)) {
+ /* Should not happen since CEQ is scrubbed upon CQ delete. */
+ ibdev_warn_ratelimited(to_ibdev(dev), "Stale CEQE for CQ %u",
+ cq_idx);
+ return;
+ }
+
+ cq = &icq->sc_cq;
+
+ if (unlikely(cq->cq_type != IRDMA_CQ_TYPE_IWARP)) {
+ ibdev_warn_ratelimited(to_ibdev(dev), "Unexpected CQ type %u",
+ cq->cq_type);
+ return;
+ }
+
+ writel(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
+ irdma_iwarp_ce_handler(cq);
+}
+
+/**
+ * irdma_process_reserved_ceqe - Handle a CEQE for a reserved CQ.
+ * @rf: RDMA PCI function.
+ * @dev: iWARP device.
+ * @cq_idx: Reserved CQ ID.
+ *
+ * Context: Atomic
+ */
+static void irdma_process_reserved_ceqe(struct irdma_pci_f *rf,
+ struct irdma_sc_dev *dev, u32 cq_idx)
+{
+ struct irdma_sc_cq *cq;
+
+ if (cq_idx == IRDMA_RSVD_CQ_ID_CQP) {
+ cq = &rf->ccq.sc_cq;
+ /* The CQP CQ outlives the CEQ, so it can be used without locking. */
+ writel(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
+ queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
+ } else if (cq_idx == IRDMA_RSVD_CQ_ID_ILQ ||
+ cq_idx == IRDMA_RSVD_CQ_ID_IEQ) {
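+ /*
+ * The ILQ/IEQ CQ may be torn down concurrently; look it up and
+ * use it only under puda_cq_lock.
+ */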
+ scoped_guard(spinlock_irqsave, &dev->puda_cq_lock) {
+ cq = (cq_idx == IRDMA_RSVD_CQ_ID_ILQ) ?
+ dev->ilq_cq : dev->ieq_cq;
+ if (!cq) {
+ ibdev_warn_ratelimited(to_ibdev(dev),
+ "Stale ILQ/IEQ CEQE");
+ return;
+ }
+ writel(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
+ irdma_puda_ce_handler(rf, cq);
+ }
+ }
+}
+
/**
* irdma_process_ceq - handle ceq for completions
* @rf: RDMA PCI function
{
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_sc_ceq *sc_ceq;
- struct irdma_sc_cq *cq;
unsigned long flags;
+ u32 cq_idx;
sc_ceq = &ceq->sc_ceq;
do {
spin_lock_irqsave(&ceq->ce_lock, flags);
- cq = irdma_sc_process_ceq(dev, sc_ceq);
- if (!cq) {
+
+ if (!irdma_sc_process_ceq(dev, sc_ceq, &cq_idx)) {
spin_unlock_irqrestore(&ceq->ce_lock, flags);
break;
}
- if (cq->cq_type == IRDMA_CQ_TYPE_IWARP)
- irdma_iwarp_ce_handler(cq);
+ /* Normal CQs must be handled while holding CEQ lock. */
+ if (likely(cq_idx > IRDMA_RSVD_CQ_ID_IEQ)) {
+ irdma_process_normal_ceqe(rf, dev, cq_idx);
+ spin_unlock_irqrestore(&ceq->ce_lock, flags);
+ continue;
+ }
spin_unlock_irqrestore(&ceq->ce_lock, flags);
- if (cq->cq_type == IRDMA_CQ_TYPE_CQP)
- queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
- else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ ||
- cq->cq_type == IRDMA_CQ_TYPE_IEQ)
- irdma_puda_ce_handler(rf, cq);
+ irdma_process_reserved_ceqe(rf, dev, cq_idx);
} while (1);
}
dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
rsrc->cqmem.va, rsrc->cqmem.pa);
rsrc->cqmem.va = NULL;
+ } else {
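+ /* Publish the new CQ so irdma_process_reserved_ceqe() can find it. */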
+ scoped_guard(spinlock_irqsave, &dev->puda_cq_lock) {
+ if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
+ dev->ilq_cq = cq;
+ else
+ dev->ieq_cq = cq;
+ }
}
return ret;
struct irdma_ccq_cqe_info compl_info;
struct irdma_sc_dev *dev = rsrc->dev;
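+
+ /* Unpublish the CQ so irdma_process_reserved_ceqe() stops using it. */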
+ scoped_guard(spinlock_irqsave, &dev->puda_cq_lock) {
+ if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
+ dev->ilq_cq = NULL;
+ else
+ dev->ieq_cq = NULL;
+ }
+
if (rsrc->dev->ceq_valid) {
irdma_cqp_cq_destroy_cmd(dev, &rsrc->cq);
return;
struct irdma_sc_aeq *aeq;
struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
struct irdma_sc_cq *ccq;
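+ /* puda_cq_lock protects ilq_cq and ieq_cq */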
+ spinlock_t puda_cq_lock;
+ struct irdma_sc_cq *ilq_cq;
+ struct irdma_sc_cq *ieq_cq;
const struct irdma_irq_ops *irq_ops;
struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
struct irdma_hmc_fpm_misc hmc_fpm_misc;
int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
struct irdma_ceq_init_info *info);
void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
-void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq);
+bool irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq,
+ u32 *cq_idx);
int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
struct irdma_aeq_init_info *info);
return;
}
- iwdev->rf->cq_table[iwcq->cq_num] = NULL;
+ /* May be sampled asynchronously by the CEQ ISR without cqtable_lock held. */
+ WRITE_ONCE(iwdev->rf->cq_table[iwcq->cq_num], NULL);
spin_unlock_irqrestore(&iwdev->rf->cqtable_lock, flags);
complete(&iwcq->free_cq);
}
goto cq_destroy;
}
}
- rf->cq_table[cq_num] = iwcq;
+
init_completion(&iwcq->free_cq);
+ /*
+ * Publish the table entry only after the CQ is fully created;
+ * pairs with READ_ONCE() in irdma_process_normal_ceqe().
+ */
+ smp_store_release(&rf->cq_table[cq_num], iwcq);
+
return 0;
cq_destroy:
irdma_cq_wq_destroy(rf, cq);