return 0;
}
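+/*
+ * Re-initialize the rx descriptor at @index so the ring slot's
+ * net_rx_packets[] buffer can be handed back to the hardware.
+ */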
-static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index,
- uchar *rx_packet)
+static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index)
{
struct airoha_qdma_desc *desc;
+ uchar *rx_packet;
u32 val;
desc = &q->desc[index];
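+ /* each ring slot permanently owns one net_rx_packets[] buffer */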
+ rx_packet = net_rx_packets[index];
index = (index + 1) % q->ndesc;
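+ /*
+ * dma_map_single() with DMA_TO_DEVICE flushes the buffer, so no
+ * dirty cacheline can later be written back on top of data the
+ * hardware has DMA'd into it.
+ */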
dma_map_single(rx_packet, PKTSIZE_ALIGN, DMA_TO_DEVICE);
int i;
for (i = 0; i < q->ndesc; i++)
- airoha_qdma_reset_rx_desc(q, i, net_rx_packets[i]);
+ airoha_qdma_reset_rx_desc(q, i);
}
static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
RX_RING_SIZE_MASK,
FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
FIELD_PREP(RX_RING_THR_MASK, 0));
+ /*
+ * See arht_eth_free_pkt() for an explanation of the value
+ * written to the REG_RX_CPU_IDX(qid) register.
+ */
airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
- FIELD_PREP(RX_RING_CPU_IDX_MASK, q->ndesc - 1));
+ FIELD_PREP(RX_RING_CPU_IDX_MASK, q->ndesc - 3));
airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
struct airoha_qdma *qdma = ð->qdma[0];
struct airoha_queue *q;
int qid;
+ u16 prev, pprev;
if (!packet)
return 0;
qid = 0;
q = &qdma->q_rx[qid];
- dma_map_single(packet, length, DMA_TO_DEVICE);
-
- airoha_qdma_reset_rx_desc(q, q->head, packet);
+ /*
+ * Due to a CPU cache issue, airoha_qdma_reset_rx_desc() always
+ * touches 2 descriptors:
+ * - if the current descriptor is even, the previous descriptor and
+ * the one before it are touched (previous cacheline)
+ * - if the current descriptor is odd, only the current and the
+ * previous descriptors are touched (current cacheline)
+ *
+ * Thus, to avoid corrupting the rx queue, only (q->ndesc - 2)
+ * descriptors may be used for packet reception.
+ */
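+ /*
+ * (This pattern matches two descriptors sharing one cacheline,
+ * e.g. 32-byte descriptors on a 64-byte line.)
+ */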
+ prev = (q->head + q->ndesc - 1) % q->ndesc;
+ pprev = (q->head + q->ndesc - 2) % q->ndesc;
+ q->head = (q->head + 1) % q->ndesc;
+ airoha_qdma_reset_rx_desc(q, prev);
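+ /*
+ * Advance CPU_IDX only to pprev, so the hardware never owns a
+ * descriptor whose cacheline the next reset may write back.
+ */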
airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
- FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
- q->head = (q->head + 1) % q->ndesc;
+ FIELD_PREP(RX_RING_CPU_IDX_MASK, pprev));
return 0;
}