#define airoha_switch_wr(eth, offset, val) \
        airoha_wr((eth)->switch_regs, (offset), (val))
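+/*
+ * QDMA descriptors are not guaranteed to be ARCH_DMA_MINALIGN-aligned,
+ * so widen the mapped region to the enclosing cacheline boundaries; the
+ * cache maintenance done by dma_map_single()/dma_unmap_single() then
+ * operates on whole cachelines. Note the returned handle is the bus
+ * address of the aligned start, not of @vaddr; the callers below rely
+ * only on the cache effects and discard it.
+ */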
+static inline dma_addr_t dma_map_unaligned(void *vaddr, size_t len,
+                                           enum dma_data_direction dir)
+{
+        uintptr_t start, end;
+
+        start = ALIGN_DOWN((uintptr_t)vaddr, ARCH_DMA_MINALIGN);
+        end = ALIGN((uintptr_t)(vaddr + len), ARCH_DMA_MINALIGN);
+
+        return dma_map_single((void *)start, end - start, dir);
+}
+
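+/* Inverse of dma_map_unaligned(): recompute the same widened window */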
+static inline void dma_unmap_unaligned(dma_addr_t addr, size_t len,
+                                       enum dma_data_direction dir)
+{
+        uintptr_t start, end;
+
+        start = ALIGN_DOWN((uintptr_t)addr, ARCH_DMA_MINALIGN);
+        end = ALIGN((uintptr_t)(addr + len), ARCH_DMA_MINALIGN);
+
+        dma_unmap_single(start, end - start, dir);
+}
+
static void airoha_fe_maccr_init(struct airoha_eth *eth)
{
        int p;
        val = FIELD_PREP(QDMA_DESC_LEN_MASK, PKTSIZE_ALIGN);
        WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
-        dma_map_single(desc, sizeof(*desc), DMA_TO_DEVICE);
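+        /* Flush the descriptor to DRAM so the QDMA engine sees the CPU's writes */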
+        dma_map_unaligned(desc, sizeof(*desc), DMA_TO_DEVICE);
}
static void airoha_qdma_init_rx_desc(struct airoha_queue *q)
        WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
        WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
-        dma_map_single(desc, sizeof(*desc), DMA_TO_DEVICE);
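+        /* Flush so the hardware sees the refreshed RX descriptor words */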
+        dma_map_unaligned(desc, sizeof(*desc), DMA_TO_DEVICE);
        airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
                        FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
        for (i = 0; i < 100; i++) {
-                dma_unmap_single(virt_to_phys(desc), sizeof(*desc),
-                                 DMA_FROM_DEVICE);
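+                /* Invalidate before polling the DONE bit set by hardware */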
+                dma_unmap_unaligned(virt_to_phys(desc), sizeof(*desc),
+                                    DMA_FROM_DEVICE);
                if (desc->ctrl & QDMA_DESC_DONE_MASK)
                        break;
        q = &qdma->q_rx[qid];
        desc = &q->desc[q->head];
-        dma_unmap_single(virt_to_phys(desc), sizeof(*desc),
-                         DMA_FROM_DEVICE);
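+        /* Invalidate so the CPU reads the DONE bit written by hardware */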
+        dma_unmap_unaligned(virt_to_phys(desc), sizeof(*desc),
+                            DMA_FROM_DEVICE);
        if (!(desc->ctrl & QDMA_DESC_DONE_MASK))
                return -EAGAIN;