static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev,
struct pci_dev *pdev,
struct rtw89_pci_tx_ring *tx_ring)
{
- int ring_sz;
- u8 *head;
- dma_addr_t dma;
-
- head = tx_ring->bd_ring.head;
- dma = tx_ring->bd_ring.dma;
- ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len;
- dma_free_coherent(&pdev->dev, ring_sz, head, dma);
-
tx_ring->bd_ring.head = NULL;
}

static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_dma_pool *bd_pool = &rtwpci->tx.bd_pool;
const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_tx_ring *tx_ring;
int i;

for (i = 0; i < RTW89_TXCH_NUM; i++) {
if (info->tx_dma_ch_mask & BIT(i))
continue;
tx_ring = &rtwpci->tx.rings[i];
rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
}
+
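+ /* all TX BD rings share one coherent allocation; free it once here */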
+ dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
}
static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
struct pci_dev *pdev,
struct rtw89_pci_rx_ring *rx_ring)
{
struct sk_buff *skb;
dma_addr_t dma;
u32 buf_sz;
- u8 *head;
- int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len;
int i;
buf_sz = rx_ring->buf_sz;
rx_ring->buf[i] = NULL;
}
- head = rx_ring->bd_ring.head;
- dma = rx_ring->bd_ring.dma;
- dma_free_coherent(&pdev->dev, ring_sz, head, dma);
-
rx_ring->bd_ring.head = NULL;
}

static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_dma_pool *bd_pool = &rtwpci->rx.bd_pool;
struct rtw89_pci_rx_ring *rx_ring;
int i;

for (i = 0; i < RTW89_RXCH_NUM; i++) {
rx_ring = &rtwpci->rx.rings[i];
rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
}
+
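+ /* the RX BD rings share one coherent allocation; free it once here */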
+ dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
}
static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev,

static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
struct pci_dev *pdev,
struct rtw89_pci_tx_ring *tx_ring,
u32 desc_size, u32 len,
- enum rtw89_tx_channel txch)
+ enum rtw89_tx_channel txch,
+ void *head, dma_addr_t dma)
{
const struct rtw89_pci_ch_dma_addr *txch_addr;
- int ring_sz = desc_size * len;
- u8 *head;
- dma_addr_t dma;
int ret;
ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
goto err_free_wd_ring;
}
- head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
- if (!head) {
- ret = -ENOMEM;
- goto err_free_wd_ring;
- }
-
INIT_LIST_HEAD(&tx_ring->busy_pages);
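+ /* head/dma point into the caller's BD pool; the ring only records
+  * them, so there is no per-ring allocation or free anymore
+  */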
tx_ring->bd_ring.head = head;
tx_ring->bd_ring.dma = dma;

static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_dma_pool *bd_pool = &rtwpci->tx.bd_pool;
const struct rtw89_pci_info *info = rtwdev->pci_info;
struct rtw89_pci_tx_ring *tx_ring;
+ u32 i, tx_allocated;
+ dma_addr_t dma;
u32 desc_size;
+ u32 ring_sz;
+ u32 pool_sz;
+ u32 ch_num;
+ void *head;
u32 len;
- u32 i, tx_allocated;
int ret;
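+
+ /* a multiple-of-16 ring length keeps every per-channel slice of the
+  * BD pool 16-byte aligned, whatever the descriptor size
+  */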
+ BUILD_BUG_ON(RTW89_PCI_TXBD_NUM_MAX % 16);
+
+ desc_size = sizeof(struct rtw89_pci_tx_bd_32);
+ len = RTW89_PCI_TXBD_NUM_MAX;
+ ch_num = RTW89_TXCH_NUM - hweight32(info->tx_dma_ch_mask);
+ ring_sz = desc_size * len;
+ pool_sz = ring_sz * ch_num;
+
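+ /* one coherent buffer backs the BD rings of every enabled TX channel */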
+ head = dma_alloc_coherent(&pdev->dev, pool_sz, &dma, GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ bd_pool->head = head;
+ bd_pool->dma = dma;
+ bd_pool->size = pool_sz;
+
for (i = 0; i < RTW89_TXCH_NUM; i++) {
if (info->tx_dma_ch_mask & BIT(i))
continue;
tx_ring = &rtwpci->tx.rings[i];
- desc_size = sizeof(struct rtw89_pci_tx_bd_32);
- len = RTW89_PCI_TXBD_NUM_MAX;
ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring,
- desc_size, len, i);
+ desc_size, len, i, head, dma);
if (ret) {
rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i);
goto err_free;
}
+
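+ /* advance to the pool slice of the next enabled TX channel */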
+ head += ring_sz;
+ dma += ring_sz;
}
return 0;
rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
}
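+ /* rings unwound above only dropped their pointers; release the
+  * shared pool itself here
+  */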
+ dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
+
return ret;
}
static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
struct pci_dev *pdev,
struct rtw89_pci_rx_ring *rx_ring,
- u32 desc_size, u32 len, u32 rxch)
+ u32 desc_size, u32 len, u32 rxch,
+ void *head, dma_addr_t dma)
{
const struct rtw89_pci_info *info = rtwdev->pci_info;
const struct rtw89_pci_ch_dma_addr *rxch_addr;
struct sk_buff *skb;
- u8 *head;
- dma_addr_t dma;
- int ring_sz = desc_size * len;
int buf_sz = RTW89_PCI_RX_BUF_SIZE;
int i, allocated;
int ret;
return ret;
}
- head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
- if (!head) {
- ret = -ENOMEM;
- goto err;
- }
-
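+ /* head/dma are carved from the caller's BD pool; just record them */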
rx_ring->bd_ring.head = head;
rx_ring->bd_ring.dma = dma;
rx_ring->bd_ring.len = len;
rx_ring->buf[i] = NULL;
}
- head = rx_ring->bd_ring.head;
- dma = rx_ring->bd_ring.dma;
- dma_free_coherent(&pdev->dev, ring_sz, head, dma);
-
rx_ring->bd_ring.head = NULL;
-err:
+
return ret;
}

static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_dma_pool *bd_pool = &rtwpci->rx.bd_pool;
struct rtw89_pci_rx_ring *rx_ring;
+ int i, rx_allocated;
+ dma_addr_t dma;
u32 desc_size;
+ u32 ring_sz;
+ u32 pool_sz;
+ void *head;
u32 len;
- int i, rx_allocated;
int ret;
+ desc_size = sizeof(struct rtw89_pci_rx_bd_32);
+ len = RTW89_PCI_RXBD_NUM_MAX;
+ ring_sz = desc_size * len;
+ pool_sz = ring_sz * RTW89_RXCH_NUM;
+
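+ /* one coherent buffer backs the BD rings of all RTW89_RXCH_NUM queues */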
+ head = dma_alloc_coherent(&pdev->dev, pool_sz, &dma, GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ bd_pool->head = head;
+ bd_pool->dma = dma;
+ bd_pool->size = pool_sz;
+
for (i = 0; i < RTW89_RXCH_NUM; i++) {
rx_ring = &rtwpci->rx.rings[i];
- desc_size = sizeof(struct rtw89_pci_rx_bd_32);
- len = RTW89_PCI_RXBD_NUM_MAX;
+
ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring,
- desc_size, len, i);
+ desc_size, len, i,
+ head, dma);
if (ret) {
rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i);
goto err_free;
}
+
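+ /* advance to the pool slice of the next RX queue */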
+ head += ring_sz;
+ dma += ring_sz;
}
return 0;
rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
}
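+ /* per-ring teardown above left the BD memory alone; free the pool */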
+ dma_free_coherent(&pdev->dev, bd_pool->size, bd_pool->head, bd_pool->dma);
+
return ret;
}