wifi: iwlwifi: keep the TSO and workaround pages mapped
author     Benjamin Berg <benjamin.berg@intel.com>     Wed, 3 Jul 2024 09:58:54 +0000 (12:58 +0300)
committer  Johannes Berg <johannes.berg@intel.com>     Thu, 4 Jul 2024 11:50:05 +0000 (13:50 +0200)
Map the pages when allocating them so that we will not need to map each
of the used fragments at a later point.

For now the mapping is not used; this will be changed in a later commit.

Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
Reviewed-by: Johannes Berg <johannes.berg@intel.com>
Link: https://patch.msgid.link/20240703125541.7ced468fe431.Ibb109867dc680c37fe8d891e9ab9ef64ed5c5d2d@changeid
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
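
For illustration: once every TSO/workaround page carries its own DMA mapping from allocation time onwards, the device address of any byte inside such a page can be derived by offset arithmetic through the new iwl_pcie_get_tso_page_phys() helper, rather than a per-fragment dma_map_single(). Below is a minimal sketch of how a follow-up change could use it; the function name and parameters are hypothetical and not part of this patch:

/* Hypothetical follow-up user, illustration only: obtain header space from
 * the pre-mapped per-CPU page and derive its device address without an
 * extra dma_map_single()/dma_unmap_single() pair.
 */
static int example_get_hdr_dma(struct iwl_trans *trans, struct sk_buff *skb,
			       unsigned int hdr_room, dma_addr_t *phys)
{
	u8 *hdr = iwl_pcie_get_page_hdr(trans, hdr_room, skb);

	if (!hdr)
		return -ENOMEM;

	/* DMA address of the page plus the offset of hdr within it */
	*phys = iwl_pcie_get_tso_page_phys(hdr);

	return 0;
}
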
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c

diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index d63c1c284f7096969f788a11dd4bf797433284ab..b59de4f80b4b895b8ea92232830e84c684d195ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -603,6 +603,22 @@ struct iwl_tso_hdr_page {
        u8 *pos;
 };
 
+/*
+ * Note that we put this struct *last* in the page. By doing that, we ensure
+ * that no TB referencing this page can trigger the 32-bit boundary hardware
+ * bug.
+ */
+struct iwl_tso_page_info {
+       dma_addr_t dma_addr;
+       struct page *next;
+       refcount_t use_count;
+};
+
+#define IWL_TSO_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(struct iwl_tso_page_info))
+#define IWL_TSO_PAGE_INFO(addr)        \
+       ((struct iwl_tso_page_info *)(((unsigned long)addr & PAGE_MASK) + \
+                                     IWL_TSO_PAGE_DATA_SIZE))
+
 int iwl_pcie_tx_init(struct iwl_trans *trans);
 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
 int iwl_pcie_tx_stop(struct iwl_trans *trans);
@@ -628,8 +644,18 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
                                   struct iwl_cmd_meta *cmd_meta,
                                   u8 **hdr, unsigned int hdr_room);
 
-void iwl_pcie_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb,
-                           struct iwl_cmd_meta *cmd_meta);
+void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
+                            struct iwl_cmd_meta *cmd_meta);
+
+static inline dma_addr_t iwl_pcie_get_tso_page_phys(void *addr)
+{
+       dma_addr_t res;
+
+       res = IWL_TSO_PAGE_INFO(addr)->dma_addr;
+       res += (unsigned long)addr & ~PAGE_MASK;
+
+       return res;
+}
 
 static inline dma_addr_t
 iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
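
As a reading aid (not part of the diff), the page layout implied by the definitions above: the bookkeeping struct occupies the last bytes of every TSO/workaround page, IWL_TSO_PAGE_DATA_SIZE is the usable area in front of it, and IWL_TSO_PAGE_INFO() recovers the struct from any pointer into the page. For a hypothetical in-page pointer some_addr:

/*
 * page_address(page)                                    end of page
 * |<------------- IWL_TSO_PAGE_DATA_SIZE ------------->|<--- info --->|
 * |  TSO headers / workaround copies ...               | struct       |
 * |                                                     | iwl_tso_     |
 * |                                                     | page_info    |
 */
struct iwl_tso_page_info *info = IWL_TSO_PAGE_INFO(some_addr);
/* info->dma_addr + in-page offset of some_addr */
dma_addr_t phys = iwl_pcie_get_tso_page_phys(some_addr);
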
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 3dcce6a8da502680f87b6beb3c6e932da8eb9fc4..10ee2c328458bea19382345b3a7ec5bcd7f54bff 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -19,8 +19,10 @@ static struct page *get_workaround_page(struct iwl_trans *trans,
                                        struct sk_buff *skb)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       struct iwl_tso_page_info *info;
        struct page **page_ptr;
        struct page *ret;
+       dma_addr_t phys;
 
        page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
 
@@ -28,8 +30,22 @@ static struct page *get_workaround_page(struct iwl_trans *trans,
        if (!ret)
                return NULL;
 
+       info = IWL_TSO_PAGE_INFO(page_address(ret));
+
+       /* Create a DMA mapping for the page */
+       phys = dma_map_page_attrs(trans->dev, ret, 0, PAGE_SIZE,
+                                 DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+       if (unlikely(dma_mapping_error(trans->dev, phys))) {
+               __free_page(ret);
+               return NULL;
+       }
+
+       /* Store physical address and set use count */
+       info->dma_addr = phys;
+       refcount_set(&info->use_count, 1);
+
        /* set the chaining pointer to the previous page if there */
-       *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+       info->next = *page_ptr;
        *page_ptr = ret;
 
        return ret;
@@ -76,7 +92,7 @@ static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
         * a new mapping for it so the device will not fail.
         */
 
-       if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
+       if (WARN_ON(len > IWL_TSO_PAGE_DATA_SIZE)) {
                ret = -ENOBUFS;
                goto unmap;
        }
@@ -782,7 +798,7 @@ static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
                        struct sk_buff *skb = txq->entries[idx].skb;
 
                        if (!WARN_ON_ONCE(!skb))
-                               iwl_pcie_free_tso_page(trans, skb, cmd_meta);
+                               iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
                }
                iwl_txq_gen2_free_tfd(trans, txq);
                txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index ac545a39ad2a57e59268f94e411bd9098885f386..e00d85866de95eb657b1a7129b4f629dc24688b4 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -209,8 +209,22 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
        spin_unlock(&trans_pcie->reg_lock);
 }
 
-void iwl_pcie_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb,
-                           struct iwl_cmd_meta *cmd_meta)
+static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
+                                            struct page *page)
+{
+       struct iwl_tso_page_info *info = IWL_TSO_PAGE_INFO(page_address(page));
+
+       /* Decrease internal use count and unmap/free page if needed */
+       if (refcount_dec_and_test(&info->use_count)) {
+               dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE,
+                              DMA_TO_DEVICE);
+
+               __free_page(page);
+       }
+}
+
+void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
+                            struct iwl_cmd_meta *cmd_meta)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct page **page_ptr;
@@ -221,10 +235,11 @@ void iwl_pcie_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb,
        *page_ptr = NULL;
 
        while (next) {
+               struct iwl_tso_page_info *info;
                struct page *tmp = next;
 
-               next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
-                                 sizeof(void *));
+               info = IWL_TSO_PAGE_INFO(page_address(next));
+               next = info->next;
 
                /* Unmap the scatter gather list that is on the last page */
                if (!next && cmd_meta->sg_offset) {
@@ -236,7 +251,7 @@ void iwl_pcie_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb,
                        dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0);
                }
 
-               __free_page(tmp);
+               iwl_pcie_free_and_unmap_tso_page(trans, tmp);
        }
 }
 
@@ -381,7 +396,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
                        if (WARN_ON_ONCE(!skb))
                                continue;
 
-                       iwl_pcie_free_tso_page(trans, skb, cmd_meta);
+                       iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
                }
                iwl_txq_free_tfd(trans, txq);
                txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
@@ -1722,7 +1737,9 @@ static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->txqs.tso_hdr_page);
+       struct iwl_tso_page_info *info;
        struct page **page_ptr;
+       dma_addr_t phys;
        void *ret;
 
        page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
@@ -1743,23 +1760,42 @@ static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
         *
         * (see also get_workaround_page() in tx-gen2.c)
         */
-       if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
-                          sizeof(void *))
+       if (((unsigned long)p->pos & ~PAGE_MASK) + len < IWL_TSO_PAGE_DATA_SIZE) {
+               info = IWL_TSO_PAGE_INFO(page_address(p->page));
                goto out;
+       }
 
        /* We don't have enough room on this page, get a new one. */
-       __free_page(p->page);
+       iwl_pcie_free_and_unmap_tso_page(trans, p->page);
 
 alloc:
        p->page = alloc_page(GFP_ATOMIC);
        if (!p->page)
                return NULL;
        p->pos = page_address(p->page);
+
+       info = IWL_TSO_PAGE_INFO(page_address(p->page));
+
        /* set the chaining pointer to NULL */
-       *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+       info->next = NULL;
+
+       /* Create a DMA mapping for the page */
+       phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE,
+                                 DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+       if (unlikely(dma_mapping_error(trans->dev, phys))) {
+               __free_page(p->page);
+               p->page = NULL;
+
+               return NULL;
+       }
+
+       /* Store physical address and set use count */
+       info->dma_addr = phys;
+       refcount_set(&info->use_count, 1);
 out:
        *page_ptr = p->page;
-       get_page(p->page);
+       /* Return an internal reference for the caller */
+       refcount_inc(&info->use_count);
        ret = p->pos;
        p->pos += len;
 
@@ -2330,7 +2366,7 @@ void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
                              read_ptr, txq->read_ptr, txq_id))
                        continue;
 
-               iwl_pcie_free_tso_page(trans, skb, cmd_meta);
+               iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
 
                __skb_queue_tail(skbs, skb);