git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
eth: fbnic: remove the debugging trick of super high page bias
author: Jakub Kicinski <kuba@kernel.org>
Fri, 1 Aug 2025 17:07:54 +0000 (10:07 -0700)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 15 Aug 2025 10:14:07 +0000 (12:14 +0200)
[ Upstream commit e407fceeaf1b2959892b4fc9b584843d3f2bfc05 ]

Alex added page bias of LONG_MAX, which is admittedly quite
a clever way of catching overflows of the pp ref count.
The page pool code was "optimized" to leave the ref at 1
for freed pages so it can't catch basic bugs by itself any more.
(Something we should probably address under DEBUG_NET...)

Unfortunately for fbnic, since commit f7dc3248dcfb ("skbuff: Optimization
of SKB coalescing for page pool") the core _may_ actually take two extra
pp refcounts; if one of them is returned before the driver gives up the
bias, the ret < 0 check in page_pool_unref_netmem() will trigger.

While at it add a FBNIC_ to the name of the driver constant.

Fixes: 0cb4c0a13723 ("eth: fbnic: Implement Rx queue alloc/start/stop/free")
Link: https://patch.msgid.link/20250801170754.2439577-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
drivers/net/ethernet/meta/fbnic/fbnic_txrx.h

index 6a6d7e22f1a722314d6eee593933e170b7c3fb1b..fc52db8e36f2e114280a7b9c5d59326dcfde7590 100644 (file)
@@ -389,8 +389,8 @@ static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx,
 {
        struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
 
-       page_pool_fragment_page(page, PAGECNT_BIAS_MAX);
-       rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX;
+       page_pool_fragment_page(page, FBNIC_PAGECNT_BIAS_MAX);
+       rx_buf->pagecnt_bias = FBNIC_PAGECNT_BIAS_MAX;
        rx_buf->page = page;
 }
 
index 2f91f68d11d57c41ffa0b3ac719028ef95ff9dd2..05cde71db9dfda1338ca5e2bd38a85f7ef953613 100644 (file)
@@ -59,10 +59,8 @@ struct fbnic_queue_stats {
        struct u64_stats_sync syncp;
 };
 
-/* Pagecnt bias is long max to reserve the last bit to catch overflow
- * cases where if we overcharge the bias it will flip over to be negative.
- */
-#define PAGECNT_BIAS_MAX       LONG_MAX
+#define FBNIC_PAGECNT_BIAS_MAX PAGE_SIZE
+
 struct fbnic_rx_buf {
        struct page *page;
        long pagecnt_bias;