For DQO, change QPL page registration logic to be more flexible to honor
the "max_registered_pages" parameter from the gVNIC device.
Previously the number of RX pages per QPL was hardcoded to twice the
ring size, and the number of TX pages per QPL was dictated by the device
ring size, and the number of TX pages per QPL was dictated by the device
in the DQO-QPL device option. Now, in DQO-QPL mode, the driver will
ignore the "tx_pages_per_qpl" parameter indicated in the DQO-QPL device
option and instead allocate up to (tx_queue_length / 2) pages per TX QPL
and up to (rx_queue_length * 2) pages per RX QPL while keeping the total
number of pages under the "max_registered_pages".
Merge DQO and GQI QPL page calculation logic into a unified
gve_update_num_qpl_pages function. Add rx_pages_per_qpl to the priv
struct for consumption by both DQO and GQI.
Signed-off-by: Matt Olson <maolson@google.com>
Signed-off-by: Max Yuan <maxyuan@google.com>
Reviewed-by: Jordan Rhee <jordanrhee@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Link: https://patch.msgid.link/20260225182342.1049816-2-joshwash@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
-#define DQO_QPL_DEFAULT_TX_PAGES 512
-
/* Maximum TSO size supported on DQO */
#define GVE_DQO_TX_MAX 0x3FFFF
/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
struct gve_tx_queue_config *qcfg;
+ u16 pages_per_qpl;
u16 num_xdp_rings;
/* tx config is also needed to determine QPL ids */
struct gve_rx_queue_config *qcfg_rx;
struct gve_tx_queue_config *qcfg_tx;
+ u16 pages_per_qpl;
u16 ring_size;
u16 packet_buffer_size;
u16 min_rx_desc_cnt;
bool modify_ring_size_enabled;
bool default_min_ring_size;
- u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
+ u16 tx_pages_per_qpl;
+ u16 rx_pages_per_qpl;
u64 max_registered_pages;
u64 num_registered_pages; /* num pages registered with NIC */
struct bpf_prog *xdp_prog; /* XDP BPF program */
return gve_get_rx_qpl_id(tx_cfg, 0);
}
-static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
-{
- /* For DQO, page count should be more than ring size for
- * out-of-order completions. Set it to two times of ring size.
- */
- return 2 * rx_desc_cnt;
-}
-
/* Returns the correct dma direction for tx and rx qpls */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
int id)
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
+void gve_update_num_qpl_pages(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg);
int gve_adjust_config(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
}
- /* Override pages for qpl for DQO-QPL */
- if (dev_op_dqo_qpl) {
- priv->tx_pages_per_qpl =
- be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
- if (priv->tx_pages_per_qpl == 0)
- priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
- }
-
if (dev_op_buffer_sizes &&
(supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) {
priv->max_rx_buffer_size =
u32 idx;
idx = rx->dqo.next_qpl_page_idx;
- if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
+ if (idx >= priv->rx_pages_per_qpl) {
net_err_ratelimited("%s: Out of QPL pages\n",
priv->dev->name);
return -ENOMEM;
#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
cfg->qcfg = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->ring_size = priv->tx_desc_cnt;
+ cfg->pages_per_qpl = priv->tx_pages_per_qpl;
cfg->num_xdp_rings = cfg->qcfg->num_xdp_queues;
cfg->tx = priv->tx;
}
}
}
+/* gve_update_num_qpl_pages() - compute pages per queue page list (QPL)
+ * before ring allocation.
+ *
+ * GQI-QPL: one page per RX descriptor (pages_per_qpl = ring_size).
+ * DQO-QPL: target two pages per RX descriptor (for out-of-order
+ * completions) and half a page per TX descriptor, scaled down
+ * proportionally so the total stays within max_registered_pages.
+ *
+ * Other queue formats (raw addressing) are left untouched here since
+ * they do not use QPLs.
+ *
+ * NOTE(review): assumes num_queues >= 1 for both TX and RX in DQO-QPL
+ * mode; a zero queue count would divide by zero below — confirm callers
+ * guarantee this.
+ */
+void gve_update_num_qpl_pages(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg)
+{
+ u64 ideal_tx_pages, ideal_rx_pages;
+ u16 tx_num_queues, rx_num_queues;
+ u64 max_pages, tx_pages;
+
+ if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
+ rx_alloc_cfg->pages_per_qpl = rx_alloc_cfg->ring_size;
+ } else if (priv->queue_format == GVE_DQO_QPL_FORMAT) {
+ /*
+ * We want 2 pages per RX descriptor and half a page per TX
+ * descriptor, which means the fraction ideal_tx_pages /
+ * (ideal_tx_pages + ideal_rx_pages) of the pages we allocate
+ * should be for TX. Shrink proportionally as necessary to avoid
+ * allocating more than max_registered_pages total pages.
+ */
+ tx_num_queues = tx_alloc_cfg->qcfg->num_queues;
+ rx_num_queues = rx_alloc_cfg->qcfg_rx->num_queues;
+
+ /* Ideal (unconstrained) totals across all queues. */
+ ideal_tx_pages = tx_alloc_cfg->ring_size * tx_num_queues / 2;
+ ideal_rx_pages = rx_alloc_cfg->ring_size * rx_num_queues * 2;
+ /* Cap the grand total at the device-advertised limit. */
+ max_pages = min(priv->max_registered_pages,
+ ideal_tx_pages + ideal_rx_pages);
+
+ /* Split max_pages between TX and RX in the ideal ratio;
+ * 64-bit division helpers are required on 32-bit targets.
+ */
+ tx_pages = div64_u64(max_pages * ideal_tx_pages,
+ ideal_tx_pages + ideal_rx_pages);
+ tx_alloc_cfg->pages_per_qpl = div_u64(tx_pages, tx_num_queues);
+ rx_alloc_cfg->pages_per_qpl = div_u64(max_pages - tx_pages,
+ rx_num_queues);
+ }
+}
+
static int gve_queues_mem_alloc(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int err;
+ gve_update_num_qpl_pages(priv, rx_alloc_cfg, tx_alloc_cfg);
+
if (gve_is_gqi(priv))
err = gve_tx_alloc_rings_gqi(priv, tx_alloc_cfg);
else
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->enable_header_split = priv->header_split_enabled;
cfg->ring_size = priv->rx_desc_cnt;
+ cfg->pages_per_qpl = priv->rx_pages_per_qpl;
cfg->packet_buffer_size = priv->rx_cfg.packet_buffer_size;
cfg->rx = priv->rx;
cfg->xdp = !!cfg->qcfg_tx->num_xdp_queues;
priv->rx_cfg = *rx_alloc_cfg->qcfg_rx;
priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
priv->rx_desc_cnt = rx_alloc_cfg->ring_size;
+ priv->tx_pages_per_qpl = tx_alloc_cfg->pages_per_qpl;
+ priv->rx_pages_per_qpl = rx_alloc_cfg->pages_per_qpl;
gve_tx_start_rings(priv, gve_num_tx_queues(priv));
gve_rx_start_rings(priv, rx_alloc_cfg->qcfg_rx->num_queues);
struct device *hdev = &priv->pdev->dev;
u32 slots = cfg->ring_size;
int filled_pages;
- int qpl_page_cnt;
u32 qpl_id = 0;
size_t bytes;
int err;
if (!rx->data.raw_addressing) {
qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
- qpl_page_cnt = cfg->ring_size;
-
rx->data.qpl = gve_alloc_queue_page_list(priv, qpl_id,
- qpl_page_cnt);
+ cfg->pages_per_qpl);
if (!rx->data.qpl) {
err = -ENOMEM;
goto abort_with_copy_pool;
{
struct device *hdev = &priv->pdev->dev;
struct page_pool *pool;
- int qpl_page_cnt;
size_t size;
u32 qpl_id;
XSK_CHECK_PRIV_TYPE(struct gve_xdp_buff);
rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
- gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
+ cfg->pages_per_qpl;
rx->dqo.buf_states = kvcalloc_node(rx->dqo.num_buf_states,
sizeof(rx->dqo.buf_states[0]),
GFP_KERNEL, priv->numa_node);
rx->dqo.page_pool = pool;
} else {
qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
- qpl_page_cnt = gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
rx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
- qpl_page_cnt);
+ cfg->pages_per_qpl);
if (!rx->dqo.qpl)
goto err;
rx->dqo.next_qpl_page_idx = 0;
int idx)
{
struct device *hdev = &priv->pdev->dev;
- int qpl_page_cnt;
u32 qpl_id = 0;
size_t bytes;
tx->dev = hdev;
if (!tx->raw_addressing) {
qpl_id = gve_tx_qpl_id(priv, tx->q_num);
- qpl_page_cnt = priv->tx_pages_per_qpl;
-
tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
- qpl_page_cnt);
+ cfg->pages_per_qpl);
if (!tx->tx_fifo.qpl)
goto abort_with_desc;
{
struct device *hdev = &priv->pdev->dev;
int num_pending_packets;
- int qpl_page_cnt;
size_t bytes;
u32 qpl_id;
int i;
if (!cfg->raw_addressing) {
qpl_id = gve_tx_qpl_id(priv, tx->q_num);
- qpl_page_cnt = priv->tx_pages_per_qpl;
tx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
- qpl_page_cnt);
+ cfg->pages_per_qpl);
if (!tx->dqo.qpl)
goto err;