author	Shailend Chand <shailend@google.com>	2024-05-02 01:25:48 +0200
committer	David S. Miller <davem@davemloft.net>	2024-05-05 15:35:34 +0200
commit	ee24284e2a1075966f0f2c5499c59b7d2b9bc2de
tree	e1a650c406567d5292c2b252d5bac40d7e855fb9 /drivers/net/ethernet/google/gve/gve_rx_dqo.c
parent	gve: Account for stopped queues when reading NIC stats
gve: Alloc and free QPLs with the rings
Every tx and rx ring has its own queue-page-list (QPL) that serves as the bounce buffer. Previously we were allocating QPLs for all queues before the queues themselves were allocated and later associating a QPL with a queue. This is avoidable complexity: it is much more natural for each queue to allocate and free its own QPL. Moreover, the advent of new queue-manipulating ndo hooks makes it hard to keep things as they are: we would need to transfer a QPL from an old queue to a new queue, and that is unpleasant.

Tested-by: Mina Almasry <almasrymina@google.com>
Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Signed-off-by: Shailend Chand <shailend@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
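To illustrate the ownership model the patch moves to, here is a minimal sketch (not the patch itself): the RX ring allocates its own QPL while it is being set up and releases it when it is torn down, instead of picking one out of a preallocated cfg->qpls array. The gve_get_rx_qpl_id, gve_get_rx_pages_per_qpl_dqo, gve_alloc_queue_page_list and gve_free_queue_page_list calls are the ones visible in the diff below; the wrapper function names and the cfg struct type are hypothetical stand-ins.

/*
 * Illustrative sketch only, not the driver code. A ring allocates and
 * frees the QPL it owns; nothing is handed over between queues.
 * Wrapper names and the cfg type are hypothetical; the gve_* helpers
 * are taken from the diff below.
 */
static int gve_rx_ring_qpl_alloc_sketch(struct gve_priv *priv,
					struct gve_rx_alloc_rings_cfg *cfg,
					struct gve_rx_ring *rx)
{
	u32 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
	int qpl_page_cnt = gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);

	/* The ring owns its QPL from this point on. */
	rx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id, qpl_page_cnt);
	if (!rx->dqo.qpl)
		return -ENOMEM;

	rx->dqo.next_qpl_page_idx = 0;
	return 0;
}

static void gve_rx_ring_qpl_free_sketch(struct gve_priv *priv,
					struct gve_rx_alloc_rings_cfg *cfg,
					struct gve_rx_ring *rx)
{
	if (!rx->dqo.qpl)
		return;

	/* Release the QPL together with the ring that owns it. */
	gve_free_queue_page_list(priv, rx->dqo.qpl,
				 gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num));
	rx->dqo.qpl = NULL;
}

With per-ring ownership, the new queue-manipulating ndo hooks can destroy and recreate an individual ring without transferring a QPL from the old queue to the new one.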
Diffstat (limited to 'drivers/net/ethernet/google/gve/gve_rx_dqo.c')
-rw-r--r--	drivers/net/ethernet/google/gve/gve_rx_dqo.c	23
1 file changed, 14 insertions, 9 deletions
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 7c2980c212f4..4ea8ecc3b2d5 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -307,6 +307,7 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
 	size_t buffer_queue_slots;
 	int idx = rx->q_num;
 	size_t size;
+	u32 qpl_id;
 	int i;
 
 	completion_queue_slots = rx->dqo.complq.mask + 1;
@@ -325,7 +326,11 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
 		gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
 	}
 
-	rx->dqo.qpl = NULL;
+	if (rx->dqo.qpl) {
+		qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+		gve_free_queue_page_list(priv, rx->dqo.qpl, qpl_id);
+		rx->dqo.qpl = NULL;
+	}
 
 	if (rx->dqo.bufq.desc_ring) {
 		size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
@@ -377,7 +382,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 				  int idx)
 {
 	struct device *hdev = &priv->pdev->dev;
+	int qpl_page_cnt;
 	size_t size;
+	u32 qpl_id;
 
 	const u32 buffer_queue_slots = cfg->ring_size;
 	const u32 completion_queue_slots = cfg->ring_size;
@@ -418,9 +425,13 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
 		goto err;
 
 	if (!cfg->raw_addressing) {
-		u32 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+		qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+		qpl_page_cnt = gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
 
-		rx->dqo.qpl = &cfg->qpls[qpl_id];
+		rx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+							qpl_page_cnt);
+		if (!rx->dqo.qpl)
+			goto err;
 		rx->dqo.next_qpl_page_idx = 0;
 	}
 
@@ -454,12 +465,6 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
 	int err;
 	int i;
 
-	if (!cfg->raw_addressing && !cfg->qpls) {
-		netif_err(priv, drv, priv->dev,
-			  "Cannot alloc QPL ring before allocing QPLs\n");
-		return -EINVAL;
-	}
-
 	rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
 		      GFP_KERNEL);
 	if (!rx)