author		David Awogbemila <awogbemila@google.com>	2021-10-24 20:42:37 +0200
committer	David S. Miller <davem@davemloft.net>	2021-10-25 15:13:12 +0200
commit		37149e9374bf7271563f7477ace9014ebc65a8af (patch)
tree		05877f2c1e56b83ed4c49a87aa2487897f11ee9c /drivers/net/ethernet/google/gve/gve_utils.c
parent		gve: Add RX context. (diff)
gve: Implement packet continuation for RX.
This enables the driver to receive RX packets spread across multiple
buffers. For a given multi-fragment packet, the "packet continuation" bit
is set on all descriptors except the last one; those descriptors' payloads
are combined into a single SKB before the SKB is handed to the
networking stack.
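This implies a simple descriptor walk. Below is a minimal, self-contained
sketch (userspace C, not the driver's actual structures; rx_desc,
packet_cont, and packet_total_len are hypothetical names) of how a
continuation bit delimits one packet's fragments:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical descriptor: only the fields this sketch needs. */
struct rx_desc {
	size_t len;		/* payload bytes in this buffer */
	bool packet_cont;	/* set on all descriptors except the last */
};

/*
 * Walk the descriptors belonging to one packet, summing fragment
 * lengths until a descriptor without the continuation bit marks the
 * end. Returns the total payload length; *consumed is the number of
 * descriptors that made up the packet.
 */
static size_t packet_total_len(const struct rx_desc *ring, size_t n,
			       size_t *consumed)
{
	size_t total = 0;
	size_t i = 0;

	while (i < n) {
		total += ring[i].len;
		if (!ring[i++].packet_cont)	/* reached the last fragment */
			break;
	}
	*consumed = i;
	return total;
}

int main(void)
{
	/* A three-fragment packet: continuation set on all but the last. */
	struct rx_desc ring[] = {
		{ .len = 2048, .packet_cont = true  },
		{ .len = 2048, .packet_cont = true  },
		{ .len = 512,  .packet_cont = false },
	};
	size_t used;
	size_t total = packet_total_len(ring, 3, &used);

	printf("%zu bytes across %zu descriptors\n", total, used);
	return 0;
}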
This change adds a "packet buffer size" notion for RX queues. The
CreateRxQueue AdminQueue command sent to the device now includes the
packet_buffer_size.
We opt for a packet_buffer_size of PAGE_SIZE / 2 so the driver can flip
page halves where possible instead of copying.
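Splitting each page into two buffers lets a slot toggle between halves by
XORing its offset, rather than copying out of a half the stack may still
reference. A sketch of that flip, assuming 4 KiB pages and hypothetical
names (slot_page_info, flip_buffer):

#include <stdio.h>

#define PAGE_SIZE		4096	/* assumed 4 KiB pages */
#define PACKET_BUFFER_SIZE	(PAGE_SIZE / 2)

/* Hypothetical slot state: which half of its page is armed for RX. */
struct slot_page_info {
	unsigned int page_offset;	/* 0 or PAGE_SIZE / 2 */
};

/*
 * XOR toggles the offset between 0 and PAGE_SIZE / 2, pointing the
 * slot at the other half-page buffer instead of copying out of a
 * half that may still be in use.
 */
static void flip_buffer(struct slot_page_info *info)
{
	info->page_offset ^= PACKET_BUFFER_SIZE;
}

int main(void)
{
	struct slot_page_info slot = { .page_offset = 0 };

	flip_buffer(&slot);
	printf("offset after first flip:  %u\n", slot.page_offset);	/* 2048 */
	flip_buffer(&slot);
	printf("offset after second flip: %u\n", slot.page_offset);	/* 0 */
	return 0;
}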
Signed-off-by: David Awogbemila <awogbemila@google.com>
Signed-off-by: Jeroen de Borst <jeroendb@google.com>
Reviewed-by: Catherine Sullivan <csully@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/google/gve/gve_utils.c')
-rw-r--r--	drivers/net/ethernet/google/gve/gve_utils.c | 33
1 file changed, 22 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 45ff7a9ab5f9..88ca49cbc1e2 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -50,20 +50,31 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
 
 struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
 			    struct gve_rx_slot_page_info *page_info, u16 len,
-			    u16 pad)
+			    u16 padding, struct gve_rx_ctx *ctx)
 {
-	struct sk_buff *skb = napi_alloc_skb(napi, len);
-	void *va = page_info->page_address + pad +
-		   page_info->page_offset;
-
-	if (unlikely(!skb))
-		return NULL;
-
+	void *va = page_info->page_address + padding + page_info->page_offset;
+	int skb_linear_offset = 0;
+	bool set_protocol = false;
+	struct sk_buff *skb;
+
+	if (ctx) {
+		if (!ctx->skb_head)
+			ctx->skb_head = napi_alloc_skb(napi, ctx->total_expected_size);
+
+		if (unlikely(!ctx->skb_head))
+			return NULL;
+		skb = ctx->skb_head;
+		skb_linear_offset = skb->len;
+		set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
+	} else {
+		skb = napi_alloc_skb(napi, len);
+		set_protocol = true;
+	}
 	__skb_put(skb, len);
+	skb_copy_to_linear_data_offset(skb, skb_linear_offset, va, len);
 
-	skb_copy_to_linear_data(skb, va, len);
-
-	skb->protocol = eth_type_trans(skb, dev);
+	if (set_protocol)
+		skb->protocol = eth_type_trans(skb, dev);
 
 	return skb;
 }
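For context, a hypothetical caller loop (not part of this patch) showing
how the new ctx argument is intended to be used: every fragment goes
through gve_rx_copy() with the shared per-packet context, and only the
last fragment's call sets skb->protocol via eth_type_trans(). The
rx_next_frag() and rx_frag_len() helpers are invented for illustration:

static struct sk_buff *rx_assemble(struct net_device *dev,
				   struct napi_struct *napi,
				   struct gve_rx_ring *rx,
				   struct gve_rx_ctx *ctx)
{
	struct sk_buff *skb = NULL;

	for (ctx->curr_frag_cnt = 0;
	     ctx->curr_frag_cnt < ctx->expected_frag_cnt;
	     ctx->curr_frag_cnt++) {
		/* rx_next_frag()/rx_frag_len() are invented helpers. */
		struct gve_rx_slot_page_info *page_info = rx_next_frag(rx);
		u16 len = rx_frag_len(rx);

		/* Only the last fragment's call runs eth_type_trans();
		 * earlier calls just append payload to ctx->skb_head. */
		skb = gve_rx_copy(dev, napi, page_info, len, 0, ctx);
		if (unlikely(!skb))
			return NULL;
	}
	ctx->skb_head = NULL;	/* ownership passes to the stack */
	return skb;
}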