Diffstat (limited to 'drivers/net/xen-netfront.c')
-rw-r--r--  drivers/net/xen-netfront.c  272
1 file changed, 169 insertions(+), 103 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 44275908d61a..e31b98403f31 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -126,21 +126,17 @@ struct netfront_queue {
/*
* {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
- * are linked from tx_skb_freelist through skb_entry.link.
- *
- * NB. Freelist index entries are always going to be less than
- * PAGE_OFFSET, whereas pointers to skbs will always be equal or
- * greater than PAGE_OFFSET: we use this property to distinguish
- * them.
+ * are linked from tx_skb_freelist through tx_link.
*/
- union skb_entry {
- struct sk_buff *skb;
- unsigned long link;
- } tx_skbs[NET_TX_RING_SIZE];
+ struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
+ unsigned short tx_link[NET_TX_RING_SIZE];
+#define TX_LINK_NONE 0xffff
+#define TX_PENDING 0xfffe
grant_ref_t gref_tx_head;
grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
struct page *grant_tx_page[NET_TX_RING_SIZE];
unsigned tx_skb_freelist;
+ unsigned int tx_pend_queue;
spinlock_t rx_lock ____cacheline_aligned_in_smp;
struct xen_netif_rx_front_ring rx;
@@ -173,6 +169,9 @@ struct netfront_info {
bool netback_has_xdp_headroom;
bool netfront_xdp_enabled;
+ /* Is device behaving sane? */
+ bool broken;
+
atomic_t rx_gso_checksum_fixup;
};
@@ -181,33 +180,25 @@ struct netfront_rx_info {
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};
-static void skb_entry_set_link(union skb_entry *list, unsigned short id)
-{
- list->link = id;
-}
-
-static int skb_entry_is_link(const union skb_entry *list)
-{
- BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
- return (unsigned long)list->skb < PAGE_OFFSET;
-}
-
/*
* Access macros for acquiring freeing slots in tx_skbs[].
*/
-static void add_id_to_freelist(unsigned *head, union skb_entry *list,
- unsigned short id)
+static void add_id_to_list(unsigned *head, unsigned short *list,
+ unsigned short id)
{
- skb_entry_set_link(&list[id], *head);
+ list[id] = *head;
*head = id;
}
-static unsigned short get_id_from_freelist(unsigned *head,
- union skb_entry *list)
+static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
{
unsigned int id = *head;
- *head = list[id].link;
+
+ if (id != TX_LINK_NONE) {
+ *head = list[id];
+ list[id] = TX_LINK_NONE;
+ }
return id;
}
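
For reference, a minimal standalone sketch of the index freelist that the new add_id_to_list()/get_id_from_list() helpers implement: each tx_link[] slot either chains to the next free id or holds the TX_LINK_NONE sentinel, so push and pop are both O(1). RING_SIZE, LINK_NONE and the main() walk-through below are illustrative stand-ins for the driver's NET_TX_RING_SIZE and TX_LINK_NONE, not driver code.

    #include <assert.h>

    #define RING_SIZE  256
    #define LINK_NONE  0xffff

    static unsigned short link[RING_SIZE];

    static void add_id(unsigned int *head, unsigned short id)
    {
        link[id] = *head;               /* new free entry points at the old head */
        *head = id;
    }

    static unsigned short get_id(unsigned int *head)
    {
        unsigned int id = *head;

        if (id != LINK_NONE) {          /* an empty list just returns the sentinel */
            *head = link[id];
            link[id] = LINK_NONE;       /* detach so a stale link cannot be reused */
        }
        return id;
    }

    int main(void)
    {
        unsigned int freelist = 0;
        unsigned int i;

        for (i = 0; i < RING_SIZE; i++) /* chain 0 -> 1 -> ... -> LINK_NONE */
            link[i] = i + 1;
        link[RING_SIZE - 1] = LINK_NONE;

        assert(get_id(&freelist) == 0); /* ids pop in chain order */
        assert(get_id(&freelist) == 1);
        add_id(&freelist, 0);           /* a freed id goes back on the head */
        assert(get_id(&freelist) == 0);
        return 0;
    }
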
@@ -363,7 +354,7 @@ static int xennet_open(struct net_device *dev)
unsigned int i = 0;
struct netfront_queue *queue = NULL;
- if (!np->queues)
+ if (!np->queues || np->broken)
return -ENODEV;
for (i = 0; i < num_queues; ++i) {
@@ -391,27 +382,47 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
unsigned short id;
struct sk_buff *skb;
bool more_to_do;
+ const struct device *dev = &queue->info->netdev->dev;
BUG_ON(!netif_carrier_ok(queue->info->netdev));
do {
prod = queue->tx.sring->rsp_prod;
+ if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
+ dev_alert(dev, "Illegal number of responses %u\n",
+ prod - queue->tx.rsp_cons);
+ goto err;
+ }
rmb(); /* Ensure we see responses up to 'rp'. */
for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
- struct xen_netif_tx_response *txrsp;
+ struct xen_netif_tx_response txrsp;
- txrsp = RING_GET_RESPONSE(&queue->tx, cons);
- if (txrsp->status == XEN_NETIF_RSP_NULL)
+ RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
+ if (txrsp.status == XEN_NETIF_RSP_NULL)
continue;
- id = txrsp->id;
- skb = queue->tx_skbs[id].skb;
+ id = txrsp.id;
+ if (id >= RING_SIZE(&queue->tx)) {
+ dev_alert(dev,
+ "Response has incorrect id (%u)\n",
+ id);
+ goto err;
+ }
+ if (queue->tx_link[id] != TX_PENDING) {
+ dev_alert(dev,
+ "Response for inactive request\n");
+ goto err;
+ }
+
+ queue->tx_link[id] = TX_LINK_NONE;
+ skb = queue->tx_skbs[id];
+ queue->tx_skbs[id] = NULL;
if (unlikely(gnttab_query_foreign_access(
queue->grant_tx_ref[id]) != 0)) {
- pr_alert("%s: warning -- grant still in use by backend domain\n",
- __func__);
- BUG();
+ dev_alert(dev,
+ "Grant still in use by backend domain\n");
+ goto err;
}
gnttab_end_foreign_access_ref(
queue->grant_tx_ref[id], GNTMAP_readonly);
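
Replacing RING_GET_RESPONSE() (which returns a pointer into the shared ring page) with RING_COPY_RESPONSE() is the core hardening step in this hunk: the response is snapshotted into frontend-private memory before any field is checked, so the backend cannot rewrite it between validation and use. A generic, hedged sketch of that copy-then-validate pattern follows; demo_rsp is a made-up layout, not a Xen structure.

    struct demo_rsp {
        unsigned short id;
        short status;
    };

    /* 'slot' points into shared memory the other side can rewrite at any time. */
    static int consume(const struct demo_rsp *slot, unsigned int ring_size)
    {
        struct demo_rsp local = *slot;   /* snapshot before looking at anything */

        if (local.id >= ring_size)       /* validate only the private copy ...  */
            return -1;
        return local.status;             /* ... and never re-read *slot         */
    }
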
@@ -419,7 +430,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
&queue->gref_tx_head, queue->grant_tx_ref[id]);
queue->grant_tx_ref[id] = GRANT_INVALID_REF;
queue->grant_tx_page[id] = NULL;
- add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
+ add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
dev_kfree_skb_irq(skb);
}
@@ -429,13 +440,20 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
} while (more_to_do);
xennet_maybe_wake_tx(queue);
+
+ return;
+
+ err:
+ queue->info->broken = true;
+ dev_alert(dev, "Disabled for further use\n");
}
struct xennet_gnttab_make_txreq {
struct netfront_queue *queue;
struct sk_buff *skb;
struct page *page;
- struct xen_netif_tx_request *tx; /* Last request */
+ struct xen_netif_tx_request *tx; /* Last request on ring page */
+ struct xen_netif_tx_request tx_local; /* Last request local copy*/
unsigned int size;
};
@@ -451,7 +469,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
struct netfront_queue *queue = info->queue;
struct sk_buff *skb = info->skb;
- id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+ id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
@@ -459,34 +477,37 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
gfn, GNTMAP_readonly);
- queue->tx_skbs[id].skb = skb;
+ queue->tx_skbs[id] = skb;
queue->grant_tx_page[id] = page;
queue->grant_tx_ref[id] = ref;
- tx->id = id;
- tx->gref = ref;
- tx->offset = offset;
- tx->size = len;
- tx->flags = 0;
+ info->tx_local.id = id;
+ info->tx_local.gref = ref;
+ info->tx_local.offset = offset;
+ info->tx_local.size = len;
+ info->tx_local.flags = 0;
+
+ *tx = info->tx_local;
+
+ /*
+ * Put the request in the pending queue, it will be set to be pending
+ * when the producer index is about to be raised.
+ */
+ add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
info->tx = tx;
- info->size += tx->size;
+ info->size += info->tx_local.size;
}
static struct xen_netif_tx_request *xennet_make_first_txreq(
- struct netfront_queue *queue, struct sk_buff *skb,
- struct page *page, unsigned int offset, unsigned int len)
+ struct xennet_gnttab_make_txreq *info,
+ unsigned int offset, unsigned int len)
{
- struct xennet_gnttab_make_txreq info = {
- .queue = queue,
- .skb = skb,
- .page = page,
- .size = 0,
- };
+ info->size = 0;
- gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
+ gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
- return info.tx;
+ return info->tx;
}
static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
@@ -499,35 +520,27 @@ static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
xennet_tx_setup_grant(gfn, offset, len, data);
}
-static struct xen_netif_tx_request *xennet_make_txreqs(
- struct netfront_queue *queue, struct xen_netif_tx_request *tx,
- struct sk_buff *skb, struct page *page,
+static void xennet_make_txreqs(
+ struct xennet_gnttab_make_txreq *info,
+ struct page *page,
unsigned int offset, unsigned int len)
{
- struct xennet_gnttab_make_txreq info = {
- .queue = queue,
- .skb = skb,
- .tx = tx,
- };
-
/* Skip unused frames from start of page */
page += offset >> PAGE_SHIFT;
offset &= ~PAGE_MASK;
while (len) {
- info.page = page;
- info.size = 0;
+ info->page = page;
+ info->size = 0;
gnttab_foreach_grant_in_range(page, offset, len,
xennet_make_one_txreq,
- &info);
+ info);
page++;
offset = 0;
- len -= info.size;
+ len -= info->size;
}
-
- return info.tx;
}
/*
@@ -574,19 +587,34 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
return queue_idx;
}
+static void xennet_mark_tx_pending(struct netfront_queue *queue)
+{
+ unsigned int i;
+
+ while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
+ TX_LINK_NONE)
+ queue->tx_link[i] = TX_PENDING;
+}
+
static int xennet_xdp_xmit_one(struct net_device *dev,
struct netfront_queue *queue,
struct xdp_frame *xdpf)
{
struct netfront_info *np = netdev_priv(dev);
struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
+ struct xennet_gnttab_make_txreq info = {
+ .queue = queue,
+ .skb = NULL,
+ .page = virt_to_page(xdpf->data),
+ };
int notify;
- xennet_make_first_txreq(queue, NULL,
- virt_to_page(xdpf->data),
+ xennet_make_first_txreq(&info,
offset_in_page(xdpf->data),
xdpf->len);
+ xennet_mark_tx_pending(queue);
+
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
if (notify)
notify_remote_via_irq(queue->tx_irq);
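
xennet_mark_tx_pending(), called just before the producer index is pushed, is what arms the acceptance test in xennet_tx_buf_gc(): a slot's tx_link[] entry moves freelist -> tx_pend_queue -> TX_PENDING -> freelist, and a response is honoured only while the entry reads TX_PENDING. A hedged mirror of that test, using stand-in names (RING_SIZE, LINK_NONE, PENDING) rather than the driver's:

    #define RING_SIZE  256
    #define LINK_NONE  0xffff
    #define PENDING    0xfffe

    static unsigned short link[RING_SIZE];

    /* Mirrors the checks xennet_tx_buf_gc() now performs on each response:
     * the id must be in range and its slot must have been armed as PENDING
     * when the request was pushed; anything else marks the device broken. */
    static int response_is_legitimate(unsigned int id)
    {
        if (id >= RING_SIZE)
            return 0;                    /* id out of range                 */
        if (link[id] != PENDING)
            return 0;                    /* no request in flight here       */
        link[id] = LINK_NONE;            /* consume the slot exactly once   */
        return 1;
    }
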
@@ -611,6 +639,8 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
int nxmit = 0;
int i;
+ if (unlikely(np->broken))
+ return -ENODEV;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
@@ -638,7 +668,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
{
struct netfront_info *np = netdev_priv(dev);
struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
- struct xen_netif_tx_request *tx, *first_tx;
+ struct xen_netif_tx_request *first_tx;
unsigned int i;
int notify;
int slots;
@@ -647,6 +677,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
unsigned int len;
unsigned long flags;
struct netfront_queue *queue = NULL;
+ struct xennet_gnttab_make_txreq info = { };
unsigned int num_queues = dev->real_num_tx_queues;
u16 queue_index;
struct sk_buff *nskb;
@@ -654,6 +685,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
/* Drop the packet if no queues are set up */
if (num_queues < 1)
goto drop;
+ if (unlikely(np->broken))
+ goto drop;
/* Determine which queue to transmit this SKB on */
queue_index = skb_get_queue_mapping(skb);
queue = &np->queues[queue_index];
@@ -704,21 +737,24 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
}
/* First request for the linear area. */
- first_tx = tx = xennet_make_first_txreq(queue, skb,
- page, offset, len);
- offset += tx->size;
+ info.queue = queue;
+ info.skb = skb;
+ info.page = page;
+ first_tx = xennet_make_first_txreq(&info, offset, len);
+ offset += info.tx_local.size;
if (offset == PAGE_SIZE) {
page++;
offset = 0;
}
- len -= tx->size;
+ len -= info.tx_local.size;
if (skb->ip_summed == CHECKSUM_PARTIAL)
/* local packet? */
- tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
+ first_tx->flags |= XEN_NETTXF_csum_blank |
+ XEN_NETTXF_data_validated;
else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
/* remote but checksummed. */
- tx->flags |= XEN_NETTXF_data_validated;
+ first_tx->flags |= XEN_NETTXF_data_validated;
/* Optional extra info after the first request. */
if (skb_shinfo(skb)->gso_size) {
@@ -727,7 +763,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
gso = (struct xen_netif_extra_info *)
RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
- tx->flags |= XEN_NETTXF_extra_info;
+ first_tx->flags |= XEN_NETTXF_extra_info;
gso->u.gso.size = skb_shinfo(skb)->gso_size;
gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
@@ -741,12 +777,12 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
}
/* Requests for the rest of the linear area. */
- tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
+ xennet_make_txreqs(&info, page, offset, len);
/* Requests for all the frags. */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
+ xennet_make_txreqs(&info, skb_frag_page(frag),
skb_frag_off(frag),
skb_frag_size(frag));
}
@@ -757,6 +793,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
/* timestamp packet in software */
skb_tx_timestamp(skb);
+ xennet_mark_tx_pending(queue);
+
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
if (notify)
notify_remote_via_irq(queue->tx_irq);
@@ -814,7 +852,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
RING_IDX rp)
{
- struct xen_netif_extra_info *extra;
+ struct xen_netif_extra_info extra;
struct device *dev = &queue->info->netdev->dev;
RING_IDX cons = queue->rx.rsp_cons;
int err = 0;
@@ -830,24 +868,22 @@ static int xennet_get_extras(struct netfront_queue *queue,
break;
}
- extra = (struct xen_netif_extra_info *)
- RING_GET_RESPONSE(&queue->rx, ++cons);
+ RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
- if (unlikely(!extra->type ||
- extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+ if (unlikely(!extra.type ||
+ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
if (net_ratelimit())
dev_warn(dev, "Invalid extra type: %d\n",
- extra->type);
+ extra.type);
err = -EINVAL;
} else {
- memcpy(&extras[extra->type - 1], extra,
- sizeof(*extra));
+ extras[extra.type - 1] = extra;
}
skb = xennet_get_rx_skb(queue, cons);
ref = xennet_get_rx_ref(queue, cons);
xennet_move_rx_slot(queue, skb, ref);
- } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
+ } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
queue->rx.rsp_cons = cons;
return err;
@@ -905,7 +941,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
struct sk_buff_head *list,
bool *need_xdp_flush)
{
- struct xen_netif_rx_response *rx = &rinfo->rx;
+ struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
RING_IDX cons = queue->rx.rsp_cons;
struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
@@ -989,7 +1025,8 @@ next:
break;
}
- rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
+ RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
+ rx = &rx_local;
skb = xennet_get_rx_skb(queue, cons + slots);
ref = xennet_get_rx_ref(queue, cons + slots);
slots++;
@@ -1044,10 +1081,11 @@ static int xennet_fill_frags(struct netfront_queue *queue,
struct sk_buff *nskb;
while ((nskb = __skb_dequeue(list))) {
- struct xen_netif_rx_response *rx =
- RING_GET_RESPONSE(&queue->rx, ++cons);
+ struct xen_netif_rx_response rx;
skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
+ RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
+
if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
@@ -1062,7 +1100,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb_frag_page(nfrag),
- rx->offset, rx->status, PAGE_SIZE);
+ rx.offset, rx.status, PAGE_SIZE);
skb_shinfo(nskb)->nr_frags = 0;
kfree_skb(nskb);
@@ -1156,12 +1194,19 @@ static int xennet_poll(struct napi_struct *napi, int budget)
skb_queue_head_init(&tmpq);
rp = queue->rx.sring->rsp_prod;
+ if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
+ dev_alert(&dev->dev, "Illegal number of responses %u\n",
+ rp - queue->rx.rsp_cons);
+ queue->info->broken = true;
+ spin_unlock(&queue->rx_lock);
+ return 0;
+ }
rmb(); /* Ensure we see queued responses up to 'rp'. */
i = queue->rx.rsp_cons;
work_done = 0;
while ((i != rp) && (work_done < budget)) {
- memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
+ RING_COPY_RESPONSE(&queue->rx, i, rx);
memset(extras, 0, sizeof(rinfo.extras));
err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
@@ -1286,17 +1331,18 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
for (i = 0; i < NET_TX_RING_SIZE; i++) {
/* Skip over entries which are actually freelist references */
- if (skb_entry_is_link(&queue->tx_skbs[i]))
+ if (!queue->tx_skbs[i])
continue;
- skb = queue->tx_skbs[i].skb;
+ skb = queue->tx_skbs[i];
+ queue->tx_skbs[i] = NULL;
get_page(queue->grant_tx_page[i]);
gnttab_end_foreign_access(queue->grant_tx_ref[i],
GNTMAP_readonly,
(unsigned long)page_address(queue->grant_tx_page[i]));
queue->grant_tx_page[i] = NULL;
queue->grant_tx_ref[i] = GRANT_INVALID_REF;
- add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
+ add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
dev_kfree_skb_irq(skb);
}
}
@@ -1376,6 +1422,9 @@ static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
struct netfront_queue *queue = dev_id;
unsigned long flags;
+ if (queue->info->broken)
+ return IRQ_HANDLED;
+
spin_lock_irqsave(&queue->tx_lock, flags);
xennet_tx_buf_gc(queue);
spin_unlock_irqrestore(&queue->tx_lock, flags);
@@ -1388,6 +1437,9 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
struct netfront_queue *queue = dev_id;
struct net_device *dev = queue->info->netdev;
+ if (queue->info->broken)
+ return IRQ_HANDLED;
+
if (likely(netif_carrier_ok(dev) &&
RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
napi_schedule(&queue->napi);
@@ -1409,6 +1461,10 @@ static void xennet_poll_controller(struct net_device *dev)
struct netfront_info *info = netdev_priv(dev);
unsigned int num_queues = dev->real_num_tx_queues;
unsigned int i;
+
+ if (info->broken)
+ return;
+
for (i = 0; i < num_queues; ++i)
xennet_interrupt(0, &info->queues[i]);
}
@@ -1480,6 +1536,11 @@ static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
+ struct netfront_info *np = netdev_priv(dev);
+
+ if (np->broken)
+ return -ENODEV;
+
switch (xdp->command) {
case XDP_SETUP_PROG:
return xennet_xdp_set(dev, xdp->prog, xdp->extack);
@@ -1853,13 +1914,15 @@ static int xennet_init_queue(struct netfront_queue *queue)
snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
devid, queue->id);
- /* Initialise tx_skbs as a free chain containing every entry. */
+ /* Initialise tx_skb_freelist as a free chain containing every entry. */
queue->tx_skb_freelist = 0;
+ queue->tx_pend_queue = TX_LINK_NONE;
for (i = 0; i < NET_TX_RING_SIZE; i++) {
- skb_entry_set_link(&queue->tx_skbs[i], i+1);
+ queue->tx_link[i] = i + 1;
queue->grant_tx_ref[i] = GRANT_INVALID_REF;
queue->grant_tx_page[i] = NULL;
}
+ queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
/* Clear out rx_skbs */
for (i = 0; i < NET_RX_RING_SIZE; i++) {
@@ -2128,6 +2191,9 @@ static int talk_to_netback(struct xenbus_device *dev,
if (info->queues)
xennet_destroy_queues(info);
+ /* For the case of a reconnect reset the "broken" indicator. */
+ info->broken = false;
+
err = xennet_create_queues(info, &num_queues);
if (err < 0) {
xenbus_dev_fatal(dev, err, "creating queues");