diff options
author | Xie He <xie.he.0141@gmail.com> | 2021-04-02 11:30:00 +0200 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2021-04-05 20:42:12 +0200 |
commit | 514e1150da9cd8d7978d990a353636cf1a7a87c2 (patch) | |
tree | 64715b8f2fbe5cb42256ba74c1974f6db3f008e0 /drivers/net/wan/hdlc_x25.c | |
parent | net: openvswitch: Use 'skb_push_rcsum()' instead of hand coding it (diff) | |
download | linux-514e1150da9cd8d7978d990a353636cf1a7a87c2.tar.xz linux-514e1150da9cd8d7978d990a353636cf1a7a87c2.zip |
net: x25: Queue received packets in the drivers instead of per-CPU queues
X.25 Layer 3 (the Packet Layer) expects layer 2 to provide a reliable
datalink service such that no packets are reordered or dropped. And
X.25 Layer 2 (the LAPB layer) is indeed designed to provide such service.
However, this reliability is not preserved when a driver calls "netif_rx"
to deliver the received packets to layer 3, because "netif_rx" will put
the packets into per-CPU queues before they are delivered to layer 3.
If there are multiple CPUs, the order of the packets may not be preserved.
The per-CPU queues may also drop packets if there are too many.
Therefore, we should not call "netif_rx" to let it queue the packets.
Instead, we should use our own queue that won't reorder or drop packets.
This patch changes all X.25 drivers to use their own queues instead of
calling "netif_rx". The patch also documents this requirement in the
"x25-iface" documentation.
Cc: Martin Schiller <ms@dev.tdt.de>
Signed-off-by: Xie He <xie.he.0141@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/wan/hdlc_x25.c')
-rw-r--r-- | drivers/net/wan/hdlc_x25.c | 30 |
1 file changed, 27 insertions, 3 deletions
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index 5a6a945f6c81..ba8c36c7ea91 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -25,6 +25,8 @@ struct x25_state { x25_hdlc_proto settings; bool up; spinlock_t up_lock; /* Protects "up" */ + struct sk_buff_head rx_queue; + struct tasklet_struct rx_tasklet; }; static int x25_ioctl(struct net_device *dev, struct ifreq *ifr); @@ -34,14 +36,27 @@ static struct x25_state *state(hdlc_device *hdlc) return hdlc->state; } +static void x25_rx_queue_kick(struct tasklet_struct *t) +{ + struct x25_state *x25st = from_tasklet(x25st, t, rx_tasklet); + struct sk_buff *skb = skb_dequeue(&x25st->rx_queue); + + while (skb) { + netif_receive_skb_core(skb); + skb = skb_dequeue(&x25st->rx_queue); + } +} + /* These functions are callbacks called by LAPB layer */ static void x25_connect_disconnect(struct net_device *dev, int reason, int code) { + struct x25_state *x25st = state(dev_to_hdlc(dev)); struct sk_buff *skb; unsigned char *ptr; - if ((skb = dev_alloc_skb(1)) == NULL) { + skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC); + if (!skb) { netdev_err(dev, "out of memory\n"); return; } @@ -50,7 +65,9 @@ static void x25_connect_disconnect(struct net_device *dev, int reason, int code) *ptr = code; skb->protocol = x25_type_trans(skb, dev); - netif_rx(skb); + + skb_queue_tail(&x25st->rx_queue, skb); + tasklet_schedule(&x25st->rx_tasklet); } @@ -71,6 +88,7 @@ static void x25_disconnected(struct net_device *dev, int reason) static int x25_data_indication(struct net_device *dev, struct sk_buff *skb) { + struct x25_state *x25st = state(dev_to_hdlc(dev)); unsigned char *ptr; if (skb_cow(skb, 1)) { @@ -84,7 +102,10 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb) *ptr = X25_IFACE_DATA; skb->protocol = x25_type_trans(skb, dev); - return netif_rx(skb); + + skb_queue_tail(&x25st->rx_queue, skb); + tasklet_schedule(&x25st->rx_tasklet); + return NET_RX_SUCCESS; } @@ 
-223,6 +244,7 @@ static void x25_close(struct net_device *dev) spin_unlock_bh(&x25st->up_lock); lapb_unregister(dev); + tasklet_kill(&x25st->rx_tasklet); } @@ -338,6 +360,8 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr) memcpy(&state(hdlc)->settings, &new_settings, size); state(hdlc)->up = false; spin_lock_init(&state(hdlc)->up_lock); + skb_queue_head_init(&state(hdlc)->rx_queue); + tasklet_setup(&state(hdlc)->rx_tasklet, x25_rx_queue_kick); /* There's no header_ops so hard_header_len should be 0. */ dev->hard_header_len = 0; |