Diffstat (limited to 'net')
221 files changed, 7100 insertions, 5911 deletions
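Several of the ATM backend changes in this series (br2684, clip, pppoatm) replace open-coded draining of the socket receive queue with the new vcc_process_recv_queue() helper added to net/atm/common.c below. The following is a minimal sketch of that attach pattern, not code from the tree; example_attach, example_push and example_pop are illustrative names standing in for a backend's real handlers (e.g. br2684_push/br2684_pop in the hunks that follow).

#include <linux/module.h>
#include <linux/atmdev.h>

#include "common.h"	/* declares vcc_process_recv_queue() in this series */

/* Illustrative backend handlers; a real backend supplies its own. */
static void example_push(struct atm_vcc *vcc, struct sk_buff *skb);
static void example_pop(struct atm_vcc *vcc, struct sk_buff *skb);

static int example_attach(struct atm_vcc *atmvcc)
{
	/* install the backend's push/pop handlers first */
	atmvcc->push = example_push;
	atmvcc->pop = example_pop;
	__module_get(THIS_MODULE);

	/* re-process everything received between connection setup and
	 * backend setup */
	vcc_process_recv_queue(atmvcc);

	return 0;
}

As defined in the net/atm/common.c hunk, vcc_process_recv_queue() takes rq->lock only while splicing sk_receive_queue onto a private list, then replays each skb through vcc->push() without the lock held, matching the behaviour of the open-coded loops it replaces.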
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index bc2528624583..2b5fcde1f629 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -591,18 +591,17 @@ static void vlan_dev_uninit(struct net_device *dev) } } -static u32 vlan_dev_fix_features(struct net_device *dev, u32 features) +static netdev_features_t vlan_dev_fix_features(struct net_device *dev, + netdev_features_t features) { struct net_device *real_dev = vlan_dev_info(dev)->real_dev; u32 old_features = features; - features &= real_dev->features; features &= real_dev->vlan_features; + features |= NETIF_F_RXCSUM; + features &= real_dev->features; features |= old_features & NETIF_F_SOFT_FEATURES; - - if (dev_ethtool_get_rx_csum(real_dev)) - features |= NETIF_F_RXCSUM; features |= NETIF_F_LLTX; return features; diff --git a/net/Kconfig b/net/Kconfig index a07314844238..2d998735c4d8 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -232,6 +232,19 @@ config XPS depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS default y +config NETPRIO_CGROUP + tristate "Network priority cgroup" + depends on CGROUPS + ---help--- + Cgroup subsystem for use in assigning processes to network priorities on + a per-interface basis + +config BQL + boolean + depends on SYSFS + select DQL + default y + config HAVE_BPF_JIT bool diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c index f41f02656ff4..876fbe83e2e4 100644 --- a/net/atm/atm_misc.c +++ b/net/atm/atm_misc.c @@ -26,7 +26,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size, gfp_t gfp_flags) { struct sock *sk = sk_atm(vcc); - int guess = atm_guess_pdu2truesize(pdu_size); + int guess = SKB_TRUESIZE(pdu_size); atm_force_charge(vcc, guess); if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { diff --git a/net/atm/br2684.c b/net/atm/br2684.c index d07223c834af..353fccf1cde3 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -53,6 +53,7 @@ static const unsigned char ethertype_ipv4[] = { ETHERTYPE_IPV4 }; static const unsigned char ethertype_ipv6[] = { ETHERTYPE_IPV6 }; static const unsigned char llc_oui_pid_pad[] = { LLC, SNAP_BRIDGED, PID_ETHERNET, PAD_BRIDGED }; +static const unsigned char pad[] = { PAD_BRIDGED }; static const unsigned char llc_oui_ipv4[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV4 }; static const unsigned char llc_oui_ipv6[] = { LLC, SNAP_ROUTED, ETHERTYPE_IPV6 }; @@ -202,7 +203,10 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev, { struct br2684_dev *brdev = BRPRIV(dev); struct atm_vcc *atmvcc; - int minheadroom = (brvcc->encaps == e_llc) ? 10 : 2; + int minheadroom = (brvcc->encaps == e_llc) ? + ((brdev->payload == p_bridged) ? + sizeof(llc_oui_pid_pad) : sizeof(llc_oui_ipv4)) : + ((brdev->payload == p_bridged) ? 
BR2684_PAD_LEN : 0); if (skb_headroom(skb) < minheadroom) { struct sk_buff *skb2 = skb_realloc_headroom(skb, minheadroom); @@ -450,7 +454,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) skb->pkt_type = PACKET_HOST; } else { /* p_bridged */ /* first 2 chars should be 0 */ - if (*((u16 *) (skb->data)) != 0) + if (memcmp(skb->data, pad, BR2684_PAD_LEN) != 0) goto error; skb_pull(skb, BR2684_PAD_LEN); skb->protocol = eth_type_trans(skb, net_dev); @@ -489,15 +493,11 @@ free_skb: */ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) { - struct sk_buff_head queue; - int err; struct br2684_vcc *brvcc; - struct sk_buff *skb, *tmp; - struct sk_buff_head *rq; struct br2684_dev *brdev; struct net_device *net_dev; struct atm_backend_br2684 be; - unsigned long flags; + int err; if (copy_from_user(&be, arg, sizeof be)) return -EFAULT; @@ -550,23 +550,6 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) atmvcc->push = br2684_push; atmvcc->pop = br2684_pop; - __skb_queue_head_init(&queue); - rq = &sk_atm(atmvcc)->sk_receive_queue; - - spin_lock_irqsave(&rq->lock, flags); - skb_queue_splice_init(rq, &queue); - spin_unlock_irqrestore(&rq->lock, flags); - - skb_queue_walk_safe(&queue, skb, tmp) { - struct net_device *dev; - - br2684_push(atmvcc, skb); - dev = skb->dev; - - dev->stats.rx_bytes -= skb->len; - dev->stats.rx_packets--; - } - /* initialize netdev carrier state */ if (atmvcc->dev->signal == ATM_PHY_SIG_LOST) netif_carrier_off(net_dev); @@ -574,6 +557,10 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) netif_carrier_on(net_dev); __module_get(THIS_MODULE); + + /* re-process everything received between connection setup and + backend setup */ + vcc_process_recv_queue(atmvcc); return 0; error: @@ -600,6 +587,7 @@ static void br2684_setup(struct net_device *netdev) struct br2684_dev *brdev = BRPRIV(netdev); ether_setup(netdev); + netdev->hard_header_len += sizeof(llc_oui_pid_pad); /* worst case */ brdev->net_dev = netdev; netdev->netdev_ops = &br2684_netdev_ops; @@ -612,7 +600,7 @@ static void br2684_setup_routed(struct net_device *netdev) struct br2684_dev *brdev = BRPRIV(netdev); brdev->net_dev = netdev; - netdev->hard_header_len = 0; + netdev->hard_header_len = sizeof(llc_oui_ipv4); /* worst case */ netdev->netdev_ops = &br2684_netdev_ops_routed; netdev->addr_len = 0; netdev->mtu = 1500; diff --git a/net/atm/clip.c b/net/atm/clip.c index 852394072fa1..f3b36154b0c5 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -33,6 +33,7 @@ #include <linux/slab.h> #include <net/route.h> /* for struct rtable and routing */ #include <net/icmp.h> /* icmp_send */ +#include <net/arp.h> #include <linux/param.h> /* for HZ */ #include <linux/uaccess.h> #include <asm/byteorder.h> /* for htons etc. */ @@ -119,7 +120,7 @@ out: /* The neighbour entry n->lock is held. 
*/ static int neigh_check_cb(struct neighbour *n) { - struct atmarp_entry *entry = NEIGH2ENTRY(n); + struct atmarp_entry *entry = neighbour_priv(n); struct clip_vcc *cv; for (cv = entry->vccs; cv; cv = cv->next) { @@ -189,6 +190,13 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb) struct clip_vcc *clip_vcc = CLIP_VCC(vcc); pr_debug("\n"); + + if (!clip_devs) { + atm_return(vcc, skb->truesize); + kfree_skb(skb); + return; + } + if (!skb) { pr_debug("removing VCC %p\n", clip_vcc); if (clip_vcc->entry) @@ -255,8 +263,10 @@ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb) static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb) { + __be32 *ip = (__be32 *) neigh->primary_key; + pr_debug("(neigh %p, skb %p)\n", neigh, skb); - to_atmarpd(act_need, PRIV(neigh->dev)->number, NEIGH2ENTRY(neigh)->ip); + to_atmarpd(act_need, PRIV(neigh->dev)->number, *ip); } static void clip_neigh_error(struct neighbour *neigh, struct sk_buff *skb) @@ -277,72 +287,24 @@ static const struct neigh_ops clip_neigh_ops = { static int clip_constructor(struct neighbour *neigh) { - struct atmarp_entry *entry = NEIGH2ENTRY(neigh); - struct net_device *dev = neigh->dev; - struct in_device *in_dev; - struct neigh_parms *parms; + struct atmarp_entry *entry = neighbour_priv(neigh); - pr_debug("(neigh %p, entry %p)\n", neigh, entry); - neigh->type = inet_addr_type(&init_net, entry->ip); - if (neigh->type != RTN_UNICAST) + if (neigh->tbl->family != AF_INET) return -EINVAL; - rcu_read_lock(); - in_dev = __in_dev_get_rcu(dev); - if (!in_dev) { - rcu_read_unlock(); + if (neigh->type != RTN_UNICAST) return -EINVAL; - } - - parms = in_dev->arp_parms; - __neigh_parms_put(neigh->parms); - neigh->parms = neigh_parms_clone(parms); - rcu_read_unlock(); + neigh->nud_state = NUD_NONE; neigh->ops = &clip_neigh_ops; - neigh->output = neigh->nud_state & NUD_VALID ? - neigh->ops->connected_output : neigh->ops->output; + neigh->output = neigh->ops->output; entry->neigh = neigh; entry->vccs = NULL; entry->expires = jiffies - 1; + return 0; } -static u32 clip_hash(const void *pkey, const struct net_device *dev, __u32 rnd) -{ - return jhash_2words(*(u32 *) pkey, dev->ifindex, rnd); -} - -static struct neigh_table clip_tbl = { - .family = AF_INET, - .entry_size = sizeof(struct neighbour)+sizeof(struct atmarp_entry), - .key_len = 4, - .hash = clip_hash, - .constructor = clip_constructor, - .id = "clip_arp_cache", - - /* parameters are copied from ARP ... 
*/ - .parms = { - .tbl = &clip_tbl, - .base_reachable_time = 30 * HZ, - .retrans_time = 1 * HZ, - .gc_staletime = 60 * HZ, - .reachable_time = 30 * HZ, - .delay_probe_time = 5 * HZ, - .queue_len = 3, - .ucast_probes = 3, - .mcast_probes = 3, - .anycast_delay = 1 * HZ, - .proxy_delay = (8 * HZ) / 10, - .proxy_qlen = 64, - .locktime = 1 * HZ, - }, - .gc_interval = 30 * HZ, - .gc_thresh1 = 128, - .gc_thresh2 = 512, - .gc_thresh3 = 1024, -}; - /* @@@ copy bh locking from arp.c -- need to bh-enable atm code before */ /* @@ -392,12 +354,12 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb, dev->stats.tx_dropped++; return NETDEV_TX_OK; } - entry = NEIGH2ENTRY(n); + entry = neighbour_priv(n); if (!entry->vccs) { if (time_after(jiffies, entry->expires)) { /* should be resolved */ entry->expires = jiffies + ATMARP_RETRY_DELAY * HZ; - to_atmarpd(act_need, PRIV(dev)->number, entry->ip); + to_atmarpd(act_need, PRIV(dev)->number, *((__be32 *)n->primary_key)); } if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS) skb_queue_tail(&entry->neigh->arp_queue, skb); @@ -448,10 +410,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb, static int clip_mkip(struct atm_vcc *vcc, int timeout) { - struct sk_buff_head *rq, queue; struct clip_vcc *clip_vcc; - struct sk_buff *skb, *tmp; - unsigned long flags; if (!vcc->push) return -EBADFD; @@ -472,29 +431,9 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout) vcc->push = clip_push; vcc->pop = clip_pop; - __skb_queue_head_init(&queue); - rq = &sk_atm(vcc)->sk_receive_queue; - - spin_lock_irqsave(&rq->lock, flags); - skb_queue_splice_init(rq, &queue); - spin_unlock_irqrestore(&rq->lock, flags); - /* re-process everything received between connection setup and MKIP */ - skb_queue_walk_safe(&queue, skb, tmp) { - if (!clip_devs) { - atm_return(vcc, skb->truesize); - kfree_skb(skb); - } else { - struct net_device *dev = skb->dev; - unsigned int len = skb->len; - - skb_get(skb); - clip_push(vcc, skb); - dev->stats.rx_packets--; - dev->stats.rx_bytes -= len; - kfree_skb(skb); - } - } + vcc_process_recv_queue(vcc); + return 0; } @@ -523,11 +462,11 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip) rt = ip_route_output(&init_net, ip, 0, 1, 0); if (IS_ERR(rt)) return PTR_ERR(rt); - neigh = __neigh_lookup(&clip_tbl, &ip, rt->dst.dev, 1); + neigh = __neigh_lookup(&arp_tbl, &ip, rt->dst.dev, 1); ip_rt_put(rt); if (!neigh) return -ENOMEM; - entry = NEIGH2ENTRY(neigh); + entry = neighbour_priv(neigh); if (entry != clip_vcc->entry) { if (!clip_vcc->entry) pr_debug("add\n"); @@ -544,13 +483,15 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip) } static const struct net_device_ops clip_netdev_ops = { - .ndo_start_xmit = clip_start_xmit, + .ndo_start_xmit = clip_start_xmit, + .ndo_neigh_construct = clip_constructor, }; static void clip_setup(struct net_device *dev) { dev->netdev_ops = &clip_netdev_ops; dev->type = ARPHRD_ATM; + dev->neigh_priv_len = sizeof(struct atmarp_entry); dev->hard_header_len = RFC1483LLC_LEN; dev->mtu = RFC1626_MTU; dev->tx_queue_len = 100; /* "normal" queue (packets) */ @@ -604,10 +545,8 @@ static int clip_device_event(struct notifier_block *this, unsigned long event, if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; - if (event == NETDEV_UNREGISTER) { - neigh_ifdown(&clip_tbl, dev); + if (event == NETDEV_UNREGISTER) return NOTIFY_DONE; - } /* ignore non-CLIP devices */ if (dev->type != ARPHRD_ATM || dev->netdev_ops != &clip_netdev_ops) @@ -787,9 +726,10 @@ static void svc_addr(struct seq_file *seq, struct 
sockaddr_atmsvc *addr) /* This means the neighbour entry has no attached VCC objects. */ #define SEQ_NO_VCC_TOKEN ((void *) 2) -static void atmarp_info(struct seq_file *seq, struct net_device *dev, +static void atmarp_info(struct seq_file *seq, struct neighbour *n, struct atmarp_entry *entry, struct clip_vcc *clip_vcc) { + struct net_device *dev = n->dev; unsigned long exp; char buf[17]; int svc, llc, off; @@ -809,8 +749,7 @@ static void atmarp_info(struct seq_file *seq, struct net_device *dev, seq_printf(seq, "%-6s%-4s%-4s%5ld ", dev->name, svc ? "SVC" : "PVC", llc ? "LLC" : "NULL", exp); - off = scnprintf(buf, sizeof(buf) - 1, "%pI4", - &entry->ip); + off = scnprintf(buf, sizeof(buf) - 1, "%pI4", n->primary_key); while (off < 16) buf[off++] = ' '; buf[off] = '\0'; @@ -881,14 +820,17 @@ static void *clip_seq_sub_iter(struct neigh_seq_state *_state, { struct clip_seq_state *state = (struct clip_seq_state *)_state; - return clip_seq_vcc_walk(state, NEIGH2ENTRY(n), pos); + if (n->dev->type != ARPHRD_ATM) + return NULL; + + return clip_seq_vcc_walk(state, neighbour_priv(n), pos); } static void *clip_seq_start(struct seq_file *seq, loff_t * pos) { struct clip_seq_state *state = seq->private; state->ns.neigh_sub_iter = clip_seq_sub_iter; - return neigh_seq_start(seq, pos, &clip_tbl, NEIGH_SEQ_NEIGH_ONLY); + return neigh_seq_start(seq, pos, &arp_tbl, NEIGH_SEQ_NEIGH_ONLY); } static int clip_seq_show(struct seq_file *seq, void *v) @@ -900,10 +842,10 @@ static int clip_seq_show(struct seq_file *seq, void *v) seq_puts(seq, atm_arp_banner); } else { struct clip_seq_state *state = seq->private; - struct neighbour *n = v; struct clip_vcc *vcc = state->vcc; + struct neighbour *n = v; - atmarp_info(seq, n->dev, NEIGH2ENTRY(n), vcc); + atmarp_info(seq, n, neighbour_priv(n), vcc); } return 0; } @@ -934,9 +876,6 @@ static void atm_clip_exit_noproc(void); static int __init atm_clip_init(void) { - neigh_table_init_no_netlink(&clip_tbl); - - clip_tbl_hook = &clip_tbl; register_atm_ioctl(&clip_ioctl_ops); register_netdevice_notifier(&clip_dev_notifier); register_inetaddr_notifier(&clip_inet_notifier); @@ -973,12 +912,6 @@ static void atm_clip_exit_noproc(void) */ del_timer_sync(&idle_timer); - /* Next, purge the table, so that the device - * unregister loop below does not hang due to - * device references remaining in the table. - */ - neigh_ifdown(&clip_tbl, NULL); - dev = clip_devs; while (dev) { next = PRIV(dev)->next; @@ -986,11 +919,6 @@ static void atm_clip_exit_noproc(void) free_netdev(dev); dev = next; } - - /* Now it is safe to fully shutdown whole table. 
*/ - neigh_table_clear(&clip_tbl); - - clip_tbl_hook = NULL; } static void __exit atm_clip_exit(void) diff --git a/net/atm/common.c b/net/atm/common.c index 14ff9fe39989..b4b44dbed645 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -214,6 +214,26 @@ void vcc_release_async(struct atm_vcc *vcc, int reply) } EXPORT_SYMBOL(vcc_release_async); +void vcc_process_recv_queue(struct atm_vcc *vcc) +{ + struct sk_buff_head queue, *rq; + struct sk_buff *skb, *tmp; + unsigned long flags; + + __skb_queue_head_init(&queue); + rq = &sk_atm(vcc)->sk_receive_queue; + + spin_lock_irqsave(&rq->lock, flags); + skb_queue_splice_init(rq, &queue); + spin_unlock_irqrestore(&rq->lock, flags); + + skb_queue_walk_safe(&queue, skb, tmp) { + __skb_unlink(skb, &queue); + vcc->push(vcc, skb); + } +} +EXPORT_SYMBOL(vcc_process_recv_queue); + void atm_dev_signal_change(struct atm_dev *dev, char signal) { pr_debug("%s signal=%d dev=%p number=%d dev->signal=%d\n", @@ -502,8 +522,11 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, if (sock->state != SS_CONNECTED) return -ENOTCONN; - if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */ + + /* only handle MSG_DONTWAIT and MSG_PEEK */ + if (flags & ~(MSG_DONTWAIT | MSG_PEEK)) return -EOPNOTSUPP; + vcc = ATM_SD(sock); if (test_bit(ATM_VF_RELEASED, &vcc->flags) || test_bit(ATM_VF_CLOSE, &vcc->flags) || @@ -524,8 +547,13 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, if (error) return error; sock_recv_ts_and_drops(msg, sk, skb); - pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize); - atm_return(vcc, skb->truesize); + + if (!(flags & MSG_PEEK)) { + pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), + skb->truesize); + atm_return(vcc, skb->truesize); + } + skb_free_datagram(sk, skb); return copied; } diff --git a/net/atm/common.h b/net/atm/common.h index f48a76b6cdf4..cc3c2dae4d79 100644 --- a/net/atm/common.h +++ b/net/atm/common.h @@ -24,6 +24,7 @@ int vcc_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen); int vcc_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); +void vcc_process_recv_queue(struct atm_vcc *vcc); int atmpvc_init(void); void atmpvc_exit(void); diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c index db4a11c61d15..df35d9a3b5fe 100644 --- a/net/atm/pppoatm.c +++ b/net/atm/pppoatm.c @@ -303,6 +303,10 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg) atmvcc->push = pppoatm_push; atmvcc->pop = pppoatm_pop; __module_get(THIS_MODULE); + + /* re-process everything received between connection setup and + backend setup */ + vcc_process_recv_queue(atmvcc); return 0; } diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index e7c69f4619ec..b863c1877c80 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -402,14 +402,14 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg) break; case AX25_T1: - if (ax25_ctl.arg < 1) + if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ) goto einval_put; ax25->rtt = (ax25_ctl.arg * HZ) / 2; ax25->t1 = ax25_ctl.arg * HZ; break; case AX25_T2: - if (ax25_ctl.arg < 1) + if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ) goto einval_put; ax25->t2 = ax25_ctl.arg * HZ; break; @@ -422,10 +422,15 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg) break; case AX25_T3: + if (ax25_ctl.arg > ULONG_MAX / HZ) + goto einval_put; ax25->t3 = ax25_ctl.arg * HZ; break; case AX25_IDLE: + if 
(ax25_ctl.arg > ULONG_MAX / (60 * HZ)) + goto einval_put; + ax25->idle = ax25_ctl.arg * 60 * HZ; break; @@ -571,7 +576,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case AX25_T1: - if (opt < 1) { + if (opt < 1 || opt > ULONG_MAX / HZ) { res = -EINVAL; break; } @@ -580,7 +585,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case AX25_T2: - if (opt < 1) { + if (opt < 1 || opt > ULONG_MAX / HZ) { res = -EINVAL; break; } @@ -596,7 +601,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case AX25_T3: - if (opt < 1) { + if (opt < 1 || opt > ULONG_MAX / HZ) { res = -EINVAL; break; } @@ -604,7 +609,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; case AX25_IDLE: - if (opt < 0) { + if (opt < 0 || opt > ULONG_MAX / (60 * HZ)) { res = -EINVAL; break; } diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c index b8a7414c3571..c25492f7d665 100644 --- a/net/batman-adv/bat_sysfs.c +++ b/net/batman-adv/bat_sysfs.c @@ -174,7 +174,7 @@ static int store_uint_attr(const char *buff, size_t count, unsigned long uint_val; int ret; - ret = strict_strtoul(buff, 10, &uint_val); + ret = kstrtoul(buff, 10, &uint_val); if (ret) { bat_info(net_dev, "%s: Invalid parameter received: %s\n", @@ -239,7 +239,7 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr, unsigned long val; int ret, vis_mode_tmp = -1; - ret = strict_strtoul(buff, 10, &val); + ret = kstrtoul(buff, 10, &val); if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) || (strncmp(buff, "client", 6) == 0) || diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c index 0be9ff346fa0..9bc63b209b3f 100644 --- a/net/batman-adv/bitarray.c +++ b/net/batman-adv/bitarray.c @@ -155,7 +155,7 @@ int bit_get_packet(void *priv, unsigned long *seq_bits, /* sequence number is much newer, probably missed a lot of packets */ if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE) - || (seq_num_diff < EXPECTED_SEQNO_RANGE)) { + && (seq_num_diff < EXPECTED_SEQNO_RANGE)) { bat_dbg(DBG_BATMAN, bat_priv, "We missed a lot of packets (%i) !\n", seq_num_diff - 1); diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 619fb73b3b76..9373a143c6d4 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -25,6 +25,7 @@ #include "gateway_common.h" #include "hard-interface.h" #include "originator.h" +#include "translation-table.h" #include "routing.h" #include <linux/ip.h> #include <linux/ipv6.h> @@ -572,108 +573,142 @@ out: return ret; } -int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb, - struct orig_node *old_gw) +bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) { struct ethhdr *ethhdr; struct iphdr *iphdr; struct ipv6hdr *ipv6hdr; struct udphdr *udphdr; - struct gw_node *curr_gw; - struct neigh_node *neigh_curr = NULL, *neigh_old = NULL; - unsigned int header_len = 0; - int ret = 1; - - if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF) - return 0; /* check for ethernet header */ - if (!pskb_may_pull(skb, header_len + ETH_HLEN)) - return 0; + if (!pskb_may_pull(skb, *header_len + ETH_HLEN)) + return false; ethhdr = (struct ethhdr *)skb->data; - header_len += ETH_HLEN; + *header_len += ETH_HLEN; /* check for initial vlan header */ if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) { - if (!pskb_may_pull(skb, header_len + VLAN_HLEN)) - return 0; + if (!pskb_may_pull(skb, *header_len + VLAN_HLEN)) + return false; 
ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN); - header_len += VLAN_HLEN; + *header_len += VLAN_HLEN; } /* check for ip header */ switch (ntohs(ethhdr->h_proto)) { case ETH_P_IP: - if (!pskb_may_pull(skb, header_len + sizeof(*iphdr))) - return 0; - iphdr = (struct iphdr *)(skb->data + header_len); - header_len += iphdr->ihl * 4; + if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr))) + return false; + iphdr = (struct iphdr *)(skb->data + *header_len); + *header_len += iphdr->ihl * 4; /* check for udp header */ if (iphdr->protocol != IPPROTO_UDP) - return 0; + return false; break; case ETH_P_IPV6: - if (!pskb_may_pull(skb, header_len + sizeof(*ipv6hdr))) - return 0; - ipv6hdr = (struct ipv6hdr *)(skb->data + header_len); - header_len += sizeof(*ipv6hdr); + if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr))) + return false; + ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len); + *header_len += sizeof(*ipv6hdr); /* check for udp header */ if (ipv6hdr->nexthdr != IPPROTO_UDP) - return 0; + return false; break; default: - return 0; + return false; } - if (!pskb_may_pull(skb, header_len + sizeof(*udphdr))) - return 0; - udphdr = (struct udphdr *)(skb->data + header_len); - header_len += sizeof(*udphdr); + if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr))) + return false; + udphdr = (struct udphdr *)(skb->data + *header_len); + *header_len += sizeof(*udphdr); /* check for bootp port */ if ((ntohs(ethhdr->h_proto) == ETH_P_IP) && (ntohs(udphdr->dest) != 67)) - return 0; + return false; if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) && (ntohs(udphdr->dest) != 547)) - return 0; + return false; - if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER) - return -1; + return true; +} - curr_gw = gw_get_selected_gw_node(bat_priv); - if (!curr_gw) - return 0; - - /* If old_gw != NULL then this packet is unicast. - * So, at this point we have to check the message type: if it is a - * DHCPREQUEST we have to decide whether to drop it or not */ - if (old_gw && curr_gw->orig_node != old_gw) { - if (is_type_dhcprequest(skb, header_len)) { - /* If the dhcp packet has been sent to a different gw, - * we have to evaluate whether the old gw is still - * reliable enough */ - neigh_curr = find_router(bat_priv, curr_gw->orig_node, - NULL); - neigh_old = find_router(bat_priv, old_gw, NULL); - if (!neigh_curr || !neigh_old) - goto free_neigh; - if (neigh_curr->tq_avg - neigh_old->tq_avg < - GW_THRESHOLD) - ret = -1; - } +bool gw_out_of_range(struct bat_priv *bat_priv, + struct sk_buff *skb, struct ethhdr *ethhdr) +{ + struct neigh_node *neigh_curr = NULL, *neigh_old = NULL; + struct orig_node *orig_dst_node = NULL; + struct gw_node *curr_gw = NULL; + bool ret, out_of_range = false; + unsigned int header_len = 0; + uint8_t curr_tq_avg; + + ret = gw_is_dhcp_target(skb, &header_len); + if (!ret) + goto out; + + orig_dst_node = transtable_search(bat_priv, ethhdr->h_source, + ethhdr->h_dest); + if (!orig_dst_node) + goto out; + + if (!orig_dst_node->gw_flags) + goto out; + + ret = is_type_dhcprequest(skb, header_len); + if (!ret) + goto out; + + switch (atomic_read(&bat_priv->gw_mode)) { + case GW_MODE_SERVER: + /* If we are a GW then we are our best GW. 
We can artificially + * set the tq towards ourself as the maximum value */ + curr_tq_avg = TQ_MAX_VALUE; + break; + case GW_MODE_CLIENT: + curr_gw = gw_get_selected_gw_node(bat_priv); + if (!curr_gw) + goto out; + + /* packet is going to our gateway */ + if (curr_gw->orig_node == orig_dst_node) + goto out; + + /* If the dhcp packet has been sent to a different gw, + * we have to evaluate whether the old gw is still + * reliable enough */ + neigh_curr = find_router(bat_priv, curr_gw->orig_node, NULL); + if (!neigh_curr) + goto out; + + curr_tq_avg = neigh_curr->tq_avg; + break; + case GW_MODE_OFF: + default: + goto out; } -free_neigh: + + neigh_old = find_router(bat_priv, orig_dst_node, NULL); + if (!!neigh_old) + goto out; + + if (curr_tq_avg - neigh_old->tq_avg > GW_THRESHOLD) + out_of_range = true; + +out: + if (orig_dst_node) + orig_node_free_ref(orig_dst_node); + if (curr_gw) + gw_node_free_ref(curr_gw); if (neigh_old) neigh_node_free_ref(neigh_old); if (neigh_curr) neigh_node_free_ref(neigh_curr); - if (curr_gw) - gw_node_free_ref(curr_gw); - return ret; + return out_of_range; } diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h index b9b983c07feb..e1edba08eb1d 100644 --- a/net/batman-adv/gateway_client.h +++ b/net/batman-adv/gateway_client.h @@ -31,7 +31,8 @@ void gw_node_update(struct bat_priv *bat_priv, void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node); void gw_node_purge(struct bat_priv *bat_priv); int gw_client_seq_print_text(struct seq_file *seq, void *offset); -int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb, - struct orig_node *old_gw); +bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len); +bool gw_out_of_range(struct bat_priv *bat_priv, + struct sk_buff *skb, struct ethhdr *ethhdr); #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c index 18661af0bc3b..c4ac7b0a2a63 100644 --- a/net/batman-adv/gateway_common.c +++ b/net/batman-adv/gateway_common.c @@ -97,7 +97,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff, *tmp_ptr = '\0'; } - ret = strict_strtol(buff, 10, &ldown); + ret = kstrtol(buff, 10, &ldown); if (ret) { bat_err(net_dev, "Download speed of gateway mode invalid: %s\n", @@ -122,7 +122,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff, *tmp_ptr = '\0'; } - ret = strict_strtol(slash_ptr + 1, 10, &lup); + ret = kstrtol(slash_ptr + 1, 10, &lup); if (ret) { bat_err(net_dev, "Upload speed of gateway mode invalid: " diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c index 2a172505f513..d1da29da333b 100644 --- a/net/batman-adv/hash.c +++ b/net/batman-adv/hash.c @@ -25,7 +25,7 @@ /* clears the hash */ static void hash_init(struct hashtable_t *hash) { - int i; + uint32_t i; for (i = 0 ; i < hash->size; i++) { INIT_HLIST_HEAD(&hash->table[i]); @@ -42,7 +42,7 @@ void hash_destroy(struct hashtable_t *hash) } /* allocates and clears the hash */ -struct hashtable_t *hash_new(int size) +struct hashtable_t *hash_new(uint32_t size) { struct hashtable_t *hash; diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h index d20aa71ba1e8..4768717f07f9 100644 --- a/net/batman-adv/hash.h +++ b/net/batman-adv/hash.h @@ -33,17 +33,17 @@ typedef int (*hashdata_compare_cb)(const struct hlist_node *, const void *); /* the hashfunction, should return an index * based on the key in the data of the first * argument and the size the second */ -typedef int (*hashdata_choose_cb)(const 
void *, int); +typedef uint32_t (*hashdata_choose_cb)(const void *, uint32_t); typedef void (*hashdata_free_cb)(struct hlist_node *, void *); struct hashtable_t { struct hlist_head *table; /* the hashtable itself with the buckets */ spinlock_t *list_locks; /* spinlock for each hash list entry */ - int size; /* size of hashtable */ + uint32_t size; /* size of hashtable */ }; /* allocates and clears the hash */ -struct hashtable_t *hash_new(int size); +struct hashtable_t *hash_new(uint32_t size); /* free only the hashtable and the hash itself. */ void hash_destroy(struct hashtable_t *hash); @@ -57,7 +57,7 @@ static inline void hash_delete(struct hashtable_t *hash, struct hlist_head *head; struct hlist_node *node, *node_tmp; spinlock_t *list_lock; /* spinlock to protect write access */ - int i; + uint32_t i; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; @@ -93,7 +93,8 @@ static inline int hash_add(struct hashtable_t *hash, hashdata_choose_cb choose, const void *data, struct hlist_node *data_node) { - int index, ret = -1; + uint32_t index; + int ret = -1; struct hlist_head *head; struct hlist_node *node; spinlock_t *list_lock; /* spinlock to protect write access */ @@ -137,7 +138,7 @@ static inline void *hash_remove(struct hashtable_t *hash, hashdata_compare_cb compare, hashdata_choose_cb choose, void *data) { - size_t index; + uint32_t index; struct hlist_node *node; struct hlist_head *head; void *data_save = NULL; diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 964ad4d8ba33..86354e06eb48 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -28,7 +28,7 @@ #define DRIVER_DEVICE "batman-adv" #ifndef SOURCE_VERSION -#define SOURCE_VERSION "2011.4.0" +#define SOURCE_VERSION "2012.0.0" #endif /* B.A.T.M.A.N. parameters */ diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 0e5b77255d99..0bc2045a2f2e 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -164,7 +164,7 @@ void originator_free(struct bat_priv *bat_priv) struct hlist_head *head; spinlock_t *list_lock; /* spinlock to protect write access */ struct orig_node *orig_node; - int i; + uint32_t i; if (!hash) return; @@ -350,7 +350,7 @@ static void _purge_orig(struct bat_priv *bat_priv) struct hlist_head *head; spinlock_t *list_lock; /* spinlock to protect write access */ struct orig_node *orig_node; - int i; + uint32_t i; if (!hash) return; @@ -413,7 +413,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset) int batman_count = 0; int last_seen_secs; int last_seen_msecs; - int i, ret = 0; + uint32_t i; + int ret = 0; primary_if = primary_if_get_selected(bat_priv); @@ -519,7 +520,8 @@ int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num) struct hlist_node *node; struct hlist_head *head; struct orig_node *orig_node; - int i, ret; + uint32_t i; + int ret; /* resize all orig nodes because orig_node->bcast_own(_sum) depend on * if_num */ @@ -601,7 +603,8 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num) struct hlist_head *head; struct hard_iface *hard_iface_tmp; struct orig_node *orig_node; - int i, ret; + uint32_t i; + int ret; /* resize all orig nodes because orig_node->bcast_own(_sum) depend on * if_num */ diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h index cfc1f60a96a1..67765ffef731 100644 --- a/net/batman-adv/originator.h +++ b/net/batman-adv/originator.h @@ -42,7 +42,7 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num); /* hashfunction to choose an entry in a 
hash table of given size */ /* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ -static inline int choose_orig(const void *data, int32_t size) +static inline uint32_t choose_orig(const void *data, uint32_t size) { const unsigned char *key = data; uint32_t hash = 0; diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index f961cc5eade5..ef24a7205f65 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -39,7 +39,7 @@ void slide_own_bcast_window(struct hard_iface *hard_iface) struct hlist_head *head; struct orig_node *orig_node; unsigned long *word; - int i; + uint32_t i; size_t word_index; for (i = 0; i < hash->size; i++) { @@ -578,6 +578,7 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct tt_query_packet *tt_query; + uint16_t tt_len; struct ethhdr *ethhdr; /* drop packet if it has not necessary minimum size */ @@ -616,13 +617,22 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if) } break; case TT_RESPONSE: - /* packet needs to be linearized to access the TT changes */ - if (skb_linearize(skb) < 0) - goto out; + if (is_my_mac(tt_query->dst)) { + /* packet needs to be linearized to access the TT + * changes */ + if (skb_linearize(skb) < 0) + goto out; + + tt_len = tt_query->tt_data * sizeof(struct tt_change); + + /* Ensure we have all the claimed data */ + if (unlikely(skb_headlen(skb) < + sizeof(struct tt_query_packet) + + tt_len)) + goto out; - if (is_my_mac(tt_query->dst)) handle_tt_response(bat_priv, tt_query); - else { + } else { bat_dbg(DBG_TT, bat_priv, "Routing TT_RESPONSE to %pM [%c]\n", tt_query->dst, diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index f9cc95728989..45297c843092 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -563,10 +563,10 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) struct bcast_packet *bcast_packet; struct vlan_ethhdr *vhdr; struct softif_neigh *curr_softif_neigh = NULL; - struct orig_node *orig_node = NULL; + unsigned int header_len = 0; int data_len = skb->len, ret; short vid = -1; - bool do_bcast; + bool do_bcast = false; if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) goto dropped; @@ -598,17 +598,28 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) /* Register the client MAC in the transtable */ tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); - orig_node = transtable_search(bat_priv, ethhdr->h_source, - ethhdr->h_dest); - do_bcast = is_multicast_ether_addr(ethhdr->h_dest); - if (do_bcast || (orig_node && orig_node->gw_flags)) { - ret = gw_is_target(bat_priv, skb, orig_node); + if (is_multicast_ether_addr(ethhdr->h_dest)) { + do_bcast = true; - if (ret < 0) - goto dropped; - - if (ret) - do_bcast = false; + switch (atomic_read(&bat_priv->gw_mode)) { + case GW_MODE_SERVER: + /* gateway servers should not send dhcp + * requests into the mesh */ + ret = gw_is_dhcp_target(skb, &header_len); + if (ret) + goto dropped; + break; + case GW_MODE_CLIENT: + /* gateway clients should send dhcp requests + * via unicast to their gateway */ + ret = gw_is_dhcp_target(skb, &header_len); + if (ret) + do_bcast = false; + break; + case GW_MODE_OFF: + default: + break; + } } /* ethernet packet should be broadcasted */ @@ -644,6 +655,12 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) /* unicast packet */ } else { + if 
(atomic_read(&bat_priv->gw_mode) != GW_MODE_OFF) { + ret = gw_out_of_range(bat_priv, skb, ethhdr); + if (ret) + goto dropped; + } + ret = unicast_send_skb(skb, bat_priv); if (ret != 0) goto dropped_freed; @@ -662,8 +679,6 @@ end: softif_neigh_free_ref(curr_softif_neigh); if (primary_if) hardif_free_ref(primary_if); - if (orig_node) - orig_node_free_ref(orig_node); return NETDEV_TX_OK; } diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index c7aafc7c5ed4..78b9528bfc2a 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -67,7 +67,7 @@ static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv, struct hlist_head *head; struct hlist_node *node; struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL; - int index; + uint32_t index; if (!hash) return NULL; @@ -99,7 +99,7 @@ static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv, struct hlist_node *node; struct tt_global_entry *tt_global_entry; struct tt_global_entry *tt_global_entry_tmp = NULL; - int index; + uint32_t index; if (!hash) return NULL; @@ -314,9 +314,8 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset) struct hard_iface *primary_if; struct hlist_node *node; struct hlist_head *head; - size_t buf_size, pos; - char *buff; - int i, ret = 0; + uint32_t i; + int ret = 0; primary_if = primary_if_get_selected(bat_priv); if (!primary_if) { @@ -337,34 +336,13 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset) "announced via TT (TTVN: %u):\n", net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn)); - buf_size = 1; - /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */ - for (i = 0; i < hash->size; i++) { - head = &hash->table[i]; - - rcu_read_lock(); - __hlist_for_each_rcu(node, head) - buf_size += 29; - rcu_read_unlock(); - } - - buff = kmalloc(buf_size, GFP_ATOMIC); - if (!buff) { - ret = -ENOMEM; - goto out; - } - - buff[0] = '\0'; - pos = 0; - for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) { - pos += snprintf(buff + pos, 30, " * %pM " - "[%c%c%c%c%c]\n", + seq_printf(seq, " * %pM [%c%c%c%c%c]\n", tt_local_entry->addr, (tt_local_entry->flags & TT_CLIENT_ROAM ? 
'R' : '.'), @@ -379,9 +357,6 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset) } rcu_read_unlock(); } - - seq_printf(seq, "%s", buff); - kfree(buff); out: if (primary_if) hardif_free_ref(primary_if); @@ -427,7 +402,7 @@ static void tt_local_purge(struct bat_priv *bat_priv) struct hlist_node *node, *node_tmp; struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */ - int i; + uint32_t i; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; @@ -465,7 +440,7 @@ static void tt_local_table_free(struct bat_priv *bat_priv) struct tt_local_entry *tt_local_entry; struct hlist_node *node, *node_tmp; struct hlist_head *head; - int i; + uint32_t i; if (!bat_priv->tt_local_hash) return; @@ -590,9 +565,8 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset) struct hard_iface *primary_if; struct hlist_node *node; struct hlist_head *head; - size_t buf_size, pos; - char *buff; - int i, ret = 0; + uint32_t i; + int ret = 0; primary_if = primary_if_get_selected(bat_priv); if (!primary_if) { @@ -615,35 +589,13 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset) seq_printf(seq, " %-13s %s %-15s %s %s\n", "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags"); - buf_size = 1; - /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via - * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/ - for (i = 0; i < hash->size; i++) { - head = &hash->table[i]; - - rcu_read_lock(); - __hlist_for_each_rcu(node, head) - buf_size += 67; - rcu_read_unlock(); - } - - buff = kmalloc(buf_size, GFP_ATOMIC); - if (!buff) { - ret = -ENOMEM; - goto out; - } - - buff[0] = '\0'; - pos = 0; - for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) { - pos += snprintf(buff + pos, 69, - " * %pM (%3u) via %pM (%3u) " + seq_printf(seq, " * %pM (%3u) via %pM (%3u) " "[%c%c%c]\n", tt_global_entry->addr, tt_global_entry->ttvn, tt_global_entry->orig_node->orig, @@ -659,9 +611,6 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset) } rcu_read_unlock(); } - - seq_printf(seq, "%s", buff); - kfree(buff); out: if (primary_if) hardif_free_ref(primary_if); @@ -716,7 +665,7 @@ void tt_global_del_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, const char *message) { struct tt_global_entry *tt_global_entry; - int i; + uint32_t i; struct hashtable_t *hash = bat_priv->tt_global_hash; struct hlist_node *node, *safe; struct hlist_head *head; @@ -735,9 +684,10 @@ void tt_global_del_orig(struct bat_priv *bat_priv, if (tt_global_entry->orig_node == orig_node) { bat_dbg(DBG_TT, bat_priv, "Deleting global tt entry %pM " - "(via %pM): originator time out\n", + "(via %pM): %s\n", tt_global_entry->addr, - tt_global_entry->orig_node->orig); + tt_global_entry->orig_node->orig, + message); hlist_del_rcu(node); tt_global_entry_free_ref(tt_global_entry); } @@ -754,7 +704,7 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv) struct hlist_node *node, *node_tmp; struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */ - int i; + uint32_t i; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; @@ -788,7 +738,7 @@ static void tt_global_table_free(struct bat_priv *bat_priv) struct tt_global_entry *tt_global_entry; struct hlist_node *node, *node_tmp; struct hlist_head *head; - int i; + uint32_t i; if (!bat_priv->tt_global_hash) return; @@ -874,7 +824,8 @@ uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node) struct 
tt_global_entry *tt_global_entry; struct hlist_node *node; struct hlist_head *head; - int i, j; + uint32_t i; + int j; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; @@ -911,7 +862,8 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv) struct tt_local_entry *tt_local_entry; struct hlist_node *node; struct hlist_head *head; - int i, j; + uint32_t i; + int j; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; @@ -1048,7 +1000,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, struct sk_buff *skb = NULL; uint16_t tt_tot, tt_count; ssize_t tt_query_size = sizeof(struct tt_query_packet); - int i; + uint32_t i; if (tt_query_size + tt_len > primary_if->soft_iface->mtu) { tt_len = primary_if->soft_iface->mtu - tt_query_size; @@ -1187,11 +1139,11 @@ static bool send_other_tt_response(struct bat_priv *bat_priv, (tt_request->flags & TT_FULL_TABLE ? 'F' : '.')); /* Let's get the orig node of the REAL destination */ - req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst); + req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst); if (!req_dst_orig_node) goto out; - res_dst_orig_node = get_orig_node(bat_priv, tt_request->src); + res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src); if (!res_dst_orig_node) goto out; @@ -1317,7 +1269,7 @@ static bool send_my_tt_response(struct bat_priv *bat_priv, my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); req_ttvn = tt_request->ttvn; - orig_node = get_orig_node(bat_priv, tt_request->src); + orig_node = orig_hash_find(bat_priv, tt_request->src); if (!orig_node) goto out; @@ -1725,7 +1677,7 @@ void tt_free(struct bat_priv *bat_priv) * entry */ static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags) { - int i; + uint32_t i; struct hashtable_t *hash = bat_priv->tt_local_hash; struct hlist_head *head; struct hlist_node *node; @@ -1758,7 +1710,7 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv) struct hlist_node *node, *node_tmp; struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */ - int i; + uint32_t i; if (!hash) return; diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c index f81a6b668b0c..7445413253ca 100644 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c @@ -66,7 +66,7 @@ static int vis_info_cmp(const struct hlist_node *node, const void *data2) /* hash function to choose an entry in a hash table of given size */ /* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ -static int vis_info_choose(const void *data, int size) +static uint32_t vis_info_choose(const void *data, uint32_t size) { const struct vis_info *vis_info = data; const struct vis_packet *packet; @@ -96,7 +96,7 @@ static struct vis_info *vis_hash_find(struct bat_priv *bat_priv, struct hlist_head *head; struct hlist_node *node; struct vis_info *vis_info, *vis_info_tmp = NULL; - int index; + uint32_t index; if (!hash) return NULL; @@ -202,7 +202,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset) HLIST_HEAD(vis_if_list); struct if_list_entry *entry; struct hlist_node *pos, *n; - int i, j, ret = 0; + uint32_t i; + int j, ret = 0; int vis_server = atomic_read(&bat_priv->vis_mode); size_t buff_pos, buf_size; char *buff; @@ -556,7 +557,8 @@ static int find_best_vis_server(struct bat_priv *bat_priv, struct hlist_head *head; struct orig_node *orig_node; struct vis_packet *packet; - int best_tq = -1, i; + int best_tq = -1; + uint32_t i; packet = (struct vis_packet *)info->skb_packet->data; @@ -608,7 +610,8 @@ static int 
generate_vis_packet(struct bat_priv *bat_priv) struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data; struct vis_info_entry *entry; struct tt_local_entry *tt_local_entry; - int best_tq = -1, i; + int best_tq = -1; + uint32_t i; info->first_seen = jiffies; packet->vis_type = atomic_read(&bat_priv->vis_mode); @@ -696,7 +699,7 @@ unlock: * held */ static void purge_vis_packets(struct bat_priv *bat_priv) { - int i; + uint32_t i; struct hashtable_t *hash = bat_priv->vis_hash; struct hlist_node *node, *node_tmp; struct hlist_head *head; @@ -733,7 +736,7 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv, struct sk_buff *skb; struct hard_iface *hard_iface; uint8_t dstaddr[ETH_ALEN]; - int i; + uint32_t i; packet = (struct vis_packet *)info->skb_packet->data; diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c index 91bcd3a961ec..a6cd856046ab 100644 --- a/net/bluetooth/bnep/core.c +++ b/net/bluetooth/bnep/core.c @@ -65,15 +65,13 @@ static DECLARE_RWSEM(bnep_session_sem); static struct bnep_session *__bnep_get_session(u8 *dst) { struct bnep_session *s; - struct list_head *p; BT_DBG(""); - list_for_each(p, &bnep_session_list) { - s = list_entry(p, struct bnep_session, list); + list_for_each_entry(s, &bnep_session_list, list) if (!compare_ether_addr(dst, s->eh.h_source)) return s; - } + return NULL; } @@ -667,17 +665,14 @@ static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s) int bnep_get_connlist(struct bnep_connlist_req *req) { - struct list_head *p; + struct bnep_session *s; int err = 0, n = 0; down_read(&bnep_session_sem); - list_for_each(p, &bnep_session_list) { - struct bnep_session *s; + list_for_each_entry(s, &bnep_session_list, list) { struct bnep_conninfo ci; - s = list_entry(p, struct bnep_session, list); - __bnep_copy_ci(&ci, s); if (copy_to_user(req->ci, &ci, sizeof(ci))) { diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 7d00ddf9e9dc..9e8940b24bba 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c @@ -53,15 +53,13 @@ static LIST_HEAD(cmtp_session_list); static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr) { struct cmtp_session *session; - struct list_head *p; BT_DBG(""); - list_for_each(p, &cmtp_session_list) { - session = list_entry(p, struct cmtp_session, list); + list_for_each_entry(session, &cmtp_session_list, list) if (!bacmp(bdaddr, &session->bdaddr)) return session; - } + return NULL; } @@ -431,19 +429,16 @@ int cmtp_del_connection(struct cmtp_conndel_req *req) int cmtp_get_connlist(struct cmtp_connlist_req *req) { - struct list_head *p; + struct cmtp_session *session; int err = 0, n = 0; BT_DBG(""); down_read(&cmtp_session_sem); - list_for_each(p, &cmtp_session_list) { - struct cmtp_session *session; + list_for_each_entry(session, &cmtp_session_list, list) { struct cmtp_conninfo ci; - session = list_entry(p, struct cmtp_session, list); - __cmtp_copy_session(session, &ci); if (copy_to_user(req->ci, &ci, sizeof(ci))) { diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index e0af7237cd92..de0b93e45980 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -374,6 +374,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) skb_queue_head_init(&conn->data_q); + hci_chan_hash_init(conn); + setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn); setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept, @@ -432,6 +434,8 @@ 
int hci_conn_del(struct hci_conn *conn) tasklet_disable(&hdev->tx_task); + hci_chan_hash_flush(conn); + hci_conn_hash_del(hdev, conn); if (hdev->notify) hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); @@ -453,16 +457,13 @@ int hci_conn_del(struct hci_conn *conn) struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) { int use_src = bacmp(src, BDADDR_ANY); - struct hci_dev *hdev = NULL; - struct list_head *p; + struct hci_dev *hdev = NULL, *d; BT_DBG("%s -> %s", batostr(src), batostr(dst)); read_lock_bh(&hci_dev_list_lock); - list_for_each(p, &hci_dev_list) { - struct hci_dev *d = list_entry(p, struct hci_dev, list); - + list_for_each_entry(d, &hci_dev_list, list) { if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags)) continue; @@ -819,7 +820,7 @@ void hci_conn_hash_flush(struct hci_dev *hdev) c->state = BT_CLOSED; - hci_proto_disconn_cfm(c, 0x16); + hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM); hci_conn_del(c); } } @@ -855,10 +856,10 @@ EXPORT_SYMBOL(hci_conn_put_device); int hci_get_conn_list(void __user *arg) { + register struct hci_conn *c; struct hci_conn_list_req req, *cl; struct hci_conn_info *ci; struct hci_dev *hdev; - struct list_head *p; int n = 0, size, err; if (copy_from_user(&req, arg, sizeof(req))) @@ -882,10 +883,7 @@ int hci_get_conn_list(void __user *arg) ci = cl->conn_info; hci_dev_lock_bh(hdev); - list_for_each(p, &hdev->conn_hash.list) { - register struct hci_conn *c; - c = list_entry(p, struct hci_conn, list); - + list_for_each_entry(c, &hdev->conn_hash.list, list) { bacpy(&(ci + n)->bdaddr, &c->dst); (ci + n)->handle = c->handle; (ci + n)->type = c->type; @@ -956,3 +954,52 @@ int hci_get_auth_info(struct hci_dev *hdev, void __user *arg) return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0; } + +struct hci_chan *hci_chan_create(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_chan *chan; + + BT_DBG("%s conn %p", hdev->name, conn); + + chan = kzalloc(sizeof(struct hci_chan), GFP_ATOMIC); + if (!chan) + return NULL; + + chan->conn = conn; + skb_queue_head_init(&chan->data_q); + + tasklet_disable(&hdev->tx_task); + hci_chan_hash_add(conn, chan); + tasklet_enable(&hdev->tx_task); + + return chan; +} + +int hci_chan_del(struct hci_chan *chan) +{ + struct hci_conn *conn = chan->conn; + struct hci_dev *hdev = conn->hdev; + + BT_DBG("%s conn %p chan %p", hdev->name, conn, chan); + + tasklet_disable(&hdev->tx_task); + hci_chan_hash_del(conn, chan); + tasklet_enable(&hdev->tx_task); + + skb_queue_purge(&chan->data_q); + kfree(chan); + + return 0; +} + +void hci_chan_hash_flush(struct hci_conn *conn) +{ + struct hci_chan_hash *h = &conn->chan_hash; + struct hci_chan *chan, *tmp; + + BT_DBG("conn %p", conn); + + list_for_each_entry_safe(chan, tmp, &h->list, list) + hci_chan_del(chan); +} diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index be84ae33ae36..fb3feeb185d7 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -319,8 +319,7 @@ static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt) * Device is held on return. 
*/ struct hci_dev *hci_dev_get(int index) { - struct hci_dev *hdev = NULL; - struct list_head *p; + struct hci_dev *hdev = NULL, *d; BT_DBG("%d", index); @@ -328,8 +327,7 @@ struct hci_dev *hci_dev_get(int index) return NULL; read_lock(&hci_dev_list_lock); - list_for_each(p, &hci_dev_list) { - struct hci_dev *d = list_entry(p, struct hci_dev, list); + list_for_each_entry(d, &hci_dev_list, list) { if (d->id == index) { hdev = hci_dev_hold(d); break; @@ -551,8 +549,11 @@ int hci_dev_open(__u16 dev) hci_dev_hold(hdev); set_bit(HCI_UP, &hdev->flags); hci_notify(hdev, HCI_DEV_UP); - if (!test_bit(HCI_SETUP, &hdev->flags)) - mgmt_powered(hdev->id, 1); + if (!test_bit(HCI_SETUP, &hdev->flags)) { + hci_dev_lock_bh(hdev); + mgmt_powered(hdev, 1); + hci_dev_unlock_bh(hdev); + } } else { /* Init failed, cleanup */ tasklet_kill(&hdev->rx_task); @@ -597,6 +598,14 @@ static int hci_dev_do_close(struct hci_dev *hdev) tasklet_kill(&hdev->rx_task); tasklet_kill(&hdev->tx_task); + if (hdev->discov_timeout > 0) { + cancel_delayed_work(&hdev->discov_off); + hdev->discov_timeout = 0; + } + + if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) + cancel_delayed_work(&hdev->power_off); + hci_dev_lock_bh(hdev); inquiry_cache_flush(hdev); hci_conn_hash_flush(hdev); @@ -636,7 +645,9 @@ static int hci_dev_do_close(struct hci_dev *hdev) * and no tasks are scheduled. */ hdev->close(hdev); - mgmt_powered(hdev->id, 0); + hci_dev_lock_bh(hdev); + mgmt_powered(hdev, 0); + hci_dev_unlock_bh(hdev); /* Clear flags */ hdev->flags = 0; @@ -794,9 +805,9 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) int hci_get_dev_list(void __user *arg) { + struct hci_dev *hdev; struct hci_dev_list_req *dl; struct hci_dev_req *dr; - struct list_head *p; int n = 0, size, err; __u16 dev_num; @@ -815,12 +826,9 @@ int hci_get_dev_list(void __user *arg) dr = dl->dev_req; read_lock_bh(&hci_dev_list_lock); - list_for_each(p, &hci_dev_list) { - struct hci_dev *hdev; - - hdev = list_entry(p, struct hci_dev, list); - - hci_del_off_timer(hdev); + list_for_each_entry(hdev, &hci_dev_list, list) { + if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) + cancel_delayed_work(&hdev->power_off); if (!test_bit(HCI_MGMT, &hdev->flags)) set_bit(HCI_PAIRABLE, &hdev->flags); @@ -855,7 +863,8 @@ int hci_get_dev_info(void __user *arg) if (!hdev) return -ENODEV; - hci_del_off_timer(hdev); + if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) + cancel_delayed_work_sync(&hdev->power_off); if (!test_bit(HCI_MGMT, &hdev->flags)) set_bit(HCI_PAIRABLE, &hdev->flags); @@ -912,6 +921,7 @@ struct hci_dev *hci_alloc_dev(void) if (!hdev) return NULL; + hci_init_sysfs(hdev); skb_queue_head_init(&hdev->driver_init); return hdev; @@ -938,39 +948,41 @@ static void hci_power_on(struct work_struct *work) return; if (test_bit(HCI_AUTO_OFF, &hdev->flags)) - mod_timer(&hdev->off_timer, - jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT)); + queue_delayed_work(hdev->workqueue, &hdev->power_off, + msecs_to_jiffies(AUTO_OFF_TIMEOUT)); if (test_and_clear_bit(HCI_SETUP, &hdev->flags)) - mgmt_index_added(hdev->id); + mgmt_index_added(hdev); } static void hci_power_off(struct work_struct *work) { - struct hci_dev *hdev = container_of(work, struct hci_dev, power_off); + struct hci_dev *hdev = container_of(work, struct hci_dev, + power_off.work); BT_DBG("%s", hdev->name); + clear_bit(HCI_AUTO_OFF, &hdev->flags); + hci_dev_close(hdev->id); } -static void hci_auto_off(unsigned long data) +static void hci_discov_off(struct work_struct *work) { - struct hci_dev *hdev = (struct hci_dev *) data; + 
struct hci_dev *hdev; + u8 scan = SCAN_PAGE; + + hdev = container_of(work, struct hci_dev, discov_off.work); BT_DBG("%s", hdev->name); - clear_bit(HCI_AUTO_OFF, &hdev->flags); + hci_dev_lock_bh(hdev); - queue_work(hdev->workqueue, &hdev->power_off); -} + hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan); -void hci_del_off_timer(struct hci_dev *hdev) -{ - BT_DBG("%s", hdev->name); + hdev->discov_timeout = 0; - clear_bit(HCI_AUTO_OFF, &hdev->flags); - del_timer(&hdev->off_timer); + hci_dev_unlock_bh(hdev); } int hci_uuids_clear(struct hci_dev *hdev) @@ -1007,16 +1019,11 @@ int hci_link_keys_clear(struct hci_dev *hdev) struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) { - struct list_head *p; - - list_for_each(p, &hdev->link_keys) { - struct link_key *k; - - k = list_entry(p, struct link_key, list); + struct link_key *k; + list_for_each_entry(k, &hdev->link_keys, list) if (bacmp(bdaddr, &k->bdaddr) == 0) return k; - } return NULL; } @@ -1138,7 +1145,7 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, persistent = hci_persistent_key(hdev, conn, type, old_key_type); - mgmt_new_key(hdev->id, key, persistent); + mgmt_new_link_key(hdev, key, persistent); if (!persistent) { list_del(&key->list); @@ -1181,7 +1188,7 @@ int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr, memcpy(id->rand, rand, sizeof(id->rand)); if (new_key) - mgmt_new_key(hdev->id, key, old_key_type); + mgmt_new_link_key(hdev, key, old_key_type); return 0; } @@ -1279,16 +1286,11 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash, struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) { - struct list_head *p; - - list_for_each(p, &hdev->blacklist) { - struct bdaddr_list *b; - - b = list_entry(p, struct bdaddr_list, list); + struct bdaddr_list *b; + list_for_each_entry(b, &hdev->blacklist, list) if (bacmp(bdaddr, &b->bdaddr) == 0) return b; - } return NULL; } @@ -1327,7 +1329,7 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr) list_add(&entry->list, &hdev->blacklist); - return mgmt_device_blocked(hdev->id, bdaddr); + return mgmt_device_blocked(hdev, bdaddr); } int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr) @@ -1346,7 +1348,7 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr) list_del(&entry->list); kfree(entry); - return mgmt_device_unblocked(hdev->id, bdaddr); + return mgmt_device_unblocked(hdev, bdaddr); } static void hci_clear_adv_cache(unsigned long arg) @@ -1425,7 +1427,7 @@ int hci_add_adv_entry(struct hci_dev *hdev, int hci_register_dev(struct hci_dev *hdev) { struct list_head *head = &hci_dev_list, *p; - int i, id = 0; + int i, id, error; BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name, hdev->bus, hdev->owner); @@ -1433,6 +1435,11 @@ int hci_register_dev(struct hci_dev *hdev) if (!hdev->open || !hdev->close || !hdev->destruct) return -EINVAL; + /* Do not allow HCI_AMP devices to register at index 0, + * so the index can be used as the AMP controller ID. + */ + id = (hdev->dev_type == HCI_BREDR) ? 
0 : 1; + write_lock_bh(&hci_dev_list_lock); /* Find first available device id */ @@ -1479,6 +1486,8 @@ int hci_register_dev(struct hci_dev *hdev) hci_conn_hash_init(hdev); + INIT_LIST_HEAD(&hdev->mgmt_pending); + INIT_LIST_HEAD(&hdev->blacklist); INIT_LIST_HEAD(&hdev->uuids); @@ -1492,8 +1501,9 @@ int hci_register_dev(struct hci_dev *hdev) (unsigned long) hdev); INIT_WORK(&hdev->power_on, hci_power_on); - INIT_WORK(&hdev->power_off, hci_power_off); - setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev); + INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); + + INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off); memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); @@ -1502,10 +1512,14 @@ int hci_register_dev(struct hci_dev *hdev) write_unlock_bh(&hci_dev_list_lock); hdev->workqueue = create_singlethread_workqueue(hdev->name); - if (!hdev->workqueue) - goto nomem; + if (!hdev->workqueue) { + error = -ENOMEM; + goto err; + } - hci_register_sysfs(hdev); + error = hci_add_sysfs(hdev); + if (error < 0) + goto err_wqueue; hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); @@ -1524,17 +1538,19 @@ int hci_register_dev(struct hci_dev *hdev) return id; -nomem: +err_wqueue: + destroy_workqueue(hdev->workqueue); +err: write_lock_bh(&hci_dev_list_lock); list_del(&hdev->list); write_unlock_bh(&hci_dev_list_lock); - return -ENOMEM; + return error; } EXPORT_SYMBOL(hci_register_dev); /* Unregister HCI device */ -int hci_unregister_dev(struct hci_dev *hdev) +void hci_unregister_dev(struct hci_dev *hdev) { int i; @@ -1550,8 +1566,15 @@ int hci_unregister_dev(struct hci_dev *hdev) kfree_skb(hdev->reassembly[i]); if (!test_bit(HCI_INIT, &hdev->flags) && - !test_bit(HCI_SETUP, &hdev->flags)) - mgmt_index_removed(hdev->id); + !test_bit(HCI_SETUP, &hdev->flags)) { + hci_dev_lock_bh(hdev); + mgmt_index_removed(hdev); + hci_dev_unlock_bh(hdev); + } + + /* mgmt_index_removed should take care of emptying the + * pending list */ + BUG_ON(!list_empty(&hdev->mgmt_pending)); hci_notify(hdev, HCI_DEV_UNREG); @@ -1560,9 +1583,8 @@ int hci_unregister_dev(struct hci_dev *hdev) rfkill_destroy(hdev->rfkill); } - hci_unregister_sysfs(hdev); + hci_del_sysfs(hdev); - hci_del_off_timer(hdev); del_timer(&hdev->adv_timer); destroy_workqueue(hdev->workqueue); @@ -1576,8 +1598,6 @@ int hci_unregister_dev(struct hci_dev *hdev) hci_dev_unlock_bh(hdev); __hci_dev_put(hdev); - - return 0; } EXPORT_SYMBOL(hci_unregister_dev); @@ -1948,23 +1968,18 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) hdr->dlen = cpu_to_le16(len); } -void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags) +static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, + struct sk_buff *skb, __u16 flags) { struct hci_dev *hdev = conn->hdev; struct sk_buff *list; - BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags); - - skb->dev = (void *) hdev; - bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; - hci_add_acl_hdr(skb, conn->handle, flags); - list = skb_shinfo(skb)->frag_list; if (!list) { /* Non fragmented */ BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); - skb_queue_tail(&conn->data_q, skb); + skb_queue_tail(queue, skb); } else { /* Fragmented */ BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); @@ -1972,9 +1987,9 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags) skb_shinfo(skb)->frag_list = NULL; /* Queue all fragments atomically */ - spin_lock_bh(&conn->data_q.lock); + 
spin_lock_bh(&queue->lock); - __skb_queue_tail(&conn->data_q, skb); + __skb_queue_tail(queue, skb); flags &= ~ACL_START; flags |= ACL_CONT; @@ -1987,11 +2002,25 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags) BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); - __skb_queue_tail(&conn->data_q, skb); + __skb_queue_tail(queue, skb); } while (list); - spin_unlock_bh(&conn->data_q.lock); + spin_unlock_bh(&queue->lock); } +} + +void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) +{ + struct hci_conn *conn = chan->conn; + struct hci_dev *hdev = conn->hdev; + + BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags); + + skb->dev = (void *) hdev; + bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT; + hci_add_acl_hdr(skb, conn->handle, flags); + + hci_queue_acl(conn, &chan->data_q, skb, flags); tasklet_schedule(&hdev->tx_task); } @@ -2026,16 +2055,12 @@ EXPORT_SYMBOL(hci_send_sco); static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) { struct hci_conn_hash *h = &hdev->conn_hash; - struct hci_conn *conn = NULL; + struct hci_conn *conn = NULL, *c; int num = 0, min = ~0; - struct list_head *p; /* We don't have to lock device here. Connections are always * added and removed with TX task disabled. */ - list_for_each(p, &h->list) { - struct hci_conn *c; - c = list_entry(p, struct hci_conn, list); - + list_for_each_entry(c, &h->list, list) { if (c->type != type || skb_queue_empty(&c->data_q)) continue; @@ -2084,14 +2109,12 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type) { struct hci_conn_hash *h = &hdev->conn_hash; - struct list_head *p; - struct hci_conn *c; + struct hci_conn *c; BT_ERR("%s link tx timeout", hdev->name); /* Kill stalled connections */ - list_for_each(p, &h->list) { - c = list_entry(p, struct hci_conn, list); + list_for_each_entry(c, &h->list, list) { if (c->type == type && c->sent) { BT_ERR("%s killing stalled connection %s", hdev->name, batostr(&c->dst)); @@ -2100,11 +2123,137 @@ static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type) } } -static inline void hci_sched_acl(struct hci_dev *hdev) +static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, + int *quote) { + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_chan *chan = NULL; + int num = 0, min = ~0, cur_prio = 0; struct hci_conn *conn; + int cnt, q, conn_num = 0; + + BT_DBG("%s", hdev->name); + + list_for_each_entry(conn, &h->list, list) { + struct hci_chan_hash *ch; + struct hci_chan *tmp; + + if (conn->type != type) + continue; + + if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) + continue; + + conn_num++; + + ch = &conn->chan_hash; + + list_for_each_entry(tmp, &ch->list, list) { + struct sk_buff *skb; + + if (skb_queue_empty(&tmp->data_q)) + continue; + + skb = skb_peek(&tmp->data_q); + if (skb->priority < cur_prio) + continue; + + if (skb->priority > cur_prio) { + num = 0; + min = ~0; + cur_prio = skb->priority; + } + + num++; + + if (conn->sent < min) { + min = conn->sent; + chan = tmp; + } + } + + if (hci_conn_num(hdev, type) == conn_num) + break; + } + + if (!chan) + return NULL; + + switch (chan->conn->type) { + case ACL_LINK: + cnt = hdev->acl_cnt; + break; + case SCO_LINK: + case ESCO_LINK: + cnt = hdev->sco_cnt; + break; + case LE_LINK: + cnt = hdev->le_mtu ? 
hdev->le_cnt : hdev->acl_cnt; + break; + default: + cnt = 0; + BT_ERR("Unknown link type"); + } + + q = cnt / num; + *quote = q ? q : 1; + BT_DBG("chan %p quote %d", chan, *quote); + return chan; +} + +static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *conn; + int num = 0; + + BT_DBG("%s", hdev->name); + + list_for_each_entry(conn, &h->list, list) { + struct hci_chan_hash *ch; + struct hci_chan *chan; + + if (conn->type != type) + continue; + + if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) + continue; + + num++; + + ch = &conn->chan_hash; + list_for_each_entry(chan, &ch->list, list) { + struct sk_buff *skb; + + if (chan->sent) { + chan->sent = 0; + continue; + } + + if (skb_queue_empty(&chan->data_q)) + continue; + + skb = skb_peek(&chan->data_q); + if (skb->priority >= HCI_PRIO_MAX - 1) + continue; + + skb->priority = HCI_PRIO_MAX - 1; + + BT_DBG("chan %p skb %p promoted to %d", chan, skb, + skb->priority); + } + + if (hci_conn_num(hdev, type) == num) + break; + } +} + +static inline void hci_sched_acl(struct hci_dev *hdev) +{ + struct hci_chan *chan; struct sk_buff *skb; int quote; + unsigned int cnt; BT_DBG("%s", hdev->name); @@ -2118,19 +2267,35 @@ static inline void hci_sched_acl(struct hci_dev *hdev) hci_link_tx_to(hdev, ACL_LINK); } - while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, "e))) { - while (quote-- && (skb = skb_dequeue(&conn->data_q))) { - BT_DBG("skb %p len %d", skb, skb->len); + cnt = hdev->acl_cnt; + + while (hdev->acl_cnt && + (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { + u32 priority = (skb_peek(&chan->data_q))->priority; + while (quote-- && (skb = skb_peek(&chan->data_q))) { + BT_DBG("chan %p skb %p len %d priority %u", chan, skb, + skb->len, skb->priority); - hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active); + /* Stop if priority has changed */ + if (skb->priority < priority) + break; + + skb = skb_dequeue(&chan->data_q); + + hci_conn_enter_active_mode(chan->conn, + bt_cb(skb)->force_active); hci_send_frame(skb); hdev->acl_last_tx = jiffies; hdev->acl_cnt--; - conn->sent++; + chan->sent++; + chan->conn->sent++; } } + + if (cnt != hdev->acl_cnt) + hci_prio_recalculate(hdev, ACL_LINK); } /* Schedule SCO */ @@ -2182,9 +2347,9 @@ static inline void hci_sched_esco(struct hci_dev *hdev) static inline void hci_sched_le(struct hci_dev *hdev) { - struct hci_conn *conn; + struct hci_chan *chan; struct sk_buff *skb; - int quote, cnt; + int quote, cnt, tmp; BT_DBG("%s", hdev->name); @@ -2200,21 +2365,35 @@ static inline void hci_sched_le(struct hci_dev *hdev) } cnt = hdev->le_pkts ? 
hdev->le_cnt : hdev->acl_cnt; - while (cnt && (conn = hci_low_sent(hdev, LE_LINK, "e))) { - while (quote-- && (skb = skb_dequeue(&conn->data_q))) { - BT_DBG("skb %p len %d", skb, skb->len); + tmp = cnt; + while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) { + u32 priority = (skb_peek(&chan->data_q))->priority; + while (quote-- && (skb = skb_peek(&chan->data_q))) { + BT_DBG("chan %p skb %p len %d priority %u", chan, skb, + skb->len, skb->priority); + + /* Stop if priority has changed */ + if (skb->priority < priority) + break; + + skb = skb_dequeue(&chan->data_q); hci_send_frame(skb); hdev->le_last_tx = jiffies; cnt--; - conn->sent++; + chan->sent++; + chan->conn->sent++; } } + if (hdev->le_pkts) hdev->le_cnt = cnt; else hdev->acl_cnt = cnt; + + if (cnt != tmp) + hci_prio_recalculate(hdev, LE_LINK); } static void hci_tx_task(unsigned long arg) @@ -2407,3 +2586,31 @@ static void hci_cmd_task(unsigned long arg) } } } + +int hci_do_inquiry(struct hci_dev *hdev, u8 length) +{ + /* General inquiry access code (GIAC) */ + u8 lap[3] = { 0x33, 0x8b, 0x9e }; + struct hci_cp_inquiry cp; + + BT_DBG("%s", hdev->name); + + if (test_bit(HCI_INQUIRY, &hdev->flags)) + return -EINPROGRESS; + + memset(&cp, 0, sizeof(cp)); + memcpy(&cp.lap, lap, sizeof(cp.lap)); + cp.length = length; + + return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); +} + +int hci_cancel_inquiry(struct hci_dev *hdev) +{ + BT_DBG("%s", hdev->name); + + if (!test_bit(HCI_INQUIRY, &hdev->flags)) + return -EPERM; + + return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); +} diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index d7d96b6b1f0d..a89cf1f24e47 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -58,9 +58,11 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) if (status) return; - if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && - test_bit(HCI_MGMT, &hdev->flags)) - mgmt_discovering(hdev->id, 0); + clear_bit(HCI_INQUIRY, &hdev->flags); + + hci_dev_lock(hdev); + mgmt_discovering(hdev, 0); + hci_dev_unlock(hdev); hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); @@ -76,10 +78,6 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) if (status) return; - if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && - test_bit(HCI_MGMT, &hdev->flags)) - mgmt_discovering(hdev->id, 0); - hci_conn_check_pending(hdev); } @@ -205,13 +203,15 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) if (!sent) return; + hci_dev_lock(hdev); + if (test_bit(HCI_MGMT, &hdev->flags)) - mgmt_set_local_name_complete(hdev->id, sent, status); + mgmt_set_local_name_complete(hdev, sent, status); - if (status) - return; + if (status == 0) + memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); - memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); + hci_dev_unlock(hdev); } static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) @@ -274,7 +274,8 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb) static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) { - __u8 status = *((__u8 *) skb->data); + __u8 param, status = *((__u8 *) skb->data); + int old_pscan, old_iscan; void *sent; BT_DBG("%s status 0x%x", hdev->name, status); @@ -283,28 +284,40 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) if (!sent) return; - if (!status) { - __u8 param = *((__u8 *) sent); - int old_pscan, old_iscan; - - old_pscan = 
test_and_clear_bit(HCI_PSCAN, &hdev->flags); - old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags); + param = *((__u8 *) sent); - if (param & SCAN_INQUIRY) { - set_bit(HCI_ISCAN, &hdev->flags); - if (!old_iscan) - mgmt_discoverable(hdev->id, 1); - } else if (old_iscan) - mgmt_discoverable(hdev->id, 0); + hci_dev_lock(hdev); - if (param & SCAN_PAGE) { - set_bit(HCI_PSCAN, &hdev->flags); - if (!old_pscan) - mgmt_connectable(hdev->id, 1); - } else if (old_pscan) - mgmt_connectable(hdev->id, 0); + if (status != 0) { + mgmt_write_scan_failed(hdev, param, status); + hdev->discov_timeout = 0; + goto done; } + old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags); + old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags); + + if (param & SCAN_INQUIRY) { + set_bit(HCI_ISCAN, &hdev->flags); + if (!old_iscan) + mgmt_discoverable(hdev, 1); + if (hdev->discov_timeout > 0) { + int to = msecs_to_jiffies(hdev->discov_timeout * 1000); + queue_delayed_work(hdev->workqueue, &hdev->discov_off, + to); + } + } else if (old_iscan) + mgmt_discoverable(hdev, 0); + + if (param & SCAN_PAGE) { + set_bit(HCI_PSCAN, &hdev->flags); + if (!old_pscan) + mgmt_connectable(hdev, 1); + } else if (old_pscan) + mgmt_connectable(hdev, 0); + +done: + hci_dev_unlock(hdev); hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status); } @@ -748,6 +761,30 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb) hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status); } +static void hci_cc_read_local_amp_info(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_rp_read_local_amp_info *rp = (void *) skb->data; + + BT_DBG("%s status 0x%x", hdev->name, rp->status); + + if (rp->status) + return; + + hdev->amp_status = rp->amp_status; + hdev->amp_total_bw = __le32_to_cpu(rp->total_bw); + hdev->amp_max_bw = __le32_to_cpu(rp->max_bw); + hdev->amp_min_latency = __le32_to_cpu(rp->min_latency); + hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu); + hdev->amp_type = rp->amp_type; + hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap); + hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size); + hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to); + hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to); + + hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status); +} + static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, struct sk_buff *skb) { @@ -804,19 +841,24 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s status 0x%x", hdev->name, rp->status); + hci_dev_lock(hdev); + if (test_bit(HCI_MGMT, &hdev->flags)) - mgmt_pin_code_reply_complete(hdev->id, &rp->bdaddr, rp->status); + mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status); if (rp->status != 0) - return; + goto unlock; cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY); if (!cp) - return; + goto unlock; conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); if (conn) conn->pin_length = cp->pin_len; + +unlock: + hci_dev_unlock(hdev); } static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) @@ -825,10 +867,15 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s status 0x%x", hdev->name, rp->status); + hci_dev_lock(hdev); + if (test_bit(HCI_MGMT, &hdev->flags)) - mgmt_pin_code_neg_reply_complete(hdev->id, &rp->bdaddr, + mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, rp->status); + + hci_dev_unlock(hdev); } + static void hci_cc_le_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) { @@ -855,9 +902,13 @@ 
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s status 0x%x", hdev->name, rp->status); + hci_dev_lock(hdev); + if (test_bit(HCI_MGMT, &hdev->flags)) - mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr, + mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, rp->status); + + hci_dev_unlock(hdev); } static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, @@ -867,9 +918,13 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, BT_DBG("%s status 0x%x", hdev->name, rp->status); + hci_dev_lock(hdev); + if (test_bit(HCI_MGMT, &hdev->flags)) - mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr, + mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr, rp->status); + + hci_dev_unlock(hdev); } static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, @@ -879,8 +934,10 @@ static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, BT_DBG("%s status 0x%x", hdev->name, rp->status); - mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash, + hci_dev_lock(hdev); + mgmt_read_local_oob_data_reply_complete(hdev, rp->hash, rp->randomizer, rp->status); + hci_dev_unlock(hdev); } static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, @@ -955,12 +1012,18 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) if (status) { hci_req_complete(hdev, HCI_OP_INQUIRY, status); hci_conn_check_pending(hdev); + hci_dev_lock(hdev); + if (test_bit(HCI_MGMT, &hdev->flags)) + mgmt_inquiry_failed(hdev, status); + hci_dev_unlock(hdev); return; } - if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags) && - test_bit(HCI_MGMT, &hdev->flags)) - mgmt_discovering(hdev->id, 1); + set_bit(HCI_INQUIRY, &hdev->flags); + + hci_dev_lock(hdev); + mgmt_discovering(hdev, 1); + hci_dev_unlock(hdev); } static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) @@ -1339,13 +1402,16 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff BT_DBG("%s status %d", hdev->name, status); - if (test_and_clear_bit(HCI_INQUIRY, &hdev->flags) && - test_bit(HCI_MGMT, &hdev->flags)) - mgmt_discovering(hdev->id, 0); - hci_req_complete(hdev, HCI_OP_INQUIRY, status); hci_conn_check_pending(hdev); + + if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) + return; + + hci_dev_lock(hdev); + mgmt_discovering(hdev, 0); + hci_dev_unlock(hdev); } static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) @@ -1361,12 +1427,6 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff * hci_dev_lock(hdev); - if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) { - - if (test_bit(HCI_MGMT, &hdev->flags)) - mgmt_discovering(hdev->id, 1); - } - for (; num_rsp; num_rsp--, info++) { bacpy(&data.bdaddr, &info->bdaddr); data.pscan_rep_mode = info->pscan_rep_mode; @@ -1377,8 +1437,8 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff * data.rssi = 0x00; data.ssp_mode = 0x00; hci_inquiry_cache_update(hdev, &data); - mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0, - NULL); + mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, + info->dev_class, 0, NULL); } hci_dev_unlock(hdev); @@ -1412,7 +1472,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s conn->state = BT_CONFIG; hci_conn_hold(conn); conn->disc_timeout = HCI_DISCONN_TIMEOUT; - mgmt_connected(hdev->id, &ev->bdaddr, conn->type); + mgmt_connected(hdev, &ev->bdaddr, conn->type); } else conn->state = BT_CONNECTED; @@ -1444,7 +1504,8 @@ static inline void 
hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s } else { conn->state = BT_CLOSED; if (conn->type == ACL_LINK) - mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status); + mgmt_connect_failed(hdev, &ev->bdaddr, conn->type, + ev->status); } if (conn->type == ACL_LINK) @@ -1531,7 +1592,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk struct hci_cp_reject_conn_req cp; bacpy(&cp.bdaddr, &ev->bdaddr); - cp.reason = 0x0f; + cp.reason = HCI_ERROR_REJ_BAD_ADDR; hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); } } @@ -1544,7 +1605,9 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff BT_DBG("%s status %d", hdev->name, ev->status); if (ev->status) { - mgmt_disconnect_failed(hdev->id); + hci_dev_lock(hdev); + mgmt_disconnect_failed(hdev); + hci_dev_unlock(hdev); return; } @@ -1557,7 +1620,7 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff conn->state = BT_CLOSED; if (conn->type == ACL_LINK || conn->type == LE_LINK) - mgmt_disconnected(hdev->id, &conn->dst); + mgmt_disconnected(hdev, &conn->dst, conn->type); hci_proto_disconn_cfm(conn, ev->reason); hci_conn_del(conn); @@ -1588,7 +1651,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s conn->sec_level = conn->pending_sec_level; } } else { - mgmt_auth_failed(hdev->id, &conn->dst, ev->status); + mgmt_auth_failed(hdev, &conn->dst, ev->status); } clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); @@ -1643,7 +1706,7 @@ static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb hci_dev_lock(hdev); if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags)) - mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name); + mgmt_remote_name(hdev, &ev->bdaddr, ev->name); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (!conn) @@ -1898,6 +1961,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk hci_cc_write_ca_timeout(hdev, skb); break; + case HCI_OP_READ_LOCAL_AMP_INFO: + hci_cc_read_local_amp_info(hdev, skb); + break; + case HCI_OP_DELETE_STORED_LINK_KEY: hci_cc_delete_stored_link_key(hdev, skb); break; @@ -2029,7 +2096,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) case HCI_OP_DISCONNECT: if (ev->status != 0) - mgmt_disconnect_failed(hdev->id); + mgmt_disconnect_failed(hdev); break; case HCI_OP_LE_CREATE_CONN: @@ -2194,7 +2261,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff else secure = 0; - mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure); + mgmt_pin_code_request(hdev, &ev->bdaddr, secure); } unlock: @@ -2363,12 +2430,6 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct hci_dev_lock(hdev); - if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) { - - if (test_bit(HCI_MGMT, &hdev->flags)) - mgmt_discovering(hdev->id, 1); - } - if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { struct inquiry_info_with_rssi_and_pscan_mode *info; info = (void *) (skb->data + 1); @@ -2383,7 +2444,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct data.rssi = info->rssi; data.ssp_mode = 0x00; hci_inquiry_cache_update(hdev, &data); - mgmt_device_found(hdev->id, &info->bdaddr, + mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, info->dev_class, info->rssi, NULL); } @@ -2400,7 +2461,7 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct data.rssi = info->rssi; data.ssp_mode = 
0x00; hci_inquiry_cache_update(hdev, &data); - mgmt_device_found(hdev->id, &info->bdaddr, + mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, info->dev_class, info->rssi, NULL); } @@ -2531,12 +2592,6 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct if (!num_rsp) return; - if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) { - - if (test_bit(HCI_MGMT, &hdev->flags)) - mgmt_discovering(hdev->id, 1); - } - hci_dev_lock(hdev); for (; num_rsp; num_rsp--, info++) { @@ -2549,8 +2604,8 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct data.rssi = info->rssi; data.ssp_mode = 0x01; hci_inquiry_cache_update(hdev, &data); - mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, - info->rssi, info->data); + mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, + info->dev_class, info->rssi, info->data); } hci_dev_unlock(hdev); @@ -2614,7 +2669,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff struct hci_cp_io_capability_neg_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); - cp.reason = 0x18; /* Pairing not allowed */ + cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, sizeof(cp), &cp); @@ -2706,7 +2761,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev, } confirm: - mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey, + mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey, confirm_hint); unlock: @@ -2732,7 +2787,7 @@ static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_ * event gets always produced as initiator and is also mapped to * the mgmt_auth_failed event */ if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0) - mgmt_auth_failed(hdev->id, &conn->dst, ev->status); + mgmt_auth_failed(hdev, &conn->dst, ev->status); hci_conn_put(conn); @@ -2813,14 +2868,14 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff } if (ev->status) { - mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status); + mgmt_connect_failed(hdev, &ev->bdaddr, conn->type, ev->status); hci_proto_connect_cfm(conn, ev->status); conn->state = BT_CLOSED; hci_conn_del(conn); goto unlock; } - mgmt_connected(hdev->id, &ev->bdaddr, conn->type); + mgmt_connected(hdev, &ev->bdaddr, conn->type); conn->sec_level = BT_SECURITY_LOW; conn->handle = __le16_to_cpu(ev->handle); @@ -3104,5 +3159,5 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data) kfree_skb(skb); } -module_param(enable_le, bool, 0444); +module_param(enable_le, bool, 0644); MODULE_PARM_DESC(enable_le, "Enable LE support"); diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 661b461cf0b0..c62d254a1379 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -436,17 +436,12 @@ static const struct file_operations inquiry_cache_fops = { static int blacklist_show(struct seq_file *f, void *p) { struct hci_dev *hdev = f->private; - struct list_head *l; + struct bdaddr_list *b; hci_dev_lock_bh(hdev); - list_for_each(l, &hdev->blacklist) { - struct bdaddr_list *b; - - b = list_entry(l, struct bdaddr_list, list); - + list_for_each_entry(b, &hdev->blacklist, list) seq_printf(f, "%s\n", batostr(&b->bdaddr)); - } hci_dev_unlock_bh(hdev); @@ -485,17 +480,12 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid) static int uuids_show(struct seq_file *f, void *p) { struct hci_dev *hdev = f->private; - struct list_head *l; + struct bt_uuid *uuid; hci_dev_lock_bh(hdev); - list_for_each(l, &hdev->uuids) 
{ - struct bt_uuid *uuid; - - uuid = list_entry(l, struct bt_uuid, list); - + list_for_each_entry(uuid, &hdev->uuids, list) print_bt_uuid(f, uuid->uuid); - } hci_dev_unlock_bh(hdev); @@ -543,22 +533,28 @@ static int auto_accept_delay_get(void *data, u64 *val) DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, auto_accept_delay_set, "%llu\n"); -int hci_register_sysfs(struct hci_dev *hdev) +void hci_init_sysfs(struct hci_dev *hdev) +{ + struct device *dev = &hdev->dev; + + dev->type = &bt_host; + dev->class = bt_class; + + dev_set_drvdata(dev, hdev); + device_initialize(dev); +} + +int hci_add_sysfs(struct hci_dev *hdev) { struct device *dev = &hdev->dev; int err; BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); - dev->type = &bt_host; - dev->class = bt_class; dev->parent = hdev->parent; - dev_set_name(dev, "%s", hdev->name); - dev_set_drvdata(dev, hdev); - - err = device_register(dev); + err = device_add(dev); if (err < 0) return err; @@ -582,7 +578,7 @@ int hci_register_sysfs(struct hci_dev *hdev) return 0; } -void hci_unregister_sysfs(struct hci_dev *hdev) +void hci_del_sysfs(struct hci_dev *hdev) { BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index 075a3e920caf..3c2d888925d7 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -81,24 +81,20 @@ static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 }; static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr) { struct hidp_session *session; - struct list_head *p; BT_DBG(""); - list_for_each(p, &hidp_session_list) { - session = list_entry(p, struct hidp_session, list); + list_for_each_entry(session, &hidp_session_list, list) { if (!bacmp(bdaddr, &session->bdaddr)) return session; } + return NULL; } static void __hidp_link_session(struct hidp_session *session) { - __module_get(THIS_MODULE); list_add(&session->list, &hidp_session_list); - - hci_conn_hold_device(session->conn); } static void __hidp_unlink_session(struct hidp_session *session) @@ -106,7 +102,6 @@ static void __hidp_unlink_session(struct hidp_session *session) hci_conn_put_device(session->conn); list_del(&session->list); - module_put(THIS_MODULE); } static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci) @@ -255,6 +250,9 @@ static int __hidp_send_ctrl_message(struct hidp_session *session, BT_DBG("session %p data %p size %d", session, data, size); + if (atomic_read(&session->terminate)) + return -EIO; + skb = alloc_skb(size + 1, GFP_ATOMIC); if (!skb) { BT_ERR("Can't allocate memory for new frame"); @@ -329,6 +327,7 @@ static int hidp_get_raw_report(struct hid_device *hid, struct sk_buff *skb; size_t len; int numbered_reports = hid->report_enum[report_type].numbered; + int ret; switch (report_type) { case HID_FEATURE_REPORT: @@ -352,8 +351,9 @@ static int hidp_get_raw_report(struct hid_device *hid, session->waiting_report_number = numbered_reports ? report_number : -1; set_bit(HIDP_WAITING_FOR_RETURN, &session->flags); data[0] = report_number; - if (hidp_send_ctrl_message(hid->driver_data, report_type, data, 1)) - goto err_eio; + ret = hidp_send_ctrl_message(hid->driver_data, report_type, data, 1); + if (ret) + goto err; /* Wait for the return of the report. The returned report gets put in session->report_return. 
*/ @@ -365,11 +365,13 @@ static int hidp_get_raw_report(struct hid_device *hid, 5*HZ); if (res == 0) { /* timeout */ - goto err_eio; + ret = -EIO; + goto err; } if (res < 0) { /* signal */ - goto err_restartsys; + ret = -ERESTARTSYS; + goto err; } } @@ -390,14 +392,10 @@ static int hidp_get_raw_report(struct hid_device *hid, return len; -err_restartsys: - clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); - mutex_unlock(&session->report_mutex); - return -ERESTARTSYS; -err_eio: +err: clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); mutex_unlock(&session->report_mutex); - return -EIO; + return ret; } static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count, @@ -422,11 +420,10 @@ static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, s /* Set up our wait, and send the report request to the device. */ set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags); - if (hidp_send_ctrl_message(hid->driver_data, report_type, - data, count)) { - ret = -ENOMEM; + ret = hidp_send_ctrl_message(hid->driver_data, report_type, data, + count); + if (ret) goto err; - } /* Wait for the ACK from the device. */ while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) { @@ -496,10 +493,9 @@ static void hidp_process_handshake(struct hidp_session *session, case HIDP_HSHK_ERR_INVALID_REPORT_ID: case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST: case HIDP_HSHK_ERR_INVALID_PARAMETER: - if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) { - clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); + if (test_and_clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) wake_up_interruptible(&session->report_queue); - } + /* FIXME: Call into SET_ GET_ handlers here */ break; @@ -520,10 +516,8 @@ static void hidp_process_handshake(struct hidp_session *session, } /* Wake up the waiting thread. 
*/ - if (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) { - clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags); + if (test_and_clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) wake_up_interruptible(&session->report_queue); - } } static void hidp_process_hid_control(struct hidp_session *session, @@ -663,25 +657,32 @@ static int hidp_send_frame(struct socket *sock, unsigned char *data, int len) return kernel_sendmsg(sock, &msg, &iv, 1, len); } -static void hidp_process_transmit(struct hidp_session *session) +static void hidp_process_intr_transmit(struct hidp_session *session) { struct sk_buff *skb; BT_DBG("session %p", session); - while ((skb = skb_dequeue(&session->ctrl_transmit))) { - if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) { - skb_queue_head(&session->ctrl_transmit, skb); + while ((skb = skb_dequeue(&session->intr_transmit))) { + if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) { + skb_queue_head(&session->intr_transmit, skb); break; } hidp_set_timer(session); kfree_skb(skb); } +} - while ((skb = skb_dequeue(&session->intr_transmit))) { - if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) { - skb_queue_head(&session->intr_transmit, skb); +static void hidp_process_ctrl_transmit(struct hidp_session *session) +{ + struct sk_buff *skb; + + BT_DBG("session %p", session); + + while ((skb = skb_dequeue(&session->ctrl_transmit))) { + if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) { + skb_queue_head(&session->ctrl_transmit, skb); break; } @@ -700,6 +701,7 @@ static int hidp_session(void *arg) BT_DBG("session %p", session); + __module_get(THIS_MODULE); set_user_nice(current, -15); init_waitqueue_entry(&ctrl_wait, current); @@ -714,23 +716,25 @@ static int hidp_session(void *arg) intr_sk->sk_state != BT_CONNECTED) break; - while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { + while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) { skb_orphan(skb); if (!skb_linearize(skb)) - hidp_recv_ctrl_frame(session, skb); + hidp_recv_intr_frame(session, skb); else kfree_skb(skb); } - while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) { + hidp_process_intr_transmit(session); + + while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { skb_orphan(skb); if (!skb_linearize(skb)) - hidp_recv_intr_frame(session, skb); + hidp_recv_ctrl_frame(session, skb); else kfree_skb(skb); } - hidp_process_transmit(session); + hidp_process_ctrl_transmit(session); schedule(); set_current_state(TASK_INTERRUPTIBLE); @@ -739,6 +743,10 @@ static int hidp_session(void *arg) remove_wait_queue(sk_sleep(intr_sk), &intr_wait); remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait); + clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags); + clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); + wake_up_interruptible(&session->report_queue); + down_write(&hidp_session_sem); hidp_del_timer(session); @@ -772,34 +780,37 @@ static int hidp_session(void *arg) kfree(session->rd_data); kfree(session); + module_put_and_exit(0); return 0; } -static struct device *hidp_get_device(struct hidp_session *session) +static struct hci_conn *hidp_get_connection(struct hidp_session *session) { bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src; bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst; - struct device *device = NULL; + struct hci_conn *conn; struct hci_dev *hdev; hdev = hci_get_route(dst, src); if (!hdev) return NULL; - session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); - if (session->conn) - device = &session->conn->dev; + 
hci_dev_lock_bh(hdev); + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); + if (conn) + hci_conn_hold_device(conn); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); - return device; + return conn; } static int hidp_setup_input(struct hidp_session *session, struct hidp_connadd_req *req) { struct input_dev *input; - int err, i; + int i; input = input_allocate_device(); if (!input) @@ -842,17 +853,10 @@ static int hidp_setup_input(struct hidp_session *session, input->relbit[0] |= BIT_MASK(REL_WHEEL); } - input->dev.parent = hidp_get_device(session); + input->dev.parent = &session->conn->dev; input->event = hidp_input_event; - err = input_register_device(input); - if (err < 0) { - input_free_device(input); - session->input = NULL; - return err; - } - return 0; } @@ -949,7 +953,7 @@ static int hidp_setup_hid(struct hidp_session *session, strncpy(hid->phys, batostr(&bt_sk(session->ctrl_sock->sk)->src), 64); strncpy(hid->uniq, batostr(&bt_sk(session->ctrl_sock->sk)->dst), 64); - hid->dev.parent = hidp_get_device(session); + hid->dev.parent = &session->conn->dev; hid->ll_driver = &hidp_hid_driver; hid->hid_get_raw_report = hidp_get_raw_report; @@ -976,18 +980,20 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst)) return -ENOTUNIQ; - session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL); - if (!session) - return -ENOMEM; - BT_DBG("rd_data %p rd_size %d", req->rd_data, req->rd_size); down_write(&hidp_session_sem); s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst); if (s && s->state == BT_CONNECTED) { - err = -EEXIST; - goto failed; + up_write(&hidp_session_sem); + return -EEXIST; + } + + session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL); + if (!session) { + up_write(&hidp_session_sem); + return -ENOMEM; } bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst); @@ -1003,6 +1009,12 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, session->intr_sock = intr_sock; session->state = BT_CONNECTED; + session->conn = hidp_get_connection(session); + if (!session->conn) { + err = -ENOTCONN; + goto failed; + } + setup_timer(&session->timer, hidp_idle_timeout, (unsigned long)session); skb_queue_head_init(&session->ctrl_transmit); @@ -1015,9 +1027,11 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID); session->idle_to = req->idle_to; + __hidp_link_session(session); + if (req->rd_size > 0) { err = hidp_setup_hid(session, req); - if (err && err != -ENODEV) + if (err) goto purge; } @@ -1027,8 +1041,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, goto purge; } - __hidp_link_session(session); - hidp_set_timer(session); if (session->hid) { @@ -1054,7 +1066,11 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, !session->waiting_for_startup); } - err = hid_add_device(session->hid); + if (session->hid) + err = hid_add_device(session->hid); + else + err = input_register_device(session->input); + if (err < 0) { atomic_inc(&session->terminate); wake_up_process(session->task); @@ -1077,8 +1093,6 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, unlink: hidp_del_timer(session); - __hidp_unlink_session(session); - if (session->input) { input_unregister_device(session->input); session->input = NULL; @@ -1093,6 +1107,8 @@ unlink: session->rd_data = NULL; purge: + __hidp_unlink_session(session); + 
skb_queue_purge(&session->ctrl_transmit); skb_queue_purge(&session->intr_transmit); @@ -1134,19 +1150,16 @@ int hidp_del_connection(struct hidp_conndel_req *req) int hidp_get_connlist(struct hidp_connlist_req *req) { - struct list_head *p; + struct hidp_session *session; int err = 0, n = 0; BT_DBG(""); down_read(&hidp_session_sem); - list_for_each(p, &hidp_session_list) { - struct hidp_session *session; + list_for_each_entry(session, &hidp_session_list, list) { struct hidp_conninfo ci; - session = list_entry(p, struct hidp_session, list); - __hidp_copy_session(session, &ci); if (copy_to_user(req->ci, &ci, sizeof(ci))) { diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 5ea94a1eecf2..e8a6837996cf 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -57,9 +57,10 @@ #include <net/bluetooth/smp.h> int disable_ertm; +int enable_hs; static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; -static u8 l2cap_fixed_chan[8] = { 0x02, }; +static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, }; static LIST_HEAD(chan_list); static DEFINE_RWLOCK(chan_list_lock); @@ -219,7 +220,7 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn) static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout) { - BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout); + BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout); if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout))) chan_hold(chan); @@ -293,6 +294,8 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk) atomic_set(&chan->refcnt, 1); + BT_DBG("sk %p chan %p", sk, chan); + return chan; } @@ -310,7 +313,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, chan->psm, chan->dcid); - conn->disc_reason = 0x13; + conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; chan->conn = conn; @@ -337,6 +340,13 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) chan->omtu = L2CAP_DEFAULT_MTU; } + chan->local_id = L2CAP_BESTEFFORT_ID; + chan->local_stype = L2CAP_SERV_BESTEFFORT; + chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE; + chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME; + chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT; + chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO; + chan_hold(chan); list_add(&chan->list, &conn->chan_l); @@ -556,34 +566,58 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, flags = ACL_START; bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON; + skb->priority = HCI_PRIO_MAX; + + hci_send_acl(conn->hchan, skb, flags); +} + +static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) +{ + struct hci_conn *hcon = chan->conn->hcon; + u16 flags; + + BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len, + skb->priority); + + if (!test_bit(FLAG_FLUSHABLE, &chan->flags) && + lmp_no_flush_capable(hcon->hdev)) + flags = ACL_START_NO_FLUSH; + else + flags = ACL_START; - hci_send_acl(conn->hcon, skb, flags); + bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags); + hci_send_acl(chan->conn->hchan, skb, flags); } -static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control) +static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control) { struct sk_buff *skb; struct l2cap_hdr *lh; struct l2cap_conn *conn = chan->conn; - int count, hlen = L2CAP_HDR_SIZE + 2; - u8 flags; + int count, hlen; if (chan->state != BT_CONNECTED) return; + if (test_bit(FLAG_EXT_CTRL, 
&chan->flags)) + hlen = L2CAP_EXT_HDR_SIZE; + else + hlen = L2CAP_ENH_HDR_SIZE; + if (chan->fcs == L2CAP_FCS_CRC16) - hlen += 2; + hlen += L2CAP_FCS_SIZE; - BT_DBG("chan %p, control 0x%2.2x", chan, control); + BT_DBG("chan %p, control 0x%8.8x", chan, control); count = min_t(unsigned int, conn->mtu, hlen); - control |= L2CAP_CTRL_FRAME_TYPE; + + control |= __set_sframe(chan); if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) - control |= L2CAP_CTRL_FINAL; + control |= __set_ctrl_final(chan); if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state)) - control |= L2CAP_CTRL_POLL; + control |= __set_ctrl_poll(chan); skb = bt_skb_alloc(count, GFP_ATOMIC); if (!skb) @@ -592,32 +626,27 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control) lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); - put_unaligned_le16(control, skb_put(skb, 2)); + + __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); if (chan->fcs == L2CAP_FCS_CRC16) { - u16 fcs = crc16(0, (u8 *)lh, count - 2); - put_unaligned_le16(fcs, skb_put(skb, 2)); + u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE); + put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); } - if (lmp_no_flush_capable(conn->hcon->hdev)) - flags = ACL_START_NO_FLUSH; - else - flags = ACL_START; - - bt_cb(skb)->force_active = chan->force_active; - - hci_send_acl(chan->conn->hcon, skb, flags); + skb->priority = HCI_PRIO_MAX; + l2cap_do_send(chan, skb); } -static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control) +static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control) { if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - control |= L2CAP_SUPER_RCV_NOT_READY; + control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); set_bit(CONN_RNR_SENT, &chan->conn_state); } else - control |= L2CAP_SUPER_RCV_READY; + control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); - control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; + control |= __set_reqseq(chan, chan->buffer_seq); l2cap_send_sframe(chan, control); } @@ -947,7 +976,7 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) list_for_each_entry(chan, &conn->chan_l, list) { struct sock *sk = chan->sk; - if (chan->force_reliable) + if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) sk->sk_err = err; } @@ -986,6 +1015,8 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) chan->ops->close(chan->data); } + hci_chan_del(conn->hchan); + if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) del_timer_sync(&conn->info_timer); @@ -1008,18 +1039,26 @@ static void security_timeout(unsigned long arg) static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) { struct l2cap_conn *conn = hcon->l2cap_data; + struct hci_chan *hchan; if (conn || status) return conn; + hchan = hci_chan_create(hcon); + if (!hchan) + return NULL; + conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC); - if (!conn) + if (!conn) { + hci_chan_del(hchan); return NULL; + } hcon->l2cap_data = conn; conn->hcon = hcon; + conn->hchan = hchan; - BT_DBG("hcon %p conn %p", hcon, conn); + BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan); if (hcon->hdev->le_mtu && hcon->type == LE_LINK) conn->mtu = hcon->hdev->le_mtu; @@ -1043,7 +1082,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) setup_timer(&conn->info_timer, l2cap_info_timeout, (unsigned long) conn); - conn->disc_reason = 0x13; + conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; 
return conn; } @@ -1245,47 +1284,35 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan) __clear_retrans_timer(chan); } -static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) -{ - struct hci_conn *hcon = chan->conn->hcon; - u16 flags; - - BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len); - - if (!chan->flushable && lmp_no_flush_capable(hcon->hdev)) - flags = ACL_START_NO_FLUSH; - else - flags = ACL_START; - - bt_cb(skb)->force_active = chan->force_active; - hci_send_acl(hcon, skb, flags); -} - static void l2cap_streaming_send(struct l2cap_chan *chan) { struct sk_buff *skb; - u16 control, fcs; + u32 control; + u16 fcs; while ((skb = skb_dequeue(&chan->tx_q))) { - control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE); - control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT; - put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE); + control = __get_control(chan, skb->data + L2CAP_HDR_SIZE); + control |= __set_txseq(chan, chan->next_tx_seq); + __put_control(chan, control, skb->data + L2CAP_HDR_SIZE); if (chan->fcs == L2CAP_FCS_CRC16) { - fcs = crc16(0, (u8 *)skb->data, skb->len - 2); - put_unaligned_le16(fcs, skb->data + skb->len - 2); + fcs = crc16(0, (u8 *)skb->data, + skb->len - L2CAP_FCS_SIZE); + put_unaligned_le16(fcs, + skb->data + skb->len - L2CAP_FCS_SIZE); } l2cap_do_send(chan, skb); - chan->next_tx_seq = (chan->next_tx_seq + 1) % 64; + chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); } } -static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq) +static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq) { struct sk_buff *skb, *tx_skb; - u16 control, fcs; + u16 fcs; + u32 control; skb = skb_peek(&chan->tx_q); if (!skb) @@ -1308,20 +1335,23 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq) tx_skb = skb_clone(skb, GFP_ATOMIC); bt_cb(skb)->retries++; - control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); - control &= L2CAP_CTRL_SAR; + + control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); + control &= __get_sar_mask(chan); if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) - control |= L2CAP_CTRL_FINAL; + control |= __set_ctrl_final(chan); - control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) - | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); + control |= __set_reqseq(chan, chan->buffer_seq); + control |= __set_txseq(chan, tx_seq); - put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); + __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); if (chan->fcs == L2CAP_FCS_CRC16) { - fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2); - put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2); + fcs = crc16(0, (u8 *)tx_skb->data, + tx_skb->len - L2CAP_FCS_SIZE); + put_unaligned_le16(fcs, + tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE); } l2cap_do_send(chan, tx_skb); @@ -1330,7 +1360,8 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq) static int l2cap_ertm_send(struct l2cap_chan *chan) { struct sk_buff *skb, *tx_skb; - u16 control, fcs; + u16 fcs; + u32 control; int nsent = 0; if (chan->state != BT_CONNECTED) @@ -1348,20 +1379,22 @@ static int l2cap_ertm_send(struct l2cap_chan *chan) bt_cb(skb)->retries++; - control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); - control &= L2CAP_CTRL_SAR; + control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); + control &= __get_sar_mask(chan); if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) - control |= L2CAP_CTRL_FINAL; + control |= __set_ctrl_final(chan); - 
control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) - | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); - put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); + control |= __set_reqseq(chan, chan->buffer_seq); + control |= __set_txseq(chan, chan->next_tx_seq); + __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); if (chan->fcs == L2CAP_FCS_CRC16) { - fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2); - put_unaligned_le16(fcs, skb->data + tx_skb->len - 2); + fcs = crc16(0, (u8 *)skb->data, + tx_skb->len - L2CAP_FCS_SIZE); + put_unaligned_le16(fcs, skb->data + + tx_skb->len - L2CAP_FCS_SIZE); } l2cap_do_send(chan, tx_skb); @@ -1369,7 +1402,8 @@ static int l2cap_ertm_send(struct l2cap_chan *chan) __set_retrans_timer(chan); bt_cb(skb)->tx_seq = chan->next_tx_seq; - chan->next_tx_seq = (chan->next_tx_seq + 1) % 64; + + chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); if (bt_cb(skb)->retries == 1) chan->unacked_frames++; @@ -1401,12 +1435,12 @@ static int l2cap_retransmit_frames(struct l2cap_chan *chan) static void l2cap_send_ack(struct l2cap_chan *chan) { - u16 control = 0; + u32 control = 0; - control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; + control |= __set_reqseq(chan, chan->buffer_seq); if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - control |= L2CAP_SUPER_RCV_NOT_READY; + control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); set_bit(CONN_RNR_SENT, &chan->conn_state); l2cap_send_sframe(chan, control); return; @@ -1415,20 +1449,20 @@ static void l2cap_send_ack(struct l2cap_chan *chan) if (l2cap_ertm_send(chan) > 0) return; - control |= L2CAP_SUPER_RCV_READY; + control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); l2cap_send_sframe(chan, control); } static void l2cap_send_srejtail(struct l2cap_chan *chan) { struct srej_list *tail; - u16 control; + u32 control; - control = L2CAP_SUPER_SELECT_REJECT; - control |= L2CAP_CTRL_FINAL; + control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); + control |= __set_ctrl_final(chan); tail = list_entry((&chan->srej_l)->prev, struct srej_list, list); - control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; + control |= __set_reqseq(chan, tail->tx_seq); l2cap_send_sframe(chan, control); } @@ -1456,6 +1490,8 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) return -EFAULT; + (*frag)->priority = skb->priority; + sent += count; len -= count; @@ -1465,15 +1501,17 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in return sent; } -static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) +static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, + struct msghdr *msg, size_t len, + u32 priority) { struct sock *sk = chan->sk; struct l2cap_conn *conn = chan->conn; struct sk_buff *skb; - int err, count, hlen = L2CAP_HDR_SIZE + 2; + int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE; struct l2cap_hdr *lh; - BT_DBG("sk %p len %d", sk, (int)len); + BT_DBG("sk %p len %d priority %u", sk, (int)len, priority); count = min_t(unsigned int, (conn->mtu - hlen), len); skb = bt_skb_send_alloc(sk, count + hlen, @@ -1481,6 +1519,8 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct if (!skb) return ERR_PTR(err); + skb->priority = priority; + /* Create L2CAP header */ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); @@ -1495,7 +1535,9 @@ static struct sk_buff 
*l2cap_create_connless_pdu(struct l2cap_chan *chan, struct return skb; } -static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len) +static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, + struct msghdr *msg, size_t len, + u32 priority) { struct sock *sk = chan->sk; struct l2cap_conn *conn = chan->conn; @@ -1511,6 +1553,8 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct ms if (!skb) return ERR_PTR(err); + skb->priority = priority; + /* Create L2CAP header */ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); @@ -1526,12 +1570,12 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct ms static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, - u16 control, u16 sdulen) + u32 control, u16 sdulen) { struct sock *sk = chan->sk; struct l2cap_conn *conn = chan->conn; struct sk_buff *skb; - int err, count, hlen = L2CAP_HDR_SIZE + 2; + int err, count, hlen; struct l2cap_hdr *lh; BT_DBG("sk %p len %d", sk, (int)len); @@ -1539,11 +1583,16 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, if (!conn) return ERR_PTR(-ENOTCONN); + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + hlen = L2CAP_EXT_HDR_SIZE; + else + hlen = L2CAP_ENH_HDR_SIZE; + if (sdulen) - hlen += 2; + hlen += L2CAP_SDULEN_SIZE; if (chan->fcs == L2CAP_FCS_CRC16) - hlen += 2; + hlen += L2CAP_FCS_SIZE; count = min_t(unsigned int, (conn->mtu - hlen), len); skb = bt_skb_send_alloc(sk, count + hlen, @@ -1555,9 +1604,11 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); - put_unaligned_le16(control, skb_put(skb, 2)); + + __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); + if (sdulen) - put_unaligned_le16(sdulen, skb_put(skb, 2)); + put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); if (unlikely(err < 0)) { @@ -1566,7 +1617,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, } if (chan->fcs == L2CAP_FCS_CRC16) - put_unaligned_le16(0, skb_put(skb, 2)); + put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE)); bt_cb(skb)->retries = 0; return skb; @@ -1576,11 +1627,11 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si { struct sk_buff *skb; struct sk_buff_head sar_queue; - u16 control; + u32 control; size_t size = 0; skb_queue_head_init(&sar_queue); - control = L2CAP_SDU_START; + control = __set_ctrl_sar(chan, L2CAP_SAR_START); skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -1593,10 +1644,10 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si size_t buflen; if (len > chan->remote_mps) { - control = L2CAP_SDU_CONTINUE; + control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE); buflen = chan->remote_mps; } else { - control = L2CAP_SDU_END; + control = __set_ctrl_sar(chan, L2CAP_SAR_END); buflen = len; } @@ -1617,15 +1668,16 @@ static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, si return size; } -int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) +int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, + u32 priority) { struct sk_buff *skb; - u16 control; + u32 
control; int err; /* Connectionless channel */ if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { - skb = l2cap_create_connless_pdu(chan, msg, len); + skb = l2cap_create_connless_pdu(chan, msg, len, priority); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -1640,7 +1692,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) return -EMSGSIZE; /* Create a basic PDU */ - skb = l2cap_create_basic_pdu(chan, msg, len); + skb = l2cap_create_basic_pdu(chan, msg, len, priority); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -1652,7 +1704,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) case L2CAP_MODE_STREAMING: /* Entire SDU fits into one PDU */ if (len <= chan->remote_mps) { - control = L2CAP_SDU_UNSEGMENTED; + control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED); skb = l2cap_create_iframe_pdu(chan, msg, len, control, 0); if (IS_ERR(skb)) @@ -1850,6 +1902,37 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val) *ptr += L2CAP_CONF_OPT_SIZE + len; } +static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan) +{ + struct l2cap_conf_efs efs; + + switch(chan->mode) { + case L2CAP_MODE_ERTM: + efs.id = chan->local_id; + efs.stype = chan->local_stype; + efs.msdu = cpu_to_le16(chan->local_msdu); + efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); + efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); + efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); + break; + + case L2CAP_MODE_STREAMING: + efs.id = 1; + efs.stype = L2CAP_SERV_BESTEFFORT; + efs.msdu = cpu_to_le16(chan->local_msdu); + efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); + efs.acc_lat = 0; + efs.flush_to = 0; + break; + + default: + return; + } + + l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs), + (unsigned long) &efs); +} + static void l2cap_ack_timeout(unsigned long arg) { struct l2cap_chan *chan = (void *) arg; @@ -1896,11 +1979,36 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) } } +static inline bool __l2cap_ews_supported(struct l2cap_chan *chan) +{ + return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW; +} + +static inline bool __l2cap_efs_supported(struct l2cap_chan *chan) +{ + return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW; +} + +static inline void l2cap_txwin_setup(struct l2cap_chan *chan) +{ + if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW && + __l2cap_ews_supported(chan)) { + /* use extended control field */ + set_bit(FLAG_EXT_CTRL, &chan->flags); + chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; + } else { + chan->tx_win = min_t(u16, chan->tx_win, + L2CAP_DEFAULT_TX_WINDOW); + chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; + } +} + static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) { struct l2cap_conf_req *req = data; struct l2cap_conf_rfc rfc = { .mode = chan->mode }; void *ptr = req->data; + u16 size; BT_DBG("chan %p", chan); @@ -1913,6 +2021,9 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) break; + if (__l2cap_efs_supported(chan)) + set_bit(FLAG_EFS_ENABLE, &chan->flags); + /* fall through */ default: chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); @@ -1942,17 +2053,27 @@ done: case L2CAP_MODE_ERTM: rfc.mode = L2CAP_MODE_ERTM; - rfc.txwin_size = chan->tx_win; rfc.max_transmit = chan->max_tx; rfc.retrans_timeout = 0; rfc.monitor_timeout = 0; - rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); - if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10) - rfc.max_pdu_size = 
cpu_to_le16(chan->conn->mtu - 10); + + size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - + L2CAP_EXT_HDR_SIZE - + L2CAP_SDULEN_SIZE - + L2CAP_FCS_SIZE); + rfc.max_pdu_size = cpu_to_le16(size); + + l2cap_txwin_setup(chan); + + rfc.txwin_size = min_t(u16, chan->tx_win, + L2CAP_DEFAULT_TX_WINDOW); l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc); + if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) + l2cap_add_opt_efs(&ptr, chan); + if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS)) break; @@ -1961,6 +2082,10 @@ done: chan->fcs = L2CAP_FCS_NONE; l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); } + + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, + chan->tx_win); break; case L2CAP_MODE_STREAMING: @@ -1969,13 +2094,19 @@ done: rfc.max_transmit = 0; rfc.retrans_timeout = 0; rfc.monitor_timeout = 0; - rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); - if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10) - rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); + + size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - + L2CAP_EXT_HDR_SIZE - + L2CAP_SDULEN_SIZE - + L2CAP_FCS_SIZE); + rfc.max_pdu_size = cpu_to_le16(size); l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc); + if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) + l2cap_add_opt_efs(&ptr, chan); + if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS)) break; @@ -2002,8 +2133,11 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) int type, hint, olen; unsigned long val; struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; + struct l2cap_conf_efs efs; + u8 remote_efs = 0; u16 mtu = L2CAP_DEFAULT_MTU; u16 result = L2CAP_CONF_SUCCESS; + u16 size; BT_DBG("chan %p", chan); @@ -2033,7 +2167,22 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) case L2CAP_CONF_FCS: if (val == L2CAP_FCS_NONE) set_bit(CONF_NO_FCS_RECV, &chan->conf_state); + break; + + case L2CAP_CONF_EFS: + remote_efs = 1; + if (olen == sizeof(efs)) + memcpy(&efs, (void *) val, olen); + break; + + case L2CAP_CONF_EWS: + if (!enable_hs) + return -ECONNREFUSED; + set_bit(FLAG_EXT_CTRL, &chan->flags); + set_bit(CONF_EWS_RECV, &chan->conf_state); + chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; + chan->remote_tx_win = val; break; default: @@ -2058,6 +2207,13 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) break; } + if (remote_efs) { + if (__l2cap_efs_supported(chan)) + set_bit(FLAG_EFS_ENABLE, &chan->flags); + else + return -ECONNREFUSED; + } + if (chan->mode != rfc.mode) return -ECONNREFUSED; @@ -2076,7 +2232,6 @@ done: sizeof(rfc), (unsigned long) &rfc); } - if (result == L2CAP_CONF_SUCCESS) { /* Configure output options and let the other side know * which ones we don't like. 
*/ @@ -2089,6 +2244,26 @@ done: } l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu); + if (remote_efs) { + if (chan->local_stype != L2CAP_SERV_NOTRAFIC && + efs.stype != L2CAP_SERV_NOTRAFIC && + efs.stype != chan->local_stype) { + + result = L2CAP_CONF_UNACCEPT; + + if (chan->num_conf_req >= 1) + return -ECONNREFUSED; + + l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, + sizeof(efs), + (unsigned long) &efs); + } else { + /* Send PENDING Conf Rsp */ + result = L2CAP_CONF_PENDING; + set_bit(CONF_LOC_CONF_PEND, &chan->conf_state); + } + } + switch (rfc.mode) { case L2CAP_MODE_BASIC: chan->fcs = L2CAP_FCS_NONE; @@ -2096,13 +2271,20 @@ done: break; case L2CAP_MODE_ERTM: - chan->remote_tx_win = rfc.txwin_size; - chan->remote_max_tx = rfc.max_transmit; + if (!test_bit(CONF_EWS_RECV, &chan->conf_state)) + chan->remote_tx_win = rfc.txwin_size; + else + rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW; - if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10) - rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); + chan->remote_max_tx = rfc.max_transmit; - chan->remote_mps = le16_to_cpu(rfc.max_pdu_size); + size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), + chan->conn->mtu - + L2CAP_EXT_HDR_SIZE - + L2CAP_SDULEN_SIZE - + L2CAP_FCS_SIZE); + rfc.max_pdu_size = cpu_to_le16(size); + chan->remote_mps = size; rfc.retrans_timeout = le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO); @@ -2114,13 +2296,29 @@ done: l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc); + if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { + chan->remote_id = efs.id; + chan->remote_stype = efs.stype; + chan->remote_msdu = le16_to_cpu(efs.msdu); + chan->remote_flush_to = + le32_to_cpu(efs.flush_to); + chan->remote_acc_lat = + le32_to_cpu(efs.acc_lat); + chan->remote_sdu_itime = + le32_to_cpu(efs.sdu_itime); + l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, + sizeof(efs), (unsigned long) &efs); + } break; case L2CAP_MODE_STREAMING: - if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10) - rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); - - chan->remote_mps = le16_to_cpu(rfc.max_pdu_size); + size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), + chan->conn->mtu - + L2CAP_EXT_HDR_SIZE - + L2CAP_SDULEN_SIZE - + L2CAP_FCS_SIZE); + rfc.max_pdu_size = cpu_to_le16(size); + chan->remote_mps = size; set_bit(CONF_MODE_DONE, &chan->conf_state); @@ -2153,6 +2351,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi int type, olen; unsigned long val; struct l2cap_conf_rfc rfc; + struct l2cap_conf_efs efs; BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data); @@ -2188,6 +2387,26 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), (unsigned long) &rfc); break; + + case L2CAP_CONF_EWS: + chan->tx_win = min_t(u16, val, + L2CAP_DEFAULT_EXT_WINDOW); + l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, + chan->tx_win); + break; + + case L2CAP_CONF_EFS: + if (olen == sizeof(efs)) + memcpy(&efs, (void *)val, olen); + + if (chan->local_stype != L2CAP_SERV_NOTRAFIC && + efs.stype != L2CAP_SERV_NOTRAFIC && + efs.stype != chan->local_stype) + return -ECONNREFUSED; + + l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, + sizeof(efs), (unsigned long) &efs); + break; } } @@ -2196,13 +2415,23 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi chan->mode = rfc.mode; - if (*result == L2CAP_CONF_SUCCESS) { + if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) { switch (rfc.mode) { case L2CAP_MODE_ERTM: 
chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); chan->mps = le16_to_cpu(rfc.max_pdu_size); + + if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { + chan->local_msdu = le16_to_cpu(efs.msdu); + chan->local_sdu_itime = + le32_to_cpu(efs.sdu_itime); + chan->local_acc_lat = le32_to_cpu(efs.acc_lat); + chan->local_flush_to = + le32_to_cpu(efs.flush_to); + } break; + case L2CAP_MODE_STREAMING: chan->mps = le16_to_cpu(rfc.max_pdu_size); } @@ -2330,7 +2559,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd /* Check if the ACL is secure enough (if not SDP) */ if (psm != cpu_to_le16(0x0001) && !hci_conn_check_link_mode(conn->hcon)) { - conn->disc_reason = 0x05; + conn->disc_reason = HCI_ERROR_AUTH_FAILURE; result = L2CAP_CR_SEC_BLOCK; goto response; } @@ -2602,6 +2831,21 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr chan->num_conf_req++; } + /* Got Conf Rsp PENDING from remote side and asume we sent + Conf Rsp PENDING in the code above */ + if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) && + test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { + + /* check compatibility */ + + clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state); + set_bit(CONF_OUTPUT_DONE, &chan->conf_state); + + l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, + l2cap_build_conf_rsp(chan, rsp, + L2CAP_CONF_SUCCESS, 0x0000), rsp); + } + unlock: bh_unlock_sock(sk); return 0; @@ -2631,8 +2875,33 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr switch (result) { case L2CAP_CONF_SUCCESS: l2cap_conf_rfc_get(chan, rsp->data, len); + clear_bit(CONF_REM_CONF_PEND, &chan->conf_state); break; + case L2CAP_CONF_PENDING: + set_bit(CONF_REM_CONF_PEND, &chan->conf_state); + + if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { + char buf[64]; + + len = l2cap_parse_conf_rsp(chan, rsp->data, len, + buf, &result); + if (len < 0) { + l2cap_send_disconn_req(conn, chan, ECONNRESET); + goto done; + } + + /* check compatibility */ + + clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state); + set_bit(CONF_OUTPUT_DONE, &chan->conf_state); + + l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, + l2cap_build_conf_rsp(chan, buf, + L2CAP_CONF_SUCCESS, 0x0000), buf); + } + goto done; + case L2CAP_CONF_UNACCEPT: if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { char req[64]; @@ -2782,15 +3051,25 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm if (!disable_ertm) feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING | L2CAP_FEAT_FCS; + if (enable_hs) + feat_mask |= L2CAP_FEAT_EXT_FLOW + | L2CAP_FEAT_EXT_WINDOW; + put_unaligned_le32(feat_mask, rsp->data); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), buf); } else if (type == L2CAP_IT_FIXED_CHAN) { u8 buf[12]; struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; + + if (enable_hs) + l2cap_fixed_chan[0] |= L2CAP_FC_A2MP; + else + l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP; + rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); - memcpy(buf + 4, l2cap_fixed_chan, 8); + memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), buf); } else { @@ -2857,6 +3136,165 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm return 0; } +static inline int l2cap_create_channel_req(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + void *data) +{ + struct 
l2cap_create_chan_req *req = data; + struct l2cap_create_chan_rsp rsp; + u16 psm, scid; + + if (cmd_len != sizeof(*req)) + return -EPROTO; + + if (!enable_hs) + return -EINVAL; + + psm = le16_to_cpu(req->psm); + scid = le16_to_cpu(req->scid); + + BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id); + + /* Placeholder: Always reject */ + rsp.dcid = 0; + rsp.scid = cpu_to_le16(scid); + rsp.result = L2CAP_CR_NO_MEM; + rsp.status = L2CAP_CS_NO_INFO; + + l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP, + sizeof(rsp), &rsp); + + return 0; +} + +static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, void *data) +{ + BT_DBG("conn %p", conn); + + return l2cap_connect_rsp(conn, cmd, data); +} + +static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident, + u16 icid, u16 result) +{ + struct l2cap_move_chan_rsp rsp; + + BT_DBG("icid %d, result %d", icid, result); + + rsp.icid = cpu_to_le16(icid); + rsp.result = cpu_to_le16(result); + + l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp); +} + +static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn, + struct l2cap_chan *chan, u16 icid, u16 result) +{ + struct l2cap_move_chan_cfm cfm; + u8 ident; + + BT_DBG("icid %d, result %d", icid, result); + + ident = l2cap_get_ident(conn); + if (chan) + chan->ident = ident; + + cfm.icid = cpu_to_le16(icid); + cfm.result = cpu_to_le16(result); + + l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm); +} + +static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident, + u16 icid) +{ + struct l2cap_move_chan_cfm_rsp rsp; + + BT_DBG("icid %d", icid); + + rsp.icid = cpu_to_le16(icid); + l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp); +} + +static inline int l2cap_move_channel_req(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) +{ + struct l2cap_move_chan_req *req = data; + u16 icid = 0; + u16 result = L2CAP_MR_NOT_ALLOWED; + + if (cmd_len != sizeof(*req)) + return -EPROTO; + + icid = le16_to_cpu(req->icid); + + BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id); + + if (!enable_hs) + return -EINVAL; + + /* Placeholder: Always refuse */ + l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result); + + return 0; +} + +static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) +{ + struct l2cap_move_chan_rsp *rsp = data; + u16 icid, result; + + if (cmd_len != sizeof(*rsp)) + return -EPROTO; + + icid = le16_to_cpu(rsp->icid); + result = le16_to_cpu(rsp->result); + + BT_DBG("icid %d, result %d", icid, result); + + /* Placeholder: Always unconfirmed */ + l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED); + + return 0; +} + +static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) +{ + struct l2cap_move_chan_cfm *cfm = data; + u16 icid, result; + + if (cmd_len != sizeof(*cfm)) + return -EPROTO; + + icid = le16_to_cpu(cfm->icid); + result = le16_to_cpu(cfm->result); + + BT_DBG("icid %d, result %d", icid, result); + + l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); + + return 0; +} + +static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data) +{ + struct l2cap_move_chan_cfm_rsp *rsp = data; + u16 icid; + + if (cmd_len != sizeof(*rsp)) + return -EPROTO; + + icid = le16_to_cpu(rsp->icid); + + BT_DBG("icid %d", icid); + + return 0; +} + 
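The hunks above replace every open-coded "(seq + 1) % 64" with __next_seq() and every manual "(a - b) % 64; if (< 0) += 64" offset computation with __seq_offset(), so the same ERTM paths can serve both the 6-bit enhanced and the 14-bit extended sequence spaces. The helpers themselves are defined in include/net/bluetooth/l2cap.h rather than in this diff; the following is only an illustrative sketch of their intended semantics, assuming a reduced structure (seq_space) that carries just tx_win_max, with sketch_* names that are hypothetical and not part of the patch:

#include <linux/types.h>

/* Sketch only: the real helpers operate on struct l2cap_chan; a reduced
 * struct carrying just tx_win_max (63 for the enhanced control field,
 * 0x3fff for the extended one) is assumed here for illustration. */
struct seq_space {
	u16 tx_win_max;
};

/* advance a sequence number, wrapping within the channel's sequence space */
static inline u16 sketch_next_seq(const struct seq_space *c, u16 seq)
{
	return (seq + 1) % (c->tx_win_max + 1);
}

/* forward distance from seq2 to seq1, modulo the sequence space; stands in
 * for the old "(a - b) % 64" plus negative-wrap fixup seen in the hunks above */
static inline u16 sketch_seq_offset(const struct seq_space *c, u16 seq1, u16 seq2)
{
	if (seq1 >= seq2)
		return seq1 - seq2;
	return c->tx_win_max + 1 - seq2 + seq1;
}

With tx_win_max left at L2CAP_DEFAULT_TX_WINDOW this reduces to the previous modulo-64 arithmetic, while channels that negotiate the L2CAP_CONF_EWS option and set FLAG_EXT_CTRL get the full extended window range.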
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency, u16 to_multiplier) { @@ -2969,6 +3407,30 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, err = l2cap_information_rsp(conn, cmd, data); break; + case L2CAP_CREATE_CHAN_REQ: + err = l2cap_create_channel_req(conn, cmd, cmd_len, data); + break; + + case L2CAP_CREATE_CHAN_RSP: + err = l2cap_create_channel_rsp(conn, cmd, data); + break; + + case L2CAP_MOVE_CHAN_REQ: + err = l2cap_move_channel_req(conn, cmd, cmd_len, data); + break; + + case L2CAP_MOVE_CHAN_RSP: + err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data); + break; + + case L2CAP_MOVE_CHAN_CFM: + err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data); + break; + + case L2CAP_MOVE_CHAN_CFM_RSP: + err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data); + break; + default: BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code); err = -EINVAL; @@ -3047,10 +3509,15 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) { u16 our_fcs, rcv_fcs; - int hdr_size = L2CAP_HDR_SIZE + 2; + int hdr_size; + + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + hdr_size = L2CAP_EXT_HDR_SIZE; + else + hdr_size = L2CAP_ENH_HDR_SIZE; if (chan->fcs == L2CAP_FCS_CRC16) { - skb_trim(skb, skb->len - 2); + skb_trim(skb, skb->len - L2CAP_FCS_SIZE); rcv_fcs = get_unaligned_le16(skb->data + skb->len); our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); @@ -3062,14 +3529,14 @@ static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) { - u16 control = 0; + u32 control = 0; chan->frames_sent = 0; - control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; + control |= __set_reqseq(chan, chan->buffer_seq); if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - control |= L2CAP_SUPER_RCV_NOT_READY; + control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); l2cap_send_sframe(chan, control); set_bit(CONN_RNR_SENT, &chan->conn_state); } @@ -3081,12 +3548,12 @@ static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && chan->frames_sent == 0) { - control |= L2CAP_SUPER_RCV_READY; + control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); l2cap_send_sframe(chan, control); } } -static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar) +static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar) { struct sk_buff *next_skb; int tx_seq_offset, next_tx_seq_offset; @@ -3100,18 +3567,14 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, return 0; } - tx_seq_offset = (tx_seq - chan->buffer_seq) % 64; - if (tx_seq_offset < 0) - tx_seq_offset += 64; + tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); do { if (bt_cb(next_skb)->tx_seq == tx_seq) return -EINVAL; - next_tx_seq_offset = (bt_cb(next_skb)->tx_seq - - chan->buffer_seq) % 64; - if (next_tx_seq_offset < 0) - next_tx_seq_offset += 64; + next_tx_seq_offset = __seq_offset(chan, + bt_cb(next_skb)->tx_seq, chan->buffer_seq); if (next_tx_seq_offset > tx_seq_offset) { __skb_queue_before(&chan->srej_q, next_skb, skb); @@ -3147,24 +3610,24 @@ static void append_skb_frag(struct sk_buff *skb, skb->truesize += new_frag->truesize; } -static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control) +static int l2cap_reassemble_sdu(struct l2cap_chan *chan, 
struct sk_buff *skb, u32 control) { int err = -EINVAL; - switch (control & L2CAP_CTRL_SAR) { - case L2CAP_SDU_UNSEGMENTED: + switch (__get_ctrl_sar(chan, control)) { + case L2CAP_SAR_UNSEGMENTED: if (chan->sdu) break; err = chan->ops->recv(chan->data, skb); break; - case L2CAP_SDU_START: + case L2CAP_SAR_START: if (chan->sdu) break; chan->sdu_len = get_unaligned_le16(skb->data); - skb_pull(skb, 2); + skb_pull(skb, L2CAP_SDULEN_SIZE); if (chan->sdu_len > chan->imtu) { err = -EMSGSIZE; @@ -3181,7 +3644,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1 err = 0; break; - case L2CAP_SDU_CONTINUE: + case L2CAP_SAR_CONTINUE: if (!chan->sdu) break; @@ -3195,7 +3658,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1 err = 0; break; - case L2CAP_SDU_END: + case L2CAP_SAR_END: if (!chan->sdu) break; @@ -3230,14 +3693,14 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u1 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) { - u16 control; + u32 control; BT_DBG("chan %p, Enter local busy", chan); set_bit(CONN_LOCAL_BUSY, &chan->conn_state); - control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; - control |= L2CAP_SUPER_RCV_NOT_READY; + control = __set_reqseq(chan, chan->buffer_seq); + control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); l2cap_send_sframe(chan, control); set_bit(CONN_RNR_SENT, &chan->conn_state); @@ -3247,13 +3710,14 @@ static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan) { - u16 control; + u32 control; if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) goto done; - control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; - control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL; + control = __set_reqseq(chan, chan->buffer_seq); + control |= __set_ctrl_poll(chan); + control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); l2cap_send_sframe(chan, control); chan->retry_count = 1; @@ -3279,10 +3743,10 @@ void l2cap_chan_busy(struct l2cap_chan *chan, int busy) } } -static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq) +static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq) { struct sk_buff *skb; - u16 control; + u32 control; while ((skb = skb_peek(&chan->srej_q)) && !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { @@ -3292,7 +3756,7 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq) break; skb = skb_dequeue(&chan->srej_q); - control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; + control = __set_ctrl_sar(chan, bt_cb(skb)->sar); err = l2cap_reassemble_sdu(chan, skb, control); if (err < 0) { @@ -3300,16 +3764,15 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq) break; } - chan->buffer_seq_srej = - (chan->buffer_seq_srej + 1) % 64; - tx_seq = (tx_seq + 1) % 64; + chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej); + tx_seq = __next_seq(chan, tx_seq); } } -static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq) +static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq) { struct srej_list *l, *tmp; - u16 control; + u32 control; list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { if (l->tx_seq == tx_seq) { @@ -3317,45 +3780,48 @@ static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq) kfree(l); return; } - control = L2CAP_SUPER_SELECT_REJECT; - control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; + control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); + control |= __set_reqseq(chan, l->tx_seq); 
l2cap_send_sframe(chan, control); list_del(&l->list); list_add_tail(&l->list, &chan->srej_l); } } -static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq) +static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq) { struct srej_list *new; - u16 control; + u32 control; while (tx_seq != chan->expected_tx_seq) { - control = L2CAP_SUPER_SELECT_REJECT; - control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; + control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); + control |= __set_reqseq(chan, chan->expected_tx_seq); l2cap_send_sframe(chan, control); new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); new->tx_seq = chan->expected_tx_seq; - chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; + + chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); + list_add_tail(&new->list, &chan->srej_l); } - chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; + + chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); } -static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb) +static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) { - u8 tx_seq = __get_txseq(rx_control); - u8 req_seq = __get_reqseq(rx_control); - u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT; + u16 tx_seq = __get_txseq(chan, rx_control); + u16 req_seq = __get_reqseq(chan, rx_control); + u8 sar = __get_ctrl_sar(chan, rx_control); int tx_seq_offset, expected_tx_seq_offset; int num_to_ack = (chan->tx_win/6) + 1; int err = 0; - BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len, + BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len, tx_seq, rx_control); - if (L2CAP_CTRL_FINAL & rx_control && + if (__is_ctrl_final(chan, rx_control) && test_bit(CONN_WAIT_F, &chan->conn_state)) { __clear_monitor_timer(chan); if (chan->unacked_frames > 0) @@ -3366,9 +3832,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont chan->expected_ack_seq = req_seq; l2cap_drop_acked_frames(chan); - tx_seq_offset = (tx_seq - chan->buffer_seq) % 64; - if (tx_seq_offset < 0) - tx_seq_offset += 64; + tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); /* invalid tx_seq */ if (tx_seq_offset >= chan->tx_win) { @@ -3416,10 +3880,8 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont l2cap_send_srejframe(chan, tx_seq); } } else { - expected_tx_seq_offset = - (chan->expected_tx_seq - chan->buffer_seq) % 64; - if (expected_tx_seq_offset < 0) - expected_tx_seq_offset += 64; + expected_tx_seq_offset = __seq_offset(chan, + chan->expected_tx_seq, chan->buffer_seq); /* duplicated tx_seq */ if (tx_seq_offset < expected_tx_seq_offset) @@ -3444,7 +3906,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont return 0; expected: - chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; + chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { bt_cb(skb)->tx_seq = tx_seq; @@ -3454,13 +3916,14 @@ expected: } err = l2cap_reassemble_sdu(chan, skb, rx_control); - chan->buffer_seq = (chan->buffer_seq + 1) % 64; + chan->buffer_seq = __next_seq(chan, chan->buffer_seq); + if (err < 0) { l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); return err; } - if (rx_control & L2CAP_CTRL_FINAL) { + if (__is_ctrl_final(chan, rx_control)) { if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) l2cap_retransmit_frames(chan); } @@ -3478,15 +3941,15 @@ 
drop: return 0; } -static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control) +static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control) { - BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control), - rx_control); + BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, + __get_reqseq(chan, rx_control), rx_control); - chan->expected_ack_seq = __get_reqseq(rx_control); + chan->expected_ack_seq = __get_reqseq(chan, rx_control); l2cap_drop_acked_frames(chan); - if (rx_control & L2CAP_CTRL_POLL) { + if (__is_ctrl_poll(chan, rx_control)) { set_bit(CONN_SEND_FBIT, &chan->conn_state); if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && @@ -3499,7 +3962,7 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co l2cap_send_i_or_rr_or_rnr(chan); } - } else if (rx_control & L2CAP_CTRL_FINAL) { + } else if (__is_ctrl_final(chan, rx_control)) { clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) @@ -3518,18 +3981,18 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co } } -static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control) +static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control) { - u8 tx_seq = __get_reqseq(rx_control); + u16 tx_seq = __get_reqseq(chan, rx_control); - BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); + BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); chan->expected_ack_seq = tx_seq; l2cap_drop_acked_frames(chan); - if (rx_control & L2CAP_CTRL_FINAL) { + if (__is_ctrl_final(chan, rx_control)) { if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) l2cap_retransmit_frames(chan); } else { @@ -3539,15 +4002,15 @@ static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_c set_bit(CONN_REJ_ACT, &chan->conn_state); } } -static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control) +static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control) { - u8 tx_seq = __get_reqseq(rx_control); + u16 tx_seq = __get_reqseq(chan, rx_control); - BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); + BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); - if (rx_control & L2CAP_CTRL_POLL) { + if (__is_ctrl_poll(chan, rx_control)) { chan->expected_ack_seq = tx_seq; l2cap_drop_acked_frames(chan); @@ -3560,7 +4023,7 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_ chan->srej_save_reqseq = tx_seq; set_bit(CONN_SREJ_ACT, &chan->conn_state); } - } else if (rx_control & L2CAP_CTRL_FINAL) { + } else if (__is_ctrl_final(chan, rx_control)) { if (test_bit(CONN_SREJ_ACT, &chan->conn_state) && chan->srej_save_reqseq == tx_seq) clear_bit(CONN_SREJ_ACT, &chan->conn_state); @@ -3575,37 +4038,39 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_ } } -static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control) +static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control) { - u8 tx_seq = __get_reqseq(rx_control); + u16 tx_seq = __get_reqseq(chan, rx_control); - BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); + BT_DBG("chan %p, req_seq %d ctrl 
0x%8.8x", chan, tx_seq, rx_control); set_bit(CONN_REMOTE_BUSY, &chan->conn_state); chan->expected_ack_seq = tx_seq; l2cap_drop_acked_frames(chan); - if (rx_control & L2CAP_CTRL_POLL) + if (__is_ctrl_poll(chan, rx_control)) set_bit(CONN_SEND_FBIT, &chan->conn_state); if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) { __clear_retrans_timer(chan); - if (rx_control & L2CAP_CTRL_POLL) + if (__is_ctrl_poll(chan, rx_control)) l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL); return; } - if (rx_control & L2CAP_CTRL_POLL) + if (__is_ctrl_poll(chan, rx_control)) { l2cap_send_srejtail(chan); - else - l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY); + } else { + rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR); + l2cap_send_sframe(chan, rx_control); + } } -static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb) +static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) { - BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len); + BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len); - if (L2CAP_CTRL_FINAL & rx_control && + if (__is_ctrl_final(chan, rx_control) && test_bit(CONN_WAIT_F, &chan->conn_state)) { __clear_monitor_timer(chan); if (chan->unacked_frames > 0) @@ -3613,20 +4078,20 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont clear_bit(CONN_WAIT_F, &chan->conn_state); } - switch (rx_control & L2CAP_CTRL_SUPERVISE) { - case L2CAP_SUPER_RCV_READY: + switch (__get_ctrl_super(chan, rx_control)) { + case L2CAP_SUPER_RR: l2cap_data_channel_rrframe(chan, rx_control); break; - case L2CAP_SUPER_REJECT: + case L2CAP_SUPER_REJ: l2cap_data_channel_rejframe(chan, rx_control); break; - case L2CAP_SUPER_SELECT_REJECT: + case L2CAP_SUPER_SREJ: l2cap_data_channel_srejframe(chan, rx_control); break; - case L2CAP_SUPER_RCV_NOT_READY: + case L2CAP_SUPER_RNR: l2cap_data_channel_rnrframe(chan, rx_control); break; } @@ -3638,12 +4103,12 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb) { struct l2cap_chan *chan = l2cap_pi(sk)->chan; - u16 control; - u8 req_seq; + u32 control; + u16 req_seq; int len, next_tx_seq_offset, req_seq_offset; - control = get_unaligned_le16(skb->data); - skb_pull(skb, 2); + control = __get_control(chan, skb->data); + skb_pull(skb, __ctrl_size(chan)); len = skb->len; /* @@ -3654,26 +4119,23 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb) if (l2cap_check_fcs(chan, skb)) goto drop; - if (__is_sar_start(control) && __is_iframe(control)) - len -= 2; + if (__is_sar_start(chan, control) && !__is_sframe(chan, control)) + len -= L2CAP_SDULEN_SIZE; if (chan->fcs == L2CAP_FCS_CRC16) - len -= 2; + len -= L2CAP_FCS_SIZE; if (len > chan->mps) { l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); goto drop; } - req_seq = __get_reqseq(control); - req_seq_offset = (req_seq - chan->expected_ack_seq) % 64; - if (req_seq_offset < 0) - req_seq_offset += 64; + req_seq = __get_reqseq(chan, control); - next_tx_seq_offset = - (chan->next_tx_seq - chan->expected_ack_seq) % 64; - if (next_tx_seq_offset < 0) - next_tx_seq_offset += 64; + req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq); + + next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq, + chan->expected_ack_seq); /* check for invalid req-seq */ if (req_seq_offset > next_tx_seq_offset) { @@ -3681,7 +4143,7 @@ static int l2cap_ertm_data_rcv(struct 
sock *sk, struct sk_buff *skb) goto drop; } - if (__is_iframe(control)) { + if (!__is_sframe(chan, control)) { if (len < 0) { l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); goto drop; @@ -3709,8 +4171,8 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk { struct l2cap_chan *chan; struct sock *sk = NULL; - u16 control; - u8 tx_seq; + u32 control; + u16 tx_seq; int len; chan = l2cap_get_chan_by_scid(conn, cid); @@ -3751,23 +4213,23 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk goto done; case L2CAP_MODE_STREAMING: - control = get_unaligned_le16(skb->data); - skb_pull(skb, 2); + control = __get_control(chan, skb->data); + skb_pull(skb, __ctrl_size(chan)); len = skb->len; if (l2cap_check_fcs(chan, skb)) goto drop; - if (__is_sar_start(control)) - len -= 2; + if (__is_sar_start(chan, control)) + len -= L2CAP_SDULEN_SIZE; if (chan->fcs == L2CAP_FCS_CRC16) - len -= 2; + len -= L2CAP_FCS_SIZE; - if (len > chan->mps || len < 0 || __is_sframe(control)) + if (len > chan->mps || len < 0 || __is_sframe(chan, control)) goto drop; - tx_seq = __get_txseq(control); + tx_seq = __get_txseq(chan, control); if (chan->expected_tx_seq != tx_seq) { /* Frame(s) missing - must discard partial SDU */ @@ -3779,7 +4241,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk /* TODO: Notify userland of missing data */ } - chan->expected_tx_seq = (tx_seq + 1) % 64; + chan->expected_tx_seq = __next_seq(chan, tx_seq); if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE) l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); @@ -3933,12 +4395,12 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) { lm1 |= HCI_LM_ACCEPT; - if (c->role_switch) + if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) lm1 |= HCI_LM_MASTER; exact++; } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { lm2 |= HCI_LM_ACCEPT; - if (c->role_switch) + if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) lm2 |= HCI_LM_MASTER; } } @@ -3973,7 +4435,7 @@ static int l2cap_disconn_ind(struct hci_conn *hcon) BT_DBG("hcon %p", hcon); if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn) - return 0x13; + return HCI_ERROR_REMOTE_USER_TERM; return conn->disc_reason; } @@ -4306,3 +4768,6 @@ void l2cap_exit(void) module_param(disable_ertm, bool, 0644); MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode"); + +module_param(enable_hs, bool, 0644); +MODULE_PARM_DESC(enable_hs, "Enable High Speed"); diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 5c406d3136f7..e2e785c74630 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -334,7 +334,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us opts.mode = chan->mode; opts.fcs = chan->fcs; opts.max_tx = chan->max_tx; - opts.txwin_size = (__u16)chan->tx_win; + opts.txwin_size = chan->tx_win; len = min_t(unsigned int, len, sizeof(opts)); if (copy_to_user(optval, (char *) &opts, len)) @@ -359,10 +359,10 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us break; } - if (chan->role_switch) + if (test_bit(FLAG_ROLE_SWITCH, &chan->flags)) opt |= L2CAP_LM_MASTER; - if (chan->force_reliable) + if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) opt |= L2CAP_LM_RELIABLE; if (put_user(opt, (u32 __user *) optval)) @@ -449,7 +449,8 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch break; case BT_FLUSHABLE: 
- if (put_user(chan->flushable, (u32 __user *) optval)) + if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags), + (u32 __user *) optval)) err = -EFAULT; break; @@ -461,7 +462,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch break; } - pwr.force_active = chan->force_active; + pwr.force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags); len = min_t(unsigned int, len, sizeof(pwr)); if (copy_to_user(optval, (char *) &pwr, len)) @@ -469,6 +470,16 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch break; + case BT_CHANNEL_POLICY: + if (!enable_hs) { + err = -ENOPROTOOPT; + break; + } + + if (put_user(chan->chan_policy, (u32 __user *) optval)) + err = -EFAULT; + break; + default: err = -ENOPROTOOPT; break; @@ -503,7 +514,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us opts.mode = chan->mode; opts.fcs = chan->fcs; opts.max_tx = chan->max_tx; - opts.txwin_size = (__u16)chan->tx_win; + opts.txwin_size = chan->tx_win; len = min_t(unsigned int, sizeof(opts), optlen); if (copy_from_user((char *) &opts, optval, len)) { @@ -511,7 +522,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us break; } - if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) { + if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) { err = -EINVAL; break; } @@ -535,7 +546,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us chan->omtu = opts.omtu; chan->fcs = opts.fcs; chan->max_tx = opts.max_tx; - chan->tx_win = (__u8)opts.txwin_size; + chan->tx_win = opts.txwin_size; break; case L2CAP_LM: @@ -551,8 +562,15 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us if (opt & L2CAP_LM_SECURE) chan->sec_level = BT_SECURITY_HIGH; - chan->role_switch = (opt & L2CAP_LM_MASTER); - chan->force_reliable = (opt & L2CAP_LM_RELIABLE); + if (opt & L2CAP_LM_MASTER) + set_bit(FLAG_ROLE_SWITCH, &chan->flags); + else + clear_bit(FLAG_ROLE_SWITCH, &chan->flags); + + if (opt & L2CAP_LM_RELIABLE) + set_bit(FLAG_FORCE_RELIABLE, &chan->flags); + else + clear_bit(FLAG_FORCE_RELIABLE, &chan->flags); break; default: @@ -658,7 +676,10 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch } } - chan->flushable = opt; + if (opt) + set_bit(FLAG_FLUSHABLE, &chan->flags); + else + clear_bit(FLAG_FLUSHABLE, &chan->flags); break; case BT_POWER: @@ -675,7 +696,36 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch err = -EFAULT; break; } - chan->force_active = pwr.force_active; + + if (pwr.force_active) + set_bit(FLAG_FORCE_ACTIVE, &chan->flags); + else + clear_bit(FLAG_FORCE_ACTIVE, &chan->flags); + break; + + case BT_CHANNEL_POLICY: + if (!enable_hs) { + err = -ENOPROTOOPT; + break; + } + + if (get_user(opt, (u32 __user *) optval)) { + err = -EFAULT; + break; + } + + if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) { + err = -EINVAL; + break; + } + + if (chan->mode != L2CAP_MODE_ERTM && + chan->mode != L2CAP_MODE_STREAMING) { + err = -EOPNOTSUPP; + break; + } + + chan->chan_policy = (u8) opt; break; default: @@ -709,7 +759,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms return -ENOTCONN; } - err = l2cap_chan_send(chan, msg, len); + err = l2cap_chan_send(chan, msg, len, sk->sk_priority); release_sock(sk); return err; @@ -931,11 +981,9 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) chan->fcs = pchan->fcs; chan->max_tx = pchan->max_tx; chan->tx_win = pchan->tx_win; 
+ chan->tx_win_max = pchan->tx_win_max; chan->sec_level = pchan->sec_level; - chan->role_switch = pchan->role_switch; - chan->force_reliable = pchan->force_reliable; - chan->flushable = pchan->flushable; - chan->force_active = pchan->force_active; + chan->flags = pchan->flags; security_sk_clone(parent, sk); } else { @@ -964,12 +1012,10 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) chan->max_tx = L2CAP_DEFAULT_MAX_TX; chan->fcs = L2CAP_FCS_CRC16; chan->tx_win = L2CAP_DEFAULT_TX_WINDOW; + chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; chan->sec_level = BT_SECURITY_LOW; - chan->role_switch = 0; - chan->force_reliable = 0; - chan->flushable = BT_FLUSHABLE_OFF; - chan->force_active = BT_POWER_FORCE_ACTIVE_ON; - + chan->flags = 0; + set_bit(FLAG_FORCE_ACTIVE, &chan->flags); } /* Default config options */ diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 2c7634296866..94739d3c4f59 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -33,22 +33,23 @@ #define MGMT_VERSION 0 #define MGMT_REVISION 1 +#define INQUIRY_LEN_BREDR 0x08 /* TGAP(100) */ + struct pending_cmd { struct list_head list; - __u16 opcode; + u16 opcode; int index; void *param; struct sock *sk; void *user_data; }; -static LIST_HEAD(cmd_list); - static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) { struct sk_buff *skb; struct mgmt_hdr *hdr; struct mgmt_ev_cmd_status *ev; + int err; BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status); @@ -66,10 +67,11 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) ev->status = status; put_unaligned_le16(cmd, &ev->opcode); - if (sock_queue_rcv_skb(sk, skb) < 0) + err = sock_queue_rcv_skb(sk, skb); + if (err < 0) kfree_skb(skb); - return 0; + return err; } static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp, @@ -78,6 +80,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp, struct sk_buff *skb; struct mgmt_hdr *hdr; struct mgmt_ev_cmd_complete *ev; + int err; BT_DBG("sock %p", sk); @@ -97,10 +100,11 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp, if (rp) memcpy(ev->data, rp, rp_len); - if (sock_queue_rcv_skb(sk, skb) < 0) + err = sock_queue_rcv_skb(sk, skb); + if (err < 0) kfree_skb(skb); - return 0; + return err;; } static int read_version(struct sock *sk) @@ -120,6 +124,7 @@ static int read_index_list(struct sock *sk) { struct mgmt_rp_read_index_list *rp; struct list_head *p; + struct hci_dev *d; size_t rp_len; u16 count; int i, err; @@ -143,10 +148,9 @@ static int read_index_list(struct sock *sk) put_unaligned_le16(count, &rp->num_controllers); i = 0; - list_for_each(p, &hci_dev_list) { - struct hci_dev *d = list_entry(p, struct hci_dev, list); - - hci_del_off_timer(d); + list_for_each_entry(d, &hci_dev_list, list) { + if (test_and_clear_bit(HCI_AUTO_OFF, &d->flags)) + cancel_delayed_work(&d->power_off); if (test_bit(HCI_SETUP, &d->flags)) continue; @@ -176,7 +180,8 @@ static int read_controller_info(struct sock *sk, u16 index) if (!hdev) return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV); - hci_del_off_timer(hdev); + if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags)) + cancel_delayed_work_sync(&hdev->power_off); hci_dev_lock_bh(hdev); @@ -221,7 +226,8 @@ static void mgmt_pending_free(struct pending_cmd *cmd) } static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, - u16 index, void *data, u16 len) + struct hci_dev *hdev, + void *data, u16 len) { struct pending_cmd *cmd; @@ -230,7 +236,7 @@ static struct 
pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, return NULL; cmd->opcode = opcode; - cmd->index = index; + cmd->index = hdev->id; cmd->param = kmalloc(len, GFP_ATOMIC); if (!cmd->param) { @@ -244,48 +250,36 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, cmd->sk = sk; sock_hold(sk); - list_add(&cmd->list, &cmd_list); + list_add(&cmd->list, &hdev->mgmt_pending); return cmd; } -static void mgmt_pending_foreach(u16 opcode, int index, +static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, void (*cb)(struct pending_cmd *cmd, void *data), void *data) { struct list_head *p, *n; - list_for_each_safe(p, n, &cmd_list) { + list_for_each_safe(p, n, &hdev->mgmt_pending) { struct pending_cmd *cmd; cmd = list_entry(p, struct pending_cmd, list); - if (cmd->opcode != opcode) - continue; - - if (index >= 0 && cmd->index != index) + if (opcode > 0 && cmd->opcode != opcode) continue; cb(cmd, data); } } -static struct pending_cmd *mgmt_pending_find(u16 opcode, int index) +static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev) { - struct list_head *p; - - list_for_each(p, &cmd_list) { - struct pending_cmd *cmd; - - cmd = list_entry(p, struct pending_cmd, list); - - if (cmd->opcode != opcode) - continue; - - if (index >= 0 && cmd->index != index) - continue; + struct pending_cmd *cmd; - return cmd; + list_for_each_entry(cmd, &hdev->mgmt_pending, list) { + if (cmd->opcode == opcode) + return cmd; } return NULL; @@ -323,12 +317,12 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len) goto failed; } - if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) { + if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) { err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY); goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -337,7 +331,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len) if (cp->val) queue_work(hdev->workqueue, &hdev->power_on); else - queue_work(hdev->workqueue, &hdev->power_off); + queue_work(hdev->workqueue, &hdev->power_off.work); err = 0; @@ -350,7 +344,7 @@ failed: static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, u16 len) { - struct mgmt_mode *cp; + struct mgmt_cp_set_discoverable *cp; struct hci_dev *hdev; struct pending_cmd *cmd; u8 scan; @@ -374,8 +368,8 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, goto failed; } - if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) || - mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) { + if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || + mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY); goto failed; } @@ -386,7 +380,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -396,11 +390,16 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, if (cp->val) scan |= SCAN_INQUIRY; + else + cancel_delayed_work(&hdev->discov_off); err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); if (err < 0) mgmt_pending_remove(cmd); + if (cp->val) + hdev->discov_timeout = get_unaligned_le16(&cp->timeout); + 
failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); @@ -435,8 +434,8 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data, goto failed; } - if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) || - mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) { + if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || + mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY); goto failed; } @@ -446,7 +445,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data, goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -468,8 +467,8 @@ failed: return err; } -static int mgmt_event(u16 event, u16 index, void *data, u16 data_len, - struct sock *skip_sk) +static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, + u16 data_len, struct sock *skip_sk) { struct sk_buff *skb; struct mgmt_hdr *hdr; @@ -482,7 +481,10 @@ static int mgmt_event(u16 event, u16 index, void *data, u16 data_len, hdr = (void *) skb_put(skb, sizeof(*hdr)); hdr->opcode = cpu_to_le16(event); - hdr->index = cpu_to_le16(index); + if (hdev) + hdr->index = cpu_to_le16(hdev->id); + else + hdr->index = cpu_to_le16(MGMT_INDEX_NONE); hdr->len = cpu_to_le16(data_len); if (data) @@ -534,7 +536,7 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data, ev.val = cp->val; - err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk); + err = mgmt_event(MGMT_EV_PAIRABLE, hdev, &ev, sizeof(ev), sk); failed: hci_dev_unlock_bh(hdev); @@ -587,7 +589,7 @@ static void create_eir(struct hci_dev *hdev, u8 *data) u16 eir_len = 0; u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)]; int i, truncated = 0; - struct list_head *p; + struct bt_uuid *uuid; size_t name_len; name_len = strlen(hdev->dev_name); @@ -612,8 +614,7 @@ static void create_eir(struct hci_dev *hdev, u8 *data) memset(uuid16_list, 0, sizeof(uuid16_list)); /* Group all UUID16 types */ - list_for_each(p, &hdev->uuids) { - struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list); + list_for_each_entry(uuid, &hdev->uuids, list) { u16 uuid16; uuid16 = get_uuid16(uuid->uuid); @@ -689,14 +690,11 @@ static int update_eir(struct hci_dev *hdev) static u8 get_service_classes(struct hci_dev *hdev) { - struct list_head *p; + struct bt_uuid *uuid; u8 val = 0; - list_for_each(p, &hdev->uuids) { - struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list); - + list_for_each_entry(uuid, &hdev->uuids, list) val |= uuid->svc_hint; - } return val; } @@ -895,6 +893,9 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data, if (err == 0) err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL, 0); + else + cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, -err); + hci_dev_unlock_bh(hdev); hci_dev_put(hdev); @@ -902,30 +903,32 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data, return err; } -static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len) +static int load_link_keys(struct sock *sk, u16 index, unsigned char *data, + u16 len) { struct hci_dev *hdev; - struct mgmt_cp_load_keys *cp; + struct mgmt_cp_load_link_keys *cp; u16 key_count, expected_len; int i; cp = (void *) data; if (len < sizeof(*cp)) - return -EINVAL; + return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, EINVAL); key_count = get_unaligned_le16(&cp->key_count); - expected_len = sizeof(*cp) + 
key_count * sizeof(struct mgmt_key_info); + expected_len = sizeof(*cp) + key_count * + sizeof(struct mgmt_link_key_info); if (expected_len != len) { - BT_ERR("load_keys: expected %u bytes, got %u bytes", + BT_ERR("load_link_keys: expected %u bytes, got %u bytes", len, expected_len); - return -EINVAL; + return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, EINVAL); } hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV); + return cmd_status(sk, index, MGMT_OP_LOAD_LINK_KEYS, ENODEV); BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys, key_count); @@ -942,7 +945,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len) clear_bit(HCI_DEBUG_KEYS, &hdev->flags); for (i = 0; i < key_count; i++) { - struct mgmt_key_info *key = &cp->keys[i]; + struct mgmt_link_key_info *key = &cp->keys[i]; hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type, key->pin_len); @@ -954,27 +957,28 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len) return 0; } -static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len) +static int remove_keys(struct sock *sk, u16 index, unsigned char *data, + u16 len) { struct hci_dev *hdev; - struct mgmt_cp_remove_key *cp; + struct mgmt_cp_remove_keys *cp; struct hci_conn *conn; int err; cp = (void *) data; if (len != sizeof(*cp)) - return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL); + return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS, EINVAL); hdev = hci_dev_get(index); if (!hdev) - return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV); + return cmd_status(sk, index, MGMT_OP_REMOVE_KEYS, ENODEV); hci_dev_lock_bh(hdev); err = hci_remove_link_key(hdev, &cp->bdaddr); if (err < 0) { - err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err); + err = cmd_status(sk, index, MGMT_OP_REMOVE_KEYS, -err); goto unlock; } @@ -1026,7 +1030,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len) goto failed; } - if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) { + if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) { err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY); goto failed; } @@ -1040,7 +1044,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len) goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -1060,10 +1064,23 @@ failed: return err; } +static u8 link_to_mgmt(u8 link_type) +{ + switch (link_type) { + case LE_LINK: + return MGMT_ADDR_LE; + case ACL_LINK: + return MGMT_ADDR_BREDR; + default: + return MGMT_ADDR_INVALID; + } +} + static int get_connections(struct sock *sk, u16 index) { struct mgmt_rp_get_connections *rp; struct hci_dev *hdev; + struct hci_conn *c; struct list_head *p; size_t rp_len; u16 count; @@ -1082,7 +1099,7 @@ static int get_connections(struct sock *sk, u16 index) count++; } - rp_len = sizeof(*rp) + (count * sizeof(bdaddr_t)); + rp_len = sizeof(*rp) + (count * sizeof(struct mgmt_addr_info)); rp = kmalloc(rp_len, GFP_ATOMIC); if (!rp) { err = -ENOMEM; @@ -1092,12 +1109,17 @@ static int get_connections(struct sock *sk, u16 index) put_unaligned_le16(count, &rp->conn_count); i = 0; - list_for_each(p, &hdev->conn_hash.list) { - struct hci_conn *c = list_entry(p, struct hci_conn, list); - - bacpy(&rp->conn[i++], &c->dst); + list_for_each_entry(c, &hdev->conn_hash.list, list) { + bacpy(&rp->addr[i].bdaddr, &c->dst); + 
rp->addr[i].type = link_to_mgmt(c->type); + if (rp->addr[i].type == MGMT_ADDR_INVALID) + continue; + i++; } + /* Recalculate length in case of filtered SCO connections, etc */ + rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info)); + err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len); unlock: @@ -1113,7 +1135,7 @@ static int send_pin_code_neg_reply(struct sock *sk, u16 index, struct pending_cmd *cmd; int err; - cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, cp, + cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp, sizeof(*cp)); if (!cmd) return -ENOMEM; @@ -1174,7 +1196,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data, goto failed; } - cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -1265,19 +1287,12 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data, static inline struct pending_cmd *find_pairing(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; - struct list_head *p; - - list_for_each(p, &cmd_list) { - struct pending_cmd *cmd; - - cmd = list_entry(p, struct pending_cmd, list); + struct pending_cmd *cmd; + list_for_each_entry(cmd, &hdev->mgmt_pending, list) { if (cmd->opcode != MGMT_OP_PAIR_DEVICE) continue; - if (cmd->index != hdev->id) - continue; - if (cmd->user_data != conn) continue; @@ -1310,16 +1325,19 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status) static void pairing_complete_cb(struct hci_conn *conn, u8 status) { struct pending_cmd *cmd; + struct hci_dev *hdev = conn->hdev; BT_DBG("status %u", status); + hci_dev_lock_bh(hdev); + cmd = find_pairing(conn); - if (!cmd) { + if (!cmd) BT_DBG("Unable to find a pending command"); - return; - } + else + pairing_complete(cmd, status); - pairing_complete(cmd, status); + hci_dev_unlock_bh(hdev); } static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) @@ -1370,7 +1388,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, index, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len); if (!cmd) { err = -ENOMEM; hci_conn_put(conn); @@ -1432,7 +1450,7 @@ static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data, goto failed; } - cmd = mgmt_pending_add(sk, mgmt_op, index, data, len); + cmd = mgmt_pending_add(sk, mgmt_op, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -1469,7 +1487,7 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data, hci_dev_lock_bh(hdev); - cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len); + cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; @@ -1515,12 +1533,12 @@ static int read_local_oob_data(struct sock *sk, u16 index) goto unlock; } - if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index)) { + if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) { err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY); goto unlock; } - cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, index, NULL, 0); + cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0); if (!cmd) { err = -ENOMEM; goto unlock; @@ -1607,8 +1625,6 @@ static int remove_remote_oob_data(struct sock *sk, u16 index, static int start_discovery(struct sock *sk, u16 index) { - u8 lap[3] = { 
0x33, 0x8b, 0x9e }; - struct hci_cp_inquiry cp; struct pending_cmd *cmd; struct hci_dev *hdev; int err; @@ -1621,18 +1637,18 @@ static int start_discovery(struct sock *sk, u16 index) hci_dev_lock_bh(hdev); - cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0); + if (!test_bit(HCI_UP, &hdev->flags)) { + err = cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENETDOWN); + goto failed; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0); if (!cmd) { err = -ENOMEM; goto failed; } - memset(&cp, 0, sizeof(cp)); - memcpy(&cp.lap, lap, 3); - cp.length = 0x08; - cp.num_rsp = 0x00; - - err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp); + err = hci_do_inquiry(hdev, INQUIRY_LEN_BREDR); if (err < 0) mgmt_pending_remove(cmd); @@ -1657,13 +1673,13 @@ static int stop_discovery(struct sock *sk, u16 index) hci_dev_lock_bh(hdev); - cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index, NULL, 0); + cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0); if (!cmd) { err = -ENOMEM; goto failed; } - err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); + err = hci_cancel_inquiry(hdev); if (err < 0) mgmt_pending_remove(cmd); @@ -1678,7 +1694,6 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; - struct pending_cmd *cmd; struct mgmt_cp_block_device *cp = (void *) data; int err; @@ -1695,23 +1710,13 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data, hci_dev_lock_bh(hdev); - cmd = mgmt_pending_add(sk, MGMT_OP_BLOCK_DEVICE, index, NULL, 0); - if (!cmd) { - err = -ENOMEM; - goto failed; - } - err = hci_blacklist_add(hdev, &cp->bdaddr); - if (err < 0) err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err); else err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE, NULL, 0); - mgmt_pending_remove(cmd); - -failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); @@ -1722,7 +1727,6 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data, u16 len) { struct hci_dev *hdev; - struct pending_cmd *cmd; struct mgmt_cp_unblock_device *cp = (void *) data; int err; @@ -1739,12 +1743,6 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data, hci_dev_lock_bh(hdev); - cmd = mgmt_pending_add(sk, MGMT_OP_UNBLOCK_DEVICE, index, NULL, 0); - if (!cmd) { - err = -ENOMEM; - goto failed; - } - err = hci_blacklist_del(hdev, &cp->bdaddr); if (err < 0) @@ -1753,9 +1751,6 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data, err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE, NULL, 0); - mgmt_pending_remove(cmd); - -failed: hci_dev_unlock_bh(hdev); hci_dev_put(hdev); @@ -1883,11 +1878,11 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) case MGMT_OP_SET_SERVICE_CACHE: err = set_service_cache(sk, index, buf + sizeof(*hdr), len); break; - case MGMT_OP_LOAD_KEYS: - err = load_keys(sk, index, buf + sizeof(*hdr), len); + case MGMT_OP_LOAD_LINK_KEYS: + err = load_link_keys(sk, index, buf + sizeof(*hdr), len); break; - case MGMT_OP_REMOVE_KEY: - err = remove_key(sk, index, buf + sizeof(*hdr), len); + case MGMT_OP_REMOVE_KEYS: + err = remove_keys(sk, index, buf + sizeof(*hdr), len); break; case MGMT_OP_DISCONNECT: err = disconnect(sk, index, buf + sizeof(*hdr), len); @@ -1958,14 +1953,26 @@ done: return err; } -int mgmt_index_added(u16 index) +static void cmd_status_rsp(struct pending_cmd *cmd, void *data) { - return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL); + u8 *status = data; + + cmd_status(cmd->sk, cmd->index, 
cmd->opcode, *status); + mgmt_pending_remove(cmd); } -int mgmt_index_removed(u16 index) +int mgmt_index_added(struct hci_dev *hdev) { - return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL); + return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL); +} + +int mgmt_index_removed(struct hci_dev *hdev) +{ + u8 status = ENODEV; + + mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); + + return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL); } struct cmd_lookup { @@ -1993,17 +2000,22 @@ static void mode_rsp(struct pending_cmd *cmd, void *data) mgmt_pending_free(cmd); } -int mgmt_powered(u16 index, u8 powered) +int mgmt_powered(struct hci_dev *hdev, u8 powered) { struct mgmt_mode ev; struct cmd_lookup match = { powered, NULL }; int ret; - mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match); + mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, mode_rsp, &match); + + if (!powered) { + u8 status = ENETDOWN; + mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status); + } ev.val = powered; - ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk); + ret = mgmt_event(MGMT_EV_POWERED, hdev, &ev, sizeof(ev), match.sk); if (match.sk) sock_put(match.sk); @@ -2011,17 +2023,17 @@ int mgmt_powered(u16 index, u8 powered) return ret; } -int mgmt_discoverable(u16 index, u8 discoverable) +int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable) { struct mgmt_mode ev; struct cmd_lookup match = { discoverable, NULL }; int ret; - mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match); + mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, mode_rsp, &match); ev.val = discoverable; - ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev), + ret = mgmt_event(MGMT_EV_DISCOVERABLE, hdev, &ev, sizeof(ev), match.sk); if (match.sk) @@ -2030,17 +2042,17 @@ int mgmt_discoverable(u16 index, u8 discoverable) return ret; } -int mgmt_connectable(u16 index, u8 connectable) +int mgmt_connectable(struct hci_dev *hdev, u8 connectable) { struct mgmt_mode ev; struct cmd_lookup match = { connectable, NULL }; int ret; - mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match); + mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, mode_rsp, &match); ev.val = connectable; - ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk); + ret = mgmt_event(MGMT_EV_CONNECTABLE, hdev, &ev, sizeof(ev), match.sk); if (match.sk) sock_put(match.sk); @@ -2048,9 +2060,23 @@ int mgmt_connectable(u16 index, u8 connectable) return ret; } -int mgmt_new_key(u16 index, struct link_key *key, u8 persistent) +int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status) +{ + if (scan & SCAN_PAGE) + mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev, + cmd_status_rsp, &status); + + if (scan & SCAN_INQUIRY) + mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, + cmd_status_rsp, &status); + + return 0; +} + +int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, + u8 persistent) { - struct mgmt_ev_new_key ev; + struct mgmt_ev_new_link_key ev; memset(&ev, 0, sizeof(ev)); @@ -2060,17 +2086,17 @@ int mgmt_new_key(u16 index, struct link_key *key, u8 persistent) memcpy(ev.key.val, key->val, 16); ev.key.pin_len = key->pin_len; - return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL); + return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); } -int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type) +int mgmt_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type) { - struct mgmt_ev_connected ev; + struct 
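/*
 * The mgmt hunks above key pending commands off the hci_dev (hdev->mgmt_pending)
 * instead of a global cmd_list matched by controller index, so teardown paths
 * such as mgmt_index_removed() and mgmt_powered() can fail everything still
 * outstanding in a single walk via cmd_status_rsp().  A minimal sketch of that
 * flush pattern, reusing the static mgmt.c helpers shown in the diff
 * (cmd_status(), mgmt_pending_remove()); the function name is illustrative:
 */
static void example_fail_all_pending(struct hci_dev *hdev, u8 status)
{
        struct pending_cmd *cmd, *tmp;

        /* _safe variant because mgmt_pending_remove() unlinks the entry */
        list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
                cmd_status(cmd->sk, cmd->index, cmd->opcode, status);
                mgmt_pending_remove(cmd);
        }
}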
mgmt_addr_info ev; bacpy(&ev.bdaddr, bdaddr); - ev.link_type = link_type; + ev.type = link_to_mgmt(link_type); - return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL); + return mgmt_event(MGMT_EV_CONNECTED, hdev, &ev, sizeof(ev), NULL); } static void disconnect_rsp(struct pending_cmd *cmd, void *data) @@ -2089,17 +2115,18 @@ static void disconnect_rsp(struct pending_cmd *cmd, void *data) mgmt_pending_remove(cmd); } -int mgmt_disconnected(u16 index, bdaddr_t *bdaddr) +int mgmt_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) { - struct mgmt_ev_disconnected ev; + struct mgmt_addr_info ev; struct sock *sk = NULL; int err; - mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk); + mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk); bacpy(&ev.bdaddr, bdaddr); + ev.type = link_to_mgmt(type); - err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk); + err = mgmt_event(MGMT_EV_DISCONNECTED, hdev, &ev, sizeof(ev), sk); if (sk) sock_put(sk); @@ -2107,57 +2134,60 @@ int mgmt_disconnected(u16 index, bdaddr_t *bdaddr) return err; } -int mgmt_disconnect_failed(u16 index) +int mgmt_disconnect_failed(struct hci_dev *hdev) { struct pending_cmd *cmd; int err; - cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, index); + cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev); if (!cmd) return -ENOENT; - err = cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO); + err = cmd_status(cmd->sk, hdev->id, MGMT_OP_DISCONNECT, EIO); mgmt_pending_remove(cmd); return err; } -int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status) +int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type, + u8 status) { struct mgmt_ev_connect_failed ev; - bacpy(&ev.bdaddr, bdaddr); + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = link_to_mgmt(type); ev.status = status; - return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL); + return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL); } -int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure) +int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure) { struct mgmt_ev_pin_code_request ev; bacpy(&ev.bdaddr, bdaddr); ev.secure = secure; - return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev), + return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL); } -int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) +int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 status) { struct pending_cmd *cmd; struct mgmt_rp_pin_code_reply rp; int err; - cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index); + cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev); if (!cmd) return -ENOENT; bacpy(&rp.bdaddr, bdaddr); rp.status = status; - err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp, + err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, &rp, sizeof(rp)); mgmt_pending_remove(cmd); @@ -2165,20 +2195,21 @@ int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) return err; } -int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) +int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 status) { struct pending_cmd *cmd; struct mgmt_rp_pin_code_reply rp; int err; - cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index); + cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev); if (!cmd) return -ENOENT; bacpy(&rp.bdaddr, bdaddr); rp.status = status; - err = cmd_complete(cmd->sk, index, 
MGMT_OP_PIN_CODE_NEG_REPLY, &rp, + err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY, &rp, sizeof(rp)); mgmt_pending_remove(cmd); @@ -2186,97 +2217,93 @@ int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) return err; } -int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value, - u8 confirm_hint) +int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, + __le32 value, u8 confirm_hint) { struct mgmt_ev_user_confirm_request ev; - BT_DBG("hci%u", index); + BT_DBG("%s", hdev->name); bacpy(&ev.bdaddr, bdaddr); ev.confirm_hint = confirm_hint; put_unaligned_le32(value, &ev.value); - return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev), + return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev), NULL); } -static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status, - u8 opcode) +static int confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 status, u8 opcode) { struct pending_cmd *cmd; struct mgmt_rp_user_confirm_reply rp; int err; - cmd = mgmt_pending_find(opcode, index); + cmd = mgmt_pending_find(opcode, hdev); if (!cmd) return -ENOENT; bacpy(&rp.bdaddr, bdaddr); rp.status = status; - err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp)); + err = cmd_complete(cmd->sk, hdev->id, opcode, &rp, sizeof(rp)); mgmt_pending_remove(cmd); return err; } -int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) +int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 status) { - return confirm_reply_complete(index, bdaddr, status, + return confirm_reply_complete(hdev, bdaddr, status, MGMT_OP_USER_CONFIRM_REPLY); } -int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) +int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, + bdaddr_t *bdaddr, u8 status) { - return confirm_reply_complete(index, bdaddr, status, + return confirm_reply_complete(hdev, bdaddr, status, MGMT_OP_USER_CONFIRM_NEG_REPLY); } -int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status) +int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status) { struct mgmt_ev_auth_failed ev; bacpy(&ev.bdaddr, bdaddr); ev.status = status; - return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL); + return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL); } -int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status) +int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) { struct pending_cmd *cmd; - struct hci_dev *hdev; struct mgmt_cp_set_local_name ev; int err; memset(&ev, 0, sizeof(ev)); memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); - cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, index); + cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); if (!cmd) goto send_event; if (status) { - err = cmd_status(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, EIO); + err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, + EIO); goto failed; } - hdev = hci_dev_get(index); - if (hdev) { - hci_dev_lock_bh(hdev); - update_eir(hdev); - hci_dev_unlock_bh(hdev); - hci_dev_put(hdev); - } + update_eir(hdev); - err = cmd_complete(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, &ev, + err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, &ev, sizeof(ev)); if (err < 0) goto failed; send_event: - err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, index, &ev, sizeof(ev), + err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev), cmd ? 
cmd->sk : NULL); failed: @@ -2285,29 +2312,30 @@ failed: return err; } -int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer, - u8 status) +int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash, + u8 *randomizer, u8 status) { struct pending_cmd *cmd; int err; - BT_DBG("hci%u status %u", index, status); + BT_DBG("%s status %u", hdev->name, status); - cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index); + cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev); if (!cmd) return -ENOENT; if (status) { - err = cmd_status(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, - EIO); + err = cmd_status(cmd->sk, hdev->id, + MGMT_OP_READ_LOCAL_OOB_DATA, EIO); } else { struct mgmt_rp_read_local_oob_data rp; memcpy(rp.hash, hash, sizeof(rp.hash)); memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer)); - err = cmd_complete(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, - &rp, sizeof(rp)); + err = cmd_complete(cmd->sk, hdev->id, + MGMT_OP_READ_LOCAL_OOB_DATA, + &rp, sizeof(rp)); } mgmt_pending_remove(cmd); @@ -2315,14 +2343,15 @@ int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer, return err; } -int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi, - u8 *eir) +int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type, + u8 *dev_class, s8 rssi, u8 *eir) { struct mgmt_ev_device_found ev; memset(&ev, 0, sizeof(ev)); - bacpy(&ev.bdaddr, bdaddr); + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = link_to_mgmt(type); ev.rssi = rssi; if (eir) @@ -2331,10 +2360,10 @@ int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi, if (dev_class) memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class)); - return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL); + return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, &ev, sizeof(ev), NULL); } -int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name) +int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name) { struct mgmt_ev_remote_name ev; @@ -2343,37 +2372,64 @@ int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name) bacpy(&ev.bdaddr, bdaddr); memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); - return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL); + return mgmt_event(MGMT_EV_REMOTE_NAME, hdev, &ev, sizeof(ev), NULL); } -int mgmt_discovering(u16 index, u8 discovering) +int mgmt_inquiry_failed(struct hci_dev *hdev, u8 status) { - return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering, + struct pending_cmd *cmd; + int err; + + cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev); + if (!cmd) + return -ENOENT; + + err = cmd_status(cmd->sk, hdev->id, cmd->opcode, status); + mgmt_pending_remove(cmd); + + return err; +} + +int mgmt_discovering(struct hci_dev *hdev, u8 discovering) +{ + struct pending_cmd *cmd; + + if (discovering) + cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev); + else + cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev); + + if (cmd != NULL) { + cmd_complete(cmd->sk, hdev->id, cmd->opcode, NULL, 0); + mgmt_pending_remove(cmd); + } + + return mgmt_event(MGMT_EV_DISCOVERING, hdev, &discovering, sizeof(discovering), NULL); } -int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr) +int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct pending_cmd *cmd; struct mgmt_ev_device_blocked ev; - cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, index); + cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev); bacpy(&ev.bdaddr, bdaddr); - return mgmt_event(MGMT_EV_DEVICE_BLOCKED, 
index, &ev, sizeof(ev), - cmd ? cmd->sk : NULL); + return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev), + cmd ? cmd->sk : NULL); } -int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr) +int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct pending_cmd *cmd; struct mgmt_ev_device_unblocked ev; - cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, index); + cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev); bacpy(&ev.bdaddr, bdaddr); - return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, index, &ev, sizeof(ev), - cmd ? cmd->sk : NULL); + return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev), + cmd ? cmd->sk : NULL); } diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 4e32e18211f9..8743f369ed3f 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -65,7 +65,8 @@ static DEFINE_MUTEX(rfcomm_mutex); static LIST_HEAD(session_list); -static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len); +static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len, + u32 priority); static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci); static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci); static int rfcomm_queue_disc(struct rfcomm_dlc *d); @@ -377,13 +378,11 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d) static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci) { struct rfcomm_dlc *d; - struct list_head *p; - list_for_each(p, &s->dlcs) { - d = list_entry(p, struct rfcomm_dlc, list); + list_for_each_entry(d, &s->dlcs, list) if (d->dlci == dlci) return d; - } + return NULL; } @@ -749,19 +748,34 @@ void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *d } /* ---- RFCOMM frame sending ---- */ -static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len) +static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len, + u32 priority) { struct socket *sock = s->sock; + struct sock *sk = sock->sk; struct kvec iv = { data, len }; struct msghdr msg; - BT_DBG("session %p len %d", s, len); + BT_DBG("session %p len %d priority %u", s, len, priority); + + if (sk->sk_priority != priority) { + lock_sock(sk); + sk->sk_priority = priority; + release_sock(sk); + } memset(&msg, 0, sizeof(msg)); return kernel_sendmsg(sock, &msg, &iv, 1, len); } +static int rfcomm_send_cmd(struct rfcomm_session *s, struct rfcomm_cmd *cmd) +{ + BT_DBG("%p cmd %u", s, cmd->ctrl); + + return rfcomm_send_frame(s, (void *) cmd, sizeof(*cmd), HCI_PRIO_MAX); +} + static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci) { struct rfcomm_cmd cmd; @@ -773,7 +787,7 @@ static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci) cmd.len = __len8(0); cmd.fcs = __fcs2((u8 *) &cmd); - return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); + return rfcomm_send_cmd(s, &cmd); } static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci) @@ -787,7 +801,7 @@ static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci) cmd.len = __len8(0); cmd.fcs = __fcs2((u8 *) &cmd); - return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); + return rfcomm_send_cmd(s, &cmd); } static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci) @@ -801,7 +815,7 @@ static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci) cmd.len = __len8(0); cmd.fcs = __fcs2((u8 *) &cmd); - return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); + return rfcomm_send_cmd(s, &cmd); } static int rfcomm_queue_disc(struct rfcomm_dlc *d) @@ -815,6 +829,8 @@ static int 
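/*
 * Every RFCOMM control frame above (SABM, UA, DISC, DM, PN, MSC, ...) now goes
 * through rfcomm_send_frame() with an explicit priority, and the new
 * rfcomm_send_cmd() wrapper pins fixed-size commands at HCI_PRIO_MAX while data
 * keeps the priority stamped on the skb.  Sketch of that split, assuming the
 * rfcomm_send_frame() prototype introduced here (helper names illustrative):
 */
static int example_send_ctrl(struct rfcomm_session *s, u8 *buf, int len)
{
        /* signalling always outranks data on the ACL link */
        return rfcomm_send_frame(s, buf, len, HCI_PRIO_MAX);
}

static int example_send_data(struct rfcomm_dlc *d, struct sk_buff *skb)
{
        /* data frames inherit whatever the owning socket requested */
        return rfcomm_send_frame(d->session, skb->data, skb->len,
                                 skb->priority);
}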
rfcomm_queue_disc(struct rfcomm_dlc *d) if (!skb) return -ENOMEM; + skb->priority = HCI_PRIO_MAX; + cmd = (void *) __skb_put(skb, sizeof(*cmd)); cmd->addr = d->addr; cmd->ctrl = __ctrl(RFCOMM_DISC, 1); @@ -837,7 +853,7 @@ static int rfcomm_send_dm(struct rfcomm_session *s, u8 dlci) cmd.len = __len8(0); cmd.fcs = __fcs2((u8 *) &cmd); - return rfcomm_send_frame(s, (void *) &cmd, sizeof(cmd)); + return rfcomm_send_cmd(s, &cmd); } static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type) @@ -862,7 +878,7 @@ static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type) *ptr = __fcs(buf); ptr++; - return rfcomm_send_frame(s, buf, ptr - buf); + return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX); } static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d) @@ -904,7 +920,7 @@ static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d *ptr = __fcs(buf); ptr++; - return rfcomm_send_frame(s, buf, ptr - buf); + return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX); } int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci, @@ -942,7 +958,7 @@ int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci, *ptr = __fcs(buf); ptr++; - return rfcomm_send_frame(s, buf, ptr - buf); + return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX); } static int rfcomm_send_rls(struct rfcomm_session *s, int cr, u8 dlci, u8 status) @@ -969,7 +985,7 @@ static int rfcomm_send_rls(struct rfcomm_session *s, int cr, u8 dlci, u8 status) *ptr = __fcs(buf); ptr++; - return rfcomm_send_frame(s, buf, ptr - buf); + return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX); } static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig) @@ -996,7 +1012,7 @@ static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig *ptr = __fcs(buf); ptr++; - return rfcomm_send_frame(s, buf, ptr - buf); + return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX); } static int rfcomm_send_fcoff(struct rfcomm_session *s, int cr) @@ -1018,7 +1034,7 @@ static int rfcomm_send_fcoff(struct rfcomm_session *s, int cr) *ptr = __fcs(buf); ptr++; - return rfcomm_send_frame(s, buf, ptr - buf); + return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX); } static int rfcomm_send_fcon(struct rfcomm_session *s, int cr) @@ -1040,7 +1056,7 @@ static int rfcomm_send_fcon(struct rfcomm_session *s, int cr) *ptr = __fcs(buf); ptr++; - return rfcomm_send_frame(s, buf, ptr - buf); + return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX); } static int rfcomm_send_test(struct rfcomm_session *s, int cr, u8 *pattern, int len) @@ -1091,7 +1107,7 @@ static int rfcomm_send_credits(struct rfcomm_session *s, u8 addr, u8 credits) *ptr = __fcs(buf); ptr++; - return rfcomm_send_frame(s, buf, ptr - buf); + return rfcomm_send_frame(s, buf, ptr - buf, HCI_PRIO_MAX); } static void rfcomm_make_uih(struct sk_buff *skb, u8 addr) @@ -1769,7 +1785,8 @@ static inline int rfcomm_process_tx(struct rfcomm_dlc *d) return skb_queue_len(&d->tx_queue); while (d->tx_credits && (skb = skb_dequeue(&d->tx_queue))) { - err = rfcomm_send_frame(d->session, skb->data, skb->len); + err = rfcomm_send_frame(d->session, skb->data, skb->len, + skb->priority); if (err < 0) { skb_queue_head(&d->tx_queue, skb); break; @@ -2120,15 +2137,13 @@ static struct hci_cb rfcomm_cb = { static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x) { struct rfcomm_session *s; - struct list_head *pp, *p; rfcomm_lock(); - list_for_each(p, &session_list) { - s = list_entry(p, struct rfcomm_session, 
list); - list_for_each(pp, &s->dlcs) { + list_for_each_entry(s, &session_list, list) { + struct rfcomm_dlc *d; + list_for_each_entry(d, &s->dlcs, list) { struct sock *sk = s->sock->sk; - struct rfcomm_dlc *d = list_entry(pp, struct rfcomm_dlc, list); seq_printf(f, "%s %s %ld %d %d %d %d\n", batostr(&bt_sk(sk)->src), diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 5417f6127323..aea2bdd1510f 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -600,6 +600,8 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock, break; } + skb->priority = sk->sk_priority; + err = rfcomm_dlc_send(d, skb); if (err < 0) { kfree_skb(skb); diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index c258796313e0..fa8f4de53b99 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -34,6 +34,7 @@ #include <linux/capability.h> #include <linux/slab.h> #include <linux/skbuff.h> +#include <linux/workqueue.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> @@ -65,7 +66,7 @@ struct rfcomm_dev { struct rfcomm_dlc *dlc; struct tty_struct *tty; wait_queue_head_t wait; - struct tasklet_struct wakeup_task; + struct work_struct wakeup_task; struct device *tty_dev; @@ -81,7 +82,7 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb); static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err); static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig); -static void rfcomm_tty_wakeup(unsigned long arg); +static void rfcomm_tty_wakeup(struct work_struct *work); /* ---- Device functions ---- */ static void rfcomm_dev_destruct(struct rfcomm_dev *dev) @@ -133,13 +134,10 @@ static inline void rfcomm_dev_put(struct rfcomm_dev *dev) static struct rfcomm_dev *__rfcomm_dev_get(int id) { struct rfcomm_dev *dev; - struct list_head *p; - list_for_each(p, &rfcomm_dev_list) { - dev = list_entry(p, struct rfcomm_dev, list); + list_for_each_entry(dev, &rfcomm_dev_list, list) if (dev->id == id) return dev; - } return NULL; } @@ -197,7 +195,7 @@ static DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL); static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) { - struct rfcomm_dev *dev; + struct rfcomm_dev *dev, *entry; struct list_head *head = &rfcomm_dev_list, *p; int err = 0; @@ -212,8 +210,8 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) if (req->dev_id < 0) { dev->id = 0; - list_for_each(p, &rfcomm_dev_list) { - if (list_entry(p, struct rfcomm_dev, list)->id != dev->id) + list_for_each_entry(entry, &rfcomm_dev_list, list) { + if (entry->id != dev->id) break; dev->id++; @@ -222,9 +220,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) } else { dev->id = req->dev_id; - list_for_each(p, &rfcomm_dev_list) { - struct rfcomm_dev *entry = list_entry(p, struct rfcomm_dev, list); - + list_for_each_entry(entry, &rfcomm_dev_list, list) { if (entry->id == dev->id) { err = -EADDRINUSE; goto out; @@ -257,7 +253,7 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) atomic_set(&dev->opened, 0); init_waitqueue_head(&dev->wait); - tasklet_init(&dev->wakeup_task, rfcomm_tty_wakeup, (unsigned long) dev); + INIT_WORK(&dev->wakeup_task, rfcomm_tty_wakeup); skb_queue_head_init(&dev->pending); @@ -351,7 +347,7 @@ static void rfcomm_wfree(struct sk_buff *skb) struct rfcomm_dev *dev = (void *) skb->sk; atomic_sub(skb->truesize, &dev->wmem_alloc); if 
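/*
 * The conversions above (rfcomm_dlc_get, the debugfs dump, __rfcomm_dev_get,
 * rfcomm_dev_add) all replace open-coded list_for_each() + list_entry() with
 * list_for_each_entry(), which hides the container_of() step.  A generic
 * illustration with a hypothetical item type, not taken from the patch:
 */
struct example_item {
        int id;
        struct list_head list;
};

static struct example_item *example_lookup(struct list_head *head, int id)
{
        struct example_item *it;

        list_for_each_entry(it, head, list)
                if (it->id == id)
                        return it;

        return NULL;
}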
(test_bit(RFCOMM_TTY_ATTACHED, &dev->flags)) - tasklet_schedule(&dev->wakeup_task); + queue_work(system_nrt_wq, &dev->wakeup_task); rfcomm_dev_put(dev); } @@ -455,9 +451,9 @@ static int rfcomm_release_dev(void __user *arg) static int rfcomm_get_dev_list(void __user *arg) { + struct rfcomm_dev *dev; struct rfcomm_dev_list_req *dl; struct rfcomm_dev_info *di; - struct list_head *p; int n = 0, size, err; u16 dev_num; @@ -479,8 +475,7 @@ static int rfcomm_get_dev_list(void __user *arg) read_lock_bh(&rfcomm_dev_lock); - list_for_each(p, &rfcomm_dev_list) { - struct rfcomm_dev *dev = list_entry(p, struct rfcomm_dev, list); + list_for_each_entry(dev, &rfcomm_dev_list, list) { if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) continue; (di + n)->id = dev->id; @@ -635,9 +630,10 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) } /* ---- TTY functions ---- */ -static void rfcomm_tty_wakeup(unsigned long arg) +static void rfcomm_tty_wakeup(struct work_struct *work) { - struct rfcomm_dev *dev = (void *) arg; + struct rfcomm_dev *dev = container_of(work, struct rfcomm_dev, + wakeup_task); struct tty_struct *tty = dev->tty; if (!tty) return; @@ -762,7 +758,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) rfcomm_dlc_close(dev->dlc, 0); clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags); - tasklet_kill(&dev->wakeup_task); + cancel_work_sync(&dev->wakeup_task); rfcomm_dlc_lock(dev->dlc); tty->driver_data = NULL; @@ -1155,9 +1151,11 @@ static const struct tty_operations rfcomm_ops = { int __init rfcomm_init_ttys(void) { + int error; + rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS); if (!rfcomm_tty_driver) - return -1; + return -ENOMEM; rfcomm_tty_driver->owner = THIS_MODULE; rfcomm_tty_driver->driver_name = "rfcomm"; @@ -1172,10 +1170,11 @@ int __init rfcomm_init_ttys(void) rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON; tty_set_operations(rfcomm_tty_driver, &rfcomm_ops); - if (tty_register_driver(rfcomm_tty_driver)) { + error = tty_register_driver(rfcomm_tty_driver); + if (error) { BT_ERR("Can't register RFCOMM TTY driver"); put_tty_driver(rfcomm_tty_driver); - return -1; + return error; } BT_INFO("RFCOMM TTY layer initialized"); diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 759b63572641..94e94ca35384 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -181,7 +181,8 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data) if (!skb) return; - hci_send_acl(conn->hcon, skb, 0); + skb->priority = HCI_PRIO_MAX; + hci_send_acl(conn->hchan, skb, 0); mod_timer(&conn->security_timer, jiffies + msecs_to_jiffies(SMP_TIMEOUT)); diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index feb77ea7b58e..a3754ac262c3 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -186,7 +186,8 @@ static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info) strcpy(info->bus_info, "N/A"); } -static u32 br_fix_features(struct net_device *dev, u32 features) +static netdev_features_t br_fix_features(struct net_device *dev, + netdev_features_t features) { struct net_bridge *br = netdev_priv(dev); @@ -341,10 +342,10 @@ void br_dev_setup(struct net_device *dev) dev->priv_flags = IFF_EBRIDGE; dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | - NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX | + NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_TX; dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | - NETIF_F_GSO_MASK | 
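/*
 * rfcomm_tty switches its wakeup path from a tasklet to a work item: the
 * handler now takes a struct work_struct and recovers its device with
 * container_of(), scheduling goes through queue_work(system_nrt_wq, ...) and
 * teardown uses cancel_work_sync() so it can wait for a running handler.
 * Minimal sketch of the same conversion on a hypothetical device type
 * (needs <linux/workqueue.h>, which the patch adds to rfcomm/tty.c):
 */
struct example_dev {
        struct work_struct wakeup_task;
        /* ... driver state ... */
};

static void example_wakeup(struct work_struct *work)
{
        struct example_dev *dev =
                container_of(work, struct example_dev, wakeup_task);

        /* runs in process context now, so it may sleep */
        (void)dev;
}

static void example_dev_init(struct example_dev *dev)
{
        INIT_WORK(&dev->wakeup_task, example_wakeup);
}

static void example_kick(struct example_dev *dev)
{
        /* safe from atomic context, e.g. an skb destructor */
        queue_work(system_nrt_wq, &dev->wakeup_task);
}

static void example_teardown(struct example_dev *dev)
{
        cancel_work_sync(&dev->wakeup_task);
}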
NETIF_F_NO_CSUM | + NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX; br->dev = dev; diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index c8e7861b88b0..973813e34428 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -556,7 +556,7 @@ skip: return skb->len; } -/* Create new static fdb entry */ +/* Update (create or replace) forwarding database entry */ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, __u16 state, __u16 flags) { @@ -575,16 +575,21 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, } else { if (flags & NLM_F_EXCL) return -EEXIST; + } + + if (fdb_to_nud(fdb) != state) { + if (state & NUD_PERMANENT) + fdb->is_local = fdb->is_static = 1; + else if (state & NUD_NOARP) { + fdb->is_local = 0; + fdb->is_static = 1; + } else + fdb->is_local = fdb->is_static = 0; - if (flags & NLM_F_REPLACE) - fdb->updated = fdb->used = jiffies; - fdb->is_local = fdb->is_static = 0; + fdb->updated = fdb->used = jiffies; + fdb_notify(fdb, RTM_NEWNEIGH); } - if (state & NUD_PERMANENT) - fdb->is_local = fdb->is_static = 1; - else if (state & NUD_NOARP) - fdb->is_static = 1; return 0; } @@ -627,6 +632,11 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) return -EINVAL; } + if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) { + pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state); + return -EINVAL; + } + p = br_port_get_rtnl(dev); if (p == NULL) { pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n", @@ -634,9 +644,15 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) return -EINVAL; } - spin_lock_bh(&p->br->hash_lock); - err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags); - spin_unlock_bh(&p->br->hash_lock); + if (ndm->ndm_flags & NTF_USE) { + rcu_read_lock(); + br_fdb_update(p->br, p, addr); + rcu_read_unlock(); + } else { + spin_lock_bh(&p->br->hash_lock); + err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags); + spin_unlock_bh(&p->br->hash_lock); + } return err; } diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index f603e5b0b930..0a942fbccc9a 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -296,10 +296,11 @@ int br_min_mtu(const struct net_bridge *br) /* * Recomputes features using slave's features */ -u32 br_features_recompute(struct net_bridge *br, u32 features) +netdev_features_t br_features_recompute(struct net_bridge *br, + netdev_features_t features) { struct net_bridge_port *p; - u32 mask; + netdev_features_t mask; if (list_empty(&br->port_list)) return features; diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index a5f4e5769809..7743e0d109ea 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -127,7 +127,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get( { struct br_ip br_dst; - ipv6_addr_copy(&br_dst.u.ip6, dst); + br_dst.u.ip6 = *dst; br_dst.proto = htons(ETH_P_IPV6); return br_mdb_ip_get(mdb, &br_dst); @@ -154,7 +154,7 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, break; #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) case htons(ETH_P_IPV6): - ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr); + ip.u.ip6 = ipv6_hdr(skb)->daddr; break; #endif default: @@ -474,7 +474,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, mldq->mld_cksum = 0; mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval)); mldq->mld_reserved = 0; - ipv6_addr_copy(&mldq->mld_mca, group); + mldq->mld_mca = *group; /* checksum 
*/ mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, @@ -783,7 +783,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br, if (!ipv6_is_transient_multicast(group)) return 0; - ipv6_addr_copy(&br_group.u.ip6, group); + br_group.u.ip6 = *group; br_group.proto = htons(ETH_P_IPV6); return br_multicast_add_group(br, port, &br_group); @@ -1344,7 +1344,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br, if (!ipv6_is_transient_multicast(group)) return; - ipv6_addr_copy(&br_group.u.ip6, group); + br_group.u.ip6 = *group; br_group.proto = htons(ETH_P_IPV6); br_multicast_leave_group(br, port, &br_group); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index d7d6fb05411f..4027029aa5e4 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -387,7 +387,8 @@ extern int br_add_if(struct net_bridge *br, extern int br_del_if(struct net_bridge *br, struct net_device *dev); extern int br_min_mtu(const struct net_bridge *br); -extern u32 br_features_recompute(struct net_bridge *br, u32 features); +extern netdev_features_t br_features_recompute(struct net_bridge *br, + netdev_features_t features); /* br_input.c */ extern int br_handle_frame_finish(struct sk_buff *skb); diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index f1fa1f6e658d..f7e8c70b343c 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -24,6 +24,7 @@ #include <net/caif/caif_layer.h> #include <net/caif/cfpkt.h> #include <net/caif/cfcnfg.h> +#include <net/caif/cfserl.h> MODULE_LICENSE("GPL"); @@ -53,7 +54,8 @@ struct cfcnfg *get_cfcnfg(struct net *net) struct caif_net *caifn; BUG_ON(!net); caifn = net_generic(net, caif_net_id); - BUG_ON(!caifn); + if (!caifn) + return NULL; return caifn->cfg; } EXPORT_SYMBOL(get_cfcnfg); @@ -63,7 +65,8 @@ static struct caif_device_entry_list *caif_device_list(struct net *net) struct caif_net *caifn; BUG_ON(!net); caifn = net_generic(net, caif_net_id); - BUG_ON(!caifn); + if (!caifn) + return NULL; return &caifn->caifdevs; } @@ -92,7 +95,8 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev) struct caif_device_entry *caifd; caifdevs = caif_device_list(dev_net(dev)); - BUG_ON(!caifdevs); + if (!caifdevs) + return NULL; caifd = kzalloc(sizeof(*caifd), GFP_KERNEL); if (!caifd) @@ -112,7 +116,9 @@ static struct caif_device_entry *caif_get(struct net_device *dev) struct caif_device_entry_list *caifdevs = caif_device_list(dev_net(dev)); struct caif_device_entry *caifd; - BUG_ON(!caifdevs); + if (!caifdevs) + return NULL; + list_for_each_entry_rcu(caifd, &caifdevs->list, list) { if (caifd->netdev == dev) return caifd; @@ -129,6 +135,8 @@ static int transmit(struct cflayer *layer, struct cfpkt *pkt) skb = cfpkt_tonative(pkt); skb->dev = caifd->netdev; + skb_reset_network_header(skb); + skb->protocol = htons(ETH_P_CAIF); err = dev_queue_xmit(skb); if (err > 0) @@ -172,7 +180,10 @@ static int receive(struct sk_buff *skb, struct net_device *dev, /* Release reference to stack upwards */ caifd_put(caifd); - return 0; + + if (err != 0) + err = NET_RX_DROP; + return err; } static struct packet_type caif_packet_type __read_mostly = { @@ -203,6 +214,55 @@ static void dev_flowctrl(struct net_device *dev, int on) caifd_put(caifd); } +void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, + struct cflayer *link_support, int head_room, + struct cflayer **layer, int (**rcv_func)( + struct sk_buff *, struct net_device *, + struct packet_type *, struct net_device *)) +{ + struct caif_device_entry *caifd; + 
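/*
 * The caif_dev hunks drop BUG_ON() for missing per-namespace state: both
 * get_cfcnfg() and caif_device_list() may now return NULL, so every caller has
 * to bail out quietly instead of oopsing during init/exit races.  Illustrative
 * caller shape, simplified from the notifier code that follows in the diff:
 */
static int example_caif_event(struct net_device *dev)
{
        struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
        struct caif_device_entry_list *caifdevs =
                caif_device_list(dev_net(dev));

        if (!cfg || !caifdevs)
                return 0;       /* CAIF not (yet) set up for this netns */

        /* ... proceed with enrolment as caif_device_notify() does ... */
        return 0;
}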
enum cfcnfg_phy_preference pref; + struct cfcnfg *cfg = get_cfcnfg(dev_net(dev)); + struct caif_device_entry_list *caifdevs; + + caifdevs = caif_device_list(dev_net(dev)); + if (!cfg || !caifdevs) + return; + caifd = caif_device_alloc(dev); + if (!caifd) + return; + *layer = &caifd->layer; + + switch (caifdev->link_select) { + case CAIF_LINK_HIGH_BANDW: + pref = CFPHYPREF_HIGH_BW; + break; + case CAIF_LINK_LOW_LATENCY: + pref = CFPHYPREF_LOW_LAT; + break; + default: + pref = CFPHYPREF_HIGH_BW; + break; + } + mutex_lock(&caifdevs->lock); + list_add_rcu(&caifd->list, &caifdevs->list); + + strncpy(caifd->layer.name, dev->name, + sizeof(caifd->layer.name) - 1); + caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0; + caifd->layer.transmit = transmit; + cfcnfg_add_phy_layer(cfg, + dev, + &caifd->layer, + pref, + link_support, + caifdev->use_fcs, + head_room); + mutex_unlock(&caifdevs->lock); + if (rcv_func) + *rcv_func = receive; +} + /* notify Caif of device events */ static int caif_device_notify(struct notifier_block *me, unsigned long what, void *arg) @@ -210,62 +270,40 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, struct net_device *dev = arg; struct caif_device_entry *caifd = NULL; struct caif_dev_common *caifdev; - enum cfcnfg_phy_preference pref; - enum cfcnfg_phy_type phy_type; struct cfcnfg *cfg; + struct cflayer *layer, *link_support; + int head_room = 0; struct caif_device_entry_list *caifdevs; - if (dev->type != ARPHRD_CAIF) - return 0; - cfg = get_cfcnfg(dev_net(dev)); - if (cfg == NULL) + caifdevs = caif_device_list(dev_net(dev)); + if (!cfg || !caifdevs) return 0; - caifdevs = caif_device_list(dev_net(dev)); + caifd = caif_get(dev); + if (caifd == NULL && dev->type != ARPHRD_CAIF) + return 0; switch (what) { case NETDEV_REGISTER: - caifd = caif_device_alloc(dev); - if (!caifd) - return 0; + if (caifd != NULL) + break; caifdev = netdev_priv(dev); - caifdev->flowctrl = dev_flowctrl; - caifd->layer.transmit = transmit; - - if (caifdev->use_frag) - phy_type = CFPHYTYPE_FRAG; - else - phy_type = CFPHYTYPE_CAIF; - - switch (caifdev->link_select) { - case CAIF_LINK_HIGH_BANDW: - pref = CFPHYPREF_HIGH_BW; - break; - case CAIF_LINK_LOW_LATENCY: - pref = CFPHYPREF_LOW_LAT; - break; - default: - pref = CFPHYPREF_HIGH_BW; - break; + link_support = NULL; + if (caifdev->use_frag) { + head_room = 1; + link_support = cfserl_create(dev->ifindex, + caifdev->use_stx); + if (!link_support) { + pr_warn("Out of memory\n"); + break; + } } - strncpy(caifd->layer.name, dev->name, - sizeof(caifd->layer.name) - 1); - caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0; - - mutex_lock(&caifdevs->lock); - list_add_rcu(&caifd->list, &caifdevs->list); - - cfcnfg_add_phy_layer(cfg, - phy_type, - dev, - &caifd->layer, - pref, - caifdev->use_fcs, - caifdev->use_stx); - mutex_unlock(&caifdevs->lock); + caif_enroll_dev(dev, caifdev, link_support, head_room, + &layer, NULL); + caifdev->flowctrl = dev_flowctrl; break; case NETDEV_UP: @@ -371,17 +409,14 @@ static void caif_exit_net(struct net *net) struct caif_device_entry *caifd, *tmp; struct caif_device_entry_list *caifdevs = caif_device_list(net); - struct cfcnfg *cfg; + struct cfcnfg *cfg = get_cfcnfg(net); + + if (!cfg || !caifdevs) + return; rtnl_lock(); mutex_lock(&caifdevs->lock); - cfg = get_cfcnfg(net); - if (cfg == NULL) { - mutex_unlock(&caifdevs->lock); - return; - } - list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) { int i = 0; list_del_rcu(&caifd->list); diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c 
index 00523ecc4ced..598aafb4cb51 100644 --- a/net/caif/cfcnfg.c +++ b/net/caif/cfcnfg.c @@ -45,8 +45,8 @@ struct cfcnfg_phyinfo { /* Interface index */ int ifindex; - /* Use Start of frame extension */ - bool use_stx; + /* Protocol head room added for CAIF link layer */ + int head_room; /* Use Start of frame checksum */ bool use_fcs; @@ -187,11 +187,11 @@ int caif_disconnect_client(struct net *net, struct cflayer *adap_layer) if (channel_id != 0) { struct cflayer *servl; servl = cfmuxl_remove_uplayer(cfg->mux, channel_id); + cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer); if (servl != NULL) layer_set_up(servl, NULL); } else pr_debug("nothing to disconnect\n"); - cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer); /* Do RCU sync before initiating cleanup */ synchronize_rcu(); @@ -350,9 +350,7 @@ int caif_connect_client(struct net *net, struct caif_connect_request *conn_req, *ifindex = phy->ifindex; *proto_tail = 2; - *proto_head = - - protohead[param.linktype] + (phy->use_stx ? 1 : 0); + *proto_head = protohead[param.linktype] + phy->head_room; rcu_read_unlock(); @@ -460,13 +458,13 @@ unlock: } void -cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, +cfcnfg_add_phy_layer(struct cfcnfg *cnfg, struct net_device *dev, struct cflayer *phy_layer, enum cfcnfg_phy_preference pref, - bool fcs, bool stx) + struct cflayer *link_support, + bool fcs, int head_room) { struct cflayer *frml; - struct cflayer *phy_driver = NULL; struct cfcnfg_phyinfo *phyinfo = NULL; int i; u8 phyid; @@ -482,26 +480,13 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, goto got_phyid; } pr_warn("Too many CAIF Link Layers (max 6)\n"); - goto out_err; + goto out; got_phyid: phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC); if (!phyinfo) goto out_err; - switch (phy_type) { - case CFPHYTYPE_FRAG: - phy_driver = - cfserl_create(CFPHYTYPE_FRAG, phyid, stx); - if (!phy_driver) - goto out_err; - break; - case CFPHYTYPE_CAIF: - phy_driver = NULL; - break; - default: - goto out_err; - } phy_layer->id = phyid; phyinfo->pref = pref; phyinfo->id = phyid; @@ -509,7 +494,7 @@ got_phyid: phyinfo->dev_info.dev = dev; phyinfo->phy_layer = phy_layer; phyinfo->ifindex = dev->ifindex; - phyinfo->use_stx = stx; + phyinfo->head_room = head_room; phyinfo->use_fcs = fcs; frml = cffrml_create(phyid, fcs); @@ -519,23 +504,23 @@ got_phyid: phyinfo->frm_layer = frml; layer_set_up(frml, cnfg->mux); - if (phy_driver != NULL) { - phy_driver->id = phyid; - layer_set_dn(frml, phy_driver); - layer_set_up(phy_driver, frml); - layer_set_dn(phy_driver, phy_layer); - layer_set_up(phy_layer, phy_driver); + if (link_support != NULL) { + link_support->id = phyid; + layer_set_dn(frml, link_support); + layer_set_up(link_support, frml); + layer_set_dn(link_support, phy_layer); + layer_set_up(phy_layer, link_support); } else { layer_set_dn(frml, phy_layer); layer_set_up(phy_layer, frml); } list_add_rcu(&phyinfo->node, &cnfg->phys); +out: mutex_unlock(&cnfg->lock); return; out_err: - kfree(phy_driver); kfree(phyinfo); mutex_unlock(&cnfg->lock); } diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index df08c47183d4..de5390700cfe 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c @@ -144,7 +144,8 @@ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len) } from = skb_pull(skb, len); from -= len; - memcpy(data, from, len); + if (data) + memcpy(data, from, len); return 0; } diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c index 797c8d165993..8e68b97f13ee 100644 --- 
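/*
 * cfcnfg_add_phy_layer() above now receives an already-created link_support
 * layer (cfserl for fragmenting devices) instead of instantiating it from a
 * phy_type, and simply splices it between the framing layer and the phy.
 * The resulting per-device stack, top to bottom:
 *
 *     mux  <->  frml (framing/FCS)  [ <-> link_support (cfserl) ]  <->  phy
 *
 * Sketch of the generic "splice an optional layer" step using the
 * layer_set_up()/layer_set_dn() helpers from the diff (helper name
 * illustrative):
 */
static void example_splice_layer(struct cflayer *above,
                                 struct cflayer *optional,
                                 struct cflayer *below)
{
        if (optional) {
                layer_set_dn(above, optional);
                layer_set_up(optional, above);
                layer_set_dn(optional, below);
                layer_set_up(below, optional);
        } else {
                layer_set_dn(above, below);
                layer_set_up(below, above);
        }
}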
a/net/caif/cfserl.c +++ b/net/caif/cfserl.c @@ -31,7 +31,7 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt); static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, int phyid); -struct cflayer *cfserl_create(int type, int instance, bool use_stx) +struct cflayer *cfserl_create(int instance, bool use_stx) { struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC); if (!this) @@ -40,7 +40,6 @@ struct cflayer *cfserl_create(int type, int instance, bool use_stx) this->layer.receive = cfserl_receive; this->layer.transmit = cfserl_transmit; this->layer.ctrlcmd = cfserl_ctrlcmd; - this->layer.type = type; this->usestx = use_stx; spin_lock_init(&this->sync); snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1"); diff --git a/net/core/Makefile b/net/core/Makefile index 0d357b1c4e57..c4ecc864020f 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -3,7 +3,7 @@ # obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \ - gen_stats.o gen_estimator.o net_namespace.o secure_seq.o + gen_stats.o gen_estimator.o net_namespace.o secure_seq.o flow_dissector.o obj-$(CONFIG_SYSCTL) += sysctl_net_core.o @@ -19,3 +19,4 @@ obj-$(CONFIG_FIB_RULES) += fib_rules.o obj-$(CONFIG_TRACEPOINTS) += net-traces.o obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o +obj-$(CONFIG_NETPRIO_CGROUP) += netprio_cgroup.o diff --git a/net/core/dev.c b/net/core/dev.c index 5a13edfc9f73..f494675471a9 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -133,10 +133,9 @@ #include <linux/pci.h> #include <linux/inetdevice.h> #include <linux/cpu_rmap.h> -#include <linux/if_tunnel.h> -#include <linux/if_pppox.h> -#include <linux/ppp_defs.h> #include <linux/net_tstamp.h> +#include <linux/jump_label.h> +#include <net/flow_keys.h> #include "net-sysfs.h" @@ -1320,8 +1319,6 @@ EXPORT_SYMBOL(dev_close); */ void dev_disable_lro(struct net_device *dev) { - u32 flags; - /* * If we're trying to disable lro on a vlan device * use the underlying physical device instead @@ -1329,15 +1326,9 @@ void dev_disable_lro(struct net_device *dev) if (is_vlan_dev(dev)) dev = vlan_dev_real_dev(dev); - if (dev->ethtool_ops && dev->ethtool_ops->get_flags) - flags = dev->ethtool_ops->get_flags(dev); - else - flags = ethtool_op_get_flags(dev); + dev->wanted_features &= ~NETIF_F_LRO; + netdev_update_features(dev); - if (!(flags & ETH_FLAG_LRO)) - return; - - __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO); if (unlikely(dev->features & NETIF_F_LRO)) netdev_WARN(dev, "failed to disable LRO!\n"); } @@ -1450,34 +1441,55 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev) } EXPORT_SYMBOL(call_netdevice_notifiers); -/* When > 0 there are consumers of rx skb time stamps */ -static atomic_t netstamp_needed = ATOMIC_INIT(0); +static struct jump_label_key netstamp_needed __read_mostly; +#ifdef HAVE_JUMP_LABEL +/* We are not allowed to call jump_label_dec() from irq context + * If net_disable_timestamp() is called from irq context, defer the + * jump_label_dec() calls. 
+ */ +static atomic_t netstamp_needed_deferred; +#endif void net_enable_timestamp(void) { - atomic_inc(&netstamp_needed); +#ifdef HAVE_JUMP_LABEL + int deferred = atomic_xchg(&netstamp_needed_deferred, 0); + + if (deferred) { + while (--deferred) + jump_label_dec(&netstamp_needed); + return; + } +#endif + WARN_ON(in_interrupt()); + jump_label_inc(&netstamp_needed); } EXPORT_SYMBOL(net_enable_timestamp); void net_disable_timestamp(void) { - atomic_dec(&netstamp_needed); +#ifdef HAVE_JUMP_LABEL + if (in_interrupt()) { + atomic_inc(&netstamp_needed_deferred); + return; + } +#endif + jump_label_dec(&netstamp_needed); } EXPORT_SYMBOL(net_disable_timestamp); static inline void net_timestamp_set(struct sk_buff *skb) { - if (atomic_read(&netstamp_needed)) + skb->tstamp.tv64 = 0; + if (static_branch(&netstamp_needed)) __net_timestamp(skb); - else - skb->tstamp.tv64 = 0; } -static inline void net_timestamp_check(struct sk_buff *skb) -{ - if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed)) - __net_timestamp(skb); -} +#define net_timestamp_check(COND, SKB) \ + if (static_branch(&netstamp_needed)) { \ + if ((COND) && !(SKB)->tstamp.tv64) \ + __net_timestamp(SKB); \ + } \ static int net_hwtstamp_validate(struct ifreq *ifr) { @@ -1924,7 +1936,8 @@ EXPORT_SYMBOL(skb_checksum_help); * It may return NULL if the skb requires no segmentation. This is * only possible when GSO is used for verifying header integrity. */ -struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features) +struct sk_buff *skb_gso_segment(struct sk_buff *skb, + netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); struct packet_type *ptype; @@ -1954,9 +1967,9 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features) if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) dev->ethtool_ops->get_drvinfo(dev, &info); - WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n", - info.driver, dev ? dev->features : 0L, - skb->sk ? skb->sk->sk_route_caps : 0L, + WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d ip_summed=%d\n", + info.driver, dev ? &dev->features : NULL, + skb->sk ? &skb->sk->sk_route_caps : NULL, skb->len, skb->data_len, skb->ip_summed); if (skb_header_cloned(skb) && @@ -2065,7 +2078,7 @@ static void dev_gso_skb_destructor(struct sk_buff *skb) * This function segments the given skb and stores the list of segments * in skb->next. 
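/*
 * net_enable_timestamp()/net_disable_timestamp() and the RPS path below move
 * from an atomic counter tested on every packet to a jump label:
 * static_branch() is patched at runtime, so the disabled case costs no load or
 * compare in the fast path, and, as the comment above notes, decrements from
 * IRQ context have to be deferred.  Generic shape of the pattern with the API
 * names this series uses (jump_label_key, jump_label_inc/dec, static_branch);
 * the feature and handler names are illustrative:
 */
#include <linux/jump_label.h>

static void example_do_bookkeeping(struct sk_buff *skb);  /* hypothetical */

static struct jump_label_key example_feature_needed __read_mostly;

void example_feature_enable(void)
{
        jump_label_inc(&example_feature_needed);
}

void example_feature_disable(void)
{
        jump_label_dec(&example_feature_needed);
}

static inline void example_hot_path(struct sk_buff *skb)
{
        if (static_branch(&example_feature_needed))
                example_do_bookkeeping(skb);
}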
*/ -static int dev_gso_segment(struct sk_buff *skb, int features) +static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs; @@ -2104,7 +2117,7 @@ static inline void skb_orphan_try(struct sk_buff *skb) } } -static bool can_checksum_protocol(unsigned long features, __be16 protocol) +static bool can_checksum_protocol(netdev_features_t features, __be16 protocol) { return ((features & NETIF_F_GEN_CSUM) || ((features & NETIF_F_V4_CSUM) && @@ -2115,7 +2128,8 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol) protocol == htons(ETH_P_FCOE))); } -static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features) +static netdev_features_t harmonize_features(struct sk_buff *skb, + __be16 protocol, netdev_features_t features) { if (!can_checksum_protocol(features, protocol)) { features &= ~NETIF_F_ALL_CSUM; @@ -2127,10 +2141,10 @@ static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features return features; } -u32 netif_skb_features(struct sk_buff *skb) +netdev_features_t netif_skb_features(struct sk_buff *skb) { __be16 protocol = skb->protocol; - u32 features = skb->dev->features; + netdev_features_t features = skb->dev->features; if (protocol == htons(ETH_P_8021Q)) { struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; @@ -2176,7 +2190,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, unsigned int skb_len; if (likely(!skb->next)) { - u32 features; + netdev_features_t features; /* * If device doesn't need skb->dst, release it right now while @@ -2257,7 +2271,7 @@ gso: return rc; } txq_trans_update(txq); - if (unlikely(netif_tx_queue_stopped(txq) && skb->next)) + if (unlikely(netif_xmit_stopped(txq) && skb->next)) return NETDEV_TX_BUSY; } while (skb->next); @@ -2457,6 +2471,18 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, return rc; } +#if IS_ENABLED(CONFIG_NETPRIO_CGROUP) +static void skb_update_prio(struct sk_buff *skb) +{ + struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap); + + if ((!skb->priority) && (skb->sk) && map) + skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx]; +} +#else +#define skb_update_prio(skb) +#endif + static DEFINE_PER_CPU(int, xmit_recursion); #define RECURSION_LIMIT 10 @@ -2497,6 +2523,8 @@ int dev_queue_xmit(struct sk_buff *skb) */ rcu_read_lock_bh(); + skb_update_prio(skb); + txq = dev_pick_tx(dev, skb); q = rcu_dereference_bh(txq->qdisc); @@ -2531,7 +2559,7 @@ int dev_queue_xmit(struct sk_buff *skb) HARD_TX_LOCK(dev, txq, cpu); - if (!netif_tx_queue_stopped(txq)) { + if (!netif_xmit_stopped(txq)) { __this_cpu_inc(xmit_recursion); rc = dev_hard_start_xmit(skb, dev, txq); __this_cpu_dec(xmit_recursion); @@ -2592,123 +2620,28 @@ static inline void ____napi_schedule(struct softnet_data *sd, */ void __skb_get_rxhash(struct sk_buff *skb) { - int nhoff, hash = 0, poff; - const struct ipv6hdr *ip6; - const struct iphdr *ip; - const struct vlan_hdr *vlan; - u8 ip_proto; - u32 addr1, addr2; - u16 proto; - union { - u32 v32; - u16 v16[2]; - } ports; - - nhoff = skb_network_offset(skb); - proto = skb->protocol; - -again: - switch (proto) { - case __constant_htons(ETH_P_IP): -ip: - if (!pskb_may_pull(skb, sizeof(*ip) + nhoff)) - goto done; - - ip = (const struct iphdr *) (skb->data + nhoff); - if (ip_is_fragment(ip)) - ip_proto = 0; - else - ip_proto = ip->protocol; - addr1 = (__force u32) ip->saddr; - addr2 = (__force u32) ip->daddr; - nhoff += ip->ihl * 4; - break; - case __constant_htons(ETH_P_IPV6): -ipv6: 
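/*
 * skb_update_prio() above runs inside the rcu_read_lock_bh() section that
 * dev_queue_xmit() already holds, hence rcu_dereference_bh() for the device's
 * priomap, and it only fills in a priority when the sending socket left
 * skb->priority at zero, so an explicit SO_PRIORITY still wins.  The same
 * lookup written as an early-return sketch (name illustrative):
 */
static void example_update_prio(struct sk_buff *skb)
{
        struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

        if (skb->priority || !skb->sk || !map)
                return;

        skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
}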
- if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff)) - goto done; - - ip6 = (const struct ipv6hdr *) (skb->data + nhoff); - ip_proto = ip6->nexthdr; - addr1 = (__force u32) ip6->saddr.s6_addr32[3]; - addr2 = (__force u32) ip6->daddr.s6_addr32[3]; - nhoff += 40; - break; - case __constant_htons(ETH_P_8021Q): - if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff)) - goto done; - vlan = (const struct vlan_hdr *) (skb->data + nhoff); - proto = vlan->h_vlan_encapsulated_proto; - nhoff += sizeof(*vlan); - goto again; - case __constant_htons(ETH_P_PPP_SES): - if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff)) - goto done; - proto = *((__be16 *) (skb->data + nhoff + - sizeof(struct pppoe_hdr))); - nhoff += PPPOE_SES_HLEN; - switch (proto) { - case __constant_htons(PPP_IP): - goto ip; - case __constant_htons(PPP_IPV6): - goto ipv6; - default: - goto done; - } - default: - goto done; - } - - switch (ip_proto) { - case IPPROTO_GRE: - if (pskb_may_pull(skb, nhoff + 16)) { - u8 *h = skb->data + nhoff; - __be16 flags = *(__be16 *)h; + struct flow_keys keys; + u32 hash; - /* - * Only look inside GRE if version zero and no - * routing - */ - if (!(flags & (GRE_VERSION|GRE_ROUTING))) { - proto = *(__be16 *)(h + 2); - nhoff += 4; - if (flags & GRE_CSUM) - nhoff += 4; - if (flags & GRE_KEY) - nhoff += 4; - if (flags & GRE_SEQ) - nhoff += 4; - goto again; - } - } - break; - case IPPROTO_IPIP: - goto again; - default: - break; - } + if (!skb_flow_dissect(skb, &keys)) + return; - ports.v32 = 0; - poff = proto_ports_offset(ip_proto); - if (poff >= 0) { - nhoff += poff; - if (pskb_may_pull(skb, nhoff + 4)) { - ports.v32 = * (__force u32 *) (skb->data + nhoff); - if (ports.v16[1] < ports.v16[0]) - swap(ports.v16[0], ports.v16[1]); - skb->l4_rxhash = 1; - } + if (keys.ports) { + if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]) + swap(keys.port16[0], keys.port16[1]); + skb->l4_rxhash = 1; } /* get a consistent hash (same value on both flow directions) */ - if (addr2 < addr1) - swap(addr1, addr2); + if ((__force u32)keys.dst < (__force u32)keys.src) + swap(keys.dst, keys.src); - hash = jhash_3words(addr1, addr2, ports.v32, hashrnd); + hash = jhash_3words((__force u32)keys.dst, + (__force u32)keys.src, + (__force u32)keys.ports, hashrnd); if (!hash) hash = 1; -done: skb->rxhash = hash; } EXPORT_SYMBOL(__skb_get_rxhash); @@ -2719,6 +2652,8 @@ EXPORT_SYMBOL(__skb_get_rxhash); struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly; EXPORT_SYMBOL(rps_sock_flow_table); +struct jump_label_key rps_needed __read_mostly; + static struct rps_dev_flow * set_rps_cpu(struct net_device *dev, struct sk_buff *skb, struct rps_dev_flow *rflow, u16 next_cpu) @@ -2998,12 +2933,11 @@ int netif_rx(struct sk_buff *skb) if (netpoll_rx(skb)) return NET_RX_DROP; - if (netdev_tstamp_prequeue) - net_timestamp_check(skb); + net_timestamp_check(netdev_tstamp_prequeue, skb); trace_netif_rx(skb); #ifdef CONFIG_RPS - { + if (static_branch(&rps_needed)) { struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu; @@ -3018,14 +2952,13 @@ int netif_rx(struct sk_buff *skb) rcu_read_unlock(); preempt_enable(); - } -#else + } else +#endif { unsigned int qtail; ret = enqueue_to_backlog(skb, get_cpu(), &qtail); put_cpu(); } -#endif return ret; } EXPORT_SYMBOL(netif_rx); @@ -3231,8 +3164,7 @@ static int __netif_receive_skb(struct sk_buff *skb) int ret = NET_RX_DROP; __be16 type; - if (!netdev_tstamp_prequeue) - net_timestamp_check(skb); + net_timestamp_check(!netdev_tstamp_prequeue, skb); trace_netif_receive_skb(skb); @@ -3363,14 +3295,13 @@ 
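/*
 * The rewritten __skb_get_rxhash() above leaves all header parsing to
 * skb_flow_dissect() and keeps only the hashing policy: order the ports and
 * addresses so both directions of a flow produce the same value, then
 * jhash_3words() them.  The same recipe as a standalone helper over a
 * struct flow_keys (assumes <net/flow_keys.h> from this series; the function
 * name is illustrative):
 */
static u32 example_symmetric_flow_hash(struct flow_keys *keys, u32 seed)
{
        u32 hash;

        if ((__force u16)keys->port16[1] < (__force u16)keys->port16[0])
                swap(keys->port16[0], keys->port16[1]);

        if ((__force u32)keys->dst < (__force u32)keys->src)
                swap(keys->dst, keys->src);

        hash = jhash_3words((__force u32)keys->dst,
                            (__force u32)keys->src,
                            (__force u32)keys->ports, seed);

        return hash ? : 1;      /* never return 0, as in the diff */
}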
out: */ int netif_receive_skb(struct sk_buff *skb) { - if (netdev_tstamp_prequeue) - net_timestamp_check(skb); + net_timestamp_check(netdev_tstamp_prequeue, skb); if (skb_defer_rx_timestamp(skb)) return NET_RX_SUCCESS; #ifdef CONFIG_RPS - { + if (static_branch(&rps_needed)) { struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu, ret; @@ -3381,16 +3312,12 @@ int netif_receive_skb(struct sk_buff *skb) if (cpu >= 0) { ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); rcu_read_unlock(); - } else { - rcu_read_unlock(); - ret = __netif_receive_skb(skb); + return ret; } - - return ret; + rcu_read_unlock(); } -#else - return __netif_receive_skb(skb); #endif + return __netif_receive_skb(skb); } EXPORT_SYMBOL(netif_receive_skb); @@ -4539,7 +4466,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags) static int __dev_set_promiscuity(struct net_device *dev, int inc) { - unsigned short old_flags = dev->flags; + unsigned int old_flags = dev->flags; uid_t uid; gid_t gid; @@ -4596,7 +4523,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc) */ int dev_set_promiscuity(struct net_device *dev, int inc) { - unsigned short old_flags = dev->flags; + unsigned int old_flags = dev->flags; int err; err = __dev_set_promiscuity(dev, inc); @@ -4623,7 +4550,7 @@ EXPORT_SYMBOL(dev_set_promiscuity); int dev_set_allmulti(struct net_device *dev, int inc) { - unsigned short old_flags = dev->flags; + unsigned int old_flags = dev->flags; ASSERT_RTNL(); @@ -4726,7 +4653,7 @@ EXPORT_SYMBOL(dev_get_flags); int __dev_change_flags(struct net_device *dev, unsigned int flags) { - int old_flags = dev->flags; + unsigned int old_flags = dev->flags; int ret; ASSERT_RTNL(); @@ -4809,10 +4736,10 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags) * Change settings on device based state flags. The flags are * in the userspace exported format. */ -int dev_change_flags(struct net_device *dev, unsigned flags) +int dev_change_flags(struct net_device *dev, unsigned int flags) { - int ret, changes; - int old_flags = dev->flags; + int ret; + unsigned int changes, old_flags = dev->flags; ret = __dev_change_flags(dev, flags); if (ret < 0) @@ -5369,7 +5296,8 @@ static void rollback_registered(struct net_device *dev) list_del(&single); } -static u32 netdev_fix_features(struct net_device *dev, u32 features) +static netdev_features_t netdev_fix_features(struct net_device *dev, + netdev_features_t features) { /* Fix illegal checksum combinations */ if ((features & NETIF_F_HW_CSUM) && @@ -5378,12 +5306,6 @@ static u32 netdev_fix_features(struct net_device *dev, u32 features) features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); } - if ((features & NETIF_F_NO_CSUM) && - (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) { - netdev_warn(dev, "mixed no checksumming and other settings.\n"); - features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM); - } - /* Fix illegal SG+CSUM combinations. 
*/ if ((features & NETIF_F_SG) && !(features & NETIF_F_ALL_CSUM)) { @@ -5431,7 +5353,7 @@ static u32 netdev_fix_features(struct net_device *dev, u32 features) int __netdev_update_features(struct net_device *dev) { - u32 features; + netdev_features_t features; int err = 0; ASSERT_RTNL(); @@ -5447,16 +5369,16 @@ int __netdev_update_features(struct net_device *dev) if (dev->features == features) return 0; - netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n", - dev->features, features); + netdev_dbg(dev, "Features changed: %pNF -> %pNF\n", + &dev->features, &features); if (dev->netdev_ops->ndo_set_features) err = dev->netdev_ops->ndo_set_features(dev, features); if (unlikely(err < 0)) { netdev_err(dev, - "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n", - err, features, dev->features); + "set_features() failed (%d); wanted %pNF, left %pNF\n", + err, &features, &dev->features); return -1; } @@ -5555,6 +5477,9 @@ static void netdev_init_one_queue(struct net_device *dev, queue->xmit_lock_owner = -1; netdev_queue_numa_node_write(queue, NUMA_NO_NODE); queue->dev = dev; +#ifdef CONFIG_BQL + dql_init(&queue->dql, HZ); +#endif } static int netif_alloc_netdev_queues(struct net_device *dev) @@ -5640,11 +5565,12 @@ int register_netdevice(struct net_device *dev) dev->wanted_features = dev->features & dev->hw_features; /* Turn on no cache copy if HW is doing checksum */ - dev->hw_features |= NETIF_F_NOCACHE_COPY; - if ((dev->features & NETIF_F_ALL_CSUM) && - !(dev->features & NETIF_F_NO_CSUM)) { - dev->wanted_features |= NETIF_F_NOCACHE_COPY; - dev->features |= NETIF_F_NOCACHE_COPY; + if (!(dev->flags & IFF_LOOPBACK)) { + dev->hw_features |= NETIF_F_NOCACHE_COPY; + if (dev->features & NETIF_F_ALL_CSUM) { + dev->wanted_features |= NETIF_F_NOCACHE_COPY; + dev->features |= NETIF_F_NOCACHE_COPY; + } } /* Make NETIF_F_HIGHDMA inheritable to VLAN devices. @@ -6380,7 +6306,8 @@ static int dev_cpu_callback(struct notifier_block *nfb, * @one to the master device with current feature set @all. Will not * enable anything that is off in @mask. Returns the new feature set. */ -u32 netdev_increment_features(u32 all, u32 one, u32 mask) +netdev_features_t netdev_increment_features(netdev_features_t all, + netdev_features_t one, netdev_features_t mask) { if (mask & NETIF_F_GEN_CSUM) mask |= NETIF_F_ALL_CSUM; @@ -6389,10 +6316,6 @@ u32 netdev_increment_features(u32 all, u32 one, u32 mask) all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask; all &= one | ~NETIF_F_ALL_FOR_ALL; - /* If device needs checksumming, downgrade to it. */ - if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM)) - all &= ~NETIF_F_NO_CSUM; - /* If one device supports hw checksumming, set for all. 
*/ if (all & NETIF_F_GEN_CSUM) all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); diff --git a/net/core/ethtool.c b/net/core/ethtool.c index f44481707124..31b0b7f5383e 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -36,235 +36,44 @@ u32 ethtool_op_get_link(struct net_device *dev) } EXPORT_SYMBOL(ethtool_op_get_link); -u32 ethtool_op_get_tx_csum(struct net_device *dev) -{ - return (dev->features & NETIF_F_ALL_CSUM) != 0; -} -EXPORT_SYMBOL(ethtool_op_get_tx_csum); - -int ethtool_op_set_tx_csum(struct net_device *dev, u32 data) -{ - if (data) - dev->features |= NETIF_F_IP_CSUM; - else - dev->features &= ~NETIF_F_IP_CSUM; - - return 0; -} -EXPORT_SYMBOL(ethtool_op_set_tx_csum); - -int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data) -{ - if (data) - dev->features |= NETIF_F_HW_CSUM; - else - dev->features &= ~NETIF_F_HW_CSUM; - - return 0; -} -EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum); - -int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data) -{ - if (data) - dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - else - dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); - - return 0; -} -EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum); - -u32 ethtool_op_get_sg(struct net_device *dev) -{ - return (dev->features & NETIF_F_SG) != 0; -} -EXPORT_SYMBOL(ethtool_op_get_sg); - -int ethtool_op_set_sg(struct net_device *dev, u32 data) -{ - if (data) - dev->features |= NETIF_F_SG; - else - dev->features &= ~NETIF_F_SG; - - return 0; -} -EXPORT_SYMBOL(ethtool_op_set_sg); - -u32 ethtool_op_get_tso(struct net_device *dev) -{ - return (dev->features & NETIF_F_TSO) != 0; -} -EXPORT_SYMBOL(ethtool_op_get_tso); - -int ethtool_op_set_tso(struct net_device *dev, u32 data) -{ - if (data) - dev->features |= NETIF_F_TSO; - else - dev->features &= ~NETIF_F_TSO; - - return 0; -} -EXPORT_SYMBOL(ethtool_op_set_tso); - -u32 ethtool_op_get_ufo(struct net_device *dev) -{ - return (dev->features & NETIF_F_UFO) != 0; -} -EXPORT_SYMBOL(ethtool_op_get_ufo); - -int ethtool_op_set_ufo(struct net_device *dev, u32 data) -{ - if (data) - dev->features |= NETIF_F_UFO; - else - dev->features &= ~NETIF_F_UFO; - return 0; -} -EXPORT_SYMBOL(ethtool_op_set_ufo); - -/* the following list of flags are the same as their associated - * NETIF_F_xxx values in include/linux/netdevice.h - */ -static const u32 flags_dup_features = - (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | ETH_FLAG_NTUPLE | - ETH_FLAG_RXHASH); - -u32 ethtool_op_get_flags(struct net_device *dev) -{ - /* in the future, this function will probably contain additional - * handling for flags which are not so easily handled - * by a simple masking operation - */ - - return dev->features & flags_dup_features; -} -EXPORT_SYMBOL(ethtool_op_get_flags); - -/* Check if device can enable (or disable) particular feature coded in "data" - * argument. Flags "supported" describe features that can be toggled by device. - * If feature can not be toggled, it state (enabled or disabled) must match - * hardcoded device features state, otherwise flags are marked as invalid. 
- */ -bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported) -{ - u32 features = dev->features & flags_dup_features; - /* "data" can contain only flags_dup_features bits, - * see __ethtool_set_flags */ - - return (features & ~supported) != (data & ~supported); -} -EXPORT_SYMBOL(ethtool_invalid_flags); - -int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) -{ - if (ethtool_invalid_flags(dev, data, supported)) - return -EINVAL; - - dev->features = ((dev->features & ~flags_dup_features) | - (data & flags_dup_features)); - return 0; -} -EXPORT_SYMBOL(ethtool_op_set_flags); - /* Handlers for each ethtool command */ -#define ETHTOOL_DEV_FEATURE_WORDS 1 - -static void ethtool_get_features_compat(struct net_device *dev, - struct ethtool_get_features_block *features) -{ - if (!dev->ethtool_ops) - return; - - /* getting RX checksum */ - if (dev->ethtool_ops->get_rx_csum) - if (dev->ethtool_ops->get_rx_csum(dev)) - features[0].active |= NETIF_F_RXCSUM; - - /* mark legacy-changeable features */ - if (dev->ethtool_ops->set_sg) - features[0].available |= NETIF_F_SG; - if (dev->ethtool_ops->set_tx_csum) - features[0].available |= NETIF_F_ALL_CSUM; - if (dev->ethtool_ops->set_tso) - features[0].available |= NETIF_F_ALL_TSO; - if (dev->ethtool_ops->set_rx_csum) - features[0].available |= NETIF_F_RXCSUM; - if (dev->ethtool_ops->set_flags) - features[0].available |= flags_dup_features; -} - -static int ethtool_set_feature_compat(struct net_device *dev, - int (*legacy_set)(struct net_device *, u32), - struct ethtool_set_features_block *features, u32 mask) -{ - u32 do_set; - - if (!legacy_set) - return 0; - - if (!(features[0].valid & mask)) - return 0; - - features[0].valid &= ~mask; - - do_set = !!(features[0].requested & mask); - - if (legacy_set(dev, do_set) < 0) - netdev_info(dev, - "Legacy feature change (%s) failed for 0x%08x\n", - do_set ? 
"set" : "clear", mask); - - return 1; -} - -static int ethtool_set_flags_compat(struct net_device *dev, - int (*legacy_set)(struct net_device *, u32), - struct ethtool_set_features_block *features, u32 mask) -{ - u32 value; - - if (!legacy_set) - return 0; - - if (!(features[0].valid & mask)) - return 0; - - value = dev->features & ~features[0].valid; - value |= features[0].requested; - - features[0].valid &= ~mask; - - if (legacy_set(dev, value & mask) < 0) - netdev_info(dev, "Legacy flags change failed\n"); - - return 1; -} - -static int ethtool_set_features_compat(struct net_device *dev, - struct ethtool_set_features_block *features) -{ - int compat; - - if (!dev->ethtool_ops) - return 0; - - compat = ethtool_set_feature_compat(dev, dev->ethtool_ops->set_sg, - features, NETIF_F_SG); - compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tx_csum, - features, NETIF_F_ALL_CSUM); - compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tso, - features, NETIF_F_ALL_TSO); - compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum, - features, NETIF_F_RXCSUM); - compat |= ethtool_set_flags_compat(dev, dev->ethtool_ops->set_flags, - features, flags_dup_features); - - return compat; -} +#define ETHTOOL_DEV_FEATURE_WORDS ((NETDEV_FEATURE_COUNT + 31) / 32) + +static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = { + [NETIF_F_SG_BIT] = "tx-scatter-gather", + [NETIF_F_IP_CSUM_BIT] = "tx-checksum-ipv4", + [NETIF_F_HW_CSUM_BIT] = "tx-checksum-ip-generic", + [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6", + [NETIF_F_HIGHDMA_BIT] = "highdma", + [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist", + [NETIF_F_HW_VLAN_TX_BIT] = "tx-vlan-hw-insert", + + [NETIF_F_HW_VLAN_RX_BIT] = "rx-vlan-hw-parse", + [NETIF_F_HW_VLAN_FILTER_BIT] = "rx-vlan-filter", + [NETIF_F_VLAN_CHALLENGED_BIT] = "vlan-challenged", + [NETIF_F_GSO_BIT] = "tx-generic-segmentation", + [NETIF_F_LLTX_BIT] = "tx-lockless", + [NETIF_F_NETNS_LOCAL_BIT] = "netns-local", + [NETIF_F_GRO_BIT] = "rx-gro", + [NETIF_F_LRO_BIT] = "rx-lro", + + [NETIF_F_TSO_BIT] = "tx-tcp-segmentation", + [NETIF_F_UFO_BIT] = "tx-udp-fragmentation", + [NETIF_F_GSO_ROBUST_BIT] = "tx-gso-robust", + [NETIF_F_TSO_ECN_BIT] = "tx-tcp-ecn-segmentation", + [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation", + [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation", + + [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", + [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp", + [NETIF_F_FCOE_MTU_BIT] = "fcoe-mtu", + [NETIF_F_NTUPLE_BIT] = "rx-ntuple-filter", + [NETIF_F_RXHASH_BIT] = "rx-hashing", + [NETIF_F_RXCSUM_BIT] = "rx-checksum", + [NETIF_F_NOCACHE_COPY_BIT] = "tx-nocache-copy", + [NETIF_F_LOOPBACK_BIT] = "loopback", +}; static int ethtool_get_features(struct net_device *dev, void __user *useraddr) { @@ -272,18 +81,21 @@ static int ethtool_get_features(struct net_device *dev, void __user *useraddr) .cmd = ETHTOOL_GFEATURES, .size = ETHTOOL_DEV_FEATURE_WORDS, }; - struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS] = { - { - .available = dev->hw_features, - .requested = dev->wanted_features, - .active = dev->features, - .never_changed = NETIF_F_NEVER_CHANGE, - }, - }; + struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS]; u32 __user *sizeaddr; u32 copy_size; + int i; - ethtool_get_features_compat(dev, features); + /* in case feature bits run out again */ + BUILD_BUG_ON(ETHTOOL_DEV_FEATURE_WORDS * sizeof(u32) > sizeof(netdev_features_t)); + + for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) { + 
features[i].available = (u32)(dev->hw_features >> (32 * i)); + features[i].requested = (u32)(dev->wanted_features >> (32 * i)); + features[i].active = (u32)(dev->features >> (32 * i)); + features[i].never_changed = + (u32)(NETIF_F_NEVER_CHANGE >> (32 * i)); + } sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size); if (get_user(copy_size, sizeaddr)) @@ -305,7 +117,8 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr) { struct ethtool_sfeatures cmd; struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS]; - int ret = 0; + netdev_features_t wanted = 0, valid = 0; + int i, ret = 0; if (copy_from_user(&cmd, useraddr, sizeof(cmd))) return -EFAULT; @@ -317,65 +130,29 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr) if (copy_from_user(features, useraddr, sizeof(features))) return -EFAULT; - if (features[0].valid & ~NETIF_F_ETHTOOL_BITS) - return -EINVAL; + for (i = 0; i < ETHTOOL_DEV_FEATURE_WORDS; ++i) { + valid |= (netdev_features_t)features[i].valid << (32 * i); + wanted |= (netdev_features_t)features[i].requested << (32 * i); + } - if (ethtool_set_features_compat(dev, features)) - ret |= ETHTOOL_F_COMPAT; + if (valid & ~NETIF_F_ETHTOOL_BITS) + return -EINVAL; - if (features[0].valid & ~dev->hw_features) { - features[0].valid &= dev->hw_features; + if (valid & ~dev->hw_features) { + valid &= dev->hw_features; ret |= ETHTOOL_F_UNSUPPORTED; } - dev->wanted_features &= ~features[0].valid; - dev->wanted_features |= features[0].valid & features[0].requested; + dev->wanted_features &= ~valid; + dev->wanted_features |= wanted & valid; __netdev_update_features(dev); - if ((dev->wanted_features ^ dev->features) & features[0].valid) + if ((dev->wanted_features ^ dev->features) & valid) ret |= ETHTOOL_F_WISH; return ret; } -static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GSTRING_LEN] = { - /* NETIF_F_SG */ "tx-scatter-gather", - /* NETIF_F_IP_CSUM */ "tx-checksum-ipv4", - /* NETIF_F_NO_CSUM */ "tx-checksum-unneeded", - /* NETIF_F_HW_CSUM */ "tx-checksum-ip-generic", - /* NETIF_F_IPV6_CSUM */ "tx-checksum-ipv6", - /* NETIF_F_HIGHDMA */ "highdma", - /* NETIF_F_FRAGLIST */ "tx-scatter-gather-fraglist", - /* NETIF_F_HW_VLAN_TX */ "tx-vlan-hw-insert", - - /* NETIF_F_HW_VLAN_RX */ "rx-vlan-hw-parse", - /* NETIF_F_HW_VLAN_FILTER */ "rx-vlan-filter", - /* NETIF_F_VLAN_CHALLENGED */ "vlan-challenged", - /* NETIF_F_GSO */ "tx-generic-segmentation", - /* NETIF_F_LLTX */ "tx-lockless", - /* NETIF_F_NETNS_LOCAL */ "netns-local", - /* NETIF_F_GRO */ "rx-gro", - /* NETIF_F_LRO */ "rx-lro", - - /* NETIF_F_TSO */ "tx-tcp-segmentation", - /* NETIF_F_UFO */ "tx-udp-fragmentation", - /* NETIF_F_GSO_ROBUST */ "tx-gso-robust", - /* NETIF_F_TSO_ECN */ "tx-tcp-ecn-segmentation", - /* NETIF_F_TSO6 */ "tx-tcp6-segmentation", - /* NETIF_F_FSO */ "tx-fcoe-segmentation", - "", - "", - - /* NETIF_F_FCOE_CRC */ "tx-checksum-fcoe-crc", - /* NETIF_F_SCTP_CSUM */ "tx-checksum-sctp", - /* NETIF_F_FCOE_MTU */ "fcoe-mtu", - /* NETIF_F_NTUPLE */ "rx-ntuple-filter", - /* NETIF_F_RXHASH */ "rx-hashing", - /* NETIF_F_RXCSUM */ "rx-checksum", - /* NETIF_F_NOCACHE_COPY */ "tx-nocache-copy", - /* NETIF_F_LOOPBACK */ "loopback", -}; - static int __ethtool_get_sset_count(struct net_device *dev, int sset) { const struct ethtool_ops *ops = dev->ethtool_ops; @@ -402,7 +179,7 @@ static void __ethtool_get_strings(struct net_device *dev, ops->get_strings(dev, stringset, data); } -static u32 ethtool_get_feature_mask(u32 eth_cmd) +static 
netdev_features_t ethtool_get_feature_mask(u32 eth_cmd) { /* feature masks of legacy discrete ethtool ops */ @@ -433,136 +210,82 @@ static u32 ethtool_get_feature_mask(u32 eth_cmd) } } -static void *__ethtool_get_one_feature_actor(struct net_device *dev, u32 ethcmd) -{ - const struct ethtool_ops *ops = dev->ethtool_ops; - - if (!ops) - return NULL; - - switch (ethcmd) { - case ETHTOOL_GTXCSUM: - return ops->get_tx_csum; - case ETHTOOL_GRXCSUM: - return ops->get_rx_csum; - case ETHTOOL_SSG: - return ops->get_sg; - case ETHTOOL_STSO: - return ops->get_tso; - case ETHTOOL_SUFO: - return ops->get_ufo; - default: - return NULL; - } -} - -static u32 __ethtool_get_rx_csum_oldbug(struct net_device *dev) -{ - return !!(dev->features & NETIF_F_ALL_CSUM); -} - static int ethtool_get_one_feature(struct net_device *dev, char __user *useraddr, u32 ethcmd) { - u32 mask = ethtool_get_feature_mask(ethcmd); + netdev_features_t mask = ethtool_get_feature_mask(ethcmd); struct ethtool_value edata = { .cmd = ethcmd, .data = !!(dev->features & mask), }; - /* compatibility with discrete get_ ops */ - if (!(dev->hw_features & mask)) { - u32 (*actor)(struct net_device *); - - actor = __ethtool_get_one_feature_actor(dev, ethcmd); - - /* bug compatibility with old get_rx_csum */ - if (ethcmd == ETHTOOL_GRXCSUM && !actor) - actor = __ethtool_get_rx_csum_oldbug; - - if (actor) - edata.data = actor(dev); - } - if (copy_to_user(useraddr, &edata, sizeof(edata))) return -EFAULT; return 0; } -static int __ethtool_set_tx_csum(struct net_device *dev, u32 data); -static int __ethtool_set_rx_csum(struct net_device *dev, u32 data); -static int __ethtool_set_sg(struct net_device *dev, u32 data); -static int __ethtool_set_tso(struct net_device *dev, u32 data); -static int __ethtool_set_ufo(struct net_device *dev, u32 data); - static int ethtool_set_one_feature(struct net_device *dev, void __user *useraddr, u32 ethcmd) { struct ethtool_value edata; - u32 mask; + netdev_features_t mask; if (copy_from_user(&edata, useraddr, sizeof(edata))) return -EFAULT; mask = ethtool_get_feature_mask(ethcmd); mask &= dev->hw_features; - if (mask) { - if (edata.data) - dev->wanted_features |= mask; - else - dev->wanted_features &= ~mask; + if (!mask) + return -EOPNOTSUPP; - __netdev_update_features(dev); - return 0; - } + if (edata.data) + dev->wanted_features |= mask; + else + dev->wanted_features &= ~mask; - /* Driver is not converted to ndo_fix_features or does not - * support changing this offload. In the latter case it won't - * have corresponding ethtool_ops field set. - * - * Following part is to be removed after all drivers advertise - * their changeable features in netdev->hw_features and stop - * using discrete offload setting ops. 
- */ + __netdev_update_features(dev); - switch (ethcmd) { - case ETHTOOL_STXCSUM: - return __ethtool_set_tx_csum(dev, edata.data); - case ETHTOOL_SRXCSUM: - return __ethtool_set_rx_csum(dev, edata.data); - case ETHTOOL_SSG: - return __ethtool_set_sg(dev, edata.data); - case ETHTOOL_STSO: - return __ethtool_set_tso(dev, edata.data); - case ETHTOOL_SUFO: - return __ethtool_set_ufo(dev, edata.data); - default: - return -EOPNOTSUPP; - } + return 0; +} + +#define ETH_ALL_FLAGS (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | \ + ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH) +#define ETH_ALL_FEATURES (NETIF_F_LRO | NETIF_F_HW_VLAN_RX | \ + NETIF_F_HW_VLAN_TX | NETIF_F_NTUPLE | NETIF_F_RXHASH) + +static u32 __ethtool_get_flags(struct net_device *dev) +{ + u32 flags = 0; + + if (dev->features & NETIF_F_LRO) flags |= ETH_FLAG_LRO; + if (dev->features & NETIF_F_HW_VLAN_RX) flags |= ETH_FLAG_RXVLAN; + if (dev->features & NETIF_F_HW_VLAN_TX) flags |= ETH_FLAG_TXVLAN; + if (dev->features & NETIF_F_NTUPLE) flags |= ETH_FLAG_NTUPLE; + if (dev->features & NETIF_F_RXHASH) flags |= ETH_FLAG_RXHASH; + + return flags; } -int __ethtool_set_flags(struct net_device *dev, u32 data) +static int __ethtool_set_flags(struct net_device *dev, u32 data) { - u32 changed; + netdev_features_t features = 0, changed; - if (data & ~flags_dup_features) + if (data & ~ETH_ALL_FLAGS) return -EINVAL; - /* legacy set_flags() op */ - if (dev->ethtool_ops->set_flags) { - if (unlikely(dev->hw_features & flags_dup_features)) - netdev_warn(dev, - "driver BUG: mixed hw_features and set_flags()\n"); - return dev->ethtool_ops->set_flags(dev, data); - } + if (data & ETH_FLAG_LRO) features |= NETIF_F_LRO; + if (data & ETH_FLAG_RXVLAN) features |= NETIF_F_HW_VLAN_RX; + if (data & ETH_FLAG_TXVLAN) features |= NETIF_F_HW_VLAN_TX; + if (data & ETH_FLAG_NTUPLE) features |= NETIF_F_NTUPLE; + if (data & ETH_FLAG_RXHASH) features |= NETIF_F_RXHASH; /* allow changing only bits set in hw_features */ - changed = (data ^ dev->features) & flags_dup_features; + changed = (features ^ dev->features) & ETH_ALL_FEATURES; if (changed & ~dev->hw_features) return (changed & dev->hw_features) ? 
-EINVAL : -EOPNOTSUPP; dev->wanted_features = - (dev->wanted_features & ~changed) | (data & dev->hw_features); + (dev->wanted_features & ~changed) | (features & changed); __netdev_update_features(dev); @@ -1231,81 +954,6 @@ static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr) return dev->ethtool_ops->set_pauseparam(dev, &pauseparam); } -static int __ethtool_set_sg(struct net_device *dev, u32 data) -{ - int err; - - if (!dev->ethtool_ops->set_sg) - return -EOPNOTSUPP; - - if (data && !(dev->features & NETIF_F_ALL_CSUM)) - return -EINVAL; - - if (!data && dev->ethtool_ops->set_tso) { - err = dev->ethtool_ops->set_tso(dev, 0); - if (err) - return err; - } - - if (!data && dev->ethtool_ops->set_ufo) { - err = dev->ethtool_ops->set_ufo(dev, 0); - if (err) - return err; - } - return dev->ethtool_ops->set_sg(dev, data); -} - -static int __ethtool_set_tx_csum(struct net_device *dev, u32 data) -{ - int err; - - if (!dev->ethtool_ops->set_tx_csum) - return -EOPNOTSUPP; - - if (!data && dev->ethtool_ops->set_sg) { - err = __ethtool_set_sg(dev, 0); - if (err) - return err; - } - - return dev->ethtool_ops->set_tx_csum(dev, data); -} - -static int __ethtool_set_rx_csum(struct net_device *dev, u32 data) -{ - if (!dev->ethtool_ops->set_rx_csum) - return -EOPNOTSUPP; - - if (!data) - dev->features &= ~NETIF_F_GRO; - - return dev->ethtool_ops->set_rx_csum(dev, data); -} - -static int __ethtool_set_tso(struct net_device *dev, u32 data) -{ - if (!dev->ethtool_ops->set_tso) - return -EOPNOTSUPP; - - if (data && !(dev->features & NETIF_F_SG)) - return -EINVAL; - - return dev->ethtool_ops->set_tso(dev, data); -} - -static int __ethtool_set_ufo(struct net_device *dev, u32 data) -{ - if (!dev->ethtool_ops->set_ufo) - return -EOPNOTSUPP; - if (data && !(dev->features & NETIF_F_SG)) - return -EINVAL; - if (data && !((dev->features & NETIF_F_GEN_CSUM) || - (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) - == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) - return -EINVAL; - return dev->ethtool_ops->set_ufo(dev, data); -} - static int ethtool_self_test(struct net_device *dev, char __user *useraddr) { struct ethtool_test test; @@ -1771,9 +1419,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) break; case ETHTOOL_GFLAGS: rc = ethtool_get_value(dev, useraddr, ethcmd, - (dev->ethtool_ops->get_flags ? 
- dev->ethtool_ops->get_flags : - ethtool_op_get_flags)); + __ethtool_get_flags); break; case ETHTOOL_SFLAGS: rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags); diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c new file mode 100644 index 000000000000..0985b9b14b80 --- /dev/null +++ b/net/core/flow_dissector.c @@ -0,0 +1,143 @@ +#include <linux/skbuff.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/if_vlan.h> +#include <net/ip.h> +#include <linux/if_tunnel.h> +#include <linux/if_pppox.h> +#include <linux/ppp_defs.h> +#include <net/flow_keys.h> + +/* copy saddr & daddr, possibly using 64bit load/store + * Equivalent to : flow->src = iph->saddr; + * flow->dst = iph->daddr; + */ +static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph) +{ + BUILD_BUG_ON(offsetof(typeof(*flow), dst) != + offsetof(typeof(*flow), src) + sizeof(flow->src)); + memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst)); +} + +bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow) +{ + int poff, nhoff = skb_network_offset(skb); + u8 ip_proto; + __be16 proto = skb->protocol; + + memset(flow, 0, sizeof(*flow)); + +again: + switch (proto) { + case __constant_htons(ETH_P_IP): { + const struct iphdr *iph; + struct iphdr _iph; +ip: + iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); + if (!iph) + return false; + + if (ip_is_fragment(iph)) + ip_proto = 0; + else + ip_proto = iph->protocol; + iph_to_flow_copy_addrs(flow, iph); + nhoff += iph->ihl * 4; + break; + } + case __constant_htons(ETH_P_IPV6): { + const struct ipv6hdr *iph; + struct ipv6hdr _iph; +ipv6: + iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); + if (!iph) + return false; + + ip_proto = iph->nexthdr; + flow->src = iph->saddr.s6_addr32[3]; + flow->dst = iph->daddr.s6_addr32[3]; + nhoff += sizeof(struct ipv6hdr); + break; + } + case __constant_htons(ETH_P_8021Q): { + const struct vlan_hdr *vlan; + struct vlan_hdr _vlan; + + vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan); + if (!vlan) + return false; + + proto = vlan->h_vlan_encapsulated_proto; + nhoff += sizeof(*vlan); + goto again; + } + case __constant_htons(ETH_P_PPP_SES): { + struct { + struct pppoe_hdr hdr; + __be16 proto; + } *hdr, _hdr; + hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr); + if (!hdr) + return false; + proto = hdr->proto; + nhoff += PPPOE_SES_HLEN; + switch (proto) { + case __constant_htons(PPP_IP): + goto ip; + case __constant_htons(PPP_IPV6): + goto ipv6; + default: + return false; + } + } + default: + return false; + } + + switch (ip_proto) { + case IPPROTO_GRE: { + struct gre_hdr { + __be16 flags; + __be16 proto; + } *hdr, _hdr; + + hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr); + if (!hdr) + return false; + /* + * Only look inside GRE if version zero and no + * routing + */ + if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) { + proto = hdr->proto; + nhoff += 4; + if (hdr->flags & GRE_CSUM) + nhoff += 4; + if (hdr->flags & GRE_KEY) + nhoff += 4; + if (hdr->flags & GRE_SEQ) + nhoff += 4; + goto again; + } + break; + } + case IPPROTO_IPIP: + goto again; + default: + break; + } + + flow->ip_proto = ip_proto; + poff = proto_ports_offset(ip_proto); + if (poff >= 0) { + __be32 *ports, _ports; + + nhoff += poff; + ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports); + if (ports) + flow->ports = *ports; + } + + return true; +} +EXPORT_SYMBOL(skb_flow_dissect); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 
5ac07d31fbc9..cdf8dc34f0ba 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -238,6 +238,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev) it to safe state. */ skb_queue_purge(&n->arp_queue); + n->arp_queue_len_bytes = 0; n->output = neigh_blackhole; if (n->nud_state & NUD_VALID) n->nud_state = NUD_NOARP; @@ -272,7 +273,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) } EXPORT_SYMBOL(neigh_ifdown); -static struct neighbour *neigh_alloc(struct neigh_table *tbl) +static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev) { struct neighbour *n = NULL; unsigned long now = jiffies; @@ -287,7 +288,15 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl) goto out_entries; } - n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC); + if (tbl->entry_size) + n = kzalloc(tbl->entry_size, GFP_ATOMIC); + else { + int sz = sizeof(*n) + tbl->key_len; + + sz = ALIGN(sz, NEIGH_PRIV_ALIGN); + sz += dev->neigh_priv_len; + n = kzalloc(sz, GFP_ATOMIC); + } if (!n) goto out_entries; @@ -462,7 +471,7 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey, u32 hash_val; int key_len = tbl->key_len; int error; - struct neighbour *n1, *rc, *n = neigh_alloc(tbl); + struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev); struct neigh_hash_table *nht; if (!n) { @@ -480,6 +489,14 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey, goto out_neigh_release; } + if (dev->netdev_ops->ndo_neigh_construct) { + error = dev->netdev_ops->ndo_neigh_construct(n); + if (error < 0) { + rc = ERR_PTR(error); + goto out_neigh_release; + } + } + /* Device specific setup. */ if (n->parms->neigh_setup && (error = n->parms->neigh_setup(n)) < 0) { @@ -677,18 +694,14 @@ static inline void neigh_parms_put(struct neigh_parms *parms) neigh_parms_destroy(parms); } -static void neigh_destroy_rcu(struct rcu_head *head) -{ - struct neighbour *neigh = container_of(head, struct neighbour, rcu); - - kmem_cache_free(neigh->tbl->kmem_cachep, neigh); -} /* * neighbour must already be out of the table; * */ void neigh_destroy(struct neighbour *neigh) { + struct net_device *dev = neigh->dev; + NEIGH_CACHE_STAT_INC(neigh->tbl, destroys); if (!neigh->dead) { @@ -702,14 +715,18 @@ void neigh_destroy(struct neighbour *neigh) printk(KERN_WARNING "Impossible event.\n"); skb_queue_purge(&neigh->arp_queue); + neigh->arp_queue_len_bytes = 0; + + if (dev->netdev_ops->ndo_neigh_destroy) + dev->netdev_ops->ndo_neigh_destroy(neigh); - dev_put(neigh->dev); + dev_put(dev); neigh_parms_put(neigh->parms); NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh); atomic_dec(&neigh->tbl->entries); - call_rcu(&neigh->rcu, neigh_destroy_rcu); + kfree_rcu(neigh, rcu); } EXPORT_SYMBOL(neigh_destroy); @@ -842,6 +859,7 @@ static void neigh_invalidate(struct neighbour *neigh) write_lock(&neigh->lock); } skb_queue_purge(&neigh->arp_queue); + neigh->arp_queue_len_bytes = 0; } static void neigh_probe(struct neighbour *neigh) @@ -980,15 +998,20 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) if (neigh->nud_state == NUD_INCOMPLETE) { if (skb) { - if (skb_queue_len(&neigh->arp_queue) >= - neigh->parms->queue_len) { + while (neigh->arp_queue_len_bytes + skb->truesize > + neigh->parms->queue_len_bytes) { struct sk_buff *buff; + buff = __skb_dequeue(&neigh->arp_queue); + if (!buff) + break; + neigh->arp_queue_len_bytes -= buff->truesize; kfree_skb(buff); NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards); } skb_dst_force(skb); 
__skb_queue_tail(&neigh->arp_queue, skb); + neigh->arp_queue_len_bytes += skb->truesize; } rc = 1; } @@ -1175,6 +1198,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, write_lock_bh(&neigh->lock); } skb_queue_purge(&neigh->arp_queue); + neigh->arp_queue_len_bytes = 0; } out: if (update_isrouter) { @@ -1477,11 +1501,6 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl) tbl->parms.reachable_time = neigh_rand_reach_time(tbl->parms.base_reachable_time); - if (!tbl->kmem_cachep) - tbl->kmem_cachep = - kmem_cache_create(tbl->id, tbl->entry_size, 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, - NULL); tbl->stats = alloc_percpu(struct neigh_statistics); if (!tbl->stats) panic("cannot create neighbour cache statistics"); @@ -1566,9 +1585,6 @@ int neigh_table_clear(struct neigh_table *tbl) free_percpu(tbl->stats); tbl->stats = NULL; - kmem_cache_destroy(tbl->kmem_cachep); - tbl->kmem_cachep = NULL; - return 0; } EXPORT_SYMBOL(neigh_table_clear); @@ -1747,7 +1763,11 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms) NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex); NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)); - NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len); + NLA_PUT_U32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes); + /* approximative value for deprecated QUEUE_LEN (in packets) */ + NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, + DIV_ROUND_UP(parms->queue_len_bytes, + SKB_TRUESIZE(ETH_FRAME_LEN))); NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen); NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes); NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes); @@ -1974,7 +1994,11 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) switch (i) { case NDTPA_QUEUE_LEN: - p->queue_len = nla_get_u32(tbp[i]); + p->queue_len_bytes = nla_get_u32(tbp[i]) * + SKB_TRUESIZE(ETH_FRAME_LEN); + break; + case NDTPA_QUEUE_LENBYTES: + p->queue_len_bytes = nla_get_u32(tbp[i]); break; case NDTPA_PROXY_QLEN: p->proxy_qlen = nla_get_u32(tbp[i]); @@ -2638,117 +2662,158 @@ EXPORT_SYMBOL(neigh_app_ns); #ifdef CONFIG_SYSCTL -#define NEIGH_VARS_MAX 19 +static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + int size, ret; + ctl_table tmp = *ctl; + + tmp.data = &size; + size = DIV_ROUND_UP(*(int *)ctl->data, SKB_TRUESIZE(ETH_FRAME_LEN)); + ret = proc_dointvec(&tmp, write, buffer, lenp, ppos); + if (write && !ret) + *(int *)ctl->data = size * SKB_TRUESIZE(ETH_FRAME_LEN); + return ret; +} + +enum { + NEIGH_VAR_MCAST_PROBE, + NEIGH_VAR_UCAST_PROBE, + NEIGH_VAR_APP_PROBE, + NEIGH_VAR_RETRANS_TIME, + NEIGH_VAR_BASE_REACHABLE_TIME, + NEIGH_VAR_DELAY_PROBE_TIME, + NEIGH_VAR_GC_STALETIME, + NEIGH_VAR_QUEUE_LEN, + NEIGH_VAR_QUEUE_LEN_BYTES, + NEIGH_VAR_PROXY_QLEN, + NEIGH_VAR_ANYCAST_DELAY, + NEIGH_VAR_PROXY_DELAY, + NEIGH_VAR_LOCKTIME, + NEIGH_VAR_RETRANS_TIME_MS, + NEIGH_VAR_BASE_REACHABLE_TIME_MS, + NEIGH_VAR_GC_INTERVAL, + NEIGH_VAR_GC_THRESH1, + NEIGH_VAR_GC_THRESH2, + NEIGH_VAR_GC_THRESH3, + NEIGH_VAR_MAX +}; static struct neigh_sysctl_table { struct ctl_table_header *sysctl_header; - struct ctl_table neigh_vars[NEIGH_VARS_MAX]; + struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1]; char *dev_name; } neigh_sysctl_template __read_mostly = { .neigh_vars = { - { + [NEIGH_VAR_MCAST_PROBE] = { .procname = "mcast_solicit", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, - { + [NEIGH_VAR_UCAST_PROBE] = { .procname = "ucast_solicit", .maxlen = sizeof(int), .mode = 
0644, .proc_handler = proc_dointvec, }, - { + [NEIGH_VAR_APP_PROBE] = { .procname = "app_solicit", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, - { + [NEIGH_VAR_RETRANS_TIME] = { .procname = "retrans_time", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_userhz_jiffies, }, - { + [NEIGH_VAR_BASE_REACHABLE_TIME] = { .procname = "base_reachable_time", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, - { + [NEIGH_VAR_DELAY_PROBE_TIME] = { .procname = "delay_first_probe_time", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, - { + [NEIGH_VAR_GC_STALETIME] = { .procname = "gc_stale_time", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, - { + [NEIGH_VAR_QUEUE_LEN] = { .procname = "unres_qlen", .maxlen = sizeof(int), .mode = 0644, + .proc_handler = proc_unres_qlen, + }, + [NEIGH_VAR_QUEUE_LEN_BYTES] = { + .procname = "unres_qlen_bytes", + .maxlen = sizeof(int), + .mode = 0644, .proc_handler = proc_dointvec, }, - { + [NEIGH_VAR_PROXY_QLEN] = { .procname = "proxy_qlen", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, - { + [NEIGH_VAR_ANYCAST_DELAY] = { .procname = "anycast_delay", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_userhz_jiffies, }, - { + [NEIGH_VAR_PROXY_DELAY] = { .procname = "proxy_delay", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_userhz_jiffies, }, - { + [NEIGH_VAR_LOCKTIME] = { .procname = "locktime", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_userhz_jiffies, }, - { + [NEIGH_VAR_RETRANS_TIME_MS] = { .procname = "retrans_time_ms", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_ms_jiffies, }, - { + [NEIGH_VAR_BASE_REACHABLE_TIME_MS] = { .procname = "base_reachable_time_ms", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_ms_jiffies, }, - { + [NEIGH_VAR_GC_INTERVAL] = { .procname = "gc_interval", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, - { + [NEIGH_VAR_GC_THRESH1] = { .procname = "gc_thresh1", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, - { + [NEIGH_VAR_GC_THRESH2] = { .procname = "gc_thresh2", .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, - { + [NEIGH_VAR_GC_THRESH3] = { .procname = "gc_thresh3", .maxlen = sizeof(int), .mode = 0644, @@ -2781,47 +2846,49 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, if (!t) goto err; - t->neigh_vars[0].data = &p->mcast_probes; - t->neigh_vars[1].data = &p->ucast_probes; - t->neigh_vars[2].data = &p->app_probes; - t->neigh_vars[3].data = &p->retrans_time; - t->neigh_vars[4].data = &p->base_reachable_time; - t->neigh_vars[5].data = &p->delay_probe_time; - t->neigh_vars[6].data = &p->gc_staletime; - t->neigh_vars[7].data = &p->queue_len; - t->neigh_vars[8].data = &p->proxy_qlen; - t->neigh_vars[9].data = &p->anycast_delay; - t->neigh_vars[10].data = &p->proxy_delay; - t->neigh_vars[11].data = &p->locktime; - t->neigh_vars[12].data = &p->retrans_time; - t->neigh_vars[13].data = &p->base_reachable_time; + t->neigh_vars[NEIGH_VAR_MCAST_PROBE].data = &p->mcast_probes; + t->neigh_vars[NEIGH_VAR_UCAST_PROBE].data = &p->ucast_probes; + t->neigh_vars[NEIGH_VAR_APP_PROBE].data = &p->app_probes; + t->neigh_vars[NEIGH_VAR_RETRANS_TIME].data = &p->retrans_time; + t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].data = &p->base_reachable_time; + 
t->neigh_vars[NEIGH_VAR_DELAY_PROBE_TIME].data = &p->delay_probe_time; + t->neigh_vars[NEIGH_VAR_GC_STALETIME].data = &p->gc_staletime; + t->neigh_vars[NEIGH_VAR_QUEUE_LEN].data = &p->queue_len_bytes; + t->neigh_vars[NEIGH_VAR_QUEUE_LEN_BYTES].data = &p->queue_len_bytes; + t->neigh_vars[NEIGH_VAR_PROXY_QLEN].data = &p->proxy_qlen; + t->neigh_vars[NEIGH_VAR_ANYCAST_DELAY].data = &p->anycast_delay; + t->neigh_vars[NEIGH_VAR_PROXY_DELAY].data = &p->proxy_delay; + t->neigh_vars[NEIGH_VAR_LOCKTIME].data = &p->locktime; + t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].data = &p->retrans_time; + t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].data = &p->base_reachable_time; if (dev) { dev_name_source = dev->name; /* Terminate the table early */ - memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14])); + memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0, + sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL])); } else { dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname; - t->neigh_vars[14].data = (int *)(p + 1); - t->neigh_vars[15].data = (int *)(p + 1) + 1; - t->neigh_vars[16].data = (int *)(p + 1) + 2; - t->neigh_vars[17].data = (int *)(p + 1) + 3; + t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1); + t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1; + t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2; + t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3; } if (handler) { /* RetransTime */ - t->neigh_vars[3].proc_handler = handler; - t->neigh_vars[3].extra1 = dev; + t->neigh_vars[NEIGH_VAR_RETRANS_TIME].proc_handler = handler; + t->neigh_vars[NEIGH_VAR_RETRANS_TIME].extra1 = dev; /* ReachableTime */ - t->neigh_vars[4].proc_handler = handler; - t->neigh_vars[4].extra1 = dev; + t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = handler; + t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].extra1 = dev; /* RetransTime (in milliseconds)*/ - t->neigh_vars[12].proc_handler = handler; - t->neigh_vars[12].extra1 = dev; + t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler; + t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].extra1 = dev; /* ReachableTime (in milliseconds) */ - t->neigh_vars[13].proc_handler = handler; - t->neigh_vars[13].extra1 = dev; + t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler; + t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev; } t->dev_name = kstrdup(dev_name_source, GFP_KERNEL); diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index c71c434a4c05..3bf72b638d34 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -21,6 +21,7 @@ #include <linux/wireless.h> #include <linux/vmalloc.h> #include <linux/export.h> +#include <linux/jiffies.h> #include <net/wext.h> #include "net-sysfs.h" @@ -606,9 +607,12 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue, rcu_assign_pointer(queue->rps_map, map); spin_unlock(&rps_map_lock); - if (old_map) + if (map) + jump_label_inc(&rps_needed); + if (old_map) { kfree_rcu(old_map, rcu); - + jump_label_dec(&rps_needed); + } free_cpumask_var(mask); return len; } @@ -780,7 +784,7 @@ net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num) #endif } -#ifdef CONFIG_XPS +#ifdef CONFIG_SYSFS /* * netdev_queue sysfs structures and functions. 
*/ @@ -826,6 +830,133 @@ static const struct sysfs_ops netdev_queue_sysfs_ops = { .store = netdev_queue_attr_store, }; +static ssize_t show_trans_timeout(struct netdev_queue *queue, + struct netdev_queue_attribute *attribute, + char *buf) +{ + unsigned long trans_timeout; + + spin_lock_irq(&queue->_xmit_lock); + trans_timeout = queue->trans_timeout; + spin_unlock_irq(&queue->_xmit_lock); + + return sprintf(buf, "%lu", trans_timeout); +} + +static struct netdev_queue_attribute queue_trans_timeout = + __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL); + +#ifdef CONFIG_BQL +/* + * Byte queue limits sysfs structures and functions. + */ +static ssize_t bql_show(char *buf, unsigned int value) +{ + return sprintf(buf, "%u\n", value); +} + +static ssize_t bql_set(const char *buf, const size_t count, + unsigned int *pvalue) +{ + unsigned int value; + int err; + + if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) + value = DQL_MAX_LIMIT; + else { + err = kstrtouint(buf, 10, &value); + if (err < 0) + return err; + if (value > DQL_MAX_LIMIT) + return -EINVAL; + } + + *pvalue = value; + + return count; +} + +static ssize_t bql_show_hold_time(struct netdev_queue *queue, + struct netdev_queue_attribute *attr, + char *buf) +{ + struct dql *dql = &queue->dql; + + return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time)); +} + +static ssize_t bql_set_hold_time(struct netdev_queue *queue, + struct netdev_queue_attribute *attribute, + const char *buf, size_t len) +{ + struct dql *dql = &queue->dql; + unsigned value; + int err; + + err = kstrtouint(buf, 10, &value); + if (err < 0) + return err; + + dql->slack_hold_time = msecs_to_jiffies(value); + + return len; +} + +static struct netdev_queue_attribute bql_hold_time_attribute = + __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time, + bql_set_hold_time); + +static ssize_t bql_show_inflight(struct netdev_queue *queue, + struct netdev_queue_attribute *attr, + char *buf) +{ + struct dql *dql = &queue->dql; + + return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed); +} + +static struct netdev_queue_attribute bql_inflight_attribute = + __ATTR(inflight, S_IRUGO | S_IWUSR, bql_show_inflight, NULL); + +#define BQL_ATTR(NAME, FIELD) \ +static ssize_t bql_show_ ## NAME(struct netdev_queue *queue, \ + struct netdev_queue_attribute *attr, \ + char *buf) \ +{ \ + return bql_show(buf, queue->dql.FIELD); \ +} \ + \ +static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \ + struct netdev_queue_attribute *attr, \ + const char *buf, size_t len) \ +{ \ + return bql_set(buf, len, &queue->dql.FIELD); \ +} \ + \ +static struct netdev_queue_attribute bql_ ## NAME ## _attribute = \ + __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME, \ + bql_set_ ## NAME); + +BQL_ATTR(limit, limit) +BQL_ATTR(limit_max, max_limit) +BQL_ATTR(limit_min, min_limit) + +static struct attribute *dql_attrs[] = { + &bql_limit_attribute.attr, + &bql_limit_max_attribute.attr, + &bql_limit_min_attribute.attr, + &bql_hold_time_attribute.attr, + &bql_inflight_attribute.attr, + NULL +}; + +static struct attribute_group dql_group = { + .name = "byte_queue_limits", + .attrs = dql_attrs, +}; +#endif /* CONFIG_BQL */ + +#ifdef CONFIG_XPS static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue) { struct net_device *dev = queue->dev; @@ -890,6 +1021,52 @@ static DEFINE_MUTEX(xps_map_mutex); #define xmap_dereference(P) \ rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) +static void xps_queue_release(struct netdev_queue *queue) +{ + struct net_device 
*dev = queue->dev; + struct xps_dev_maps *dev_maps; + struct xps_map *map; + unsigned long index; + int i, pos, nonempty = 0; + + index = get_netdev_queue_index(queue); + + mutex_lock(&xps_map_mutex); + dev_maps = xmap_dereference(dev->xps_maps); + + if (dev_maps) { + for_each_possible_cpu(i) { + map = xmap_dereference(dev_maps->cpu_map[i]); + if (!map) + continue; + + for (pos = 0; pos < map->len; pos++) + if (map->queues[pos] == index) + break; + + if (pos < map->len) { + if (map->len > 1) + map->queues[pos] = + map->queues[--map->len]; + else { + RCU_INIT_POINTER(dev_maps->cpu_map[i], + NULL); + kfree_rcu(map, rcu); + map = NULL; + } + } + if (map) + nonempty = 1; + } + + if (!nonempty) { + RCU_INIT_POINTER(dev->xps_maps, NULL); + kfree_rcu(dev_maps, rcu); + } + } + mutex_unlock(&xps_map_mutex); +} + static ssize_t store_xps_map(struct netdev_queue *queue, struct netdev_queue_attribute *attribute, const char *buf, size_t len) @@ -901,7 +1078,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue, struct xps_map *map, *new_map; struct xps_dev_maps *dev_maps, *new_dev_maps; int nonempty = 0; - int numa_node = -2; + int numa_node_id = -2; if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -944,10 +1121,10 @@ static ssize_t store_xps_map(struct netdev_queue *queue, need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu); #ifdef CONFIG_NUMA if (need_set) { - if (numa_node == -2) - numa_node = cpu_to_node(cpu); - else if (numa_node != cpu_to_node(cpu)) - numa_node = -1; + if (numa_node_id == -2) + numa_node_id = cpu_to_node(cpu); + else if (numa_node_id != cpu_to_node(cpu)) + numa_node_id = -1; } #endif if (need_set && pos >= map_len) { @@ -997,7 +1174,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue, if (dev_maps) kfree_rcu(dev_maps, rcu); - netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node : + netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? 
numa_node_id : NUMA_NO_NODE); mutex_unlock(&xps_map_mutex); @@ -1020,58 +1197,23 @@ error: static struct netdev_queue_attribute xps_cpus_attribute = __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map); +#endif /* CONFIG_XPS */ static struct attribute *netdev_queue_default_attrs[] = { + &queue_trans_timeout.attr, +#ifdef CONFIG_XPS &xps_cpus_attribute.attr, +#endif NULL }; static void netdev_queue_release(struct kobject *kobj) { struct netdev_queue *queue = to_netdev_queue(kobj); - struct net_device *dev = queue->dev; - struct xps_dev_maps *dev_maps; - struct xps_map *map; - unsigned long index; - int i, pos, nonempty = 0; - - index = get_netdev_queue_index(queue); - - mutex_lock(&xps_map_mutex); - dev_maps = xmap_dereference(dev->xps_maps); - - if (dev_maps) { - for_each_possible_cpu(i) { - map = xmap_dereference(dev_maps->cpu_map[i]); - if (!map) - continue; - - for (pos = 0; pos < map->len; pos++) - if (map->queues[pos] == index) - break; - - if (pos < map->len) { - if (map->len > 1) - map->queues[pos] = - map->queues[--map->len]; - else { - RCU_INIT_POINTER(dev_maps->cpu_map[i], - NULL); - kfree_rcu(map, rcu); - map = NULL; - } - } - if (map) - nonempty = 1; - } - - if (!nonempty) { - RCU_INIT_POINTER(dev->xps_maps, NULL); - kfree_rcu(dev_maps, rcu); - } - } - mutex_unlock(&xps_map_mutex); +#ifdef CONFIG_XPS + xps_queue_release(queue); +#endif memset(kobj, 0, sizeof(*kobj)); dev_put(queue->dev); @@ -1079,7 +1221,9 @@ static void netdev_queue_release(struct kobject *kobj) static struct kobj_type netdev_queue_ktype = { .sysfs_ops = &netdev_queue_sysfs_ops, +#ifdef CONFIG_XPS .release = netdev_queue_release, +#endif .default_attrs = netdev_queue_default_attrs, }; @@ -1092,22 +1236,29 @@ static int netdev_queue_add_kobject(struct net_device *net, int index) kobj->kset = net->queues_kset; error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, "tx-%u", index); - if (error) { - kobject_put(kobj); - return error; - } + if (error) + goto exit; + +#ifdef CONFIG_BQL + error = sysfs_create_group(kobj, &dql_group); + if (error) + goto exit; +#endif kobject_uevent(kobj, KOBJ_ADD); dev_hold(queue->dev); + return 0; +exit: + kobject_put(kobj); return error; } -#endif /* CONFIG_XPS */ +#endif /* CONFIG_SYSFS */ int netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num) { -#ifdef CONFIG_XPS +#ifdef CONFIG_SYSFS int i; int error = 0; @@ -1119,20 +1270,26 @@ netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num) } } - while (--i >= new_num) - kobject_put(&net->_tx[i].kobj); + while (--i >= new_num) { + struct netdev_queue *queue = net->_tx + i; + +#ifdef CONFIG_BQL + sysfs_remove_group(&queue->kobj, &dql_group); +#endif + kobject_put(&queue->kobj); + } return error; #else return 0; -#endif +#endif /* CONFIG_SYSFS */ } static int register_queue_kobjects(struct net_device *net) { int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0; -#if defined(CONFIG_RPS) || defined(CONFIG_XPS) +#ifdef CONFIG_SYSFS net->queues_kset = kset_create_and_add("queues", NULL, &net->dev.kobj); if (!net->queues_kset) @@ -1173,7 +1330,7 @@ static void remove_queue_kobjects(struct net_device *net) net_rx_queue_update_kobjects(net, real_rx, 0); netdev_queue_update_kobjects(net, real_tx, 0); -#if defined(CONFIG_RPS) || defined(CONFIG_XPS) +#ifdef CONFIG_SYSFS kset_unregister(net->queues_kset); #endif } diff --git a/net/core/netpoll.c b/net/core/netpoll.c index cf64c1ffa4cd..0d38808a2305 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -76,7 +76,7 @@ 
static void queue_process(struct work_struct *work) local_irq_save(flags); __netif_tx_lock(txq, smp_processor_id()); - if (netif_tx_queue_frozen_or_stopped(txq) || + if (netif_xmit_frozen_or_stopped(txq) || ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) { skb_queue_head(&npinfo->txq, skb); __netif_tx_unlock(txq); @@ -317,7 +317,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) { if (__netif_tx_trylock(txq)) { - if (!netif_tx_queue_stopped(txq)) { + if (!netif_xmit_stopped(txq)) { status = ops->ndo_start_xmit(skb, dev); if (status == NETDEV_TX_OK) txq_trans_update(txq); @@ -422,6 +422,7 @@ static void arp_reply(struct sk_buff *skb) struct sk_buff *send_skb; struct netpoll *np, *tmp; unsigned long flags; + int hlen, tlen; int hits = 0; if (list_empty(&npinfo->rx_np)) @@ -479,8 +480,9 @@ static void arp_reply(struct sk_buff *skb) if (tip != np->local_ip) continue; - send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev), - LL_RESERVED_SPACE(np->dev)); + hlen = LL_RESERVED_SPACE(np->dev); + tlen = np->dev->needed_tailroom; + send_skb = find_skb(np, size + hlen + tlen, hlen); if (!send_skb) continue; diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c new file mode 100644 index 000000000000..3a9fd4826b75 --- /dev/null +++ b/net/core/netprio_cgroup.c @@ -0,0 +1,344 @@ +/* + * net/core/netprio_cgroup.c Priority Control Group + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Authors: Neil Horman <nhorman@tuxdriver.com> + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/skbuff.h> +#include <linux/cgroup.h> +#include <linux/rcupdate.h> +#include <linux/atomic.h> +#include <net/rtnetlink.h> +#include <net/pkt_cls.h> +#include <net/sock.h> +#include <net/netprio_cgroup.h> + +static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, + struct cgroup *cgrp); +static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp); +static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp); + +struct cgroup_subsys net_prio_subsys = { + .name = "net_prio", + .create = cgrp_create, + .destroy = cgrp_destroy, + .populate = cgrp_populate, +#ifdef CONFIG_NETPRIO_CGROUP + .subsys_id = net_prio_subsys_id, +#endif + .module = THIS_MODULE +}; + +#define PRIOIDX_SZ 128 + +static unsigned long prioidx_map[PRIOIDX_SZ]; +static DEFINE_SPINLOCK(prioidx_map_lock); +static atomic_t max_prioidx = ATOMIC_INIT(0); + +static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp) +{ + return container_of(cgroup_subsys_state(cgrp, net_prio_subsys_id), + struct cgroup_netprio_state, css); +} + +static int get_prioidx(u32 *prio) +{ + unsigned long flags; + u32 prioidx; + + spin_lock_irqsave(&prioidx_map_lock, flags); + prioidx = find_first_zero_bit(prioidx_map, sizeof(unsigned long) * PRIOIDX_SZ); + set_bit(prioidx, prioidx_map); + spin_unlock_irqrestore(&prioidx_map_lock, flags); + if (prioidx == sizeof(unsigned long) * PRIOIDX_SZ) + return -ENOSPC; + + atomic_set(&max_prioidx, prioidx); + *prio = prioidx; + return 0; +} + +static void put_prioidx(u32 idx) +{ + unsigned long flags; + + spin_lock_irqsave(&prioidx_map_lock, flags); + clear_bit(idx, 
prioidx_map); + spin_unlock_irqrestore(&prioidx_map_lock, flags); +} + +static void extend_netdev_table(struct net_device *dev, u32 new_len) +{ + size_t new_size = sizeof(struct netprio_map) + + ((sizeof(u32) * new_len)); + struct netprio_map *new_priomap = kzalloc(new_size, GFP_KERNEL); + struct netprio_map *old_priomap; + int i; + + old_priomap = rtnl_dereference(dev->priomap); + + if (!new_priomap) { + printk(KERN_WARNING "Unable to alloc new priomap!\n"); + return; + } + + for (i = 0; + old_priomap && (i < old_priomap->priomap_len); + i++) + new_priomap->priomap[i] = old_priomap->priomap[i]; + + new_priomap->priomap_len = new_len; + + rcu_assign_pointer(dev->priomap, new_priomap); + if (old_priomap) + kfree_rcu(old_priomap, rcu); +} + +static void update_netdev_tables(void) +{ + struct net_device *dev; + u32 max_len = atomic_read(&max_prioidx); + struct netprio_map *map; + + rtnl_lock(); + for_each_netdev(&init_net, dev) { + map = rtnl_dereference(dev->priomap); + if ((!map) || + (map->priomap_len < max_len)) + extend_netdev_table(dev, max_len); + } + rtnl_unlock(); +} + +static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss, + struct cgroup *cgrp) +{ + struct cgroup_netprio_state *cs; + int ret; + + cs = kzalloc(sizeof(*cs), GFP_KERNEL); + if (!cs) + return ERR_PTR(-ENOMEM); + + if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) { + kfree(cs); + return ERR_PTR(-EINVAL); + } + + ret = get_prioidx(&cs->prioidx); + if (ret != 0) { + printk(KERN_WARNING "No space in priority index array\n"); + kfree(cs); + return ERR_PTR(ret); + } + + return &cs->css; +} + +static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) +{ + struct cgroup_netprio_state *cs; + struct net_device *dev; + struct netprio_map *map; + + cs = cgrp_netprio_state(cgrp); + rtnl_lock(); + for_each_netdev(&init_net, dev) { + map = rtnl_dereference(dev->priomap); + if (map) + map->priomap[cs->prioidx] = 0; + } + rtnl_unlock(); + put_prioidx(cs->prioidx); + kfree(cs); +} + +static u64 read_prioidx(struct cgroup *cgrp, struct cftype *cft) +{ + return (u64)cgrp_netprio_state(cgrp)->prioidx; +} + +static int read_priomap(struct cgroup *cont, struct cftype *cft, + struct cgroup_map_cb *cb) +{ + struct net_device *dev; + u32 prioidx = cgrp_netprio_state(cont)->prioidx; + u32 priority; + struct netprio_map *map; + + rcu_read_lock(); + for_each_netdev_rcu(&init_net, dev) { + map = rcu_dereference(dev->priomap); + priority = map ? 
map->priomap[prioidx] : 0; + cb->fill(cb, dev->name, priority); + } + rcu_read_unlock(); + return 0; +} + +static int write_priomap(struct cgroup *cgrp, struct cftype *cft, + const char *buffer) +{ + char *devname = kstrdup(buffer, GFP_KERNEL); + int ret = -EINVAL; + u32 prioidx = cgrp_netprio_state(cgrp)->prioidx; + unsigned long priority; + char *priostr; + struct net_device *dev; + struct netprio_map *map; + + if (!devname) + return -ENOMEM; + + /* + * Minimally sized valid priomap string + */ + if (strlen(devname) < 3) + goto out_free_devname; + + priostr = strstr(devname, " "); + if (!priostr) + goto out_free_devname; + + /* + *Separate the devname from the associated priority + *and advance the priostr poitner to the priority value + */ + *priostr = '\0'; + priostr++; + + /* + * If the priostr points to NULL, we're at the end of the passed + * in string, and its not a valid write + */ + if (*priostr == '\0') + goto out_free_devname; + + ret = kstrtoul(priostr, 10, &priority); + if (ret < 0) + goto out_free_devname; + + ret = -ENODEV; + + dev = dev_get_by_name(&init_net, devname); + if (!dev) + goto out_free_devname; + + update_netdev_tables(); + ret = 0; + rcu_read_lock(); + map = rcu_dereference(dev->priomap); + if (map) + map->priomap[prioidx] = priority; + rcu_read_unlock(); + dev_put(dev); + +out_free_devname: + kfree(devname); + return ret; +} + +static struct cftype ss_files[] = { + { + .name = "prioidx", + .read_u64 = read_prioidx, + }, + { + .name = "ifpriomap", + .read_map = read_priomap, + .write_string = write_priomap, + }, +}; + +static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) +{ + return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files)); +} + +static int netprio_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = ptr; + struct netprio_map *old; + u32 max_len = atomic_read(&max_prioidx); + + /* + * Note this is called with rtnl_lock held so we have update side + * protection on our rcu assignments + */ + + switch (event) { + + case NETDEV_REGISTER: + if (max_len) + extend_netdev_table(dev, max_len); + break; + case NETDEV_UNREGISTER: + old = rtnl_dereference(dev->priomap); + RCU_INIT_POINTER(dev->priomap, NULL); + if (old) + kfree_rcu(old, rcu); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block netprio_device_notifier = { + .notifier_call = netprio_device_event +}; + +static int __init init_cgroup_netprio(void) +{ + int ret; + + ret = cgroup_load_subsys(&net_prio_subsys); + if (ret) + goto out; +#ifndef CONFIG_NETPRIO_CGROUP + smp_wmb(); + net_prio_subsys_id = net_prio_subsys.subsys_id; +#endif + + register_netdevice_notifier(&netprio_device_notifier); + +out: + return ret; +} + +static void __exit exit_cgroup_netprio(void) +{ + struct netprio_map *old; + struct net_device *dev; + + unregister_netdevice_notifier(&netprio_device_notifier); + + cgroup_unload_subsys(&net_prio_subsys); + +#ifndef CONFIG_NETPRIO_CGROUP + net_prio_subsys_id = -1; + synchronize_rcu(); +#endif + + rtnl_lock(); + for_each_netdev(&init_net, dev) { + old = rtnl_dereference(dev->priomap); + RCU_INIT_POINTER(dev->priomap, NULL); + if (old) + kfree_rcu(old, rcu); + } + rtnl_unlock(); +} + +module_init(init_cgroup_netprio); +module_exit(exit_cgroup_netprio); +MODULE_LICENSE("GPL v2"); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 0001c243b35c..449fe0f068f8 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -1304,7 +1304,7 @@ static ssize_t pktgen_if_write(struct file *file, 
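/*
 * Illustrative note (not part of the patch above): net/core/netprio_cgroup.c
 * keeps one u32 priority per (device, cgroup) pair in dev->priomap, indexed
 * by the cgroup's prioidx, and write_priomap() fills it from strings of the
 * form "<ifname> <prio>" (e.g. "eth0 5" written to net_prio.ifpriomap).
 * Below is a hedged sketch of how a transmit path could consult that table;
 * example_update_prio() is an assumed name, not a hook added by this patch.
 */
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <net/netprio_cgroup.h>
#include <net/sock.h>

static void example_update_prio(struct sk_buff *skb, struct net_device *dev)
{
	struct netprio_map *map;

	if (!skb->sk)
		return;

	rcu_read_lock();
	map = rcu_dereference(dev->priomap);
	if (map && skb->sk->sk_cgrp_prioidx < map->priomap_len)
		skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
	rcu_read_unlock();
}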
scan_ip6(buf, pkt_dev->in6_daddr.s6_addr); snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr); - ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr); + pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr; if (debug) printk(KERN_DEBUG "pktgen: dst6 set to: %s\n", buf); @@ -1327,8 +1327,7 @@ static ssize_t pktgen_if_write(struct file *file, scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr); snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr); - ipv6_addr_copy(&pkt_dev->cur_in6_daddr, - &pkt_dev->min_in6_daddr); + pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr; if (debug) printk(KERN_DEBUG "pktgen: dst6_min set to: %s\n", buf); @@ -1371,7 +1370,7 @@ static ssize_t pktgen_if_write(struct file *file, scan_ip6(buf, pkt_dev->in6_saddr.s6_addr); snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr); - ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr); + pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr; if (debug) printk(KERN_DEBUG "pktgen: src6 set to: %s\n", buf); @@ -2079,9 +2078,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) ifp = ifp->if_next) { if (ifp->scope == IFA_LINK && !(ifp->flags & IFA_F_TENTATIVE)) { - ipv6_addr_copy(&pkt_dev-> - cur_in6_saddr, - &ifp->addr); + pkt_dev->cur_in6_saddr = ifp->addr; err = 0; break; } @@ -2958,8 +2955,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, iph->payload_len = htons(sizeof(struct udphdr) + datalen); iph->nexthdr = IPPROTO_UDP; - ipv6_addr_copy(&iph->daddr, &pkt_dev->cur_in6_daddr); - ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr); + iph->daddr = pkt_dev->cur_in6_daddr; + iph->saddr = pkt_dev->cur_in6_saddr; skb->mac_header = (skb->network_header - ETH_HLEN - pkt_dev->pkt_overhead); @@ -3345,7 +3342,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) __netif_tx_lock_bh(txq); - if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) { + if (unlikely(netif_xmit_frozen_or_stopped(txq))) { ret = NETDEV_TX_BUSY; pkt_dev->last_ok = 0; goto unlock; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 3c30ee4a5710..678ae4e783aa 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -245,6 +245,55 @@ nodata: EXPORT_SYMBOL(__alloc_skb); /** + * build_skb - build a network buffer + * @data: data buffer provided by caller + * + * Allocate a new &sk_buff. Caller provides space holding head and + * skb_shared_info. @data must have been allocated by kmalloc() + * The return is the new skb buffer. + * On a failure the return is %NULL, and @data is not freed. + * Notes : + * Before IO, driver allocates only data buffer where NIC put incoming frame + * Driver should add room at head (NET_SKB_PAD) and + * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)) + * After IO, driver calls build_skb(), to allocate sk_buff and populate it + * before giving packet to stack. + * RX rings only contains data buffers, not full skbs. 
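/*
 * Illustrative note: a minimal driver-side sketch of the usage pattern the
 * build_skb() comment above describes. The buffer is assumed to have been
 * kmalloc()ed with NET_SKB_PAD of headroom plus
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) of tailroom before the NIC
 * wrote "len" bytes at data + NET_SKB_PAD; example_rx_build() is an assumed
 * name, not an API added by this patch.
 */
#include <linux/skbuff.h>

static struct sk_buff *example_rx_build(void *data, unsigned int len)
{
	struct sk_buff *skb = build_skb(data);

	if (!skb)
		return NULL;		/* caller still owns "data" on failure */

	skb_reserve(skb, NET_SKB_PAD);	/* skip the headroom reserved for the stack */
	skb_put(skb, len);		/* bytes actually written by the NIC */
	return skb;
}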
+ */ +struct sk_buff *build_skb(void *data) +{ + struct skb_shared_info *shinfo; + struct sk_buff *skb; + unsigned int size; + + skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); + if (!skb) + return NULL; + + size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + memset(skb, 0, offsetof(struct sk_buff, tail)); + skb->truesize = SKB_TRUESIZE(size); + atomic_set(&skb->users, 1); + skb->head = data; + skb->data = data; + skb_reset_tail_pointer(skb); + skb->end = skb->tail + size; +#ifdef NET_SKBUFF_DATA_USES_OFFSET + skb->mac_header = ~0U; +#endif + + /* make sure we initialize shinfo sequentially */ + shinfo = skb_shinfo(skb); + memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); + atomic_set(&shinfo->dataref, 1); + kmemcheck_annotate_variable(shinfo->destructor_arg); + + return skb; +} +EXPORT_SYMBOL(build_skb); + +/** * __netdev_alloc_skb - allocate an skbuff for rx on a specific device * @dev: network device to receive on * @length: length to allocate @@ -2621,7 +2670,7 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum); * a pointer to the first in a list of new skbs for the segments. * In case of error it returns ERR_PTR(err). */ -struct sk_buff *skb_segment(struct sk_buff *skb, u32 features) +struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = NULL; struct sk_buff *tail = NULL; @@ -3169,6 +3218,26 @@ void skb_tstamp_tx(struct sk_buff *orig_skb, } EXPORT_SYMBOL_GPL(skb_tstamp_tx); +void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) +{ + struct sock *sk = skb->sk; + struct sock_exterr_skb *serr; + int err; + + skb->wifi_acked_valid = 1; + skb->wifi_acked = acked; + + serr = SKB_EXT_ERR(skb); + memset(serr, 0, sizeof(*serr)); + serr->ee.ee_errno = ENOMSG; + serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS; + + err = sock_queue_err_skb(sk, skb); + if (err) + kfree_skb(skb); +} +EXPORT_SYMBOL_GPL(skb_complete_wifi_ack); + /** * skb_partial_csum_set - set up and verify partial csum values for packet diff --git a/net/core/sock.c b/net/core/sock.c index 4ed7b1d12f5e..9777da86aeac 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -125,6 +125,7 @@ #include <net/xfrm.h> #include <linux/ipsec.h> #include <net/cls_cgroup.h> +#include <net/netprio_cgroup.h> #include <linux/filter.h> @@ -221,10 +222,16 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); EXPORT_SYMBOL(sysctl_optmem_max); -#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP) +#if defined(CONFIG_CGROUPS) +#if !defined(CONFIG_NET_CLS_CGROUP) int net_cls_subsys_id = -1; EXPORT_SYMBOL_GPL(net_cls_subsys_id); #endif +#if !defined(CONFIG_NETPRIO_CGROUP) +int net_prio_subsys_id = -1; +EXPORT_SYMBOL_GPL(net_prio_subsys_id); +#endif +#endif static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) { @@ -269,14 +276,14 @@ static void sock_warn_obsolete_bsdism(const char *name) } } -static void sock_disable_timestamp(struct sock *sk, int flag) +#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) + +static void sock_disable_timestamp(struct sock *sk, unsigned long flags) { - if (sock_flag(sk, flag)) { - sock_reset_flag(sk, flag); - if (!sock_flag(sk, SOCK_TIMESTAMP) && - !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) { + if (sk->sk_flags & flags) { + sk->sk_flags &= ~flags; + if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP)) net_disable_timestamp(); - } } } @@ -682,7 +689,7 @@ set_rcvbuf: SOCK_TIMESTAMPING_RX_SOFTWARE); else 
sock_disable_timestamp(sk, - SOCK_TIMESTAMPING_RX_SOFTWARE); + (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)); sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE, val & SOF_TIMESTAMPING_SOFTWARE); sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE, @@ -740,6 +747,11 @@ set_rcvbuf: case SO_RXQ_OVFL: sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool); break; + + case SO_WIFI_STATUS: + sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool); + break; + default: ret = -ENOPROTOOPT; break; @@ -961,6 +973,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.val = !!sock_flag(sk, SOCK_RXQ_OVFL); break; + case SO_WIFI_STATUS: + v.val = !!sock_flag(sk, SOCK_WIFI_STATUS); + break; + default: return -ENOPROTOOPT; } @@ -1111,6 +1127,18 @@ void sock_update_classid(struct sock *sk) sk->sk_classid = classid; } EXPORT_SYMBOL(sock_update_classid); + +void sock_update_netprioidx(struct sock *sk) +{ + struct cgroup_netprio_state *state; + if (in_interrupt()) + return; + rcu_read_lock(); + state = task_netprio_state(current); + sk->sk_cgrp_prioidx = state ? state->prioidx : 0; + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(sock_update_netprioidx); #endif /** @@ -1138,6 +1166,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, atomic_set(&sk->sk_wmem_alloc, 1); sock_update_classid(sk); + sock_update_netprioidx(sk); } return sk; @@ -1158,8 +1187,7 @@ static void __sk_free(struct sock *sk) RCU_INIT_POINTER(sk->sk_filter, NULL); } - sock_disable_timestamp(sk, SOCK_TIMESTAMP); - sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE); + sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); if (atomic_read(&sk->sk_omem_alloc)) printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n", @@ -1204,7 +1232,14 @@ void sk_release_kernel(struct sock *sk) } EXPORT_SYMBOL(sk_release_kernel); -struct sock *sk_clone(const struct sock *sk, const gfp_t priority) +/** + * sk_clone_lock - clone a socket, and lock its clone + * @sk: the socket to clone + * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) + * + * Caller must unlock socket even in error path (bh_unlock_sock(newsk)) + */ +struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) { struct sock *newsk; @@ -1290,14 +1325,13 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority) if (newsk->sk_prot->sockets_allocated) percpu_counter_inc(newsk->sk_prot->sockets_allocated); - if (sock_flag(newsk, SOCK_TIMESTAMP) || - sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE)) + if (newsk->sk_flags & SK_FLAGS_TIMESTAMP) net_enable_timestamp(); } out: return newsk; } -EXPORT_SYMBOL_GPL(sk_clone); +EXPORT_SYMBOL_GPL(sk_clone_lock); void sk_setup_caps(struct sock *sk, struct dst_entry *dst) { @@ -2129,16 +2163,15 @@ EXPORT_SYMBOL(sock_get_timestampns); void sock_enable_timestamp(struct sock *sk, int flag) { if (!sock_flag(sk, flag)) { + unsigned long previous_flags = sk->sk_flags; + sock_set_flag(sk, flag); /* * we just set one of the two flags which require net * time stamping, but time stamping might have been on * already because of the other one */ - if (!sock_flag(sk, - flag == SOCK_TIMESTAMP ? 
- SOCK_TIMESTAMPING_RX_SOFTWARE : - SOCK_TIMESTAMP)) + if (!(previous_flags & SK_FLAGS_TIMESTAMP)) net_enable_timestamp(); } } diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 77a65f031488..d05559d4d9cd 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -68,8 +68,13 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write, if (sock_table != orig_sock_table) { rcu_assign_pointer(rps_sock_flow_table, sock_table); - synchronize_rcu(); - vfree(orig_sock_table); + if (sock_table) + jump_label_inc(&rps_needed); + if (orig_sock_table) { + jump_label_dec(&rps_needed); + synchronize_rcu(); + vfree(orig_sock_table); + } } } diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 3f4e5414c8e5..1c67fe8ff90d 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -474,10 +474,11 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk, struct sk_buff *skb) { struct rtable *rt; + const struct iphdr *iph = ip_hdr(skb); struct flowi4 fl4 = { .flowi4_oif = skb_rtable(skb)->rt_iif, - .daddr = ip_hdr(skb)->saddr, - .saddr = ip_hdr(skb)->daddr, + .daddr = iph->saddr, + .saddr = iph->daddr, .flowi4_tos = RT_CONN_FLAGS(sk), .flowi4_proto = sk->sk_protocol, .fl4_sport = dccp_hdr(skb)->dccph_dport, diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 17ee85ce148d..ce903f747e64 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -150,8 +150,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, */ memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_DCCP; - ipv6_addr_copy(&fl6.daddr, &np->daddr); - ipv6_addr_copy(&fl6.saddr, &np->saddr); + fl6.daddr = np->daddr; + fl6.saddr = np->saddr; fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.fl6_dport = inet->inet_dport; fl6.fl6_sport = inet->inet_sport; @@ -244,8 +244,8 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_DCCP; - ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr); - ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr); + fl6.daddr = ireq6->rmt_addr; + fl6.saddr = ireq6->loc_addr; fl6.flowlabel = 0; fl6.flowi6_oif = ireq6->iif; fl6.fl6_dport = inet_rsk(req)->rmt_port; @@ -270,7 +270,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, dh->dccph_checksum = dccp_v6_csum_finish(skb, &ireq6->loc_addr, &ireq6->rmt_addr); - ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr); + fl6.daddr = ireq6->rmt_addr; err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); err = net_xmit_eval(err); } @@ -313,8 +313,8 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) &rxip6h->daddr); memset(&fl6, 0, sizeof(fl6)); - ipv6_addr_copy(&fl6.daddr, &rxip6h->saddr); - ipv6_addr_copy(&fl6.saddr, &rxip6h->daddr); + fl6.daddr = rxip6h->saddr; + fl6.saddr = rxip6h->daddr; fl6.flowi6_proto = IPPROTO_DCCP; fl6.flowi6_oif = inet6_iif(rxskb); @@ -419,8 +419,8 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) goto drop_and_free; ireq6 = inet6_rsk(req); - ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr); - ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr); + ireq6->rmt_addr = ipv6_hdr(skb)->saddr; + ireq6->loc_addr = ipv6_hdr(skb)->daddr; if (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || @@ -491,7 +491,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr); - ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); + newnp->rcv_saddr = newnp->saddr; 
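/*
 * Illustrative note on the ipv6_addr_copy() conversions in the pktgen and
 * dccp hunks above: struct in6_addr is plain data, so an ordinary structure
 * assignment copies it and lets the compiler choose the copy strategy.
 * example_fill_flow() is only a hedged before/after illustration.
 */
#include <linux/ipv6.h>
#include <net/flow.h>

static void example_fill_flow(struct flowi6 *fl6, const struct ipv6_pinfo *np)
{
	/* previously: ipv6_addr_copy(&fl6->daddr, &np->daddr); */
	fl6->daddr = np->daddr;
	fl6->saddr = np->saddr;
}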
inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped; newsk->sk_backlog_rcv = dccp_v4_do_rcv; @@ -526,9 +526,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_DCCP; - ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr); + fl6.daddr = ireq6->rmt_addr; final_p = fl6_update_dst(&fl6, opt, &final); - ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr); + fl6.saddr = ireq6->loc_addr; fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.fl6_dport = inet_rsk(req)->rmt_port; fl6.fl6_sport = inet_rsk(req)->loc_port; @@ -559,9 +559,9 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk, memcpy(newnp, np, sizeof(struct ipv6_pinfo)); - ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr); - ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr); - ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr); + newnp->daddr = ireq6->rmt_addr; + newnp->saddr = ireq6->loc_addr; + newnp->rcv_saddr = ireq6->loc_addr; newsk->sk_bound_dev_if = ireq6->iif; /* Now IPv6 options... @@ -877,7 +877,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; - ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst); + usin->sin6_addr = flowlabel->dst; fl6_sock_release(flowlabel); } } @@ -910,7 +910,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, return -EINVAL; } - ipv6_addr_copy(&np->daddr, &usin->sin6_addr); + np->daddr = usin->sin6_addr; np->flow_label = fl6.flowlabel; /* @@ -949,8 +949,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, saddr = &np->rcv_saddr; fl6.flowi6_proto = IPPROTO_DCCP; - ipv6_addr_copy(&fl6.daddr, &np->daddr); - ipv6_addr_copy(&fl6.saddr, saddr ? saddr : &np->saddr); + fl6.daddr = np->daddr; + fl6.saddr = saddr ? 
*saddr : np->saddr; fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.fl6_dport = usin->sin6_port; fl6.fl6_sport = inet->inet_sport; @@ -966,11 +966,11 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, if (saddr == NULL) { saddr = &fl6.saddr; - ipv6_addr_copy(&np->rcv_saddr, saddr); + np->rcv_saddr = *saddr; } /* set the source address */ - ipv6_addr_copy(&np->saddr, saddr); + np->saddr = *saddr; inet->inet_rcv_saddr = LOOPBACK4_IPV6; __ip6_dst_store(sk, dst, NULL, NULL); diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index d7041a0963af..b50d5fd3d696 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -60,8 +60,8 @@ void dccp_time_wait(struct sock *sk, int state, int timeo) tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot); tw6 = inet6_twsk((struct sock *)tw); - ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr); - ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr); + tw6->tw_v6_daddr = np->daddr; + tw6->tw_v6_rcv_saddr = np->rcv_saddr; tw->tw_ipv6only = np->ipv6only; } #endif @@ -100,7 +100,7 @@ struct sock *dccp_create_openreq_child(struct sock *sk, * (* Generate a new socket and switch to that socket *) * Set S := new socket for this port pair */ - struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC); + struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC); if (newsk != NULL) { struct dccp_request_sock *dreq = dccp_rsk(req); diff --git a/net/dccp/probe.c b/net/dccp/probe.c index 33d0e6297c21..0a8d6ebd9b45 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c @@ -152,6 +152,17 @@ static const struct file_operations dccpprobe_fops = { .llseek = noop_llseek, }; +static __init int setup_jprobe(void) +{ + int ret = register_jprobe(&dccp_send_probe); + + if (ret) { + request_module("dccp"); + ret = register_jprobe(&dccp_send_probe); + } + return ret; +} + static __init int dccpprobe_init(void) { int ret = -ENOMEM; @@ -163,8 +174,7 @@ static __init int dccpprobe_init(void) if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) goto err0; - try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0, - "dccp"); + ret = setup_jprobe(); if (ret) goto err1; diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c index 7f0eb087dc11..3532ac64c82d 100644 --- a/net/decnet/dn_neigh.c +++ b/net/decnet/dn_neigh.c @@ -107,7 +107,7 @@ struct neigh_table dn_neigh_table = { .gc_staletime = 60 * HZ, .reachable_time = 30 * HZ, .delay_probe_time = 5 * HZ, - .queue_len = 3, + .queue_len_bytes = 64*1024, .ucast_probes = 0, .app_probes = 0, .mcast_probes = 0, diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index c53ded2a98df..274791cd7a35 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -1,5 +1,5 @@ -menuconfig NET_DSA - bool "Distributed Switch Architecture support" +config NET_DSA + tristate "Distributed Switch Architecture support" default n depends on EXPERIMENTAL && NETDEVICES && !S390 select PHYLIB @@ -23,38 +23,4 @@ config NET_DSA_TAG_TRAILER bool default n - -# switch drivers -config NET_DSA_MV88E6XXX - bool - default n - -config NET_DSA_MV88E6060 - bool "Marvell 88E6060 ethernet switch chip support" - select NET_DSA_TAG_TRAILER - ---help--- - This enables support for the Marvell 88E6060 ethernet switch - chip. 
- -config NET_DSA_MV88E6XXX_NEED_PPU - bool - default n - -config NET_DSA_MV88E6131 - bool "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support" - select NET_DSA_MV88E6XXX - select NET_DSA_MV88E6XXX_NEED_PPU - select NET_DSA_TAG_DSA - ---help--- - This enables support for the Marvell 88E6085/6095/6095F/6131 - ethernet switch chips. - -config NET_DSA_MV88E6123_61_65 - bool "Marvell 88E6123/6161/6165 ethernet switch chip support" - select NET_DSA_MV88E6XXX - select NET_DSA_TAG_EDSA - ---help--- - This enables support for the Marvell 88E6123/6161/6165 - ethernet switch chips. - endif diff --git a/net/dsa/Makefile b/net/dsa/Makefile index 2374faff4dea..7b9fcbbeda5d 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -1,13 +1,8 @@ -# tagging formats -obj-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o -obj-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o -obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o - -# switch drivers -obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o -obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o -obj-$(CONFIG_NET_DSA_MV88E6123_61_65) += mv88e6123_61_65.o -obj-$(CONFIG_NET_DSA_MV88E6131) += mv88e6131.o - # the core -obj-$(CONFIG_NET_DSA) += dsa.o slave.o +obj-$(CONFIG_NET_DSA) += dsa_core.o +dsa_core-y += dsa.o slave.o + +# tagging formats +dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o +dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o +dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 0dc1589343c3..88e7c2f3fa0d 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -29,6 +29,7 @@ void register_switch_driver(struct dsa_switch_driver *drv) list_add_tail(&drv->list, &dsa_switch_drivers); mutex_unlock(&dsa_switch_drivers_mutex); } +EXPORT_SYMBOL_GPL(register_switch_driver); void unregister_switch_driver(struct dsa_switch_driver *drv) { @@ -36,6 +37,7 @@ void unregister_switch_driver(struct dsa_switch_driver *drv) list_del_init(&drv->list); mutex_unlock(&dsa_switch_drivers_mutex); } +EXPORT_SYMBOL_GPL(unregister_switch_driver); static struct dsa_switch_driver * dsa_switch_probe(struct mii_bus *bus, int sw_addr, char **_name) @@ -199,29 +201,6 @@ static void dsa_switch_destroy(struct dsa_switch *ds) } -/* hooks for ethertype-less tagging formats *********************************/ -/* - * The original DSA tag format and some other tag formats have no - * ethertype, which means that we need to add a little hack to the - * networking receive path to make sure that received frames get - * the right ->protocol assigned to them when one of those tag - * formats is in use. 
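/*
 * Illustrative note: with register_switch_driver()/unregister_switch_driver()
 * exported above and NET_DSA made tristate, a switch driver can be built as
 * its own module against struct dsa_switch_driver, which this series moves
 * out of dsa_priv.h (the skeleton assumes it is now visible via <net/dsa.h>).
 * Everything named example_* below is an assumed placeholder, not a real
 * driver from this series.
 */
#include <linux/if_ether.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <net/dsa.h>

static char *example_probe(struct mii_bus *bus, int sw_addr)
{
	return NULL;			/* return a chip name when detected */
}

static int example_setup(struct dsa_switch *ds)
{
	return 0;			/* reset and configure the chip here */
}

static int example_set_addr(struct dsa_switch *ds, u8 *addr)
{
	return 0;
}

static int example_phy_read(struct dsa_switch *ds, int port, int regnum)
{
	return 0xffff;
}

static int example_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
{
	return 0;
}

static struct dsa_switch_driver example_switch_driver = {
	.tag_protocol	= cpu_to_be16(ETH_P_TRAILER),
	.probe		= example_probe,
	.setup		= example_setup,
	.set_addr	= example_set_addr,
	.phy_read	= example_phy_read,
	.phy_write	= example_phy_write,
};

static int __init example_driver_init(void)
{
	register_switch_driver(&example_switch_driver);
	return 0;
}
module_init(example_driver_init);

static void __exit example_driver_exit(void)
{
	unregister_switch_driver(&example_switch_driver);
}
module_exit(example_driver_exit);
MODULE_LICENSE("GPL");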
- */ -bool dsa_uses_dsa_tags(void *dsa_ptr) -{ - struct dsa_switch_tree *dst = dsa_ptr; - - return !!(dst->tag_protocol == htons(ETH_P_DSA)); -} - -bool dsa_uses_trailer_tags(void *dsa_ptr) -{ - struct dsa_switch_tree *dst = dsa_ptr; - - return !!(dst->tag_protocol == htons(ETH_P_TRAILER)); -} - - /* link polling *************************************************************/ static void dsa_link_poll_work(struct work_struct *ugly) { @@ -419,12 +398,36 @@ static struct platform_driver dsa_driver = { static int __init dsa_init_module(void) { - return platform_driver_register(&dsa_driver); + int rc; + + rc = platform_driver_register(&dsa_driver); + if (rc) + return rc; + +#ifdef CONFIG_NET_DSA_TAG_DSA + dev_add_pack(&dsa_packet_type); +#endif +#ifdef CONFIG_NET_DSA_TAG_EDSA + dev_add_pack(&edsa_packet_type); +#endif +#ifdef CONFIG_NET_DSA_TAG_TRAILER + dev_add_pack(&trailer_packet_type); +#endif + return 0; } module_init(dsa_init_module); static void __exit dsa_cleanup_module(void) { +#ifdef CONFIG_NET_DSA_TAG_TRAILER + dev_remove_pack(&trailer_packet_type); +#endif +#ifdef CONFIG_NET_DSA_TAG_EDSA + dev_remove_pack(&edsa_packet_type); +#endif +#ifdef CONFIG_NET_DSA_TAG_DSA + dev_remove_pack(&dsa_packet_type); +#endif platform_driver_unregister(&dsa_driver); } module_exit(dsa_cleanup_module); diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 4b0ea0540442..d4cf5cc747e3 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -11,97 +11,9 @@ #ifndef __DSA_PRIV_H #define __DSA_PRIV_H -#include <linux/list.h> #include <linux/phy.h> -#include <linux/timer.h> -#include <linux/workqueue.h> #include <net/dsa.h> -struct dsa_switch { - /* - * Parent switch tree, and switch index. - */ - struct dsa_switch_tree *dst; - int index; - - /* - * Configuration data for this switch. - */ - struct dsa_chip_data *pd; - - /* - * The used switch driver. - */ - struct dsa_switch_driver *drv; - - /* - * Reference to mii bus to use. - */ - struct mii_bus *master_mii_bus; - - /* - * Slave mii_bus and devices for the individual ports. - */ - u32 dsa_port_mask; - u32 phys_port_mask; - struct mii_bus *slave_mii_bus; - struct net_device *ports[DSA_MAX_PORTS]; -}; - -struct dsa_switch_tree { - /* - * Configuration data for the platform device that owns - * this dsa switch tree instance. - */ - struct dsa_platform_data *pd; - - /* - * Reference to network device to use, and which tagging - * protocol to use. - */ - struct net_device *master_netdev; - __be16 tag_protocol; - - /* - * The switch and port to which the CPU is attached. - */ - s8 cpu_switch; - s8 cpu_port; - - /* - * Link state polling. - */ - int link_poll_needed; - struct work_struct link_poll_work; - struct timer_list link_poll_timer; - - /* - * Data for the individual switch chips. - */ - struct dsa_switch *ds[DSA_MAX_SWITCHES]; -}; - -static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p) -{ - return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port); -} - -static inline u8 dsa_upstream_port(struct dsa_switch *ds) -{ - struct dsa_switch_tree *dst = ds->dst; - - /* - * If this is the root switch (i.e. the switch that connects - * to the CPU), return the cpu port number on this switch. - * Else return the (DSA) port number that connects to the - * switch that is one hop closer to the cpu. 
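/*
 * Illustrative note: the dev_add_pack()/dev_remove_pack() calls added to
 * dsa_init_module()/dsa_cleanup_module() above rely on each tagging format
 * exporting a struct packet_type (see the externs added to dsa_priv.h).
 * A hedged sketch of such a definition follows; example_rcv() and
 * example_packet_type are assumed names standing in for the real
 * DSA/EDSA/trailer receive hooks.
 */
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* a real handler strips the switch tag, maps the frame to the
	 * right slave netdevice and calls netif_receive_skb(); this
	 * placeholder simply drops the frame */
	kfree_skb(skb);
	return 0;
}

struct packet_type example_packet_type = {
	.type	= cpu_to_be16(ETH_P_TRAILER),
	.func	= example_rcv,
};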
- */ - if (dst->cpu_switch == ds->index) - return dst->cpu_port; - else - return ds->pd->rtable[dst->cpu_switch]; -} - struct dsa_slave_priv { /* * The linux network interface corresponding to this @@ -123,44 +35,8 @@ struct dsa_slave_priv { struct phy_device *phy; }; -struct dsa_switch_driver { - struct list_head list; - - __be16 tag_protocol; - int priv_size; - - /* - * Probing and setup. - */ - char *(*probe)(struct mii_bus *bus, int sw_addr); - int (*setup)(struct dsa_switch *ds); - int (*set_addr)(struct dsa_switch *ds, u8 *addr); - - /* - * Access to the switch's PHY registers. - */ - int (*phy_read)(struct dsa_switch *ds, int port, int regnum); - int (*phy_write)(struct dsa_switch *ds, int port, - int regnum, u16 val); - - /* - * Link state polling and IRQ handling. - */ - void (*poll_link)(struct dsa_switch *ds); - - /* - * ethtool hardware statistics. - */ - void (*get_strings)(struct dsa_switch *ds, int port, uint8_t *data); - void (*get_ethtool_stats)(struct dsa_switch *ds, - int port, uint64_t *data); - int (*get_sset_count)(struct dsa_switch *ds); -}; - /* dsa.c */ extern char dsa_driver_version[]; -void register_switch_driver(struct dsa_switch_driver *type); -void unregister_switch_driver(struct dsa_switch_driver *type); /* slave.c */ void dsa_slave_mii_bus_init(struct dsa_switch *ds); @@ -170,12 +46,15 @@ struct net_device *dsa_slave_create(struct dsa_switch *ds, /* tag_dsa.c */ netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev); +extern struct packet_type dsa_packet_type; /* tag_edsa.c */ netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev); +extern struct packet_type edsa_packet_type; /* tag_trailer.c */ netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev); +extern struct packet_type trailer_packet_type; #endif diff --git a/net/dsa/mv88e6060.c b/net/dsa/mv88e6060.c deleted file mode 100644 index 8f4ff5a2c813..000000000000 --- a/net/dsa/mv88e6060.c +++ /dev/null @@ -1,288 +0,0 @@ -/* - * net/dsa/mv88e6060.c - Driver for Marvell 88e6060 switch chips - * Copyright (c) 2008-2009 Marvell Semiconductor - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <linux/list.h> -#include <linux/netdevice.h> -#include <linux/phy.h> -#include "dsa_priv.h" - -#define REG_PORT(p) (8 + (p)) -#define REG_GLOBAL 0x0f - -static int reg_read(struct dsa_switch *ds, int addr, int reg) -{ - return mdiobus_read(ds->master_mii_bus, ds->pd->sw_addr + addr, reg); -} - -#define REG_READ(addr, reg) \ - ({ \ - int __ret; \ - \ - __ret = reg_read(ds, addr, reg); \ - if (__ret < 0) \ - return __ret; \ - __ret; \ - }) - - -static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) -{ - return mdiobus_write(ds->master_mii_bus, ds->pd->sw_addr + addr, - reg, val); -} - -#define REG_WRITE(addr, reg, val) \ - ({ \ - int __ret; \ - \ - __ret = reg_write(ds, addr, reg, val); \ - if (__ret < 0) \ - return __ret; \ - }) - -static char *mv88e6060_probe(struct mii_bus *bus, int sw_addr) -{ - int ret; - - ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03); - if (ret >= 0) { - ret &= 0xfff0; - if (ret == 0x0600) - return "Marvell 88E6060"; - } - - return NULL; -} - -static int mv88e6060_switch_reset(struct dsa_switch *ds) -{ - int i; - int ret; - - /* - * Set all ports to the disabled state. 
- */ - for (i = 0; i < 6; i++) { - ret = REG_READ(REG_PORT(i), 0x04); - REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); - } - - /* - * Wait for transmit queues to drain. - */ - msleep(2); - - /* - * Reset the switch. - */ - REG_WRITE(REG_GLOBAL, 0x0a, 0xa130); - - /* - * Wait up to one second for reset to complete. - */ - for (i = 0; i < 1000; i++) { - ret = REG_READ(REG_GLOBAL, 0x00); - if ((ret & 0x8000) == 0x0000) - break; - - msleep(1); - } - if (i == 1000) - return -ETIMEDOUT; - - return 0; -} - -static int mv88e6060_setup_global(struct dsa_switch *ds) -{ - /* - * Disable discarding of frames with excessive collisions, - * set the maximum frame size to 1536 bytes, and mask all - * interrupt sources. - */ - REG_WRITE(REG_GLOBAL, 0x04, 0x0800); - - /* - * Enable automatic address learning, set the address - * database size to 1024 entries, and set the default aging - * time to 5 minutes. - */ - REG_WRITE(REG_GLOBAL, 0x0a, 0x2130); - - return 0; -} - -static int mv88e6060_setup_port(struct dsa_switch *ds, int p) -{ - int addr = REG_PORT(p); - - /* - * Do not force flow control, disable Ingress and Egress - * Header tagging, disable VLAN tunneling, and set the port - * state to Forwarding. Additionally, if this is the CPU - * port, enable Ingress and Egress Trailer tagging mode. - */ - REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ? 0x4103 : 0x0003); - - /* - * Port based VLAN map: give each port its own address - * database, allow the CPU port to talk to each of the 'real' - * ports, and allow each of the 'real' ports to only talk to - * the CPU port. - */ - REG_WRITE(addr, 0x06, - ((p & 0xf) << 12) | - (dsa_is_cpu_port(ds, p) ? - ds->phys_port_mask : - (1 << ds->dst->cpu_port))); - - /* - * Port Association Vector: when learning source addresses - * of packets, add the address to the address database using - * a port bitmap that has only the bit for this port set and - * the other bits clear. 
- */ - REG_WRITE(addr, 0x0b, 1 << p); - - return 0; -} - -static int mv88e6060_setup(struct dsa_switch *ds) -{ - int i; - int ret; - - ret = mv88e6060_switch_reset(ds); - if (ret < 0) - return ret; - - /* @@@ initialise atu */ - - ret = mv88e6060_setup_global(ds); - if (ret < 0) - return ret; - - for (i = 0; i < 6; i++) { - ret = mv88e6060_setup_port(ds, i); - if (ret < 0) - return ret; - } - - return 0; -} - -static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr) -{ - REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]); - REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]); - REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]); - - return 0; -} - -static int mv88e6060_port_to_phy_addr(int port) -{ - if (port >= 0 && port <= 5) - return port; - return -1; -} - -static int mv88e6060_phy_read(struct dsa_switch *ds, int port, int regnum) -{ - int addr; - - addr = mv88e6060_port_to_phy_addr(port); - if (addr == -1) - return 0xffff; - - return reg_read(ds, addr, regnum); -} - -static int -mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) -{ - int addr; - - addr = mv88e6060_port_to_phy_addr(port); - if (addr == -1) - return 0xffff; - - return reg_write(ds, addr, regnum, val); -} - -static void mv88e6060_poll_link(struct dsa_switch *ds) -{ - int i; - - for (i = 0; i < DSA_MAX_PORTS; i++) { - struct net_device *dev; - int uninitialized_var(port_status); - int link; - int speed; - int duplex; - int fc; - - dev = ds->ports[i]; - if (dev == NULL) - continue; - - link = 0; - if (dev->flags & IFF_UP) { - port_status = reg_read(ds, REG_PORT(i), 0x00); - if (port_status < 0) - continue; - - link = !!(port_status & 0x1000); - } - - if (!link) { - if (netif_carrier_ok(dev)) { - printk(KERN_INFO "%s: link down\n", dev->name); - netif_carrier_off(dev); - } - continue; - } - - speed = (port_status & 0x0100) ? 100 : 10; - duplex = (port_status & 0x0200) ? 1 : 0; - fc = ((port_status & 0xc000) == 0xc000) ? 1 : 0; - - if (!netif_carrier_ok(dev)) { - printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " - "flow control %sabled\n", dev->name, - speed, duplex ? "full" : "half", - fc ? "en" : "dis"); - netif_carrier_on(dev); - } - } -} - -static struct dsa_switch_driver mv88e6060_switch_driver = { - .tag_protocol = htons(ETH_P_TRAILER), - .probe = mv88e6060_probe, - .setup = mv88e6060_setup, - .set_addr = mv88e6060_set_addr, - .phy_read = mv88e6060_phy_read, - .phy_write = mv88e6060_phy_write, - .poll_link = mv88e6060_poll_link, -}; - -static int __init mv88e6060_init(void) -{ - register_switch_driver(&mv88e6060_switch_driver); - return 0; -} -module_init(mv88e6060_init); - -static void __exit mv88e6060_cleanup(void) -{ - unregister_switch_driver(&mv88e6060_switch_driver); -} -module_exit(mv88e6060_cleanup); diff --git a/net/dsa/mv88e6123_61_65.c b/net/dsa/mv88e6123_61_65.c deleted file mode 100644 index 52faaa21a4d9..000000000000 --- a/net/dsa/mv88e6123_61_65.c +++ /dev/null @@ -1,447 +0,0 @@ -/* - * net/dsa/mv88e6123_61_65.c - Marvell 88e6123/6161/6165 switch chip support - * Copyright (c) 2008-2009 Marvell Semiconductor - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#include <linux/list.h> -#include <linux/netdevice.h> -#include <linux/phy.h> -#include "dsa_priv.h" -#include "mv88e6xxx.h" - -static char *mv88e6123_61_65_probe(struct mii_bus *bus, int sw_addr) -{ - int ret; - - ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); - if (ret >= 0) { - ret &= 0xfff0; - if (ret == 0x1210) - return "Marvell 88E6123"; - if (ret == 0x1610) - return "Marvell 88E6161"; - if (ret == 0x1650) - return "Marvell 88E6165"; - } - - return NULL; -} - -static int mv88e6123_61_65_switch_reset(struct dsa_switch *ds) -{ - int i; - int ret; - - /* - * Set all ports to the disabled state. - */ - for (i = 0; i < 8; i++) { - ret = REG_READ(REG_PORT(i), 0x04); - REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); - } - - /* - * Wait for transmit queues to drain. - */ - msleep(2); - - /* - * Reset the switch. - */ - REG_WRITE(REG_GLOBAL, 0x04, 0xc400); - - /* - * Wait up to one second for reset to complete. - */ - for (i = 0; i < 1000; i++) { - ret = REG_READ(REG_GLOBAL, 0x00); - if ((ret & 0xc800) == 0xc800) - break; - - msleep(1); - } - if (i == 1000) - return -ETIMEDOUT; - - return 0; -} - -static int mv88e6123_61_65_setup_global(struct dsa_switch *ds) -{ - int ret; - int i; - - /* - * Disable the PHY polling unit (since there won't be any - * external PHYs to poll), don't discard packets with - * excessive collisions, and mask all interrupt sources. - */ - REG_WRITE(REG_GLOBAL, 0x04, 0x0000); - - /* - * Set the default address aging time to 5 minutes, and - * enable address learn messages to be sent to all message - * ports. - */ - REG_WRITE(REG_GLOBAL, 0x0a, 0x0148); - - /* - * Configure the priority mapping registers. - */ - ret = mv88e6xxx_config_prio(ds); - if (ret < 0) - return ret; - - /* - * Configure the upstream port, and configure the upstream - * port as the port to which ingress and egress monitor frames - * are to be sent. - */ - REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110)); - - /* - * Disable remote management for now, and set the switch's - * DSA device number. - */ - REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f); - - /* - * Send all frames with destination addresses matching - * 01:80:c2:00:00:2x to the CPU port. - */ - REG_WRITE(REG_GLOBAL2, 0x02, 0xffff); - - /* - * Send all frames with destination addresses matching - * 01:80:c2:00:00:0x to the CPU port. - */ - REG_WRITE(REG_GLOBAL2, 0x03, 0xffff); - - /* - * Disable the loopback filter, disable flow control - * messages, disable flood broadcast override, disable - * removing of provider tags, disable ATU age violation - * interrupts, disable tag flow control, force flow - * control priority to the highest, and send all special - * multicast frames to the CPU at the highest priority. - */ - REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff); - - /* - * Program the DSA routing table. - */ - for (i = 0; i < 32; i++) { - int nexthop; - - nexthop = 0x1f; - if (i != ds->index && i < ds->dst->pd->nr_chips) - nexthop = ds->pd->rtable[i] & 0x1f; - - REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop); - } - - /* - * Clear all trunk masks. - */ - for (i = 0; i < 8; i++) - REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff); - - /* - * Clear all trunk mappings. - */ - for (i = 0; i < 16; i++) - REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11)); - - /* - * Disable ingress rate limiting by resetting all ingress - * rate limit registers to their initial state. 
- */ - for (i = 0; i < 6; i++) - REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8)); - - /* - * Initialise cross-chip port VLAN table to reset defaults. - */ - REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000); - - /* - * Clear the priority override table. - */ - for (i = 0; i < 16; i++) - REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8)); - - /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */ - - return 0; -} - -static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p) -{ - int addr = REG_PORT(p); - u16 val; - - /* - * MAC Forcing register: don't force link, speed, duplex - * or flow control state to any particular values on physical - * ports, but force the CPU port and all DSA ports to 1000 Mb/s - * full duplex. - */ - if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p)) - REG_WRITE(addr, 0x01, 0x003e); - else - REG_WRITE(addr, 0x01, 0x0003); - - /* - * Do not limit the period of time that this port can be - * paused for by the remote end or the period of time that - * this port can pause the remote end. - */ - REG_WRITE(addr, 0x02, 0x0000); - - /* - * Port Control: disable Drop-on-Unlock, disable Drop-on-Lock, - * disable Header mode, enable IGMP/MLD snooping, disable VLAN - * tunneling, determine priority by looking at 802.1p and IP - * priority fields (IP prio has precedence), and set STP state - * to Forwarding. - * - * If this is the CPU link, use DSA or EDSA tagging depending - * on which tagging mode was configured. - * - * If this is a link to another switch, use DSA tagging mode. - * - * If this is the upstream port for this switch, enable - * forwarding of unknown unicasts and multicasts. - */ - val = 0x0433; - if (dsa_is_cpu_port(ds, p)) { - if (ds->dst->tag_protocol == htons(ETH_P_EDSA)) - val |= 0x3300; - else - val |= 0x0100; - } - if (ds->dsa_port_mask & (1 << p)) - val |= 0x0100; - if (p == dsa_upstream_port(ds)) - val |= 0x000c; - REG_WRITE(addr, 0x04, val); - - /* - * Port Control 1: disable trunking. Also, if this is the - * CPU port, enable learn messages to be sent to this port. - */ - REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000); - - /* - * Port based VLAN map: give each port its own address - * database, allow the CPU port to talk to each of the 'real' - * ports, and allow each of the 'real' ports to only talk to - * the upstream port. - */ - val = (p & 0xf) << 12; - if (dsa_is_cpu_port(ds, p)) - val |= ds->phys_port_mask; - else - val |= 1 << dsa_upstream_port(ds); - REG_WRITE(addr, 0x06, val); - - /* - * Default VLAN ID and priority: don't set a default VLAN - * ID, and set the default packet priority to zero. - */ - REG_WRITE(addr, 0x07, 0x0000); - - /* - * Port Control 2: don't force a good FCS, set the maximum - * frame size to 10240 bytes, don't let the switch add or - * strip 802.1q tags, don't discard tagged or untagged frames - * on this port, do a destination address lookup on all - * received packets as usual, disable ARP mirroring and don't - * send a copy of all transmitted/received frames on this port - * to the CPU. - */ - REG_WRITE(addr, 0x08, 0x2080); - - /* - * Egress rate control: disable egress rate control. - */ - REG_WRITE(addr, 0x09, 0x0001); - - /* - * Egress rate control 2: disable egress rate control. - */ - REG_WRITE(addr, 0x0a, 0x0000); - - /* - * Port Association Vector: when learning source addresses - * of packets, add the address to the address database using - * a port bitmap that has only the bit for this port set and - * the other bits clear. 
- */ - REG_WRITE(addr, 0x0b, 1 << p); - - /* - * Port ATU control: disable limiting the number of address - * database entries that this port is allowed to use. - */ - REG_WRITE(addr, 0x0c, 0x0000); - - /* - * Priorit Override: disable DA, SA and VTU priority override. - */ - REG_WRITE(addr, 0x0d, 0x0000); - - /* - * Port Ethertype: use the Ethertype DSA Ethertype value. - */ - REG_WRITE(addr, 0x0f, ETH_P_EDSA); - - /* - * Tag Remap: use an identity 802.1p prio -> switch prio - * mapping. - */ - REG_WRITE(addr, 0x18, 0x3210); - - /* - * Tag Remap 2: use an identity 802.1p prio -> switch prio - * mapping. - */ - REG_WRITE(addr, 0x19, 0x7654); - - return 0; -} - -static int mv88e6123_61_65_setup(struct dsa_switch *ds) -{ - struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); - int i; - int ret; - - mutex_init(&ps->smi_mutex); - mutex_init(&ps->stats_mutex); - - ret = mv88e6123_61_65_switch_reset(ds); - if (ret < 0) - return ret; - - /* @@@ initialise vtu and atu */ - - ret = mv88e6123_61_65_setup_global(ds); - if (ret < 0) - return ret; - - for (i = 0; i < 6; i++) { - ret = mv88e6123_61_65_setup_port(ds, i); - if (ret < 0) - return ret; - } - - return 0; -} - -static int mv88e6123_61_65_port_to_phy_addr(int port) -{ - if (port >= 0 && port <= 4) - return port; - return -1; -} - -static int -mv88e6123_61_65_phy_read(struct dsa_switch *ds, int port, int regnum) -{ - int addr = mv88e6123_61_65_port_to_phy_addr(port); - return mv88e6xxx_phy_read(ds, addr, regnum); -} - -static int -mv88e6123_61_65_phy_write(struct dsa_switch *ds, - int port, int regnum, u16 val) -{ - int addr = mv88e6123_61_65_port_to_phy_addr(port); - return mv88e6xxx_phy_write(ds, addr, regnum, val); -} - -static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = { - { "in_good_octets", 8, 0x00, }, - { "in_bad_octets", 4, 0x02, }, - { "in_unicast", 4, 0x04, }, - { "in_broadcasts", 4, 0x06, }, - { "in_multicasts", 4, 0x07, }, - { "in_pause", 4, 0x16, }, - { "in_undersize", 4, 0x18, }, - { "in_fragments", 4, 0x19, }, - { "in_oversize", 4, 0x1a, }, - { "in_jabber", 4, 0x1b, }, - { "in_rx_error", 4, 0x1c, }, - { "in_fcs_error", 4, 0x1d, }, - { "out_octets", 8, 0x0e, }, - { "out_unicast", 4, 0x10, }, - { "out_broadcasts", 4, 0x13, }, - { "out_multicasts", 4, 0x12, }, - { "out_pause", 4, 0x15, }, - { "excessive", 4, 0x11, }, - { "collisions", 4, 0x1e, }, - { "deferred", 4, 0x05, }, - { "single", 4, 0x14, }, - { "multiple", 4, 0x17, }, - { "out_fcs_error", 4, 0x03, }, - { "late", 4, 0x1f, }, - { "hist_64bytes", 4, 0x08, }, - { "hist_65_127bytes", 4, 0x09, }, - { "hist_128_255bytes", 4, 0x0a, }, - { "hist_256_511bytes", 4, 0x0b, }, - { "hist_512_1023bytes", 4, 0x0c, }, - { "hist_1024_max_bytes", 4, 0x0d, }, -}; - -static void -mv88e6123_61_65_get_strings(struct dsa_switch *ds, int port, uint8_t *data) -{ - mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats), - mv88e6123_61_65_hw_stats, port, data); -} - -static void -mv88e6123_61_65_get_ethtool_stats(struct dsa_switch *ds, - int port, uint64_t *data) -{ - mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats), - mv88e6123_61_65_hw_stats, port, data); -} - -static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds) -{ - return ARRAY_SIZE(mv88e6123_61_65_hw_stats); -} - -static struct dsa_switch_driver mv88e6123_61_65_switch_driver = { - .tag_protocol = cpu_to_be16(ETH_P_EDSA), - .priv_size = sizeof(struct mv88e6xxx_priv_state), - .probe = mv88e6123_61_65_probe, - .setup = mv88e6123_61_65_setup, - .set_addr = mv88e6xxx_set_addr_indirect, - .phy_read = 
mv88e6123_61_65_phy_read, - .phy_write = mv88e6123_61_65_phy_write, - .poll_link = mv88e6xxx_poll_link, - .get_strings = mv88e6123_61_65_get_strings, - .get_ethtool_stats = mv88e6123_61_65_get_ethtool_stats, - .get_sset_count = mv88e6123_61_65_get_sset_count, -}; - -static int __init mv88e6123_61_65_init(void) -{ - register_switch_driver(&mv88e6123_61_65_switch_driver); - return 0; -} -module_init(mv88e6123_61_65_init); - -static void __exit mv88e6123_61_65_cleanup(void) -{ - unregister_switch_driver(&mv88e6123_61_65_switch_driver); -} -module_exit(mv88e6123_61_65_cleanup); diff --git a/net/dsa/mv88e6131.c b/net/dsa/mv88e6131.c deleted file mode 100644 index 9bd1061fa4ee..000000000000 --- a/net/dsa/mv88e6131.c +++ /dev/null @@ -1,443 +0,0 @@ -/* - * net/dsa/mv88e6131.c - Marvell 88e6095/6095f/6131 switch chip support - * Copyright (c) 2008-2009 Marvell Semiconductor - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <linux/list.h> -#include <linux/netdevice.h> -#include <linux/phy.h> -#include "dsa_priv.h" -#include "mv88e6xxx.h" - -/* - * Switch product IDs - */ -#define ID_6085 0x04a0 -#define ID_6095 0x0950 -#define ID_6131 0x1060 - -static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr) -{ - int ret; - - ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); - if (ret >= 0) { - ret &= 0xfff0; - if (ret == ID_6085) - return "Marvell 88E6085"; - if (ret == ID_6095) - return "Marvell 88E6095/88E6095F"; - if (ret == ID_6131) - return "Marvell 88E6131"; - } - - return NULL; -} - -static int mv88e6131_switch_reset(struct dsa_switch *ds) -{ - int i; - int ret; - - /* - * Set all ports to the disabled state. - */ - for (i = 0; i < 11; i++) { - ret = REG_READ(REG_PORT(i), 0x04); - REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); - } - - /* - * Wait for transmit queues to drain. - */ - msleep(2); - - /* - * Reset the switch. - */ - REG_WRITE(REG_GLOBAL, 0x04, 0xc400); - - /* - * Wait up to one second for reset to complete. - */ - for (i = 0; i < 1000; i++) { - ret = REG_READ(REG_GLOBAL, 0x00); - if ((ret & 0xc800) == 0xc800) - break; - - msleep(1); - } - if (i == 1000) - return -ETIMEDOUT; - - return 0; -} - -static int mv88e6131_setup_global(struct dsa_switch *ds) -{ - int ret; - int i; - - /* - * Enable the PHY polling unit, don't discard packets with - * excessive collisions, use a weighted fair queueing scheme - * to arbitrate between packet queues, set the maximum frame - * size to 1632, and mask all interrupt sources. - */ - REG_WRITE(REG_GLOBAL, 0x04, 0x4400); - - /* - * Set the default address aging time to 5 minutes, and - * enable address learn messages to be sent to all message - * ports. - */ - REG_WRITE(REG_GLOBAL, 0x0a, 0x0148); - - /* - * Configure the priority mapping registers. - */ - ret = mv88e6xxx_config_prio(ds); - if (ret < 0) - return ret; - - /* - * Set the VLAN ethertype to 0x8100. - */ - REG_WRITE(REG_GLOBAL, 0x19, 0x8100); - - /* - * Disable ARP mirroring, and configure the upstream port as - * the port to which ingress and egress monitor frames are to - * be sent. - */ - REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1100) | 0x00f0); - - /* - * Disable cascade port functionality unless this device - * is used in a cascade configuration, and set the switch's - * DSA device number. 
- */ - if (ds->dst->pd->nr_chips > 1) - REG_WRITE(REG_GLOBAL, 0x1c, 0xf000 | (ds->index & 0x1f)); - else - REG_WRITE(REG_GLOBAL, 0x1c, 0xe000 | (ds->index & 0x1f)); - - /* - * Send all frames with destination addresses matching - * 01:80:c2:00:00:0x to the CPU port. - */ - REG_WRITE(REG_GLOBAL2, 0x03, 0xffff); - - /* - * Ignore removed tag data on doubly tagged packets, disable - * flow control messages, force flow control priority to the - * highest, and send all special multicast frames to the CPU - * port at the highest priority. - */ - REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff); - - /* - * Program the DSA routing table. - */ - for (i = 0; i < 32; i++) { - int nexthop; - - nexthop = 0x1f; - if (i != ds->index && i < ds->dst->pd->nr_chips) - nexthop = ds->pd->rtable[i] & 0x1f; - - REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop); - } - - /* - * Clear all trunk masks. - */ - for (i = 0; i < 8; i++) - REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7ff); - - /* - * Clear all trunk mappings. - */ - for (i = 0; i < 16; i++) - REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11)); - - /* - * Force the priority of IGMP/MLD snoop frames and ARP frames - * to the highest setting. - */ - REG_WRITE(REG_GLOBAL2, 0x0f, 0x00ff); - - return 0; -} - -static int mv88e6131_setup_port(struct dsa_switch *ds, int p) -{ - struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); - int addr = REG_PORT(p); - u16 val; - - /* - * MAC Forcing register: don't force link, speed, duplex - * or flow control state to any particular values on physical - * ports, but force the CPU port and all DSA ports to 1000 Mb/s - * (100 Mb/s on 6085) full duplex. - */ - if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p)) - if (ps->id == ID_6085) - REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */ - else - REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */ - else - REG_WRITE(addr, 0x01, 0x0003); - - /* - * Port Control: disable Core Tag, disable Drop-on-Lock, - * transmit frames unmodified, disable Header mode, - * enable IGMP/MLD snoop, disable DoubleTag, disable VLAN - * tunneling, determine priority by looking at 802.1p and - * IP priority fields (IP prio has precedence), and set STP - * state to Forwarding. - * - * If this is the upstream port for this switch, enable - * forwarding of unknown unicasts, and enable DSA tagging - * mode. - * - * If this is the link to another switch, use DSA tagging - * mode, but do not enable forwarding of unknown unicasts. - */ - val = 0x0433; - if (p == dsa_upstream_port(ds)) { - val |= 0x0104; - /* - * On 6085, unknown multicast forward is controlled - * here rather than in Port Control 2 register. - */ - if (ps->id == ID_6085) - val |= 0x0008; - } - if (ds->dsa_port_mask & (1 << p)) - val |= 0x0100; - REG_WRITE(addr, 0x04, val); - - /* - * Port Control 1: disable trunking. Also, if this is the - * CPU port, enable learn messages to be sent to this port. - */ - REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000); - - /* - * Port based VLAN map: give each port its own address - * database, allow the CPU port to talk to each of the 'real' - * ports, and allow each of the 'real' ports to only talk to - * the upstream port. - */ - val = (p & 0xf) << 12; - if (dsa_is_cpu_port(ds, p)) - val |= ds->phys_port_mask; - else - val |= 1 << dsa_upstream_port(ds); - REG_WRITE(addr, 0x06, val); - - /* - * Default VLAN ID and priority: don't set a default VLAN - * ID, and set the default packet priority to zero. 
- */ - REG_WRITE(addr, 0x07, 0x0000); - - /* - * Port Control 2: don't force a good FCS, don't use - * VLAN-based, source address-based or destination - * address-based priority overrides, don't let the switch - * add or strip 802.1q tags, don't discard tagged or - * untagged frames on this port, do a destination address - * lookup on received packets as usual, don't send a copy - * of all transmitted/received frames on this port to the - * CPU, and configure the upstream port number. - * - * If this is the upstream port for this switch, enable - * forwarding of unknown multicast addresses. - */ - if (ps->id == ID_6085) - /* - * on 6085, bits 3:0 are reserved, bit 6 control ARP - * mirroring, and multicast forward is handled in - * Port Control register. - */ - REG_WRITE(addr, 0x08, 0x0080); - else { - val = 0x0080 | dsa_upstream_port(ds); - if (p == dsa_upstream_port(ds)) - val |= 0x0040; - REG_WRITE(addr, 0x08, val); - } - - /* - * Rate Control: disable ingress rate limiting. - */ - REG_WRITE(addr, 0x09, 0x0000); - - /* - * Rate Control 2: disable egress rate limiting. - */ - REG_WRITE(addr, 0x0a, 0x0000); - - /* - * Port Association Vector: when learning source addresses - * of packets, add the address to the address database using - * a port bitmap that has only the bit for this port set and - * the other bits clear. - */ - REG_WRITE(addr, 0x0b, 1 << p); - - /* - * Tag Remap: use an identity 802.1p prio -> switch prio - * mapping. - */ - REG_WRITE(addr, 0x18, 0x3210); - - /* - * Tag Remap 2: use an identity 802.1p prio -> switch prio - * mapping. - */ - REG_WRITE(addr, 0x19, 0x7654); - - return 0; -} - -static int mv88e6131_setup(struct dsa_switch *ds) -{ - struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); - int i; - int ret; - - mutex_init(&ps->smi_mutex); - mv88e6xxx_ppu_state_init(ds); - mutex_init(&ps->stats_mutex); - - ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0; - - ret = mv88e6131_switch_reset(ds); - if (ret < 0) - return ret; - - /* @@@ initialise vtu and atu */ - - ret = mv88e6131_setup_global(ds); - if (ret < 0) - return ret; - - for (i = 0; i < 11; i++) { - ret = mv88e6131_setup_port(ds, i); - if (ret < 0) - return ret; - } - - return 0; -} - -static int mv88e6131_port_to_phy_addr(int port) -{ - if (port >= 0 && port <= 11) - return port; - return -1; -} - -static int -mv88e6131_phy_read(struct dsa_switch *ds, int port, int regnum) -{ - int addr = mv88e6131_port_to_phy_addr(port); - return mv88e6xxx_phy_read_ppu(ds, addr, regnum); -} - -static int -mv88e6131_phy_write(struct dsa_switch *ds, - int port, int regnum, u16 val) -{ - int addr = mv88e6131_port_to_phy_addr(port); - return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val); -} - -static struct mv88e6xxx_hw_stat mv88e6131_hw_stats[] = { - { "in_good_octets", 8, 0x00, }, - { "in_bad_octets", 4, 0x02, }, - { "in_unicast", 4, 0x04, }, - { "in_broadcasts", 4, 0x06, }, - { "in_multicasts", 4, 0x07, }, - { "in_pause", 4, 0x16, }, - { "in_undersize", 4, 0x18, }, - { "in_fragments", 4, 0x19, }, - { "in_oversize", 4, 0x1a, }, - { "in_jabber", 4, 0x1b, }, - { "in_rx_error", 4, 0x1c, }, - { "in_fcs_error", 4, 0x1d, }, - { "out_octets", 8, 0x0e, }, - { "out_unicast", 4, 0x10, }, - { "out_broadcasts", 4, 0x13, }, - { "out_multicasts", 4, 0x12, }, - { "out_pause", 4, 0x15, }, - { "excessive", 4, 0x11, }, - { "collisions", 4, 0x1e, }, - { "deferred", 4, 0x05, }, - { "single", 4, 0x14, }, - { "multiple", 4, 0x17, }, - { "out_fcs_error", 4, 0x03, }, - { "late", 4, 0x1f, }, - { "hist_64bytes", 4, 0x08, }, - { "hist_65_127bytes", 4, 
0x09, }, - { "hist_128_255bytes", 4, 0x0a, }, - { "hist_256_511bytes", 4, 0x0b, }, - { "hist_512_1023bytes", 4, 0x0c, }, - { "hist_1024_max_bytes", 4, 0x0d, }, -}; - -static void -mv88e6131_get_strings(struct dsa_switch *ds, int port, uint8_t *data) -{ - mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6131_hw_stats), - mv88e6131_hw_stats, port, data); -} - -static void -mv88e6131_get_ethtool_stats(struct dsa_switch *ds, - int port, uint64_t *data) -{ - mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6131_hw_stats), - mv88e6131_hw_stats, port, data); -} - -static int mv88e6131_get_sset_count(struct dsa_switch *ds) -{ - return ARRAY_SIZE(mv88e6131_hw_stats); -} - -static struct dsa_switch_driver mv88e6131_switch_driver = { - .tag_protocol = cpu_to_be16(ETH_P_DSA), - .priv_size = sizeof(struct mv88e6xxx_priv_state), - .probe = mv88e6131_probe, - .setup = mv88e6131_setup, - .set_addr = mv88e6xxx_set_addr_direct, - .phy_read = mv88e6131_phy_read, - .phy_write = mv88e6131_phy_write, - .poll_link = mv88e6xxx_poll_link, - .get_strings = mv88e6131_get_strings, - .get_ethtool_stats = mv88e6131_get_ethtool_stats, - .get_sset_count = mv88e6131_get_sset_count, -}; - -static int __init mv88e6131_init(void) -{ - register_switch_driver(&mv88e6131_switch_driver); - return 0; -} -module_init(mv88e6131_init); - -static void __exit mv88e6131_cleanup(void) -{ - unregister_switch_driver(&mv88e6131_switch_driver); -} -module_exit(mv88e6131_cleanup); diff --git a/net/dsa/mv88e6xxx.c b/net/dsa/mv88e6xxx.c deleted file mode 100644 index efe661a9def4..000000000000 --- a/net/dsa/mv88e6xxx.c +++ /dev/null @@ -1,522 +0,0 @@ -/* - * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support - * Copyright (c) 2008 Marvell Semiconductor - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include <linux/list.h> -#include <linux/netdevice.h> -#include <linux/phy.h> -#include "dsa_priv.h" -#include "mv88e6xxx.h" - -/* - * If the switch's ADDR[4:0] strap pins are strapped to zero, it will - * use all 32 SMI bus addresses on its SMI bus, and all switch registers - * will be directly accessible on some {device address,register address} - * pair. If the ADDR[4:0] pins are not strapped to zero, the switch - * will only respond to SMI transactions to that specific address, and - * an indirect addressing mechanism needs to be used to access its - * registers. - */ -static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr) -{ - int ret; - int i; - - for (i = 0; i < 16; i++) { - ret = mdiobus_read(bus, sw_addr, 0); - if (ret < 0) - return ret; - - if ((ret & 0x8000) == 0) - return 0; - } - - return -ETIMEDOUT; -} - -int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg) -{ - int ret; - - if (sw_addr == 0) - return mdiobus_read(bus, addr, reg); - - /* - * Wait for the bus to become free. - */ - ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); - if (ret < 0) - return ret; - - /* - * Transmit the read command. - */ - ret = mdiobus_write(bus, sw_addr, 0, 0x9800 | (addr << 5) | reg); - if (ret < 0) - return ret; - - /* - * Wait for the read command to complete. - */ - ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); - if (ret < 0) - return ret; - - /* - * Read the data. 
- */ - ret = mdiobus_read(bus, sw_addr, 1); - if (ret < 0) - return ret; - - return ret & 0xffff; -} - -int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg) -{ - struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); - int ret; - - mutex_lock(&ps->smi_mutex); - ret = __mv88e6xxx_reg_read(ds->master_mii_bus, - ds->pd->sw_addr, addr, reg); - mutex_unlock(&ps->smi_mutex); - - return ret; -} - -int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr, - int reg, u16 val) -{ - int ret; - - if (sw_addr == 0) - return mdiobus_write(bus, addr, reg, val); - - /* - * Wait for the bus to become free. - */ - ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); - if (ret < 0) - return ret; - - /* - * Transmit the data to write. - */ - ret = mdiobus_write(bus, sw_addr, 1, val); - if (ret < 0) - return ret; - - /* - * Transmit the write command. - */ - ret = mdiobus_write(bus, sw_addr, 0, 0x9400 | (addr << 5) | reg); - if (ret < 0) - return ret; - - /* - * Wait for the write command to complete. - */ - ret = mv88e6xxx_reg_wait_ready(bus, sw_addr); - if (ret < 0) - return ret; - - return 0; -} - -int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) -{ - struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); - int ret; - - mutex_lock(&ps->smi_mutex); - ret = __mv88e6xxx_reg_write(ds->master_mii_bus, - ds->pd->sw_addr, addr, reg, val); - mutex_unlock(&ps->smi_mutex); - - return ret; -} - -int mv88e6xxx_config_prio(struct dsa_switch *ds) -{ - /* - * Configure the IP ToS mapping registers. - */ - REG_WRITE(REG_GLOBAL, 0x10, 0x0000); - REG_WRITE(REG_GLOBAL, 0x11, 0x0000); - REG_WRITE(REG_GLOBAL, 0x12, 0x5555); - REG_WRITE(REG_GLOBAL, 0x13, 0x5555); - REG_WRITE(REG_GLOBAL, 0x14, 0xaaaa); - REG_WRITE(REG_GLOBAL, 0x15, 0xaaaa); - REG_WRITE(REG_GLOBAL, 0x16, 0xffff); - REG_WRITE(REG_GLOBAL, 0x17, 0xffff); - - /* - * Configure the IEEE 802.1p priority mapping register. - */ - REG_WRITE(REG_GLOBAL, 0x18, 0xfa41); - - return 0; -} - -int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr) -{ - REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]); - REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]); - REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]); - - return 0; -} - -int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr) -{ - int i; - int ret; - - for (i = 0; i < 6; i++) { - int j; - - /* - * Write the MAC address byte. - */ - REG_WRITE(REG_GLOBAL2, 0x0d, 0x8000 | (i << 8) | addr[i]); - - /* - * Wait for the write to complete. 
- */ - for (j = 0; j < 16; j++) { - ret = REG_READ(REG_GLOBAL2, 0x0d); - if ((ret & 0x8000) == 0) - break; - } - if (j == 16) - return -ETIMEDOUT; - } - - return 0; -} - -int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum) -{ - if (addr >= 0) - return mv88e6xxx_reg_read(ds, addr, regnum); - return 0xffff; -} - -int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val) -{ - if (addr >= 0) - return mv88e6xxx_reg_write(ds, addr, regnum, val); - return 0; -} - -#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU -static int mv88e6xxx_ppu_disable(struct dsa_switch *ds) -{ - int ret; - int i; - - ret = REG_READ(REG_GLOBAL, 0x04); - REG_WRITE(REG_GLOBAL, 0x04, ret & ~0x4000); - - for (i = 0; i < 1000; i++) { - ret = REG_READ(REG_GLOBAL, 0x00); - msleep(1); - if ((ret & 0xc000) != 0xc000) - return 0; - } - - return -ETIMEDOUT; -} - -static int mv88e6xxx_ppu_enable(struct dsa_switch *ds) -{ - int ret; - int i; - - ret = REG_READ(REG_GLOBAL, 0x04); - REG_WRITE(REG_GLOBAL, 0x04, ret | 0x4000); - - for (i = 0; i < 1000; i++) { - ret = REG_READ(REG_GLOBAL, 0x00); - msleep(1); - if ((ret & 0xc000) == 0xc000) - return 0; - } - - return -ETIMEDOUT; -} - -static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly) -{ - struct mv88e6xxx_priv_state *ps; - - ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work); - if (mutex_trylock(&ps->ppu_mutex)) { - struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1; - - if (mv88e6xxx_ppu_enable(ds) == 0) - ps->ppu_disabled = 0; - mutex_unlock(&ps->ppu_mutex); - } -} - -static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps) -{ - struct mv88e6xxx_priv_state *ps = (void *)_ps; - - schedule_work(&ps->ppu_work); -} - -static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds) -{ - struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); - int ret; - - mutex_lock(&ps->ppu_mutex); - - /* - * If the PHY polling unit is enabled, disable it so that - * we can access the PHY registers. If it was already - * disabled, cancel the timer that is going to re-enable - * it. - */ - if (!ps->ppu_disabled) { - ret = mv88e6xxx_ppu_disable(ds); - if (ret < 0) { - mutex_unlock(&ps->ppu_mutex); - return ret; - } - ps->ppu_disabled = 1; - } else { - del_timer(&ps->ppu_timer); - ret = 0; - } - - return ret; -} - -static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds) -{ - struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); - - /* - * Schedule a timer to re-enable the PHY polling unit. 
- */ - mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10)); - mutex_unlock(&ps->ppu_mutex); -} - -void mv88e6xxx_ppu_state_init(struct dsa_switch *ds) -{ - struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); - - mutex_init(&ps->ppu_mutex); - INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work); - init_timer(&ps->ppu_timer); - ps->ppu_timer.data = (unsigned long)ps; - ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer; -} - -int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum) -{ - int ret; - - ret = mv88e6xxx_ppu_access_get(ds); - if (ret >= 0) { - ret = mv88e6xxx_reg_read(ds, addr, regnum); - mv88e6xxx_ppu_access_put(ds); - } - - return ret; -} - -int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr, - int regnum, u16 val) -{ - int ret; - - ret = mv88e6xxx_ppu_access_get(ds); - if (ret >= 0) { - ret = mv88e6xxx_reg_write(ds, addr, regnum, val); - mv88e6xxx_ppu_access_put(ds); - } - - return ret; -} -#endif - -void mv88e6xxx_poll_link(struct dsa_switch *ds) -{ - int i; - - for (i = 0; i < DSA_MAX_PORTS; i++) { - struct net_device *dev; - int uninitialized_var(port_status); - int link; - int speed; - int duplex; - int fc; - - dev = ds->ports[i]; - if (dev == NULL) - continue; - - link = 0; - if (dev->flags & IFF_UP) { - port_status = mv88e6xxx_reg_read(ds, REG_PORT(i), 0x00); - if (port_status < 0) - continue; - - link = !!(port_status & 0x0800); - } - - if (!link) { - if (netif_carrier_ok(dev)) { - printk(KERN_INFO "%s: link down\n", dev->name); - netif_carrier_off(dev); - } - continue; - } - - switch (port_status & 0x0300) { - case 0x0000: - speed = 10; - break; - case 0x0100: - speed = 100; - break; - case 0x0200: - speed = 1000; - break; - default: - speed = -1; - break; - } - duplex = (port_status & 0x0400) ? 1 : 0; - fc = (port_status & 0x8000) ? 1 : 0; - - if (!netif_carrier_ok(dev)) { - printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " - "flow control %sabled\n", dev->name, - speed, duplex ? "full" : "half", - fc ? "en" : "dis"); - netif_carrier_on(dev); - } - } -} - -static int mv88e6xxx_stats_wait(struct dsa_switch *ds) -{ - int ret; - int i; - - for (i = 0; i < 10; i++) { - ret = REG_READ(REG_GLOBAL, 0x1d); - if ((ret & 0x8000) == 0) - return 0; - } - - return -ETIMEDOUT; -} - -static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port) -{ - int ret; - - /* - * Snapshot the hardware statistics counters for this port. - */ - REG_WRITE(REG_GLOBAL, 0x1d, 0xdc00 | port); - - /* - * Wait for the snapshotting to complete. 
- */ - ret = mv88e6xxx_stats_wait(ds); - if (ret < 0) - return ret; - - return 0; -} - -static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val) -{ - u32 _val; - int ret; - - *val = 0; - - ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x1d, 0xcc00 | stat); - if (ret < 0) - return; - - ret = mv88e6xxx_stats_wait(ds); - if (ret < 0) - return; - - ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1e); - if (ret < 0) - return; - - _val = ret << 16; - - ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1f); - if (ret < 0) - return; - - *val = _val | ret; -} - -void mv88e6xxx_get_strings(struct dsa_switch *ds, - int nr_stats, struct mv88e6xxx_hw_stat *stats, - int port, uint8_t *data) -{ - int i; - - for (i = 0; i < nr_stats; i++) { - memcpy(data + i * ETH_GSTRING_LEN, - stats[i].string, ETH_GSTRING_LEN); - } -} - -void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, - int nr_stats, struct mv88e6xxx_hw_stat *stats, - int port, uint64_t *data) -{ - struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); - int ret; - int i; - - mutex_lock(&ps->stats_mutex); - - ret = mv88e6xxx_stats_snapshot(ds, port); - if (ret < 0) { - mutex_unlock(&ps->stats_mutex); - return; - } - - /* - * Read each of the counters. - */ - for (i = 0; i < nr_stats; i++) { - struct mv88e6xxx_hw_stat *s = stats + i; - u32 low; - u32 high; - - mv88e6xxx_stats_read(ds, s->reg, &low); - if (s->sizeof_stat == 8) - mv88e6xxx_stats_read(ds, s->reg + 1, &high); - else - high = 0; - - data[i] = (((u64)high) << 32) | low; - } - - mutex_unlock(&ps->stats_mutex); -} diff --git a/net/dsa/mv88e6xxx.h b/net/dsa/mv88e6xxx.h deleted file mode 100644 index 61156ca26a0d..000000000000 --- a/net/dsa/mv88e6xxx.h +++ /dev/null @@ -1,95 +0,0 @@ -/* - * net/dsa/mv88e6xxx.h - Marvell 88e6xxx switch chip support - * Copyright (c) 2008 Marvell Semiconductor - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#ifndef __MV88E6XXX_H -#define __MV88E6XXX_H - -#define REG_PORT(p) (0x10 + (p)) -#define REG_GLOBAL 0x1b -#define REG_GLOBAL2 0x1c - -struct mv88e6xxx_priv_state { - /* - * When using multi-chip addressing, this mutex protects - * access to the indirect access registers. (In single-chip - * mode, this mutex is effectively useless.) - */ - struct mutex smi_mutex; - -#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU - /* - * Handles automatic disabling and re-enabling of the PHY - * polling unit. - */ - struct mutex ppu_mutex; - int ppu_disabled; - struct work_struct ppu_work; - struct timer_list ppu_timer; -#endif - - /* - * This mutex serialises access to the statistics unit. - * Hold this mutex over snapshot + dump sequences. 
- */ - struct mutex stats_mutex; - - int id; /* switch product id */ -}; - -struct mv88e6xxx_hw_stat { - char string[ETH_GSTRING_LEN]; - int sizeof_stat; - int reg; -}; - -int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg); -int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg); -int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr, - int reg, u16 val); -int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val); -int mv88e6xxx_config_prio(struct dsa_switch *ds); -int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr); -int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr); -int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum); -int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val); -void mv88e6xxx_ppu_state_init(struct dsa_switch *ds); -int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum); -int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr, - int regnum, u16 val); -void mv88e6xxx_poll_link(struct dsa_switch *ds); -void mv88e6xxx_get_strings(struct dsa_switch *ds, - int nr_stats, struct mv88e6xxx_hw_stat *stats, - int port, uint8_t *data); -void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, - int nr_stats, struct mv88e6xxx_hw_stat *stats, - int port, uint64_t *data); - -#define REG_READ(addr, reg) \ - ({ \ - int __ret; \ - \ - __ret = mv88e6xxx_reg_read(ds, addr, reg); \ - if (__ret < 0) \ - return __ret; \ - __ret; \ - }) - -#define REG_WRITE(addr, reg, val) \ - ({ \ - int __ret; \ - \ - __ret = mv88e6xxx_reg_write(ds, addr, reg, val); \ - if (__ret < 0) \ - return __ret; \ - }) - - - -#endif diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index 98dfe80b4538..cacce1e22f9c 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c @@ -186,20 +186,7 @@ out: return 0; } -static struct packet_type dsa_packet_type __read_mostly = { +struct packet_type dsa_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_DSA), .func = dsa_rcv, }; - -static int __init dsa_init_module(void) -{ - dev_add_pack(&dsa_packet_type); - return 0; -} -module_init(dsa_init_module); - -static void __exit dsa_cleanup_module(void) -{ - dev_remove_pack(&dsa_packet_type); -} -module_exit(dsa_cleanup_module); diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index 6f383322ad25..e70c43c25e64 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c @@ -205,20 +205,7 @@ out: return 0; } -static struct packet_type edsa_packet_type __read_mostly = { +struct packet_type edsa_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_EDSA), .func = edsa_rcv, }; - -static int __init edsa_init_module(void) -{ - dev_add_pack(&edsa_packet_type); - return 0; -} -module_init(edsa_init_module); - -static void __exit edsa_cleanup_module(void) -{ - dev_remove_pack(&edsa_packet_type); -} -module_exit(edsa_cleanup_module); diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index d6d7d0add3cb..94bc260d015d 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c @@ -114,20 +114,7 @@ out: return 0; } -static struct packet_type trailer_packet_type __read_mostly = { +struct packet_type trailer_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_TRAILER), .func = trailer_rcv, }; - -static int __init trailer_init_module(void) -{ - dev_add_pack(&trailer_packet_type); - return 0; -} -module_init(trailer_init_module); - -static void __exit trailer_cleanup_module(void) -{ - dev_remove_pack(&trailer_packet_type); -} -module_exit(trailer_cleanup_module); diff --git 
a/net/econet/af_econet.c b/net/econet/af_econet.c index 1c1f26c5d672..7e717cb35ad1 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -322,6 +322,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, /* Real hardware Econet. We're not worthy etc. */ #ifdef CONFIG_ECONET_NATIVE unsigned short proto = 0; + int hlen, tlen; int res; if (len + 15 > dev->mtu) { @@ -331,12 +332,14 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, dev_hold(dev); - skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev), + hlen = LL_RESERVED_SPACE(dev); + tlen = dev->needed_tailroom; + skb = sock_alloc_send_skb(sk, len + hlen + tlen, msg->msg_flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out_unlock; - skb_reserve(skb, LL_RESERVED_SPACE(dev)); + skb_reserve(skb, hlen); skb_reset_network_header(skb); eb = (struct ec_cb *)&skb->cb; diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 19d6aefe97d4..e4ecc1eef98c 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -50,8 +50,6 @@ * SUCH DAMAGE. */ -#define DEBUG - #include <linux/bitops.h> #include <linux/if_arp.h> #include <linux/module.h> @@ -113,6 +111,20 @@ struct lowpan_dev_record { struct list_head list; }; +struct lowpan_fragment { + struct sk_buff *skb; /* skb to be assembled */ + spinlock_t lock; /* concurency lock */ + u16 length; /* length to be assemled */ + u32 bytes_rcv; /* bytes received */ + u16 tag; /* current fragment tag */ + struct timer_list timer; /* assembling timer */ + struct list_head list; /* fragments list */ +}; + +static unsigned short fragment_tag; +static LIST_HEAD(lowpan_fragments); +spinlock_t flist_lock; + static inline struct lowpan_dev_info *lowpan_dev_info(const struct net_device *dev) { @@ -234,6 +246,50 @@ lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr, return 0; } +static void +lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb) +{ + struct udphdr *uh = udp_hdr(skb); + + pr_debug("(%s): UDP header compression\n", __func__); + + if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) == + LOWPAN_NHC_UDP_4BIT_PORT) && + ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) == + LOWPAN_NHC_UDP_4BIT_PORT)) { + pr_debug("(%s): both ports compression to 4 bits\n", __func__); + **hc06_ptr = LOWPAN_NHC_UDP_CS_P_11; + **(hc06_ptr + 1) = /* subtraction is faster */ + (u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) + + ((uh->source & LOWPAN_NHC_UDP_4BIT_PORT) << 4)); + *hc06_ptr += 2; + } else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) == + LOWPAN_NHC_UDP_8BIT_PORT) { + pr_debug("(%s): remove 8 bits of dest\n", __func__); + **hc06_ptr = LOWPAN_NHC_UDP_CS_P_01; + memcpy(*hc06_ptr + 1, &uh->source, 2); + **(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT); + *hc06_ptr += 4; + } else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) == + LOWPAN_NHC_UDP_8BIT_PORT) { + pr_debug("(%s): remove 8 bits of source\n", __func__); + **hc06_ptr = LOWPAN_NHC_UDP_CS_P_10; + memcpy(*hc06_ptr + 1, &uh->dest, 2); + **(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT); + *hc06_ptr += 4; + } else { + pr_debug("(%s): can't compress header\n", __func__); + **hc06_ptr = LOWPAN_NHC_UDP_CS_P_00; + memcpy(*hc06_ptr + 1, &uh->source, 2); + memcpy(*hc06_ptr + 3, &uh->dest, 2); + *hc06_ptr += 5; + } + + /* checksum is always inline */ + memcpy(*hc06_ptr, &uh->check, 2); + *hc06_ptr += 2; +} + static u8 lowpan_fetch_skb_u8(struct sk_buff *skb) { u8 ret; @@ -244,6 +300,73 @@ static u8 lowpan_fetch_skb_u8(struct sk_buff *skb) return ret; } +static u16 
lowpan_fetch_skb_u16(struct sk_buff *skb) +{ + u16 ret; + + BUG_ON(!pskb_may_pull(skb, 2)); + + ret = skb->data[0] | (skb->data[1] << 8); + skb_pull(skb, 2); + return ret; +} + +static int +lowpan_uncompress_udp_header(struct sk_buff *skb) +{ + struct udphdr *uh = udp_hdr(skb); + u8 tmp; + + tmp = lowpan_fetch_skb_u8(skb); + + if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) { + pr_debug("(%s): UDP header uncompression\n", __func__); + switch (tmp & LOWPAN_NHC_UDP_CS_P_11) { + case LOWPAN_NHC_UDP_CS_P_00: + memcpy(&uh->source, &skb->data[0], 2); + memcpy(&uh->dest, &skb->data[2], 2); + skb_pull(skb, 4); + break; + case LOWPAN_NHC_UDP_CS_P_01: + memcpy(&uh->source, &skb->data[0], 2); + uh->dest = + skb->data[2] + LOWPAN_NHC_UDP_8BIT_PORT; + skb_pull(skb, 3); + break; + case LOWPAN_NHC_UDP_CS_P_10: + uh->source = skb->data[0] + LOWPAN_NHC_UDP_8BIT_PORT; + memcpy(&uh->dest, &skb->data[1], 2); + skb_pull(skb, 3); + break; + case LOWPAN_NHC_UDP_CS_P_11: + uh->source = + LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] >> 4); + uh->dest = + LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] & 0x0f); + skb_pull(skb, 1); + break; + default: + pr_debug("(%s) ERROR: unknown UDP format\n", __func__); + goto err; + break; + } + + pr_debug("(%s): uncompressed UDP ports: src = %d, dst = %d\n", + __func__, uh->source, uh->dest); + + /* copy checksum */ + memcpy(&uh->check, &skb->data[0], 2); + skb_pull(skb, 2); + } else { + pr_debug("(%s): ERROR: unsupported NH format\n", __func__); + goto err; + } + + return 0; +err: + return -EINVAL; +} + static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *_daddr, @@ -342,8 +465,6 @@ static int lowpan_header_create(struct sk_buff *skb, if (hdr->nexthdr == UIP_PROTO_UDP) iphc0 |= LOWPAN_IPHC_NH_C; -/* TODO: next header compression */ - if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) { *hc06_ptr = hdr->nexthdr; hc06_ptr += 1; @@ -431,8 +552,9 @@ static int lowpan_header_create(struct sk_buff *skb, } } - /* TODO: UDP header compression */ - /* TODO: Next Header compression */ + /* UDP header compression */ + if (hdr->nexthdr == UIP_PROTO_UDP) + lowpan_compress_udp_header(&hc06_ptr, skb); head[0] = iphc0; head[1] = iphc1; @@ -467,6 +589,7 @@ static int lowpan_header_create(struct sk_buff *skb, memcpy(&(sa.hwaddr), saddr, 8); mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA; + return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev, type, (void *)&da, (void *)&sa, skb->len); } @@ -511,6 +634,21 @@ static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr) return stat; } +static void lowpan_fragment_timer_expired(unsigned long entry_addr) +{ + struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr; + + pr_debug("%s: timer expired for frame with tag %d\n", __func__, + entry->tag); + + spin_lock(&flist_lock); + list_del(&entry->list); + spin_unlock(&flist_lock); + + dev_kfree_skb(entry->skb); + kfree(entry); +} + static int lowpan_process_data(struct sk_buff *skb) { @@ -525,6 +663,107 @@ lowpan_process_data(struct sk_buff *skb) if (skb->len < 2) goto drop; iphc0 = lowpan_fetch_skb_u8(skb); + + /* fragments assembling */ + switch (iphc0 & LOWPAN_DISPATCH_MASK) { + case LOWPAN_DISPATCH_FRAG1: + case LOWPAN_DISPATCH_FRAGN: + { + struct lowpan_fragment *frame; + u8 len, offset; + u16 tag; + bool found = false; + + len = lowpan_fetch_skb_u8(skb); /* frame length */ + tag = lowpan_fetch_skb_u16(skb); + + /* + * check if frame assembling with the same tag is + * already in progress + */ + spin_lock(&flist_lock); + + 
list_for_each_entry(frame, &lowpan_fragments, list) + if (frame->tag == tag) { + found = true; + break; + } + + /* alloc new frame structure */ + if (!found) { + frame = kzalloc(sizeof(struct lowpan_fragment), + GFP_ATOMIC); + if (!frame) + goto unlock_and_drop; + + INIT_LIST_HEAD(&frame->list); + + frame->length = (iphc0 & 7) | (len << 3); + frame->tag = tag; + + /* allocate buffer for frame assembling */ + frame->skb = alloc_skb(frame->length + + sizeof(struct ipv6hdr), GFP_ATOMIC); + + if (!frame->skb) { + kfree(frame); + goto unlock_and_drop; + } + + frame->skb->priority = skb->priority; + frame->skb->dev = skb->dev; + + /* reserve headroom for uncompressed ipv6 header */ + skb_reserve(frame->skb, sizeof(struct ipv6hdr)); + skb_put(frame->skb, frame->length); + + init_timer(&frame->timer); + /* time out is the same as for ipv6 - 60 sec */ + frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT; + frame->timer.data = (unsigned long)frame; + frame->timer.function = lowpan_fragment_timer_expired; + + add_timer(&frame->timer); + + list_add_tail(&frame->list, &lowpan_fragments); + } + + if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) + goto unlock_and_drop; + + offset = lowpan_fetch_skb_u8(skb); /* fetch offset */ + + /* if payload fits buffer, copy it */ + if (likely((offset * 8 + skb->len) <= frame->length)) + skb_copy_to_linear_data_offset(frame->skb, offset * 8, + skb->data, skb->len); + else + goto unlock_and_drop; + + frame->bytes_rcv += skb->len; + + /* frame assembling complete */ + if ((frame->bytes_rcv == frame->length) && + frame->timer.expires > jiffies) { + /* if timer haven't expired - first of all delete it */ + del_timer(&frame->timer); + list_del(&frame->list); + spin_unlock(&flist_lock); + + dev_kfree_skb(skb); + skb = frame->skb; + kfree(frame); + iphc0 = lowpan_fetch_skb_u8(skb); + break; + } + spin_unlock(&flist_lock); + + return kfree_skb(skb), 0; + } + default: + break; + } + iphc1 = lowpan_fetch_skb_u8(skb); _saddr = mac_cb(skb)->sa.hwaddr; @@ -659,7 +898,10 @@ lowpan_process_data(struct sk_buff *skb) goto drop; } - /* TODO: UDP header parse */ + /* UDP data uncompression */ + if (iphc0 & LOWPAN_IPHC_NH_C) + if (lowpan_uncompress_udp_header(skb)) + goto drop; /* Not fragmented package */ hdr.payload_len = htons(skb->len); @@ -674,6 +916,9 @@ lowpan_process_data(struct sk_buff *skb) lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr, sizeof(hdr)); return lowpan_skb_deliver(skb, &hdr); + +unlock_and_drop: + spin_unlock(&flist_lock); drop: kfree_skb(skb); return -EINVAL; @@ -692,18 +937,115 @@ static int lowpan_set_address(struct net_device *dev, void *p) return 0; } +static int lowpan_get_mac_header_length(struct sk_buff *skb) +{ + /* + * Currently long addressing mode is supported only, so the overall + * header size is 21: + * FC SeqNum DPAN DA SA Sec + * 2 + 1 + 2 + 8 + 8 + 0 = 21 + */ + return 21; +} + +static int +lowpan_fragment_xmit(struct sk_buff *skb, u8 *head, + int mlen, int plen, int offset) +{ + struct sk_buff *frag; + int hlen, ret; + + /* if payload length is zero, therefore it's a first fragment */ + hlen = (plen == 0 ? 
LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE); + + lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen); + + frag = dev_alloc_skb(hlen + mlen + plen + IEEE802154_MFR_SIZE); + if (!frag) + return -ENOMEM; + + frag->priority = skb->priority; + frag->dev = skb->dev; + + /* copy header, MFR and payload */ + memcpy(skb_put(frag, mlen), skb->data, mlen); + memcpy(skb_put(frag, hlen), head, hlen); + + if (plen) + skb_copy_from_linear_data_offset(skb, offset + mlen, + skb_put(frag, plen), plen); + + lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data, + frag->len); + + ret = dev_queue_xmit(frag); + + return ret; +} + +static int +lowpan_skb_fragmentation(struct sk_buff *skb) +{ + int err, header_length, payload_length, tag, offset = 0; + u8 head[5]; + + header_length = lowpan_get_mac_header_length(skb); + payload_length = skb->len - header_length; + tag = fragment_tag++; + + /* first fragment header */ + head[0] = LOWPAN_DISPATCH_FRAG1 | (payload_length & 0x7); + head[1] = (payload_length >> 3) & 0xff; + head[2] = tag & 0xff; + head[3] = tag >> 8; + + err = lowpan_fragment_xmit(skb, head, header_length, 0, 0); + + /* next fragment header */ + head[0] &= ~LOWPAN_DISPATCH_FRAG1; + head[0] |= LOWPAN_DISPATCH_FRAGN; + + while ((payload_length - offset > 0) && (err >= 0)) { + int len = LOWPAN_FRAG_SIZE; + + head[4] = offset / 8; + + if (payload_length - offset < len) + len = payload_length - offset; + + err = lowpan_fragment_xmit(skb, head, header_length, + len, offset); + offset += len; + } + + return err; +} + static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev) { - int err = 0; + int err = -1; pr_debug("(%s): package xmit\n", __func__); skb->dev = lowpan_dev_info(dev)->real_dev; if (skb->dev == NULL) { pr_debug("(%s) ERROR: no real wpan device found\n", __func__); - dev_kfree_skb(skb); - } else + goto error; + } + + if (skb->len <= IEEE802154_MTU) { err = dev_queue_xmit(skb); + goto out; + } + + pr_debug("(%s): frame is too big, fragmentation is needed\n", + __func__); + err = lowpan_skb_fragmentation(skb); +error: + dev_kfree_skb(skb); +out: + if (err < 0) + pr_debug("(%s): ERROR: xmit failed\n", __func__); return (err < 0 ? 
NETDEV_TX_BUSY : NETDEV_TX_OK); } @@ -730,13 +1072,12 @@ static void lowpan_setup(struct net_device *dev) dev->addr_len = IEEE802154_ADDR_LEN; memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN); dev->type = ARPHRD_IEEE802154; - dev->features = NETIF_F_NO_CSUM; /* Frame Control + Sequence Number + Address fields + Security Header */ dev->hard_header_len = 2 + 1 + 20 + 14; dev->needed_tailroom = 2; /* FCS */ dev->mtu = 1281; dev->tx_queue_len = 0; - dev->flags = IFF_NOARP | IFF_BROADCAST; + dev->flags = IFF_BROADCAST | IFF_MULTICAST; dev->watchdog_timeo = 0; dev->netdev_ops = &lowpan_netdev_ops; @@ -765,8 +1106,15 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev, goto drop; /* check that it's our buffer */ - if ((skb->data[0] & 0xe0) == 0x60) + switch (skb->data[0] & 0xe0) { + case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */ + case LOWPAN_DISPATCH_FRAG1: /* first fragment header */ + case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */ lowpan_process_data(skb); + break; + default: + break; + } return NET_RX_SUCCESS; diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h index 5d8cf80b930d..aeff3f310482 100644 --- a/net/ieee802154/6lowpan.h +++ b/net/ieee802154/6lowpan.h @@ -159,6 +159,24 @@ #define LOWPAN_DISPATCH_FRAG1 0xc0 /* 11000xxx */ #define LOWPAN_DISPATCH_FRAGN 0xe0 /* 11100xxx */ +#define LOWPAN_DISPATCH_MASK 0xf8 /* 11111000 */ + +#define LOWPAN_FRAG_TIMEOUT (HZ * 60) /* time-out 60 sec */ + +#define LOWPAN_FRAG1_HEAD_SIZE 0x4 +#define LOWPAN_FRAGN_HEAD_SIZE 0x5 + +/* + * According IEEE802.15.4 standard: + * - MTU is 127 octets + * - maximum MHR size is 37 octets + * - MFR size is 2 octets + * + * so minimal payload size that we may guarantee is: + * MTU - MHR - MFR = 88 octets + */ +#define LOWPAN_FRAG_SIZE 88 + /* * Values of fields within the IPHC encoding first byte * (C stands for compressed and I for inline) @@ -201,6 +219,11 @@ #define LOWPAN_NHC_UDP_CHECKSUMC 0x04 #define LOWPAN_NHC_UDP_CHECKSUMI 0x00 +#define LOWPAN_NHC_UDP_4BIT_PORT 0xF0B0 +#define LOWPAN_NHC_UDP_4BIT_MASK 0xFFF0 +#define LOWPAN_NHC_UDP_8BIT_PORT 0xF000 +#define LOWPAN_NHC_UDP_8BIT_MASK 0xFF00 + /* values for port compression, _with checksum_ ie bit 5 set to 0 */ #define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */ #define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline, diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c index faecf648123f..1b09eaabaac1 100644 --- a/net/ieee802154/dgram.c +++ b/net/ieee802154/dgram.c @@ -209,6 +209,7 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk, unsigned mtu; struct sk_buff *skb; struct dgram_sock *ro = dgram_sk(sk); + int hlen, tlen; int err; if (msg->msg_flags & MSG_OOB) { @@ -229,13 +230,15 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk, mtu = dev->mtu; pr_debug("name = %s, mtu = %u\n", dev->name, mtu); - skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size, + hlen = LL_RESERVED_SPACE(dev); + tlen = dev->needed_tailroom; + skb = sock_alloc_send_skb(sk, hlen + tlen + size, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) goto out_dev; - skb_reserve(skb, LL_RESERVED_SPACE(dev)); + skb_reserve(skb, hlen); skb_reset_network_header(skb); diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c index 10970ca85748..f96bae8fd330 100644 --- a/net/ieee802154/raw.c +++ b/net/ieee802154/raw.c @@ -108,6 +108,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, struct net_device *dev; unsigned mtu; struct sk_buff *skb; + int hlen, tlen; int err; if (msg->msg_flags & 
MSG_OOB) { @@ -137,12 +138,14 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, goto out_dev; } - skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + size, + hlen = LL_RESERVED_SPACE(dev); + tlen = dev->needed_tailroom; + skb = sock_alloc_send_skb(sk, hlen + tlen + size, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) goto out_dev; - skb_reserve(skb, LL_RESERVED_SPACE(dev)); + skb_reserve(skb, hlen); skb_reset_mac_header(skb); skb_reset_network_header(skb); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 1b5096a9875a..15dc4c4828de 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1250,7 +1250,8 @@ out: return err; } -static struct sk_buff *inet_gso_segment(struct sk_buff *skb, u32 features) +static struct sk_buff *inet_gso_segment(struct sk_buff *skb, + netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); struct iphdr *iph; @@ -1572,9 +1573,9 @@ static __net_init int ipv4_mib_init_net(struct net *net) sizeof(struct icmp_mib), __alignof__(struct icmp_mib)) < 0) goto err_icmp_mib; - if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics, - sizeof(struct icmpmsg_mib), - __alignof__(struct icmpmsg_mib)) < 0) + net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib), + GFP_KERNEL); + if (!net->mib.icmpmsg_statistics) goto err_icmpmsg_mib; tcp_mib_init(net); @@ -1598,7 +1599,7 @@ err_tcp_mib: static __net_exit void ipv4_mib_exit_net(struct net *net) { - snmp_mib_free((void __percpu **)net->mib.icmpmsg_statistics); + kfree(net->mib.icmpmsg_statistics); snmp_mib_free((void __percpu **)net->mib.icmp_statistics); snmp_mib_free((void __percpu **)net->mib.udplite_statistics); snmp_mib_free((void __percpu **)net->mib.udp_statistics); diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 96a164aa1367..ff324ebc8893 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -112,11 +112,6 @@ #include <net/arp.h> #include <net/ax25.h> #include <net/netrom.h> -#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE) -#include <net/atmclip.h> -struct neigh_table *clip_tbl_hook; -EXPORT_SYMBOL(clip_tbl_hook); -#endif #include <asm/system.h> #include <linux/uaccess.h> @@ -164,7 +159,6 @@ static const struct neigh_ops arp_broken_ops = { struct neigh_table arp_tbl = { .family = AF_INET, - .entry_size = sizeof(struct neighbour) + 4, .key_len = 4, .hash = arp_hash, .constructor = arp_constructor, @@ -177,7 +171,7 @@ struct neigh_table arp_tbl = { .gc_staletime = 60 * HZ, .reachable_time = 30 * HZ, .delay_probe_time = 5 * HZ, - .queue_len = 3, + .queue_len_bytes = 64*1024, .ucast_probes = 3, .mcast_probes = 3, .anycast_delay = 1 * HZ, @@ -592,16 +586,18 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, struct sk_buff *skb; struct arphdr *arp; unsigned char *arp_ptr; + int hlen = LL_RESERVED_SPACE(dev); + int tlen = dev->needed_tailroom; /* * Allocate a buffer */ - skb = alloc_skb(arp_hdr_len(dev) + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); + skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC); if (skb == NULL) return NULL; - skb_reserve(skb, LL_RESERVED_SPACE(dev)); + skb_reserve(skb, hlen); skb_reset_network_header(skb); arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev)); skb->dev = dev; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index b2ca095cb9da..fa057d105bef 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -304,9 +304,11 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) struct igmpv3_report *pig; struct net *net = dev_net(dev); struct flowi4 fl4; + int hlen = 
LL_RESERVED_SPACE(dev); + int tlen = dev->needed_tailroom; while (1) { - skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), + skb = alloc_skb(size + hlen + tlen, GFP_ATOMIC | __GFP_NOWARN); if (skb) break; @@ -327,7 +329,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) skb_dst_set(skb, &rt->dst); skb->dev = dev; - skb_reserve(skb, LL_RESERVED_SPACE(dev)); + skb_reserve(skb, hlen); skb_reset_network_header(skb); pip = ip_hdr(skb); @@ -647,6 +649,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, __be32 group = pmc ? pmc->multiaddr : 0; struct flowi4 fl4; __be32 dst; + int hlen, tlen; if (type == IGMPV3_HOST_MEMBERSHIP_REPORT) return igmpv3_send_report(in_dev, pmc); @@ -661,7 +664,9 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, if (IS_ERR(rt)) return -1; - skb = alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC); + hlen = LL_RESERVED_SPACE(dev); + tlen = dev->needed_tailroom; + skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC); if (skb == NULL) { ip_rt_put(rt); return -1; @@ -669,7 +674,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, skb_dst_set(skb, &rt->dst); - skb_reserve(skb, LL_RESERVED_SPACE(dev)); + skb_reserve(skb, hlen); skb_reset_network_header(skb); iph = ip_hdr(skb); @@ -1574,7 +1579,7 @@ out_unlock: * Add multicast single-source filter to the interface list */ static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode, - __be32 *psfsrc, int delta) + __be32 *psfsrc) { struct ip_sf_list *psf, *psf_prev; @@ -1709,7 +1714,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, pmc->sfcount[sfmode]++; err = 0; for (i=0; i<sfcount; i++) { - err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i], delta); + err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]); if (err) break; } diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index c14d88ad348d..a598768c616c 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -588,10 +588,19 @@ void inet_csk_reqsk_queue_prune(struct sock *parent, } EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune); -struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req, - const gfp_t priority) +/** + * inet_csk_clone_lock - clone an inet socket, and lock its clone + * @sk: the socket to clone + * @req: request_sock + * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) + * + * Caller must unlock socket even in error path (bh_unlock_sock(newsk)) + */ +struct sock *inet_csk_clone_lock(const struct sock *sk, + const struct request_sock *req, + const gfp_t priority) { - struct sock *newsk = sk_clone(sk, priority); + struct sock *newsk = sk_clone_lock(sk, priority); if (newsk != NULL) { struct inet_connection_sock *newicsk = inet_csk(newsk); @@ -615,7 +624,7 @@ struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req, } return newsk; } -EXPORT_SYMBOL_GPL(inet_csk_clone); +EXPORT_SYMBOL_GPL(inet_csk_clone_lock); /* * At this point, there should be no process reference to this diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index ccee270a9b65..0a46c541b477 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -132,13 +132,10 @@ static int inet_csk_diag_fill(struct sock *sk, if (r->idiag_family == AF_INET6) { const struct ipv6_pinfo *np = inet6_sk(sk); + *(struct in6_addr *)r->id.idiag_src = np->rcv_saddr; + *(struct in6_addr *)r->id.idiag_dst = np->daddr; if (ext & (1 << (INET_DIAG_TCLASS - 1))) 
RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass); - - ipv6_addr_copy((struct in6_addr *)r->id.idiag_src, - &np->rcv_saddr); - ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst, - &np->daddr); } #endif @@ -228,10 +225,8 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw); - ipv6_addr_copy((struct in6_addr *)r->id.idiag_src, - &tw6->tw_v6_rcv_saddr); - ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst, - &tw6->tw_v6_daddr); + *(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr; + *(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr; } #endif nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail; @@ -607,10 +602,8 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, r->idiag_inode = 0; #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) if (r->idiag_family == AF_INET6) { - ipv6_addr_copy((struct in6_addr *)r->id.idiag_src, - &inet6_rsk(req)->loc_addr); - ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst, - &inet6_rsk(req)->rmt_addr); + *(struct in6_addr *)r->id.idiag_src = inet6_rsk(req)->loc_addr; + *(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr; } #endif nlh->nlmsg_len = skb_tail_pointer(skb) - b; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index d55110e93120..2b32296b7958 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -171,7 +171,7 @@ struct pcpu_tstats { unsigned long rx_bytes; unsigned long tx_packets; unsigned long tx_bytes; -}; +} __attribute__((aligned(4*sizeof(unsigned long)))); static struct net_device_stats *ipgre_get_stats(struct net_device *dev) { @@ -835,6 +835,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); + if (max_headroom > dev->needed_headroom) + dev->needed_headroom = max_headroom; if (!new_skb) { ip_rt_put(rt); dev->stats.tx_dropped++; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 0bc95f3977d2..0d5e5672f3d1 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -319,6 +319,20 @@ int ip_output(struct sk_buff *skb) !(IPCB(skb)->flags & IPSKB_REROUTED)); } +/* + * copy saddr and daddr, possibly using 64bit load/stores + * Equivalent to : + * iph->saddr = fl4->saddr; + * iph->daddr = fl4->daddr; + */ +static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4) +{ + BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) != + offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr)); + memcpy(&iph->saddr, &fl4->saddr, + sizeof(fl4->saddr) + sizeof(fl4->daddr)); +} + int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl) { struct sock *sk = skb->sk; @@ -381,8 +395,8 @@ packet_routed: iph->frag_off = 0; iph->ttl = ip_select_ttl(inet, &rt->dst); iph->protocol = sk->sk_protocol; - iph->saddr = fl4->saddr; - iph->daddr = fl4->daddr; + ip_copy_addrs(iph, fl4); + /* Transport layer set skb->h.foo itself. 
*/ if (inet_opt && inet_opt->opt.optlen) { @@ -1337,8 +1351,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk, ip_select_ident(iph, &rt->dst, sk); iph->ttl = ttl; iph->protocol = sk->sk_protocol; - iph->saddr = fl4->saddr; - iph->daddr = fl4->daddr; + ip_copy_addrs(iph, fl4); if (opt) { iph->ihl += opt->optlen>>2; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 09ff51bf16a4..80d5fa450210 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -55,20 +55,13 @@ /* * SOL_IP control messages. */ +#define PKTINFO_SKB_CB(__skb) ((struct in_pktinfo *)((__skb)->cb)) static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) { - struct in_pktinfo info; - struct rtable *rt = skb_rtable(skb); + struct in_pktinfo info = *PKTINFO_SKB_CB(skb); info.ipi_addr.s_addr = ip_hdr(skb)->daddr; - if (rt) { - info.ipi_ifindex = rt->rt_iif; - info.ipi_spec_dst.s_addr = rt->rt_spec_dst; - } else { - info.ipi_ifindex = 0; - info.ipi_spec_dst.s_addr = 0; - } put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info); } @@ -992,20 +985,28 @@ e_inval: } /** - * ip_queue_rcv_skb - Queue an skb into sock receive queue + * ipv4_pktinfo_prepare - transfert some info from rtable to skb * @sk: socket * @skb: buffer * - * Queues an skb into socket receive queue. If IP_CMSG_PKTINFO option - * is not set, we drop skb dst entry now, while dst cache line is hot. + * To support IP_CMSG_PKTINFO option, we store rt_iif and rt_spec_dst + * in skb->cb[] before dst drop. + * This way, receiver doesnt make cache line misses to read rtable. */ -int ip_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) +void ipv4_pktinfo_prepare(struct sk_buff *skb) { - if (!(inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO)) - skb_dst_drop(skb); - return sock_queue_rcv_skb(sk, skb); + struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb); + const struct rtable *rt = skb_rtable(skb); + + if (rt) { + pktinfo->ipi_ifindex = rt->rt_iif; + pktinfo->ipi_spec_dst.s_addr = rt->rt_spec_dst; + } else { + pktinfo->ipi_ifindex = 0; + pktinfo->ipi_spec_dst.s_addr = 0; + } + skb_dst_drop(skb); } -EXPORT_SYMBOL(ip_queue_rcv_skb); int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 0da2afc97f32..915eb5265b2e 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -763,13 +763,15 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d struct sk_buff *skb; struct bootp_pkt *b; struct iphdr *h; + int hlen = LL_RESERVED_SPACE(dev); + int tlen = dev->needed_tailroom; /* Allocate packet */ - skb = alloc_skb(sizeof(struct bootp_pkt) + LL_ALLOCATED_SPACE(dev) + 15, + skb = alloc_skb(sizeof(struct bootp_pkt) + hlen + tlen + 15, GFP_KERNEL); if (!skb) return; - skb_reserve(skb, LL_RESERVED_SPACE(dev)); + skb_reserve(skb, hlen); b = (struct bootp_pkt *) skb_put(skb, sizeof(struct bootp_pkt)); memset(b, 0, sizeof(struct bootp_pkt)); @@ -822,8 +824,13 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d skb->dev = dev; skb->protocol = htons(ETH_P_IP); if (dev_hard_header(skb, dev, ntohs(skb->protocol), - dev->broadcast, dev->dev_addr, skb->len) < 0 || - dev_queue_xmit(skb) < 0) + dev->broadcast, dev->dev_addr, skb->len) < 0) { + kfree_skb(skb); + printk("E"); + return; + } + + if (dev_queue_xmit(skb) < 0) printk("E"); } diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 065effd8349a..94906908a416 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -148,7 +148,7 @@ struct 
pcpu_tstats { unsigned long rx_bytes; unsigned long tx_packets; unsigned long tx_bytes; -}; +} __attribute__((aligned(4*sizeof(unsigned long)))); static struct net_device_stats *ipip_get_stats(struct net_device *dev) { diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 76a7f07b38b6..8e54490ee3f4 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1520,7 +1520,6 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v struct mr_table *mrt; struct vif_device *v; int ct; - LIST_HEAD(list); if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; @@ -1529,10 +1528,9 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v v = &mrt->vif_table[0]; for (ct = 0; ct < mrt->maxvif; ct++, v++) { if (v->dev == dev) - vif_delete(mrt, ct, 1, &list); + vif_delete(mrt, ct, 1, NULL); } } - unregister_netdevice_many(&list); return NOTIFY_DONE; } diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index e59aabd0eae4..a057fe64debd 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -404,6 +404,7 @@ __ipq_rcv_skb(struct sk_buff *skb) int status, type, pid, flags; unsigned int nlmsglen, skblen; struct nlmsghdr *nlh; + bool enable_timestamp = false; skblen = skb->len; if (skblen < sizeof(*nlh)) @@ -441,12 +442,13 @@ __ipq_rcv_skb(struct sk_buff *skb) RCV_SKB_FAIL(-EBUSY); } } else { - net_enable_timestamp(); + enable_timestamp = true; peer_pid = pid; } spin_unlock_bh(&queue_lock); - + if (enable_timestamp) + net_enable_timestamp(); status = ipq_receive_peer(NLMSG_DATA(nlh), type, nlmsglen - NLMSG_LENGTH(0)); if (status < 0) diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 466ea8bb7a4d..961eed4f510a 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -288,7 +288,7 @@ static void icmpmsg_put(struct seq_file *seq) count = 0; for (i = 0; i < ICMPMSG_MIB_MAX; i++) { - val = snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, i); + val = atomic_long_read(&net->mib.icmpmsg_statistics->mibs[i]); if (val) { type[count] = i; vals[count++] = val; @@ -307,6 +307,7 @@ static void icmp_put(struct seq_file *seq) { int i; struct net *net = seq->private; + atomic_long_t *ptr = net->mib.icmpmsg_statistics->mibs; seq_puts(seq, "\nIcmp: InMsgs InErrors"); for (i=0; icmpmibmap[i].name != NULL; i++) @@ -319,15 +320,13 @@ static void icmp_put(struct seq_file *seq) snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS)); for (i=0; icmpmibmap[i].name != NULL; i++) seq_printf(seq, " %lu", - snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, - icmpmibmap[i].index)); + atomic_long_read(ptr + icmpmibmap[i].index)); seq_printf(seq, " %lu %lu", snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS), snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS)); for (i=0; icmpmibmap[i].name != NULL; i++) seq_printf(seq, " %lu", - snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, - icmpmibmap[i].index | 0x100)); + atomic_long_read(ptr + (icmpmibmap[i].index | 0x100))); } /* diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 007e2eb769d3..3ccda5ae8a27 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -292,7 +292,8 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) { /* Charge it to the socket. 
*/ - if (ip_queue_rcv_skb(sk, skb) < 0) { + ipv4_pktinfo_prepare(skb); + if (sock_queue_rcv_skb(sk, skb) < 0) { kfree_skb(skb); return NET_RX_DROP; } @@ -327,6 +328,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, unsigned int iphlen; int err; struct rtable *rt = *rtp; + int hlen, tlen; if (length > rt->dst.dev->mtu) { ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, @@ -336,12 +338,14 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, if (flags&MSG_PROBE) goto out; + hlen = LL_RESERVED_SPACE(rt->dst.dev); + tlen = rt->dst.dev->needed_tailroom; skb = sock_alloc_send_skb(sk, - length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15, + length + hlen + tlen + 15, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto error; - skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev)); + skb_reserve(skb, hlen); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ca5e237df029..7047069cf967 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -108,7 +108,6 @@ #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> #endif -#include <net/atmclip.h> #include <net/secure_seq.h> #define RT_FL_TOS(oldflp4) \ @@ -1019,23 +1018,18 @@ static int slow_chain_length(const struct rtable *head) static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr) { - struct neigh_table *tbl = &arp_tbl; static const __be32 inaddr_any = 0; struct net_device *dev = dst->dev; const __be32 *pkey = daddr; struct neighbour *n; -#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE) - if (dev->type == ARPHRD_ATM) - tbl = clip_tbl_hook; -#endif if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) pkey = &inaddr_any; - n = __ipv4_neigh_lookup(tbl, dev, *(__force u32 *)pkey); + n = __ipv4_neigh_lookup(&arp_tbl, dev, *(__force u32 *)pkey); if (n) return n; - return neigh_create(tbl, pkey, dev); + return neigh_create(&arp_tbl, pkey, dev); } static int rt_bind_neighbour(struct rtable *rt) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 34f5db1e1c8b..45156be3abfd 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -891,15 +891,18 @@ EXPORT_SYMBOL(tcp_sendpage); #define TCP_PAGE(sk) (sk->sk_sndmsg_page) #define TCP_OFF(sk) (sk->sk_sndmsg_off) -static inline int select_size(const struct sock *sk, int sg) +static inline int select_size(const struct sock *sk, bool sg) { const struct tcp_sock *tp = tcp_sk(sk); int tmp = tp->mss_cache; if (sg) { - if (sk_can_gso(sk)) - tmp = 0; - else { + if (sk_can_gso(sk)) { + /* Small frames wont use a full page: + * Payload will immediately follow tcp header. 
+ */ + tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER); + } else { int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER); if (tmp >= pgbreak && @@ -917,9 +920,9 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, struct iovec *iov; struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; - int iovlen, flags; + int iovlen, flags, err, copied; int mss_now, size_goal; - int sg, err, copied; + bool sg; long timeo; lock_sock(sk); @@ -946,7 +949,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) goto out_err; - sg = sk->sk_route_caps & NETIF_F_SG; + sg = !!(sk->sk_route_caps & NETIF_F_SG); while (--iovlen >= 0) { size_t seglen = iov->iov_len; @@ -2653,7 +2656,8 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname, EXPORT_SYMBOL(compat_tcp_getsockopt); #endif -struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features) +struct sk_buff *tcp_tso_segment(struct sk_buff *skb, + netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); struct tcphdr *th; diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 850c737e08e2..fc6d475f488f 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -292,7 +292,7 @@ int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd && left * tp->mss_cache < sk->sk_gso_max_size) return 1; - return left <= tcp_max_burst(tp); + return left <= tcp_max_tso_deferred_mss(tp); } EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 52b5c2d0ecd0..78dd38cd5496 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2858,7 +2858,7 @@ static void tcp_try_keep_open(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); int state = TCP_CA_Open; - if (tcp_left_out(tp) || tcp_any_retrans_done(sk) || tp->undo_marker) + if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) state = TCP_CA_Disorder; if (inet_csk(sk)->icsk_ca_state != state) { @@ -2881,7 +2881,8 @@ static void tcp_try_to_open(struct sock *sk, int flag) if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { tcp_try_keep_open(sk); - tcp_moderate_cwnd(tp); + if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open) + tcp_moderate_cwnd(tp); } else { tcp_cwnd_down(sk, flag); } @@ -3009,11 +3010,11 @@ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked, * tcp_xmit_retransmit_queue(). */ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, - int newly_acked_sacked, int flag) + int newly_acked_sacked, bool is_dupack, + int flag) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); - int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && (tcp_fackets_out(tp) > tp->reordering)); int fast_rexmit = 0, mib_idx; @@ -3066,17 +3067,6 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, } break; - case TCP_CA_Disorder: - tcp_try_undo_dsack(sk); - if (!tp->undo_marker || - /* For SACK case do not Open to allow to undo - * catching for all duplicate ACKs. 
*/ - tcp_is_reno(tp) || tp->snd_una != tp->high_seq) { - tp->undo_marker = 0; - tcp_set_ca_state(sk, TCP_CA_Open); - } - break; - case TCP_CA_Recovery: if (tcp_is_reno(tp)) tcp_reset_reno_sack(tp); @@ -3117,7 +3107,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, tcp_add_reno_sack(sk); } - if (icsk->icsk_ca_state == TCP_CA_Disorder) + if (icsk->icsk_ca_state <= TCP_CA_Disorder) tcp_try_undo_dsack(sk); if (!tcp_time_to_recover(sk)) { @@ -3681,10 +3671,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) u32 prior_snd_una = tp->snd_una; u32 ack_seq = TCP_SKB_CB(skb)->seq; u32 ack = TCP_SKB_CB(skb)->ack_seq; + bool is_dupack = false; u32 prior_in_flight; u32 prior_fackets; int prior_packets; int prior_sacked = tp->sacked_out; + int pkts_acked = 0; int newly_acked_sacked = 0; int frto_cwnd = 0; @@ -3757,6 +3749,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) /* See if we can take anything off of the retransmit queue. */ flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); + pkts_acked = prior_packets - tp->packets_out; newly_acked_sacked = (prior_packets - prior_sacked) - (tp->packets_out - tp->sacked_out); @@ -3771,8 +3764,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) if ((flag & FLAG_DATA_ACKED) && !frto_cwnd && tcp_may_raise_cwnd(sk, flag)) tcp_cong_avoid(sk, ack, prior_in_flight); - tcp_fastretrans_alert(sk, prior_packets - tp->packets_out, - newly_acked_sacked, flag); + is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); + tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, + is_dupack, flag); } else { if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) tcp_cong_avoid(sk, ack, prior_in_flight); @@ -3784,6 +3778,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) return 1; no_queue: + /* If data was DSACKed, see if we can undo a cwnd reduction. */ + if (flag & FLAG_DSACKING_ACK) + tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, + is_dupack, flag); /* If this ack opens up a zero window, clear backoff. It was * being used to time the probes, and is probably far higher than * it needs to be for normal retransmission. @@ -3797,10 +3795,14 @@ invalid_ack: return -1; old_ack: + /* If data was SACKed, tag it and see if we should send more data. + * If data was DSACKed, see if we can undo a cwnd reduction. 
+ */ if (TCP_SKB_CB(skb)->sacked) { - tcp_sacktag_write_queue(sk, skb, prior_snd_una); - if (icsk->icsk_ca_state == TCP_CA_Open) - tcp_try_keep_open(sk); + flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); + newly_acked_sacked = tp->sacked_out - prior_sacked; + tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, + is_dupack, flag); } SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index a9db4b1a2215..c4b8b09db9f5 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1511,6 +1511,7 @@ exit: return NULL; put_and_exit: tcp_clear_xmit_timers(newsk); + tcp_cleanup_congestion_control(newsk); bh_unlock_sock(newsk); sock_put(newsk); goto exit; diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 66363b689ad6..9dc146e5ed65 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -343,8 +343,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot); tw6 = inet6_twsk((struct sock *)tw); - ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr); - ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr); + tw6->tw_v6_daddr = np->daddr; + tw6->tw_v6_rcv_saddr = np->rcv_saddr; tw->tw_tclass = np->tclass; tw->tw_ipv6only = np->ipv6only; } @@ -425,7 +425,7 @@ static inline void TCP_ECN_openreq_child(struct tcp_sock *tp, */ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb) { - struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC); + struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC); if (newsk != NULL) { const struct inet_request_sock *ireq = inet_rsk(req); @@ -495,7 +495,9 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, newtp->frto_counter = 0; newtp->frto_highmark = 0; - newicsk->icsk_ca_ops = &tcp_init_congestion_ops; + if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops && + !try_module_get(newicsk->icsk_ca_ops->owner)) + newicsk->icsk_ca_ops = &tcp_init_congestion_ops; tcp_set_ca_state(newsk, TCP_CA_Open); tcp_init_xmit_timers(newsk); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 63170e297540..58f69acd3d22 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1581,7 +1581,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) * frame, so if we have space for more than 3 frames * then send now. 
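
In the tcp_minisocks.c hunk above, a cloned socket now keeps its parent's congestion-control ops whenever a module reference can be taken, falling back to tcp_init_congestion_ops only when it cannot; the tcp_cleanup_congestion_control(newsk) added to tcp_ipv4.c's put_and_exit path releases that reference again if the new socket has to be discarded. A kernel-style sketch of the inherit-or-fall-back pattern, not a drop-in replacement for the hunk:

/* Keep the parent's ops if the owning module can be pinned; otherwise
 * fall back to the built-in initial ops, which need no reference. */
static void inherit_ca_ops(struct inet_connection_sock *newicsk)
{
        if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops &&
            !try_module_get(newicsk->icsk_ca_ops->owner))
                newicsk->icsk_ca_ops = &tcp_init_congestion_ops;
}
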
*/ - if (limit > tcp_max_burst(tp) * tp->mss_cache) + if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) goto send_now; } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 5a65eeac1d29..ad481b32f1e3 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1358,7 +1358,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) if (inet_sk(sk)->inet_daddr) sock_rps_save_rxhash(sk, skb); - rc = ip_queue_rcv_skb(sk, skb); + rc = sock_queue_rcv_skb(sk, skb); if (rc < 0) { int is_udplite = IS_UDPLITE(sk); @@ -1474,6 +1474,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) rc = 0; + ipv4_pktinfo_prepare(skb); bh_lock_sock(sk); if (!sock_owned_by_user(sk)) rc = __udp_queue_rcv_skb(sk, skb); @@ -2247,7 +2248,8 @@ int udp4_ufo_send_check(struct sk_buff *skb) return 0; } -struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features) +struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, + netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int mss; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index cf88df82e2c2..586051726341 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -636,7 +636,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, goto out; } - ipv6_addr_copy(&ifa->addr, addr); + ifa->addr = *addr; spin_lock_init(&ifa->lock); spin_lock_init(&ifa->state_lock); @@ -1228,7 +1228,7 @@ try_nextdev: if (!hiscore->ifa) return -EADDRNOTAVAIL; - ipv6_addr_copy(saddr, &hiscore->ifa->addr); + *saddr = hiscore->ifa->addr; in6_ifa_put(hiscore->ifa); return 0; } @@ -1249,7 +1249,7 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, list_for_each_entry(ifp, &idev->addr_list, if_list) { if (ifp->scope == IFA_LINK && !(ifp->flags & banned_flags)) { - ipv6_addr_copy(addr, &ifp->addr); + *addr = ifp->addr; err = 0; break; } @@ -1700,7 +1700,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev, .fc_protocol = RTPROT_KERNEL, }; - ipv6_addr_copy(&cfg.fc_dst, pfx); + cfg.fc_dst = *pfx; /* Prevent useless cloning on PtP SIT. This thing is done here expecting that the whole diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index d27c797f9f05..7694c82e629d 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -347,7 +347,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) */ v4addr = LOOPBACK4_IPV6; if (!(addr_type & IPV6_ADDR_MULTICAST)) { - if (!inet->transparent && + if (!(inet->freebind || inet->transparent) && !ipv6_chk_addr(net, &addr->sin6_addr, dev, 0)) { err = -EADDRNOTAVAIL; @@ -361,10 +361,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) inet->inet_rcv_saddr = v4addr; inet->inet_saddr = v4addr; - ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); + np->rcv_saddr = addr->sin6_addr; if (!(addr_type & IPV6_ADDR_MULTICAST)) - ipv6_addr_copy(&np->saddr, &addr->sin6_addr); + np->saddr = addr->sin6_addr; /* Make sure we are allowed to bind here. 
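
The inet6_bind() change above (and the matching datagram_send_ctl() change further down) relaxes the source-address check: a non-local address is now accepted when the socket has IP_FREEBIND set, not only IP_TRANSPARENT. A kernel-style sketch of the relaxed test; the helper name is invented for illustration:

/* Sketch: either flag now exempts the socket from the ipv6_chk_addr() test. */
static bool nonlocal_bind_allowed(struct sock *sk)
{
        struct inet_sock *inet = inet_sk(sk);

        return inet->freebind || inet->transparent;
}
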
*/ if (sk->sk_prot->get_port(sk, snum)) { @@ -458,14 +458,14 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr, peer == 1) return -ENOTCONN; sin->sin6_port = inet->inet_dport; - ipv6_addr_copy(&sin->sin6_addr, &np->daddr); + sin->sin6_addr = np->daddr; if (np->sndflow) sin->sin6_flowinfo = np->flow_label; } else { if (ipv6_addr_any(&np->rcv_saddr)) - ipv6_addr_copy(&sin->sin6_addr, &np->saddr); + sin->sin6_addr = np->saddr; else - ipv6_addr_copy(&sin->sin6_addr, &np->rcv_saddr); + sin->sin6_addr = np->rcv_saddr; sin->sin6_port = inet->inet_sport; } @@ -660,8 +660,8 @@ int inet6_sk_rebuild_header(struct sock *sk) memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = sk->sk_protocol; - ipv6_addr_copy(&fl6.daddr, &np->daddr); - ipv6_addr_copy(&fl6.saddr, &np->saddr); + fl6.daddr = np->daddr; + fl6.saddr = np->saddr; fl6.flowlabel = np->flow_label; fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; @@ -769,7 +769,8 @@ out: return err; } -static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, u32 features) +static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, + netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); struct ipv6hdr *ipv6h; @@ -985,9 +986,9 @@ static int __net_init ipv6_init_mibs(struct net *net) sizeof(struct icmpv6_mib), __alignof__(struct icmpv6_mib)) < 0) goto err_icmp_mib; - if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics, - sizeof(struct icmpv6msg_mib), - __alignof__(struct icmpv6msg_mib)) < 0) + net->mib.icmpv6msg_statistics = kzalloc(sizeof(struct icmpv6msg_mib), + GFP_KERNEL); + if (!net->mib.icmpv6msg_statistics) goto err_icmpmsg_mib; return 0; @@ -1008,7 +1009,7 @@ static void ipv6_cleanup_mibs(struct net *net) snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6); snmp_mib_free((void __percpu **)net->mib.ipv6_statistics); snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics); - snmp_mib_free((void __percpu **)net->mib.icmpv6msg_statistics); + kfree(net->mib.icmpv6msg_statistics); } static int __net_init inet6_net_init(struct net *net) diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 4c0f894d0843..2ae79dbeec2f 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -193,9 +193,9 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des printk(KERN_WARNING "destopt hao: invalid header length: %u\n", hao->length); goto bad; } - ipv6_addr_copy(&final_addr, &hao->addr); - ipv6_addr_copy(&hao->addr, &iph->saddr); - ipv6_addr_copy(&iph->saddr, &final_addr); + final_addr = hao->addr; + hao->addr = iph->saddr; + iph->saddr = final_addr; } break; } @@ -241,13 +241,13 @@ static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr) segments = rthdr->hdrlen >> 1; addrs = ((struct rt0_hdr *)rthdr)->addr; - ipv6_addr_copy(&final_addr, addrs + segments - 1); + final_addr = addrs[segments - 1]; addrs += segments - segments_left; memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs)); - ipv6_addr_copy(addrs, &iph->daddr); - ipv6_addr_copy(&iph->daddr, &final_addr); + addrs[0] = iph->daddr; + iph->daddr = final_addr; } static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir) diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index 674255f5e6b7..fc1cdcd7041a 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -75,7 +75,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) if (pac == NULL) return -ENOMEM; pac->acl_next = NULL; - ipv6_addr_copy(&pac->acl_addr, addr); + pac->acl_addr = *addr; 
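
ipv6_init_mibs() above stops allocating the ICMPv6 message counters as a per-cpu SNMP mib and instead kzalloc()s one shared array of atomic_long_t counters, freed with kfree() in ipv6_cleanup_mibs(); net/ipv6/proc.c further down reads it with atomic_long_read(), using bit 0x100 of the index to separate In from Out counters. A standalone sketch of that layout with illustrative names and sizes, not the kernel's:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define MSG_MIB_MAX 512                    /* illustrative: in/out slots for all types */

struct msg_mib {
        atomic_long mibs[MSG_MIB_MAX];     /* one shared counter per message type */
};

int main(void)
{
        /* zeroed allocation, the userspace stand-in for kzalloc(..., GFP_KERNEL) */
        struct msg_mib *stats = calloc(1, sizeof(*stats));

        if (!stats)
                return 1;
        atomic_fetch_add(&stats->mibs[135], 1);          /* one incoming type-135 message */
        atomic_fetch_add(&stats->mibs[135 | 0x100], 1);  /* one outgoing type-135 message */
        printf("in: %ld  out: %ld\n",
               atomic_load(&stats->mibs[135]),
               atomic_load(&stats->mibs[135 | 0x100]));
        free(stats);
        return 0;
}
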
rcu_read_lock(); if (ifindex == 0) { @@ -296,7 +296,7 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr) goto out; } - ipv6_addr_copy(&aca->aca_addr, addr); + aca->aca_addr = *addr; aca->aca_idev = idev; aca->aca_rt = rt; aca->aca_users = 1; diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index e2480691c220..ae08aee1773c 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -71,7 +71,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; - ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst); + usin->sin6_addr = flowlabel->dst; } } @@ -143,7 +143,7 @@ ipv4_connected: } } - ipv6_addr_copy(&np->daddr, daddr); + np->daddr = *daddr; np->flow_label = fl6.flowlabel; inet->inet_dport = usin->sin6_port; @@ -154,8 +154,8 @@ ipv4_connected: */ fl6.flowi6_proto = sk->sk_protocol; - ipv6_addr_copy(&fl6.daddr, &np->daddr); - ipv6_addr_copy(&fl6.saddr, &np->saddr); + fl6.daddr = np->daddr; + fl6.saddr = np->saddr; fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; fl6.fl6_dport = inet->inet_dport; @@ -179,10 +179,10 @@ ipv4_connected: /* source address lookup done in ip6_dst_lookup */ if (ipv6_addr_any(&np->saddr)) - ipv6_addr_copy(&np->saddr, &fl6.saddr); + np->saddr = fl6.saddr; if (ipv6_addr_any(&np->rcv_saddr)) { - ipv6_addr_copy(&np->rcv_saddr, &fl6.saddr); + np->rcv_saddr = fl6.saddr; inet->inet_rcv_saddr = LOOPBACK4_IPV6; if (sk->sk_prot->rehash) sk->sk_prot->rehash(sk); @@ -257,7 +257,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info) skb_put(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); iph = ipv6_hdr(skb); - ipv6_addr_copy(&iph->daddr, &fl6->daddr); + iph->daddr = fl6->daddr; serr = SKB_EXT_ERR(skb); serr->ee.ee_errno = err; @@ -294,7 +294,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu) skb_put(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); iph = ipv6_hdr(skb); - ipv6_addr_copy(&iph->daddr, &fl6->daddr); + iph->daddr = fl6->daddr; mtu_info = IP6CBMTU(skb); @@ -303,7 +303,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu) mtu_info->ip6m_addr.sin6_port = 0; mtu_info->ip6m_addr.sin6_flowinfo = 0; mtu_info->ip6m_addr.sin6_scope_id = fl6->flowi6_oif; - ipv6_addr_copy(&mtu_info->ip6m_addr.sin6_addr, &ipv6_hdr(skb)->daddr); + mtu_info->ip6m_addr.sin6_addr = ipv6_hdr(skb)->daddr; __skb_pull(skb, skb_tail_pointer(skb) - skb->data); skb_reset_transport_header(skb); @@ -354,8 +354,8 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) sin->sin6_port = serr->port; sin->sin6_scope_id = 0; if (skb->protocol == htons(ETH_P_IPV6)) { - ipv6_addr_copy(&sin->sin6_addr, - (struct in6_addr *)(nh + serr->addr_offset)); + sin->sin6_addr = + *(struct in6_addr *)(nh + serr->addr_offset); if (np->sndflow) sin->sin6_flowinfo = (*(__be32 *)(nh + serr->addr_offset - 24) & @@ -376,7 +376,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len) sin->sin6_flowinfo = 0; sin->sin6_scope_id = 0; if (skb->protocol == htons(ETH_P_IPV6)) { - ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr); + sin->sin6_addr = ipv6_hdr(skb)->saddr; if (np->rxopt.all) datagram_recv_ctl(sk, msg, skb); if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) @@ -451,7 +451,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len) sin->sin6_flowinfo = 0; sin->sin6_port = 0; sin->sin6_scope_id = 
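
Nearly all of the churn in net/ipv6/datagram.c above, and in the rest of the IPv6 files in this diff, is the mechanical replacement of ipv6_addr_copy(dst, src) with direct structure assignment (*dst = *src); the helper was effectively a 16-byte copy, so behaviour is unchanged. A tiny standalone check of the equivalence, using the userspace struct in6_addr from <netinet/in.h>:

#include <assert.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>

int main(void)
{
        struct in6_addr src, a, b;

        assert(inet_pton(AF_INET6, "2001:db8::1", &src) == 1);

        memcpy(&a, &src, sizeof(a));    /* what ipv6_addr_copy() amounted to */
        b = src;                        /* the struct assignment used instead */

        assert(memcmp(&a, &b, sizeof(a)) == 0);
        return 0;
}
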
mtu_info.ip6m_addr.sin6_scope_id; - ipv6_addr_copy(&sin->sin6_addr, &mtu_info.ip6m_addr.sin6_addr); + sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr; } put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info); @@ -475,7 +475,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) struct in6_pktinfo src_info; src_info.ipi6_ifindex = opt->iif; - ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr); + src_info.ipi6_addr = ipv6_hdr(skb)->daddr; put_cmsg(msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info); } @@ -550,7 +550,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) struct in6_pktinfo src_info; src_info.ipi6_ifindex = opt->iif; - ipv6_addr_copy(&src_info.ipi6_addr, &ipv6_hdr(skb)->daddr); + src_info.ipi6_addr = ipv6_hdr(skb)->daddr; put_cmsg(msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info); } if (np->rxopt.bits.rxohlim) { @@ -584,7 +584,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) */ sin6.sin6_family = AF_INET6; - ipv6_addr_copy(&sin6.sin6_addr, &ipv6_hdr(skb)->daddr); + sin6.sin6_addr = ipv6_hdr(skb)->daddr; sin6.sin6_port = ports[1]; sin6.sin6_flowinfo = 0; sin6.sin6_scope_id = 0; @@ -654,12 +654,12 @@ int datagram_send_ctl(struct net *net, struct sock *sk, if (addr_type != IPV6_ADDR_ANY) { int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; - if (!inet_sk(sk)->transparent && + if (!(inet_sk(sk)->freebind || inet_sk(sk)->transparent) && !ipv6_chk_addr(net, &src_info->ipi6_addr, strict ? dev : NULL, 0)) err = -EINVAL; else - ipv6_addr_copy(&fl6->saddr, &src_info->ipi6_addr); + fl6->saddr = src_info->ipi6_addr; } rcu_read_unlock(); diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index bf22a225f422..3d641b6e9b09 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -243,9 +243,9 @@ static int ipv6_dest_hao(struct sk_buff *skb, int optoff) if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE; - ipv6_addr_copy(&tmp_addr, &ipv6h->saddr); - ipv6_addr_copy(&ipv6h->saddr, &hao->addr); - ipv6_addr_copy(&hao->addr, &tmp_addr); + tmp_addr = ipv6h->saddr; + ipv6h->saddr = hao->addr; + hao->addr = tmp_addr; if (skb->tstamp.tv64 == 0) __net_timestamp(skb); @@ -461,9 +461,9 @@ looped_back: return -1; } - ipv6_addr_copy(&daddr, addr); - ipv6_addr_copy(addr, &ipv6_hdr(skb)->daddr); - ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &daddr); + daddr = *addr; + *addr = ipv6_hdr(skb)->daddr; + ipv6_hdr(skb)->daddr = daddr; skb_dst_drop(skb); ip6_route_input(skb); @@ -690,7 +690,7 @@ static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto, memcpy(phdr->addr, ihdr->addr + 1, (hops - 1) * sizeof(struct in6_addr)); - ipv6_addr_copy(phdr->addr + (hops - 1), *addr_p); + phdr->addr[hops - 1] = **addr_p; *addr_p = ihdr->addr; phdr->rt_hdr.nexthdr = *proto; @@ -888,8 +888,8 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6, if (!opt || !opt->srcrt) return NULL; - ipv6_addr_copy(orig, &fl6->daddr); - ipv6_addr_copy(&fl6->daddr, ((struct rt0_hdr *)opt->srcrt)->addr); + *orig = fl6->daddr; + fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr; return orig; } diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 295571576f83..b6c573152067 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -96,7 +96,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, if (!ipv6_prefix_equal(&saddr, &r->src.addr, r->src.plen)) goto again; - ipv6_addr_copy(&flp6->saddr, &saddr); + flp6->saddr = saddr; } goto 
out; } diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 90868fb42757..9e2bdccf9143 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -290,9 +290,9 @@ static void mip6_addr_swap(struct sk_buff *skb) if (likely(off >= 0)) { hao = (struct ipv6_destopt_hao *) (skb_network_header(skb) + off); - ipv6_addr_copy(&tmp, &iph->saddr); - ipv6_addr_copy(&iph->saddr, &hao->addr); - ipv6_addr_copy(&hao->addr, &tmp); + tmp = iph->saddr; + iph->saddr = hao->addr; + hao->addr = tmp; } } } @@ -444,9 +444,9 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_ICMPV6; - ipv6_addr_copy(&fl6.daddr, &hdr->saddr); + fl6.daddr = hdr->saddr; if (saddr) - ipv6_addr_copy(&fl6.saddr, saddr); + fl6.saddr = *saddr; fl6.flowi6_oif = iif; fl6.fl6_icmp_type = type; fl6.fl6_icmp_code = code; @@ -538,9 +538,9 @@ static void icmpv6_echo_reply(struct sk_buff *skb) memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_ICMPV6; - ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr); + fl6.daddr = ipv6_hdr(skb)->saddr; if (saddr) - ipv6_addr_copy(&fl6.saddr, saddr); + fl6.saddr = *saddr; fl6.flowi6_oif = skb->dev->ifindex; fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY; security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); @@ -786,8 +786,8 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6, int oif) { memset(fl6, 0, sizeof(*fl6)); - ipv6_addr_copy(&fl6->saddr, saddr); - ipv6_addr_copy(&fl6->daddr, daddr); + fl6->saddr = *saddr; + fl6->daddr = *daddr; fl6->flowi6_proto = IPPROTO_ICMPV6; fl6->fl6_icmp_type = type; fl6->fl6_icmp_code = 0; diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 1567fb120392..02dd203d9eac 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -65,9 +65,9 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk, memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_TCP; - ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr); + fl6.daddr = treq->rmt_addr; final_p = fl6_update_dst(&fl6, np->opt, &final); - ipv6_addr_copy(&fl6.saddr, &treq->loc_addr); + fl6.saddr = treq->loc_addr; fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; fl6.fl6_dport = inet_rsk(req)->rmt_port; @@ -157,7 +157,7 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr) struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr; sin6->sin6_family = AF_INET6; - ipv6_addr_copy(&sin6->sin6_addr, &np->daddr); + sin6->sin6_addr = np->daddr; sin6->sin6_port = inet_sk(sk)->inet_dport; /* We do not store received flowlabel for TCP */ sin6->sin6_flowinfo = 0; @@ -215,8 +215,8 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused) memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = sk->sk_protocol; - ipv6_addr_copy(&fl6.daddr, &np->daddr); - ipv6_addr_copy(&fl6.saddr, &np->saddr); + fl6.daddr = np->daddr; + fl6.saddr = np->saddr; fl6.flowlabel = np->flow_label; IP6_ECN_flow_xmit(sk, fl6.flowlabel); fl6.flowi6_oif = sk->sk_bound_dev_if; @@ -246,7 +246,7 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused) skb_dst_set_noref(skb, dst); /* Restore final destination back after routing done */ - ipv6_addr_copy(&fl6.daddr, &np->daddr); + fl6.daddr = np->daddr; res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass); rcu_read_unlock(); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 93718f3db79b..424f063fb229 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -425,7 +425,8 @@ out: static struct fib6_node * fib6_add_1(struct fib6_node 
*root, void *addr, int addrlen, int plen, - int offset) + int offset, int allow_create, + int replace_required) { struct fib6_node *fn, *in, *ln; struct fib6_node *pn = NULL; @@ -447,8 +448,18 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, * Prefix match */ if (plen < fn->fn_bit || - !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) + !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) { + if (!allow_create) { + if (replace_required) { + pr_warn("IPv6: Can't replace route, " + "no match found\n"); + return ERR_PTR(-ENOENT); + } + pr_warn("IPv6: NLM_F_CREATE should be set " + "when creating new route\n"); + } goto insert_above; + } /* * Exact match ? @@ -477,6 +488,23 @@ static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr, fn = dir ? fn->right: fn->left; } while (fn); + if (!allow_create) { + /* We should not create new node because + * NLM_F_REPLACE was specified without NLM_F_CREATE + * I assume it is safe to require NLM_F_CREATE when + * REPLACE flag is used! Later we may want to remove the + * check for replace_required, because according + * to netlink specification, NLM_F_CREATE + * MUST be specified if new route is created. + * That would keep IPv6 consistent with IPv4 + */ + if (replace_required) { + pr_warn("IPv6: Can't replace route, no match found\n"); + return ERR_PTR(-ENOENT); + } + pr_warn("IPv6: NLM_F_CREATE should be set " + "when creating new route\n"); + } /* * We walked to the bottom of tree. * Create new leaf node without children. @@ -614,6 +642,11 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, { struct rt6_info *iter = NULL; struct rt6_info **ins; + int replace = (NULL != info->nlh && + (info->nlh->nlmsg_flags&NLM_F_REPLACE)); + int add = (NULL == info->nlh || + (info->nlh->nlmsg_flags&NLM_F_CREATE)); + int found = 0; ins = &fn->leaf; @@ -626,6 +659,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, /* * Same priority level */ + if (NULL != info->nlh && + (info->nlh->nlmsg_flags&NLM_F_EXCL)) + return -EEXIST; + if (replace) { + found++; + break; + } if (iter->rt6i_dev == rt->rt6i_dev && iter->rt6i_idev == rt->rt6i_idev && @@ -655,17 +695,40 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, /* * insert node */ + if (!replace) { + if (!add) + pr_warn("IPv6: NLM_F_CREATE should be set when creating new route\n"); + +add: + rt->dst.rt6_next = iter; + *ins = rt; + rt->rt6i_node = fn; + atomic_inc(&rt->rt6i_ref); + inet6_rt_notify(RTM_NEWROUTE, rt, info); + info->nl_net->ipv6.rt6_stats->fib_rt_entries++; + + if ((fn->fn_flags & RTN_RTINFO) == 0) { + info->nl_net->ipv6.rt6_stats->fib_route_nodes++; + fn->fn_flags |= RTN_RTINFO; + } - rt->dst.rt6_next = iter; - *ins = rt; - rt->rt6i_node = fn; - atomic_inc(&rt->rt6i_ref); - inet6_rt_notify(RTM_NEWROUTE, rt, info); - info->nl_net->ipv6.rt6_stats->fib_rt_entries++; - - if ((fn->fn_flags & RTN_RTINFO) == 0) { - info->nl_net->ipv6.rt6_stats->fib_route_nodes++; - fn->fn_flags |= RTN_RTINFO; + } else { + if (!found) { + if (add) + goto add; + pr_warn("IPv6: NLM_F_REPLACE set, but no existing node found!\n"); + return -ENOENT; + } + *ins = rt; + rt->rt6i_node = fn; + rt->dst.rt6_next = iter->dst.rt6_next; + atomic_inc(&rt->rt6i_ref); + inet6_rt_notify(RTM_NEWROUTE, rt, info); + rt6_release(iter); + if ((fn->fn_flags & RTN_RTINFO) == 0) { + info->nl_net->ipv6.rt6_stats->fib_route_nodes++; + fn->fn_flags |= RTN_RTINFO; + } } return 0; @@ -696,9 +759,25 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, 
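
The fib6_add_1()/fib6_add_rt2node() changes above make the IPv6 FIB honour the standard RTM_NEWROUTE flag semantics: NLM_F_EXCL fails with EEXIST when a route of the same metric already exists, NLM_F_REPLACE splices the new route in over the matched one (ENOENT if nothing matches and NLM_F_CREATE is absent), and NLM_F_CREATE is expected whenever a new node has to be created. A userspace-side sketch of the two request shapes, using only standard <linux/netlink.h>/<linux/rtnetlink.h> constants; assembling and sending the full message is omitted:

#include <linux/netlink.h>
#include <linux/rtnetlink.h>

/* "add this route, and fail if it already exists" */
static void request_create(struct nlmsghdr *nlh)
{
        nlh->nlmsg_type  = RTM_NEWROUTE;
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | NLM_F_EXCL;
}

/* "replace the matching route; the kernel answers -ENOENT if none is found" */
static void request_replace(struct nlmsghdr *nlh)
{
        nlh->nlmsg_type  = RTM_NEWROUTE;
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_REPLACE;
}
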
struct nl_info *info) { struct fib6_node *fn, *pn = NULL; int err = -ENOMEM; + int allow_create = 1; + int replace_required = 0; + if (NULL != info->nlh) { + if (!(info->nlh->nlmsg_flags&NLM_F_CREATE)) + allow_create = 0; + if ((info->nlh->nlmsg_flags&NLM_F_REPLACE)) + replace_required = 1; + } + if (!allow_create && !replace_required) + pr_warn("IPv6: RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n"); fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr), - rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst)); + rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst), + allow_create, replace_required); + + if (IS_ERR(fn)) { + err = PTR_ERR(fn); + fn = NULL; + } if (fn == NULL) goto out; @@ -736,7 +815,8 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) sn = fib6_add_1(sfn, &rt->rt6i_src.addr, sizeof(struct in6_addr), rt->rt6i_src.plen, - offsetof(struct rt6_info, rt6i_src)); + offsetof(struct rt6_info, rt6i_src), + allow_create, replace_required); if (sn == NULL) { /* If it is failed, discard just allocated @@ -753,8 +833,13 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) } else { sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr, sizeof(struct in6_addr), rt->rt6i_src.plen, - offsetof(struct rt6_info, rt6i_src)); + offsetof(struct rt6_info, rt6i_src), + allow_create, replace_required); + if (IS_ERR(sn)) { + err = PTR_ERR(sn); + sn = NULL; + } if (sn == NULL) goto st_failure; } diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 4566dbd916d3..b7867a1215b1 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -386,7 +386,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, err = -EINVAL; goto done; } - ipv6_addr_copy(&fl->dst, &freq->flr_dst); + fl->dst = freq->flr_dst; atomic_set(&fl->users, 1); switch (fl->share) { case IPV6_FL_S_EXCL: diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 84d0bd5cac93..a24e15557843 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -238,8 +238,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, hdr->nexthdr = proto; hdr->hop_limit = hlimit; - ipv6_addr_copy(&hdr->saddr, &fl6->saddr); - ipv6_addr_copy(&hdr->daddr, first_hop); + hdr->saddr = fl6->saddr; + hdr->daddr = *first_hop; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; @@ -290,8 +290,8 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev, hdr->nexthdr = proto; hdr->hop_limit = np->hop_limit; - ipv6_addr_copy(&hdr->saddr, saddr); - ipv6_addr_copy(&hdr->daddr, daddr); + hdr->saddr = *saddr; + hdr->daddr = *daddr; return 0; } @@ -631,6 +631,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) struct ipv6hdr *tmp_hdr; struct frag_hdr *fh; unsigned int mtu, hlen, left, len; + int hroom, troom; __be32 frag_id = 0; int ptr, offset = 0, err=0; u8 *prevhdr, nexthdr = 0; @@ -797,6 +798,8 @@ slow_path: */ *prevhdr = NEXTHDR_FRAGMENT; + hroom = LL_RESERVED_SPACE(rt->dst.dev); + troom = rt->dst.dev->needed_tailroom; /* * Keep copying data until we run out. @@ -815,7 +818,8 @@ slow_path: * Allocate buffer. 
*/ - if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) { + if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + + hroom + troom, GFP_ATOMIC)) == NULL) { NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); @@ -828,7 +832,7 @@ slow_path: */ ip6_copy_metadata(frag, skb); - skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev)); + skb_reserve(frag, hroom); skb_put(frag, len + hlen + sizeof(struct frag_hdr)); skb_reset_network_header(frag); fh = (struct frag_hdr *)(skb_network_header(frag) + hlen); @@ -1059,7 +1063,7 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, if (err) return ERR_PTR(err); if (final_dst) - ipv6_addr_copy(&fl6->daddr, final_dst); + fl6->daddr = *final_dst; if (can_sleep) fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP; @@ -1095,7 +1099,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, if (err) return ERR_PTR(err); if (final_dst) - ipv6_addr_copy(&fl6->daddr, final_dst); + fl6->daddr = *final_dst; if (can_sleep) fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP; @@ -1588,7 +1592,7 @@ int ip6_push_pending_frames(struct sock *sk) if (np->pmtudisc < IPV6_PMTUDISC_DO) skb->local_df = 1; - ipv6_addr_copy(final_dst, &fl6->daddr); + *final_dst = fl6->daddr; __skb_pull(skb, skb_network_header_len(skb)); if (opt && opt->opt_flen) ipv6_push_frag_opts(skb, opt, &proto); @@ -1604,8 +1608,8 @@ int ip6_push_pending_frames(struct sock *sk) hdr->hop_limit = np->cork.hop_limit; hdr->nexthdr = proto; - ipv6_addr_copy(&hdr->saddr, &fl6->saddr); - ipv6_addr_copy(&hdr->daddr, final_dst); + hdr->saddr = fl6->saddr; + hdr->daddr = *final_dst; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 4e2e9ff67ef2..f5f98f558acb 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -93,7 +93,7 @@ struct pcpu_tstats { unsigned long rx_bytes; unsigned long tx_packets; unsigned long tx_bytes; -}; +} __attribute__((aligned(4*sizeof(unsigned long)))); static struct net_device_stats *ip6_get_stats(struct net_device *dev) { @@ -979,8 +979,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield); ipv6h->hop_limit = t->parms.hop_limit; ipv6h->nexthdr = proto; - ipv6_addr_copy(&ipv6h->saddr, &fl6->saddr); - ipv6_addr_copy(&ipv6h->daddr, &fl6->daddr); + ipv6h->saddr = fl6->saddr; + ipv6h->daddr = fl6->daddr; nf_reset(skb); pkt_len = skb->len; err = ip6_local_out(skb); @@ -1155,8 +1155,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr)); /* Set up flowi template */ - ipv6_addr_copy(&fl6->saddr, &p->laddr); - ipv6_addr_copy(&fl6->daddr, &p->raddr); + fl6->saddr = p->laddr; + fl6->daddr = p->raddr; fl6->flowi6_oif = p->link; fl6->flowlabel = 0; @@ -1212,8 +1212,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) static int ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p) { - ipv6_addr_copy(&t->parms.laddr, &p->laddr); - ipv6_addr_copy(&t->parms.raddr, &p->raddr); + t->parms.laddr = p->laddr; + t->parms.raddr = p->raddr; t->parms.flags = p->flags; t->parms.hop_limit = p->hop_limit; t->parms.encap_limit = p->encap_limit; diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 449a9185b8f2..c7e95c8c579f 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1105,8 +1105,8 @@ static int ip6mr_cache_report(struct mr6_table *mrt, 
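
ip6_fragment() above, like the mld/ndisc/raw allocations further down, replaces LL_ALLOCATED_SPACE(dev) with an explicit split: LL_RESERVED_SPACE(dev) of headroom for the link-layer header plus dev->needed_tailroom of trailer space. A kernel-style sketch of the resulting allocation pattern, mirroring the hunks rather than adding anything new:

/* Sketch: size a fresh skb with explicit head- and tailroom. */
static struct sk_buff *alloc_tx_skb(struct net_device *dev, unsigned int payload,
                                    gfp_t gfp)
{
        int hlen = LL_RESERVED_SPACE(dev);      /* room for the hard header */
        int tlen = dev->needed_tailroom;        /* room some devices want after the data */
        struct sk_buff *skb;

        skb = alloc_skb(payload + hlen + tlen, gfp);
        if (!skb)
                return NULL;

        skb_reserve(skb, hlen);                 /* only the headroom is reserved up front */
        return skb;
}
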
struct sk_buff *pkt, msg->im6_msgtype = MRT6MSG_WHOLEPKT; msg->im6_mif = mrt->mroute_reg_vif_num; msg->im6_pad = 0; - ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr); - ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr); + msg->im6_src = ipv6_hdr(pkt)->saddr; + msg->im6_dst = ipv6_hdr(pkt)->daddr; skb->ip_summed = CHECKSUM_UNNECESSARY; } else @@ -1131,8 +1131,8 @@ static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt, msg->im6_msgtype = assert; msg->im6_mif = mifi; msg->im6_pad = 0; - ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr); - ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr); + msg->im6_src = ipv6_hdr(pkt)->saddr; + msg->im6_dst = ipv6_hdr(pkt)->daddr; skb_dst_set(skb, dst_clone(skb_dst(pkt))); skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -2181,8 +2181,8 @@ int ip6mr_get_route(struct net *net, iph->payload_len = 0; iph->nexthdr = IPPROTO_NONE; iph->hop_limit = 0; - ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr); - ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr); + iph->saddr = rt->rt6i_src.addr; + iph->daddr = rt->rt6i_dst.addr; err = ip6mr_cache_unresolved(mrt, vif, skb2); read_unlock(&mrt_lock); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 26cb08c84b74..18a2719003c3 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -435,7 +435,7 @@ sticky_done: goto e_inval; np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex; - ipv6_addr_copy(&np->sticky_pktinfo.ipi6_addr, &pkt.ipi6_addr); + np->sticky_pktinfo.ipi6_addr = pkt.ipi6_addr; retv = 0; break; } @@ -980,8 +980,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, struct in6_pktinfo src_info; src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif : np->sticky_pktinfo.ipi6_ifindex; - np->mcast_oif? ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr) : - ipv6_addr_copy(&src_info.ipi6_addr, &(np->sticky_pktinfo.ipi6_addr)); + src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr; put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info); } if (np->rxopt.bits.rxhlim) { @@ -992,8 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, struct in6_pktinfo src_info; src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif : np->sticky_pktinfo.ipi6_ifindex; - np->mcast_oif? ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr) : - ipv6_addr_copy(&src_info.ipi6_addr, &(np->sticky_pktinfo.ipi6_addr)); + src_info.ipi6_addr = np->mcast_oif ? 
np->daddr : np->sticky_pktinfo.ipi6_addr; put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info); } if (np->rxopt.bits.rxohlim) { diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index ee7839f4d6e3..518cbb90c44b 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -155,7 +155,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) return -ENOMEM; mc_lst->next = NULL; - ipv6_addr_copy(&mc_lst->addr, addr); + mc_lst->addr = *addr; rcu_read_lock(); if (ifindex == 0) { @@ -858,7 +858,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc); - ipv6_addr_copy(&mc->mca_addr, addr); + mc->mca_addr = *addr; mc->idev = idev; /* (reference taken) */ mc->mca_users = 1; /* mca_stamp should be updated upon changes */ @@ -1343,13 +1343,15 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) struct mld2_report *pmr; struct in6_addr addr_buf; const struct in6_addr *saddr; + int hlen = LL_RESERVED_SPACE(dev); + int tlen = dev->needed_tailroom; int err; u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT, 2, 0, 0, IPV6_TLV_PADN, 0 }; /* we assume size > sizeof(ra) here */ - size += LL_ALLOCATED_SPACE(dev); + size += hlen + tlen; /* limit our allocations to order-0 page */ size = min_t(int, size, SKB_MAX_ORDER(0, 0)); skb = sock_alloc_send_skb(sk, size, 1, &err); @@ -1357,7 +1359,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) if (!skb) return NULL; - skb_reserve(skb, LL_RESERVED_SPACE(dev)); + skb_reserve(skb, hlen); if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) { /* <draft-ietf-magma-mld-source-05.txt>: @@ -1723,6 +1725,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) struct mld_msg *hdr; const struct in6_addr *snd_addr, *saddr; struct in6_addr addr_buf; + int hlen = LL_RESERVED_SPACE(dev); + int tlen = dev->needed_tailroom; int err, len, payload_len, full_len; u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT, 2, 0, 0, @@ -1744,7 +1748,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) IPSTATS_MIB_OUT, full_len); rcu_read_unlock(); - skb = sock_alloc_send_skb(sk, LL_ALLOCATED_SPACE(dev) + full_len, 1, &err); + skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err); if (skb == NULL) { rcu_read_lock(); @@ -1754,7 +1758,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) return; } - skb_reserve(skb, LL_RESERVED_SPACE(dev)); + skb_reserve(skb, hlen); if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) { /* <draft-ietf-magma-mld-source-05.txt>: @@ -1772,7 +1776,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type) hdr = (struct mld_msg *) skb_put(skb, sizeof(struct mld_msg)); memset(hdr, 0, sizeof(struct mld_msg)); hdr->mld_type = type; - ipv6_addr_copy(&hdr->mld_mca, addr); + hdr->mld_mca = *addr; hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len, IPPROTO_ICMPV6, @@ -1914,7 +1918,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca, * Add multicast single-source filter to the interface list */ static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode, - const struct in6_addr *psfsrc, int delta) + const struct in6_addr *psfsrc) { struct ip6_sf_list *psf, *psf_prev; @@ -2045,7 +2049,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca, pmc->mca_sfcount[sfmode]++; err = 0; for (i=0; i<sfcount; i++) { - err = 
ip6_mc_add1_src(pmc, sfmode, &psfsrc[i], delta); + err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]); if (err) break; } diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index 43242e6e6103..7e1e0fbfef21 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c @@ -195,8 +195,8 @@ static inline int mip6_report_rl_allow(struct timeval *stamp, mip6_report_rl.stamp.tv_sec = stamp->tv_sec; mip6_report_rl.stamp.tv_usec = stamp->tv_usec; mip6_report_rl.iif = iif; - ipv6_addr_copy(&mip6_report_rl.src, src); - ipv6_addr_copy(&mip6_report_rl.dst, dst); + mip6_report_rl.src = *src; + mip6_report_rl.dst = *dst; allow = 1; } spin_unlock_bh(&mip6_report_rl.lock); diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 0cb78d7ddaf5..cfb9709ac7c9 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -126,7 +126,6 @@ static const struct neigh_ops ndisc_direct_ops = { struct neigh_table nd_tbl = { .family = AF_INET6, - .entry_size = sizeof(struct neighbour) + sizeof(struct in6_addr), .key_len = sizeof(struct in6_addr), .hash = ndisc_hash, .constructor = ndisc_constructor, @@ -141,7 +140,7 @@ struct neigh_table nd_tbl = { .gc_staletime = 60 * HZ, .reachable_time = ND_REACHABLE_TIME, .delay_probe_time = 5 * HZ, - .queue_len = 3, + .queue_len_bytes = 64*1024, .ucast_probes = 3, .mcast_probes = 3, .anycast_delay = 1 * HZ, @@ -446,6 +445,8 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev, struct sock *sk = net->ipv6.ndisc_sk; struct sk_buff *skb; struct icmp6hdr *hdr; + int hlen = LL_RESERVED_SPACE(dev); + int tlen = dev->needed_tailroom; int len; int err; u8 *opt; @@ -459,7 +460,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev, skb = sock_alloc_send_skb(sk, (MAX_HEADER + sizeof(struct ipv6hdr) + - len + LL_ALLOCATED_SPACE(dev)), + len + hlen + tlen), 1, &err); if (!skb) { ND_PRINTK0(KERN_ERR @@ -468,7 +469,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev, return NULL; } - skb_reserve(skb, LL_RESERVED_SPACE(dev)); + skb_reserve(skb, hlen); ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len); skb->transport_header = skb->tail; @@ -479,7 +480,7 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev, opt = skb_transport_header(skb) + sizeof(struct icmp6hdr); if (target) { - ipv6_addr_copy((struct in6_addr *)opt, target); + *(struct in6_addr *)opt = *target; opt += sizeof(*target); } @@ -1533,6 +1534,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, struct inet6_dev *idev; struct flowi6 fl6; u8 *opt; + int hlen, tlen; int rd_len; int err; u8 ha_buf[MAX_ADDR_LEN], *ha = NULL; @@ -1590,9 +1592,11 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, rd_len &= ~0x7; len += rd_len; + hlen = LL_RESERVED_SPACE(dev); + tlen = dev->needed_tailroom; buff = sock_alloc_send_skb(sk, (MAX_HEADER + sizeof(struct ipv6hdr) + - len + LL_ALLOCATED_SPACE(dev)), + len + hlen + tlen), 1, &err); if (buff == NULL) { ND_PRINTK0(KERN_ERR @@ -1601,7 +1605,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, goto release; } - skb_reserve(buff, LL_RESERVED_SPACE(dev)); + skb_reserve(buff, hlen); ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr, IPPROTO_ICMPV6, len); @@ -1617,9 +1621,9 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, */ addrp = (struct in6_addr *)(icmph + 1); - ipv6_addr_copy(addrp, target); + *addrp = *target; addrp++; - ipv6_addr_copy(addrp, &ipv6_hdr(skb)->daddr); + *addrp = ipv6_hdr(skb)->daddr; opt = (u8*) (addrp + 1); diff --git a/net/ipv6/netfilter/ip6_queue.c 
b/net/ipv6/netfilter/ip6_queue.c index e63c3972a739..fb80a23c6640 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c @@ -405,6 +405,7 @@ __ipq_rcv_skb(struct sk_buff *skb) int status, type, pid, flags; unsigned int nlmsglen, skblen; struct nlmsghdr *nlh; + bool enable_timestamp = false; skblen = skb->len; if (skblen < sizeof(*nlh)) @@ -442,11 +443,13 @@ __ipq_rcv_skb(struct sk_buff *skb) RCV_SKB_FAIL(-EBUSY); } } else { - net_enable_timestamp(); + enable_timestamp = true; peer_pid = pid; } spin_unlock_bh(&queue_lock); + if (enable_timestamp) + net_enable_timestamp(); status = ipq_receive_peer(NLMSG_DATA(nlh), type, nlmsglen - NLMSG_LENGTH(0)); diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c index a5a4c5dd5396..b5a2aa58a03a 100644 --- a/net/ipv6/netfilter/ip6t_REJECT.c +++ b/net/ipv6/netfilter/ip6t_REJECT.c @@ -93,8 +93,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_TCP; - ipv6_addr_copy(&fl6.saddr, &oip6h->daddr); - ipv6_addr_copy(&fl6.daddr, &oip6h->saddr); + fl6.saddr = oip6h->daddr; + fl6.daddr = oip6h->saddr; fl6.fl6_sport = otcph.dest; fl6.fl6_dport = otcph.source; security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); @@ -129,8 +129,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb) *(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20)); ip6h->hop_limit = ip6_dst_hoplimit(dst); ip6h->nexthdr = IPPROTO_TCP; - ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr); - ipv6_addr_copy(&ip6h->daddr, &oip6h->saddr); + ip6h->saddr = oip6h->daddr; + ip6h->daddr = oip6h->saddr; tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); /* Truncate to length (no data) */ diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 1008ce94bc33..fdeb6d03da81 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -142,11 +142,7 @@ static const struct snmp_mib snmp6_udplite6_list[] = { SNMP_MIB_SENTINEL }; -/* can be called either with percpu mib (pcpumib != NULL), - * or shared one (smib != NULL) - */ -static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **pcpumib, - atomic_long_t *smib) +static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib) { char name[32]; int i; @@ -163,14 +159,14 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **pcpum snprintf(name, sizeof(name), "Icmp6%s%s", i & 0x100 ? "Out" : "In", p); seq_printf(seq, "%-32s\t%lu\n", name, - pcpumib ? snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i)); + atomic_long_read(smib + i)); } /* print by number (nonzero only) - ICMPMsgStat format */ for (i = 0; i < ICMP6MSG_MIB_MAX; i++) { unsigned long val; - val = pcpumib ? 
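
In the ip6_queue hunk above, net_enable_timestamp() is no longer called with queue_lock held; the code only records that it needs to be enabled and makes the call after spin_unlock_bh(). A kernel-style sketch of that decide-under-the-lock, act-after-it pattern; the lock, flag and helper names here are placeholders:

static DEFINE_SPINLOCK(peer_lock);
static int peer_pid;

static void register_peer(int pid)
{
        bool enable_timestamp = false;

        spin_lock_bh(&peer_lock);
        if (peer_pid != pid) {
                /* defer heavyweight helpers until the spinlock is dropped */
                enable_timestamp = true;
                peer_pid = pid;
        }
        spin_unlock_bh(&peer_lock);

        if (enable_timestamp)
                net_enable_timestamp();
}
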
snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i); + val = atomic_long_read(smib + i); if (!val) continue; snprintf(name, sizeof(name), "Icmp6%sType%u", @@ -215,8 +211,7 @@ static int snmp6_seq_show(struct seq_file *seq, void *v) snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp)); snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics, NULL, snmp6_icmp6_list); - snmp6_seq_show_icmpv6msg(seq, - (void __percpu **)net->mib.icmpv6msg_statistics, NULL); + snmp6_seq_show_icmpv6msg(seq, net->mib.icmpv6msg_statistics->mibs); snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6, NULL, snmp6_udp6_list); snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6, @@ -246,7 +241,7 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v) snmp6_ipstats_list); snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs, snmp6_icmp6_list); - snmp6_seq_show_icmpv6msg(seq, NULL, idev->stats.icmpv6msgdev->mibs); + snmp6_seq_show_icmpv6msg(seq, idev->stats.icmpv6msgdev->mibs); return 0; } diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 331af3b882ac..a4894f4f1944 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -299,9 +299,9 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) } inet->inet_rcv_saddr = inet->inet_saddr = v4addr; - ipv6_addr_copy(&np->rcv_saddr, &addr->sin6_addr); + np->rcv_saddr = addr->sin6_addr; if (!(addr_type & IPV6_ADDR_MULTICAST)) - ipv6_addr_copy(&np->saddr, &addr->sin6_addr); + np->saddr = addr->sin6_addr; err = 0; out_unlock: rcu_read_unlock(); @@ -383,7 +383,8 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb) } /* Charge it to the socket. */ - if (ip_queue_rcv_skb(sk, skb) < 0) { + skb_dst_drop(skb); + if (sock_queue_rcv_skb(sk, skb) < 0) { kfree_skb(skb); return NET_RX_DROP; } @@ -494,7 +495,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, if (sin6) { sin6->sin6_family = AF_INET6; sin6->sin6_port = 0; - ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr); + sin6->sin6_addr = ipv6_hdr(skb)->saddr; sin6->sin6_flowinfo = 0; sin6->sin6_scope_id = 0; if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) @@ -610,6 +611,8 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, struct sk_buff *skb; int err; struct rt6_info *rt = (struct rt6_info *)*dstp; + int hlen = LL_RESERVED_SPACE(rt->dst.dev); + int tlen = rt->dst.dev->needed_tailroom; if (length > rt->dst.dev->mtu) { ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu); @@ -619,11 +622,11 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, goto out; skb = sock_alloc_send_skb(sk, - length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15, + length + hlen + tlen + 15, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto error; - skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev)); + skb_reserve(skb, hlen); skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; @@ -843,11 +846,11 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk, goto out; if (!ipv6_addr_any(daddr)) - ipv6_addr_copy(&fl6.daddr, daddr); + fl6.daddr = *daddr; else fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) - ipv6_addr_copy(&fl6.saddr, &np->saddr); + fl6.saddr = np->saddr; final_p = fl6_update_dst(&fl6, opt, &final); diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index dfb164e9051a..b69fae76a6f1 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -153,8 +153,8 @@ void ip6_frag_init(struct 
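
rawv6_rcv_skb() above, like __udp_queue_rcv_skb() earlier and udpv6_queue_rcv_skb() further down, now drops the skb's dst and queues with the generic sock_queue_rcv_skb() instead of ip_queue_rcv_skb(), so packets parked on a socket receive queue no longer hold a dst/route reference. A kernel-style sketch of that receive-side pattern; the helper name is invented:

/* Sketch: queue to the socket without keeping the route alive. */
static int queue_to_socket(struct sock *sk, struct sk_buff *skb)
{
        skb_dst_drop(skb);              /* nothing past this point needs the dst */
        if (sock_queue_rcv_skb(sk, skb) < 0) {
                kfree_skb(skb);
                return NET_RX_DROP;
        }
        return NET_RX_SUCCESS;
}
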
inet_frag_queue *q, void *a) fq->id = arg->id; fq->user = arg->user; - ipv6_addr_copy(&fq->saddr, arg->src); - ipv6_addr_copy(&fq->daddr, arg->dst); + fq->saddr = *arg->src; + fq->daddr = *arg->dst; } EXPORT_SYMBOL(ip6_frag_init); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 3399dd326287..0e381bb94683 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -731,14 +731,14 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort, if (rt->rt6i_dst.plen != 128 && ipv6_addr_equal(&ort->rt6i_dst.addr, daddr)) rt->rt6i_flags |= RTF_ANYCAST; - ipv6_addr_copy(&rt->rt6i_gateway, daddr); + rt->rt6i_gateway = *daddr; } rt->rt6i_flags |= RTF_CACHE; #ifdef CONFIG_IPV6_SUBTREES if (rt->rt6i_src.plen && saddr) { - ipv6_addr_copy(&rt->rt6i_src.addr, saddr); + rt->rt6i_src.addr = *saddr; rt->rt6i_src.plen = 128; } #endif @@ -934,7 +934,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori in6_dev_hold(rt->rt6i_idev); rt->rt6i_expires = 0; - ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway); + rt->rt6i_gateway = ort->rt6i_gateway; rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES; rt->rt6i_metric = 0; @@ -1094,7 +1094,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, rt->dst.output = ip6_output; dst_set_neighbour(&rt->dst, neigh); atomic_set(&rt->dst.__refcnt, 1); - ipv6_addr_copy(&rt->rt6i_dst.addr, addr); + rt->rt6i_dst.addr = *addr; rt->rt6i_dst.plen = 128; rt->rt6i_idev = idev; dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255); @@ -1237,9 +1237,18 @@ int ip6_route_add(struct fib6_config *cfg) if (cfg->fc_metric == 0) cfg->fc_metric = IP6_RT_PRIO_USER; - table = fib6_new_table(net, cfg->fc_table); + err = -ENOBUFS; + if (NULL != cfg->fc_nlinfo.nlh && + !(cfg->fc_nlinfo.nlh->nlmsg_flags&NLM_F_CREATE)) { + table = fib6_get_table(net, cfg->fc_table); + if (table == NULL) { + printk(KERN_WARNING "IPv6: NLM_F_CREATE should be specified when creating new route\n"); + table = fib6_new_table(net, cfg->fc_table); + } + } else { + table = fib6_new_table(net, cfg->fc_table); + } if (table == NULL) { - err = -ENOBUFS; goto out; } @@ -1322,7 +1331,7 @@ int ip6_route_add(struct fib6_config *cfg) int gwa_type; gw_addr = &cfg->fc_gateway; - ipv6_addr_copy(&rt->rt6i_gateway, gw_addr); + rt->rt6i_gateway = *gw_addr; gwa_type = ipv6_addr_type(gw_addr); if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) { @@ -1376,7 +1385,7 @@ int ip6_route_add(struct fib6_config *cfg) err = -EINVAL; goto out; } - ipv6_addr_copy(&rt->rt6i_prefsrc.addr, &cfg->fc_prefsrc); + rt->rt6i_prefsrc.addr = cfg->fc_prefsrc; rt->rt6i_prefsrc.plen = 128; } else rt->rt6i_prefsrc.plen = 0; @@ -1573,7 +1582,7 @@ static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest, }, }; - ipv6_addr_copy(&rdfl.gateway, gateway); + rdfl.gateway = *gateway; if (rt6_need_strict(dest)) flags |= RT6_LOOKUP_F_IFACE; @@ -1629,7 +1638,7 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src, if (on_link) nrt->rt6i_flags &= ~RTF_GATEWAY; - ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key); + nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key; dst_set_neighbour(&nrt->dst, neigh_clone(neigh)); if (ip6_ins_rt(nrt)) @@ -1775,7 +1784,7 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, rt->dst.output = ort->dst.output; rt->dst.flags |= DST_HOST; - ipv6_addr_copy(&rt->rt6i_dst.addr, dest); + rt->rt6i_dst.addr = *dest; rt->rt6i_dst.plen = 128; dst_copy_metrics(&rt->dst, &ort->dst); rt->dst.error = ort->dst.error; @@ -1785,7 +1794,7 @@ 
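
ip6_route_add() above now distinguishes looking up an existing table from creating one on demand: without NLM_F_CREATE in the request it first tries fib6_get_table(), warns if the table is missing, and only then falls back to fib6_new_table(). A condensed kernel-style sketch of that decision; the helper name is invented, the behaviour follows the hunk:

static struct fib6_table *route_add_table(struct net *net, struct fib6_config *cfg)
{
        struct fib6_table *table;

        if (cfg->fc_nlinfo.nlh &&
            !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
                /* caller did not ask to create anything: prefer an existing table */
                table = fib6_get_table(net, cfg->fc_table);
                if (!table)                     /* tolerated, but warned about */
                        table = fib6_new_table(net, cfg->fc_table);
        } else {
                table = fib6_new_table(net, cfg->fc_table);
        }
        return table;                           /* NULL means -ENOBUFS in the caller */
}
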
static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, rt->dst.lastuse = jiffies; rt->rt6i_expires = 0; - ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway); + rt->rt6i_gateway = ort->rt6i_gateway; rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES; rt->rt6i_metric = 0; @@ -1848,8 +1857,8 @@ static struct rt6_info *rt6_add_route_info(struct net *net, .fc_nlinfo.nl_net = net, }; - ipv6_addr_copy(&cfg.fc_dst, prefix); - ipv6_addr_copy(&cfg.fc_gateway, gwaddr); + cfg.fc_dst = *prefix; + cfg.fc_gateway = *gwaddr; /* We should treat it as a default route if prefix length is 0. */ if (!prefixlen) @@ -1898,7 +1907,7 @@ struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr, .fc_nlinfo.nl_net = dev_net(dev), }; - ipv6_addr_copy(&cfg.fc_gateway, gwaddr); + cfg.fc_gateway = *gwaddr; ip6_route_add(&cfg); @@ -1944,9 +1953,9 @@ static void rtmsg_to_fib6_config(struct net *net, cfg->fc_nlinfo.nl_net = net; - ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst); - ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src); - ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway); + cfg->fc_dst = rtmsg->rtmsg_dst; + cfg->fc_src = rtmsg->rtmsg_src; + cfg->fc_gateway = rtmsg->rtmsg_gateway; } int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg) @@ -2080,7 +2089,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, } dst_set_neighbour(&rt->dst, neigh); - ipv6_addr_copy(&rt->rt6i_dst.addr, addr); + rt->rt6i_dst.addr = *addr; rt->rt6i_dst.plen = 128; rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); @@ -2098,7 +2107,7 @@ int ip6_route_get_saddr(struct net *net, struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt); int err = 0; if (rt->rt6i_prefsrc.plen) - ipv6_addr_copy(saddr, &rt->rt6i_prefsrc.addr); + *saddr = rt->rt6i_prefsrc.addr; else err = ipv6_dev_get_saddr(net, idev ? 
idev->dev : NULL, daddr, prefs, saddr); @@ -2437,7 +2446,7 @@ static int rt6_fill_node(struct net *net, if (rt->rt6i_prefsrc.plen) { struct in6_addr saddr_buf; - ipv6_addr_copy(&saddr_buf, &rt->rt6i_prefsrc.addr); + saddr_buf = rt->rt6i_prefsrc.addr; NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); } @@ -2511,14 +2520,14 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr)) goto errout; - ipv6_addr_copy(&fl6.saddr, nla_data(tb[RTA_SRC])); + fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]); } if (tb[RTA_DST]) { if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr)) goto errout; - ipv6_addr_copy(&fl6.daddr, nla_data(tb[RTA_DST])); + fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]); } if (tb[RTA_IIF]) diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index a7a18602a046..50968f226e75 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -91,7 +91,7 @@ struct pcpu_tstats { unsigned long rx_bytes; unsigned long tx_packets; unsigned long tx_bytes; -}; +} __attribute__((aligned(4*sizeof(unsigned long)))); static struct net_device_stats *ipip6_get_stats(struct net_device *dev) { @@ -914,7 +914,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) goto done; #ifdef CONFIG_IPV6_SIT_6RD } else { - ipv6_addr_copy(&ip6rd.prefix, &t->ip6rd.prefix); + ip6rd.prefix = t->ip6rd.prefix; ip6rd.relay_prefix = t->ip6rd.relay_prefix; ip6rd.prefixlen = t->ip6rd.prefixlen; ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen; @@ -1082,7 +1082,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) if (relay_prefix != ip6rd.relay_prefix) goto done; - ipv6_addr_copy(&t->ip6rd.prefix, &prefix); + t->ip6rd.prefix = prefix; t->ip6rd.relay_prefix = relay_prefix; t->ip6rd.prefixlen = ip6rd.prefixlen; t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen; diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 5a0d6648bbbc..8e951d8d3b81 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -200,8 +200,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) req->mss = mss; ireq->rmt_port = th->source; ireq->loc_port = th->dest; - ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr); - ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr); + ireq6->rmt_addr = ipv6_hdr(skb)->saddr; + ireq6->loc_addr = ipv6_hdr(skb)->daddr; if (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { @@ -237,9 +237,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) struct flowi6 fl6; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_TCP; - ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr); + fl6.daddr = ireq6->rmt_addr; final_p = fl6_update_dst(&fl6, np->opt, &final); - ipv6_addr_copy(&fl6.saddr, &ireq6->loc_addr); + fl6.saddr = ireq6->loc_addr; fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; fl6.fl6_dport = inet_rsk(req)->rmt_port; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 2dea4bb7b54a..9d74eee334d6 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -153,7 +153,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; - ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst); + usin->sin6_addr = flowlabel->dst; fl6_sock_release(flowlabel); } } @@ -195,7 +195,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, tp->write_seq = 0; } - 
ipv6_addr_copy(&np->daddr, &usin->sin6_addr); + np->daddr = usin->sin6_addr; np->flow_label = fl6.flowlabel; /* @@ -244,9 +244,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, saddr = &np->rcv_saddr; fl6.flowi6_proto = IPPROTO_TCP; - ipv6_addr_copy(&fl6.daddr, &np->daddr); - ipv6_addr_copy(&fl6.saddr, - (saddr ? saddr : &np->saddr)); + fl6.daddr = np->daddr; + fl6.saddr = saddr ? *saddr : np->saddr; fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; fl6.fl6_dport = usin->sin6_port; @@ -264,11 +263,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, if (saddr == NULL) { saddr = &fl6.saddr; - ipv6_addr_copy(&np->rcv_saddr, saddr); + np->rcv_saddr = *saddr; } /* set the source address */ - ipv6_addr_copy(&np->saddr, saddr); + np->saddr = *saddr; inet->inet_rcv_saddr = LOOPBACK4_IPV6; sk->sk_gso_type = SKB_GSO_TCPV6; @@ -398,8 +397,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, */ memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_TCP; - ipv6_addr_copy(&fl6.daddr, &np->daddr); - ipv6_addr_copy(&fl6.saddr, &np->saddr); + fl6.daddr = np->daddr; + fl6.saddr = np->saddr; fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; fl6.fl6_dport = inet->inet_dport; @@ -489,8 +488,8 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_TCP; - ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr); - ipv6_addr_copy(&fl6.saddr, &treq->loc_addr); + fl6.daddr = treq->rmt_addr; + fl6.saddr = treq->loc_addr; fl6.flowlabel = 0; fl6.flowi6_oif = treq->iif; fl6.flowi6_mark = sk->sk_mark; @@ -512,7 +511,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, if (skb) { __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); - ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr); + fl6.daddr = treq->rmt_addr; err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); err = net_xmit_eval(err); } @@ -617,8 +616,7 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer, tp->md5sig_info->alloced6++; } - ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr, - peer); + tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr = *peer; tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey; tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen; @@ -750,8 +748,8 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, bp = &hp->md5_blk.ip6; /* 1. 
TCP pseudo-header (RFC2460) */ - ipv6_addr_copy(&bp->saddr, saddr); - ipv6_addr_copy(&bp->daddr, daddr); + bp->saddr = *saddr; + bp->daddr = *daddr; bp->protocol = cpu_to_be32(IPPROTO_TCP); bp->len = cpu_to_be32(nbytes); @@ -1039,8 +1037,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, #endif memset(&fl6, 0, sizeof(fl6)); - ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr); - ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr); + fl6.daddr = ipv6_hdr(skb)->saddr; + fl6.saddr = ipv6_hdr(skb)->daddr; buff->ip_summed = CHECKSUM_PARTIAL; buff->csum = 0; @@ -1250,8 +1248,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) tcp_openreq_init(req, &tmp_opt, skb); treq = inet6_rsk(req); - ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr); - ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr); + treq->rmt_addr = ipv6_hdr(skb)->saddr; + treq->loc_addr = ipv6_hdr(skb)->daddr; if (!want_cookie || tmp_opt.tstamp_ok) TCP_ECN_create_request(req, tcp_hdr(skb)); @@ -1381,7 +1379,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr); - ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr); + newnp->rcv_saddr = newnp->saddr; inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; newsk->sk_backlog_rcv = tcp_v4_do_rcv; @@ -1445,9 +1443,9 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, memcpy(newnp, np, sizeof(struct ipv6_pinfo)); - ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr); - ipv6_addr_copy(&newnp->saddr, &treq->loc_addr); - ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr); + newnp->daddr = treq->rmt_addr; + newnp->saddr = treq->loc_addr; + newnp->rcv_saddr = treq->loc_addr; newsk->sk_bound_dev_if = treq->iif; /* Now IPv6 options... 
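
/*
 * A user-space sketch of why the ipv6_addr_copy() calls above can become
 * plain structure assignments: assigning one struct in6_addr to another
 * copies all 16 bytes, matching what the removed helper did.
 */
#include <assert.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct in6_addr src, dst;

	memset(&src, 0, sizeof(src));
	src.s6_addr[0] = 0x20;		/* 2001:db8::1 */
	src.s6_addr[1] = 0x01;
	src.s6_addr[2] = 0x0d;
	src.s6_addr[3] = 0xb8;
	src.s6_addr[15] = 0x01;

	dst = src;			/* whole-struct copy, no helper needed */

	assert(memcmp(&dst, &src, sizeof(src)) == 0);
	puts("struct assignment copies the full address");
	return 0;
}
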
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 8c2541915183..adfe26a7fc63 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -418,8 +418,7 @@ try_again: ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, &sin6->sin6_addr); else { - ipv6_addr_copy(&sin6->sin6_addr, - &ipv6_hdr(skb)->saddr); + sin6->sin6_addr = ipv6_hdr(skb)->saddr; if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) sin6->sin6_scope_id = IP6CB(skb)->iif; } @@ -539,7 +538,9 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) goto drop; } - if ((rc = ip_queue_rcv_skb(sk, skb)) < 0) { + skb_dst_drop(skb); + rc = sock_queue_rcv_skb(sk, skb); + if (rc < 0) { /* Note that an ENOMEM error is charged twice */ if (rc == -ENOMEM) UDP6_INC_STATS_BH(sock_net(sk), @@ -1114,11 +1115,11 @@ do_udp_sendmsg: fl6.flowi6_proto = sk->sk_protocol; if (!ipv6_addr_any(daddr)) - ipv6_addr_copy(&fl6.daddr, daddr); + fl6.daddr = *daddr; else fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) - ipv6_addr_copy(&fl6.saddr, &np->saddr); + fl6.saddr = np->saddr; fl6.fl6_sport = inet->inet_sport; final_p = fl6_update_dst(&fl6, opt, &final); @@ -1299,7 +1300,8 @@ static int udp6_ufo_send_check(struct sk_buff *skb) return 0; } -static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features) +static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, + netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int mss; diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c index 3437d7d4eed6..a81ce9450750 100644 --- a/net/ipv6/xfrm6_mode_beet.c +++ b/net/ipv6/xfrm6_mode_beet.c @@ -72,8 +72,8 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb) top_iph->nexthdr = IPPROTO_BEETPH; } - ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr); - ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr); + top_iph->saddr = *(struct in6_addr *)&x->props.saddr; + top_iph->daddr = *(struct in6_addr *)&x->id.daddr; return 0; } @@ -99,8 +99,8 @@ static int xfrm6_beet_input(struct xfrm_state *x, struct sk_buff *skb) ip6h = ipv6_hdr(skb); ip6h->payload_len = htons(skb->len - size); - ipv6_addr_copy(&ip6h->daddr, (struct in6_addr *) &x->sel.daddr.a6); - ipv6_addr_copy(&ip6h->saddr, (struct in6_addr *) &x->sel.saddr.a6); + ip6h->daddr = *(struct in6_addr *)&x->sel.daddr.a6; + ip6h->saddr = *(struct in6_addr *)&x->sel.saddr.a6; err = 0; out: return err; diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c index 4d6edff0498f..261e6e6f487e 100644 --- a/net/ipv6/xfrm6_mode_tunnel.c +++ b/net/ipv6/xfrm6_mode_tunnel.c @@ -55,8 +55,8 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) dsfield &= ~INET_ECN_MASK; ipv6_change_dsfield(top_iph, 0, dsfield); top_iph->hop_limit = ip6_dst_hoplimit(dst->child); - ipv6_addr_copy(&top_iph->saddr, (const struct in6_addr *)&x->props.saddr); - ipv6_addr_copy(&top_iph->daddr, (const struct in6_addr *)&x->id.daddr); + top_iph->saddr = *(struct in6_addr *)&x->props.saddr; + top_iph->daddr = *(struct in6_addr *)&x->id.daddr; return 0; } diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index faae41737fca..4eeff89c1aaa 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -49,7 +49,7 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu) struct sock *sk = skb->sk; fl6.flowi6_oif = sk->sk_bound_dev_if; - ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr); + fl6.daddr = 
ipv6_hdr(skb)->daddr; ipv6_local_rxpmtu(sk, &fl6, mtu); } @@ -60,7 +60,7 @@ static void xfrm6_local_error(struct sk_buff *skb, u32 mtu) struct sock *sk = skb->sk; fl6.fl6_dport = inet_sk(sk)->inet_dport; - ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr); + fl6.daddr = ipv6_hdr(skb)->daddr; ipv6_local_error(sk, EMSGSIZE, &fl6, mtu); } diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index d879f7efbd10..8ea65e032733 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -132,8 +132,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) memset(fl6, 0, sizeof(struct flowi6)); fl6->flowi6_mark = skb->mark; - ipv6_addr_copy(&fl6->daddr, reverse ? &hdr->saddr : &hdr->daddr); - ipv6_addr_copy(&fl6->saddr, reverse ? &hdr->daddr : &hdr->saddr); + fl6->daddr = reverse ? hdr->saddr : hdr->daddr; + fl6->saddr = reverse ? hdr->daddr : hdr->saddr; while (nh + offset + 1 < skb->data || pskb_may_pull(skb, nh + offset + 1 - skb->data)) { diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c index f2d72b8a3faa..3f2f7c4ab721 100644 --- a/net/ipv6/xfrm6_state.c +++ b/net/ipv6/xfrm6_state.c @@ -27,8 +27,8 @@ __xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl) /* Initialize temporary selector matching only * to current session. */ - ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl6->daddr); - ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl6->saddr); + *(struct in6_addr *)&sel->daddr = fl6->daddr; + *(struct in6_addr *)&sel->saddr = fl6->saddr; sel->dport = xfrm_flowi_dport(fl, &fl6->uli); sel->dport_mask = htons(0xffff); sel->sport = xfrm_flowi_sport(fl, &fl6->uli); diff --git a/net/irda/irttp.c b/net/irda/irttp.c index 32e3bb026110..5c93f2952b08 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c @@ -1461,14 +1461,12 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance) } /* Allocate a new instance */ - new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC); + new = kmemdup(orig, sizeof(struct tsap_cb), GFP_ATOMIC); if (!new) { IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__); spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags); return NULL; } - /* Dup */ - memcpy(new, orig, sizeof(struct tsap_cb)); spin_lock_init(&new->lock); /* We don't need the old instance any more */ diff --git a/net/key/af_key.c b/net/key/af_key.c index 1e733e9073d0..bfc0bef170cb 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -712,7 +712,7 @@ static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port sin6->sin6_family = AF_INET6; sin6->sin6_port = port; sin6->sin6_flowinfo = 0; - ipv6_addr_copy(&sin6->sin6_addr, (const struct in6_addr *)xaddr->a6); + sin6->sin6_addr = *(struct in6_addr *)xaddr->a6; sin6->sin6_scope_id = 0; return 128; } diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 93b243422659..476b106c0b1c 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -177,7 +177,8 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); if (sdata->vif.type == NL80211_IFTYPE_AP || - sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + sdata->vif.type == NL80211_IFTYPE_MESH_POINT) memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); else if (sdata->vif.type == NL80211_IFTYPE_STATION) memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index b064e4df12c6..556765749b9c 100644 --- 
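
/*
 * A user-space sketch of the kmalloc()+memcpy() -> kmemdup() cleanup in
 * irttp_dup() above: one helper that allocates and copies in a single
 * step.  xmemdup() below is only a stand-in for the kernel's kmemdup(),
 * and the tsap structure is illustrative.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *xmemdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

struct tsap {
	int id;
	char name[16];
};

int main(void)
{
	struct tsap orig = { 7, "original" };
	struct tsap *copy = xmemdup(&orig, sizeof(orig));

	if (!copy)
		return 1;
	printf("dup of tsap %d (%s)\n", copy->id, copy->name);
	free(copy);
	return 0;
}
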
a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -78,7 +78,8 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); if (sdata->vif.type == NL80211_IFTYPE_AP || - sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + sdata->vif.type == NL80211_IFTYPE_MESH_POINT) memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); else if (sdata->vif.type == NL80211_IFTYPE_STATION) memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); @@ -392,13 +393,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, pubsta->addr, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ - /* - * The aggregation code is not prepared to handle - * anything but STA/AP due to the BSSID handling. - * IBSS could work in the code but isn't supported - * by drivers or the standard. - */ if (sdata->vif.type != NL80211_IFTYPE_STATION && + sdata->vif.type != NL80211_IFTYPE_MESH_POINT && sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_AP) return -EINVAL; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index d06c65fa5526..2577c45069e5 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -411,7 +411,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) | BIT(NL80211_STA_FLAG_WME) | BIT(NL80211_STA_FLAG_MFP) | - BIT(NL80211_STA_FLAG_AUTHENTICATED); + BIT(NL80211_STA_FLAG_AUTHENTICATED) | + BIT(NL80211_STA_FLAG_TDLS_PEER); if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED); if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE)) @@ -422,6 +423,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP); if (test_sta_flag(sta, WLAN_STA_AUTH)) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED); + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) + sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER); } @@ -488,6 +491,31 @@ static void ieee80211_config_ap_ssid(struct ieee80211_sub_if_data *sdata, (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE); } +static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata, + u8 *resp, size_t resp_len) +{ + struct sk_buff *new, *old; + + if (!resp || !resp_len) + return -EINVAL; + + old = sdata->u.ap.probe_resp; + + new = dev_alloc_skb(resp_len); + if (!new) + return -ENOMEM; + + memcpy(skb_put(new, resp_len), resp, resp_len); + + rcu_assign_pointer(sdata->u.ap.probe_resp, new); + synchronize_rcu(); + + if (old) + dev_kfree_skb(old); + + return 0; +} + /* * This handles both adding a beacon and setting new beacon info */ @@ -498,6 +526,7 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata, int new_head_len, new_tail_len; int size; int err = -EINVAL; + u32 changed = 0; old = rtnl_dereference(sdata->u.ap.beacon); @@ -581,11 +610,17 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata, kfree(old); + err = ieee80211_set_probe_resp(sdata, params->probe_resp, + params->probe_resp_len); + if (!err) + changed |= BSS_CHANGED_AP_PROBE_RESP; + ieee80211_config_ap_ssid(sdata, params); + changed |= BSS_CHANGED_BEACON_ENABLED | + BSS_CHANGED_BEACON | + BSS_CHANGED_SSID; - ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | - BSS_CHANGED_BEACON | - BSS_CHANGED_SSID); + ieee80211_bss_info_change_notify(sdata, changed); return 0; } @@ -594,6 +629,8 @@ static int 
ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, { struct ieee80211_sub_if_data *sdata; struct beacon_data *old; + struct ieee80211_sub_if_data *vlan; + int ret; sdata = IEEE80211_DEV_TO_SUB_IF(dev); @@ -601,7 +638,24 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, if (old) return -EALREADY; - return ieee80211_config_beacon(sdata, params); + ret = ieee80211_config_beacon(sdata, params); + if (ret) + return ret; + + /* + * Apply control port protocol, this allows us to + * not encrypt dynamic WEP control frames. + */ + sdata->control_port_protocol = params->crypto.control_port_ethertype; + sdata->control_port_no_encrypt = params->crypto.control_port_no_encrypt; + list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) { + vlan->control_port_protocol = + params->crypto.control_port_ethertype; + vlan->control_port_no_encrypt = + params->crypto.control_port_no_encrypt; + } + + return 0; } static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev, @@ -778,7 +832,7 @@ static void sta_apply_parameters(struct ieee80211_local *local, } if (params->ht_capa) - ieee80211_ht_cap_ie_to_sta_ht_cap(sband, + ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, params->ht_capa, &sta->sta.ht_cap); @@ -847,7 +901,12 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, sta_apply_parameters(local, sta, params); - rate_control_rate_init(sta); + /* + * for TDLS, rate control should be initialized only when supported + * rates are known. + */ + if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) + rate_control_rate_init(sta); layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN || sdata->vif.type == NL80211_IFTYPE_AP; @@ -931,6 +990,9 @@ static int ieee80211_change_station(struct wiphy *wiphy, sta_apply_parameters(local, sta, params); + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && params->supported_rates) + rate_control_rate_init(sta); + rcu_read_unlock(); if (sdata->vif.type == NL80211_IFTYPE_STATION && @@ -1394,7 +1456,7 @@ static int ieee80211_set_channel(struct wiphy *wiphy, (old_oper_type != local->_oper_channel_type)) ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); - if ((sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR) && + if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR && old_vif_oper_type != sdata->vif.bss_conf.channel_type) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT); @@ -1917,7 +1979,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, enum nl80211_channel_type channel_type, bool channel_type_valid, unsigned int wait, const u8 *buf, size_t len, bool no_cck, - u64 *cookie) + bool dont_wait_for_ack, u64 *cookie) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; @@ -1925,10 +1987,15 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, struct sta_info *sta; struct ieee80211_work *wk; const struct ieee80211_mgmt *mgmt = (void *)buf; - u32 flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX | - IEEE80211_TX_CTL_REQ_TX_STATUS; + u32 flags; bool is_offchan = false; + if (dont_wait_for_ack) + flags = IEEE80211_TX_CTL_NO_ACK; + else + flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX | + IEEE80211_TX_CTL_REQ_TX_STATUS; + /* Check that we are on the requested channel for transmission */ if (chan != local->tmp_channel && chan != local->oper_channel) @@ -2488,6 +2555,82 @@ static int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, return 0; } +static int 
ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, + const u8 *peer, u64 *cookie) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; + struct ieee80211_qos_hdr *nullfunc; + struct sk_buff *skb; + int size = sizeof(*nullfunc); + __le16 fc; + bool qos; + struct ieee80211_tx_info *info; + struct sta_info *sta; + + rcu_read_lock(); + sta = sta_info_get(sdata, peer); + if (sta) { + qos = test_sta_flag(sta, WLAN_STA_WME); + rcu_read_unlock(); + } else { + rcu_read_unlock(); + return -ENOLINK; + } + + if (qos) { + fc = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_QOS_NULLFUNC | + IEEE80211_FCTL_FROMDS); + } else { + size -= 2; + fc = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_NULLFUNC | + IEEE80211_FCTL_FROMDS); + } + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + size); + if (!skb) + return -ENOMEM; + + skb->dev = dev; + + skb_reserve(skb, local->hw.extra_tx_headroom); + + nullfunc = (void *) skb_put(skb, size); + nullfunc->frame_control = fc; + nullfunc->duration_id = 0; + memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); + memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); + memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN); + nullfunc->seq_ctrl = 0; + + info = IEEE80211_SKB_CB(skb); + + info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS | + IEEE80211_TX_INTFL_NL80211_FRAME_TX; + + skb_set_queue_mapping(skb, IEEE80211_AC_VO); + skb->priority = 7; + if (qos) + nullfunc->qos_ctrl = cpu_to_le16(7); + + local_bh_disable(); + ieee80211_xmit(sdata, skb); + local_bh_enable(); + + *cookie = (unsigned long) skb; + return 0; +} + +static struct ieee80211_channel * +ieee80211_wiphy_get_channel(struct wiphy *wiphy) +{ + struct ieee80211_local *local = wiphy_priv(wiphy); + + return local->oper_channel; +} + struct cfg80211_ops mac80211_config_ops = { .add_virtual_intf = ieee80211_add_iface, .del_virtual_intf = ieee80211_del_iface, @@ -2553,4 +2696,6 @@ struct cfg80211_ops mac80211_config_ops = { .set_rekey_data = ieee80211_set_rekey_data, .tdls_oper = ieee80211_tdls_oper, .tdls_mgmt = ieee80211_tdls_mgmt, + .probe_client = ieee80211_probe_client, + .get_channel = ieee80211_wiphy_get_channel, }; diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 883996b2f99f..00cefcb493eb 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -190,7 +190,7 @@ static ssize_t uapsd_max_sp_len_write(struct file *file, return -EFAULT; buf[len] = '\0'; - ret = strict_strtoul(buf, 0, &val); + ret = kstrtoul(buf, 0, &val); if (ret) return -EINVAL; diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 3110cbdc501b..2406b3e7393f 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -63,10 +63,10 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf, test_sta_flag(sta, WLAN_STA_##flg) ? 
#flg "\n" : "" int res = scnprintf(buf, sizeof(buf), - "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", + "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", TEST(AUTH), TEST(ASSOC), TEST(PS_STA), TEST(PS_DRIVER), TEST(AUTHORIZED), - TEST(SHORT_PREAMBLE), TEST(ASSOC_AP), + TEST(SHORT_PREAMBLE), TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT), TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL), TEST(UAPSD), TEST(SP), TEST(TDLS_PEER), diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 5f165d7eb2db..49cc5e0e8a6a 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -5,11 +5,24 @@ #include "ieee80211_i.h" #include "driver-trace.h" +static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata) +{ + WARN_ON(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER)); +} + static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb) { local->ops->tx(&local->hw, skb); } +static inline void drv_tx_frags(struct ieee80211_local *local, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct sk_buff_head *skbs) +{ + local->ops->tx_frags(&local->hw, vif, sta, skbs); +} + static inline int drv_start(struct ieee80211_local *local) { int ret; @@ -69,15 +82,23 @@ static inline int drv_resume(struct ieee80211_local *local) #endif static inline int drv_add_interface(struct ieee80211_local *local, - struct ieee80211_vif *vif) + struct ieee80211_sub_if_data *sdata) { int ret; might_sleep(); - trace_drv_add_interface(local, vif_to_sdata(vif)); - ret = local->ops->add_interface(&local->hw, vif); + if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + sdata->vif.type == NL80211_IFTYPE_MONITOR)) + return -EINVAL; + + trace_drv_add_interface(local, sdata); + ret = local->ops->add_interface(&local->hw, &sdata->vif); trace_drv_return_int(local, ret); + + if (ret == 0) + sdata->flags |= IEEE80211_SDATA_IN_DRIVER; + return ret; } @@ -89,6 +110,8 @@ static inline int drv_change_interface(struct ieee80211_local *local, might_sleep(); + check_sdata_in_driver(sdata); + trace_drv_change_interface(local, sdata, type, p2p); ret = local->ops->change_interface(&local->hw, &sdata->vif, type, p2p); trace_drv_return_int(local, ret); @@ -96,12 +119,15 @@ static inline int drv_change_interface(struct ieee80211_local *local, } static inline void drv_remove_interface(struct ieee80211_local *local, - struct ieee80211_vif *vif) + struct ieee80211_sub_if_data *sdata) { might_sleep(); - trace_drv_remove_interface(local, vif_to_sdata(vif)); - local->ops->remove_interface(&local->hw, vif); + check_sdata_in_driver(sdata); + + trace_drv_remove_interface(local, sdata); + local->ops->remove_interface(&local->hw, &sdata->vif); + sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER; trace_drv_return_void(local); } @@ -124,6 +150,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local, { might_sleep(); + check_sdata_in_driver(sdata); + trace_drv_bss_info_changed(local, sdata, info, changed); if (local->ops->bss_info_changed) local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed); @@ -139,6 +167,8 @@ static inline int drv_tx_sync(struct ieee80211_local *local, might_sleep(); + check_sdata_in_driver(sdata); + trace_drv_tx_sync(local, sdata, bssid, type); if (local->ops->tx_sync) ret = local->ops->tx_sync(&local->hw, &sdata->vif, @@ -154,6 +184,8 @@ static inline void drv_finish_tx_sync(struct ieee80211_local *local, { might_sleep(); + check_sdata_in_driver(sdata); + trace_drv_finish_tx_sync(local, sdata, bssid, type); if (local->ops->finish_tx_sync) local->ops->finish_tx_sync(&local->hw, 
&sdata->vif, @@ -211,6 +243,8 @@ static inline int drv_set_key(struct ieee80211_local *local, might_sleep(); + check_sdata_in_driver(sdata); + trace_drv_set_key(local, cmd, sdata, sta, key); ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key); trace_drv_return_int(local, ret); @@ -228,6 +262,8 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local, if (sta) ista = &sta->sta; + check_sdata_in_driver(sdata); + trace_drv_update_tkip_key(local, sdata, conf, ista, iv32); if (local->ops->update_tkip_key) local->ops->update_tkip_key(&local->hw, &sdata->vif, conf, @@ -243,6 +279,8 @@ static inline int drv_hw_scan(struct ieee80211_local *local, might_sleep(); + check_sdata_in_driver(sdata); + trace_drv_hw_scan(local, sdata); ret = local->ops->hw_scan(&local->hw, &sdata->vif, req); trace_drv_return_int(local, ret); @@ -254,6 +292,8 @@ static inline void drv_cancel_hw_scan(struct ieee80211_local *local, { might_sleep(); + check_sdata_in_driver(sdata); + trace_drv_cancel_hw_scan(local, sdata); local->ops->cancel_hw_scan(&local->hw, &sdata->vif); trace_drv_return_void(local); @@ -269,6 +309,8 @@ drv_sched_scan_start(struct ieee80211_local *local, might_sleep(); + check_sdata_in_driver(sdata); + trace_drv_sched_scan_start(local, sdata); ret = local->ops->sched_scan_start(&local->hw, &sdata->vif, req, ies); @@ -281,6 +323,8 @@ static inline void drv_sched_scan_stop(struct ieee80211_local *local, { might_sleep(); + check_sdata_in_driver(sdata); + trace_drv_sched_scan_stop(local, sdata); local->ops->sched_scan_stop(&local->hw, &sdata->vif); trace_drv_return_void(local); @@ -377,6 +421,8 @@ static inline void drv_sta_notify(struct ieee80211_local *local, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { + check_sdata_in_driver(sdata); + trace_drv_sta_notify(local, sdata, cmd, sta); if (local->ops->sta_notify) local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta); @@ -391,6 +437,8 @@ static inline int drv_sta_add(struct ieee80211_local *local, might_sleep(); + check_sdata_in_driver(sdata); + trace_drv_sta_add(local, sdata, sta); if (local->ops->sta_add) ret = local->ops->sta_add(&local->hw, &sdata->vif, sta); @@ -406,6 +454,8 @@ static inline void drv_sta_remove(struct ieee80211_local *local, { might_sleep(); + check_sdata_in_driver(sdata); + trace_drv_sta_remove(local, sdata, sta); if (local->ops->sta_remove) local->ops->sta_remove(&local->hw, &sdata->vif, sta); @@ -421,6 +471,8 @@ static inline int drv_conf_tx(struct ieee80211_local *local, might_sleep(); + check_sdata_in_driver(sdata); + trace_drv_conf_tx(local, sdata, queue, params); if (local->ops->conf_tx) ret = local->ops->conf_tx(&local->hw, &sdata->vif, @@ -436,6 +488,8 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local, might_sleep(); + check_sdata_in_driver(sdata); + trace_drv_get_tsf(local, sdata); if (local->ops->get_tsf) ret = local->ops->get_tsf(&local->hw, &sdata->vif); @@ -449,6 +503,8 @@ static inline void drv_set_tsf(struct ieee80211_local *local, { might_sleep(); + check_sdata_in_driver(sdata); + trace_drv_set_tsf(local, sdata, tsf); if (local->ops->set_tsf) local->ops->set_tsf(&local->hw, &sdata->vif, tsf); @@ -460,6 +516,8 @@ static inline void drv_reset_tsf(struct ieee80211_local *local, { might_sleep(); + check_sdata_in_driver(sdata); + trace_drv_reset_tsf(local, sdata); if (local->ops->reset_tsf) local->ops->reset_tsf(&local->hw, &sdata->vif); @@ -489,6 +547,8 @@ static inline int drv_ampdu_action(struct ieee80211_local *local, might_sleep(); + check_sdata_in_driver(sdata); + 
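
/*
 * A simplified, user-space sketch of the guard being added throughout
 * driver-ops.h above: remember in a flag bit whether an interface has been
 * handed to the driver, and complain loudly if a driver callback is invoked
 * for an interface that is not (or no longer) in the driver.  Names are
 * illustrative, not the mac80211 ones.
 */
#include <stdio.h>

#define IFACE_IN_DRIVER  (1u << 0)

struct iface {
	const char *name;
	unsigned int flags;
};

static void check_in_driver(const struct iface *i)
{
	if (!(i->flags & IFACE_IN_DRIVER))
		fprintf(stderr, "WARN: %s used before add_interface()\n", i->name);
}

static void drv_add(struct iface *i)     { i->flags |= IFACE_IN_DRIVER; }
static void drv_remove(struct iface *i)  { check_in_driver(i); i->flags &= ~IFACE_IN_DRIVER; }
static void drv_conf_tx(struct iface *i) { check_in_driver(i); /* ... program queues ... */ }

int main(void)
{
	struct iface wlan0 = { .name = "wlan0", .flags = 0 };

	drv_conf_tx(&wlan0);	/* warns: not added to the driver yet */
	drv_add(&wlan0);
	drv_conf_tx(&wlan0);	/* fine */
	drv_remove(&wlan0);
	return 0;
}
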
trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, buf_size); if (local->ops->ampdu_action) @@ -644,6 +704,8 @@ static inline int drv_set_bitrate_mask(struct ieee80211_local *local, might_sleep(); + check_sdata_in_driver(sdata); + trace_drv_set_bitrate_mask(local, sdata, mask); if (local->ops->set_bitrate_mask) ret = local->ops->set_bitrate_mask(&local->hw, @@ -657,6 +719,8 @@ static inline void drv_set_rekey_data(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_gtk_rekey_data *data) { + check_sdata_in_driver(sdata); + trace_drv_set_rekey_data(local, sdata, data); if (local->ops->set_rekey_data) local->ops->set_rekey_data(&local->hw, &sdata->vif, data); diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index f0fb737efa86..810cfbea6ad1 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c @@ -19,7 +19,82 @@ #include "ieee80211_i.h" #include "rate.h" -void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, +bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata) +{ + const __le16 flg = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40); + if ((sdata->u.mgd.ht_capa_mask.cap_info & flg) && + !(sdata->u.mgd.ht_capa.cap_info & flg)) + return true; + return false; +} + +void __check_htcap_disable(struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta_ht_cap *ht_cap, + u16 flag) +{ + __le16 le_flag = cpu_to_le16(flag); + if (sdata->u.mgd.ht_capa_mask.cap_info & le_flag) { + if (!(sdata->u.mgd.ht_capa.cap_info & le_flag)) + ht_cap->cap &= ~flag; + } +} + +void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta_ht_cap *ht_cap) +{ + u8 *scaps = (u8 *)(&sdata->u.mgd.ht_capa.mcs.rx_mask); + u8 *smask = (u8 *)(&sdata->u.mgd.ht_capa_mask.mcs.rx_mask); + int i; + + if (sdata->vif.type != NL80211_IFTYPE_STATION) { + WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION); + return; + } + + /* NOTE: If you add more over-rides here, update register_hw + * ht_capa_mod_msk logic in main.c as well. + * And, if this method can ever change ht_cap.ht_supported, fix + * the check in ieee80211_add_ht_ie. + */ + + /* check for HT over-rides, MCS rates first. */ + for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) { + u8 m = smask[i]; + ht_cap->mcs.rx_mask[i] &= ~m; /* turn off all masked bits */ + /* Add back rates that are supported */ + ht_cap->mcs.rx_mask[i] |= (m & scaps[i]); + } + + /* Force removal of HT-40 capabilities? */ + __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SUP_WIDTH_20_40); + __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_40); + + /* Allow user to disable the max-AMSDU bit. */ + __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_MAX_AMSDU); + + /* Allow user to decrease AMPDU factor */ + if (sdata->u.mgd.ht_capa_mask.ampdu_params_info & + IEEE80211_HT_AMPDU_PARM_FACTOR) { + u8 n = sdata->u.mgd.ht_capa.ampdu_params_info + & IEEE80211_HT_AMPDU_PARM_FACTOR; + if (n < ht_cap->ampdu_factor) + ht_cap->ampdu_factor = n; + } + + /* Allow the user to increase AMPDU density. 
*/ + if (sdata->u.mgd.ht_capa_mask.ampdu_params_info & + IEEE80211_HT_AMPDU_PARM_DENSITY) { + u8 n = (sdata->u.mgd.ht_capa.ampdu_params_info & + IEEE80211_HT_AMPDU_PARM_DENSITY) + >> IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT; + if (n > ht_cap->ampdu_density) + ht_cap->ampdu_density = n; + } +} + + +void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, struct ieee80211_ht_cap *ht_cap_ie, struct ieee80211_sta_ht_cap *ht_cap) { @@ -103,6 +178,12 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, /* handle MCS rate 32 too */ if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1) ht_cap->mcs.rx_mask[32/8] |= 1; + + /* + * If user has specified capability over-rides, take care + * of that here. + */ + ieee80211_apply_htcap_overrides(sdata, ht_cap); } void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, bool tx) @@ -196,7 +277,8 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); if (sdata->vif.type == NL80211_IFTYPE_AP || - sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + sdata->vif.type == NL80211_IFTYPE_MESH_POINT) memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); else if (sdata->vif.type == NL80211_IFTYPE_STATION) memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index ede9a8b341ac..7d84af70132f 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -97,6 +97,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, /* if merging, indicate to driver that we leave the old IBSS */ if (sdata->vif.bss_conf.ibss_joined) { sdata->vif.bss_conf.ibss_joined = false; + netif_carrier_off(sdata->dev); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS); } @@ -207,6 +208,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel, mgmt, skb->len, 0, GFP_KERNEL); cfg80211_put_bss(bss); + netif_carrier_on(sdata->dev); cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL); } @@ -990,6 +992,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) } sta_info_flush(sdata->local, sdata); + netif_carrier_off(sdata->dev); /* remove beacon */ kfree(sdata->u.ibss.ie); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index ea10a51babda..762243e469df 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -24,6 +24,7 @@ #include <linux/spinlock.h> #include <linux/etherdevice.h> #include <linux/leds.h> +#include <linux/idr.h> #include <net/ieee80211_radiotap.h> #include <net/cfg80211.h> #include <net/mac80211.h> @@ -141,6 +142,7 @@ typedef unsigned __bitwise__ ieee80211_tx_result; struct ieee80211_tx_data { struct sk_buff *skb; + struct sk_buff_head skbs; struct ieee80211_local *local; struct ieee80211_sub_if_data *sdata; struct sta_info *sta; @@ -184,12 +186,15 @@ enum ieee80211_packet_rx_flags { * enum ieee80211_rx_flags - RX data flags * * @IEEE80211_RX_CMNTR: received on cooked monitor already + * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported + * to cfg80211_report_obss_beacon(). * * These flags are used across handling multiple interfaces * for a single frame. 
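
/*
 * A user-space sketch of the __check_htcap_disable() logic introduced
 * above: one word marks which capability bits the user asked to control,
 * a second word gives the requested value, and a capability bit can only
 * be cleared by the override, never forced on.  The bit names below are
 * illustrative.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CAP_HT40   (1u << 1)
#define CAP_SGI40  (1u << 6)

static uint16_t apply_override(uint16_t cap, uint16_t user_mask,
			       uint16_t user_val, uint16_t bit)
{
	if ((user_mask & bit) && !(user_val & bit))
		cap &= ~bit;		/* user explicitly disabled this bit */
	return cap;
}

int main(void)
{
	uint16_t cap = CAP_HT40 | CAP_SGI40;

	/* user configured HT40 and wants it off; SGI40 is left untouched */
	cap = apply_override(cap, CAP_HT40, 0, CAP_HT40);
	cap = apply_override(cap, CAP_HT40, 0, CAP_SGI40);

	assert(!(cap & CAP_HT40));
	assert(cap & CAP_SGI40);
	printf("caps after override: 0x%04x\n", cap);
	return 0;
}
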
*/ enum ieee80211_rx_flags { IEEE80211_RX_CMNTR = BIT(0), + IEEE80211_RX_BEACON_REPORTED = BIT(1), }; struct ieee80211_rx_data { @@ -228,6 +233,7 @@ struct beacon_data { struct ieee80211_if_ap { struct beacon_data __rcu *beacon; + struct sk_buff __rcu *probe_resp; struct list_head vlans; @@ -443,6 +449,9 @@ struct ieee80211_if_managed { */ int rssi_min_thold, rssi_max_thold; int last_ave_beacon_signal; + + struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */ + struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */ }; struct ieee80211_if_ibss { @@ -543,6 +552,7 @@ struct ieee80211_if_mesh { * associated stations and deliver multicast frames both * back to wireless media and to the local net stack. * @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume. + * @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver */ enum ieee80211_sub_if_data_flags { IEEE80211_SDATA_ALLMULTI = BIT(0), @@ -550,6 +560,7 @@ enum ieee80211_sub_if_data_flags { IEEE80211_SDATA_OPERATING_GMODE = BIT(2), IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3), IEEE80211_SDATA_DISCONNECT_RESUME = BIT(4), + IEEE80211_SDATA_IN_DRIVER = BIT(5), }; /** @@ -722,17 +733,16 @@ enum { * operating channel * @SCAN_SET_CHANNEL: Set the next channel to be scanned * @SCAN_SEND_PROBE: Send probe requests and wait for probe responses - * @SCAN_LEAVE_OPER_CHANNEL: Leave the operating channel, notify the AP - * about us leaving the channel and stop all associated STA interfaces - * @SCAN_ENTER_OPER_CHANNEL: Enter the operating channel again, notify the - * AP about us being back and restart all associated STA interfaces + * @SCAN_SUSPEND: Suspend the scan and go back to operating channel to + * send out data + * @SCAN_RESUME: Resume the scan and scan the next channel */ enum mac80211_scan_state { SCAN_DECISION, SCAN_SET_CHANNEL, SCAN_SEND_PROBE, - SCAN_LEAVE_OPER_CHANNEL, - SCAN_ENTER_OPER_CHANNEL, + SCAN_SUSPEND, + SCAN_RESUME, }; struct ieee80211_local { @@ -1012,6 +1022,9 @@ struct ieee80211_local { u32 hw_roc_cookie; bool hw_roc_for_tx; + struct idr ack_status_frames; + spinlock_t ack_status_lock; + /* dummy netdev for use w/ NAPI */ struct net_device napi_dev; @@ -1030,6 +1043,69 @@ struct ieee80211_ra_tid { u16 tid; }; +/* Parsed Information Elements */ +struct ieee802_11_elems { + u8 *ie_start; + size_t total_len; + + /* pointers to IEs */ + u8 *ssid; + u8 *supp_rates; + u8 *fh_params; + u8 *ds_params; + u8 *cf_params; + struct ieee80211_tim_ie *tim; + u8 *ibss_params; + u8 *challenge; + u8 *wpa; + u8 *rsn; + u8 *erp_info; + u8 *ext_supp_rates; + u8 *wmm_info; + u8 *wmm_param; + struct ieee80211_ht_cap *ht_cap_elem; + struct ieee80211_ht_info *ht_info_elem; + struct ieee80211_meshconf_ie *mesh_config; + u8 *mesh_id; + u8 *peering; + u8 *preq; + u8 *prep; + u8 *perr; + struct ieee80211_rann_ie *rann; + u8 *ch_switch_elem; + u8 *country_elem; + u8 *pwr_constr_elem; + u8 *quiet_elem; /* first quite element */ + u8 *timeout_int; + + /* length of them, respectively */ + u8 ssid_len; + u8 supp_rates_len; + u8 fh_params_len; + u8 ds_params_len; + u8 cf_params_len; + u8 tim_len; + u8 ibss_params_len; + u8 challenge_len; + u8 wpa_len; + u8 rsn_len; + u8 erp_info_len; + u8 ext_supp_rates_len; + u8 wmm_info_len; + u8 wmm_param_len; + u8 mesh_id_len; + u8 peering_len; + u8 preq_len; + u8 prep_len; + u8 perr_len; + u8 ch_switch_elem_len; + u8 country_elem_len; + u8 pwr_constr_elem_len; + u8 quiet_elem_len; + u8 num_of_quiet_elem; /* can be more the one */ + u8 timeout_int_len; +}; + static inline struct 
ieee80211_local *hw_to_local( struct ieee80211_hw *hw) { @@ -1179,7 +1255,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev); /* HT */ -void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband, +bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata); +void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, + struct ieee80211_sta_ht_cap *ht_cap); +void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, struct ieee80211_ht_cap *ht_cap_ie, struct ieee80211_sta_ht_cap *ht_cap); void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, @@ -1334,6 +1414,12 @@ void ieee80211_recalc_smps(struct ieee80211_local *local); size_t ieee80211_ie_split(const u8 *ies, size_t ielen, const u8 *ids, int n_ids, size_t offset); size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset); +u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, + u16 cap); +u8 *ieee80211_ie_build_ht_info(u8 *pos, + struct ieee80211_sta_ht_cap *ht_cap, + struct ieee80211_channel *channel, + enum nl80211_channel_type channel_type); /* internal work items */ void ieee80211_work_init(struct ieee80211_local *local); @@ -1362,6 +1448,8 @@ ieee80211_get_channel_mode(struct ieee80211_local *local, bool ieee80211_set_channel_type(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum nl80211_channel_type chantype); +enum nl80211_channel_type +ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info); #ifdef CONFIG_MAC80211_NOINLINE #define debug_noinline noinline diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 30d73552e9ab..b34ca0cbdf6c 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -188,11 +188,22 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) if (!is_valid_ether_addr(sdata->u.wds.remote_addr)) return -ENOLINK; break; - case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_AP_VLAN: { + struct ieee80211_sub_if_data *master; + if (!sdata->bss) return -ENOLINK; + list_add(&sdata->u.vlan.list, &sdata->bss->vlans); + + master = container_of(sdata->bss, + struct ieee80211_sub_if_data, u.ap); + sdata->control_port_protocol = + master->control_port_protocol; + sdata->control_port_no_encrypt = + master->control_port_no_encrypt; break; + } case NL80211_IFTYPE_AP: sdata->bss = &sdata->u.ap; break; @@ -265,7 +276,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) break; default: if (coming_up) { - res = drv_add_interface(local, &sdata->vif); + res = drv_add_interface(local, sdata); if (res) goto err_stop; } @@ -282,10 +293,18 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) changed |= ieee80211_reset_erp_info(sdata); ieee80211_bss_info_change_notify(sdata, changed); - if (sdata->vif.type == NL80211_IFTYPE_STATION) + if (sdata->vif.type == NL80211_IFTYPE_STATION || + sdata->vif.type == NL80211_IFTYPE_ADHOC) netif_carrier_off(dev); else netif_carrier_on(dev); + + /* + * set default queue parameters so drivers don't + * need to initialise the hardware if the hardware + * doesn't start up with sane defaults + */ + ieee80211_set_wmm_default(sdata); } set_bit(SDATA_STATE_RUNNING, &sdata->state); @@ -329,15 +348,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) if (coming_up) local->open_count++; - if (hw_reconf_flags) { + if (hw_reconf_flags) ieee80211_hw_config(local, hw_reconf_flags); - /* - * set 
default queue parameters so drivers don't - * need to initialise the hardware if the hardware - * doesn't start up with sane defaults - */ - ieee80211_set_wmm_default(sdata); - } ieee80211_recalc_ps(local, -1); @@ -345,7 +357,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up) return 0; err_del_interface: - drv_remove_interface(local, &sdata->vif); + drv_remove_interface(local, sdata); err_stop: if (!local->open_count) drv_stop(local); @@ -450,15 +462,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, struct ieee80211_sub_if_data *vlan, *tmpsdata; struct beacon_data *old_beacon = rtnl_dereference(sdata->u.ap.beacon); + struct sk_buff *old_probe_resp = + rtnl_dereference(sdata->u.ap.probe_resp); /* sdata_running will return false, so this will disable */ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); - /* remove beacon */ + /* remove beacon and probe response */ RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); + RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL); synchronize_rcu(); kfree(old_beacon); + kfree_skb(old_probe_resp); /* down all dependent devices, that is VLANs */ list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, @@ -520,7 +536,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, ieee80211_free_keys(sdata); if (going_down) - drv_remove_interface(local, &sdata->vif); + drv_remove_interface(local, sdata); } sdata->bss = NULL; diff --git a/net/mac80211/key.c b/net/mac80211/key.c index fb02ea52d2c2..87a89741432d 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -134,9 +134,13 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || - (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))) + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) sdata->crypto_tx_tailroom_needed_cnt--; + WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)); + return 0; } @@ -179,7 +183,8 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) sdata = key->sdata; if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || - (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV))) + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) increment_tailroom_need_count(sdata); if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) diff --git a/net/mac80211/main.c b/net/mac80211/main.c index d999bf3b84e1..dddedfad5404 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -100,7 +100,7 @@ static void ieee80211_reconfig_filter(struct work_struct *work) */ bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local) { - struct ieee80211_channel *chan, *scan_chan; + struct ieee80211_channel *chan; enum nl80211_channel_type channel_type; /* This logic needs to match logic in ieee80211_hw_config */ @@ -114,7 +114,7 @@ bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local) else channel_type = NL80211_CHAN_NO_HT; } else if (local->tmp_channel) { - chan = scan_chan = local->tmp_channel; + chan = local->tmp_channel; channel_type = local->tmp_channel_type; } else { chan = local->oper_channel; @@ -126,8 +126,8 @@ bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local) return false; /* Check current hardware-config against oper_channel. 
*/ - if ((local->oper_channel != local->hw.conf.channel) || - (local->_oper_channel_type != local->hw.conf.channel_type)) + if (local->oper_channel != local->hw.conf.channel || + local->_oper_channel_type != local->hw.conf.channel_type) return false; return true; @@ -135,7 +135,7 @@ bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local) int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) { - struct ieee80211_channel *chan, *scan_chan; + struct ieee80211_channel *chan; int ret = 0; int power; enum nl80211_channel_type channel_type; @@ -143,14 +143,12 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) might_sleep(); - scan_chan = local->scan_channel; - /* If this off-channel logic ever changes, ieee80211_on_oper_channel * may need to change as well. */ offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; - if (scan_chan) { - chan = scan_chan; + if (local->scan_channel) { + chan = local->scan_channel; /* If scanning on oper channel, use whatever channel-type * is currently in use. */ @@ -159,7 +157,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) else channel_type = NL80211_CHAN_NO_HT; } else if (local->tmp_channel) { - chan = scan_chan = local->tmp_channel; + chan = local->tmp_channel; channel_type = local->tmp_channel_type; } else { chan = local->oper_channel; @@ -560,6 +558,19 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = { }, }; +static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = { + .ampdu_params_info = IEEE80211_HT_AMPDU_PARM_FACTOR | + IEEE80211_HT_AMPDU_PARM_DENSITY, + + .cap_info = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40 | + IEEE80211_HT_CAP_MAX_AMSDU | + IEEE80211_HT_CAP_SGI_40), + .mcs = { + .rx_mask = { 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, }, + }, +}; + struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, const struct ieee80211_ops *ops) { @@ -595,7 +606,12 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, wiphy->flags |= WIPHY_FLAG_NETNS_OK | WIPHY_FLAG_4ADDR_AP | - WIPHY_FLAG_4ADDR_STATION; + WIPHY_FLAG_4ADDR_STATION | + WIPHY_FLAG_REPORTS_OBSS | + WIPHY_FLAG_OFFCHAN_TX | + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; + + wiphy->features = NL80211_FEATURE_SK_TX_STATUS; if (!ops->set_key) wiphy->flags |= WIPHY_FLAG_IBSS_RSN; @@ -608,7 +624,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN); - BUG_ON(!ops->tx); + BUG_ON(!ops->tx && !ops->tx_frags); BUG_ON(!ops->start); BUG_ON(!ops->stop); BUG_ON(!ops->config); @@ -628,6 +644,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, local->user_power_level = -1; local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES; local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN; + wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask; INIT_LIST_HEAD(&local->interfaces); @@ -670,6 +687,11 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, INIT_WORK(&local->sched_scan_stopped_work, ieee80211_sched_scan_stopped_work); + spin_lock_init(&local->ack_status_lock); + idr_init(&local->ack_status_frames); + /* preallocate at least one entry */ + idr_pre_get(&local->ack_status_frames, GFP_KERNEL); + sta_info_init(local); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { @@ -1045,6 +1067,13 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) } EXPORT_SYMBOL(ieee80211_unregister_hw); +static int ieee80211_free_ack_frame(int id, void *p, void *data) +{ + WARN_ONCE(1, "Have pending ack frames!\n"); + 
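
/*
 * A simplified, user-space stand-in for the ack_status_frames IDR set up
 * above: hand out small integer cookies for frames that still expect a
 * TX-status report, look them up again by cookie, and warn at teardown if
 * anything is left pending.  A real IDR grows dynamically; the fixed table
 * here is only for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_PENDING 32

static void *pending[MAX_PENDING];

static int cookie_alloc(void *frame)
{
	for (int id = 0; id < MAX_PENDING; id++) {
		if (!pending[id]) {
			pending[id] = frame;
			return id;
		}
	}
	return -1;			/* table full */
}

static void *cookie_remove(int id)
{
	void *frame = pending[id];

	pending[id] = NULL;
	return frame;
}

static void cookie_table_destroy(void)
{
	for (int id = 0; id < MAX_PENDING; id++) {
		if (!pending[id])
			continue;
		fprintf(stderr, "WARN: frame with cookie %d still pending\n", id);
		free(cookie_remove(id));
	}
}

int main(void)
{
	int c1 = cookie_alloc(malloc(64));	/* sent, waiting for status */
	int c2 = cookie_alloc(malloc(64));

	free(cookie_remove(c1));	/* status arrived for the first frame */
	(void)c2;			/* second one deliberately left pending */
	cookie_table_destroy();		/* warns about the leftover cookie */
	return 0;
}
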
kfree_skb(p); + return 0; +} + void ieee80211_free_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); @@ -1055,6 +1084,10 @@ void ieee80211_free_hw(struct ieee80211_hw *hw) if (local->wiphy_ciphers_allocated) kfree(local->hw.wiphy->cipher_suites); + idr_for_each(&local->ack_status_frames, + ieee80211_free_ack_frame, NULL); + idr_destroy(&local->ack_status_frames); + wiphy_free(local->hw.wiphy); } EXPORT_SYMBOL(ieee80211_free_hw); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index a7078fdba8ca..ee82d2f7f114 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -76,6 +76,7 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data) bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; + struct ieee80211_local *local = sdata->local; /* * As support for each feature is added, check for matching @@ -87,15 +88,23 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat * - MDA enabled * - Power management control on fc */ - if (ifmsh->mesh_id_len == ie->mesh_id_len && - memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && - (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) && - (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) && - (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) && - (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) && - (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)) - return true; - + if (!(ifmsh->mesh_id_len == ie->mesh_id_len && + memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 && + (ifmsh->mesh_pp_id == ie->mesh_config->meshconf_psel) && + (ifmsh->mesh_pm_id == ie->mesh_config->meshconf_pmetric) && + (ifmsh->mesh_cc_id == ie->mesh_config->meshconf_congest) && + (ifmsh->mesh_sp_id == ie->mesh_config->meshconf_synch) && + (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))) + goto mismatch; + + /* disallow peering with mismatched channel types for now */ + if (ie->ht_info_elem && + (local->_oper_channel_type != + ieee80211_ht_info_to_channel_type(ie->ht_info_elem))) + goto mismatch; + + return true; +mismatch: return false; } @@ -341,6 +350,49 @@ int mesh_add_ds_params_ie(struct sk_buff *skb, return 0; } +int mesh_add_ht_cap_ie(struct sk_buff *skb, + struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_supported_band *sband; + u8 *pos; + + sband = local->hw.wiphy->bands[local->oper_channel->band]; + if (!sband->ht_cap.ht_supported || + local->_oper_channel_type == NL80211_CHAN_NO_HT) + return 0; + + if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap)) + return -ENOMEM; + + pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_cap)); + ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, sband->ht_cap.cap); + + return 0; +} + +int mesh_add_ht_info_ie(struct sk_buff *skb, + struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_channel *channel = local->oper_channel; + enum nl80211_channel_type channel_type = local->_oper_channel_type; + struct ieee80211_supported_band *sband = + local->hw.wiphy->bands[channel->band]; + struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap; + u8 *pos; + + if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT) + return 0; + + if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_info)) + return -ENOMEM; + + pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_info)); + ieee80211_ie_build_ht_info(pos, ht_cap, 
channel, channel_type); + + return 0; +} static void ieee80211_mesh_path_timer(unsigned long data) { struct ieee80211_sub_if_data *sdata = diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 8c00e2d1d636..622cc96eb4de 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -31,6 +31,8 @@ * @MESH_PATH_FIXED: the mesh path has been manually set and should not be * modified * @MESH_PATH_RESOLVED: the mesh path can has been resolved + * @MESH_PATH_REQ_QUEUED: there is an unsent path request for this destination + * already queued up, waiting for the discovery process to start. * * MESH_PATH_RESOLVED is used by the mesh path timer to * decide when to stop or cancel the mesh path discovery. @@ -41,6 +43,7 @@ enum mesh_path_flags { MESH_PATH_SN_VALID = BIT(2), MESH_PATH_FIXED = BIT(3), MESH_PATH_RESOLVED = BIT(4), + MESH_PATH_REQ_QUEUED = BIT(5), }; /** @@ -212,6 +215,10 @@ int mesh_add_vendor_ies(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata); int mesh_add_ds_params_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata); +int mesh_add_ht_cap_ie(struct sk_buff *skb, + struct ieee80211_sub_if_data *sdata); +int mesh_add_ht_info_ie(struct sk_buff *skb, + struct ieee80211_sub_if_data *sdata); void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); void ieee80211s_init(void); diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 174040a42887..ce3db2735d7c 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -113,20 +113,20 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags, struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; - struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); + struct sk_buff *skb; struct ieee80211_mgmt *mgmt; - u8 *pos; - int ie_len; + u8 *pos, ie_len; + int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) + + sizeof(mgmt->u.action.u.mesh_action); + skb = dev_alloc_skb(local->hw.extra_tx_headroom + + hdr_len + + 2 + 37); /* max HWMP IE */ if (!skb) return -1; skb_reserve(skb, local->hw.extra_tx_headroom); - /* 25 is the size of the common mgmt part (24) plus the size of the - * common action part (1) - */ - mgmt = (struct ieee80211_mgmt *) - skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); - memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); + mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); + memset(mgmt, 0, hdr_len); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); @@ -240,20 +240,20 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; - struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); + struct sk_buff *skb; struct ieee80211_mgmt *mgmt; - u8 *pos; - int ie_len; + u8 *pos, ie_len; + int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) + + sizeof(mgmt->u.action.u.mesh_action); + skb = dev_alloc_skb(local->hw.extra_tx_headroom + + hdr_len + + 2 + 15 /* PERR IE */); if (!skb) return -1; skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom); - /* 25 is the size of the common mgmt part (24) plus the size of the - * common action part (1) - */ - mgmt = (struct ieee80211_mgmt *) - skb_put(skb, 25 + sizeof(mgmt->u.action.u.mesh_action)); - memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.mesh_action)); + mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); + memset(mgmt, 
0, hdr_len); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); @@ -867,9 +867,20 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) return; } + spin_lock(&mpath->state_lock); + if (mpath->flags & MESH_PATH_REQ_QUEUED) { + spin_unlock(&mpath->state_lock); + spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); + kfree(preq_node); + return; + } + memcpy(preq_node->dst, mpath->dst, ETH_ALEN); preq_node->flags = flags; + mpath->flags |= MESH_PATH_REQ_QUEUED; + spin_unlock(&mpath->state_lock); + list_add_tail(&preq_node->list, &ifmsh->preq_queue.list); ++ifmsh->preq_queue_len; spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); @@ -921,6 +932,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata) goto enddiscovery; spin_lock_bh(&mpath->state_lock); + mpath->flags &= ~MESH_PATH_REQ_QUEUED; if (preq_node->flags & PREQ_Q_F_START) { if (mpath->flags & MESH_PATH_RESOLVING) { spin_unlock_bh(&mpath->state_lock); @@ -1028,11 +1040,11 @@ int mesh_nexthop_lookup(struct sk_buff *skb, mesh_queue_preq(mpath, PREQ_Q_F_START); } - if (skb_queue_len(&mpath->frame_queue) >= - MESH_FRAME_QUEUE_LEN) + if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN) skb_to_free = skb_dequeue(&mpath->frame_queue); info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; + ieee80211_set_qos_hdr(sdata, skb); skb_queue_tail(&mpath->frame_queue, skb); if (skb_to_free) mesh_path_discard_frame(skb_to_free, sdata); @@ -1061,6 +1073,7 @@ void mesh_path_timer(unsigned long data) } else if (mpath->discovery_retries < max_preq_retries(sdata)) { ++mpath->discovery_retries; mpath->discovery_timeout *= 2; + mpath->flags &= ~MESH_PATH_REQ_QUEUED; spin_unlock_bh(&mpath->state_lock); mesh_queue_preq(mpath, 0); } else { diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 7f54c5042235..7bd2a76aef0e 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -69,8 +69,6 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void) lockdep_is_held(&pathtbl_resize_lock)); } -static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath); - /* * CAREFUL -- "tbl" must not be an expression, * in particular not an rcu_dereference(), since @@ -213,7 +211,6 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) struct ieee80211_hdr *hdr; struct sk_buff_head tmpq; unsigned long flags; - struct ieee80211_sub_if_data *sdata = mpath->sdata; rcu_assign_pointer(mpath->next_hop, sta); @@ -224,8 +221,6 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta) while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) { hdr = (struct ieee80211_hdr *) skb->data; memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN); - skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb)); - ieee80211_set_qos_hdr(sdata, skb); __skb_queue_tail(&tmpq, skb); } @@ -423,21 +418,18 @@ static void mesh_gate_node_reclaim(struct rcu_head *rp) } /** - * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates - * @mesh_tbl: table which contains known_gates list - * @mpath: mpath to known mesh gate - * - * Returns: 0 on success - * + * mesh_path_add_gate - add the given mpath to a mesh gate to our path table + * @mpath: gate path to add to table */ -static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath) +int mesh_path_add_gate(struct mesh_path *mpath) { + struct mesh_table *tbl; struct mpath_node *gate, *new_gate; struct hlist_node *n; int err; rcu_read_lock(); - tbl = rcu_dereference(tbl); + 
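
/*
 * A user-space sketch of the MESH_PATH_REQ_QUEUED handling added above:
 * a per-destination flag keeps the same path-discovery request from being
 * queued more than once, and is cleared once the queued request is taken
 * off the queue.  The structures here are illustrative only.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define PATH_REQ_QUEUED  (1u << 0)

struct toy_path {
	unsigned int flags;
	unsigned int queued_reqs;	/* stands in for the PREQ queue */
};

static bool queue_preq(struct toy_path *p)
{
	if (p->flags & PATH_REQ_QUEUED)
		return false;		/* already queued, drop the duplicate */
	p->flags |= PATH_REQ_QUEUED;
	p->queued_reqs++;
	return true;
}

static void process_preq(struct toy_path *p)
{
	p->flags &= ~PATH_REQ_QUEUED;	/* discovery started, allow a new one */
	p->queued_reqs--;
}

int main(void)
{
	struct toy_path path = { 0, 0 };

	assert(queue_preq(&path));	/* first request is queued */
	assert(!queue_preq(&path));	/* duplicate is rejected */
	process_preq(&path);
	assert(queue_preq(&path));	/* after processing, a new request is allowed */
	printf("queued requests: %u\n", path.queued_reqs);
	return 0;
}
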
tbl = rcu_dereference(mesh_paths); hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list) if (gate->mpath == mpath) { @@ -481,8 +473,6 @@ static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) struct mpath_node *gate; struct hlist_node *p, *q; - tbl = rcu_dereference(tbl); - hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list) if (gate->mpath == mpath) { spin_lock_bh(&tbl->gates_lock); @@ -501,16 +491,6 @@ static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath) } /** - * - * mesh_path_add_gate - add the given mpath to a mesh gate to our path table - * @mpath: gate path to add to table - */ -int mesh_path_add_gate(struct mesh_path *mpath) -{ - return mesh_gate_add(mesh_paths, mpath); -} - -/** * mesh_gate_num - number of gates known to this interface * @sdata: subif data */ diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 7e57f5d07f66..7314372b12ba 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -80,11 +80,15 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta) * on it in the lifecycle management section! */ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, - u8 *hw_addr, u32 rates) + u8 *hw_addr, u32 rates, + struct ieee802_11_elems *elems) { struct ieee80211_local *local = sdata->local; + struct ieee80211_supported_band *sband; struct sta_info *sta; + sband = local->hw.wiphy->bands[local->oper_channel->band]; + if (local->num_sta >= MESH_MAX_PLINKS) return NULL; @@ -96,6 +100,10 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, set_sta_flag(sta, WLAN_STA_AUTHORIZED); set_sta_flag(sta, WLAN_STA_WME); sta->sta.supp_rates[local->hw.conf.channel->band] = rates; + if (elems->ht_cap_elem) + ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, + elems->ht_cap_elem, + &sta->sta.ht_cap); rate_control_rate_init(sta); return sta; @@ -153,23 +161,31 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, enum ieee80211_self_protected_actioncode action, u8 *da, __le16 llid, __le16 plid, __le16 reason) { struct ieee80211_local *local = sdata->local; - struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 + - sdata->u.mesh.ie_len); + struct sk_buff *skb; struct ieee80211_mgmt *mgmt; bool include_plid = false; - int ie_len = 4; u16 peering_proto = 0; - u8 *pos; - + u8 *pos, ie_len = 4; + int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) + + sizeof(mgmt->u.action.u.self_prot); + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + + hdr_len + + 2 + /* capability info */ + 2 + /* AID */ + 2 + 8 + /* supported rates */ + 2 + (IEEE80211_MAX_SUPP_RATES - 8) + + 2 + sdata->u.mesh.mesh_id_len + + 2 + sizeof(struct ieee80211_meshconf_ie) + + 2 + sizeof(struct ieee80211_ht_cap) + + 2 + sizeof(struct ieee80211_ht_info) + + 2 + 8 + /* peering IE */ + sdata->u.mesh.ie_len); if (!skb) return -1; skb_reserve(skb, local->hw.extra_tx_headroom); - /* 25 is the size of the common mgmt part (24) plus the size of the - * common action part (1) - */ - mgmt = (struct ieee80211_mgmt *) - skb_put(skb, 25 + sizeof(mgmt->u.action.u.self_prot)); - memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.self_prot)); + mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); + memset(mgmt, 0, hdr_len); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); memcpy(mgmt->da, da, ETH_ALEN); @@ -235,6 +251,13 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, memcpy(pos, &reason, 2); pos += 
2; } + + if (action != WLAN_SP_MESH_PEERING_CLOSE) { + if (mesh_add_ht_cap_ie(skb, sdata) || + mesh_add_ht_info_ie(skb, sdata)) + return -1; + } + if (mesh_add_vendor_ies(skb, sdata)) return -1; @@ -261,7 +284,7 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates, elems->ie_start, elems->total_len, GFP_KERNEL); else - sta = mesh_plink_alloc(sdata, hw_addr, rates); + sta = mesh_plink_alloc(sdata, hw_addr, rates, elems); if (!sta) return; if (sta_info_insert_rcu(sta)) { @@ -552,7 +575,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m } rates = ieee80211_sta_get_rates(local, &elems, rx_status->band); - sta = mesh_plink_alloc(sdata, mgmt->sa, rates); + sta = mesh_plink_alloc(sdata, mgmt->sa, rates, &elems); if (!sta) { mpl_dbg("Mesh plink error: plink table full\n"); return; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index b1b1bb368f70..09019d135942 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -209,6 +209,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata, channel_type = NL80211_CHAN_HT20; if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) && + !ieee80111_cfg_override_disables_ht40(sdata) && (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) { switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { @@ -1120,6 +1121,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, /* on the next assoc, re-program HT parameters */ sdata->ht_opmode_valid = false; + memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa)); + memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask)); local->power_constr_level = 0; @@ -1359,9 +1362,6 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata) ieee80211_set_disassoc(sdata, true, true); mutex_unlock(&ifmgd->mtx); - mutex_lock(&local->mtx); - ieee80211_recalc_idle(local); - mutex_unlock(&local->mtx); /* * must be outside lock due to cfg80211, * but that's not a problem. @@ -1370,6 +1370,10 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata) IEEE80211_STYPE_DEAUTH, WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, NULL, true); + + mutex_lock(&local->mtx); + ieee80211_recalc_idle(local); + mutex_unlock(&local->mtx); } void ieee80211_beacon_connection_loss_work(struct work_struct *work) @@ -1468,6 +1472,47 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, return RX_MGMT_CFG80211_DISASSOC; } +static void ieee80211_get_rates(struct ieee80211_supported_band *sband, + u8 *supp_rates, unsigned int supp_rates_len, + u32 *rates, u32 *basic_rates, + bool *have_higher_than_11mbit, + int *min_rate, int *min_rate_index) +{ + int i, j; + + for (i = 0; i < supp_rates_len; i++) { + int rate = (supp_rates[i] & 0x7f) * 5; + bool is_basic = !!(supp_rates[i] & 0x80); + + if (rate > 110) + *have_higher_than_11mbit = true; + + /* + * BSS_MEMBERSHIP_SELECTOR_HT_PHY is defined in 802.11n-2009 + * 7.3.2.2 as a magic value instead of a rate. Hence, skip it. + * + * Note: Even through the membership selector and the basic + * rate flag share the same bit, they are not exactly + * the same. 
+ */ + if (!!(supp_rates[i] & 0x80) && + (supp_rates[i] & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY) + continue; + + for (j = 0; j < sband->n_bitrates; j++) { + if (sband->bitrates[j].bitrate == rate) { + *rates |= BIT(j); + if (is_basic) + *basic_rates |= BIT(j); + if (rate < *min_rate) { + *min_rate = rate; + *min_rate_index = j; + } + break; + } + } + } +} static bool ieee80211_assoc_success(struct ieee80211_work *wk, struct ieee80211_mgmt *mgmt, size_t len) @@ -1484,7 +1529,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk, struct ieee802_11_elems elems; struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; u32 changed = 0; - int i, j, err; + int err; bool have_higher_than_11mbit = false; u16 ap_ht_cap_flags; int min_rate = INT_MAX, min_rate_index = -1; @@ -1534,7 +1579,6 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk, set_sta_flag(sta, WLAN_STA_AUTH); set_sta_flag(sta, WLAN_STA_ASSOC); - set_sta_flag(sta, WLAN_STA_ASSOC_AP); if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT)) set_sta_flag(sta, WLAN_STA_AUTHORIZED); @@ -1542,47 +1586,14 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk, basic_rates = 0; sband = local->hw.wiphy->bands[wk->chan->band]; - for (i = 0; i < elems.supp_rates_len; i++) { - int rate = (elems.supp_rates[i] & 0x7f) * 5; - bool is_basic = !!(elems.supp_rates[i] & 0x80); - - if (rate > 110) - have_higher_than_11mbit = true; - - for (j = 0; j < sband->n_bitrates; j++) { - if (sband->bitrates[j].bitrate == rate) { - rates |= BIT(j); - if (is_basic) - basic_rates |= BIT(j); - if (rate < min_rate) { - min_rate = rate; - min_rate_index = j; - } - break; - } - } - } + ieee80211_get_rates(sband, elems.supp_rates, elems.supp_rates_len, + &rates, &basic_rates, &have_higher_than_11mbit, + &min_rate, &min_rate_index); - for (i = 0; i < elems.ext_supp_rates_len; i++) { - int rate = (elems.ext_supp_rates[i] & 0x7f) * 5; - bool is_basic = !!(elems.ext_supp_rates[i] & 0x80); - - if (rate > 110) - have_higher_than_11mbit = true; - - for (j = 0; j < sband->n_bitrates; j++) { - if (sband->bitrates[j].bitrate == rate) { - rates |= BIT(j); - if (is_basic) - basic_rates |= BIT(j); - if (rate < min_rate) { - min_rate = rate; - min_rate_index = j; - } - break; - } - } - } + ieee80211_get_rates(sband, elems.ext_supp_rates, + elems.ext_supp_rates_len, &rates, &basic_rates, + &have_higher_than_11mbit, + &min_rate, &min_rate_index); /* * some buggy APs don't advertise basic_rates. use the lowest @@ -1605,7 +1616,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk, sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE; if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) - ieee80211_ht_cap_ie_to_sta_ht_cap(sband, + ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, elems.ht_cap_elem, &sta->sta.ht_cap); ap_ht_cap_flags = sta->sta.ht_cap.cap; @@ -1974,7 +1985,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; - ieee80211_ht_cap_ie_to_sta_ht_cap(sband, + ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, elems.ht_cap_elem, &sta->sta.ht_cap); ap_ht_cap_flags = sta->sta.ht_cap.cap; @@ -2128,9 +2139,6 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, ieee80211_set_disassoc(sdata, true, true); mutex_unlock(&ifmgd->mtx); - mutex_lock(&local->mtx); - ieee80211_recalc_idle(local); - mutex_unlock(&local->mtx); /* * must be outside lock due to cfg80211, * but that's not a problem. 
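
The ieee80211_get_rates() helper added above folds the two identical supported-rates / extended-supported-rates loops into one place. For readers unfamiliar with the IE encoding it relies on: each rate octet carries the rate in 500 kbit/s units in its low seven bits (so (r & 0x7f) * 5 yields hundreds of kbit/s, and 110 corresponds to 11 Mbit/s), the high bit flags a basic rate, and the value 127 with the high bit set is the HT PHY membership selector rather than a real rate, which is why the helper skips it. The following standalone C sketch decodes an example Supported Rates element the same way; it is illustrative userspace code, not kernel code, the IE bytes are made-up sample data, and BSS_MEMBERSHIP_SELECTOR_HT_PHY is assumed to be 127 as in ieee80211.h.

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127

	int main(void)
	{
		/* Example Supported Rates IE payload: 1, 2, 5.5 and 11 Mbit/s
		 * as basic rates, 6 Mbit/s non-basic, plus the HT PHY
		 * membership selector octet. */
		uint8_t supp_rates[] = { 0x82, 0x84, 0x8b, 0x96, 0x0c,
					 0x80 | BSS_MEMBERSHIP_SELECTOR_HT_PHY };
		size_t i;

		for (i = 0; i < sizeof(supp_rates); i++) {
			int rate = (supp_rates[i] & 0x7f) * 5; /* 100 kbit/s units */
			int is_basic = !!(supp_rates[i] & 0x80);

			/* The membership selector shares the basic-rate bit but
			 * is not a rate; skip it, as ieee80211_get_rates() does. */
			if (is_basic &&
			    (supp_rates[i] & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY)
				continue;

			printf("%d.%d Mbit/s%s%s\n", rate / 10, rate % 10,
			       is_basic ? " (basic)" : "",
			       rate > 110 ? " (above 11 Mbit/s)" : "");
		}
		return 0;
	}

Compiled on its own, this prints the four 802.11b rates as basic, the 6 Mbit/s rate as non-basic, and silently drops the membership selector octet, mirroring the skip performed in the hunk above.
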
@@ -2138,6 +2146,11 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH, reason, NULL, true); + + mutex_lock(&local->mtx); + ieee80211_recalc_idle(local); + mutex_unlock(&local->mtx); + mutex_lock(&ifmgd->mtx); } @@ -2632,6 +2645,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, ifmgd->flags |= IEEE80211_STA_DISABLE_11N; + if (req->flags & ASSOC_REQ_DISABLE_HT) + ifmgd->flags |= IEEE80211_STA_DISABLE_11N; + + memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa)); + memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask, + sizeof(ifmgd->ht_capa_mask)); + if (req->ie && req->ie_len) { memcpy(wk->ie, req->ie, req->ie_len); wk->ie_len = req->ie_len; diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index 3d414411a96e..ebd8cccac8f2 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -212,8 +212,6 @@ static void ieee80211_hw_roc_start(struct work_struct *work) return; } - ieee80211_recalc_idle(local); - if (local->hw_roc_skb) { sdata = IEEE80211_DEV_TO_SUB_IF(local->hw_roc_dev); ieee80211_tx_skb(sdata, local->hw_roc_skb); @@ -227,6 +225,8 @@ static void ieee80211_hw_roc_start(struct work_struct *work) GFP_KERNEL); } + ieee80211_recalc_idle(local); + mutex_unlock(&local->mtx); } diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 9ee7164b207c..596efaf50e09 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -125,7 +125,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); - drv_remove_interface(local, &sdata->vif); + drv_remove_interface(local, sdata); } /* stop hardware - this must stop RX */ diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 58a89554b788..b39dda523f39 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -334,8 +334,8 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, static void -calc_rate_durations(struct minstrel_sta_info *mi, struct ieee80211_local *local, - struct minstrel_rate *d, struct ieee80211_rate *rate) +calc_rate_durations(struct ieee80211_local *local, struct minstrel_rate *d, + struct ieee80211_rate *rate) { int erp = !!(rate->flags & IEEE80211_RATE_ERP_G); @@ -402,8 +402,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband, mr->rix = i; mr->bitrate = sband->bitrates[i].bitrate / 5; - calc_rate_durations(mi, local, mr, - &sband->bitrates[i]); + calc_rate_durations(local, mr, &sband->bitrates[i]); /* calculate maximum number of retransmissions before * fallback (based on maximum segment size) */ diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index cdb28535716b..ff5f7b84e825 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c @@ -36,8 +36,17 @@ /* Transmit duration for the raw data part of an average sized packet */ #define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) +/* + * Define group sort order: HT40 -> SGI -> #streams + */ +#define GROUP_IDX(_streams, _sgi, _ht40) \ + MINSTREL_MAX_STREAMS * 2 * _ht40 + \ + MINSTREL_MAX_STREAMS * _sgi + \ + _streams - 1 + /* MCS rate information for an MCS group */ -#define MCS_GROUP(_streams, _sgi, _ht40) { \ +#define MCS_GROUP(_streams, _sgi, _ht40) \ + [GROUP_IDX(_streams, _sgi, _ht40)] = { \ .streams = _streams, \ .flags = \ (_sgi ? 
IEEE80211_TX_RC_SHORT_GI : 0) | \ @@ -58,6 +67,9 @@ * To enable sufficiently targeted rate sampling, MCS rates are divided into * groups, based on the number of streams and flags (HT40, SGI) that they * use. + * + * Sortorder has to be fixed for GROUP_IDX macro to be applicable: + * HT40 -> SGI -> #streams */ const struct mcs_group minstrel_mcs_groups[] = { MCS_GROUP(1, 0, 0), @@ -102,21 +114,9 @@ minstrel_ewma(int old, int new, int weight) static int minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate) { - int streams = (rate->idx / MCS_GROUP_RATES) + 1; - u32 flags = IEEE80211_TX_RC_SHORT_GI | IEEE80211_TX_RC_40_MHZ_WIDTH; - int i; - - for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) { - if (minstrel_mcs_groups[i].streams != streams) - continue; - if (minstrel_mcs_groups[i].flags != (rate->flags & flags)) - continue; - - return i; - } - - WARN_ON(1); - return 0; + return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1, + !!(rate->flags & IEEE80211_TX_RC_SHORT_GI), + !!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)); } static inline struct minstrel_rate_stats * @@ -130,7 +130,7 @@ minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index) * Recalculate success probabilities and counters for a rate using EWMA */ static void -minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr) +minstrel_calc_rate_ewma(struct minstrel_rate_stats *mr) { if (unlikely(mr->attempts > 0)) { mr->sample_skipped = 0; @@ -156,8 +156,7 @@ minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr * the expected number of retransmissions and their expected length */ static void -minstrel_ht_calc_tp(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, - int group, int rate) +minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate) { struct minstrel_rate_stats *mr; unsigned int usecs; @@ -226,8 +225,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) mr = &mg->rates[i]; mr->retry_updated = false; index = MCS_GROUP_RATES * group + i; - minstrel_calc_rate_ewma(mp, mr); - minstrel_ht_calc_tp(mp, mi, group, i); + minstrel_calc_rate_ewma(mr); + minstrel_ht_calc_tp(mi, group, i); if (!mr->cur_tp) continue; @@ -300,10 +299,10 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) static bool minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate) { - if (!rate->count) + if (rate->idx < 0) return false; - if (rate->idx < 0) + if (!rate->count) return false; return !!(rate->flags & IEEE80211_TX_RC_MCS); @@ -357,7 +356,7 @@ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx, } static void -minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, struct sk_buff *skb) +minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct sta_info *sta = container_of(pubsta, struct sta_info, sta); @@ -455,7 +454,7 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) { minstrel_ht_update_stats(mp, mi); if (!(info->flags & IEEE80211_TX_CTL_AMPDU)) - minstrel_aggr_check(mp, sta, skb); + minstrel_aggr_check(sta, skb); } } @@ -515,7 +514,6 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, static void minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, struct ieee80211_tx_rate *rate, int index, - struct ieee80211_tx_rate_control *txrc, bool 
sample, bool rtscts) { const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; @@ -628,11 +626,11 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, if (sample_idx >= 0) { sample = true; minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx, - txrc, true, false); + true, false); info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; } else { minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate, - txrc, false, false); + false, false); } if (mp->hw->max_rates >= 3) { @@ -643,13 +641,13 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, */ if (sample_idx >= 0) minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate, - txrc, false, false); + false, false); else minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2, - txrc, false, true); + false, true); minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, - txrc, false, !sample); + false, !sample); ar[3].count = 0; ar[3].idx = -1; @@ -660,7 +658,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, * max_tp_rate -> max_prob_rate by default. */ minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_prob_rate, - txrc, false, !sample); + false, !sample); ar[2].count = 0; ar[2].idx = -1; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index fb123e2e081a..d1a8869fe05d 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -748,10 +748,11 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx) struct ieee80211_local *local = rx->local; struct ieee80211_hw *hw = &local->hw; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct sta_info *sta = rx->sta; struct tid_ampdu_rx *tid_agg_rx; u16 sc; - int tid; + u8 tid, ack_policy; if (!ieee80211_is_data_qos(hdr->frame_control)) goto dont_reorder; @@ -764,6 +765,8 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx) if (!sta) goto dont_reorder; + ack_policy = *ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CTL_ACK_POLICY_MASK; tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); @@ -774,6 +777,15 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx) if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) goto dont_reorder; + /* not part of a BA session */ + if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && + ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL) + goto dont_reorder; + + /* not actually part of this BA session */ + if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) + goto dont_reorder; + /* new, potentially un-ordered, ampdu frame - process it */ /* reset session timer */ @@ -858,6 +870,13 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) rx->sdata->control_port_protocol) return RX_CONTINUE; } + + if (rx->sdata->vif.type == NL80211_IFTYPE_AP && + cfg80211_rx_spurious_frame(rx->sdata->dev, + hdr->addr2, + GFP_ATOMIC)) + return RX_DROP_UNUSABLE; + return RX_DROP_MONITOR; } @@ -1327,15 +1346,20 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) /* * If we receive a 4-addr nullfunc frame from a STA - * that was not moved to a 4-addr STA vlan yet, drop - * the frame to the monitor interface, to make sure - * that hostapd sees it + * that was not moved to a 4-addr STA vlan yet send + * the event to userspace and for older hostapd drop + * the frame to the monitor interface. 
*/ if (ieee80211_has_a4(hdr->frame_control) && (rx->sdata->vif.type == NL80211_IFTYPE_AP || (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && - !rx->sdata->u.vlan.sta))) + !rx->sdata->u.vlan.sta))) { + if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT)) + cfg80211_rx_unexpected_4addr_frame( + rx->sdata->dev, sta->sta.addr, + GFP_ATOMIC); return RX_DROP_MONITOR; + } /* * Update counter and free packet here to avoid * counting this as a dropped packed. @@ -1933,6 +1957,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0) return RX_CONTINUE; + skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb)); mesh_hdr->ttl--; if (status->rx_flags & IEEE80211_RX_RA_MATCH) { @@ -1957,12 +1982,10 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) memset(info, 0, sizeof(*info)); info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; info->control.vif = &rx->sdata->vif; + info->control.jiffies = jiffies; if (is_multicast_ether_addr(fwd_hdr->addr1)) { IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh, fwded_mcast); - skb_set_queue_mapping(fwd_skb, - ieee80211_select_queue(sdata, fwd_skb)); - ieee80211_set_qos_hdr(sdata, fwd_skb); } else { int err; /* @@ -2014,12 +2037,17 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx) return RX_DROP_MONITOR; /* - * Allow the cooked monitor interface of an AP to see 4-addr frames so - * that a 4-addr station can be detected and moved into a separate VLAN + * Send unexpected-4addr-frame event to hostapd. For older versions, + * also drop the frame to cooked monitor interfaces. */ if (ieee80211_has_a4(hdr->frame_control) && - sdata->vif.type == NL80211_IFTYPE_AP) + sdata->vif.type == NL80211_IFTYPE_AP) { + if (rx->sta && + !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) + cfg80211_rx_unexpected_4addr_frame( + rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC); return RX_DROP_MONITOR; + } err = __ieee80211_data_to_8023(rx, &port_control); if (unlikely(err)) @@ -2174,6 +2202,18 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) if (!ieee80211_is_mgmt(mgmt->frame_control)) return RX_DROP_MONITOR; + if (rx->sdata->vif.type == NL80211_IFTYPE_AP && + ieee80211_is_beacon(mgmt->frame_control) && + !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { + struct ieee80211_rx_status *status; + + status = IEEE80211_SKB_RXCB(rx->skb); + cfg80211_report_obss_beacon(rx->local->hw.wiphy, + rx->skb->data, rx->skb->len, + status->freq, GFP_ATOMIC); + rx->flags |= IEEE80211_RX_BEACON_REPORTED; + } + if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) return RX_DROP_MONITOR; @@ -2207,13 +2247,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) switch (mgmt->u.action.category) { case WLAN_CATEGORY_BACK: - /* - * The aggregation code is not prepared to handle - * anything but STA/AP due to the BSSID handling; - * IBSS could work in the code but isn't supported - * by drivers or the standard. 
- */ if (sdata->vif.type != NL80211_IFTYPE_STATION && + sdata->vif.type != NL80211_IFTYPE_MESH_POINT && sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_AP) break; @@ -2493,6 +2528,10 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, goto out_free_skb; rx->flags |= IEEE80211_RX_CMNTR; + /* If there are no cooked monitor interfaces, just free the SKB */ + if (!local->cooked_mntrs) + goto out_free_skb; + if (skb_headroom(skb) < sizeof(*rthdr) && pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) goto out_free_skb; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 105436dbb90d..81863031e0a3 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -213,12 +213,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) if (bss) ieee80211_rx_bss_put(sdata->local, bss); - /* If we are on-operating-channel, and this packet is for the - * current channel, pass the pkt on up the stack so that - * the rest of the stack can make use of it. - */ - if (ieee80211_cfg_on_oper_channel(sdata->local) - && (channel == sdata->local->oper_channel)) + if (channel == sdata->local->oper_channel) return RX_CONTINUE; dev_kfree_skb(skb); @@ -264,8 +259,6 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted, bool was_hw_scan) { struct ieee80211_local *local = hw_to_local(hw); - bool on_oper_chan; - bool enable_beacons = false; lockdep_assert_held(&local->mtx); @@ -298,25 +291,13 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted, local->scanning = 0; local->scan_channel = NULL; - on_oper_chan = ieee80211_cfg_on_oper_channel(local); - - if (was_hw_scan || !on_oper_chan) - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); - else - /* Set power back to normal operating levels. */ - ieee80211_hw_config(local, 0); + /* Set power back to normal operating levels. */ + ieee80211_hw_config(local, 0); if (!was_hw_scan) { - bool on_oper_chan2; ieee80211_configure_filter(local); drv_sw_scan_complete(local); - on_oper_chan2 = ieee80211_cfg_on_oper_channel(local); - /* We should always be on-channel at this point. */ - WARN_ON(!on_oper_chan2); - if (on_oper_chan2 && (on_oper_chan != on_oper_chan2)) - enable_beacons = true; - - ieee80211_offchannel_return(local, enable_beacons, true); + ieee80211_offchannel_return(local, true, true); } ieee80211_recalc_idle(local); @@ -361,11 +342,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local) local->next_scan_state = SCAN_DECISION; local->scan_channel_idx = 0; - /* We always want to use off-channel PS, even if we - * are not really leaving oper-channel. Don't - * tell the AP though, as long as we are on-channel. - */ - ieee80211_offchannel_enable_all_ps(local, false); + ieee80211_offchannel_stop_vifs(local, true); ieee80211_configure_filter(local); @@ -373,8 +350,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local) ieee80211_hw_config(local, 0); ieee80211_queue_delayed_work(&local->hw, - &local->scan_work, - IEEE80211_CHANNEL_TIME); + &local->scan_work, 0); return 0; } @@ -510,96 +486,39 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local, next_chan = local->scan_req->channels[local->scan_channel_idx]; - if (ieee80211_cfg_on_oper_channel(local)) { - /* We're currently on operating channel. */ - if (next_chan == local->oper_channel) - /* We don't need to move off of operating channel. 
*/ - local->next_scan_state = SCAN_SET_CHANNEL; - else - /* - * We do need to leave operating channel, as next - * scan is somewhere else. - */ - local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL; - } else { - /* - * we're currently scanning a different channel, let's - * see if we can scan another channel without interfering - * with the current traffic situation. - * - * Since we don't know if the AP has pending frames for us - * we can only check for our tx queues and use the current - * pm_qos requirements for rx. Hence, if no tx traffic occurs - * at all we will scan as many channels in a row as the pm_qos - * latency allows us to. Additionally we also check for the - * currently negotiated listen interval to prevent losing - * frames unnecessarily. - * - * Otherwise switch back to the operating channel. - */ - - bad_latency = time_after(jiffies + - ieee80211_scan_get_channel_time(next_chan), - local->leave_oper_channel_time + - usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY))); - - listen_int_exceeded = time_after(jiffies + - ieee80211_scan_get_channel_time(next_chan), - local->leave_oper_channel_time + - usecs_to_jiffies(min_beacon_int * 1024) * - local->hw.conf.listen_interval); - - if (associated && ( !tx_empty || bad_latency || - listen_int_exceeded)) - local->next_scan_state = SCAN_ENTER_OPER_CHANNEL; - else - local->next_scan_state = SCAN_SET_CHANNEL; - } - - *next_delay = 0; -} - -static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local, - unsigned long *next_delay) -{ - /* PS will already be in off-channel mode, - * we do that once at the beginning of scanning. - */ - ieee80211_offchannel_stop_vifs(local, false); - /* - * What if the nullfunc frames didn't arrive? + * we're currently scanning a different channel, let's + * see if we can scan another channel without interfering + * with the current traffic situation. + * + * Since we don't know if the AP has pending frames for us + * we can only check for our tx queues and use the current + * pm_qos requirements for rx. Hence, if no tx traffic occurs + * at all we will scan as many channels in a row as the pm_qos + * latency allows us to. Additionally we also check for the + * currently negotiated listen interval to prevent losing + * frames unnecessarily. + * + * Otherwise switch back to the operating channel. */ - drv_flush(local, false); - if (local->ops->flush) - *next_delay = 0; - else - *next_delay = HZ / 10; - /* remember when we left the operating channel */ - local->leave_oper_channel_time = jiffies; + bad_latency = time_after(jiffies + + ieee80211_scan_get_channel_time(next_chan), + local->leave_oper_channel_time + + usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY))); - /* advance to the next channel to be scanned */ - local->next_scan_state = SCAN_SET_CHANNEL; -} - -static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local, - unsigned long *next_delay) -{ - /* switch back to the operating channel */ - local->scan_channel = NULL; - if (!ieee80211_cfg_on_oper_channel(local)) - ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); + listen_int_exceeded = time_after(jiffies + + ieee80211_scan_get_channel_time(next_chan), + local->leave_oper_channel_time + + usecs_to_jiffies(min_beacon_int * 1024) * + local->hw.conf.listen_interval); - /* - * Re-enable vifs and beaconing. Leave PS - * in off-channel state..will put that back - * on-channel at the end of scanning. 
- */ - ieee80211_offchannel_return(local, true, false); + if (associated && (!tx_empty || bad_latency || listen_int_exceeded)) + local->next_scan_state = SCAN_SUSPEND; + else + local->next_scan_state = SCAN_SET_CHANNEL; - *next_delay = HZ / 5; - local->next_scan_state = SCAN_DECISION; + *next_delay = 0; } static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, @@ -613,10 +532,8 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, local->scan_channel = chan; - /* Only call hw-config if we really need to change channels. */ - if (chan != local->hw.conf.channel) - if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL)) - skip = 1; + if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL)) + skip = 1; /* advance state machine to next channel/band */ local->scan_channel_idx++; @@ -673,6 +590,44 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local, local->next_scan_state = SCAN_DECISION; } +static void ieee80211_scan_state_suspend(struct ieee80211_local *local, + unsigned long *next_delay) +{ + /* switch back to the operating channel */ + local->scan_channel = NULL; + ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); + + /* + * Re-enable vifs and beaconing. Leave PS + * in off-channel state..will put that back + * on-channel at the end of scanning. + */ + ieee80211_offchannel_return(local, true, false); + + *next_delay = HZ / 5; + /* afterwards, resume scan & go to next channel */ + local->next_scan_state = SCAN_RESUME; +} + +static void ieee80211_scan_state_resume(struct ieee80211_local *local, + unsigned long *next_delay) +{ + /* PS already is in off-channel mode */ + ieee80211_offchannel_stop_vifs(local, false); + + if (local->ops->flush) { + drv_flush(local, false); + *next_delay = 0; + } else + *next_delay = HZ / 10; + + /* remember when we left the operating channel */ + local->leave_oper_channel_time = jiffies; + + /* advance to the next channel to be scanned */ + local->next_scan_state = SCAN_DECISION; +} + void ieee80211_scan_work(struct work_struct *work) { struct ieee80211_local *local = @@ -743,11 +698,11 @@ void ieee80211_scan_work(struct work_struct *work) case SCAN_SEND_PROBE: ieee80211_scan_state_send_probe(local, &next_delay); break; - case SCAN_LEAVE_OPER_CHANNEL: - ieee80211_scan_state_leave_oper_channel(local, &next_delay); + case SCAN_SUSPEND: + ieee80211_scan_state_suspend(local, &next_delay); break; - case SCAN_ENTER_OPER_CHANNEL: - ieee80211_scan_state_enter_oper_channel(local, &next_delay); + case SCAN_RESUME: + ieee80211_scan_state_resume(local, &next_delay); break; } } while (next_delay == 0); diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 8c8ce05ad26f..6280e8bca49d 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -30,7 +30,6 @@ * when virtual port control is not in use. * @WLAN_STA_SHORT_PREAMBLE: Station is capable of receiving short-preamble * frames. - * @WLAN_STA_ASSOC_AP: We're associated to that station, it is an AP. * @WLAN_STA_WME: Station is a QoS-STA. * @WLAN_STA_WDS: Station is one of our WDS peers. * @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the @@ -52,6 +51,7 @@ * unblocks the station. * @WLAN_STA_SP: Station is in a service period, so don't try to * reply to other uAPSD trigger frames or PS-Poll. + * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame. 
*/ enum ieee80211_sta_info_flags { WLAN_STA_AUTH, @@ -59,7 +59,6 @@ enum ieee80211_sta_info_flags { WLAN_STA_PS_STA, WLAN_STA_AUTHORIZED, WLAN_STA_SHORT_PREAMBLE, - WLAN_STA_ASSOC_AP, WLAN_STA_WME, WLAN_STA_WDS, WLAN_STA_CLEAR_PS_FILT, @@ -71,6 +70,7 @@ enum ieee80211_sta_info_flags { WLAN_STA_TDLS_PEER_AUTH, WLAN_STA_UAPSD, WLAN_STA_SP, + WLAN_STA_4ADDR_EVENT, }; #define STA_TID_NUM 16 @@ -390,6 +390,12 @@ static inline int test_and_clear_sta_flag(struct sta_info *sta, return test_and_clear_bit(flag, &sta->_flags); } +static inline int test_and_set_sta_flag(struct sta_info *sta, + enum ieee80211_sta_info_flags flag) +{ + return test_and_set_bit(flag, &sta->_flags); +} + void ieee80211_assign_tid_tx(struct sta_info *sta, int tid, struct tid_ampdu_tx *tid_tx); diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 16518f386117..46222ce0e5b1 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -517,27 +517,54 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) } if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) { - struct ieee80211_work *wk; u64 cookie = (unsigned long)skb; - rcu_read_lock(); - list_for_each_entry_rcu(wk, &local->work_list, list) { - if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX) - continue; - if (wk->offchan_tx.frame != skb) - continue; - wk->offchan_tx.status = true; - break; - } - rcu_read_unlock(); - if (local->hw_roc_skb_for_status == skb) { - cookie = local->hw_roc_cookie ^ 2; - local->hw_roc_skb_for_status = NULL; + if (ieee80211_is_nullfunc(hdr->frame_control) || + ieee80211_is_qos_nullfunc(hdr->frame_control)) { + bool acked = info->flags & IEEE80211_TX_STAT_ACK; + cfg80211_probe_status(skb->dev, hdr->addr1, + cookie, acked, GFP_ATOMIC); + } else { + struct ieee80211_work *wk; + + rcu_read_lock(); + list_for_each_entry_rcu(wk, &local->work_list, list) { + if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX) + continue; + if (wk->offchan_tx.frame != skb) + continue; + wk->offchan_tx.status = true; + break; + } + rcu_read_unlock(); + if (local->hw_roc_skb_for_status == skb) { + cookie = local->hw_roc_cookie ^ 2; + local->hw_roc_skb_for_status = NULL; + } + + cfg80211_mgmt_tx_status( + skb->dev, cookie, skb->data, skb->len, + !!(info->flags & IEEE80211_TX_STAT_ACK), + GFP_ATOMIC); } + } - cfg80211_mgmt_tx_status( - skb->dev, cookie, skb->data, skb->len, - !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC); + if (unlikely(info->ack_frame_id)) { + struct sk_buff *ack_skb; + unsigned long flags; + + spin_lock_irqsave(&local->ack_status_lock, flags); + ack_skb = idr_find(&local->ack_status_frames, + info->ack_frame_id); + if (ack_skb) + idr_remove(&local->ack_status_frames, + info->ack_frame_id); + spin_unlock_irqrestore(&local->ack_status_lock, flags); + + /* consumes ack_skb */ + if (ack_skb) + skb_complete_wifi_ack(ack_skb, + info->flags & IEEE80211_TX_STAT_ACK); } /* this was a transmitted frame, but now we want to reuse it */ @@ -610,3 +637,29 @@ void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets) num_packets, GFP_ATOMIC); } EXPORT_SYMBOL(ieee80211_report_low_ack); + +void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct ieee80211_local *local = hw_to_local(hw); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (unlikely(info->ack_frame_id)) { + struct sk_buff *ack_skb; + unsigned long flags; + + spin_lock_irqsave(&local->ack_status_lock, flags); + ack_skb = idr_find(&local->ack_status_frames, + info->ack_frame_id); + if (ack_skb) + 
idr_remove(&local->ack_status_frames, + info->ack_frame_id); + spin_unlock_irqrestore(&local->ack_status_lock, flags); + + /* consumes ack_skb */ + if (ack_skb) + dev_kfree_skb_any(ack_skb); + } + + dev_kfree_skb_any(skb); +} +EXPORT_SYMBOL(ieee80211_free_txskb); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 1f8b120146d1..8d31933abe6a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -36,7 +36,8 @@ /* misc utils */ -static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, +static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, + struct sk_buff *skb, int group_addr, int next_frag_len) { int rate, mrate, erp, dur, i; @@ -44,7 +45,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, struct ieee80211_local *local = tx->local; struct ieee80211_supported_band *sband; struct ieee80211_hdr *hdr; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); /* assume HW handles this */ if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS) @@ -76,7 +77,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr, * at the highest possible rate belonging to the PHY rates in the * BSSBasicRateSet */ - hdr = (struct ieee80211_hdr *)tx->skb->data; + hdr = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_ctl(hdr->frame_control)) { /* TODO: These control frames are not currently sent by * mac80211, but should they be implemented, this function @@ -572,8 +573,6 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) switch (tx->key->conf.cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: - if (ieee80211_is_auth(hdr->frame_control)) - break; case WLAN_CIPHER_SUITE_TKIP: if (!ieee80211_is_data_present(hdr->frame_control)) tx->key = NULL; @@ -844,11 +843,13 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) return TX_CONTINUE; } -static int ieee80211_fragment(struct ieee80211_local *local, +static int ieee80211_fragment(struct ieee80211_tx_data *tx, struct sk_buff *skb, int hdrlen, int frag_threshold) { - struct sk_buff *tail = skb, *tmp; + struct ieee80211_local *local = tx->local; + struct ieee80211_tx_info *info; + struct sk_buff *tmp; int per_fragm = frag_threshold - hdrlen - FCS_LEN; int pos = hdrlen + per_fragm; int rem = skb->len - hdrlen - per_fragm; @@ -856,6 +857,8 @@ static int ieee80211_fragment(struct ieee80211_local *local, if (WARN_ON(rem < 0)) return -EINVAL; + /* first fragment was already added to queue by caller */ + while (rem) { int fraglen = per_fragm; @@ -868,12 +871,21 @@ static int ieee80211_fragment(struct ieee80211_local *local, IEEE80211_ENCRYPT_TAILROOM); if (!tmp) return -ENOMEM; - tail->next = tmp; - tail = tmp; + + __skb_queue_tail(&tx->skbs, tmp); + skb_reserve(tmp, local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM); /* copy control information */ memcpy(tmp->cb, skb->cb, sizeof(tmp->cb)); + + info = IEEE80211_SKB_CB(tmp); + info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | + IEEE80211_TX_CTL_FIRST_FRAGMENT); + + if (rem) + info->flags |= IEEE80211_TX_CTL_MORE_FRAMES; + skb_copy_queue_mapping(tmp, skb); tmp->priority = skb->priority; tmp->dev = skb->dev; @@ -885,6 +897,7 @@ static int ieee80211_fragment(struct ieee80211_local *local, pos += fraglen; } + /* adjust first fragment's length */ skb->len = hdrlen + per_fragm; return 0; } @@ -899,6 +912,10 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) int hdrlen; int fragnum; + /* no matter what happens, tx->skb moves to tx->skbs */ + 
__skb_queue_tail(&tx->skbs, skb); + tx->skb = NULL; + if (info->flags & IEEE80211_TX_CTL_DONTFRAG) return TX_CONTINUE; @@ -927,21 +944,21 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) * of the fragments then we will simply pretend to accept the skb * but store it away as pending. */ - if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold)) + if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold)) return TX_DROP; /* update duration/seq/flags of fragments */ fragnum = 0; - do { + + skb_queue_walk(&tx->skbs, skb) { int next_len; const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS); hdr = (void *)skb->data; info = IEEE80211_SKB_CB(skb); - if (skb->next) { + if (!skb_queue_is_last(&tx->skbs, skb)) { hdr->frame_control |= morefrags; - next_len = skb->next->len; /* * No multi-rate retries for fragmented frames, that * would completely throw off the NAV at other STAs. @@ -956,10 +973,9 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) hdr->frame_control &= ~morefrags; next_len = 0; } - hdr->duration_id = ieee80211_duration(tx, 0, next_len); hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG); fragnum++; - } while ((skb = skb->next)); + } return TX_CONTINUE; } @@ -967,16 +983,16 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) static ieee80211_tx_result debug_noinline ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) { - struct sk_buff *skb = tx->skb; + struct sk_buff *skb; if (!tx->sta) return TX_CONTINUE; tx->sta->tx_packets++; - do { + skb_queue_walk(&tx->skbs, skb) { tx->sta->tx_fragments++; tx->sta->tx_bytes += skb->len; - } while ((skb = skb->next)); + } return TX_CONTINUE; } @@ -1015,21 +1031,25 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) static ieee80211_tx_result debug_noinline ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx) { - struct sk_buff *skb = tx->skb; + struct sk_buff *skb; struct ieee80211_hdr *hdr; int next_len; bool group_addr; - do { + skb_queue_walk(&tx->skbs, skb) { hdr = (void *) skb->data; if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) break; /* must not overwrite AID */ - next_len = skb->next ? skb->next->len : 0; + if (!skb_queue_is_last(&tx->skbs, skb)) { + struct sk_buff *next = skb_queue_next(&tx->skbs, skb); + next_len = next->len; + } else + next_len = 0; group_addr = is_multicast_ether_addr(hdr->addr1); hdr->duration_id = - ieee80211_duration(tx, group_addr, next_len); - } while ((skb = skb->next)); + ieee80211_duration(tx, skb, group_addr, next_len); + } return TX_CONTINUE; } @@ -1108,6 +1128,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, tx->local = local; tx->sdata = sdata; tx->channel = local->hw.conf.channel; + __skb_queue_head_init(&tx->skbs); /* * If this flag is set to true anywhere, and we get here, @@ -1180,22 +1201,18 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, return TX_CONTINUE; } -/* - * Returns false if the frame couldn't be transmitted but was queued instead. 
- */ -static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp, - struct sta_info *sta, bool txpending) +static bool ieee80211_tx_frags(struct ieee80211_local *local, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct sk_buff_head *skbs, + bool txpending) { - struct sk_buff *skb = *skbp, *next; + struct sk_buff *skb, *tmp; struct ieee80211_tx_info *info; - struct ieee80211_sub_if_data *sdata; unsigned long flags; - int len; - bool fragm = false; - while (skb) { + skb_queue_walk_safe(skbs, skb, tmp) { int q = skb_get_queue_mapping(skb); - __le16 fc; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); if (local->queue_stop_reasons[q] || @@ -1205,24 +1222,10 @@ static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp, * transmission from the tx-pending tasklet when the * queue is woken again. */ - - do { - next = skb->next; - skb->next = NULL; - /* - * NB: If txpending is true, next must already - * be NULL since we must've gone through this - * loop before already; therefore we can just - * queue the frame to the head without worrying - * about reordering of fragments. - */ - if (unlikely(txpending)) - __skb_queue_head(&local->pending[q], - skb); - else - __skb_queue_tail(&local->pending[q], - skb); - } while ((skb = next)); + if (txpending) + skb_queue_splice(skbs, &local->pending[q]); + else + skb_queue_splice_tail(skbs, &local->pending[q]); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); @@ -1231,47 +1234,72 @@ static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp, spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); info = IEEE80211_SKB_CB(skb); + info->control.vif = vif; + info->control.sta = sta; - if (fragm) - info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | - IEEE80211_TX_CTL_FIRST_FRAGMENT); - - next = skb->next; - len = skb->len; + __skb_unlink(skb, skbs); + drv_tx(local, skb); + } - if (next) - info->flags |= IEEE80211_TX_CTL_MORE_FRAMES; + return true; +} - sdata = vif_to_sdata(info->control.vif); +/* + * Returns false if the frame couldn't be transmitted but was queued instead. 
+ */ +static bool __ieee80211_tx(struct ieee80211_local *local, + struct sk_buff_head *skbs, int led_len, + struct sta_info *sta, bool txpending) +{ + struct ieee80211_tx_info *info; + struct ieee80211_sub_if_data *sdata; + struct ieee80211_vif *vif; + struct ieee80211_sta *pubsta; + struct sk_buff *skb; + bool result = true; + __le16 fc; - switch (sdata->vif.type) { - case NL80211_IFTYPE_MONITOR: - info->control.vif = NULL; - break; - case NL80211_IFTYPE_AP_VLAN: - info->control.vif = &container_of(sdata->bss, - struct ieee80211_sub_if_data, u.ap)->vif; - break; - default: - /* keep */ - break; - } + if (WARN_ON(skb_queue_empty(skbs))) + return true; - if (sta && sta->uploaded) - info->control.sta = &sta->sta; - else - info->control.sta = NULL; + skb = skb_peek(skbs); + fc = ((struct ieee80211_hdr *)skb->data)->frame_control; + info = IEEE80211_SKB_CB(skb); + sdata = vif_to_sdata(info->control.vif); + if (sta && !sta->uploaded) + sta = NULL; - fc = ((struct ieee80211_hdr *)skb->data)->frame_control; - drv_tx(local, skb); + if (sta) + pubsta = &sta->sta; + else + pubsta = NULL; - ieee80211_tpt_led_trig_tx(local, fc, len); - *skbp = skb = next; - ieee80211_led_tx(local, 1); - fragm = true; + switch (sdata->vif.type) { + case NL80211_IFTYPE_MONITOR: + sdata = NULL; + vif = NULL; + break; + case NL80211_IFTYPE_AP_VLAN: + sdata = container_of(sdata->bss, + struct ieee80211_sub_if_data, u.ap); + /* fall through */ + default: + vif = &sdata->vif; + break; } - return true; + if (local->ops->tx_frags) + drv_tx_frags(local, vif, pubsta, skbs); + else + result = ieee80211_tx_frags(local, vif, pubsta, skbs, + txpending); + + ieee80211_tpt_led_trig_tx(local, fc, led_len); + ieee80211_led_tx(local, 1); + + WARN_ON(!skb_queue_empty(skbs)); + + return result; } /* @@ -1280,8 +1308,7 @@ static bool __ieee80211_tx(struct ieee80211_local *local, struct sk_buff **skbp, */ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) { - struct sk_buff *skb = tx->skb; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); ieee80211_tx_result res = TX_DROP; #define CALL_TXH(txh) \ @@ -1315,13 +1342,10 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx) txh_done: if (unlikely(res == TX_DROP)) { I802_DEBUG_INC(tx->local->tx_handlers_drop); - while (skb) { - struct sk_buff *next; - - next = skb->next; - dev_kfree_skb(skb); - skb = next; - } + if (tx->skb) + dev_kfree_skb(tx->skb); + else + __skb_queue_purge(&tx->skbs); return -1; } else if (unlikely(res == TX_QUEUED)) { I802_DEBUG_INC(tx->local->tx_handlers_queued); @@ -1342,6 +1366,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, ieee80211_tx_result res_prepare; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); bool result = true; + int led_len; if (unlikely(skb->len < 10)) { dev_kfree_skb(skb); @@ -1351,6 +1376,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, rcu_read_lock(); /* initialises tx */ + led_len = skb->len; res_prepare = ieee80211_tx_prepare(sdata, &tx, skb); if (unlikely(res_prepare == TX_DROP)) { @@ -1364,7 +1390,8 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, info->band = tx.channel->band; if (!invoke_tx_handlers(&tx)) - result = __ieee80211_tx(local, &tx.skb, tx.sta, txpending); + result = __ieee80211_tx(local, &tx.skbs, led_len, + tx.sta, txpending); out: rcu_read_unlock(); return result; @@ -1685,8 +1712,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, int nh_pos, h_pos; struct sta_info *sta = 
NULL; bool wme_sta = false, authorized = false, tdls_auth = false; - struct sk_buff *tmp_skb; bool tdls_direct = false; + bool multicast; + u32 info_flags = 0; + u16 info_id = 0; if (unlikely(skb->len < ETH_HLEN)) { ret = NETDEV_TX_OK; @@ -1873,7 +1902,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, * if it is a multicast address (which can only happen * in AP mode) */ - if (!is_multicast_ether_addr(hdr.addr1)) { + multicast = is_multicast_ether_addr(hdr.addr1); + if (!multicast) { rcu_read_lock(); sta = sta_info_get(sdata, hdr.addr1); if (sta) { @@ -1914,11 +1944,54 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, goto fail; } + if (unlikely(!multicast && skb->sk && + skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) { + struct sk_buff *orig_skb = skb; + + skb = skb_clone(skb, GFP_ATOMIC); + if (skb) { + unsigned long flags; + int id, r; + + spin_lock_irqsave(&local->ack_status_lock, flags); + r = idr_get_new_above(&local->ack_status_frames, + orig_skb, 1, &id); + if (r == -EAGAIN) { + idr_pre_get(&local->ack_status_frames, + GFP_ATOMIC); + r = idr_get_new_above(&local->ack_status_frames, + orig_skb, 1, &id); + } + if (WARN_ON(!id) || id > 0xffff) { + idr_remove(&local->ack_status_frames, id); + r = -ERANGE; + } + spin_unlock_irqrestore(&local->ack_status_lock, flags); + + if (!r) { + info_id = id; + info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; + } else if (skb_shared(skb)) { + kfree_skb(orig_skb); + } else { + kfree_skb(skb); + skb = orig_skb; + } + } else { + /* couldn't clone -- lose tx status ... */ + skb = orig_skb; + } + } + /* * If the skb is shared we need to obtain our own copy. */ if (skb_shared(skb)) { - tmp_skb = skb; + struct sk_buff *tmp_skb = skb; + + /* can't happen -- skb is a clone if info_id != 0 */ + WARN_ON(info_id); + skb = skb_clone(skb, GFP_ATOMIC); kfree_skb(tmp_skb); @@ -2019,6 +2092,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, memset(info, 0, sizeof(*info)); dev->trans_start = jiffies; + + info->flags = info_flags; + info->ack_frame_id = info_id; + ieee80211_xmit(sdata, skb); return NETDEV_TX_OK; @@ -2062,10 +2139,15 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) { result = ieee80211_tx(sdata, skb, true); } else { + struct sk_buff_head skbs; + + __skb_queue_head_init(&skbs); + __skb_queue_tail(&skbs, skb); + hdr = (struct ieee80211_hdr *)skb->data; sta = sta_info_get(sdata, hdr->addr1); - result = __ieee80211_tx(local, &skb, sta, true); + result = __ieee80211_tx(local, &skbs, skb->len, sta, true); } return result; @@ -2279,22 +2361,31 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, } else if (ieee80211_vif_is_mesh(&sdata->vif)) { struct ieee80211_mgmt *mgmt; u8 *pos; + int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) + + sizeof(mgmt->u.beacon); #ifdef CONFIG_MAC80211_MESH if (!sdata->u.mesh.mesh_id_len) goto out; #endif - /* headroom, head length, tail length and maximum TIM length */ - skb = dev_alloc_skb(local->tx_headroom + 400 + - sdata->u.mesh.ie_len); + skb = dev_alloc_skb(local->tx_headroom + + hdr_len + + 2 + /* NULL SSID */ + 2 + 8 + /* supported rates */ + 2 + 3 + /* DS params */ + 2 + (IEEE80211_MAX_SUPP_RATES - 8) + + 2 + sizeof(struct ieee80211_ht_cap) + + 2 + sizeof(struct ieee80211_ht_info) + + 2 + sdata->u.mesh.mesh_id_len + + 2 + sizeof(struct ieee80211_meshconf_ie) + + sdata->u.mesh.ie_len); if (!skb) goto out; skb_reserve(skb, local->hw.extra_tx_headroom); - mgmt = (struct ieee80211_mgmt 
*) - skb_put(skb, 24 + sizeof(mgmt->u.beacon)); - memset(mgmt, 0, 24 + sizeof(mgmt->u.beacon)); + mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len); + memset(mgmt, 0, hdr_len); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON); memset(mgmt->da, 0xff, ETH_ALEN); @@ -2313,6 +2404,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, mesh_add_ds_params_ie(skb, sdata) || ieee80211_add_ext_srates_ie(&sdata->vif, skb) || mesh_add_rsn_ie(skb, sdata) || + mesh_add_ht_cap_ie(skb, sdata) || + mesh_add_ht_info_ie(skb, sdata) || mesh_add_meshid_ie(skb, sdata) || mesh_add_meshconf_ie(skb, sdata) || mesh_add_vendor_ies(skb, sdata)) { @@ -2355,6 +2448,37 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, } EXPORT_SYMBOL(ieee80211_beacon_get_tim); +struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ieee80211_if_ap *ap = NULL; + struct sk_buff *presp = NULL, *skb = NULL; + struct ieee80211_hdr *hdr; + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + + if (sdata->vif.type != NL80211_IFTYPE_AP) + return NULL; + + rcu_read_lock(); + + ap = &sdata->u.ap; + presp = rcu_dereference(ap->probe_resp); + if (!presp) + goto out; + + skb = skb_copy(presp, GFP_ATOMIC); + if (!skb) + goto out; + + hdr = (struct ieee80211_hdr *) skb->data; + memset(hdr->addr1, 0, sizeof(hdr->addr1)); + +out: + rcu_read_unlock(); + return skb; +} +EXPORT_SYMBOL(ieee80211_proberesp_get); + struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { diff --git a/net/mac80211/util.c b/net/mac80211/util.c index eca0fad09709..3b9b492e9403 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -20,6 +20,7 @@ #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/bitmap.h> +#include <linux/crc32.h> #include <net/net_namespace.h> #include <net/cfg80211.h> #include <net/rtnetlink.h> @@ -96,13 +97,13 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) { - struct sk_buff *skb = tx->skb; + struct sk_buff *skb; struct ieee80211_hdr *hdr; - do { + skb_queue_walk(&tx->skbs, skb) { hdr = (struct ieee80211_hdr *) skb->data; hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); - } while ((skb = skb->next)); + } } int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, @@ -564,6 +565,172 @@ void ieee80211_queue_delayed_work(struct ieee80211_hw *hw, } EXPORT_SYMBOL(ieee80211_queue_delayed_work); +u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, + struct ieee802_11_elems *elems, + u64 filter, u32 crc) +{ + size_t left = len; + u8 *pos = start; + bool calc_crc = filter != 0; + + memset(elems, 0, sizeof(*elems)); + elems->ie_start = start; + elems->total_len = len; + + while (left >= 2) { + u8 id, elen; + + id = *pos++; + elen = *pos++; + left -= 2; + + if (elen > left) + break; + + if (calc_crc && id < 64 && (filter & (1ULL << id))) + crc = crc32_be(crc, pos - 2, elen + 2); + + switch (id) { + case WLAN_EID_SSID: + elems->ssid = pos; + elems->ssid_len = elen; + break; + case WLAN_EID_SUPP_RATES: + elems->supp_rates = pos; + elems->supp_rates_len = elen; + break; + case WLAN_EID_FH_PARAMS: + elems->fh_params = pos; + elems->fh_params_len = elen; + break; + case WLAN_EID_DS_PARAMS: + elems->ds_params = pos; + elems->ds_params_len = elen; + break; + case WLAN_EID_CF_PARAMS: + elems->cf_params = pos; + elems->cf_params_len = elen; + break; + case WLAN_EID_TIM: + if (elen 
>= sizeof(struct ieee80211_tim_ie)) { + elems->tim = (void *)pos; + elems->tim_len = elen; + } + break; + case WLAN_EID_IBSS_PARAMS: + elems->ibss_params = pos; + elems->ibss_params_len = elen; + break; + case WLAN_EID_CHALLENGE: + elems->challenge = pos; + elems->challenge_len = elen; + break; + case WLAN_EID_VENDOR_SPECIFIC: + if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && + pos[2] == 0xf2) { + /* Microsoft OUI (00:50:F2) */ + + if (calc_crc) + crc = crc32_be(crc, pos - 2, elen + 2); + + if (pos[3] == 1) { + /* OUI Type 1 - WPA IE */ + elems->wpa = pos; + elems->wpa_len = elen; + } else if (elen >= 5 && pos[3] == 2) { + /* OUI Type 2 - WMM IE */ + if (pos[4] == 0) { + elems->wmm_info = pos; + elems->wmm_info_len = elen; + } else if (pos[4] == 1) { + elems->wmm_param = pos; + elems->wmm_param_len = elen; + } + } + } + break; + case WLAN_EID_RSN: + elems->rsn = pos; + elems->rsn_len = elen; + break; + case WLAN_EID_ERP_INFO: + elems->erp_info = pos; + elems->erp_info_len = elen; + break; + case WLAN_EID_EXT_SUPP_RATES: + elems->ext_supp_rates = pos; + elems->ext_supp_rates_len = elen; + break; + case WLAN_EID_HT_CAPABILITY: + if (elen >= sizeof(struct ieee80211_ht_cap)) + elems->ht_cap_elem = (void *)pos; + break; + case WLAN_EID_HT_INFORMATION: + if (elen >= sizeof(struct ieee80211_ht_info)) + elems->ht_info_elem = (void *)pos; + break; + case WLAN_EID_MESH_ID: + elems->mesh_id = pos; + elems->mesh_id_len = elen; + break; + case WLAN_EID_MESH_CONFIG: + if (elen >= sizeof(struct ieee80211_meshconf_ie)) + elems->mesh_config = (void *)pos; + break; + case WLAN_EID_PEER_MGMT: + elems->peering = pos; + elems->peering_len = elen; + break; + case WLAN_EID_PREQ: + elems->preq = pos; + elems->preq_len = elen; + break; + case WLAN_EID_PREP: + elems->prep = pos; + elems->prep_len = elen; + break; + case WLAN_EID_PERR: + elems->perr = pos; + elems->perr_len = elen; + break; + case WLAN_EID_RANN: + if (elen >= sizeof(struct ieee80211_rann_ie)) + elems->rann = (void *)pos; + break; + case WLAN_EID_CHANNEL_SWITCH: + elems->ch_switch_elem = pos; + elems->ch_switch_elem_len = elen; + break; + case WLAN_EID_QUIET: + if (!elems->quiet_elem) { + elems->quiet_elem = pos; + elems->quiet_elem_len = elen; + } + elems->num_of_quiet_elem++; + break; + case WLAN_EID_COUNTRY: + elems->country_elem = pos; + elems->country_elem_len = elen; + break; + case WLAN_EID_PWR_CONSTRAINT: + elems->pwr_constr_elem = pos; + elems->pwr_constr_elem_len = elen; + break; + case WLAN_EID_TIMEOUT_INTERVAL: + elems->timeout_int = pos; + elems->timeout_int_len = elen; + break; + default: + break; + } + + left -= elen; + pos += elen; + } + + return crc; +} + void ieee802_11_parse_elems(u8 *start, size_t len, struct ieee802_11_elems *elems) { @@ -812,23 +979,9 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer, offset = noffset; } - if (sband->ht_cap.ht_supported) { - u16 cap = sband->ht_cap.cap; - __le16 tmp; - - *pos++ = WLAN_EID_HT_CAPABILITY; - *pos++ = sizeof(struct ieee80211_ht_cap); - memset(pos, 0, sizeof(struct ieee80211_ht_cap)); - tmp = cpu_to_le16(cap); - memcpy(pos, &tmp, sizeof(u16)); - pos += sizeof(u16); - *pos++ = sband->ht_cap.ampdu_factor | - (sband->ht_cap.ampdu_density << - IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT); - memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); - pos += sizeof(sband->ht_cap.mcs); - pos += 2 + 4 + 1; /* ext info, BF cap, antsel */ - } + if (sband->ht_cap.ht_supported) + pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, + sband->ht_cap.cap); /* * If adding 
more here, adjust code in main.c @@ -1026,7 +1179,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_MONITOR && ieee80211_sdata_running(sdata)) - res = drv_add_interface(local, &sdata->vif); + res = drv_add_interface(local, sdata); } /* add STAs back */ @@ -1077,7 +1230,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) BSS_CHANGED_BEACON_INT | BSS_CHANGED_BSSID | BSS_CHANGED_CQM | - BSS_CHANGED_QOS; + BSS_CHANGED_QOS | + BSS_CHANGED_IDLE; switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: @@ -1090,7 +1244,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) changed |= BSS_CHANGED_IBSS; /* fall through */ case NL80211_IFTYPE_AP: - changed |= BSS_CHANGED_SSID; + changed |= BSS_CHANGED_SSID | + BSS_CHANGED_AP_PROBE_RESP; /* fall through */ case NL80211_IFTYPE_MESH_POINT: changed |= BSS_CHANGED_BEACON | @@ -1112,6 +1267,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) } } + ieee80211_recalc_ps(local, -1); + /* * Clear the WLAN_STA_BLOCK_BA flag so new aggregation * sessions can be established after a resume. @@ -1367,6 +1524,103 @@ void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif) } EXPORT_SYMBOL(ieee80211_disable_rssi_reports); +u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, + u16 cap) +{ + __le16 tmp; + + *pos++ = WLAN_EID_HT_CAPABILITY; + *pos++ = sizeof(struct ieee80211_ht_cap); + memset(pos, 0, sizeof(struct ieee80211_ht_cap)); + + /* capability flags */ + tmp = cpu_to_le16(cap); + memcpy(pos, &tmp, sizeof(u16)); + pos += sizeof(u16); + + /* AMPDU parameters */ + *pos++ = ht_cap->ampdu_factor | + (ht_cap->ampdu_density << + IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT); + + /* MCS set */ + memcpy(pos, &ht_cap->mcs, sizeof(ht_cap->mcs)); + pos += sizeof(ht_cap->mcs); + + /* extended capabilities */ + pos += sizeof(__le16); + + /* BF capabilities */ + pos += sizeof(__le32); + + /* antenna selection */ + pos += sizeof(u8); + + return pos; +} + +u8 *ieee80211_ie_build_ht_info(u8 *pos, + struct ieee80211_sta_ht_cap *ht_cap, + struct ieee80211_channel *channel, + enum nl80211_channel_type channel_type) +{ + struct ieee80211_ht_info *ht_info; + /* Build HT Information */ + *pos++ = WLAN_EID_HT_INFORMATION; + *pos++ = sizeof(struct ieee80211_ht_info); + ht_info = (struct ieee80211_ht_info *)pos; + ht_info->control_chan = + ieee80211_frequency_to_channel(channel->center_freq); + switch (channel_type) { + case NL80211_CHAN_HT40MINUS: + ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW; + break; + case NL80211_CHAN_HT40PLUS: + ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + break; + case NL80211_CHAN_HT20: + default: + ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE; + break; + } + if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) + ht_info->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; + ht_info->operation_mode = 0x0000; + ht_info->stbc_param = 0x0000; + + /* It seems that Basic MCS set and Supported MCS set + are identical for the first 10 bytes */ + memset(&ht_info->basic_set, 0, 16); + memcpy(&ht_info->basic_set, &ht_cap->mcs, 10); + + return pos + sizeof(struct ieee80211_ht_info); +} + +enum nl80211_channel_type +ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info) +{ + enum nl80211_channel_type channel_type; + + if (!ht_info) + return NL80211_CHAN_NO_HT; + + switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { + case IEEE80211_HT_PARAM_CHA_SEC_NONE: + channel_type = NL80211_CHAN_HT20; + break; + 
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + channel_type = NL80211_CHAN_HT40PLUS; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + channel_type = NL80211_CHAN_HT40MINUS; + break; + default: + channel_type = NL80211_CHAN_NO_HT; + } + + return channel_type; +} + int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index a1c6bfd55f0f..68ad351479df 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c @@ -330,13 +330,12 @@ ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx) ieee80211_tx_set_protected(tx); - skb = tx->skb; - do { + skb_queue_walk(&tx->skbs, skb) { if (wep_encrypt_skb(tx, skb) < 0) { I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); return TX_DROP; } - } while ((skb = skb->next)); + } return TX_CONTINUE; } diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index fd52e695c071..43327115b490 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c @@ -83,7 +83,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, break; #ifdef CONFIG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: - ra = skb->data; + qos = true; break; #endif case NL80211_IFTYPE_STATION: @@ -143,11 +143,15 @@ void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, /* Fill in the QoS header if there is one. */ if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *p = ieee80211_get_qos_ctl(hdr); - u8 ack_policy = 0, tid; + u8 ack_policy, tid; tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; - if (unlikely(sdata->local->wifi_wme_noack_test)) + /* preserve EOSP bit */ + ack_policy = *p & IEEE80211_QOS_CTL_EOSP; + + if (unlikely(sdata->local->wifi_wme_noack_test) || + is_multicast_ether_addr(hdr->addr1)) ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK; /* qos header is 2 bytes */ *p++ = ack_policy | tid; diff --git a/net/mac80211/work.c b/net/mac80211/work.c index 6c53b6d1002b..6884a2d986dc 100644 --- a/net/mac80211/work.c +++ b/net/mac80211/work.c @@ -94,7 +94,8 @@ static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len, /* frame sending functions */ -static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie, +static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb, const u8 *ht_info_ie, struct ieee80211_supported_band *sband, struct ieee80211_channel *channel, enum ieee80211_smps_mode smps) @@ -102,8 +103,10 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie, struct ieee80211_ht_info *ht_info; u8 *pos; u32 flags = channel->flags; - u16 cap = sband->ht_cap.cap; - __le16 tmp; + u16 cap; + struct ieee80211_sta_ht_cap ht_cap; + + BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap)); if (!sband->ht_cap.ht_supported) return; @@ -114,9 +117,13 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie, if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info)) return; + memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); + ieee80211_apply_htcap_overrides(sdata, &ht_cap); + ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2); /* determine capability flags */ + cap = ht_cap.cap; switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: @@ -154,34 +161,8 @@ static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie, } /* reserve and fill IE */ - pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2); - *pos++ = WLAN_EID_HT_CAPABILITY; - *pos++ = sizeof(struct ieee80211_ht_cap); - 
memset(pos, 0, sizeof(struct ieee80211_ht_cap)); - - /* capability flags */ - tmp = cpu_to_le16(cap); - memcpy(pos, &tmp, sizeof(u16)); - pos += sizeof(u16); - - /* AMPDU parameters */ - *pos++ = sband->ht_cap.ampdu_factor | - (sband->ht_cap.ampdu_density << - IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT); - - /* MCS set */ - memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); - pos += sizeof(sband->ht_cap.mcs); - - /* extended capabilities */ - pos += sizeof(__le16); - - /* BF capabilities */ - pos += sizeof(__le32); - - /* antenna selection */ - pos += sizeof(u8); + ieee80211_ie_build_ht_cap(pos, &ht_cap, cap); } static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata, @@ -356,7 +337,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata, if (wk->assoc.use_11n && wk->assoc.wmm_used && local->hw.queues >= 4) - ieee80211_add_ht_ie(skb, wk->assoc.ht_information_ie, + ieee80211_add_ht_ie(sdata, skb, wk->assoc.ht_information_ie, sband, wk->chan, wk->assoc.smps); /* if present, add any custom non-vendor IEs that go after HT */ @@ -969,10 +950,9 @@ static void ieee80211_work_work(struct work_struct *work) } if (!started && !local->tmp_channel) { - bool on_oper_chan; - bool tmp_chan_changed = false; - bool on_oper_chan2; + bool on_oper_chan, on_oper_chan2; enum nl80211_channel_type wk_ct; + on_oper_chan = ieee80211_cfg_on_oper_channel(local); /* Work with existing channel type if possible. */ @@ -981,11 +961,6 @@ static void ieee80211_work_work(struct work_struct *work) wk_ct = ieee80211_calc_ct(wk->chan_type, local->hw.conf.channel_type); - if (local->tmp_channel) - if ((local->tmp_channel != wk->chan) || - (local->tmp_channel_type != wk_ct)) - tmp_chan_changed = true; - local->tmp_channel = wk->chan; local->tmp_channel_type = wk_ct; /* @@ -1008,12 +983,7 @@ static void ieee80211_work_work(struct work_struct *work) true, false); } - } else if (tmp_chan_changed) - /* Still off-channel, but on some other - * channel, so update hardware. - * PS should already be off-channel. 
- */ - ieee80211_hw_config(local, 0); + } started = true; wk->timeout = jiffies; diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index f614ce7bb6e3..93aab0715e8a 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -223,14 +223,14 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) ieee80211_tx_result ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) { - struct sk_buff *skb = tx->skb; + struct sk_buff *skb; ieee80211_tx_set_protected(tx); - do { + skb_queue_walk(&tx->skbs, skb) { if (tkip_encrypt_skb(tx, skb) < 0) return TX_DROP; - } while ((skb = skb->next)); + } return TX_CONTINUE; } @@ -390,7 +390,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) u8 scratch[6 * AES_BLOCK_SIZE]; if (info->control.hw_key && - !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { + !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) && + !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) { /* * hwaccel has no need for preallocated room for CCMP * header or MIC fields @@ -412,6 +413,12 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) pos = skb_push(skb, CCMP_HDR_LEN); memmove(pos, pos + CCMP_HDR_LEN, hdrlen); + + /* the HW only needs room for the IV, but not the actual IV */ + if (info->control.hw_key && + (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) + return 0; + hdr = (struct ieee80211_hdr *) pos; pos += hdrlen; @@ -442,14 +449,14 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) ieee80211_tx_result ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx) { - struct sk_buff *skb = tx->skb; + struct sk_buff *skb; ieee80211_tx_set_protected(tx); - do { + skb_queue_walk(&tx->skbs, skb) { if (ccmp_encrypt_skb(tx, skb) < 0) return TX_DROP; - } while ((skb = skb->next)); + } return TX_CONTINUE; } @@ -547,15 +554,22 @@ static inline void bip_ipn_swap(u8 *d, const u8 *s) ieee80211_tx_result ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx) { - struct sk_buff *skb = tx->skb; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct sk_buff *skb; + struct ieee80211_tx_info *info; struct ieee80211_key *key = tx->key; struct ieee80211_mmie *mmie; u8 aad[20]; u64 pn64; + if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) + return TX_DROP; + + skb = skb_peek(&tx->skbs); + + info = IEEE80211_SKB_CB(skb); + if (info->control.hw_key) - return 0; + return TX_CONTINUE; if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) return TX_DROP; diff --git a/net/netfilter/core.c b/net/netfilter/core.c index afca6c78948c..4aa0f4b19bd8 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -54,6 +54,12 @@ EXPORT_SYMBOL_GPL(nf_unregister_afinfo); struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly; EXPORT_SYMBOL(nf_hooks); + +#if defined(CONFIG_JUMP_LABEL) +struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; +EXPORT_SYMBOL(nf_hooks_needed); +#endif + static DEFINE_MUTEX(nf_hook_mutex); int nf_register_hook(struct nf_hook_ops *reg) @@ -70,6 +76,9 @@ int nf_register_hook(struct nf_hook_ops *reg) } list_add_rcu(®->list, elem->list.prev); mutex_unlock(&nf_hook_mutex); +#if defined(CONFIG_JUMP_LABEL) + jump_label_inc(&nf_hooks_needed[reg->pf][reg->hooknum]); +#endif return 0; } EXPORT_SYMBOL(nf_register_hook); @@ -79,7 +88,9 @@ void nf_unregister_hook(struct nf_hook_ops *reg) mutex_lock(&nf_hook_mutex); list_del_rcu(®->list); mutex_unlock(&nf_hook_mutex); - +#if 
defined(CONFIG_JUMP_LABEL) + jump_label_dec(&nf_hooks_needed[reg->pf][reg->hooknum]); +#endif synchronize_net(); } EXPORT_SYMBOL(nf_unregister_hook); diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c index f2d576e6b769..4015fcaf87bc 100644 --- a/net/netfilter/ipset/ip_set_hash_ip.c +++ b/net/netfilter/ipset/ip_set_hash_ip.c @@ -241,7 +241,7 @@ hash_ip6_data_isnull(const struct hash_ip6_elem *elem) static inline void hash_ip6_data_copy(struct hash_ip6_elem *dst, const struct hash_ip6_elem *src) { - ipv6_addr_copy(&dst->ip.in6, &src->ip.in6); + dst->ip.in6 = src->ip.in6; } static inline void diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c index 60d016541c58..28988196775e 100644 --- a/net/netfilter/ipset/ip_set_hash_net.c +++ b/net/netfilter/ipset/ip_set_hash_net.c @@ -267,7 +267,7 @@ static inline void hash_net6_data_copy(struct hash_net6_elem *dst, const struct hash_net6_elem *src) { - ipv6_addr_copy(&dst->ip.in6, &src->ip.in6); + dst->ip.in6 = src->ip.in6; dst->cidr = src->cidr; } diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 093cc327020f..611c3359b94d 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -983,7 +983,7 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related, if (!cp) return NF_ACCEPT; - ipv6_addr_copy(&snet.in6, &iph->saddr); + snet.in6 = iph->saddr; return handle_response_icmp(AF_INET6, skb, &snet, cih->nexthdr, cp, pp, offset, sizeof(struct ipv6hdr)); } diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 3cdd479f9b5d..bcf5563e4837 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -603,9 +603,9 @@ sloop: #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) { p += sizeof(struct ip_vs_sync_v6); - ipv6_addr_copy(&s->v6.caddr, &cp->caddr.in6); - ipv6_addr_copy(&s->v6.vaddr, &cp->vaddr.in6); - ipv6_addr_copy(&s->v6.daddr, &cp->daddr.in6); + s->v6.caddr = cp->caddr.in6; + s->v6.vaddr = cp->vaddr.in6; + s->v6.daddr = cp->daddr.in6; } else #endif { diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index aa2d7206ee8a..38a576d05b4b 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -235,7 +235,7 @@ __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr, goto out_err; } } - ipv6_addr_copy(ret_saddr, &fl6.saddr); + *ret_saddr = fl6.saddr; return dst; out_err: @@ -279,7 +279,7 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest, atomic_read(&rt->dst.__refcnt)); } if (ret_saddr) - ipv6_addr_copy(ret_saddr, &dest->dst_saddr.in6); + *ret_saddr = dest->dst_saddr.in6; spin_unlock(&dest->dst_lock); } else { dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm); @@ -705,7 +705,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, /* mangle the packet */ if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) goto tx_error; - ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &cp->daddr.in6); + ipv6_hdr(skb)->daddr = cp->daddr.in6; if (!local || !skb->dev) { /* drop the old route when skb is not shared */ @@ -967,8 +967,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, be16_add_cpu(&iph->payload_len, sizeof(*old_iph)); iph->priority = old_iph->priority; memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl)); - ipv6_addr_copy(&iph->daddr, &cp->daddr.in6); - ipv6_addr_copy(&iph->saddr, &saddr); + iph->daddr = cp->daddr.in6; + iph->saddr = saddr; iph->hop_limit 
= old_iph->hop_limit; /* Another hack: avoid icmp_send in ip_fragment */ diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index f03c2d4539f6..f9368f33e7af 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -750,10 +750,10 @@ static int callforward_do_filter(const union nf_inet_addr *src, struct rt6_info *rt1, *rt2; memset(&fl1, 0, sizeof(fl1)); - ipv6_addr_copy(&fl1.daddr, &src->in6); + fl1.daddr = src->in6; memset(&fl2, 0, sizeof(fl2)); - ipv6_addr_copy(&fl2.daddr, &dst->in6); + fl2.daddr = dst->in6; if (!afinfo->route(&init_net, (struct dst_entry **)&rt1, flowi6_to_flowi(&fl1), false)) { if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 9e63b43faeed..3ecade3966d5 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -161,7 +161,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb, struct flowi6 *fl6 = &fl.u.ip6; memset(fl6, 0, sizeof(*fl6)); - ipv6_addr_copy(&fl6->daddr, &ipv6_hdr(skb)->saddr); + fl6->daddr = ipv6_hdr(skb)->saddr; } rcu_read_lock(); ai = nf_get_afinfo(family); diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c index b77d383cec78..c047de2046ad 100644 --- a/net/netfilter/xt_addrtype.c +++ b/net/netfilter/xt_addrtype.c @@ -42,7 +42,7 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev, int route_err; memset(&flow, 0, sizeof(flow)); - ipv6_addr_copy(&flow.daddr, addr); + flow.daddr = *addr; if (dev) flow.flowi6_oif = dev->ifindex; diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c index 824f184f7a9b..5952237c0c86 100644 --- a/net/netlabel/netlabel_kapi.c +++ b/net/netlabel/netlabel_kapi.c @@ -155,12 +155,12 @@ int netlbl_cfg_unlbl_map_add(const char *domain, if (map6 == NULL) goto cfg_unlbl_map_add_failure; map6->type = NETLBL_NLTYPE_UNLABELED; - ipv6_addr_copy(&map6->list.addr, addr6); + map6->list.addr = *addr6; map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0]; map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1]; map6->list.addr.s6_addr32[2] &= mask6->s6_addr32[2]; map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3]; - ipv6_addr_copy(&map6->list.mask, mask6); + map6->list.mask = *mask6; map6->list.valid = 1; ret_val = netlbl_af6list_add(&map6->list, &addrmap->list6); diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c index bfa555869775..9879300beefd 100644 --- a/net/netlabel/netlabel_mgmt.c +++ b/net/netlabel/netlabel_mgmt.c @@ -216,12 +216,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info, ret_val = -ENOMEM; goto add_failure; } - ipv6_addr_copy(&map->list.addr, addr); + map->list.addr = *addr; map->list.addr.s6_addr32[0] &= mask->s6_addr32[0]; map->list.addr.s6_addr32[1] &= mask->s6_addr32[1]; map->list.addr.s6_addr32[2] &= mask->s6_addr32[2]; map->list.addr.s6_addr32[3] &= mask->s6_addr32[3]; - ipv6_addr_copy(&map->list.mask, mask); + map->list.mask = *mask; map->list.valid = 1; map->type = entry->type; diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index e251c2c88521..049ccd2447d7 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -300,12 +300,12 @@ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface, if (entry == NULL) return -ENOMEM; - ipv6_addr_copy(&entry->list.addr, addr); + entry->list.addr = *addr; entry->list.addr.s6_addr32[0] &= mask->s6_addr32[0]; 
entry->list.addr.s6_addr32[1] &= mask->s6_addr32[1]; entry->list.addr.s6_addr32[2] &= mask->s6_addr32[2]; entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3]; - ipv6_addr_copy(&entry->list.mask, mask); + entry->list.mask = *mask; entry->list.valid = 1; entry->secid = secid; diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 732152f718e0..c329b474eace 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -1244,7 +1244,8 @@ static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) case SIOCADDRT: case SIOCDELRT: case SIOCNRDECOBS: - if (!capable(CAP_NET_ADMIN)) return -EPERM; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; return nr_rt_ioctl(cmd, argp); default: diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index 915a87ba23e1..2cf330162d7e 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c @@ -670,14 +670,17 @@ int nr_rt_ioctl(unsigned int cmd, void __user *arg) case SIOCADDRT: if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct))) return -EFAULT; - if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL) + if (nr_route.ndigis > AX25_MAX_DIGIS) return -EINVAL; - if (nr_route.ndigis < 0 || nr_route.ndigis > AX25_MAX_DIGIS) { - dev_put(dev); + if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL) return -EINVAL; - } switch (nr_route.type) { case NETROM_NODE: + if (strnlen(nr_route.mnemonic, 7) == 7) { + ret = -EINVAL; + break; + } + ret = nr_add_node(&nr_route.callsign, nr_route.mnemonic, &nr_route.neighbour, diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 3925c6578767..fe5ca89abfce 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -126,7 +126,10 @@ static inline int nci_request(struct nci_dev *ndev, static void nci_reset_req(struct nci_dev *ndev, unsigned long opt) { - nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL); + struct nci_core_reset_cmd cmd; + + cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG; + nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd); } static void nci_init_req(struct nci_dev *ndev, unsigned long opt) @@ -136,17 +139,11 @@ static void nci_init_req(struct nci_dev *ndev, unsigned long opt) static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt) { - struct nci_core_conn_create_cmd conn_cmd; struct nci_rf_disc_map_cmd cmd; struct disc_map_config *cfg = cmd.mapping_configs; __u8 *num = &cmd.num_mapping_configs; int i; - /* create static rf connection */ - conn_cmd.target_handle = 0; - conn_cmd.num_target_specific_params = 0; - nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd); - /* set rf mapping configurations */ *num = 0; @@ -470,7 +467,7 @@ static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx, ndev->data_exchange_cb = cb; ndev->data_exchange_cb_context = cb_context; - rc = nci_send_data(ndev, ndev->conn_id, skb); + rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb); if (rc) clear_bit(NCI_DATA_EXCHANGE, &ndev->flags); @@ -726,7 +723,10 @@ static void nci_tx_work(struct work_struct *work) if (!skb) return; - atomic_dec(&ndev->credits_cnt); + /* Check if data flow control is used */ + if (atomic_read(&ndev->credits_cnt) != + NCI_DATA_FLOW_CONTROL_NOT_USED) + atomic_dec(&ndev->credits_cnt); nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d", nci_pbf(skb->data), diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c index e5ed90fc1a9c..511fb96e21bc 100644 --- a/net/nfc/nci/data.c +++ b/net/nfc/nci/data.c @@ -95,7 +95,8 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev, __skb_queue_head_init(&frags_q); while 
(total_len) { - frag_len = min_t(int, total_len, ndev->max_pkt_payload_size); + frag_len = + min_t(int, total_len, ndev->max_data_pkt_payload_size); skb_frag = nci_skb_alloc(ndev, (NCI_DATA_HDR_SIZE + frag_len), @@ -151,7 +152,7 @@ int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb) nfc_dbg("entry, conn_id 0x%x, plen %d", conn_id, skb->len); /* check if the packet need to be fragmented */ - if (skb->len <= ndev->max_pkt_payload_size) { + if (skb->len <= ndev->max_data_pkt_payload_size) { /* no need to fragment packet */ nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST); diff --git a/net/nfc/nci/lib.c b/net/nfc/nci/lib.c index b19dc2fa90e1..e99adcfb1bcf 100644 --- a/net/nfc/nci/lib.c +++ b/net/nfc/nci/lib.c @@ -42,12 +42,9 @@ int nci_to_errno(__u8 code) case NCI_STATUS_REJECTED: return -EBUSY; - case NCI_STATUS_MESSAGE_CORRUPTED: + case NCI_STATUS_RF_FRAME_CORRUPTED: return -EBADMSG; - case NCI_STATUS_BUFFER_FULL: - return -ENOBUFS; - case NCI_STATUS_NOT_INITIALIZED: return -EHOSTDOWN; @@ -80,9 +77,6 @@ int nci_to_errno(__u8 code) case NCI_STATUS_NFCEE_TIMEOUT_ERROR: return -ETIMEDOUT; - case NCI_STATUS_RF_LINK_LOSS_ERROR: - return -ENOLINK; - case NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED: return -EDQUOT; diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c index 96633f5cda4f..c1bf54172c25 100644 --- a/net/nfc/nci/ntf.c +++ b/net/nfc/nci/ntf.c @@ -54,7 +54,7 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev, ntf->conn_entries[i].conn_id, ntf->conn_entries[i].credits); - if (ntf->conn_entries[i].conn_id == ndev->conn_id) { + if (ntf->conn_entries[i].conn_id == NCI_STATIC_RF_CONN_ID) { /* found static rf connection */ atomic_add(ntf->conn_entries[i].credits, &ndev->credits_cnt); @@ -66,22 +66,12 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev, queue_work(ndev->tx_wq, &ndev->tx_work); } -static void nci_rf_field_info_ntf_packet(struct nci_dev *ndev, - struct sk_buff *skb) -{ - struct nci_rf_field_info_ntf *ntf = (void *) skb->data; - - nfc_dbg("entry, rf_field_status %d", ntf->rf_field_status); -} - -static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev, - struct nci_rf_activate_ntf *ntf, __u8 *data) +static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev, + struct nci_rf_intf_activated_ntf *ntf, __u8 *data) { struct rf_tech_specific_params_nfca_poll *nfca_poll; - struct activation_params_nfca_poll_iso_dep *nfca_poll_iso_dep; nfca_poll = &ntf->rf_tech_specific_params.nfca_poll; - nfca_poll_iso_dep = &ntf->activation_params.nfca_poll_iso_dep; nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data)); data += 2; @@ -100,32 +90,32 @@ static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev, if (nfca_poll->sel_res_len != 0) nfca_poll->sel_res = *data++; - ntf->rf_interface_type = *data++; - ntf->activation_params_len = *data++; - - nfc_dbg("sel_res_len %d, sel_res 0x%x, rf_interface_type %d, activation_params_len %d", + nfc_dbg("sel_res_len %d, sel_res 0x%x", nfca_poll->sel_res_len, - nfca_poll->sel_res, - ntf->rf_interface_type, - ntf->activation_params_len); - - switch (ntf->rf_interface_type) { - case NCI_RF_INTERFACE_ISO_DEP: - nfca_poll_iso_dep->rats_res_len = *data++; - if (nfca_poll_iso_dep->rats_res_len > 0) { - memcpy(nfca_poll_iso_dep->rats_res, + nfca_poll->sel_res); + + return data; +} + +static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev, + struct nci_rf_intf_activated_ntf *ntf, __u8 *data) +{ + struct activation_params_nfca_poll_iso_dep *nfca_poll; + + switch 
(ntf->activation_rf_tech_and_mode) { + case NCI_NFC_A_PASSIVE_POLL_MODE: + nfca_poll = &ntf->activation_params.nfca_poll_iso_dep; + nfca_poll->rats_res_len = *data++; + if (nfca_poll->rats_res_len > 0) { + memcpy(nfca_poll->rats_res, data, - nfca_poll_iso_dep->rats_res_len); + nfca_poll->rats_res_len); } break; - case NCI_RF_INTERFACE_FRAME: - /* no activation params */ - break; - default: - nfc_err("unsupported rf_interface_type 0x%x", - ntf->rf_interface_type); + nfc_err("unsupported activation_rf_tech_and_mode 0x%x", + ntf->activation_rf_tech_and_mode); return -EPROTO; } @@ -133,7 +123,7 @@ static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev, } static void nci_target_found(struct nci_dev *ndev, - struct nci_rf_activate_ntf *ntf) + struct nci_rf_intf_activated_ntf *ntf) { struct nfc_target nfc_tgt; @@ -141,6 +131,8 @@ static void nci_target_found(struct nci_dev *ndev, nfc_tgt.supported_protocols = NFC_PROTO_MIFARE_MASK; else if (ntf->rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) /* 4A */ nfc_tgt.supported_protocols = NFC_PROTO_ISO14443_MASK; + else + nfc_tgt.supported_protocols = 0; nfc_tgt.sens_res = ntf->rf_tech_specific_params.nfca_poll.sens_res; nfc_tgt.sel_res = ntf->rf_tech_specific_params.nfca_poll.sel_res; @@ -158,49 +150,86 @@ static void nci_target_found(struct nci_dev *ndev, nfc_targets_found(ndev->nfc_dev, &nfc_tgt, 1); } -static void nci_rf_activate_ntf_packet(struct nci_dev *ndev, - struct sk_buff *skb) +static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev, + struct sk_buff *skb) { - struct nci_rf_activate_ntf ntf; + struct nci_rf_intf_activated_ntf ntf; __u8 *data = skb->data; - int rc = -1; + int err = 0; clear_bit(NCI_DISCOVERY, &ndev->flags); set_bit(NCI_POLL_ACTIVE, &ndev->flags); - ntf.target_handle = *data++; + ntf.rf_discovery_id = *data++; + ntf.rf_interface_type = *data++; ntf.rf_protocol = *data++; - ntf.rf_tech_and_mode = *data++; + ntf.activation_rf_tech_and_mode = *data++; ntf.rf_tech_specific_params_len = *data++; - nfc_dbg("target_handle %d, rf_protocol 0x%x, rf_tech_and_mode 0x%x, rf_tech_specific_params_len %d", - ntf.target_handle, - ntf.rf_protocol, - ntf.rf_tech_and_mode, + nfc_dbg("rf_discovery_id %d", ntf.rf_discovery_id); + nfc_dbg("rf_interface_type 0x%x", ntf.rf_interface_type); + nfc_dbg("rf_protocol 0x%x", ntf.rf_protocol); + nfc_dbg("activation_rf_tech_and_mode 0x%x", + ntf.activation_rf_tech_and_mode); + nfc_dbg("rf_tech_specific_params_len %d", ntf.rf_tech_specific_params_len); - switch (ntf.rf_tech_and_mode) { - case NCI_NFC_A_PASSIVE_POLL_MODE: - rc = nci_rf_activate_nfca_passive_poll(ndev, &ntf, - data); - break; + if (ntf.rf_tech_specific_params_len > 0) { + switch (ntf.activation_rf_tech_and_mode) { + case NCI_NFC_A_PASSIVE_POLL_MODE: + data = nci_extract_rf_params_nfca_passive_poll(ndev, + &ntf, data); + break; + + default: + nfc_err("unsupported activation_rf_tech_and_mode 0x%x", + ntf.activation_rf_tech_and_mode); + return; + } + } - default: - nfc_err("unsupported rf_tech_and_mode 0x%x", - ntf.rf_tech_and_mode); - return; + ntf.data_exch_rf_tech_and_mode = *data++; + ntf.data_exch_tx_bit_rate = *data++; + ntf.data_exch_rx_bit_rate = *data++; + ntf.activation_params_len = *data++; + + nfc_dbg("data_exch_rf_tech_and_mode 0x%x", + ntf.data_exch_rf_tech_and_mode); + nfc_dbg("data_exch_tx_bit_rate 0x%x", + ntf.data_exch_tx_bit_rate); + nfc_dbg("data_exch_rx_bit_rate 0x%x", + ntf.data_exch_rx_bit_rate); + nfc_dbg("activation_params_len %d", + ntf.activation_params_len); + + if (ntf.activation_params_len > 0) { + switch 
(ntf.rf_interface_type) { + case NCI_RF_INTERFACE_ISO_DEP: + err = nci_extract_activation_params_iso_dep(ndev, + &ntf, data); + break; + + case NCI_RF_INTERFACE_FRAME: + /* no activation params */ + break; + + default: + nfc_err("unsupported rf_interface_type 0x%x", + ntf.rf_interface_type); + return; + } } - if (!rc) + if (!err) nci_target_found(ndev, &ntf); } static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { - __u8 type = skb->data[0]; + struct nci_rf_deactivate_ntf *ntf = (void *) skb->data; - nfc_dbg("entry, type 0x%x", type); + nfc_dbg("entry, type 0x%x, reason 0x%x", ntf->type, ntf->reason); clear_bit(NCI_POLL_ACTIVE, &ndev->flags); ndev->target_active_prot = 0; @@ -214,6 +243,9 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev, ndev->rx_data_reassembly = 0; } + /* set the available credits to initial value */ + atomic_set(&ndev->credits_cnt, ndev->initial_num_credits); + /* complete the data exchange transaction, if exists */ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags)) nci_data_exchange_complete(ndev, NULL, -EIO); @@ -237,12 +269,8 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_core_conn_credits_ntf_packet(ndev, skb); break; - case NCI_OP_RF_FIELD_INFO_NTF: - nci_rf_field_info_ntf_packet(ndev, skb); - break; - - case NCI_OP_RF_ACTIVATE_NTF: - nci_rf_activate_ntf_packet(ndev, skb); + case NCI_OP_RF_INTF_ACTIVATED_NTF: + nci_rf_intf_activated_ntf_packet(ndev, skb); break; case NCI_OP_RF_DEACTIVATE_NTF: diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c index 0403d4cd0917..0591f5aff89f 100644 --- a/net/nfc/nci/rsp.c +++ b/net/nfc/nci/rsp.c @@ -42,10 +42,11 @@ static void nci_core_reset_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nfc_dbg("entry, status 0x%x", rsp->status); - if (rsp->status == NCI_STATUS_OK) + if (rsp->status == NCI_STATUS_OK) { ndev->nci_ver = rsp->nci_ver; - - nfc_dbg("nci_ver 0x%x", ndev->nci_ver); + nfc_dbg("nci_ver 0x%x, config_status 0x%x", + rsp->nci_ver, rsp->config_status); + } nci_req_complete(ndev, rsp->status); } @@ -58,13 +59,13 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nfc_dbg("entry, status 0x%x", rsp_1->status); if (rsp_1->status != NCI_STATUS_OK) - return; + goto exit; ndev->nfcc_features = __le32_to_cpu(rsp_1->nfcc_features); ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces; if (ndev->num_supported_rf_interfaces > - NCI_MAX_SUPPORTED_RF_INTERFACES) { + NCI_MAX_SUPPORTED_RF_INTERFACES) { ndev->num_supported_rf_interfaces = NCI_MAX_SUPPORTED_RF_INTERFACES; } @@ -73,20 +74,26 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) rsp_1->supported_rf_interfaces, ndev->num_supported_rf_interfaces); - rsp_2 = (void *) (skb->data + 6 + ndev->num_supported_rf_interfaces); + rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces); ndev->max_logical_connections = rsp_2->max_logical_connections; ndev->max_routing_table_size = __le16_to_cpu(rsp_2->max_routing_table_size); - ndev->max_control_packet_payload_length = - rsp_2->max_control_packet_payload_length; - ndev->rf_sending_buffer_size = - __le16_to_cpu(rsp_2->rf_sending_buffer_size); - ndev->rf_receiving_buffer_size = - __le16_to_cpu(rsp_2->rf_receiving_buffer_size); - ndev->manufacturer_id = - __le16_to_cpu(rsp_2->manufacturer_id); + ndev->max_ctrl_pkt_payload_len = + rsp_2->max_ctrl_pkt_payload_len; + ndev->max_size_for_large_params = + __le16_to_cpu(rsp_2->max_size_for_large_params); + ndev->max_data_pkt_payload_size = 
+ rsp_2->max_data_pkt_payload_size; + ndev->initial_num_credits = + rsp_2->initial_num_credits; + ndev->manufact_id = + rsp_2->manufact_id; + ndev->manufact_specific_info = + __le32_to_cpu(rsp_2->manufact_specific_info); + + atomic_set(&ndev->credits_cnt, ndev->initial_num_credits); nfc_dbg("nfcc_features 0x%x", ndev->nfcc_features); @@ -104,39 +111,23 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) ndev->max_logical_connections); nfc_dbg("max_routing_table_size %d", ndev->max_routing_table_size); - nfc_dbg("max_control_packet_payload_length %d", - ndev->max_control_packet_payload_length); - nfc_dbg("rf_sending_buffer_size %d", - ndev->rf_sending_buffer_size); - nfc_dbg("rf_receiving_buffer_size %d", - ndev->rf_receiving_buffer_size); - nfc_dbg("manufacturer_id 0x%x", - ndev->manufacturer_id); - + nfc_dbg("max_ctrl_pkt_payload_len %d", + ndev->max_ctrl_pkt_payload_len); + nfc_dbg("max_size_for_large_params %d", + ndev->max_size_for_large_params); + nfc_dbg("max_data_pkt_payload_size %d", + ndev->max_data_pkt_payload_size); + nfc_dbg("initial_num_credits %d", + ndev->initial_num_credits); + nfc_dbg("manufact_id 0x%x", + ndev->manufact_id); + nfc_dbg("manufact_specific_info 0x%x", + ndev->manufact_specific_info); + +exit: nci_req_complete(ndev, rsp_1->status); } -static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev, - struct sk_buff *skb) -{ - struct nci_core_conn_create_rsp *rsp = (void *) skb->data; - - nfc_dbg("entry, status 0x%x", rsp->status); - - if (rsp->status != NCI_STATUS_OK) - return; - - ndev->max_pkt_payload_size = rsp->max_pkt_payload_size; - ndev->initial_num_credits = rsp->initial_num_credits; - ndev->conn_id = rsp->conn_id; - - atomic_set(&ndev->credits_cnt, ndev->initial_num_credits); - - nfc_dbg("max_pkt_payload_size %d", ndev->max_pkt_payload_size); - nfc_dbg("initial_num_credits %d", ndev->initial_num_credits); - nfc_dbg("conn_id %d", ndev->conn_id); -} - static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) { @@ -196,10 +187,6 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) nci_core_init_rsp_packet(ndev, skb); break; - case NCI_OP_CORE_CONN_CREATE_RSP: - nci_core_conn_create_rsp_packet(ndev, skb); - break; - case NCI_OP_RF_DISCOVER_MAP_RSP: nci_rf_disc_map_rsp_packet(ndev, skb); break; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 82a6f34d39d0..0da505c9ac23 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1499,10 +1499,11 @@ retry: if (!skb) { size_t reserved = LL_RESERVED_SPACE(dev); + int tlen = dev->needed_tailroom; unsigned int hhlen = dev->header_ops ? 
dev->hard_header_len : 0; rcu_read_unlock(); - skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL); + skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL); if (skb == NULL) return -ENOBUFS; /* FIXME: Save some space for broken drivers that write a hard @@ -1944,7 +1945,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb) static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, void *frame, struct net_device *dev, int size_max, - __be16 proto, unsigned char *addr) + __be16 proto, unsigned char *addr, int hlen) { union { struct tpacket_hdr *h1; @@ -1978,7 +1979,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, return -EMSGSIZE; } - skb_reserve(skb, LL_RESERVED_SPACE(dev)); + skb_reserve(skb, hlen); skb_reset_network_header(skb); data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll); @@ -2053,6 +2054,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) unsigned char *addr; int len_sum = 0; int status = 0; + int hlen, tlen; mutex_lock(&po->pg_vec_lock); @@ -2101,16 +2103,17 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) } status = TP_STATUS_SEND_REQUEST; + hlen = LL_RESERVED_SPACE(dev); + tlen = dev->needed_tailroom; skb = sock_alloc_send_skb(&po->sk, - LL_ALLOCATED_SPACE(dev) - + sizeof(struct sockaddr_ll), + hlen + tlen + sizeof(struct sockaddr_ll), 0, &err); if (unlikely(skb == NULL)) goto out_status; tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, - addr); + addr, hlen); if (unlikely(tp_len < 0)) { if (po->tp_loss) { @@ -2207,6 +2210,7 @@ static int packet_snd(struct socket *sock, int vnet_hdr_len; struct packet_sock *po = pkt_sk(sk); unsigned short gso_type = 0; + int hlen, tlen; /* * Get and verify the address. @@ -2291,8 +2295,9 @@ static int packet_snd(struct socket *sock, goto out_unlock; err = -ENOBUFS; - skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev), - LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len, + hlen = LL_RESERVED_SPACE(dev); + tlen = dev->needed_tailroom; + skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len, msg->msg_flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out_unlock; diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 2ba6e9fb4cbc..9f60008740e3 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -534,6 +534,29 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb) return pipe_handler_send_created_ind(sk); } +static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb) +{ + struct pnpipehdr *hdr = pnp_hdr(skb); + + if (hdr->error_code != PN_PIPE_NO_ERROR) + return -ECONNREFUSED; + + return pep_indicate(sk, PNS_PIPE_ENABLED_IND, 0 /* sub-blocks */, + NULL, 0, GFP_ATOMIC); + +} + +static void pipe_start_flow_control(struct sock *sk) +{ + struct pep_sock *pn = pep_sk(sk); + + if (!pn_flow_safe(pn->tx_fc)) { + atomic_set(&pn->tx_credits, 1); + sk->sk_write_space(sk); + } + pipe_grant_credits(sk, GFP_ATOMIC); +} + /* Queue an skb to an actively connected sock. * Socket lock must be held. 
*/ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb) @@ -579,13 +602,25 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb) sk->sk_state = TCP_CLOSE_WAIT; break; } + if (pn->init_enable == PN_PIPE_DISABLE) + sk->sk_state = TCP_SYN_RECV; + else { + sk->sk_state = TCP_ESTABLISHED; + pipe_start_flow_control(sk); + } + break; - sk->sk_state = TCP_ESTABLISHED; - if (!pn_flow_safe(pn->tx_fc)) { - atomic_set(&pn->tx_credits, 1); - sk->sk_write_space(sk); + case PNS_PEP_ENABLE_RESP: + if (sk->sk_state != TCP_SYN_SENT) + break; + + if (pep_enableresp_rcv(sk, skb)) { + sk->sk_state = TCP_CLOSE_WAIT; + break; } - pipe_grant_credits(sk, GFP_ATOMIC); + + sk->sk_state = TCP_ESTABLISHED; + pipe_start_flow_control(sk); break; case PNS_PEP_DISCONNECT_RESP: @@ -864,14 +899,32 @@ static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len) int err; u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD }; - pn->pipe_handle = 1; /* anything but INVALID_HANDLE */ + if (pn->pipe_handle == PN_PIPE_INVALID_HANDLE) + pn->pipe_handle = 1; /* anything but INVALID_HANDLE */ + err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ, - PN_PIPE_ENABLE, data, 4); + pn->init_enable, data, 4); if (err) { pn->pipe_handle = PN_PIPE_INVALID_HANDLE; return err; } + sk->sk_state = TCP_SYN_SENT; + + return 0; +} + +static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len) +{ + int err; + + err = pipe_handler_request(sk, PNS_PEP_ENABLE_REQ, PAD, + NULL, 0); + if (err) + return err; + + sk->sk_state = TCP_SYN_SENT; + return 0; } @@ -879,11 +932,14 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg) { struct pep_sock *pn = pep_sk(sk); int answ; + int ret = -ENOIOCTLCMD; switch (cmd) { case SIOCINQ: - if (sk->sk_state == TCP_LISTEN) - return -EINVAL; + if (sk->sk_state == TCP_LISTEN) { + ret = -EINVAL; + break; + } lock_sock(sk); if (sock_flag(sk, SOCK_URGINLINE) && @@ -894,10 +950,22 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg) else answ = 0; release_sock(sk); - return put_user(answ, (int __user *)arg); + ret = put_user(answ, (int __user *)arg); + break; + + case SIOCPNENABLEPIPE: + lock_sock(sk); + if (sk->sk_state == TCP_SYN_SENT) + ret = -EBUSY; + else if (sk->sk_state == TCP_ESTABLISHED) + ret = -EISCONN; + else + ret = pep_sock_enable(sk, NULL, 0); + release_sock(sk); + break; } - return -ENOIOCTLCMD; + return ret; } static int pep_init(struct sock *sk) @@ -960,6 +1028,18 @@ static int pep_setsockopt(struct sock *sk, int level, int optname, } goto out_norel; + case PNPIPE_HANDLE: + if ((sk->sk_state == TCP_CLOSE) && + (val >= 0) && (val < PN_PIPE_INVALID_HANDLE)) + pn->pipe_handle = val; + else + err = -EINVAL; + break; + + case PNPIPE_INITSTATE: + pn->init_enable = !!val; + break; + default: err = -ENOPROTOOPT; } @@ -995,6 +1075,10 @@ static int pep_getsockopt(struct sock *sk, int level, int optname, return -EINVAL; break; + case PNPIPE_INITSTATE: + val = pn->init_enable; + break; + default: return -ENOPROTOOPT; } diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 5be19575c340..354760ebbbd2 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -644,7 +644,7 @@ static ssize_t rfkill_soft_store(struct device *dev, if (!capable(CAP_NET_ADMIN)) return -EPERM; - err = strict_strtoul(buf, 0, &state); + err = kstrtoul(buf, 0, &state); if (err) return err; @@ -688,7 +688,7 @@ static ssize_t rfkill_state_store(struct device *dev, if (!capable(CAP_NET_ADMIN)) return -EPERM; - err = strict_strtoul(buf, 0, &state); + err = 
kstrtoul(buf, 0, &state); if (err) return err; diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index 43ea7de2fc8e..4cba13e46ffd 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c @@ -306,10 +306,9 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td, td->data_len = len; if (len > 0) { - td->data = kmalloc(len, GFP_KERNEL); + td->data = kmemdup(xdr, len, GFP_KERNEL); if (!td->data) return -ENOMEM; - memcpy(td->data, xdr, len); len = (len + 3) & ~3; toklen -= len; xdr += len >> 2; @@ -401,10 +400,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, _debug("ticket len %u", len); if (len > 0) { - *_ticket = kmalloc(len, GFP_KERNEL); + *_ticket = kmemdup(xdr, len, GFP_KERNEL); if (!*_ticket) return -ENOMEM; - memcpy(*_ticket, xdr, len); len = (len + 3) & ~3; toklen -= len; xdr += len >> 2; diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 7b582300d051..51ff19485e12 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -26,6 +26,8 @@ #include <net/pkt_cls.h> #include <net/ip.h> #include <net/route.h> +#include <net/flow_keys.h> + #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) #include <net/netfilter/nf_conntrack.h> #endif @@ -66,134 +68,37 @@ static inline u32 addr_fold(void *addr) return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0); } -static u32 flow_get_src(const struct sk_buff *skb, int nhoff) +static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow) { - __be32 *data = NULL, hdata; - - switch (skb->protocol) { - case htons(ETH_P_IP): - data = skb_header_pointer(skb, - nhoff + offsetof(struct iphdr, - saddr), - 4, &hdata); - break; - case htons(ETH_P_IPV6): - data = skb_header_pointer(skb, - nhoff + offsetof(struct ipv6hdr, - saddr.s6_addr32[3]), - 4, &hdata); - break; - } - - if (data) - return ntohl(*data); + if (flow->src) + return ntohl(flow->src); return addr_fold(skb->sk); } -static u32 flow_get_dst(const struct sk_buff *skb, int nhoff) +static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow) { - __be32 *data = NULL, hdata; - - switch (skb->protocol) { - case htons(ETH_P_IP): - data = skb_header_pointer(skb, - nhoff + offsetof(struct iphdr, - daddr), - 4, &hdata); - break; - case htons(ETH_P_IPV6): - data = skb_header_pointer(skb, - nhoff + offsetof(struct ipv6hdr, - daddr.s6_addr32[3]), - 4, &hdata); - break; - } - - if (data) - return ntohl(*data); + if (flow->dst) + return ntohl(flow->dst); return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; } -static u32 flow_get_proto(const struct sk_buff *skb, int nhoff) +static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow) { - __u8 *data = NULL, hdata; - - switch (skb->protocol) { - case htons(ETH_P_IP): - data = skb_header_pointer(skb, - nhoff + offsetof(struct iphdr, - protocol), - 1, &hdata); - break; - case htons(ETH_P_IPV6): - data = skb_header_pointer(skb, - nhoff + offsetof(struct ipv6hdr, - nexthdr), - 1, &hdata); - break; - } - if (data) - return *data; - return 0; + return flow->ip_proto; } -/* helper function to get either src or dst port */ -static __be16 *flow_get_proto_common(const struct sk_buff *skb, int nhoff, - __be16 *_port, int dst) +static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow) { - __be16 *port = NULL; - int poff; - - switch (skb->protocol) { - case htons(ETH_P_IP): { - struct iphdr *iph, _iph; - - iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); - if (!iph) - break; - if 
(ip_is_fragment(iph)) - break; - poff = proto_ports_offset(iph->protocol); - if (poff >= 0) - port = skb_header_pointer(skb, - nhoff + iph->ihl * 4 + poff + dst, - sizeof(*_port), _port); - break; - } - case htons(ETH_P_IPV6): { - struct ipv6hdr *iph, _iph; - - iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); - if (!iph) - break; - poff = proto_ports_offset(iph->nexthdr); - if (poff >= 0) - port = skb_header_pointer(skb, - nhoff + sizeof(*iph) + poff + dst, - sizeof(*_port), _port); - break; - } - } - - return port; -} - -static u32 flow_get_proto_src(const struct sk_buff *skb, int nhoff) -{ - __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 0); - - if (port) - return ntohs(*port); + if (flow->ports) + return ntohs(flow->port16[0]); return addr_fold(skb->sk); } -static u32 flow_get_proto_dst(const struct sk_buff *skb, int nhoff) +static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow) { - __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 2); - - if (port) - return ntohs(*port); + if (flow->ports) + return ntohs(flow->port16[1]); return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol; } @@ -239,7 +144,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb) }) #endif -static u32 flow_get_nfct_src(const struct sk_buff *skb, int nhoff) +static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow) { switch (skb->protocol) { case htons(ETH_P_IP): @@ -248,10 +153,10 @@ static u32 flow_get_nfct_src(const struct sk_buff *skb, int nhoff) return ntohl(CTTUPLE(skb, src.u3.ip6[3])); } fallback: - return flow_get_src(skb, nhoff); + return flow_get_src(skb, flow); } -static u32 flow_get_nfct_dst(const struct sk_buff *skb, int nhoff) +static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow) { switch (skb->protocol) { case htons(ETH_P_IP): @@ -260,21 +165,21 @@ static u32 flow_get_nfct_dst(const struct sk_buff *skb, int nhoff) return ntohl(CTTUPLE(skb, dst.u3.ip6[3])); } fallback: - return flow_get_dst(skb, nhoff); + return flow_get_dst(skb, flow); } -static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, int nhoff) +static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow) { return ntohs(CTTUPLE(skb, src.u.all)); fallback: - return flow_get_proto_src(skb, nhoff); + return flow_get_proto_src(skb, flow); } -static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, int nhoff) +static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow) { return ntohs(CTTUPLE(skb, dst.u.all)); fallback: - return flow_get_proto_dst(skb, nhoff); + return flow_get_proto_dst(skb, flow); } static u32 flow_get_rtclassid(const struct sk_buff *skb) @@ -314,21 +219,19 @@ static u32 flow_get_rxhash(struct sk_buff *skb) return skb_get_rxhash(skb); } -static u32 flow_key_get(struct sk_buff *skb, int key) +static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow) { - int nhoff = skb_network_offset(skb); - switch (key) { case FLOW_KEY_SRC: - return flow_get_src(skb, nhoff); + return flow_get_src(skb, flow); case FLOW_KEY_DST: - return flow_get_dst(skb, nhoff); + return flow_get_dst(skb, flow); case FLOW_KEY_PROTO: - return flow_get_proto(skb, nhoff); + return flow_get_proto(skb, flow); case FLOW_KEY_PROTO_SRC: - return flow_get_proto_src(skb, nhoff); + return flow_get_proto_src(skb, flow); case FLOW_KEY_PROTO_DST: - return flow_get_proto_dst(skb, nhoff); + return flow_get_proto_dst(skb, flow); case FLOW_KEY_IIF: 
return flow_get_iif(skb); case FLOW_KEY_PRIORITY: @@ -338,13 +241,13 @@ static u32 flow_key_get(struct sk_buff *skb, int key) case FLOW_KEY_NFCT: return flow_get_nfct(skb); case FLOW_KEY_NFCT_SRC: - return flow_get_nfct_src(skb, nhoff); + return flow_get_nfct_src(skb, flow); case FLOW_KEY_NFCT_DST: - return flow_get_nfct_dst(skb, nhoff); + return flow_get_nfct_dst(skb, flow); case FLOW_KEY_NFCT_PROTO_SRC: - return flow_get_nfct_proto_src(skb, nhoff); + return flow_get_nfct_proto_src(skb, flow); case FLOW_KEY_NFCT_PROTO_DST: - return flow_get_nfct_proto_dst(skb, nhoff); + return flow_get_nfct_proto_dst(skb, flow); case FLOW_KEY_RTCLASSID: return flow_get_rtclassid(skb); case FLOW_KEY_SKUID: @@ -361,6 +264,16 @@ static u32 flow_key_get(struct sk_buff *skb, int key) } } +#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) | \ + (1 << FLOW_KEY_DST) | \ + (1 << FLOW_KEY_PROTO) | \ + (1 << FLOW_KEY_PROTO_SRC) | \ + (1 << FLOW_KEY_PROTO_DST) | \ + (1 << FLOW_KEY_NFCT_SRC) | \ + (1 << FLOW_KEY_NFCT_DST) | \ + (1 << FLOW_KEY_NFCT_PROTO_SRC) | \ + (1 << FLOW_KEY_NFCT_PROTO_DST)) + static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res) { @@ -373,16 +286,19 @@ static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp, list_for_each_entry(f, &head->filters, list) { u32 keys[f->nkeys]; + struct flow_keys flow_keys; if (!tcf_em_tree_match(skb, &f->ematches, NULL)) continue; keymask = f->keymask; + if (keymask & FLOW_KEYS_NEEDED) + skb_flow_dissect(skb, &flow_keys); for (n = 0; n < f->nkeys; n++) { key = ffs(keymask) - 1; keymask &= ~(1 << key); - keys[n] = flow_key_get(skb, key); + keys[n] = flow_key_get(skb, key, &flow_keys); } if (f->mode == FLOW_MODE_HASH) diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index 3422b25df9e4..205d369a217c 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -19,10 +19,7 @@ #include <net/pkt_sched.h> #include <net/inet_ecn.h> #include <net/red.h> -#include <linux/ip.h> -#include <net/ip.h> -#include <linux/ipv6.h> -#include <net/ipv6.h> +#include <net/flow_keys.h> /* CHOKe stateless AQM for fair bandwidth allocation @@ -142,85 +139,10 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx) --sch->q.qlen; } -/* - * Compare flow of two packets - * Returns true only if source and destination address and port match. 
- * false for special cases - */ -static bool choke_match_flow(struct sk_buff *skb1, - struct sk_buff *skb2) -{ - int off1, off2, poff; - const u32 *ports1, *ports2; - u8 ip_proto; - __u32 hash1; - - if (skb1->protocol != skb2->protocol) - return false; - - /* Use hash value as quick check - * Assumes that __skb_get_rxhash makes IP header and ports linear - */ - hash1 = skb_get_rxhash(skb1); - if (!hash1 || hash1 != skb_get_rxhash(skb2)) - return false; - - /* Probably match, but be sure to avoid hash collisions */ - off1 = skb_network_offset(skb1); - off2 = skb_network_offset(skb2); - - switch (skb1->protocol) { - case __constant_htons(ETH_P_IP): { - const struct iphdr *ip1, *ip2; - - ip1 = (const struct iphdr *) (skb1->data + off1); - ip2 = (const struct iphdr *) (skb2->data + off2); - - ip_proto = ip1->protocol; - if (ip_proto != ip2->protocol || - ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr) - return false; - - if (ip_is_fragment(ip1) | ip_is_fragment(ip2)) - ip_proto = 0; - off1 += ip1->ihl * 4; - off2 += ip2->ihl * 4; - break; - } - - case __constant_htons(ETH_P_IPV6): { - const struct ipv6hdr *ip1, *ip2; - - ip1 = (const struct ipv6hdr *) (skb1->data + off1); - ip2 = (const struct ipv6hdr *) (skb2->data + off2); - - ip_proto = ip1->nexthdr; - if (ip_proto != ip2->nexthdr || - ipv6_addr_cmp(&ip1->saddr, &ip2->saddr) || - ipv6_addr_cmp(&ip1->daddr, &ip2->daddr)) - return false; - off1 += 40; - off2 += 40; - } - - default: /* Maybe compare MAC header here? */ - return false; - } - - poff = proto_ports_offset(ip_proto); - if (poff < 0) - return true; - - off1 += poff; - off2 += poff; - - ports1 = (__force u32 *)(skb1->data + off1); - ports2 = (__force u32 *)(skb2->data + off2); - return *ports1 == *ports2; -} - struct choke_skb_cb { - u16 classid; + u16 classid; + u8 keys_valid; + struct flow_keys keys; }; static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb) @@ -241,6 +163,32 @@ static u16 choke_get_classid(const struct sk_buff *skb) } /* + * Compare flow of two packets + * Returns true only if source and destination address and port match. + * false for special cases + */ +static bool choke_match_flow(struct sk_buff *skb1, + struct sk_buff *skb2) +{ + if (skb1->protocol != skb2->protocol) + return false; + + if (!choke_skb_cb(skb1)->keys_valid) { + choke_skb_cb(skb1)->keys_valid = 1; + skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys); + } + + if (!choke_skb_cb(skb2)->keys_valid) { + choke_skb_cb(skb2)->keys_valid = 1; + skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys); + } + + return !memcmp(&choke_skb_cb(skb1)->keys, + &choke_skb_cb(skb2)->keys, + sizeof(struct flow_keys)); +} + +/* * Classify flow using either: * 1. pre-existing classification result in skb * 2. 
fast internal classification @@ -326,6 +274,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch) goto other_drop; /* Packet was eaten by filter */ } + choke_skb_cb(skb)->keys_valid = 0; /* Compute average queue usage (see RED) */ p->qavg = red_calc_qavg(p, sch->q.qlen); if (red_is_idling(p)) diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 69fca2798804..67fc573e013a 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -60,7 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q) /* check the reason of requeuing without tx lock first */ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); - if (!netif_tx_queue_frozen_or_stopped(txq)) { + if (!netif_xmit_frozen_or_stopped(txq)) { q->gso_skb = NULL; q->q.qlen--; } else @@ -121,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, spin_unlock(root_lock); HARD_TX_LOCK(dev, txq, smp_processor_id()); - if (!netif_tx_queue_frozen_or_stopped(txq)) + if (!netif_xmit_frozen_or_stopped(txq)) ret = dev_hard_start_xmit(skb, dev, txq); HARD_TX_UNLOCK(dev, txq); @@ -143,7 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, ret = dev_requeue_skb(skb, q); } - if (ret && netif_tx_queue_frozen_or_stopped(txq)) + if (ret && netif_xmit_frozen_or_stopped(txq)) ret = 0; return ret; @@ -242,10 +242,11 @@ static void dev_watchdog(unsigned long arg) * old device drivers set dev->trans_start */ trans_start = txq->trans_start ? : dev->trans_start; - if (netif_tx_queue_stopped(txq) && + if (netif_xmit_stopped(txq) && time_after(jiffies, (trans_start + dev->watchdog_timeo))) { some_queue_timedout = 1; + txq->trans_timeout++; break; } } diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index edc1950e0e77..49131d7a7446 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -107,7 +107,8 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch) /* Check that target subqueue is available before * pulling an skb to avoid head-of-line blocking. */ - if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) { + if (!netif_xmit_stopped( + netdev_get_tx_queue(qdisc_dev(sch), q->curband))) { qdisc = q->queues[q->curband]; skb = qdisc->dequeue(qdisc); if (skb) { @@ -138,7 +139,8 @@ static struct sk_buff *multiq_peek(struct Qdisc *sch) /* Check that target subqueue is available before * pulling an skb to avoid head-of-line blocking. */ - if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) { + if (!netif_xmit_stopped( + netdev_get_tx_queue(qdisc_dev(sch), curband))) { qdisc = q->queues[curband]; skb = qdisc->ops->peek(qdisc); if (skb) diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index eb3b9a86c6ed..3bfd73344f76 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -79,6 +79,7 @@ struct netem_sched_data { u32 duplicate; u32 reorder; u32 corrupt; + u32 rate; struct crndstate { u32 last; @@ -298,6 +299,14 @@ static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma, return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu; } +static psched_time_t packet_len_2_sched_time(unsigned int len, u32 rate) +{ + u64 ticks = (u64)len * NSEC_PER_SEC; + + do_div(ticks, rate); + return PSCHED_NS2TICKS(ticks); +} + /* * Insert one skb into qdisc. * Note: parent depends on return value to account for queue length. 
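Editor's note: packet_len_2_sched_time() above turns a packet length and the configured rate into a transmission delay, doing the division in nanoseconds before scaling to scheduler ticks. A userspace sketch of the same arithmetic, assuming (as the tc_netem_rate field suggests) that the rate is given in bytes per second:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* transmission time of one packet, in nanoseconds */
static uint64_t tx_time_ns(unsigned int len_bytes, uint32_t rate_bytes_per_sec)
{
	return (uint64_t)len_bytes * NSEC_PER_SEC / rate_bytes_per_sec;
}

int main(void)
{
	/* 1500 bytes at 125000 B/s (1 Mbit/s) -> 12000000 ns = 12 ms */
	printf("%llu ns\n", (unsigned long long)tx_time_ns(1500, 125000));
	return 0;
}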
@@ -371,6 +380,24 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) &q->delay_cor, q->delay_dist); now = psched_get_time(); + + if (q->rate) { + struct sk_buff_head *list = &q->qdisc->q; + + delay += packet_len_2_sched_time(skb->len, q->rate); + + if (!skb_queue_empty(list)) { + /* + * Last packet in queue is reference point (now). + * First packet in queue is already in flight, + * calculate this time bonus and substract + * from delay. + */ + delay -= now - netem_skb_cb(skb_peek(list))->time_to_send; + now = netem_skb_cb(skb_peek_tail(list))->time_to_send; + } + } + cb->time_to_send = now + delay; ++q->counter; ret = qdisc_enqueue(skb, q->qdisc); @@ -535,6 +562,14 @@ static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr) init_crandom(&q->corrupt_cor, r->correlation); } +static void get_rate(struct Qdisc *sch, const struct nlattr *attr) +{ + struct netem_sched_data *q = qdisc_priv(sch); + const struct tc_netem_rate *r = nla_data(attr); + + q->rate = r->rate; +} + static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr) { struct netem_sched_data *q = qdisc_priv(sch); @@ -594,6 +629,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = { [TCA_NETEM_CORR] = { .len = sizeof(struct tc_netem_corr) }, [TCA_NETEM_REORDER] = { .len = sizeof(struct tc_netem_reorder) }, [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, + [TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) }, [TCA_NETEM_LOSS] = { .type = NLA_NESTED }, }; @@ -666,6 +702,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt) if (tb[TCA_NETEM_CORRUPT]) get_corrupt(sch, tb[TCA_NETEM_CORRUPT]); + if (tb[TCA_NETEM_RATE]) + get_rate(sch, tb[TCA_NETEM_RATE]); + q->loss_model = CLG_RANDOM; if (tb[TCA_NETEM_LOSS]) ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]); @@ -846,6 +885,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) struct tc_netem_corr cor; struct tc_netem_reorder reorder; struct tc_netem_corrupt corrupt; + struct tc_netem_rate rate; qopt.latency = q->latency; qopt.jitter = q->jitter; @@ -868,6 +908,9 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) corrupt.correlation = q->corrupt_cor.rho; NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt); + rate.rate = q->rate; + NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate); + if (dump_loss_model(q, skb) != 0) goto nla_put_failure; diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index e83c272c0325..96e42cae4c7a 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -26,6 +26,7 @@ #include <net/ip.h> #include <net/pkt_sched.h> #include <net/inet_ecn.h> +#include <net/flow_keys.h> /* * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level) @@ -286,6 +287,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) u32 minqlen = ~0; u32 r, slot, salt, sfbhash; int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; + struct flow_keys keys; if (unlikely(sch->q.qlen >= q->limit)) { sch->qstats.overlimits++; @@ -309,13 +311,19 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) /* If using external classifiers, get result and record it. 
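Editor's note: both the SFB hunk just below and the SFQ rewrite further down hash the dissected (dst, src ^ ip_proto, ports) triple together with a per-qdisc perturbation instead of parsing IP headers by hand. A standalone sketch of that idea; the mixer here is a trivial stand-in for jhash_3words(), not the kernel's hash function.

#include <stdint.h>
#include <stdio.h>

struct flow_keys_demo {
	uint32_t src, dst, ports;
	uint8_t  ip_proto;
};

/* toy 3-word mixer standing in for jhash_3words() */
static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
	uint32_t h = seed ^ 0x9e3779b9u;

	h = (h ^ a) * 0x85ebca6bu;
	h = (h ^ b) * 0xc2b2ae35u;
	h = (h ^ c) * 0x27d4eb2fu;
	return h ^ (h >> 16);
}

static uint32_t flow_hash(const struct flow_keys_demo *k,
			  uint32_t perturbation, uint32_t divisor)
{
	uint32_t h = mix3(k->dst, k->src ^ k->ip_proto, k->ports, perturbation);

	return h & (divisor - 1);       /* divisor is a power of two, as in SFQ */
}

int main(void)
{
	struct flow_keys_demo k = { .src = 0x0a000001, .dst = 0x0a000002,
				    .ports = (80u << 16) | 12345, .ip_proto = 6 };

	printf("bucket %u\n", flow_hash(&k, 0xdeadbeef, 1024));
	return 0;
}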
*/ if (!sfb_classify(skb, q, &ret, &salt)) goto other_drop; + keys.src = salt; + keys.dst = 0; + keys.ports = 0; } else { - salt = skb_get_rxhash(skb); + skb_flow_dissect(skb, &keys); } slot = q->slot; - sfbhash = jhash_1word(salt, q->bins[slot].perturbation); + sfbhash = jhash_3words((__force u32)keys.dst, + (__force u32)keys.src, + (__force u32)keys.ports, + q->bins[slot].perturbation); if (!sfbhash) sfbhash = 1; sfb_skb_cb(skb)->hashes[slot] = sfbhash; @@ -347,7 +355,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (unlikely(p_min >= SFB_MAX_PROB)) { /* Inelastic flow */ if (q->double_buffering) { - sfbhash = jhash_1word(salt, q->bins[slot].perturbation); + sfbhash = jhash_3words((__force u32)keys.dst, + (__force u32)keys.src, + (__force u32)keys.ports, + q->bins[slot].perturbation); if (!sfbhash) sfbhash = 1; sfb_skb_cb(skb)->hashes[slot] = sfbhash; diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 4f5510e2bd6f..30cda707e400 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -17,14 +17,13 @@ #include <linux/in.h> #include <linux/errno.h> #include <linux/init.h> -#include <linux/ipv6.h> #include <linux/skbuff.h> #include <linux/jhash.h> #include <linux/slab.h> #include <linux/vmalloc.h> -#include <net/ip.h> #include <net/netlink.h> #include <net/pkt_sched.h> +#include <net/flow_keys.h> /* Stochastic Fairness Queuing algorithm. @@ -137,61 +136,17 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index return &q->dep[val - SFQ_SLOTS]; } -static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1) +static unsigned int sfq_hash(const struct sfq_sched_data *q, + const struct sk_buff *skb) { - return jhash_2words(h, h1, q->perturbation) & (q->divisor - 1); -} - -static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb) -{ - u32 h, h2; - - switch (skb->protocol) { - case htons(ETH_P_IP): - { - const struct iphdr *iph; - int poff; - - if (!pskb_network_may_pull(skb, sizeof(*iph))) - goto err; - iph = ip_hdr(skb); - h = (__force u32)iph->daddr; - h2 = (__force u32)iph->saddr ^ iph->protocol; - if (ip_is_fragment(iph)) - break; - poff = proto_ports_offset(iph->protocol); - if (poff >= 0 && - pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) { - iph = ip_hdr(skb); - h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff); - } - break; - } - case htons(ETH_P_IPV6): - { - const struct ipv6hdr *iph; - int poff; - - if (!pskb_network_may_pull(skb, sizeof(*iph))) - goto err; - iph = ipv6_hdr(skb); - h = (__force u32)iph->daddr.s6_addr32[3]; - h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr; - poff = proto_ports_offset(iph->nexthdr); - if (poff >= 0 && - pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) { - iph = ipv6_hdr(skb); - h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff); - } - break; - } - default: -err: - h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol; - h2 = (unsigned long)skb->sk; - } + struct flow_keys keys; + unsigned int hash; - return sfq_fold_hash(q, h, h2); + skb_flow_dissect(skb, &keys); + hash = jhash_3words((__force u32)keys.dst, + (__force u32)keys.src ^ keys.ip_proto, + (__force u32)keys.ports, q->perturbation); + return hash & (q->divisor - 1); } static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 4f4c52c0eeb3..ed1336e15920 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -310,7 +310,7 @@ restart: if (slave_txq->qdisc_sleeping != q) continue; - if 
(__netif_subqueue_stopped(slave, subq) || + if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) || !netif_running(slave)) { busy = 1; continue; @@ -321,7 +321,7 @@ restart: if (__netif_tx_trylock(slave_txq)) { unsigned int length = qdisc_pkt_len(skb); - if (!netif_tx_queue_frozen_or_stopped(slave_txq) && + if (!netif_xmit_frozen_or_stopped(slave_txq) && slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) { txq_trans_update(slave_txq); __netif_tx_unlock(slave_txq); @@ -333,7 +333,7 @@ restart: } __netif_tx_unlock(slave_txq); } - if (netif_queue_stopped(dev)) + if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) busy = 1; break; case 1: diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 810427833bcd..91f479121c55 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -107,7 +107,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev, if (addr) { addr->a.v6.sin6_family = AF_INET6; addr->a.v6.sin6_port = 0; - ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifa->addr); + addr->a.v6.sin6_addr = ifa->addr; addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; addr->valid = 1; spin_lock_bh(&sctp_local_addr_lock); @@ -219,8 +219,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) /* Fill in the dest address from the route entry passed with the skb * and the source address from the transport. */ - ipv6_addr_copy(&fl6.daddr, &transport->ipaddr.v6.sin6_addr); - ipv6_addr_copy(&fl6.saddr, &transport->saddr.v6.sin6_addr); + fl6.daddr = transport->ipaddr.v6.sin6_addr; + fl6.saddr = transport->saddr.v6.sin6_addr; fl6.flowlabel = np->flow_label; IP6_ECN_flow_xmit(sk, fl6.flowlabel); @@ -231,7 +231,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) if (np->opt && np->opt->srcrt) { struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt; - ipv6_addr_copy(&fl6.daddr, rt0->addr); + fl6.daddr = *rt0->addr; } SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", @@ -265,7 +265,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, sctp_scope_t scope; memset(fl6, 0, sizeof(struct flowi6)); - ipv6_addr_copy(&fl6->daddr, &daddr->v6.sin6_addr); + fl6->daddr = daddr->v6.sin6_addr; fl6->fl6_dport = daddr->v6.sin6_port; fl6->flowi6_proto = IPPROTO_SCTP; if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) @@ -277,7 +277,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, fl6->fl6_sport = htons(asoc->base.bind_addr.port); if (saddr) { - ipv6_addr_copy(&fl6->saddr, &saddr->v6.sin6_addr); + fl6->saddr = saddr->v6.sin6_addr; fl6->fl6_sport = saddr->v6.sin6_port; SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6->saddr); } @@ -334,7 +334,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, } rcu_read_unlock(); if (baddr) { - ipv6_addr_copy(&fl6->saddr, &baddr->v6.sin6_addr); + fl6->saddr = baddr->v6.sin6_addr; fl6->fl6_sport = baddr->v6.sin6_port; dst = ip6_dst_lookup_flow(sk, fl6, NULL, false); } @@ -375,7 +375,7 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk, if (t->dst) { saddr->v6.sin6_family = AF_INET6; - ipv6_addr_copy(&saddr->v6.sin6_addr, &fl6->saddr); + saddr->v6.sin6_addr = fl6->saddr; } } @@ -400,7 +400,7 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist, if (addr) { addr->a.v6.sin6_family = AF_INET6; addr->a.v6.sin6_port = 0; - ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifp->addr); + addr->a.v6.sin6_addr = ifp->addr; addr->a.v6.sin6_scope_id = dev->ifindex; addr->valid = 1; INIT_LIST_HEAD(&addr->list); @@ -416,7 
+416,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist, static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb, int is_saddr) { - void *from; __be16 *port; struct sctphdr *sh; @@ -428,12 +427,11 @@ static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb, sh = sctp_hdr(skb); if (is_saddr) { *port = sh->source; - from = &ipv6_hdr(skb)->saddr; + addr->v6.sin6_addr = ipv6_hdr(skb)->saddr; } else { *port = sh->dest; - from = &ipv6_hdr(skb)->daddr; + addr->v6.sin6_addr = ipv6_hdr(skb)->daddr; } - ipv6_addr_copy(&addr->v6.sin6_addr, from); } /* Initialize an sctp_addr from a socket. */ @@ -441,7 +439,7 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk) { addr->v6.sin6_family = AF_INET6; addr->v6.sin6_port = 0; - ipv6_addr_copy(&addr->v6.sin6_addr, &inet6_sk(sk)->rcv_saddr); + addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr; } /* Initialize sk->sk_rcv_saddr from sctp_addr. */ @@ -454,7 +452,7 @@ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk) inet6_sk(sk)->rcv_saddr.s6_addr32[3] = addr->v4.sin_addr.s_addr; } else { - ipv6_addr_copy(&inet6_sk(sk)->rcv_saddr, &addr->v6.sin6_addr); + inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr; } } @@ -467,7 +465,7 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk) inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff); inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr; } else { - ipv6_addr_copy(&inet6_sk(sk)->daddr, &addr->v6.sin6_addr); + inet6_sk(sk)->daddr = addr->v6.sin6_addr; } } @@ -479,7 +477,7 @@ static void sctp_v6_from_addr_param(union sctp_addr *addr, addr->v6.sin6_family = AF_INET6; addr->v6.sin6_port = port; addr->v6.sin6_flowinfo = 0; /* BUG */ - ipv6_addr_copy(&addr->v6.sin6_addr, ¶m->v6.addr); + addr->v6.sin6_addr = param->v6.addr; addr->v6.sin6_scope_id = iif; } @@ -493,7 +491,7 @@ static int sctp_v6_to_addr_param(const union sctp_addr *addr, param->v6.param_hdr.type = SCTP_PARAM_IPV6_ADDRESS; param->v6.param_hdr.length = htons(length); - ipv6_addr_copy(¶m->v6.addr, &addr->v6.sin6_addr); + param->v6.addr = addr->v6.sin6_addr; return length; } @@ -504,7 +502,7 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr, { addr->sa.sa_family = AF_INET6; addr->v6.sin6_port = port; - ipv6_addr_copy(&addr->v6.sin6_addr, saddr); + addr->v6.sin6_addr = *saddr; } /* Compare addresses exactly. @@ -759,7 +757,7 @@ static void sctp_inet6_event_msgname(struct sctp_ulpevent *event, } sin6from = &asoc->peer.primary_addr.v6; - ipv6_addr_copy(&sin6->sin6_addr, &sin6from->sin6_addr); + sin6->sin6_addr = sin6from->sin6_addr; if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) sin6->sin6_scope_id = sin6from->sin6_scope_id; } @@ -787,7 +785,7 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname, } /* Otherwise, just copy the v6 address. 
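Editor's note: the ipv6_addr_copy() conversions in these sctp/ipv6.c hunks replace a helper call with plain structure assignment; C guarantees that assigning a struct copies every member, including the 16-byte address array, so the two forms are equivalent. A small self-contained illustration (the struct is a stand-in for struct in6_addr):

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct in6_addr_demo {
	uint8_t s6_addr[16];
};

int main(void)
{
	struct in6_addr_demo a, b;

	memset(&a, 0xab, sizeof(a));
	memset(&b, 0x00, sizeof(b));

	b = a;                  /* struct assignment copies the whole array */
	assert(memcmp(&a, &b, sizeof(a)) == 0);
	return 0;
}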
*/ - ipv6_addr_copy(&sin6->sin6_addr, &ipv6_hdr(skb)->saddr); + sin6->sin6_addr = ipv6_hdr(skb)->saddr; if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) { struct sctp_ulpevent *ev = sctp_skb2event(skb); sin6->sin6_scope_id = ev->iif; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 0121e0ab0351..a85eeeb55dd0 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -3400,8 +3400,10 @@ int sctp_process_asconf_ack(struct sctp_association *asoc, asconf_len -= length; } - if (no_err && asoc->src_out_of_asoc_ok) + if (no_err && asoc->src_out_of_asoc_ok) { asoc->src_out_of_asoc_ok = 0; + sctp_transport_immediate_rtx(asoc->peer.primary_path); + } /* Free the cached last sent asconf chunk. */ list_del_init(&asconf->transmitted_list); diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 76388b083f28..1ff51c9d18d5 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -666,6 +666,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, struct sctp_chunk *chunk) { sctp_sender_hb_info_t *hbinfo; + int was_unconfirmed = 0; /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the * HEARTBEAT should clear the error counter of the destination @@ -692,9 +693,11 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, /* Mark the destination transport address as active if it is not so * marked. */ - if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) + if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) { + was_unconfirmed = 1; sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP, SCTP_HEARTBEAT_SUCCESS); + } /* The receiver of the HEARTBEAT ACK should also perform an * RTT measurement for that destination transport address @@ -712,6 +715,9 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, /* Update the heartbeat timer. 
*/ if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) sctp_transport_hold(t); + + if (was_unconfirmed && asoc->peer.transport_count == 1) + sctp_transport_immediate_rtx(t); } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 13bf5fcdbff1..d56c07a3d435 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -804,7 +804,7 @@ static int sctp_send_asconf_del_ip(struct sock *sk, struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)addrs; - ipv6_addr_copy(&asoc->asconf_addr_del_pending->v6.sin6_addr, &sin6->sin6_addr); + asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr; } SCTP_DEBUG_PRINTK_IPADDR("send_asconf_del_ip: keep the last address asoc: %p ", " at %p\n", asoc, asoc->asconf_addr_del_pending, diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 394c57ca2f54..3889330b7b04 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -641,3 +641,19 @@ void sctp_transport_reset(struct sctp_transport *t) t->cacc.next_tsn_at_change = 0; t->cacc.cacc_saw_newack = 0; } + +/* Schedule retransmission on the given transport */ +void sctp_transport_immediate_rtx(struct sctp_transport *t) +{ + /* Stop pending T3_rtx_timer */ + if (timer_pending(&t->T3_rtx_timer)) { + (void)del_timer(&t->T3_rtx_timer); + sctp_transport_put(t); + } + sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX); + if (!timer_pending(&t->T3_rtx_timer)) { + if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto)) + sctp_transport_hold(t); + } + return; +} diff --git a/net/socket.c b/net/socket.c index 2877647f347b..e62b4f055071 100644 --- a/net/socket.c +++ b/net/socket.c @@ -538,6 +538,8 @@ int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags) *tx_flags |= SKBTX_HW_TSTAMP; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE)) *tx_flags |= SKBTX_SW_TSTAMP; + if (sock_flag(sk, SOCK_WIFI_STATUS)) + *tx_flags |= SKBTX_WIFI_STATUS; return 0; } EXPORT_SYMBOL(sock_tx_timestamp); @@ -549,6 +551,8 @@ static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock, sock_update_classid(sock->sk); + sock_update_netprioidx(sock->sk); + si->sock = sock; si->scm = NULL; si->msg = msg; @@ -674,6 +678,22 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, } EXPORT_SYMBOL_GPL(__sock_recv_timestamp); +void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb) +{ + int ack; + + if (!sock_flag(sk, SOCK_WIFI_STATUS)) + return; + if (!skb->wifi_acked_valid) + return; + + ack = skb->wifi_acked; + + put_cmsg(msg, SOL_SOCKET, SCM_WIFI_STATUS, sizeof(ack), &ack); +} +EXPORT_SYMBOL_GPL(__sock_recv_wifi_status); + static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index ce136323da8b..fe258fc37f50 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c @@ -134,7 +134,7 @@ static void ip_map_init(struct cache_head *cnew, struct cache_head *citem) struct ip_map *item = container_of(citem, struct ip_map, h); strcpy(new->m_class, item->m_class); - ipv6_addr_copy(&new->m_addr, &item->m_addr); + new->m_addr = item->m_addr; } static void update(struct cache_head *cnew, struct cache_head *citem) { @@ -274,7 +274,7 @@ static int ip_map_show(struct seq_file *m, } im = container_of(h, struct ip_map, h); /* class addr domain */ - ipv6_addr_copy(&addr, &im->m_addr); + addr = im->m_addr; if (test_bit(CACHE_VALID, &h->flags) && !test_bit(CACHE_NEGATIVE, &h->flags)) @@ -297,7 +297,7 @@ static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char 
*class, struct cache_head *ch; strcpy(ip.m_class, class); - ipv6_addr_copy(&ip.m_addr, addr); + ip.m_addr = *addr; ch = sunrpc_cache_lookup(cd, &ip.h, hash_str(class, IP_HASHBITS) ^ hash_ip6(*addr)); diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 71bed1c1c77a..4653286fcc9e 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -157,7 +157,7 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh) cmh->cmsg_level = SOL_IPV6; cmh->cmsg_type = IPV6_PKTINFO; pki->ipi6_ifindex = daddr->sin6_scope_id; - ipv6_addr_copy(&pki->ipi6_addr, &daddr->sin6_addr); + pki->ipi6_addr = daddr->sin6_addr; cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); } break; @@ -523,7 +523,7 @@ static int svc_udp_get_dest_address6(struct svc_rqst *rqstp, return 0; daddr->sin6_family = AF_INET6; - ipv6_addr_copy(&daddr->sin6_addr, &pki->ipi6_addr); + daddr->sin6_addr = pki->ipi6_addr; daddr->sin6_scope_id = pki->ipi6_ifindex; return 1; } diff --git a/net/wireless/core.c b/net/wireless/core.c index 220f3bd176f8..ccdfed897651 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -492,6 +492,10 @@ int wiphy_register(struct wiphy *wiphy) !(wiphy->wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY))) return -EINVAL; + if (WARN_ON(wiphy->ap_sme_capa && + !(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME))) + return -EINVAL; + if (WARN_ON(wiphy->addresses && !wiphy->n_addresses)) return -EINVAL; diff --git a/net/wireless/core.h b/net/wireless/core.h index b9ec3061ed72..fb08c28fc90a 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -54,6 +54,8 @@ struct cfg80211_registered_device { int opencount; /* also protected by devlist_mtx */ wait_queue_head_t dev_wait; + u32 ap_beacons_nlpid; + /* BSSes/scanning */ spinlock_t bss_lock; struct list_head bss_list; @@ -339,13 +341,17 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, const u8 *bssid, const u8 *prev_bssid, const u8 *ssid, int ssid_len, const u8 *ie, int ie_len, bool use_mfp, - struct cfg80211_crypto_settings *crypt); + struct cfg80211_crypto_settings *crypt, + u32 assoc_flags, struct ieee80211_ht_cap *ht_capa, + struct ieee80211_ht_cap *ht_capa_mask); int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_channel *chan, const u8 *bssid, const u8 *prev_bssid, const u8 *ssid, int ssid_len, const u8 *ie, int ie_len, bool use_mfp, - struct cfg80211_crypto_settings *crypt); + struct cfg80211_crypto_settings *crypt, + u32 assoc_flags, struct ieee80211_ht_cap *ht_capa, + struct ieee80211_ht_cap *ht_capa_mask); int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *bssid, const u8 *ie, int ie_len, u16 reason, @@ -376,7 +382,9 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, enum nl80211_channel_type channel_type, bool channel_type_valid, unsigned int wait, const u8 *buf, size_t len, bool no_cck, - u64 *cookie); + bool dont_wait_for_ack, u64 *cookie); +void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, + const struct ieee80211_ht_cap *ht_capa_mask); /* SME */ int __cfg80211_connect(struct cfg80211_registered_device *rdev, diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 21fc9702f81c..438dfc105b4a 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -501,13 +501,32 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, return err; } +/* Do a logical ht_capa &= ht_capa_mask. 
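Editor's note: cfg80211_oper_and_ht_capa(), implemented just below, applies a capability mask byte by byte and clears the capabilities entirely when no mask is supplied. A generic userspace sketch of that pattern (illustrative, not the cfg80211 API):

#include <stddef.h>
#include <string.h>

/* dst &= mask, byte by byte; a NULL mask means "allow nothing" */
static void mask_struct(void *dst, const void *mask, size_t len)
{
	unsigned char *d = dst;
	const unsigned char *m = mask;
	size_t i;

	if (!mask) {
		memset(dst, 0, len);
		return;
	}
	for (i = 0; i < len; i++)
		d[i] &= m[i];
}

int main(void)
{
	unsigned char caps[4] = { 0xff, 0xff, 0x0f, 0x01 };
	unsigned char mask[4] = { 0x0f, 0xff, 0x00, 0x01 };

	mask_struct(caps, mask, sizeof(caps)); /* caps: 0x0f 0xff 0x00 0x01 */
	mask_struct(caps, NULL, sizeof(caps)); /* caps: cleared entirely */
	return 0;
}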
*/ +void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, + const struct ieee80211_ht_cap *ht_capa_mask) +{ + int i; + u8 *p1, *p2; + if (!ht_capa_mask) { + memset(ht_capa, 0, sizeof(*ht_capa)); + return; + } + + p1 = (u8*)(ht_capa); + p2 = (u8*)(ht_capa_mask); + for (i = 0; i<sizeof(*ht_capa); i++) + p1[i] &= p2[i]; +} + int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_channel *chan, const u8 *bssid, const u8 *prev_bssid, const u8 *ssid, int ssid_len, const u8 *ie, int ie_len, bool use_mfp, - struct cfg80211_crypto_settings *crypt) + struct cfg80211_crypto_settings *crypt, + u32 assoc_flags, struct ieee80211_ht_cap *ht_capa, + struct ieee80211_ht_cap *ht_capa_mask) { struct wireless_dev *wdev = dev->ieee80211_ptr; struct cfg80211_assoc_request req; @@ -537,6 +556,15 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, memcpy(&req.crypto, crypt, sizeof(req.crypto)); req.use_mfp = use_mfp; req.prev_bssid = prev_bssid; + req.flags = assoc_flags; + if (ht_capa) + memcpy(&req.ht_capa, ht_capa, sizeof(req.ht_capa)); + if (ht_capa_mask) + memcpy(&req.ht_capa_mask, ht_capa_mask, + sizeof(req.ht_capa_mask)); + cfg80211_oper_and_ht_capa(&req.ht_capa_mask, + rdev->wiphy.ht_capa_mod_mask); + req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); if (!req.bss) { @@ -574,14 +602,17 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, const u8 *bssid, const u8 *prev_bssid, const u8 *ssid, int ssid_len, const u8 *ie, int ie_len, bool use_mfp, - struct cfg80211_crypto_settings *crypt) + struct cfg80211_crypto_settings *crypt, + u32 assoc_flags, struct ieee80211_ht_cap *ht_capa, + struct ieee80211_ht_cap *ht_capa_mask) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; wdev_lock(wdev); err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid, - ssid, ssid_len, ie, ie_len, use_mfp, crypt); + ssid, ssid_len, ie, ie_len, use_mfp, crypt, + assoc_flags, ht_capa, ht_capa_mask); wdev_unlock(wdev); return err; @@ -879,6 +910,9 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid) } spin_unlock_bh(&wdev->mgmt_registrations_lock); + + if (nlpid == wdev->ap_unexpected_nlpid) + wdev->ap_unexpected_nlpid = 0; } void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev) @@ -901,7 +935,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, enum nl80211_channel_type channel_type, bool channel_type_valid, unsigned int wait, const u8 *buf, size_t len, bool no_cck, - u64 *cookie) + bool dont_wait_for_ack, u64 *cookie) { struct wireless_dev *wdev = dev->ieee80211_ptr; const struct ieee80211_mgmt *mgmt; @@ -992,7 +1026,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, /* Transmit the Action frame as requested by user space */ return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, offchan, channel_type, channel_type_valid, - wait, buf, len, no_cck, cookie); + wait, buf, len, no_cck, dont_wait_for_ack, + cookie); } bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf, @@ -1107,3 +1142,30 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index, nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp); } EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify); + +bool cfg80211_rx_spurious_frame(struct net_device *dev, + const u8 *addr, gfp_t gfp) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && + 
wdev->iftype != NL80211_IFTYPE_P2P_GO)) + return false; + + return nl80211_unexpected_frame(dev, addr, gfp); +} +EXPORT_SYMBOL(cfg80211_rx_spurious_frame); + +bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev, + const u8 *addr, gfp_t gfp) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP && + wdev->iftype != NL80211_IFTYPE_P2P_GO && + wdev->iftype != NL80211_IFTYPE_AP_VLAN)) + return false; + + return nl80211_unexpected_4addr_frame(dev, addr, gfp); +} +EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index ffafda5022c2..eee9ccc7adaf 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -98,7 +98,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 }, [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 }, [NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG }, - [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 }, + [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 }, [NL80211_ATTR_KEY_TYPE] = { .type = NLA_U32 }, [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 }, @@ -196,6 +196,14 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { [NL80211_ATTR_TDLS_OPERATION] = { .type = NLA_U8 }, [NL80211_ATTR_TDLS_SUPPORT] = { .type = NLA_FLAG }, [NL80211_ATTR_TDLS_EXTERNAL_SETUP] = { .type = NLA_FLAG }, + [NL80211_ATTR_DONT_WAIT_FOR_ACK] = { .type = NLA_FLAG }, + [NL80211_ATTR_PROBE_RESP] = { .type = NLA_BINARY, + .len = IEEE80211_MAX_DATA_LEN }, + [NL80211_ATTR_DFS_REGION] = { .type = NLA_U8 }, + [NL80211_ATTR_DISABLE_HT] = { .type = NLA_FLAG }, + [NL80211_ATTR_HT_CAPABILITY_MASK] = { + .len = NL80211_HT_CAPABILITY_LEN + }, }; /* policy for the key attributes */ @@ -203,7 +211,7 @@ static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = { [NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN }, [NL80211_KEY_IDX] = { .type = NLA_U8 }, [NL80211_KEY_CIPHER] = { .type = NLA_U32 }, - [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 }, + [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 16 }, [NL80211_KEY_DEFAULT] = { .type = NLA_FLAG }, [NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG }, [NL80211_KEY_TYPE] = { .type = NLA_U32 }, @@ -758,6 +766,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, dev->wiphy.available_antennas_rx); + if (dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) + NLA_PUT_U32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD, + dev->wiphy.probe_resp_offload); + if ((dev->wiphy.available_antennas_tx || dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) { u32 tx_ant = 0, rx_ant = 0; @@ -874,7 +886,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, CMD(set_pmksa, SET_PMKSA); CMD(del_pmksa, DEL_PMKSA); CMD(flush_pmksa, FLUSH_PMKSA); - CMD(remain_on_channel, REMAIN_ON_CHANNEL); + if (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) + CMD(remain_on_channel, REMAIN_ON_CHANNEL); CMD(set_bitrate_mask, SET_TX_BITRATE_MASK); CMD(mgmt_tx, FRAME); CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL); @@ -890,6 +903,15 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, } if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) CMD(sched_scan_start, START_SCHED_SCAN); + CMD(probe_client, PROBE_CLIENT); + if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) { + i++; + NLA_PUT_U32(msg, i, 
NL80211_CMD_REGISTER_BEACONS); + } + +#ifdef CONFIG_NL80211_TESTMODE + CMD(testmode_cmd, TESTMODE); +#endif #undef CMD @@ -905,11 +927,12 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, nla_nest_end(msg, nl_cmds); - if (dev->ops->remain_on_channel) + if (dev->ops->remain_on_channel && + dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION, dev->wiphy.max_remain_on_channel_duration); - if (dev->ops->mgmt_tx_cancel_wait) + if (dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK); if (mgmt_stypes) { @@ -1007,6 +1030,17 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, if (nl80211_put_iface_combinations(&dev->wiphy, msg)) goto nla_put_failure; + if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) + NLA_PUT_U32(msg, NL80211_ATTR_DEVICE_AP_SME, + dev->wiphy.ap_sme_capa); + + NLA_PUT_U32(msg, NL80211_ATTR_FEATURE_FLAGS, dev->wiphy.features); + + if (dev->wiphy.ht_capa_mod_mask) + NLA_PUT(msg, NL80211_ATTR_HT_CAPABILITY_MASK, + sizeof(*dev->wiphy.ht_capa_mod_mask), + dev->wiphy.ht_capa_mod_mask); + return genlmsg_end(msg, hdr); nla_put_failure: @@ -2155,6 +2189,13 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info) nla_len(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]); } + if (info->attrs[NL80211_ATTR_PROBE_RESP]) { + params.probe_resp = + nla_data(info->attrs[NL80211_ATTR_PROBE_RESP]); + params.probe_resp_len = + nla_len(info->attrs[NL80211_ATTR_PROBE_RESP]); + } + err = call(&rdev->wiphy, dev, ¶ms); if (!err && params.interval) wdev->beacon_interval = params.interval; @@ -2453,26 +2494,34 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info) /* * Get vlan interface making sure it is running and on the right wiphy. 
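Editor's note: the get_vlan() rework in the hunk that follows drops the output parameter and instead returns either a valid net_device pointer, NULL when no VLAN attribute was given, or an errno encoded in the pointer itself. A simplified userspace sketch of that ERR_PTR/IS_ERR encoding idiom (the real kernel macros live in linux/err.h):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *err_ptr(long err)          /* encode -errno as a pointer */
{
	return (void *)err;
}

static int is_err(const void *p)        /* top 4095 addresses are reserved */
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static long ptr_err(const void *p)
{
	return (long)p;
}

int main(void)
{
	void *v = err_ptr(-ENODEV);

	if (is_err(v))
		printf("lookup failed: %ld\n", ptr_err(v)); /* the negative errno */
	return 0;
}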
*/ -static int get_vlan(struct genl_info *info, - struct cfg80211_registered_device *rdev, - struct net_device **vlan) +static struct net_device *get_vlan(struct genl_info *info, + struct cfg80211_registered_device *rdev) { struct nlattr *vlanattr = info->attrs[NL80211_ATTR_STA_VLAN]; - *vlan = NULL; - - if (vlanattr) { - *vlan = dev_get_by_index(genl_info_net(info), - nla_get_u32(vlanattr)); - if (!*vlan) - return -ENODEV; - if (!(*vlan)->ieee80211_ptr) - return -EINVAL; - if ((*vlan)->ieee80211_ptr->wiphy != &rdev->wiphy) - return -EINVAL; - if (!netif_running(*vlan)) - return -ENETDOWN; + struct net_device *v; + int ret; + + if (!vlanattr) + return NULL; + + v = dev_get_by_index(genl_info_net(info), nla_get_u32(vlanattr)); + if (!v) + return ERR_PTR(-ENODEV); + + if (!v->ieee80211_ptr || v->ieee80211_ptr->wiphy != &rdev->wiphy) { + ret = -EINVAL; + goto error; } - return 0; + + if (!netif_running(v)) { + ret = -ENETDOWN; + goto error; + } + + return v; + error: + dev_put(v); + return ERR_PTR(ret); } static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) @@ -2522,9 +2571,9 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) params.plink_state = nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]); - err = get_vlan(info, rdev, ¶ms.vlan); - if (err) - goto out; + params.vlan = get_vlan(info, rdev); + if (IS_ERR(params.vlan)) + return PTR_ERR(params.vlan); /* validate settings */ err = 0; @@ -2692,9 +2741,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) (rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP))) return -EINVAL; - err = get_vlan(info, rdev, ¶ms.vlan); - if (err) - goto out; + params.vlan = get_vlan(info, rdev); + if (IS_ERR(params.vlan)) + return PTR_ERR(params.vlan); /* validate settings */ err = 0; @@ -3357,6 +3406,9 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info) NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, cfg80211_regdomain->alpha2); + if (cfg80211_regdomain->dfs_region) + NLA_PUT_U8(msg, NL80211_ATTR_DFS_REGION, + cfg80211_regdomain->dfs_region); nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES); if (!nl_reg_rules) @@ -3415,6 +3467,7 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) char *alpha2 = NULL; int rem_reg_rules = 0, r = 0; u32 num_rules = 0, rule_idx = 0, size_of_regd; + u8 dfs_region = 0; struct ieee80211_regdomain *rd = NULL; if (!info->attrs[NL80211_ATTR_REG_ALPHA2]) @@ -3425,6 +3478,9 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) alpha2 = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]); + if (info->attrs[NL80211_ATTR_DFS_REGION]) + dfs_region = nla_get_u8(info->attrs[NL80211_ATTR_DFS_REGION]); + nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES], rem_reg_rules) { num_rules++; @@ -3452,6 +3508,13 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) rd->alpha2[0] = alpha2[0]; rd->alpha2[1] = alpha2[1]; + /* + * Disable DFS master mode if the DFS region was + * not supported or known on this kernel. 
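Editor's note: reg_supported_dfs_region(), added in the reg.c hunks further below, whitelists the known DFS regions so that an unrecognised value from userspace leaves dfs_region at its unset default. A compact sketch of that validate-then-apply step (enum names here are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum dfs_region_demo { DFS_UNSET = 0, DFS_FCC, DFS_ETSI, DFS_JP };

static bool dfs_region_supported(uint8_t region)
{
	switch (region) {
	case DFS_UNSET:
	case DFS_FCC:
	case DFS_ETSI:
	case DFS_JP:
		return true;
	default:
		return false;           /* unknown value: ignore it */
	}
}

static uint8_t apply_dfs_region(uint8_t current, uint8_t requested)
{
	return dfs_region_supported(requested) ? requested : current;
}

int main(void)
{
	uint8_t region = DFS_UNSET;

	region = apply_dfs_region(region, DFS_ETSI);  /* accepted */
	region = apply_dfs_region(region, 0x7f);      /* rejected, keeps ETSI */
	printf("%u\n", region);
	return 0;
}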
+ */ + if (reg_supported_dfs_region(dfs_region)) + rd->dfs_region = dfs_region; + nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES], rem_reg_rules) { nla_parse(tb, NL80211_REG_RULE_ATTR_MAX, @@ -4359,6 +4422,9 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL; int err, ssid_len, ie_len = 0; bool use_mfp = false; + u32 flags = 0; + struct ieee80211_ht_cap *ht_capa = NULL; + struct ieee80211_ht_cap *ht_capa_mask = NULL; if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) return -EINVAL; @@ -4402,11 +4468,25 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_PREV_BSSID]) prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]); + if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT])) + flags |= ASSOC_REQ_DISABLE_HT; + + if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) + ht_capa_mask = + nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]); + + if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { + if (!ht_capa_mask) + return -EINVAL; + ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); + } + err = nl80211_crypto_settings(rdev, info, &crypto, 1); if (!err) err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid, ssid, ssid_len, ie, ie_len, use_mfp, - &crypto); + &crypto, flags, ht_capa, + ht_capa_mask); return err; } @@ -4896,6 +4976,22 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) return PTR_ERR(connkeys); } + if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT])) + connect.flags |= ASSOC_REQ_DISABLE_HT; + + if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) + memcpy(&connect.ht_capa_mask, + nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]), + sizeof(connect.ht_capa_mask)); + + if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { + if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) + return -EINVAL; + memcpy(&connect.ht_capa, + nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]), + sizeof(connect.ht_capa)); + } + err = cfg80211_connect(rdev, dev, &connect, connkeys); if (err) kfree(connkeys); @@ -5083,7 +5179,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb, duration > rdev->wiphy.max_remain_on_channel_duration) return -EINVAL; - if (!rdev->ops->remain_on_channel) + if (!rdev->ops->remain_on_channel || + !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { @@ -5271,12 +5368,13 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) bool channel_type_valid = false; u32 freq; int err; - void *hdr; + void *hdr = NULL; u64 cookie; - struct sk_buff *msg; + struct sk_buff *msg = NULL; unsigned int wait = 0; - bool offchan; - bool no_cck; + bool offchan, no_cck, dont_wait_for_ack; + + dont_wait_for_ack = info->attrs[NL80211_ATTR_DONT_WAIT_FOR_ACK]; if (!info->attrs[NL80211_ATTR_FRAME] || !info->attrs[NL80211_ATTR_WIPHY_FREQ]) @@ -5295,7 +5393,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_DURATION]) { - if (!rdev->ops->mgmt_tx_cancel_wait) + if (!(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)) return -EINVAL; wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]); } @@ -5313,6 +5411,9 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK]; + if (offchan && !(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX)) + return -EINVAL; + no_cck = 
nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]); freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); @@ -5320,29 +5421,36 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) if (chan == NULL) return -EINVAL; - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) - return -ENOMEM; + if (!dont_wait_for_ack) { + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; - hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, - NL80211_CMD_FRAME); + hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, + NL80211_CMD_FRAME); - if (IS_ERR(hdr)) { - err = PTR_ERR(hdr); - goto free_msg; + if (IS_ERR(hdr)) { + err = PTR_ERR(hdr); + goto free_msg; + } } + err = cfg80211_mlme_mgmt_tx(rdev, dev, chan, offchan, channel_type, channel_type_valid, wait, nla_data(info->attrs[NL80211_ATTR_FRAME]), nla_len(info->attrs[NL80211_ATTR_FRAME]), - no_cck, &cookie); + no_cck, dont_wait_for_ack, &cookie); if (err) goto free_msg; - NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); + if (msg) { + NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); - genlmsg_end(msg, hdr); - return genlmsg_reply(msg, info); + genlmsg_end(msg, hdr); + return genlmsg_reply(msg, info); + } + + return 0; nla_put_failure: err = -ENOBUFS; @@ -5832,6 +5940,91 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) return err; } +static int nl80211_register_unexpected_frame(struct sk_buff *skb, + struct genl_info *info) +{ + struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; + + if (wdev->iftype != NL80211_IFTYPE_AP && + wdev->iftype != NL80211_IFTYPE_P2P_GO) + return -EINVAL; + + if (wdev->ap_unexpected_nlpid) + return -EBUSY; + + wdev->ap_unexpected_nlpid = info->snd_pid; + return 0; +} + +static int nl80211_probe_client(struct sk_buff *skb, + struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct sk_buff *msg; + void *hdr; + const u8 *addr; + u64 cookie; + int err; + + if (wdev->iftype != NL80211_IFTYPE_AP && + wdev->iftype != NL80211_IFTYPE_P2P_GO) + return -EOPNOTSUPP; + + if (!info->attrs[NL80211_ATTR_MAC]) + return -EINVAL; + + if (!rdev->ops->probe_client) + return -EOPNOTSUPP; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, + NL80211_CMD_PROBE_CLIENT); + + if (IS_ERR(hdr)) { + err = PTR_ERR(hdr); + goto free_msg; + } + + addr = nla_data(info->attrs[NL80211_ATTR_MAC]); + + err = rdev->ops->probe_client(&rdev->wiphy, dev, addr, &cookie); + if (err) + goto free_msg; + + NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); + + genlmsg_end(msg, hdr); + + return genlmsg_reply(msg, info); + + nla_put_failure: + err = -ENOBUFS; + free_msg: + nlmsg_free(msg); + return err; +} + +static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + + if (!(rdev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS)) + return -EOPNOTSUPP; + + if (rdev->ap_beacons_nlpid) + return -EBUSY; + + rdev->ap_beacons_nlpid = info->snd_pid; + + return 0; +} + #define NL80211_FLAG_NEED_WIPHY 0x01 #define NL80211_FLAG_NEED_NETDEV 0x02 #define NL80211_FLAG_NEED_RTNL 0x04 @@ -6387,6 +6580,30 @@ static struct genl_ops nl80211_ops[] = { .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, + { + .cmd = 
NL80211_CMD_UNEXPECTED_FRAME, + .doit = nl80211_register_unexpected_frame, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_NETDEV | + NL80211_FLAG_NEED_RTNL, + }, + { + .cmd = NL80211_CMD_PROBE_CLIENT, + .doit = nl80211_probe_client, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_NETDEV | + NL80211_FLAG_NEED_RTNL, + }, + { + .cmd = NL80211_CMD_REGISTER_BEACONS, + .doit = nl80211_register_beacons, + .policy = nl80211_policy, + .flags = GENL_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_WIPHY | + NL80211_FLAG_NEED_RTNL, + }, }; static struct genl_multicast_group nl80211_mlme_mcgrp = { @@ -6639,10 +6856,7 @@ void nl80211_send_reg_change_event(struct regulatory_request *request) if (wiphy_idx_valid(request->wiphy_idx)) NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx); - if (genlmsg_end(msg, hdr) < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); rcu_read_lock(); genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id, @@ -6678,10 +6892,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev, NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); - if (genlmsg_end(msg, hdr) < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -6762,10 +6973,7 @@ static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT); NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); - if (genlmsg_end(msg, hdr) < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -6821,10 +7029,7 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev, if (resp_ie) NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); - if (genlmsg_end(msg, hdr) < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -6862,10 +7067,7 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev, if (resp_ie) NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); - if (genlmsg_end(msg, hdr) < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -6903,10 +7105,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, if (ie) NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie); - if (genlmsg_end(msg, hdr) < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, GFP_KERNEL); @@ -6939,10 +7138,7 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); - if (genlmsg_end(msg, hdr) < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -6977,10 +7173,7 @@ void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev, if (ie_len && ie) NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie); - if (genlmsg_end(msg, hdr) < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7019,10 
+7212,7 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, if (tsc) NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc); - if (genlmsg_end(msg, hdr) < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7073,10 +7263,7 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy, goto nla_put_failure; nla_nest_end(msg, nl_freq); - if (genlmsg_end(msg, hdr) < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); rcu_read_lock(); genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id, @@ -7119,10 +7306,7 @@ static void nl80211_send_remain_on_chan_event( if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL) NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration); - if (genlmsg_end(msg, hdr) < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7193,10 +7377,7 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev, NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); - if (genlmsg_end(msg, hdr) < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7207,13 +7388,68 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev, nlmsg_free(msg); } +static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd, + const u8 *addr, gfp_t gfp) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct sk_buff *msg; + void *hdr; + int err; + u32 nlpid = ACCESS_ONCE(wdev->ap_unexpected_nlpid); + + if (!nlpid) + return false; + + msg = nlmsg_new(100, gfp); + if (!msg) + return true; + + hdr = nl80211hdr_put(msg, 0, 0, 0, cmd); + if (!hdr) { + nlmsg_free(msg); + return true; + } + + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); + NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); + + err = genlmsg_end(msg, hdr); + if (err < 0) { + nlmsg_free(msg); + return true; + } + + genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid); + return true; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); + return true; +} + +bool nl80211_unexpected_frame(struct net_device *dev, const u8 *addr, gfp_t gfp) +{ + return __nl80211_unexpected_frame(dev, NL80211_CMD_UNEXPECTED_FRAME, + addr, gfp); +} + +bool nl80211_unexpected_4addr_frame(struct net_device *dev, + const u8 *addr, gfp_t gfp) +{ + return __nl80211_unexpected_frame(dev, + NL80211_CMD_UNEXPECTED_4ADDR_FRAME, + addr, gfp); +} + int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, struct net_device *netdev, u32 nlpid, int freq, const u8 *buf, size_t len, gfp_t gfp) { struct sk_buff *msg; void *hdr; - int err; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); if (!msg) @@ -7230,16 +7466,9 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq); NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); - err = genlmsg_end(msg, hdr); - if (err < 0) { - nlmsg_free(msg); - return err; - } + genlmsg_end(msg, hdr); - err = genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid); - if (err < 0) - return err; - return 0; + return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid); nla_put_failure: genlmsg_cancel(msg, hdr); @@ -7272,10 +7501,7 @@ void 
nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev, if (ack) NLA_PUT_FLAG(msg, NL80211_ATTR_ACK); - if (genlmsg_end(msg, hdr) < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp); return; @@ -7317,10 +7543,7 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev, nla_nest_end(msg, pinfoattr); - if (genlmsg_end(msg, hdr) < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7362,10 +7585,7 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev, nla_nest_end(msg, rekey_attr); - if (genlmsg_end(msg, hdr) < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7408,10 +7628,7 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev, nla_nest_end(msg, attr); - if (genlmsg_end(msg, hdr) < 0) { - nlmsg_free(msg); - return; - } + genlmsg_end(msg, hdr); genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, nl80211_mlme_mcgrp.id, gfp); @@ -7453,7 +7670,45 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev, nla_nest_end(msg, pinfoattr); - if (genlmsg_end(msg, hdr) < 0) { + genlmsg_end(msg, hdr); + + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_mlme_mcgrp.id, gfp); + return; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); +} + +void cfg80211_probe_status(struct net_device *dev, const u8 *addr, + u64 cookie, bool acked, gfp_t gfp) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct sk_buff *msg; + void *hdr; + int err; + + msg = nlmsg_new(NLMSG_GOODSIZE, gfp); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PROBE_CLIENT); + if (!hdr) { + nlmsg_free(msg); + return; + } + + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); + NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); + NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); + if (acked) + NLA_PUT_FLAG(msg, NL80211_ATTR_ACK); + + err = genlmsg_end(msg, hdr); + if (err < 0) { nlmsg_free(msg); return; } @@ -7466,6 +7721,45 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev, genlmsg_cancel(msg, hdr); nlmsg_free(msg); } +EXPORT_SYMBOL(cfg80211_probe_status); + +void cfg80211_report_obss_beacon(struct wiphy *wiphy, + const u8 *frame, size_t len, + int freq, gfp_t gfp) +{ + struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); + struct sk_buff *msg; + void *hdr; + u32 nlpid = ACCESS_ONCE(rdev->ap_beacons_nlpid); + + if (!nlpid) + return; + + msg = nlmsg_new(len + 100, gfp); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME); + if (!hdr) { + nlmsg_free(msg); + return; + } + + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); + if (freq) + NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq); + NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame); + + genlmsg_end(msg, hdr); + + genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid); + return; + + nla_put_failure: + genlmsg_cancel(msg, hdr); + nlmsg_free(msg); +} +EXPORT_SYMBOL(cfg80211_report_obss_beacon); static int nl80211_netlink_notify(struct notifier_block * nb, unsigned long state, @@ -7480,9 +7774,12 @@ static int nl80211_netlink_notify(struct notifier_block * nb, rcu_read_lock(); - 
list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) + list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) { list_for_each_entry_rcu(wdev, &rdev->netdev_list, list) cfg80211_mlme_unregister_socket(wdev, notify->pid); + if (rdev->ap_beacons_nlpid == notify->pid) + rdev->ap_beacons_nlpid = 0; + } rcu_read_unlock(); diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index f24a1fbeaf19..12bf4d185abe 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -117,4 +117,9 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev, struct net_device *netdev, int index, const u8 *bssid, bool preauth, gfp_t gfp); +bool nl80211_unexpected_frame(struct net_device *dev, + const u8 *addr, gfp_t gfp); +bool nl80211_unexpected_4addr_frame(struct net_device *dev, + const u8 *addr, gfp_t gfp); + #endif /* __NET_WIRELESS_NL80211_H */ diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 77e926738014..76b35df39623 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1123,6 +1123,8 @@ static void wiphy_update_regulatory(struct wiphy *wiphy, if (ignore_reg_update(wiphy, initiator)) return; + last_request->dfs_region = cfg80211_regdomain->dfs_region; + for (band = 0; band < IEEE80211_NUM_BANDS; band++) { if (wiphy->bands[band]) handle_band(wiphy, band, initiator); @@ -1948,6 +1950,42 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd) } } +bool reg_supported_dfs_region(u8 dfs_region) +{ + switch (dfs_region) { + case NL80211_DFS_UNSET: + case NL80211_DFS_FCC: + case NL80211_DFS_ETSI: + case NL80211_DFS_JP: + return true; + default: + REG_DBG_PRINT("Ignoring uknown DFS master region: %d\n", + dfs_region); + return false; + } +} + +static void print_dfs_region(u8 dfs_region) +{ + if (!dfs_region) + return; + + switch (dfs_region) { + case NL80211_DFS_FCC: + pr_info(" DFS Master region FCC"); + break; + case NL80211_DFS_ETSI: + pr_info(" DFS Master region ETSI"); + break; + case NL80211_DFS_JP: + pr_info(" DFS Master region JP"); + break; + default: + pr_info(" DFS Master region Uknown"); + break; + } +} + static void print_regdomain(const struct ieee80211_regdomain *rd) { @@ -1975,6 +2013,7 @@ static void print_regdomain(const struct ieee80211_regdomain *rd) pr_info("Regulatory domain changed to country: %c%c\n", rd->alpha2[0], rd->alpha2[1]); } + print_dfs_region(rd->dfs_region); print_rd_rules(rd); } diff --git a/net/wireless/reg.h b/net/wireless/reg.h index 4a56799d868d..786e414afd91 100644 --- a/net/wireless/reg.h +++ b/net/wireless/reg.h @@ -5,6 +5,7 @@ extern const struct ieee80211_regdomain *cfg80211_regdomain; bool is_world_regdom(const char *alpha2); bool reg_is_valid_request(const char *alpha2); +bool reg_supported_dfs_region(u8 dfs_region); int regulatory_hint_user(const char *alpha2); diff --git a/net/wireless/scan.c b/net/wireless/scan.c index dc23b31594e0..31119e32e092 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -355,8 +355,8 @@ static bool is_mesh(struct cfg80211_bss *a, sizeof(struct ieee80211_meshconf_ie) - 2) == 0; } -static int cmp_bss(struct cfg80211_bss *a, - struct cfg80211_bss *b) +static int cmp_bss_core(struct cfg80211_bss *a, + struct cfg80211_bss *b) { int r; @@ -378,7 +378,15 @@ static int cmp_bss(struct cfg80211_bss *a, b->len_information_elements); } - r = memcmp(a->bssid, b->bssid, ETH_ALEN); + return memcmp(a->bssid, b->bssid, ETH_ALEN); +} + +static int cmp_bss(struct cfg80211_bss *a, + struct cfg80211_bss *b) +{ + int r; + + r = cmp_bss_core(a, b); if (r) return r; @@ -389,6 +397,52 @@ 
static int cmp_bss(struct cfg80211_bss *a, b->len_information_elements); } +static int cmp_hidden_bss(struct cfg80211_bss *a, + struct cfg80211_bss *b) +{ + const u8 *ie1; + const u8 *ie2; + int i; + int r; + + r = cmp_bss_core(a, b); + if (r) + return r; + + ie1 = cfg80211_find_ie(WLAN_EID_SSID, + a->information_elements, + a->len_information_elements); + ie2 = cfg80211_find_ie(WLAN_EID_SSID, + b->information_elements, + b->len_information_elements); + + /* Key comparator must use same algorithm in any rb-tree + * search function (order is important), otherwise ordering + * of items in the tree is broken and search gives incorrect + * results. This code uses same order as cmp_ies() does. */ + + /* sort missing IE before (left of) present IE */ + if (!ie1) + return -1; + if (!ie2) + return 1; + + /* zero-size SSID is used as an indication of the hidden bss */ + if (!ie2[1]) + return 0; + + /* sort by length first, then by contents */ + if (ie1[1] != ie2[1]) + return ie2[1] - ie1[1]; + + /* zeroed SSID ie is another indication of a hidden bss */ + for (i = 0; i < ie2[1]; i++) + if (ie2[i + 2]) + return -1; + + return 0; +} + struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, struct ieee80211_channel *channel, const u8 *bssid, @@ -505,6 +559,48 @@ rb_find_bss(struct cfg80211_registered_device *dev, } static struct cfg80211_internal_bss * +rb_find_hidden_bss(struct cfg80211_registered_device *dev, + struct cfg80211_internal_bss *res) +{ + struct rb_node *n = dev->bss_tree.rb_node; + struct cfg80211_internal_bss *bss; + int r; + + while (n) { + bss = rb_entry(n, struct cfg80211_internal_bss, rbn); + r = cmp_hidden_bss(&res->pub, &bss->pub); + + if (r == 0) + return bss; + else if (r < 0) + n = n->rb_left; + else + n = n->rb_right; + } + + return NULL; +} + +static void +copy_hidden_ies(struct cfg80211_internal_bss *res, + struct cfg80211_internal_bss *hidden) +{ + if (unlikely(res->pub.beacon_ies)) + return; + if (WARN_ON(!hidden->pub.beacon_ies)) + return; + + res->pub.beacon_ies = kmalloc(hidden->pub.len_beacon_ies, GFP_ATOMIC); + if (unlikely(!res->pub.beacon_ies)) + return; + + res->beacon_ies_allocated = true; + res->pub.len_beacon_ies = hidden->pub.len_beacon_ies; + memcpy(res->pub.beacon_ies, hidden->pub.beacon_ies, + res->pub.len_beacon_ies); +} + +static struct cfg80211_internal_bss * cfg80211_bss_update(struct cfg80211_registered_device *dev, struct cfg80211_internal_bss *res) { @@ -607,6 +703,21 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, kref_put(&res->ref, bss_release); } else { + struct cfg80211_internal_bss *hidden; + + /* First check if the beacon is a probe response from + * a hidden bss. If so, copy beacon ies (with nullified + * ssid) into the probe response bss entry (with real ssid). + * It is required basically for PSM implementation + * (probe responses do not contain tim ie) */ + + /* TODO: The code is not trying to update existing probe + * response bss entries when beacon ies are + * getting changed. 
*/ + hidden = rb_find_hidden_bss(dev, res); + if (hidden) + copy_hidden_ies(res, hidden); + /* this "consumes" the reference */ list_add_tail(&res->list, &dev->bss_list); rb_insert_bss(dev, res); diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 0acfdc9beacf..f0c900ce2fb9 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -190,7 +190,9 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev) prev_bssid, params->ssid, params->ssid_len, params->ie, params->ie_len, - false, ¶ms->crypto); + false, ¶ms->crypto, + params->flags, ¶ms->ht_capa, + ¶ms->ht_capa_mask); if (err) __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, NULL, 0, @@ -774,6 +776,9 @@ int __cfg80211_connect(struct cfg80211_registered_device *rdev, wdev->connect_keys = NULL; } + cfg80211_oper_and_ht_capa(&connect->ht_capa_mask, + rdev->wiphy.ht_capa_mod_mask); + if (connkeys && connkeys->def >= 0) { int idx; u32 cipher; diff --git a/net/wireless/util.c b/net/wireless/util.c index 4dde429441d2..9c601d59b77a 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -7,7 +7,6 @@ #include <linux/bitops.h> #include <linux/etherdevice.h> #include <linux/slab.h> -#include <linux/crc32.h> #include <net/cfg80211.h> #include <net/ip.h> #include "core.h" @@ -240,17 +239,6 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, return 0; } -/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ -/* Ethernet-II snap header (RFC1042 for most EtherTypes) */ -const unsigned char rfc1042_header[] __aligned(2) = - { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; -EXPORT_SYMBOL(rfc1042_header); - -/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ -const unsigned char bridge_tunnel_header[] __aligned(2) = - { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; -EXPORT_SYMBOL(bridge_tunnel_header); - unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc) { unsigned int hdrlen = 24; @@ -1051,169 +1039,13 @@ int ieee80211_get_ratemask(struct ieee80211_supported_band *sband, return 0; } -u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, - struct ieee802_11_elems *elems, - u64 filter, u32 crc) -{ - size_t left = len; - u8 *pos = start; - bool calc_crc = filter != 0; - - memset(elems, 0, sizeof(*elems)); - elems->ie_start = start; - elems->total_len = len; - - while (left >= 2) { - u8 id, elen; - - id = *pos++; - elen = *pos++; - left -= 2; - - if (elen > left) - break; - - if (calc_crc && id < 64 && (filter & (1ULL << id))) - crc = crc32_be(crc, pos - 2, elen + 2); - - switch (id) { - case WLAN_EID_SSID: - elems->ssid = pos; - elems->ssid_len = elen; - break; - case WLAN_EID_SUPP_RATES: - elems->supp_rates = pos; - elems->supp_rates_len = elen; - break; - case WLAN_EID_FH_PARAMS: - elems->fh_params = pos; - elems->fh_params_len = elen; - break; - case WLAN_EID_DS_PARAMS: - elems->ds_params = pos; - elems->ds_params_len = elen; - break; - case WLAN_EID_CF_PARAMS: - elems->cf_params = pos; - elems->cf_params_len = elen; - break; - case WLAN_EID_TIM: - if (elen >= sizeof(struct ieee80211_tim_ie)) { - elems->tim = (void *)pos; - elems->tim_len = elen; - } - break; - case WLAN_EID_IBSS_PARAMS: - elems->ibss_params = pos; - elems->ibss_params_len = elen; - break; - case WLAN_EID_CHALLENGE: - elems->challenge = pos; - elems->challenge_len = elen; - break; - case WLAN_EID_VENDOR_SPECIFIC: - if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && - pos[2] == 0xf2) { - /* Microsoft OUI (00:50:F2) */ - - if (calc_crc) - crc = crc32_be(crc, pos - 2, elen + 2); - - if (pos[3] == 1) { - /* OUI Type 1 
- WPA IE */ - elems->wpa = pos; - elems->wpa_len = elen; - } else if (elen >= 5 && pos[3] == 2) { - /* OUI Type 2 - WMM IE */ - if (pos[4] == 0) { - elems->wmm_info = pos; - elems->wmm_info_len = elen; - } else if (pos[4] == 1) { - elems->wmm_param = pos; - elems->wmm_param_len = elen; - } - } - } - break; - case WLAN_EID_RSN: - elems->rsn = pos; - elems->rsn_len = elen; - break; - case WLAN_EID_ERP_INFO: - elems->erp_info = pos; - elems->erp_info_len = elen; - break; - case WLAN_EID_EXT_SUPP_RATES: - elems->ext_supp_rates = pos; - elems->ext_supp_rates_len = elen; - break; - case WLAN_EID_HT_CAPABILITY: - if (elen >= sizeof(struct ieee80211_ht_cap)) - elems->ht_cap_elem = (void *)pos; - break; - case WLAN_EID_HT_INFORMATION: - if (elen >= sizeof(struct ieee80211_ht_info)) - elems->ht_info_elem = (void *)pos; - break; - case WLAN_EID_MESH_ID: - elems->mesh_id = pos; - elems->mesh_id_len = elen; - break; - case WLAN_EID_MESH_CONFIG: - if (elen >= sizeof(struct ieee80211_meshconf_ie)) - elems->mesh_config = (void *)pos; - break; - case WLAN_EID_PEER_MGMT: - elems->peering = pos; - elems->peering_len = elen; - break; - case WLAN_EID_PREQ: - elems->preq = pos; - elems->preq_len = elen; - break; - case WLAN_EID_PREP: - elems->prep = pos; - elems->prep_len = elen; - break; - case WLAN_EID_PERR: - elems->perr = pos; - elems->perr_len = elen; - break; - case WLAN_EID_RANN: - if (elen >= sizeof(struct ieee80211_rann_ie)) - elems->rann = (void *)pos; - break; - case WLAN_EID_CHANNEL_SWITCH: - elems->ch_switch_elem = pos; - elems->ch_switch_elem_len = elen; - break; - case WLAN_EID_QUIET: - if (!elems->quiet_elem) { - elems->quiet_elem = pos; - elems->quiet_elem_len = elen; - } - elems->num_of_quiet_elem++; - break; - case WLAN_EID_COUNTRY: - elems->country_elem = pos; - elems->country_elem_len = elen; - break; - case WLAN_EID_PWR_CONSTRAINT: - elems->pwr_constr_elem = pos; - elems->pwr_constr_elem_len = elen; - break; - case WLAN_EID_TIMEOUT_INTERVAL: - elems->timeout_int = pos; - elems->timeout_int_len = elen; - break; - default: - break; - } - - left -= elen; - pos += elen; - } +/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ +/* Ethernet-II snap header (RFC1042 for most EtherTypes) */ +const unsigned char rfc1042_header[] __aligned(2) = + { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; +EXPORT_SYMBOL(rfc1042_header); - return crc; -} -EXPORT_SYMBOL(ieee802_11_parse_elems_crc); +/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ +const unsigned char bridge_tunnel_header[] __aligned(2) = + { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; +EXPORT_SYMBOL(bridge_tunnel_header); diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 6897436b1d3f..3c24eb97e9d7 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -819,12 +819,24 @@ static int cfg80211_wext_giwfreq(struct net_device *dev, struct iw_freq *freq, char *extra) { struct wireless_dev *wdev = dev->ieee80211_ptr; + struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); + struct ieee80211_channel *chan; switch (wdev->iftype) { case NL80211_IFTYPE_STATION: return cfg80211_mgd_wext_giwfreq(dev, info, freq, extra); case NL80211_IFTYPE_ADHOC: return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra); + case NL80211_IFTYPE_MONITOR: + if (!rdev->ops->get_channel) + return -EINVAL; + + chan = rdev->ops->get_channel(wdev->wiphy); + if (!chan) + return -EINVAL; + freq->m = chan->center_freq; + freq->e = 6; + return 0; default: if (!wdev->channel) return -EINVAL; diff --git 
a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 2118d6446630..4fce1cec193e 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -61,8 +61,8 @@ __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) { const struct flowi4 *fl4 = &fl->u.ip4; - return addr_match(&fl4->daddr, &sel->daddr, sel->prefixlen_d) && - addr_match(&fl4->saddr, &sel->saddr, sel->prefixlen_s) && + return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) && + addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) && !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) && !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) && (fl4->flowi4_proto == sel->proto || !sel->proto) && diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 9414b9c5b1e4..5b228f97d4b3 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1035,16 +1035,12 @@ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m, break; case AF_INET6: - ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6, - (const struct in6_addr *)daddr); - ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6, - (const struct in6_addr *)saddr); + *(struct in6_addr *)x->sel.daddr.a6 = *(struct in6_addr *)daddr; + *(struct in6_addr *)x->sel.saddr.a6 = *(struct in6_addr *)saddr; x->sel.prefixlen_d = 128; x->sel.prefixlen_s = 128; - ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6, - (const struct in6_addr *)saddr); - ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6, - (const struct in6_addr *)daddr); + *(struct in6_addr *)x->props.saddr.a6 = *(struct in6_addr *)saddr; + *(struct in6_addr *)x->id.daddr.a6 = *(struct in6_addr *)daddr; break; }
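
The scan.c changes above teach cfg80211 to pair a probe response (which carries the real SSID) with an existing beacon entry whose SSID is hidden, so that beacon-only information such as the TIM element can be copied into it via copy_hidden_ies(). A hidden SSID is recognised either as a zero-length SSID element or as an SSID of matching length containing only zero bytes. The standalone sketch below mirrors just that SSID-comparison rule; it is a simplified illustration, not the kernel's cmp_hidden_bss(), which also compares channel, capability and BSSID through cmp_bss_core() first and must keep the same ordering as the rb-tree search.

#include <stdio.h>

/*
 * Sketch of the hidden-SSID comparison rule: 'ssid1' belongs to the
 * entry being inserted (probe response, real SSID), 'ssid2' to an
 * existing tree entry.  Returns 0 when the existing entry looks like
 * the hidden-SSID twin: either a zero-length SSID or an SSID of the
 * same length made up only of zero bytes.  Non-zero results keep a
 * stable ordering, as an rb-tree comparator must.
 */
static int cmp_hidden_ssid(const unsigned char *ssid1, int len1,
                           const unsigned char *ssid2, int len2)
{
        int i;

        /* sort missing SSID before present SSID */
        if (!ssid1)
                return -1;
        if (!ssid2)
                return 1;

        /* zero-length SSID marks a hidden BSS */
        if (!len2)
                return 0;

        /* otherwise sort by length, then require an all-zero SSID */
        if (len1 != len2)
                return len2 - len1;

        for (i = 0; i < len2; i++)
                if (ssid2[i])
                        return -1;

        return 0;
}

int main(void)
{
        const unsigned char real[] = "guestnet";
        const unsigned char hidden[8] = { 0 };  /* zeroed, same length */

        printf("zero-length SSID: %d\n",
               cmp_hidden_ssid(real, 8, hidden, 0));   /* 0: match */
        printf("zeroed SSID:      %d\n",
               cmp_hidden_ssid(real, 8, hidden, 8));   /* 0: match */
        printf("different length: %d\n",
               cmp_hidden_ssid(real, 8, hidden, 5));   /* non-zero */
        return 0;
}

When the comparator returns 0, cfg80211_bss_update() copies the beacon IEs of the hidden entry into the new probe-response entry, which is what makes power-save handling possible for APs that hide their SSID.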
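
In the xfrm_policy.c hunk, the IPv4 selector test moves from the generic addr_match() to addr4_match(), passing the addresses by value together with a prefix length. The helper's body is not part of this diff, so the sketch below only illustrates the usual way such a prefix comparison works (mask off the bits below the prefix and compare the rest); treat the exact kernel behaviour as an assumption.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/*
 * Prefix comparison in the spirit of addr4_match(): true when the top
 * 'prefixlen' bits of two network-byte-order IPv4 addresses are equal.
 * prefixlen == 0 matches everything; shifting a 32-bit value by 32 is
 * undefined in C, hence the explicit check.
 */
static int prefix4_match(uint32_t a1, uint32_t a2, unsigned int prefixlen)
{
        uint32_t mask;

        if (prefixlen == 0)
                return 1;
        mask = htonl(~0u << (32 - prefixlen));
        return !((a1 ^ a2) & mask);
}

int main(void)
{
        uint32_t a = inet_addr("192.0.2.17");
        uint32_t b = inet_addr("192.0.2.200");

        printf("/24 match: %d\n", prefix4_match(a, b, 24));     /* 1 */
        printf("/28 match: %d\n", prefix4_match(a, b, 28));     /* 0 */
        return 0;
}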
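
The xfrm_state.c hunk drops ipv6_addr_copy() in favour of plain struct in6_addr assignments; since in6_addr is an ordinary fixed-size struct, assignment alone already copies the full 128-bit address. A trivial userspace demonstration of that idea:

#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
        struct in6_addr src, dst;
        char buf[INET6_ADDRSTRLEN];

        inet_pton(AF_INET6, "2001:db8::1", &src);

        /* struct assignment copies all 128 bits, no helper needed */
        dst = src;

        inet_ntop(AF_INET6, &dst, buf, sizeof(buf));
        printf("copied address: %s\n", buf);
        return 0;
}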