Diffstat (limited to 'net')
35 files changed, 295 insertions, 177 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 7ea5cf9ea08a..86bff9b1ac47 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -586,9 +586,14 @@ static void vlan_dev_uninit(struct net_device *dev)
 static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
 {
         struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+        u32 old_features = features;
 
         features &= real_dev->features;
         features &= real_dev->vlan_features;
+
+        if (old_features & NETIF_F_SOFT_FEATURES)
+                features |= old_features & NETIF_F_SOFT_FEATURES;
+
         if (dev_ethtool_get_rx_csum(real_dev))
                 features |= NETIF_F_RXCSUM;
         features |= NETIF_F_LLTX;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 3163330cd4f1..bcd158f40bb9 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -393,6 +393,9 @@ int hci_conn_del(struct hci_conn *conn)
 
         hci_dev_put(hdev);
 
+        if (conn->handle == 0)
+                kfree(conn);
+
         return 0;
 }
 
@@ -608,11 +611,11 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
                 goto encrypt;
 
 auth:
-        if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+        if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                 return 0;
 
-        hci_conn_auth(conn, sec_level, auth_type);
-        return 0;
+        if (!hci_conn_auth(conn, sec_level, auth_type))
+                return 0;
 
 encrypt:
         if (conn->link_mode & HCI_LM_ENCRYPT)
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index c405a954a603..43b4c2deb7cc 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -464,7 +464,8 @@ static void hidp_idle_timeout(unsigned long arg)
 {
         struct hidp_session *session = (struct hidp_session *) arg;
 
-        kthread_stop(session->task);
+        atomic_inc(&session->terminate);
+        wake_up_process(session->task);
 }
 
 static void hidp_set_timer(struct hidp_session *session)
@@ -535,7 +536,8 @@ static void hidp_process_hid_control(struct hidp_session *session,
                 skb_queue_purge(&session->ctrl_transmit);
                 skb_queue_purge(&session->intr_transmit);
 
-                kthread_stop(session->task);
+                atomic_inc(&session->terminate);
+                wake_up_process(current);
         }
 }
 
@@ -706,9 +708,8 @@ static int hidp_session(void *arg)
         add_wait_queue(sk_sleep(intr_sk), &intr_wait);
         session->waiting_for_startup = 0;
         wake_up_interruptible(&session->startup_queue);
-        while (!kthread_should_stop()) {
-                set_current_state(TASK_INTERRUPTIBLE);
-
+        set_current_state(TASK_INTERRUPTIBLE);
+        while (!atomic_read(&session->terminate)) {
                 if (ctrl_sk->sk_state != BT_CONNECTED ||
                                 intr_sk->sk_state != BT_CONNECTED)
                         break;
@@ -726,6 +727,7 @@ static int hidp_session(void *arg)
                 hidp_process_transmit(session);
 
                 schedule();
+                set_current_state(TASK_INTERRUPTIBLE);
         }
         set_current_state(TASK_RUNNING);
         remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
@@ -1060,7 +1062,8 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
 err_add_device:
         hid_destroy_device(session->hid);
         session->hid = NULL;
-        kthread_stop(session->task);
+        atomic_inc(&session->terminate);
+        wake_up_process(session->task);
 
 unlink:
         hidp_del_timer(session);
@@ -1111,7 +1114,8 @@ int hidp_del_connection(struct hidp_conndel_req *req)
                         skb_queue_purge(&session->ctrl_transmit);
                         skb_queue_purge(&session->intr_transmit);
 
-                        kthread_stop(session->task);
+                        atomic_inc(&session->terminate);
+                        wake_up_process(session->task);
                 }
         } else
                 err = -ENOENT;
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 19e95004b286..af1bcc823f26 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -142,6 +142,7 @@ struct hidp_session {
         uint ctrl_mtu;
         uint intr_mtu;
 
+        atomic_t terminate;
         struct task_struct *task;
 
         unsigned char keys[8];
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index e64a1c2df238..ebff14c69078 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -2323,7 +2323,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
         sk = chan->sk;
 
-        if (sk->sk_state != BT_CONFIG) {
+        if ((bt_sk(sk)->defer_setup && sk->sk_state != BT_CONNECT2) ||
+            (!bt_sk(sk)->defer_setup && sk->sk_state != BT_CONFIG)) {
                 struct l2cap_cmd_rej rej;
 
                 rej.reason = cpu_to_le16(0x0002);
@@ -2334,7 +2335,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
         /* Reject if config buffer is too small. */
         len = cmd_len - sizeof(*req);
-        if (chan->conf_len + len > sizeof(chan->conf_req)) {
+        if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
                 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
                                 l2cap_build_conf_rsp(chan, rsp,
                                         L2CAP_CONF_REJECT, flags), rsp);
@@ -4002,21 +4003,30 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
                 }
         } else if (sk->sk_state == BT_CONNECT2) {
                 struct l2cap_conn_rsp rsp;
-                __u16 result;
+                __u16 res, stat;
 
                 if (!status) {
-                        sk->sk_state = BT_CONFIG;
-                        result = L2CAP_CR_SUCCESS;
+                        if (bt_sk(sk)->defer_setup) {
+                                struct sock *parent = bt_sk(sk)->parent;
+                                res = L2CAP_CR_PEND;
+                                stat = L2CAP_CS_AUTHOR_PEND;
+                                parent->sk_data_ready(parent, 0);
+                        } else {
+                                sk->sk_state = BT_CONFIG;
+                                res = L2CAP_CR_SUCCESS;
+                                stat = L2CAP_CS_NO_INFO;
+                        }
                 } else {
                         sk->sk_state = BT_DISCONN;
                         l2cap_sock_set_timer(sk, HZ / 10);
-                        result = L2CAP_CR_SEC_BLOCK;
+                        res = L2CAP_CR_SEC_BLOCK;
+                        stat = L2CAP_CS_NO_INFO;
                 }
 
                 rsp.scid = cpu_to_le16(chan->dcid);
                 rsp.dcid = cpu_to_le16(chan->scid);
-                rsp.result = cpu_to_le16(result);
-                rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+                rsp.result = cpu_to_le16(res);
+                rsp.status = cpu_to_le16(stat);
                 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
                                                         sizeof(rsp), &rsp);
         }
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index c188c803c09c..32b8f9f7f79e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -49,7 +49,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
         skb_pull(skb, ETH_HLEN);
 
         rcu_read_lock();
-        if (is_multicast_ether_addr(dest)) {
+        if (is_broadcast_ether_addr(dest))
+                br_flood_deliver(br, skb);
+        else if (is_multicast_ether_addr(dest)) {
                 if (unlikely(netpoll_tx_running(dev))) {
                         br_flood_deliver(br, skb);
                         goto out;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index f3ac1e858ee1..f06ee39c73fd 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -60,7 +60,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
         br = p->br;
         br_fdb_update(br, p, eth_hdr(skb)->h_source);
 
-        if (is_multicast_ether_addr(dest) &&
+        if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
             br_multicast_rcv(br, p, skb))
                 goto drop;
 
@@ -77,7 +77,9 @@ int br_handle_frame_finish(struct sk_buff *skb)
 
         dst = NULL;
 
-        if (is_multicast_ether_addr(dest)) {
+        if (is_broadcast_ether_addr(dest))
+                skb2 = skb;
+        else if (is_multicast_ether_addr(dest)) {
                 mdst = br_mdb_get(br, skb);
                 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
                         if ((mdst && mdst->mglist) ||
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 29b9812c8da0..2d85ca7111d3 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1379,8 +1379,11 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
         if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
                 return -EINVAL;
 
-        if (iph->protocol != IPPROTO_IGMP)
+        if (iph->protocol != IPPROTO_IGMP) {
+                if ((iph->daddr & IGMP_LOCAL_GROUP_MASK) != IGMP_LOCAL_GROUP)
+                        BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
                 return 0;
+        }
 
         len = ntohs(iph->tot_len);
         if (skb->len < len || len < ip_hdrlen(skb))
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 9cb627a4073a..7330c2757c0c 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -477,8 +477,9 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
         calc_layout(osdc, vino, layout, off, plen, req, ops);
         req->r_file_layout = *layout;  /* keep a copy */
 
-        /* in case it differs from natural alignment that
-           filled in for us */
+        /* in case it differs from natural (file) alignment that
+           calc_layout filled in for us */
+        req->r_num_pages = calc_pages_for(page_align, *plen);
         req->r_page_alignment = page_align;
 
         ceph_osdc_build_request(req, off, plen, ops,
@@ -2027,8 +2028,9 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
                 int want = calc_pages_for(req->r_page_alignment, data_len);
 
                 if (unlikely(req->r_num_pages < want)) {
-                        pr_warning("tid %lld reply %d > expected %d pages\n",
-                                   tid, want, m->nr_pages);
+                        pr_warning("tid %lld reply has %d bytes %d pages, we"
+                                   " had only %d pages ready\n", tid, data_len,
+                                   want, req->r_num_pages);
                         *skip = 1;
                         ceph_msg_put(m);
                         m = NULL;
diff --git a/net/core/dst.c b/net/core/dst.c
index 9ccca038444f..6135f3671692 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -190,7 +190,8 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
         dst->lastuse = jiffies;
         dst->flags = flags;
         dst->next = NULL;
-        dst_entries_add(ops, 1);
+        if (!(flags & DST_NOCOUNT))
+                dst_entries_add(ops, 1);
         return dst;
 }
 EXPORT_SYMBOL(dst_alloc);
@@ -243,7 +244,8 @@ again:
                 neigh_release(neigh);
         }
 
-        dst_entries_add(dst->ops, -1);
+        if (!(dst->flags & DST_NOCOUNT))
+                dst_entries_add(dst->ops, -1);
 
         if (dst->ops->destroy)
                 dst->ops->destroy(dst);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index eae1f676f870..ef1528af7abf 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -465,8 +465,10 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
         if (addr_len < sizeof(struct sockaddr_in))
                 goto out;
 
-        if (addr->sin_family != AF_INET)
+        if (addr->sin_family != AF_INET) {
+                err = -EAFNOSUPPORT;
                 goto out;
+        }
 
         chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index a8024eaa0e87..84f26e8e6c60 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -802,8 +802,6 @@ static int __ip_append_data(struct sock *sk,
         skb = skb_peek_tail(queue);
 
         exthdrlen = !skb ? rt->dst.header_len : 0;
-        length += exthdrlen;
-        transhdrlen += exthdrlen;
         mtu = cork->fragsize;
 
         hh_len = LL_RESERVED_SPACE(rt->dst.dev);
@@ -830,7 +828,7 @@ static int __ip_append_data(struct sock *sk,
         cork->length += length;
         if (((length > mtu) || (skb && skb_is_gso(skb))) &&
             (sk->sk_protocol == IPPROTO_UDP) &&
-            (rt->dst.dev->features & NETIF_F_UFO)) {
+            (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
                 err = ip_ufo_append_data(sk, queue, getfrag, from, length,
                                          hh_len, fragheaderlen, transhdrlen,
                                          mtu, flags);
@@ -883,17 +881,16 @@ alloc_new_skb:
                         else
                                 alloclen = fraglen;
 
+                        alloclen += exthdrlen;
+
                         /* The last fragment gets additional space at tail.
                          * Note, with MSG_MORE we overallocate on fragments,
                          * because we have no idea what fragment will be
                          * the last.
                          */
-                        if (datalen == length + fraggap) {
+                        if (datalen == length + fraggap)
                                 alloclen += rt->dst.trailer_len;
-                                /* make sure mtu is not reached */
-                                if (datalen > mtu - fragheaderlen - rt->dst.trailer_len)
-                                        datalen -= ALIGN(rt->dst.trailer_len, 8);
-                        }
+
                         if (transhdrlen) {
                                 skb = sock_alloc_send_skb(sk,
                                                 alloclen + hh_len + 15,
@@ -926,11 +923,11 @@ alloc_new_skb:
                         /*
                          *      Find where to start putting bytes.
                          */
-                        data = skb_put(skb, fraglen);
+                        data = skb_put(skb, fraglen + exthdrlen);
                         skb_set_network_header(skb, exthdrlen);
                         skb->transport_header = (skb->network_header +
                                                  fragheaderlen);
-                        data += fragheaderlen;
+                        data += fragheaderlen + exthdrlen;
 
                         if (fraggap) {
                                 skb->csum = skb_copy_and_csum_bits(
@@ -1064,7 +1061,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
          */
         *rtp = NULL;
         cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
-                         rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+                         rt->dst.dev->mtu : dst_mtu(&rt->dst);
         cork->dst = &rt->dst;
         cork->length = 0;
         cork->tx_flags = ipc->tx_flags;
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 4614babdc45f..2e97e3ec1eb7 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -17,51 +17,35 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
         const struct iphdr *iph = ip_hdr(skb);
         struct rtable *rt;
         struct flowi4 fl4 = {};
-        unsigned long orefdst;
+        __be32 saddr = iph->saddr;
+        __u8 flags = 0;
         unsigned int hh_len;
-        unsigned int type;
 
-        type = inet_addr_type(net, iph->saddr);
-        if (skb->sk && inet_sk(skb->sk)->transparent)
-                type = RTN_LOCAL;
-        if (addr_type == RTN_UNSPEC)
-                addr_type = type;
+        if (!skb->sk && addr_type != RTN_LOCAL) {
+                if (addr_type == RTN_UNSPEC)
+                        addr_type = inet_addr_type(net, saddr);
+                if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
+                        flags |= FLOWI_FLAG_ANYSRC;
+                else
+                        saddr = 0;
+        }
 
         /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
          * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
          */
-        if (addr_type == RTN_LOCAL) {
-                fl4.daddr = iph->daddr;
-                if (type == RTN_LOCAL)
-                        fl4.saddr = iph->saddr;
-                fl4.flowi4_tos = RT_TOS(iph->tos);
-                fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
-                fl4.flowi4_mark = skb->mark;
-                fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
-                rt = ip_route_output_key(net, &fl4);
-                if (IS_ERR(rt))
-                        return -1;
-
-                /* Drop old route. */
-                skb_dst_drop(skb);
-                skb_dst_set(skb, &rt->dst);
-        } else {
-                /* non-local src, find valid iif to satisfy
-                 * rp-filter when calling ip_route_input. */
-                fl4.daddr = iph->saddr;
-                rt = ip_route_output_key(net, &fl4);
-                if (IS_ERR(rt))
-                        return -1;
+        fl4.daddr = iph->daddr;
+        fl4.saddr = saddr;
+        fl4.flowi4_tos = RT_TOS(iph->tos);
+        fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
+        fl4.flowi4_mark = skb->mark;
+        fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : flags;
+        rt = ip_route_output_key(net, &fl4);
+        if (IS_ERR(rt))
+                return -1;
 
-                orefdst = skb->_skb_refdst;
-                if (ip_route_input(skb, iph->daddr, iph->saddr,
-                                   RT_TOS(iph->tos), rt->dst.dev) != 0) {
-                        dst_release(&rt->dst);
-                        return -1;
-                }
-                dst_release(&rt->dst);
-                refdst_drop(orefdst);
-        }
+        /* Drop old route. */
+        skb_dst_drop(skb);
+        skb_dst_set(skb, &rt->dst);
 
         if (skb_dst(skb)->error)
                 return -1;
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 1ff79e557f96..51f13f8ec724 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -40,7 +40,6 @@ static void send_reset(struct sk_buff *oldskb, int hook)
         struct iphdr *niph;
         const struct tcphdr *oth;
         struct tcphdr _otcph, *tcph;
-        unsigned int addr_type;
 
         /* IP header checks: fragment. */
         if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
@@ -55,6 +54,9 @@ static void send_reset(struct sk_buff *oldskb, int hook)
         if (oth->rst)
                 return;
 
+        if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+                return;
+
         /* Check checksum */
         if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
                 return;
@@ -101,19 +103,11 @@ static void send_reset(struct sk_buff *oldskb, int hook)
         nskb->csum_start = (unsigned char *)tcph - nskb->head;
         nskb->csum_offset = offsetof(struct tcphdr, check);
 
-        addr_type = RTN_UNSPEC;
-        if (hook != NF_INET_FORWARD
-#ifdef CONFIG_BRIDGE_NETFILTER
-            || (nskb->nf_bridge && nskb->nf_bridge->mask & BRNF_BRIDGED)
-#endif
-           )
-                addr_type = RTN_LOCAL;
-
         /* ip_route_me_harder expects skb->dst to be set */
         skb_dst_set_noref(nskb, skb_dst(oldskb));
 
         nskb->protocol = htons(ETH_P_IP);
-        if (ip_route_me_harder(nskb, addr_type))
+        if (ip_route_me_harder(nskb, RTN_UNSPEC))
                 goto free_nskb;
 
         niph->ttl = ip4_dst_hoplimit(skb_dst(nskb));
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 054a59d21eb0..46febcacb729 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3220,7 +3220,7 @@ __setup("thash_entries=", set_thash_entries);
 void __init tcp_init(void)
 {
         struct sk_buff *skb = NULL;
-        unsigned long nr_pages, limit;
+        unsigned long limit;
         int i, max_share, cnt;
         unsigned long jiffy = jiffies;
 
@@ -3277,13 +3277,7 @@ void __init tcp_init(void)
         sysctl_tcp_max_orphans = cnt / 2;
         sysctl_max_syn_backlog = max(128, cnt / 256);
 
-        /* Set the pressure threshold to be a fraction of global memory that
-         * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
-         * memory, with a floor of 128 pages.
-         */
-        nr_pages = totalram_pages - totalhigh_pages;
-        limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
-        limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+        limit = nr_free_buffer_pages() / 8;
         limit = max(limit, 128UL);
         sysctl_tcp_mem[0] = limit / 4 * 3;
         sysctl_tcp_mem[1] = limit;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index abca870d8ff6..198f75b7bdd3 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1249,6 +1249,9 @@ csum_copy_err:
 
         if (noblock)
                 return -EAGAIN;
+
+        /* starting over for a new packet */
+        msg->msg_flags &= ~MSG_TRUNC;
         goto try_again;
 }
 
@@ -2206,16 +2209,10 @@ void __init udp_table_init(struct udp_table *table, const char *name)
 
 void __init udp_init(void)
 {
-        unsigned long nr_pages, limit;
+        unsigned long limit;
 
         udp_table_init(&udp_table, "UDP");
-        /* Set the pressure threshold up by the same strategy of TCP. It is a
-         * fraction of global memory that is up to 1/2 at 256 MB, decreasing
-         * toward zero with the amount of memory, with a floor of 128 pages.
-         */
-        nr_pages = totalram_pages - totalhigh_pages;
-        limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
-        limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+        limit = nr_free_buffer_pages() / 8;
         limit = max(limit, 128UL);
         sysctl_udp_mem[0] = limit / 4 * 3;
         sysctl_udp_mem[1] = limit;
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 2d51840e53a1..327a617d594c 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -32,7 +32,12 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
         dst = skb_dst(skb);
         mtu = dst_mtu(dst);
         if (skb->len > mtu) {
-                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+                if (skb->sk)
+                        ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr,
+                                       inet_sk(skb->sk)->inet_dport, mtu);
+                else
+                        icmp_send(skb, ICMP_DEST_UNREACH,
+                                  ICMP_FRAG_NEEDED, htonl(mtu));
                 ret = -EMSGSIZE;
         }
 out:
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index d450a2f9fc06..3b5669a2582d 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -274,7 +274,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                 return -EINVAL;
 
         if (addr->sin6_family != AF_INET6)
-                return -EINVAL;
+                return -EAFNOSUPPORT;
 
         addr_type = ipv6_addr_type(&addr->sin6_addr);
         if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index de2b1decd786..0ef1f086feb8 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -228,9 +228,10 @@ static struct rt6_info ip6_blk_hole_entry_template = {
 
 /* allocate dst with ip6_dst_ops */
 static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
-                                             struct net_device *dev)
+                                             struct net_device *dev,
+                                             int flags)
 {
-        struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, 0);
+        struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
 
         memset(&rt->rt6i_table, 0,
                sizeof(*rt) - sizeof(struct dst_entry));
@@ -1042,7 +1043,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
         if (unlikely(idev == NULL))
                 return NULL;
 
-        rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev);
+        rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
         if (unlikely(rt == NULL)) {
                 in6_dev_put(idev);
                 goto out;
@@ -1062,14 +1063,6 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
         dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
         rt->dst.output = ip6_output;
 
-#if 0   /* there's no chance to use these for ndisc */
-        rt->dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
-                                ? DST_HOST
-                                : 0;
-        ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
-        rt->rt6i_dst.plen = 128;
-#endif
-
         spin_lock_bh(&icmp6_dst_lock);
         rt->dst.next = icmp6_dst_gc_list;
         icmp6_dst_gc_list = &rt->dst;
@@ -1214,7 +1207,7 @@ int ip6_route_add(struct fib6_config *cfg)
                 goto out;
         }
 
-        rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL);
+        rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
 
         if (rt == NULL) {
                 err = -ENOMEM;
@@ -1244,7 +1237,7 @@ int ip6_route_add(struct fib6_config *cfg)
         ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
         rt->rt6i_dst.plen = cfg->fc_dst_len;
         if (rt->rt6i_dst.plen == 128)
-                rt->dst.flags = DST_HOST;
+                rt->dst.flags |= DST_HOST;
 
 #ifdef CONFIG_IPV6_SUBTREES
         ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
@@ -1734,7 +1727,7 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
 {
         struct net *net = dev_net(ort->rt6i_dev);
         struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
-                                            ort->dst.dev);
+                                            ort->dst.dev, 0);
 
         if (rt) {
                 rt->dst.input = ort->dst.input;
@@ -2013,7 +2006,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 {
         struct net *net = dev_net(idev->dev);
         struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
-                                            net->loopback_dev);
+                                            net->loopback_dev, 0);
         struct neighbour *neigh;
 
         if (rt == NULL) {
@@ -2025,7 +2018,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 
         in6_dev_hold(idev);
 
-        rt->dst.flags = DST_HOST;
+        rt->dst.flags |= DST_HOST;
         rt->dst.input = ip6_input;
         rt->dst.output = ip6_output;
         rt->rt6i_idev = idev;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 41f8c9c08dba..328985c40883 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -453,8 +453,11 @@ csum_copy_err:
         }
         unlock_sock_fast(sk, slow);
 
-        if (flags & MSG_DONTWAIT)
+        if (noblock)
                 return -EAGAIN;
+
+        /* starting over for a new packet */
+        msg->msg_flags &= ~MSG_TRUNC;
         goto try_again;
 }
 
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 58ffa7d069c7..669d2e32efb6 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -877,7 +877,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
         for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
                 local->sched_scan_ies.ie[i] = kzalloc(2 +
                                                       IEEE80211_MAX_SSID_LEN +
-                                                      local->scan_ies_len,
+                                                      local->scan_ies_len +
+                                                      req->ie_len,
                                                       GFP_KERNEL);
                 if (!local->sched_scan_ies.ie[i]) {
                         ret = -ENOMEM;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 9dc3b5f26e80..8f6a302d2ac3 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -86,6 +86,11 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
         struct sk_buff *skb = rx->skb;
         struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+        int queue = rx->queue;
+
+        /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+        if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+                queue = 0;
 
         /*
          * it makes no sense to check for MIC errors on anything other
@@ -148,13 +153,19 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
 update_iv:
         /* update IV in key information to be able to detect replays */
-        rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32;
-        rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16;
+        rx->key->u.tkip.rx[queue].iv32 = rx->tkip_iv32;
+        rx->key->u.tkip.rx[queue].iv16 = rx->tkip_iv16;
 
         return RX_CONTINUE;
 
 mic_fail:
-        mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
+        /*
+         * In some cases the key can be unset - e.g. a multicast packet, in
+         * a driver that supports HW encryption. Send up the key idx only if
+         * the key is set.
+         */
+        mac80211_ev_michael_mic_failure(rx->sdata,
+                                        rx->key ? rx->key->conf.keyidx : -1,
                                         (void *) skb->data, NULL, GFP_ATOMIC);
         return RX_DROP_UNUSABLE;
 }
 
@@ -235,6 +246,11 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
         struct ieee80211_key *key = rx->key;
         struct sk_buff *skb = rx->skb;
         struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+        int queue = rx->queue;
+
+        /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
+        if (rx->queue == NUM_RX_DATA_QUEUES - 1)
+                queue = 0;
 
         hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
@@ -255,7 +271,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
         res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
                                           key, skb->data + hdrlen,
                                           skb->len - hdrlen, rx->sta->sta.addr,
-                                          hdr->addr1, hwaccel, rx->queue,
+                                          hdr->addr1, hwaccel, queue,
                                           &rx->tkip_iv32, &rx->tkip_iv16);
         if (res != TKIP_DECRYPT_OK)
                 return RX_DROP_UNUSABLE;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 1c88c8911dc5..d03682109b7a 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1582,6 +1582,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 #endif /* SCTP_DEBUG */
         if (transport) {
                 if (bytes_acked) {
+                        struct sctp_association *asoc = transport->asoc;
+
                         /* We may have counted DATA that was migrated
                          * to this transport due to DEL-IP operation.
                          * Subtract those bytes, since the were never
@@ -1600,6 +1602,17 @@ static void sctp_check_transmitted(struct sctp_outq *q,
                         transport->error_count = 0;
                         transport->asoc->overall_error_count = 0;
 
+                        /*
+                         * While in SHUTDOWN PENDING, we may have started
+                         * the T5 shutdown guard timer after reaching the
+                         * retransmission limit. Stop that timer as soon
+                         * as the receiver acknowledged any data.
+                         */
+                        if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
+                            del_timer(&asoc->timers
+                                [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
+                                        sctp_association_put(asoc);
+
                         /* Mark the destination transport address as
                          * active if it is not so marked.
                          */
@@ -1629,10 +1642,15 @@ static void sctp_check_transmitted(struct sctp_outq *q,
                  * A sender is doing zero window probing when the
                  * receiver's advertised window is zero, and there is
                  * only one data chunk in flight to the receiver.
+                 *
+                 * Allow the association to timeout while in SHUTDOWN
+                 * PENDING or SHUTDOWN RECEIVED in case the receiver
+                 * stays in zero window mode forever.
                  */
                 if (!q->asoc->peer.rwnd &&
                     !list_empty(&tlist) &&
-                    (sack_ctsn+2 == q->asoc->next_tsn)) {
+                    (sack_ctsn+2 == q->asoc->next_tsn) &&
+                    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
                         SCTP_DEBUG_PRINTK("%s: SACK received for zero "
                                           "window probe: %u\n",
                                           __func__, sack_ctsn);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 67380a29e2e9..207175b2f40a 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1058,7 +1058,6 @@ SCTP_STATIC __init int sctp_init(void)
         int status = -EINVAL;
         unsigned long goal;
         unsigned long limit;
-        unsigned long nr_pages;
         int max_share;
         int order;
 
@@ -1148,15 +1147,7 @@ SCTP_STATIC __init int sctp_init(void)
         /* Initialize handle used for association ids. */
         idr_init(&sctp_assocs_id);
 
-        /* Set the pressure threshold to be a fraction of global memory that
-         * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
-         * memory, with a floor of 128 pages.
-         * Note this initializes the data in sctpv6_prot too
-         * Unabashedly stolen from tcp_init
-         */
-        nr_pages = totalram_pages - totalhigh_pages;
-        limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
-        limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+        limit = nr_free_buffer_pages() / 8;
         limit = max(limit, 128UL);
         sysctl_sctp_mem[0] = limit / 4 * 3;
         sysctl_sctp_mem[1] = limit;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 534c2e5feb05..6e0f88295aaf 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -670,10 +670,19 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
         /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
          * HEARTBEAT should clear the error counter of the destination
          * transport address to which the HEARTBEAT was sent.
-         * The association's overall error count is also cleared.
          */
         t->error_count = 0;
-        t->asoc->overall_error_count = 0;
+
+        /*
+         * Although RFC4960 specifies that the overall error count must
+         * be cleared when a HEARTBEAT ACK is received, we make an
+         * exception while in SHUTDOWN PENDING. If the peer keeps its
+         * window shut forever, we may never be able to transmit our
+         * outstanding data and rely on the retransmission limit be reached
+         * to shutdown the association.
+         */
+        if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+                t->asoc->overall_error_count = 0;
 
         /* Clear the hb_sent flag to signal that we had a good
          * acknowledgement.
@@ -1437,6 +1446,13 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
                         sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
                         break;
 
+                case SCTP_CMD_TIMER_START_ONCE:
+                        timer = &asoc->timers[cmd->obj.to];
+
+                        if (timer_pending(timer))
+                                break;
+                        /* fall through */
+
                 case SCTP_CMD_TIMER_START:
                         timer = &asoc->timers[cmd->obj.to];
                         timeout = asoc->timeouts[cmd->obj.to];
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index a297283154d5..246117142b5c 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -5154,7 +5154,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
          * The sender of the SHUTDOWN MAY also start an overall guard timer
          * 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
          */
-        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
                         SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
 
         if (asoc->autoclose)
@@ -5299,14 +5299,28 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
         SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS);
 
         if (asoc->overall_error_count >= asoc->max_retrans) {
-                sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
-                                SCTP_ERROR(ETIMEDOUT));
-                /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
-                sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
-                                SCTP_PERR(SCTP_ERROR_NO_ERROR));
-                SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
-                SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
-                return SCTP_DISPOSITION_DELETE_TCB;
+                if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
+                        /*
+                         * We are here likely because the receiver had its rwnd
+                         * closed for a while and we have not been able to
+                         * transmit the locally queued data within the maximum
+                         * retransmission attempts limit. Start the T5
+                         * shutdown guard timer to give the receiver one last
+                         * chance and some additional time to recover before
+                         * aborting.
+                         */
+                        sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE,
+                                SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+                } else {
+                        sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                                        SCTP_ERROR(ETIMEDOUT));
+                        /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
+                        sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+                                        SCTP_PERR(SCTP_ERROR_NO_ERROR));
+                        SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
+                        SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
+                        return SCTP_DISPOSITION_DELETE_TCB;
+                }
         }
 
         /* E1) For the destination address for which the timer
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 0338dc6fdc9d..7c211a7f90f4 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -827,7 +827,7 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
         /* SCTP_STATE_ESTABLISHED */ \
         TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
         /* SCTP_STATE_SHUTDOWN_PENDING */ \
-        TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+        TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
         /* SCTP_STATE_SHUTDOWN_SENT */ \
         TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
         /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6766913a53e6..d3ccf7973c59 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1384,6 +1384,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
         struct sctp_endpoint *ep;
         struct sctp_association *asoc;
         struct list_head *pos, *temp;
+        unsigned int data_was_unread;
 
         SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);
 
@@ -1393,6 +1394,10 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
 
         ep = sctp_sk(sk)->ep;
 
+        /* Clean up any skbs sitting on the receive queue. */
+        data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
+        data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
+
         /* Walk all associations on an endpoint. */
         list_for_each_safe(pos, temp, &ep->asocs) {
                 asoc = list_entry(pos, struct sctp_association, asocs);
@@ -1410,7 +1415,9 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
                         }
                 }
 
-                if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
+                if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
+                    !skb_queue_empty(&asoc->ulpq.reasm) ||
+                    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
                         struct sctp_chunk *chunk;
 
                         chunk = sctp_make_abort_user(asoc, NULL, 0);
@@ -1420,10 +1427,6 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
                         sctp_primitive_SHUTDOWN(asoc, NULL);
         }
 
-        /* Clean up any skbs sitting on the receive queue. */
-        sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
-        sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
-
         /* On a TCP-style socket, block for at most linger_time if set. */
         if (sctp_style(sk, TCP) && timeout)
                 sctp_wait_for_close(sk, timeout);
@@ -2073,10 +2076,33 @@ static int sctp_setsockopt_disable_fragments(struct sock *sk,
 static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
                                   unsigned int optlen)
 {
+        struct sctp_association *asoc;
+        struct sctp_ulpevent *event;
+
         if (optlen > sizeof(struct sctp_event_subscribe))
                 return -EINVAL;
         if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
                 return -EFAULT;
+
+        /*
+         * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
+         * if there is no data to be sent or retransmit, the stack will
+         * immediately send up this notification.
+         */
+        if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
+                                       &sctp_sk(sk)->subscribe)) {
+                asoc = sctp_id2assoc(sk, 0);
+
+                if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
+                        event = sctp_ulpevent_make_sender_dry_event(asoc,
+                                        GFP_ATOMIC);
+                        if (!event)
+                                return -ENOMEM;
+
+                        sctp_ulpq_tail_event(&asoc->ulpq, event);
+                }
+        }
+
         return 0;
 }
 
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index e70e5fc87890..8a84017834c2 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -1081,9 +1081,19 @@ void sctp_ulpevent_free(struct sctp_ulpevent *event)
 }
 
 /* Purge the skb lists holding ulpevents. */
-void sctp_queue_purge_ulpevents(struct sk_buff_head *list)
+unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list)
 {
         struct sk_buff *skb;
-        while ((skb = skb_dequeue(list)) != NULL)
-                sctp_ulpevent_free(sctp_skb2event(skb));
+        unsigned int data_unread = 0;
+
+        while ((skb = skb_dequeue(list)) != NULL) {
+                struct sctp_ulpevent *event = sctp_skb2event(skb);
+
+                if (!sctp_ulpevent_is_notification(event))
+                        data_unread += skb->len;
+
+                sctp_ulpevent_free(event);
+        }
+
+        return data_unread;
 }
diff --git a/net/wireless/core.c b/net/wireless/core.c
index c22ef3492ee6..880dbe2e6f94 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -366,6 +366,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
 
         mutex_init(&rdev->mtx);
         mutex_init(&rdev->devlist_mtx);
+        mutex_init(&rdev->sched_scan_mtx);
         INIT_LIST_HEAD(&rdev->netdev_list);
         spin_lock_init(&rdev->bss_lock);
         INIT_LIST_HEAD(&rdev->bss_list);
@@ -701,6 +702,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
         rfkill_destroy(rdev->rfkill);
         mutex_destroy(&rdev->mtx);
         mutex_destroy(&rdev->devlist_mtx);
+        mutex_destroy(&rdev->sched_scan_mtx);
         list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
                 cfg80211_put_bss(&scan->pub);
         cfg80211_rdev_free_wowlan(rdev);
@@ -737,12 +739,16 @@ static void wdev_cleanup_work(struct work_struct *work)
                 ___cfg80211_scan_done(rdev, true);
         }
 
+        cfg80211_unlock_rdev(rdev);
+
+        mutex_lock(&rdev->sched_scan_mtx);
+
         if (WARN_ON(rdev->sched_scan_req &&
                     rdev->sched_scan_req->dev == wdev->netdev)) {
                 __cfg80211_stop_sched_scan(rdev, false);
         }
 
-        cfg80211_unlock_rdev(rdev);
+        mutex_unlock(&rdev->sched_scan_mtx);
 
         mutex_lock(&rdev->devlist_mtx);
         rdev->opencount--;
@@ -830,9 +836,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
 
                         break;
                 case NL80211_IFTYPE_P2P_CLIENT:
                 case NL80211_IFTYPE_STATION:
-                        cfg80211_lock_rdev(rdev);
+                        mutex_lock(&rdev->sched_scan_mtx);
                         __cfg80211_stop_sched_scan(rdev, false);
-                        cfg80211_unlock_rdev(rdev);
+                        mutex_unlock(&rdev->sched_scan_mtx);
                         wdev_lock(wdev);
 #ifdef CONFIG_CFG80211_WEXT
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 3dce1f167eba..a570ff9214ec 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -65,6 +65,8 @@ struct cfg80211_registered_device {
         struct work_struct scan_done_wk;
         struct work_struct sched_scan_results_wk;
 
+        struct mutex sched_scan_mtx;
+
 #ifdef CONFIG_NL80211_TESTMODE
         struct genl_info *testmode_info;
 #endif
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 98fa8eb6cc4b..cea338150d05 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3461,9 +3461,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
         if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
                 return -EINVAL;
 
-        if (rdev->sched_scan_req)
-                return -EINPROGRESS;
-
         if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
                 return -EINVAL;
 
@@ -3502,12 +3499,21 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
         if (ie_len > wiphy->max_scan_ie_len)
                 return -EINVAL;
 
+        mutex_lock(&rdev->sched_scan_mtx);
+
+        if (rdev->sched_scan_req) {
+                err = -EINPROGRESS;
+                goto out;
+        }
+
         request = kzalloc(sizeof(*request)
                         + sizeof(*request->ssids) * n_ssids
                         + sizeof(*request->channels) * n_channels
                         + ie_len, GFP_KERNEL);
-        if (!request)
-                return -ENOMEM;
+        if (!request) {
+                err = -ENOMEM;
+                goto out;
+        }
 
         if (n_ssids)
                 request->ssids = (void *)&request->channels[n_channels];
@@ -3605,6 +3611,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 out_free:
         kfree(request);
 out:
+        mutex_unlock(&rdev->sched_scan_mtx);
         return err;
 }
 
@@ -3612,12 +3619,17 @@ static int nl80211_stop_sched_scan(struct sk_buff *skb,
                                    struct genl_info *info)
 {
         struct cfg80211_registered_device *rdev = info->user_ptr[0];
+        int err;
 
         if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
             !rdev->ops->sched_scan_stop)
                 return -EOPNOTSUPP;
 
-        return __cfg80211_stop_sched_scan(rdev, false);
+        mutex_lock(&rdev->sched_scan_mtx);
+        err = __cfg80211_stop_sched_scan(rdev, false);
+        mutex_unlock(&rdev->sched_scan_mtx);
+
+        return err;
 }
 
 static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
@@ -6463,7 +6475,8 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
         if (addr)
                 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
         NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type);
-        NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
+        if (key_id != -1)
+                NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
         if (tsc)
                 NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
 
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 7a6c67667d70..ae0c2256ba3b 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -100,14 +100,14 @@ void __cfg80211_sched_scan_results(struct work_struct *wk)
         rdev = container_of(wk, struct cfg80211_registered_device,
                             sched_scan_results_wk);
 
-        cfg80211_lock_rdev(rdev);
+        mutex_lock(&rdev->sched_scan_mtx);
 
         /* we don't have sched_scan_req anymore if the scan is stopping */
         if (rdev->sched_scan_req)
                 nl80211_send_sched_scan_results(rdev,
                                                 rdev->sched_scan_req->dev);
 
-        cfg80211_unlock_rdev(rdev);
+        mutex_unlock(&rdev->sched_scan_mtx);
 }
 
 void cfg80211_sched_scan_results(struct wiphy *wiphy)
@@ -123,9 +123,9 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
 {
         struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
-        cfg80211_lock_rdev(rdev);
+        mutex_lock(&rdev->sched_scan_mtx);
         __cfg80211_stop_sched_scan(rdev, true);
-        cfg80211_unlock_rdev(rdev);
+        mutex_unlock(&rdev->sched_scan_mtx);
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
 
@@ -135,7 +135,7 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
         int err;
         struct net_device *dev;
 
-        ASSERT_RDEV_LOCK(rdev);
+        lockdep_assert_held(&rdev->sched_scan_mtx);
 
         if (!rdev->sched_scan_req)
                 return 0;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 9bec2e8a838c..5ce74a385525 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -50,7 +50,7 @@ static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
 static void xfrm_init_pmtu(struct dst_entry *dst);
 static int stale_bundle(struct dst_entry *dst);
-static int xfrm_bundle_ok(struct xfrm_dst *xdst, int family);
+static int xfrm_bundle_ok(struct xfrm_dst *xdst);
 
 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
                                                 int dir);
@@ -2241,7 +2241,7 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
 
 static int stale_bundle(struct dst_entry *dst)
 {
-        return !xfrm_bundle_ok((struct xfrm_dst *)dst, AF_UNSPEC);
+        return !xfrm_bundle_ok((struct xfrm_dst *)dst);
 }
 
 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
@@ -2313,7 +2313,7 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
  * still valid.
  */
 
-static int xfrm_bundle_ok(struct xfrm_dst *first, int family)
+static int xfrm_bundle_ok(struct xfrm_dst *first)
 {
         struct dst_entry *dst = &first->u.dst;
         struct xfrm_dst *last;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d70f85eb7864..9414b9c5b1e4 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1345,6 +1345,8 @@ out:
                         xfrm_state_check_expire(x1);
 
                 err = 0;
+                x->km.state = XFRM_STATE_DEAD;
+                __xfrm_state_put(x);
         }
 
         spin_unlock_bh(&x1->lock);