author    | Ingo Molnar <mingo@elte.hu> | 2008-12-16 22:04:48 +0100
committer | Ingo Molnar <mingo@elte.hu> | 2008-12-16 22:04:48 +0100
commit    | 78f902ccc597d6ce3e8d1477d70f2d79e960ba7a (patch)
tree      | c6ceab663de16501d1dda1c1596fe2dacaaef8e3 /net
parent    | x86/doc: spelling fix for grub (diff)
parent    | Linux 2.6.28-rc8 (diff)
download  | linux-78f902ccc597d6ce3e8d1477d70f2d79e960ba7a.tar.xz
          | linux-78f902ccc597d6ce3e8d1477d70f2d79e960ba7a.zip
Merge commit 'v2.6.28-rc8' into x86/doc
Diffstat (limited to 'net')
74 files changed, 753 insertions, 435 deletions
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 916061f681b6..68ced4bf158c 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -3,11 +3,20 @@ #include <linux/if_vlan.h> #include "vlan.h" +struct vlan_hwaccel_cb { + struct net_device *dev; +}; + +static inline struct vlan_hwaccel_cb *vlan_hwaccel_cb(struct sk_buff *skb) +{ + return (struct vlan_hwaccel_cb *)skb->cb; +} + /* VLAN rx hw acceleration helper. This acts like netif_{rx,receive_skb}(). */ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, u16 vlan_tci, int polling) { - struct net_device_stats *stats; + struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb); if (skb_bond_should_drop(skb)) { dev_kfree_skb_any(skb); @@ -15,23 +24,35 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, } skb->vlan_tci = vlan_tci; + cb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); + + return (polling ? netif_receive_skb(skb) : netif_rx(skb)); +} +EXPORT_SYMBOL(__vlan_hwaccel_rx); + +int vlan_hwaccel_do_receive(struct sk_buff *skb) +{ + struct vlan_hwaccel_cb *cb = vlan_hwaccel_cb(skb); + struct net_device *dev = cb->dev; + struct net_device_stats *stats; + netif_nit_deliver(skb); - skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK); - if (skb->dev == NULL) { - dev_kfree_skb_any(skb); - /* Not NET_RX_DROP, this is not being dropped - * due to congestion. */ - return NET_RX_SUCCESS; + if (dev == NULL) { + kfree_skb(skb); + return -1; } - skb->dev->last_rx = jiffies; + + skb->dev = dev; + skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci); skb->vlan_tci = 0; - stats = &skb->dev->stats; + dev->last_rx = jiffies; + + stats = &dev->stats; stats->rx_packets++; stats->rx_bytes += skb->len; - skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci); switch (skb->pkt_type) { case PACKET_BROADCAST: break; @@ -43,13 +64,12 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, * This allows the VLAN to have a different MAC than the * underlying device, and still route correctly. */ if (!compare_ether_addr(eth_hdr(skb)->h_dest, - skb->dev->dev_addr)) + dev->dev_addr)) skb->pkt_type = PACKET_HOST; break; }; - return (polling ? netif_receive_skb(skb) : netif_rx(skb)); + return 0; } -EXPORT_SYMBOL(__vlan_hwaccel_rx); struct net_device *vlan_dev_real_dev(const struct net_device *dev) { diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c index 0feefa4e1a4b..3628e0a81b40 100644 --- a/net/8021q/vlanproc.c +++ b/net/8021q/vlanproc.c @@ -314,7 +314,7 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset) dev_info->ingress_priority_map[6], dev_info->ingress_priority_map[7]); - seq_printf(seq, "EGRESSS priority Mappings: "); + seq_printf(seq, " EGRESS priority mappings: "); for (i = 0; i < 16; i++) { const struct vlan_priority_tci_mapping *mp = dev_info->egress_priority_map[i]; diff --git a/net/9p/Kconfig b/net/9p/Kconfig index c42c0c400bf9..0663f99e977a 100644 --- a/net/9p/Kconfig +++ b/net/9p/Kconfig @@ -13,22 +13,24 @@ menuconfig NET_9P If unsure, say N. +if NET_9P + config NET_9P_VIRTIO - depends on NET_9P && EXPERIMENTAL && VIRTIO + depends on EXPERIMENTAL && VIRTIO tristate "9P Virtio Transport (Experimental)" help This builds support for a transports between guest partitions and a host partition. config NET_9P_RDMA - depends on NET_9P && INFINIBAND && EXPERIMENTAL + depends on INET && INFINIBAND && EXPERIMENTAL tristate "9P RDMA Transport (Experimental)" help - This builds support for a RDMA transport. 
+ This builds support for an RDMA transport. config NET_9P_DEBUG bool "Debug information" - depends on NET_9P help Say Y if you want the 9P subsystem to log debug information. +endif diff --git a/net/9p/client.c b/net/9p/client.c index 67717f69412e..4b529454616d 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -189,6 +189,9 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag) printk(KERN_ERR "Couldn't grow tag array\n"); kfree(req->tc); kfree(req->rc); + kfree(req->wq); + req->tc = req->rc = NULL; + req->wq = NULL; return ERR_PTR(-ENOMEM); } req->tc->sdata = (char *) req->tc + sizeof(struct p9_fcall); @@ -311,12 +314,6 @@ static void p9_free_req(struct p9_client *c, struct p9_req_t *r) r->status = REQ_STATUS_IDLE; if (tag != P9_NOTAG && p9_idpool_check(tag, c->tagpool)) p9_idpool_put(tag, c->tagpool); - - /* if this was a flush request we have to free response fcall */ - if (r->rc->id == P9_RFLUSH) { - kfree(r->tc); - kfree(r->rc); - } } /** @@ -611,19 +608,21 @@ reterr: static struct p9_fid *p9_fid_create(struct p9_client *clnt) { - int err; + int ret; struct p9_fid *fid; + unsigned long flags; P9_DPRINTK(P9_DEBUG_FID, "clnt %p\n", clnt); fid = kmalloc(sizeof(struct p9_fid), GFP_KERNEL); if (!fid) return ERR_PTR(-ENOMEM); - fid->fid = p9_idpool_get(clnt->fidpool); + ret = p9_idpool_get(clnt->fidpool); if (fid->fid < 0) { - err = -ENOSPC; + ret = -ENOSPC; goto error; } + fid->fid = ret; memset(&fid->qid, 0, sizeof(struct p9_qid)); fid->mode = -1; @@ -632,27 +631,28 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt) fid->clnt = clnt; fid->aux = NULL; - spin_lock(&clnt->lock); + spin_lock_irqsave(&clnt->lock, flags); list_add(&fid->flist, &clnt->fidlist); - spin_unlock(&clnt->lock); + spin_unlock_irqrestore(&clnt->lock, flags); return fid; error: kfree(fid); - return ERR_PTR(err); + return ERR_PTR(ret); } static void p9_fid_destroy(struct p9_fid *fid) { struct p9_client *clnt; + unsigned long flags; P9_DPRINTK(P9_DEBUG_FID, "fid %d\n", fid->fid); clnt = fid->clnt; p9_idpool_put(fid->fid, clnt->fidpool); - spin_lock(&clnt->lock); + spin_lock_irqsave(&clnt->lock, flags); list_del(&fid->flist); - spin_unlock(&clnt->lock); + spin_unlock_irqrestore(&clnt->lock, flags); kfree(fid); } @@ -818,7 +818,9 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, } P9_DPRINTK(P9_DEBUG_9P, "<<< RATTACH qid %x.%llx.%x\n", - qid.type, qid.path, qid.version); + qid.type, + (unsigned long long)qid.path, + qid.version); memmove(&fid->qid, &qid, sizeof(struct p9_qid)); @@ -865,7 +867,9 @@ p9_client_auth(struct p9_client *clnt, char *uname, u32 n_uname, char *aname) } P9_DPRINTK(P9_DEBUG_9P, "<<< RAUTH qid %x.%llx.%x\n", - qid.type, qid.path, qid.version); + qid.type, + (unsigned long long)qid.path, + qid.version); memmove(&afid->qid, &qid, sizeof(struct p9_qid)); p9_free_req(clnt, req); @@ -930,7 +934,8 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, for (count = 0; count < nwqids; count++) P9_DPRINTK(P9_DEBUG_9P, "<<< [%d] %x.%llx.%x\n", - count, wqids[count].type, wqids[count].path, + count, wqids[count].type, + (unsigned long long)wqids[count].path, wqids[count].version); if (nwname) @@ -980,7 +985,9 @@ int p9_client_open(struct p9_fid *fid, int mode) } P9_DPRINTK(P9_DEBUG_9P, "<<< ROPEN qid %x.%llx.%x iounit %x\n", - qid.type, qid.path, qid.version, iounit); + qid.type, + (unsigned long long)qid.path, + qid.version, iounit); fid->mode = mode; fid->iounit = iounit; @@ -1023,7 +1030,9 @@ int p9_client_fcreate(struct 
p9_fid *fid, char *name, u32 perm, int mode, } P9_DPRINTK(P9_DEBUG_9P, "<<< RCREATE qid %x.%llx.%x iounit %x\n", - qid.type, qid.path, qid.version, iounit); + qid.type, + (unsigned long long)qid.path, + qid.version, iounit); fid->mode = mode; fid->iounit = iounit; @@ -1230,9 +1239,9 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid) "<<< name=%s uid=%s gid=%s muid=%s extension=(%s)\n" "<<< uid=%d gid=%d n_muid=%d\n", ret->size, ret->type, ret->dev, ret->qid.type, - ret->qid.path, ret->qid.version, ret->mode, - ret->atime, ret->mtime, ret->length, ret->name, - ret->uid, ret->gid, ret->muid, ret->extension, + (unsigned long long)ret->qid.path, ret->qid.version, ret->mode, + ret->atime, ret->mtime, (unsigned long long)ret->length, + ret->name, ret->uid, ret->gid, ret->muid, ret->extension, ret->n_uid, ret->n_gid, ret->n_muid); free_and_error: @@ -1255,9 +1264,9 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst) " name=%s uid=%s gid=%s muid=%s extension=(%s)\n" " uid=%d gid=%d n_muid=%d\n", wst->size, wst->type, wst->dev, wst->qid.type, - wst->qid.path, wst->qid.version, wst->mode, - wst->atime, wst->mtime, wst->length, wst->name, - wst->uid, wst->gid, wst->muid, wst->extension, + (unsigned long long)wst->qid.path, wst->qid.version, wst->mode, + wst->atime, wst->mtime, (unsigned long long)wst->length, + wst->name, wst->uid, wst->gid, wst->muid, wst->extension, wst->n_uid, wst->n_gid, wst->n_muid); err = 0; clnt = fid->clnt; diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index 8d6cc4777aae..2f1fe5fc1228 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c @@ -45,7 +45,6 @@ #include <net/9p/transport.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> -#include <rdma/ib_verbs.h> #define P9_PORT 5640 #define P9_RDMA_SQ_DEPTH 32 @@ -589,6 +588,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args) if (IS_ERR(rdma->cm_id)) goto error; + /* Associate the client with the transport */ + client->trans = rdma; + /* Resolve the server's address */ rdma->addr.sin_family = AF_INET; rdma->addr.sin_addr.s_addr = in_aton(addr); @@ -669,7 +671,6 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args) if (err || (rdma->state != P9_RDMA_CONNECTED)) goto error; - client->trans = rdma; client->status = Connected; return 0; diff --git a/net/atm/svc.c b/net/atm/svc.c index de1e4f2f3a43..8fb54dc870b3 100644 --- a/net/atm/svc.c +++ b/net/atm/svc.c @@ -293,7 +293,10 @@ static int svc_listen(struct socket *sock,int backlog) error = -EINVAL; goto out; } - vcc_insert_socket(sk); + if (test_bit(ATM_VF_LISTEN, &vcc->flags)) { + error = -EADDRINUSE; + goto out; + } set_bit(ATM_VF_WAITING, &vcc->flags); prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local); @@ -307,6 +310,7 @@ static int svc_listen(struct socket *sock,int backlog) goto out; } set_bit(ATM_VF_LISTEN,&vcc->flags); + vcc_insert_socket(sk); sk->sk_max_ack_backlog = backlog > 0 ? 
backlog : ATM_BACKLOG_DEFAULT; error = -sk->sk_err; out: diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index fa5cda4e552a..45f61c348e36 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -101,6 +101,18 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb) pppoe_proto(skb) == htons(PPP_IPV6) && \ brnf_filter_pppoe_tagged) +static void fake_update_pmtu(struct dst_entry *dst, u32 mtu) +{ +} + +static struct dst_ops fake_dst_ops = { + .family = AF_INET, + .protocol = __constant_htons(ETH_P_IP), + .update_pmtu = fake_update_pmtu, + .entry_size = sizeof(struct rtable), + .entries = ATOMIC_INIT(0), +}; + /* * Initialize bogus route table used to keep netfilter happy. * Currently, we fill in the PMTU entry because netfilter @@ -117,6 +129,7 @@ void br_netfilter_rtable_init(struct net_bridge *br) rt->u.dst.path = &rt->u.dst; rt->u.dst.metrics[RTAX_MTU - 1] = 1500; rt->u.dst.flags = DST_NOXFRM; + rt->u.dst.ops = &fake_dst_ops; } static inline struct rtable *bridge_parent_rtable(const struct net_device *dev) diff --git a/net/can/af_can.c b/net/can/af_can.c index 7d4d2b3c137e..3dadb338addd 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -319,23 +319,52 @@ static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev) return n ? d : NULL; } +/** + * find_rcv_list - determine optimal filterlist inside device filter struct + * @can_id: pointer to CAN identifier of a given can_filter + * @mask: pointer to CAN mask of a given can_filter + * @d: pointer to the device filter struct + * + * Description: + * Returns the optimal filterlist to reduce the filter handling in the + * receive path. This function is called by service functions that need + * to register or unregister a can_filter in the filter lists. + * + * A filter matches in general, when + * + * <received_can_id> & mask == can_id & mask + * + * so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describe + * relevant bits for the filter. + * + * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can + * filter for error frames (CAN_ERR_FLAG bit set in mask). For error frames + * there is a special filterlist and a special rx path filter handling. + * + * Return: + * Pointer to optimal filterlist for the given can_id/mask pair. + * Constistency checked mask. + * Reduced can_id to have a preprocessed filter compare value. 
+ */ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, struct dev_rcv_lists *d) { canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */ - /* filter error frames */ + /* filter for error frames in extra filterlist */ if (*mask & CAN_ERR_FLAG) { - /* clear CAN_ERR_FLAG in list entry */ + /* clear CAN_ERR_FLAG in filter entry */ *mask &= CAN_ERR_MASK; return &d->rx[RX_ERR]; } - /* ensure valid values in can_mask */ - if (*mask & CAN_EFF_FLAG) - *mask &= (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG); - else - *mask &= (CAN_SFF_MASK | CAN_RTR_FLAG); + /* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */ + +#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG) + + /* ensure valid values in can_mask for 'SFF only' frame filtering */ + if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG)) + *mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS); /* reduce condition testing at receive time */ *can_id &= *mask; @@ -348,15 +377,19 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, if (!(*mask)) return &d->rx[RX_ALL]; - /* use extra filterset for the subscription of exactly *ONE* can_id */ - if (*can_id & CAN_EFF_FLAG) { - if (*mask == (CAN_EFF_MASK | CAN_EFF_FLAG)) { - /* RFC: a use-case for hash-tables in the future? */ - return &d->rx[RX_EFF]; + /* extra filterlists for the subscription of a single non-RTR can_id */ + if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) + && !(*can_id & CAN_RTR_FLAG)) { + + if (*can_id & CAN_EFF_FLAG) { + if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) { + /* RFC: a future use-case for hash-tables? */ + return &d->rx[RX_EFF]; + } + } else { + if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS)) + return &d->rx_sff[*can_id]; } - } else { - if (*mask == CAN_SFF_MASK) - return &d->rx_sff[*can_id]; } /* default: filter via can_id/can_mask */ @@ -589,7 +622,10 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb) } } - /* check CAN_ID specific entries */ + /* check filterlists for single non-RTR can_ids */ + if (can_id & CAN_RTR_FLAG) + return matches; + if (can_id & CAN_EFF_FLAG) { hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) { if (r->can_id == can_id) { diff --git a/net/can/bcm.c b/net/can/bcm.c index d0dd382001e2..da0d426c0ce4 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -64,10 +64,11 @@ #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */ /* get best masking value for can_rx_register() for a given single can_id */ -#define REGMASK(id) ((id & CAN_RTR_FLAG) | ((id & CAN_EFF_FLAG) ? \ - (CAN_EFF_MASK | CAN_EFF_FLAG) : CAN_SFF_MASK)) +#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \ + (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ + (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) -#define CAN_BCM_VERSION "20080415" +#define CAN_BCM_VERSION CAN_VERSION static __initdata const char banner[] = KERN_INFO "can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n"; diff --git a/net/compat.c b/net/compat.c index 67fb6a3834a3..a3a2ba0fac08 100644 --- a/net/compat.c +++ b/net/compat.c @@ -226,14 +226,14 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat return 0; /* XXX: return error? check spec. 
*/ } - if (level == SOL_SOCKET && type == SO_TIMESTAMP) { + if (level == SOL_SOCKET && type == SCM_TIMESTAMP) { struct timeval *tv = (struct timeval *)data; ctv.tv_sec = tv->tv_sec; ctv.tv_usec = tv->tv_usec; data = &ctv; len = sizeof(ctv); } - if (level == SOL_SOCKET && type == SO_TIMESTAMPNS) { + if (level == SOL_SOCKET && type == SCM_TIMESTAMPNS) { struct timespec *ts = (struct timespec *)data; cts.tv_sec = ts->tv_sec; cts.tv_nsec = ts->tv_nsec; @@ -725,7 +725,7 @@ EXPORT_SYMBOL(compat_mc_getsockopt); static unsigned char nas[19]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), - AL(6)}; + AL(4)}; #undef AL asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags) @@ -738,52 +738,13 @@ asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, uns return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); } -asmlinkage long compat_sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr, - int __user *upeer_addrlen, - const compat_sigset_t __user *sigmask, - compat_size_t sigsetsize, int flags) -{ - compat_sigset_t ss32; - sigset_t ksigmask, sigsaved; - int ret; - - if (sigmask) { - if (sigsetsize != sizeof(compat_sigset_t)) - return -EINVAL; - if (copy_from_user(&ss32, sigmask, sizeof(ss32))) - return -EFAULT; - sigset_from_compat(&ksigmask, &ss32); - - sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP)); - sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); - } - - ret = do_accept(fd, upeer_sockaddr, upeer_addrlen, flags); - - if (ret == -ERESTARTNOHAND) { - /* - * Don't restore the signal mask yet. Let do_signal() deliver - * the signal on the way back to userspace, before the signal - * mask is restored. 
- */ - if (sigmask) { - memcpy(¤t->saved_sigmask, &sigsaved, - sizeof(sigsaved)); - set_restore_sigmask(); - } - } else if (sigmask) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); - - return ret; -} - asmlinkage long compat_sys_socketcall(int call, u32 __user *args) { int ret; u32 a[6]; u32 a0, a1; - if (call < SYS_SOCKET || call > SYS_PACCEPT) + if (call < SYS_SOCKET || call > SYS_ACCEPT4) return -EINVAL; if (copy_from_user(a, args, nas[call])) return -EFAULT; @@ -804,7 +765,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args) ret = sys_listen(a0, a1); break; case SYS_ACCEPT: - ret = do_accept(a0, compat_ptr(a1), compat_ptr(a[2]), 0); + ret = sys_accept4(a0, compat_ptr(a1), compat_ptr(a[2]), 0); break; case SYS_GETSOCKNAME: ret = sys_getsockname(a0, compat_ptr(a1), compat_ptr(a[2])); @@ -844,9 +805,8 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args) case SYS_RECVMSG: ret = compat_sys_recvmsg(a0, compat_ptr(a1), a[2]); break; - case SYS_PACCEPT: - ret = compat_sys_paccept(a0, compat_ptr(a1), compat_ptr(a[2]), - compat_ptr(a[3]), a[4], a[5]); + case SYS_ACCEPT4: + ret = sys_accept4(a0, compat_ptr(a1), compat_ptr(a[2]), a[3]); break; default: ret = -EINVAL; diff --git a/net/core/dev.c b/net/core/dev.c index d9038e328cc1..9174c77d3112 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2218,6 +2218,9 @@ int netif_receive_skb(struct sk_buff *skb) int ret = NET_RX_DROP; __be16 type; + if (skb->vlan_tci && vlan_hwaccel_do_receive(skb)) + return NET_RX_SUCCESS; + /* if we've gotten here through NAPI, check netpoll */ if (netpoll_receive_skb(skb)) return NET_RX_DROP; diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index f1d07b5c1e17..1895a4ca9c4f 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -325,6 +325,38 @@ void unregister_pernet_subsys(struct pernet_operations *module) } EXPORT_SYMBOL_GPL(unregister_pernet_subsys); +int register_pernet_gen_subsys(int *id, struct pernet_operations *ops) +{ + int rv; + + mutex_lock(&net_mutex); +again: + rv = ida_get_new_above(&net_generic_ids, 1, id); + if (rv < 0) { + if (rv == -EAGAIN) { + ida_pre_get(&net_generic_ids, GFP_KERNEL); + goto again; + } + goto out; + } + rv = register_pernet_operations(first_device, ops); + if (rv < 0) + ida_remove(&net_generic_ids, *id); + mutex_unlock(&net_mutex); +out: + return rv; +} +EXPORT_SYMBOL_GPL(register_pernet_gen_subsys); + +void unregister_pernet_gen_subsys(int id, struct pernet_operations *ops) +{ + mutex_lock(&net_mutex); + unregister_pernet_operations(ops); + ida_remove(&net_generic_ids, id); + mutex_unlock(&net_mutex); +} +EXPORT_SYMBOL_GPL(unregister_pernet_gen_subsys); + /** * register_pernet_device - register a network namespace device * @ops: pernet operations structure for the subsystem diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 99f656d35b4f..8997e912aaaf 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -1973,28 +1973,21 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) /* make sure that we don't pick a non-existing transmit queue */ ntxq = pkt_dev->odev->real_num_tx_queues; - if (ntxq <= num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) { - printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU " - "disabled because CPU count (%d) exceeds number ", - num_online_cpus()); - printk(KERN_WARNING "pktgen: WARNING: of tx queues " - "(%d) on %s \n", ntxq, pkt_dev->odev->name); - pkt_dev->flags &= ~F_QUEUE_MAP_CPU; - } + if (ntxq <= pkt_dev->queue_map_min) { printk(KERN_WARNING "pktgen: WARNING: 
Requested " - "queue_map_min (%d) exceeds number of tx\n", - pkt_dev->queue_map_min); - printk(KERN_WARNING "pktgen: WARNING: queues (%d) on " - "%s, resetting\n", ntxq, pkt_dev->odev->name); + "queue_map_min (zero-based) (%d) exceeds valid range " + "[0 - %d] for (%d) queues on %s, resetting\n", + pkt_dev->queue_map_min, (ntxq ?: 1)- 1, ntxq, + pkt_dev->odev->name); pkt_dev->queue_map_min = ntxq - 1; } - if (ntxq <= pkt_dev->queue_map_max) { + if (pkt_dev->queue_map_max >= ntxq) { printk(KERN_WARNING "pktgen: WARNING: Requested " - "queue_map_max (%d) exceeds number of tx\n", - pkt_dev->queue_map_max); - printk(KERN_WARNING "pktgen: WARNING: queues (%d) on " - "%s, resetting\n", ntxq, pkt_dev->odev->name); + "queue_map_max (zero-based) (%d) exceeds valid range " + "[0 - %d] for (%d) queues on %s, resetting\n", + pkt_dev->queue_map_max, (ntxq ?: 1)- 1, ntxq, + pkt_dev->odev->name); pkt_dev->queue_map_max = ntxq - 1; } @@ -2203,6 +2196,7 @@ static void set_cur_queue_map(struct pktgen_dev *pkt_dev) } pkt_dev->cur_queue_map = t; } + pkt_dev->cur_queue_map = pkt_dev->cur_queue_map % pkt_dev->odev->real_num_tx_queues; } /* Increment/randomize headers according to flags and current values diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 31f29d2989fd..4dfb6b4d4559 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -878,7 +878,9 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, if (ifm->ifi_change) flags = (flags & ifm->ifi_change) | (dev->flags & ~ifm->ifi_change); - dev_change_flags(dev, flags); + err = dev_change_flags(dev, flags); + if (err < 0) + goto errout; } if (tb[IFLA_TXQLEN]) diff --git a/net/core/scm.c b/net/core/scm.c index 10f5c65f6a47..b12303dd39d9 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -106,9 +106,25 @@ void __scm_destroy(struct scm_cookie *scm) if (fpl) { scm->fp = NULL; - for (i=fpl->count-1; i>=0; i--) - fput(fpl->fp[i]); - kfree(fpl); + if (current->scm_work_list) { + list_add_tail(&fpl->list, current->scm_work_list); + } else { + LIST_HEAD(work_list); + + current->scm_work_list = &work_list; + + list_add(&fpl->list, &work_list); + while (!list_empty(&work_list)) { + fpl = list_first_entry(&work_list, struct scm_fp_list, list); + + list_del(&fpl->list); + for (i=fpl->count-1; i>=0; i--) + fput(fpl->fp[i]); + kfree(fpl); + } + + current->scm_work_list = NULL; + } } } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4e22e3a35359..65f7757465bd 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -149,7 +149,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here) void skb_truesize_bug(struct sk_buff *skb) { - printk(KERN_ERR "SKB BUG: Invalid truesize (%u) " + WARN(net_ratelimit(), KERN_ERR "SKB BUG: Invalid truesize (%u) " "len=%u, sizeof(sk_buff)=%Zd\n", skb->truesize, skb->len, sizeof(struct sk_buff)); } @@ -449,6 +449,18 @@ void kfree_skb(struct sk_buff *skb) __kfree_skb(skb); } +/** + * skb_recycle_check - check if skb can be reused for receive + * @skb: buffer + * @skb_size: minimum receive buffer size + * + * Checks that the skb passed in is not shared or cloned, and + * that it is linear and its head portion at least as large as + * skb_size so that it can be recycled as a receive buffer. + * If these conditions are met, this function does any necessary + * reference count dropping and cleans up the skbuff as if it + * just came from __alloc_skb(). 
+ */ int skb_recycle_check(struct sk_buff *skb, int skb_size) { struct skb_shared_info *shinfo; @@ -474,8 +486,8 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size) shinfo->frag_list = NULL; memset(skb, 0, offsetof(struct sk_buff, tail)); - skb_reset_tail_pointer(skb); skb->data = skb->head + NET_SKB_PAD; + skb_reset_tail_pointer(skb); return 1; } diff --git a/net/core/sock.c b/net/core/sock.c index 5e2a3132a8c9..edf7220889a4 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -136,7 +136,6 @@ static struct lock_class_key af_family_keys[AF_MAX]; static struct lock_class_key af_family_slock_keys[AF_MAX]; -#ifdef CONFIG_DEBUG_LOCK_ALLOC /* * Make lock validator output more readable. (we pre-construct these * strings build-time, so that runtime initialization of socket @@ -187,7 +186,6 @@ static const char *af_family_clock_key_strings[AF_MAX+1] = { "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , "clock-AF_MAX" }; -#endif /* * sk_callback_lock locking rules are per-address-family, @@ -2037,9 +2035,6 @@ static inline void release_proto_idx(struct proto *prot) int proto_register(struct proto *prot, int alloc_slab) { - char *request_sock_slab_name = NULL; - char *timewait_sock_slab_name; - if (alloc_slab) { prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0, SLAB_HWCACHE_ALIGN, NULL); @@ -2053,12 +2048,12 @@ int proto_register(struct proto *prot, int alloc_slab) if (prot->rsk_prot != NULL) { static const char mask[] = "request_sock_%s"; - request_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL); - if (request_sock_slab_name == NULL) + prot->rsk_prot->slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL); + if (prot->rsk_prot->slab_name == NULL) goto out_free_sock_slab; - sprintf(request_sock_slab_name, mask, prot->name); - prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name, + sprintf(prot->rsk_prot->slab_name, mask, prot->name); + prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name, prot->rsk_prot->obj_size, 0, SLAB_HWCACHE_ALIGN, NULL); @@ -2072,14 +2067,14 @@ int proto_register(struct proto *prot, int alloc_slab) if (prot->twsk_prot != NULL) { static const char mask[] = "tw_sock_%s"; - timewait_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL); + prot->twsk_prot->twsk_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL); - if (timewait_sock_slab_name == NULL) + if (prot->twsk_prot->twsk_slab_name == NULL) goto out_free_request_sock_slab; - sprintf(timewait_sock_slab_name, mask, prot->name); + sprintf(prot->twsk_prot->twsk_slab_name, mask, prot->name); prot->twsk_prot->twsk_slab = - kmem_cache_create(timewait_sock_slab_name, + kmem_cache_create(prot->twsk_prot->twsk_slab_name, prot->twsk_prot->twsk_obj_size, 0, SLAB_HWCACHE_ALIGN, NULL); @@ -2095,14 +2090,14 @@ int proto_register(struct proto *prot, int alloc_slab) return 0; out_free_timewait_sock_slab_name: - kfree(timewait_sock_slab_name); + kfree(prot->twsk_prot->twsk_slab_name); out_free_request_sock_slab: if (prot->rsk_prot && prot->rsk_prot->slab) { kmem_cache_destroy(prot->rsk_prot->slab); prot->rsk_prot->slab = NULL; } out_free_request_sock_slab_name: - kfree(request_sock_slab_name); + kfree(prot->rsk_prot->slab_name); out_free_sock_slab: kmem_cache_destroy(prot->slab); prot->slab = NULL; @@ -2125,18 +2120,14 @@ void proto_unregister(struct proto *prot) } if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) { - const char *name = kmem_cache_name(prot->rsk_prot->slab); - 
kmem_cache_destroy(prot->rsk_prot->slab); - kfree(name); + kfree(prot->rsk_prot->slab_name); prot->rsk_prot->slab = NULL; } if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) { - const char *name = kmem_cache_name(prot->twsk_prot->twsk_slab); - kmem_cache_destroy(prot->twsk_prot->twsk_slab); - kfree(name); + kfree(prot->twsk_prot->twsk_slab_name); prot->twsk_prot->twsk_slab = NULL; } } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 37616884b8a9..1af5a79309e9 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -10,6 +10,7 @@ #include <linux/list.h> #include <linux/netdevice.h> +#include <linux/etherdevice.h> #include <linux/phy.h> #include "dsa_priv.h" @@ -49,11 +50,57 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds) /* slave device handling ****************************************************/ static int dsa_slave_open(struct net_device *dev) { + struct dsa_slave_priv *p = netdev_priv(dev); + struct net_device *master = p->parent->master_netdev; + int err; + + if (!(master->flags & IFF_UP)) + return -ENETDOWN; + + if (compare_ether_addr(dev->dev_addr, master->dev_addr)) { + err = dev_unicast_add(master, dev->dev_addr, ETH_ALEN); + if (err < 0) + goto out; + } + + if (dev->flags & IFF_ALLMULTI) { + err = dev_set_allmulti(master, 1); + if (err < 0) + goto del_unicast; + } + if (dev->flags & IFF_PROMISC) { + err = dev_set_promiscuity(master, 1); + if (err < 0) + goto clear_allmulti; + } + return 0; + +clear_allmulti: + if (dev->flags & IFF_ALLMULTI) + dev_set_allmulti(master, -1); +del_unicast: + if (compare_ether_addr(dev->dev_addr, master->dev_addr)) + dev_unicast_delete(master, dev->dev_addr, ETH_ALEN); +out: + return err; } static int dsa_slave_close(struct net_device *dev) { + struct dsa_slave_priv *p = netdev_priv(dev); + struct net_device *master = p->parent->master_netdev; + + dev_mc_unsync(master, dev); + dev_unicast_unsync(master, dev); + if (dev->flags & IFF_ALLMULTI) + dev_set_allmulti(master, -1); + if (dev->flags & IFF_PROMISC) + dev_set_promiscuity(master, -1); + + if (compare_ether_addr(dev->dev_addr, master->dev_addr)) + dev_unicast_delete(master, dev->dev_addr, ETH_ALEN); + return 0; } @@ -77,9 +124,30 @@ static void dsa_slave_set_rx_mode(struct net_device *dev) dev_unicast_sync(master, dev); } -static int dsa_slave_set_mac_address(struct net_device *dev, void *addr) +static int dsa_slave_set_mac_address(struct net_device *dev, void *a) { - memcpy(dev->dev_addr, addr + 2, 6); + struct dsa_slave_priv *p = netdev_priv(dev); + struct net_device *master = p->parent->master_netdev; + struct sockaddr *addr = a; + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (!(dev->flags & IFF_UP)) + goto out; + + if (compare_ether_addr(addr->sa_data, master->dev_addr)) { + err = dev_unicast_add(master, addr->sa_data, ETH_ALEN); + if (err < 0) + return err; + } + + if (compare_ether_addr(dev->dev_addr, master->dev_addr)) + dev_unicast_delete(master, dev->dev_addr, ETH_ALEN); + +out: + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); return 0; } diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index bdc0510b53b7..31866543332e 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c @@ -159,6 +159,7 @@ static int dsa_rcv(struct sk_buff *skb, struct net_device *dev, skb->dev = ds->ports[source_port]; skb_push(skb, ETH_HLEN); + skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, skb->dev); skb->dev->last_rx = jiffies; diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index f985ea993843..9f4ce55eae59 100644 --- 
a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c @@ -178,6 +178,7 @@ static int edsa_rcv(struct sk_buff *skb, struct net_device *dev, skb->dev = ds->ports[source_port]; skb_push(skb, ETH_HLEN); + skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, skb->dev); skb->dev->last_rx = jiffies; diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index d3117764b2c2..efd26697e716 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c @@ -95,6 +95,7 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev, skb->dev = ds->ports[source_port]; skb_push(skb, ETH_HLEN); + skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, skb->dev); skb->dev->last_rx = jiffies; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 1fbff5fa4241..1aa2dc9e380e 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1117,6 +1117,7 @@ int inet_sk_rebuild_header(struct sock *sk) }, }, .proto = sk->sk_protocol, + .flags = inet_sk_flowi_flags(sk), .uli_u = { .ports = { .sport = inet->sport, diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 490e035c6d90..2e78f6bd9775 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -2063,9 +2063,10 @@ int cipso_v4_skbuff_setattr(struct sk_buff *skb, u32 opt_len; int len_delta; - buf_len = cipso_v4_genopt(buf, buf_len, doi_def, secattr); - if (buf_len < 0) - return buf_len; + ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); + if (ret_val < 0) + return ret_val; + buf_len = ret_val; opt_len = (buf_len + 3) & ~3; /* we overwrite any existing options to ensure that we have enough diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 861978a4f1a8..cfb38ac9d698 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -209,9 +209,17 @@ static int ip_local_deliver_finish(struct sk_buff *skb) hash = protocol & (MAX_INET_PROTOS - 1); ipprot = rcu_dereference(inet_protos[hash]); - if (ipprot != NULL && (net == &init_net || ipprot->netns_ok)) { + if (ipprot != NULL) { int ret; + if (!net_eq(net, &init_net) && !ipprot->netns_ok) { + if (net_ratelimit()) + printk("%s: proto %d isn't netns-ready\n", + __func__, protocol); + kfree_skb(skb); + goto out; + } + if (!ipprot->no_policy) { if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { kfree_skb(skb); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index b42e082cc170..25924b1eb2ef 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1945,13 +1945,14 @@ int __init ip_mr_init(void) goto proc_cache_fail; #endif return 0; -reg_notif_fail: - kmem_cache_destroy(mrt_cachep); #ifdef CONFIG_PROC_FS -proc_vif_fail: - unregister_netdevice_notifier(&ip_mr_notifier); proc_cache_fail: proc_net_remove(&init_net, "ip_mr_vif"); +proc_vif_fail: + unregister_netdevice_notifier(&ip_mr_notifier); #endif +reg_notif_fail: + del_timer(&ipmr_expire_timer); + kmem_cache_destroy(mrt_cachep); return err; } diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 8f5a403f6f6b..a631a1f110ca 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -237,43 +237,45 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_SENTINEL }; +static void icmpmsg_put_line(struct seq_file *seq, unsigned long *vals, + unsigned short *type, int count) +{ + int j; + + if (count) { + seq_printf(seq, "\nIcmpMsg:"); + for (j = 0; j < count; ++j) + seq_printf(seq, " %sType%u", + type[j] & 0x100 ? 
"Out" : "In", + type[j] & 0xff); + seq_printf(seq, "\nIcmpMsg:"); + for (j = 0; j < count; ++j) + seq_printf(seq, " %lu", vals[j]); + } +} + static void icmpmsg_put(struct seq_file *seq) { #define PERLINE 16 - int j, i, count; - static int out[PERLINE]; + int i, count; + unsigned short type[PERLINE]; + unsigned long vals[PERLINE], val; struct net *net = seq->private; count = 0; for (i = 0; i < ICMPMSG_MIB_MAX; i++) { - - if (snmp_fold_field((void **) net->mib.icmpmsg_statistics, i)) - out[count++] = i; - if (count < PERLINE) - continue; - - seq_printf(seq, "\nIcmpMsg:"); - for (j = 0; j < PERLINE; ++j) - seq_printf(seq, " %sType%u", i & 0x100 ? "Out" : "In", - i & 0xff); - seq_printf(seq, "\nIcmpMsg: "); - for (j = 0; j < PERLINE; ++j) - seq_printf(seq, " %lu", - snmp_fold_field((void **) net->mib.icmpmsg_statistics, - out[j])); - seq_putc(seq, '\n'); - } - if (count) { - seq_printf(seq, "\nIcmpMsg:"); - for (j = 0; j < count; ++j) - seq_printf(seq, " %sType%u", out[j] & 0x100 ? "Out" : - "In", out[j] & 0xff); - seq_printf(seq, "\nIcmpMsg:"); - for (j = 0; j < count; ++j) - seq_printf(seq, " %lu", snmp_fold_field((void **) - net->mib.icmpmsg_statistics, out[j])); + val = snmp_fold_field((void **) net->mib.icmpmsg_statistics, i); + if (val) { + type[count] = i; + vals[count++] = val; + } + if (count == PERLINE) { + icmpmsg_put_line(seq, vals, type, count); + count = 0; + } } + icmpmsg_put_line(seq, vals, type, count); #undef PERLINE } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index eccb7165a80c..c5aca0bb116a 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1374,8 +1374,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo || - signal_pending(current) || - (flags & MSG_PEEK)) + signal_pending(current)) break; } else { if (sock_flag(sk, SOCK_DONE)) diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c index af99776146ff..937549b8a921 100644 --- a/net/ipv4/tcp_htcp.c +++ b/net/ipv4/tcp_htcp.c @@ -69,9 +69,12 @@ static u32 htcp_cwnd_undo(struct sock *sk) const struct tcp_sock *tp = tcp_sk(sk); struct htcp *ca = inet_csk_ca(sk); - ca->last_cong = ca->undo_last_cong; - ca->maxRTT = ca->undo_maxRTT; - ca->old_maxB = ca->undo_old_maxB; + if (ca->undo_last_cong) { + ca->last_cong = ca->undo_last_cong; + ca->maxRTT = ca->undo_maxRTT; + ca->old_maxB = ca->undo_old_maxB; + ca->undo_last_cong = 0; + } return max(tp->snd_cwnd, (tp->snd_ssthresh << 7) / ca->beta); } @@ -268,7 +271,10 @@ static void htcp_state(struct sock *sk, u8 new_state) case TCP_CA_Open: { struct htcp *ca = inet_csk_ca(sk); - ca->last_cong = jiffies; + if (ca->undo_last_cong) { + ca->last_cong = jiffies; + ca->undo_last_cong = 0; + } } break; case TCP_CA_CWR: diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e4c5ac9fe89b..fe3b4bdfd251 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1028,10 +1028,6 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) /* Compute the current effective MSS, taking SACKs and IP options, * and even PMTU discovery events into account. - * - * LARGESEND note: !tcp_urg_mode is overkill, only frames up to snd_up - * cannot be large. However, taking into account rare use of URG, this - * is not a big flaw. 
*/ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) { @@ -1046,7 +1042,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) mss_now = tp->mss_cache; - if (large_allowed && sk_can_gso(sk) && !tcp_urg_mode(tp)) + if (large_allowed && sk_can_gso(sk)) doing_tso = 1; if (dst) { @@ -1516,6 +1512,10 @@ static int tcp_mtu_probe(struct sock *sk) * send_head. This happens as incoming acks open up the remote * window for us. * + * LARGESEND note: !tcp_urg_mode is overkill, only frames between + * snd_up-64k-mss .. snd_up cannot be large. However, taking into + * account rare use of URG, this is not a big flaw. + * * Returns 1, if no segments are in flight and we have queued segments, but * cannot send anything now because of SWS or another problem. */ @@ -1567,7 +1567,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) } limit = mss_now; - if (tso_segs > 1) + if (tso_segs > 1 && !tcp_urg_mode(tp)) limit = tcp_mss_split_point(sk, skb, mss_now, cwnd_quota); @@ -1616,6 +1616,7 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, */ void tcp_push_one(struct sock *sk, unsigned int mss_now) { + struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb = tcp_send_head(sk); unsigned int tso_segs, cwnd_quota; @@ -1630,7 +1631,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now) BUG_ON(!tso_segs); limit = mss_now; - if (tso_segs > 1) + if (tso_segs > 1 && !tcp_urg_mode(tp)) limit = tcp_mss_split_point(sk, skb, mss_now, cwnd_quota); @@ -2279,6 +2280,11 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, } memset(&opts, 0, sizeof(opts)); +#ifdef CONFIG_SYN_COOKIES + if (unlikely(req->cookie_ts)) + TCP_SKB_CB(skb)->when = cookie_init_timestamp(req); + else +#endif TCP_SKB_CB(skb)->when = tcp_time_stamp; tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5) + @@ -2304,11 +2310,6 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ th->window = htons(min(req->rcv_wnd, 65535U)); -#ifdef CONFIG_SYN_COOKIES - if (unlikely(req->cookie_ts)) - TCP_SKB_CB(skb)->when = cookie_init_timestamp(req); - else -#endif tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location); th->doff = (tcp_header_size >> 2); TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index 14504dada116..7cd22262de3a 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c @@ -326,6 +326,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) tp->snd_cwnd = 2; else if (tp->snd_cwnd > tp->snd_cwnd_clamp) tp->snd_cwnd = tp->snd_cwnd_clamp; + + tp->snd_ssthresh = tcp_current_ssthresh(sk); } /* Wipe the slate clean for the next RTT. 
*/ diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 2095abc3caba..98c1fd09be88 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -284,7 +284,7 @@ struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, } EXPORT_SYMBOL_GPL(udp4_lib_lookup); -static inline struct sock *udp_v4_mcast_next(struct sock *sk, +static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk, __be16 loc_port, __be32 loc_addr, __be16 rmt_port, __be32 rmt_addr, int dif) @@ -296,7 +296,8 @@ static inline struct sock *udp_v4_mcast_next(struct sock *sk, sk_for_each_from(s, node) { struct inet_sock *inet = inet_sk(s); - if (s->sk_hash != hnum || + if (!net_eq(sock_net(s), net) || + s->sk_hash != hnum || (inet->daddr && inet->daddr != rmt_addr) || (inet->dport != rmt_port && inet->dport) || (inet->rcv_saddr && inet->rcv_saddr != loc_addr) || @@ -632,6 +633,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, .saddr = saddr, .tos = tos } }, .proto = sk->sk_protocol, + .flags = inet_sk_flowi_flags(sk), .uli_u = { .ports = { .sport = inet->sport, .dport = dport } } }; @@ -1079,15 +1081,16 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, read_lock(&udp_hash_lock); sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]); dif = skb->dev->ifindex; - sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); + sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); if (sk) { struct sock *sknext = NULL; do { struct sk_buff *skb1 = skb; - sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr, - uh->source, saddr, dif); + sknext = udp_v4_mcast_next(net, sk_next(sk), uh->dest, + daddr, uh->source, saddr, + dif); if (sknext) skb1 = skb_clone(skb, GFP_ATOMIC); diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c index 07735ed280d7..55dc6beab9aa 100644 --- a/net/ipv4/xfrm4_state.c +++ b/net/ipv4/xfrm4_state.c @@ -33,6 +33,7 @@ __xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl, x->sel.dport_mask = htons(0xffff); x->sel.sport = xfrm_flowi_sport(fl); x->sel.sport_mask = htons(0xffff); + x->sel.family = AF_INET; x->sel.prefixlen_d = 32; x->sel.prefixlen_s = 32; x->sel.proto = fl->proto; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index eea9542728ca..d9da5eb9dcb2 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2483,8 +2483,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, if (!idev && dev->mtu >= IPV6_MIN_MTU) idev = ipv6_add_dev(dev); - if (idev) + if (idev) { idev->if_flags |= IF_READY; + run_pending = 1; + } } else { if (!addrconf_qdisc_ok(dev)) { /* device is still not ready. 
*/ diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 410046a8cc91..e44deb8d4df2 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -661,6 +661,11 @@ int datagram_send_ctl(struct net *net, switch (rthdr->type) { #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) case IPV6_SRCRT_TYPE_2: + if (rthdr->hdrlen != 2 || + rthdr->segments_left != 1) { + err = -EINVAL; + goto exit_f; + } break; #endif default: diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 182f8a177e7f..0524769632e7 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -224,7 +224,7 @@ static struct file_operations ip6mr_vif_fops = { .open = ip6mr_vif_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = seq_release_private, }; static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos) @@ -338,7 +338,7 @@ static struct file_operations ip6mr_mfc_fops = { .open = ipmr_mfc_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = seq_release_private, }; #endif @@ -981,14 +981,15 @@ int __init ip6_mr_init(void) goto proc_cache_fail; #endif return 0; -reg_notif_fail: - kmem_cache_destroy(mrt_cachep); #ifdef CONFIG_PROC_FS -proc_vif_fail: - unregister_netdevice_notifier(&ip6_mr_notifier); proc_cache_fail: proc_net_remove(&init_net, "ip6_mr_vif"); +proc_vif_fail: + unregister_netdevice_notifier(&ip6_mr_notifier); #endif +reg_notif_fail: + del_timer(&ipmr_expire_timer); + kmem_cache_destroy(mrt_cachep); return err; } diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 4e5eac301f91..2aa294be0c79 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -366,11 +366,16 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, } /* routing header option needs extra check */ + retv = -EINVAL; if (optname == IPV6_RTHDR && opt && opt->srcrt) { struct ipv6_rt_hdr *rthdr = opt->srcrt; switch (rthdr->type) { #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) case IPV6_SRCRT_TYPE_2: + if (rthdr->hdrlen != 2 || + rthdr->segments_left != 1) + goto sticky_done; + break; #endif default: diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 07f0b76e7427..97c17fdd6f75 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -132,7 +132,7 @@ static struct snmp_mib snmp6_udplite6_list[] = { static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib) { - static char name[32]; + char name[32]; int i; /* print by name -- deprecated items */ @@ -144,7 +144,7 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib) p = icmp6type2name[icmptype]; if (!p) /* don't print un-named types here */ continue; - (void) snprintf(name, sizeof(name)-1, "Icmp6%s%s", + snprintf(name, sizeof(name), "Icmp6%s%s", i & 0x100 ? "Out" : "In", p); seq_printf(seq, "%-32s\t%lu\n", name, snmp_fold_field(mib, i)); @@ -157,7 +157,7 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib) val = snmp_fold_field(mib, i); if (!val) continue; - (void) snprintf(name, sizeof(name)-1, "Icmp6%sType%u", + snprintf(name, sizeof(name), "Icmp6%sType%u", i & 0x100 ? 
"Out" : "In", i & 0xff); seq_printf(seq, "%-32s\t%lu\n", name, val); } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index e51da8c092fa..8b48512ebf6a 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -138,6 +138,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, int peeked; int err; int is_udplite = IS_UDPLITE(sk); + int is_udp4; if (addr_len) *addr_len=sizeof(struct sockaddr_in6); @@ -158,6 +159,8 @@ try_again: else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; + is_udp4 = (skb->protocol == htons(ETH_P_IP)); + /* * If checksum is needed at all, try to do it while copying the * data. If the data is truncated, or if we only want a partial @@ -180,9 +183,14 @@ try_again: if (err) goto out_free; - if (!peeked) - UDP6_INC_STATS_USER(sock_net(sk), - UDP_MIB_INDATAGRAMS, is_udplite); + if (!peeked) { + if (is_udp4) + UDP_INC_STATS_USER(sock_net(sk), + UDP_MIB_INDATAGRAMS, is_udplite); + else + UDP6_INC_STATS_USER(sock_net(sk), + UDP_MIB_INDATAGRAMS, is_udplite); + } sock_recv_timestamp(msg, sk, skb); @@ -196,7 +204,7 @@ try_again: sin6->sin6_flowinfo = 0; sin6->sin6_scope_id = 0; - if (skb->protocol == htons(ETH_P_IP)) + if (is_udp4) ipv6_addr_set(&sin6->sin6_addr, 0, 0, htonl(0xffff), ip_hdr(skb)->saddr); else { @@ -207,7 +215,7 @@ try_again: } } - if (skb->protocol == htons(ETH_P_IP)) { + if (is_udp4) { if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); } else { @@ -228,8 +236,14 @@ out: csum_copy_err: lock_sock(sk); - if (!skb_kill_datagram(sk, skb, flags)) - UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); + if (!skb_kill_datagram(sk, skb, flags)) { + if (is_udp4) + UDP_INC_STATS_USER(sock_net(sk), + UDP_MIB_INERRORS, is_udplite); + else + UDP6_INC_STATS_USER(sock_net(sk), + UDP_MIB_INERRORS, is_udplite); + } release_sock(sk); if (flags & MSG_DONTWAIT) @@ -328,7 +342,7 @@ drop: return -1; } -static struct sock *udp_v6_mcast_next(struct sock *sk, +static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk, __be16 loc_port, struct in6_addr *loc_addr, __be16 rmt_port, struct in6_addr *rmt_addr, int dif) @@ -340,7 +354,7 @@ static struct sock *udp_v6_mcast_next(struct sock *sk, sk_for_each_from(s, node) { struct inet_sock *inet = inet_sk(s); - if (sock_net(s) != sock_net(sk)) + if (!net_eq(sock_net(s), net)) continue; if (s->sk_hash == num && s->sk_family == PF_INET6) { @@ -383,14 +397,14 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, read_lock(&udp_hash_lock); sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]); dif = inet6_iif(skb); - sk = udp_v6_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); + sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); if (!sk) { kfree_skb(skb); goto out; } sk2 = sk; - while ((sk2 = udp_v6_mcast_next(sk_next(sk2), uh->dest, daddr, + while ((sk2 = udp_v6_mcast_next(net, sk_next(sk2), uh->dest, daddr, uh->source, saddr, dif))) { struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC); if (buff) { diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c index 89884a4f23aa..60c78cfc2737 100644 --- a/net/ipv6/xfrm6_state.c +++ b/net/ipv6/xfrm6_state.c @@ -34,6 +34,7 @@ __xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl, x->sel.dport_mask = htons(0xffff); x->sel.sport = xfrm_flowi_sport(fl); x->sel.sport_mask = htons(0xffff); + x->sel.family = AF_INET6; x->sel.prefixlen_d = 128; x->sel.prefixlen_s = 128; x->sel.proto = fl->proto; diff --git a/net/key/af_key.c b/net/key/af_key.c index e55e0441e4d9..5b22e011653b 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ 
-2075,7 +2075,6 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, struct xfrm_policy *xp, in req_size += socklen * 2; } else { size -= 2*socklen; - socklen = 0; } rq = (void*)skb_put(skb, req_size); pol->sadb_x_policy_len += req_size/8; @@ -3189,6 +3188,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, return xp; out: + xp->walk.dead = 1; xfrm_policy_destroy(xp); return NULL; } diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 189d0bafa91a..b85c4f27b361 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -199,7 +199,7 @@ static ssize_t sta_agg_status_write(struct file *file, /* toggle Rx aggregation command */ tid_num = tid_num - 100; if (tid_static_rx[tid_num] == 1) { - strcpy(state, "off "); + strcpy(state, "off"); ieee80211_sta_stop_rx_ba_session(sta->sdata, da, tid_num, 0, WLAN_REASON_QSTA_REQUIRE_SETUP); sta->ampdu_mlme.tid_state_rx[tid_num] |= diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 87665d7bb4f9..409bb7716236 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2560,25 +2560,3 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local) ieee80211_restart_sta_timer(sdata); rcu_read_unlock(); } - -/* driver notification call */ -void ieee80211_notify_mac(struct ieee80211_hw *hw, - enum ieee80211_notification_types notif_type) -{ - struct ieee80211_local *local = hw_to_local(hw); - struct ieee80211_sub_if_data *sdata; - - switch (notif_type) { - case IEEE80211_NOTIFY_RE_ASSOC: - rcu_read_lock(); - list_for_each_entry_rcu(sdata, &local->interfaces, list) { - if (sdata->vif.type != NL80211_IFTYPE_STATION) - continue; - - ieee80211_sta_req_auth(sdata, &sdata->u.sta); - } - rcu_read_unlock(); - break; - } -} -EXPORT_SYMBOL(ieee80211_notify_mac); diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c index 0b024cd6b809..98f480708050 100644 --- a/net/mac80211/rc80211_minstrel_debugfs.c +++ b/net/mac80211/rc80211_minstrel_debugfs.c @@ -94,8 +94,8 @@ minstrel_stats_open(struct inode *inode, struct file *file) prob / 10, prob % 10, mr->last_success, mr->last_attempts, - mr->succ_hist, - mr->att_hist); + (unsigned long long)mr->succ_hist, + (unsigned long long)mr->att_hist); } p += sprintf(p, "\nTotal packet count:: ideal %d " "lookaround %d\n\n", @@ -106,7 +106,7 @@ minstrel_stats_open(struct inode *inode, struct file *file) return 0; } -static int +static ssize_t minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *o) { struct minstrel_stats_info *ms; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 7fef8ea1f5ec..d254446b85b5 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -99,7 +99,7 @@ struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr) sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); while (sta) { - if (compare_ether_addr(sta->sta.addr, addr) == 0) + if (memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) break; sta = rcu_dereference(sta->hnext); } diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c index 742f811ca416..ab4ddba874be 100644 --- a/net/mac80211/wext.c +++ b/net/mac80211/wext.c @@ -271,6 +271,7 @@ static int ieee80211_ioctl_siwmode(struct net_device *dev, __u32 *mode, char *extra) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = sdata->local; int type; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) @@ -281,6 +282,13 @@ static int ieee80211_ioctl_siwmode(struct 
net_device *dev, type = NL80211_IFTYPE_STATION; break; case IW_MODE_ADHOC: + /* Setting ad-hoc mode on non ibss channel is not + * supported. + */ + if (local->oper_channel && + (local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS)) + return -EOPNOTSUPP; + type = NL80211_IFTYPE_ADHOC; break; case IW_MODE_REPEAT: diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 02ddc2b3ce2e..e90d52f199bc 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -713,7 +713,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, iph = ipv6_hdr(skb); iph->version = 6; iph->nexthdr = IPPROTO_IPV6; - iph->payload_len = old_iph->payload_len + sizeof(old_iph); + iph->payload_len = old_iph->payload_len; + be16_add_cpu(&iph->payload_len, sizeof(*old_iph)); iph->priority = old_iph->priority; memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl)); iph->daddr = rt->rt6i_dst.addr; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 622d7c671cb7..233fdd2d7d21 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -305,9 +305,7 @@ void nf_conntrack_hash_insert(struct nf_conn *ct) hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple); - spin_lock_bh(&nf_conntrack_lock); __nf_conntrack_hash_insert(ct, hash, repl_hash); - spin_unlock_bh(&nf_conntrack_lock); } EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert); diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 9c06b9f86ad4..c39b6a994133 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -21,6 +21,7 @@ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/rculist.h> +#include <linux/rtnetlink.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_l3proto.h> @@ -167,10 +168,12 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me) */ synchronize_rcu(); + rtnl_lock(); spin_lock_bh(&nf_conntrack_lock); for_each_net(net) __nf_conntrack_helper_unregister(me, net); spin_unlock_bh(&nf_conntrack_lock); + rtnl_unlock(); } EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index a040d46f85d6..5f4a6516b3b6 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1090,7 +1090,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[], struct nf_conn_help *help; struct nf_conntrack_helper *helper; - ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_KERNEL); + ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC); if (ct == NULL || IS_ERR(ct)) return -ENOMEM; @@ -1138,7 +1138,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[], } } - nf_ct_acct_ext_add(ct, GFP_KERNEL); + nf_ct_acct_ext_add(ct, GFP_ATOMIC); #if defined(CONFIG_NF_CONNTRACK_MARK) if (cda[CTA_MARK]) @@ -1212,13 +1212,14 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb, atomic_inc(&master_ct->ct_general.use); } - spin_unlock_bh(&nf_conntrack_lock); err = -ENOENT; if (nlh->nlmsg_flags & NLM_F_CREATE) err = ctnetlink_create_conntrack(cda, &otuple, &rtuple, master_ct); + spin_unlock_bh(&nf_conntrack_lock); + if (err < 0 && master_ct) nf_ct_put(master_ct); diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index a59a307e685d..592d73344d46 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ 
b/net/netfilter/nf_conntrack_proto.c @@ -22,6 +22,7 @@ #include <linux/notifier.h> #include <linux/kernel.h> #include <linux/netdevice.h> +#include <linux/rtnetlink.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_l3proto.h> @@ -221,8 +222,10 @@ void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto) synchronize_rcu(); /* Remove all contrack entries for this protocol */ + rtnl_lock(); for_each_net(net) nf_ct_iterate_cleanup(net, kill_l3proto, proto); + rtnl_unlock(); } EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister); @@ -333,8 +336,10 @@ void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto) synchronize_rcu(); /* Remove all contrack entries for this protocol */ + rtnl_lock(); for_each_net(net) nf_ct_iterate_cleanup(net, kill_l4proto, l4proto); + rtnl_unlock(); } EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister); diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index a2cdbcbf64c4..4ab62ad85dd4 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -335,7 +335,7 @@ static int __init nf_ct_proto_gre_init(void) rv = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_gre4); if (rv < 0) return rv; - rv = register_pernet_gen_device(&proto_gre_net_id, &proto_gre_net_ops); + rv = register_pernet_gen_subsys(&proto_gre_net_id, &proto_gre_net_ops); if (rv < 0) nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4); return rv; @@ -344,7 +344,7 @@ static int __init nf_ct_proto_gre_init(void) static void nf_ct_proto_gre_fini(void) { nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4); - unregister_pernet_gen_device(proto_gre_net_id, &proto_gre_net_ops); + unregister_pernet_gen_subsys(proto_gre_net_id, &proto_gre_net_ops); } module_init(nf_ct_proto_gre_init); diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 02a8fed21082..1acc089be7e9 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -141,7 +141,7 @@ socket_mt(const struct sk_buff *skb, const struct xt_match_param *par) sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol, saddr, daddr, sport, dport, par->in, false); if (sk != NULL) { - bool wildcard = (inet_sk(sk)->rcv_saddr == 0); + bool wildcard = (sk->sk_state != TCP_TIME_WAIT && inet_sk(sk)->rcv_saddr == 0); nf_tproxy_put_sock(sk); if (wildcard) diff --git a/net/netlabel/netlabel_addrlist.c b/net/netlabel/netlabel_addrlist.c index b0925a303353..249f6b92f153 100644 --- a/net/netlabel/netlabel_addrlist.c +++ b/net/netlabel/netlabel_addrlist.c @@ -315,6 +315,7 @@ struct netlbl_af6list *netlbl_af6list_remove(const struct in6_addr *addr, * Audit Helper Functions */ +#ifdef CONFIG_AUDIT /** * netlbl_af4list_audit_addr - Audit an IPv4 address * @audit_buf: audit buffer @@ -386,3 +387,4 @@ void netlbl_af6list_audit_addr(struct audit_buffer *audit_buf, } } #endif /* IPv6 */ +#endif /* CONFIG_AUDIT */ diff --git a/net/netlabel/netlabel_addrlist.h b/net/netlabel/netlabel_addrlist.h index 0242bead405f..07ae7fd82be1 100644 --- a/net/netlabel/netlabel_addrlist.h +++ b/net/netlabel/netlabel_addrlist.h @@ -120,9 +120,19 @@ struct netlbl_af4list *netlbl_af4list_search(__be32 addr, struct netlbl_af4list *netlbl_af4list_search_exact(__be32 addr, __be32 mask, struct list_head *head); + +#ifdef CONFIG_AUDIT void netlbl_af4list_audit_addr(struct audit_buffer *audit_buf, int src, const char *dev, __be32 addr, __be32 mask); +#else +static inline void netlbl_af4list_audit_addr(struct audit_buffer 
*audit_buf, + int src, const char *dev, + __be32 addr, __be32 mask) +{ + return; +} +#endif #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) @@ -179,11 +189,23 @@ struct netlbl_af6list *netlbl_af6list_search(const struct in6_addr *addr, struct netlbl_af6list *netlbl_af6list_search_exact(const struct in6_addr *addr, const struct in6_addr *mask, struct list_head *head); + +#ifdef CONFIG_AUDIT void netlbl_af6list_audit_addr(struct audit_buffer *audit_buf, int src, const char *dev, const struct in6_addr *addr, const struct in6_addr *mask); +#else +static inline void netlbl_af6list_audit_addr(struct audit_buffer *audit_buf, + int src, + const char *dev, + const struct in6_addr *addr, + const struct in6_addr *mask) +{ + return; +} +#endif #endif /* IPV6 */ #endif diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c index ee769ecaa13c..0a0ef17b2a40 100644 --- a/net/netlabel/netlabel_mgmt.c +++ b/net/netlabel/netlabel_mgmt.c @@ -265,7 +265,7 @@ add_failure: static int netlbl_mgmt_listentry(struct sk_buff *skb, struct netlbl_dom_map *entry) { - int ret_val; + int ret_val = 0; struct nlattr *nla_a; struct nlattr *nla_b; struct netlbl_af4list *iter4; diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index e8a5c32b0f10..90c8506a0aac 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -574,9 +574,10 @@ static int netlbl_unlhsh_remove_addr4(struct net *net, list_entry = netlbl_af4list_remove(addr->s_addr, mask->s_addr, &iface->addr4_list); spin_unlock(&netlbl_unlhsh_lock); - if (list_entry == NULL) + if (list_entry != NULL) + entry = netlbl_unlhsh_addr4_entry(list_entry); + else ret_val = -ENOENT; - entry = netlbl_unlhsh_addr4_entry(list_entry); audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL, audit_info); @@ -634,9 +635,10 @@ static int netlbl_unlhsh_remove_addr6(struct net *net, spin_lock(&netlbl_unlhsh_lock); list_entry = netlbl_af6list_remove(addr, mask, &iface->addr6_list); spin_unlock(&netlbl_unlhsh_lock); - if (list_entry == NULL) + if (list_entry != NULL) + entry = netlbl_unlhsh_addr6_entry(list_entry); + else ret_val = -ENOENT; - entry = netlbl_unlhsh_addr6_entry(list_entry); audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL, audit_info); diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c index b9d97effebe3..9d211f12582b 100644 --- a/net/phonet/af_phonet.c +++ b/net/phonet/af_phonet.c @@ -33,9 +33,30 @@ #include <net/phonet/phonet.h> #include <net/phonet/pn_dev.h> -static struct net_proto_family phonet_proto_family; -static struct phonet_protocol *phonet_proto_get(int protocol); -static inline void phonet_proto_put(struct phonet_protocol *pp); +/* Transport protocol registration */ +static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; +static DEFINE_SPINLOCK(proto_tab_lock); + +static struct phonet_protocol *phonet_proto_get(int protocol) +{ + struct phonet_protocol *pp; + + if (protocol >= PHONET_NPROTO) + return NULL; + + spin_lock(&proto_tab_lock); + pp = proto_tab[protocol]; + if (pp && !try_module_get(pp->prot->owner)) + pp = NULL; + spin_unlock(&proto_tab_lock); + + return pp; +} + +static inline void phonet_proto_put(struct phonet_protocol *pp) +{ + module_put(pp->prot->owner); +} /* protocol family functions */ @@ -144,8 +165,8 @@ static int pn_send(struct sk_buff *skb, struct net_device *dev, struct phonethdr *ph; int err; - if (skb->len + 2 > 0xffff) { - /* Phonet length field would overflow */ + if (skb->len + 2 > 0xffff /* Phonet 
length field limit */ || + skb->len + sizeof(struct phonethdr) > dev->mtu) { err = -EMSGSIZE; goto drop; } @@ -261,6 +282,8 @@ static inline int can_respond(struct sk_buff *skb) return 0; /* we are not the destination */ if (ph->pn_res == PN_PREFIX && !pskb_may_pull(skb, 5)) return 0; + if (ph->pn_res == PN_COMMGR) /* indications */ + return 0; ph = pn_hdr(skb); /* re-acquires the pointer */ pm = pn_msg(skb); @@ -309,7 +332,8 @@ static int send_reset_indications(struct sk_buff *rskb) return pn_raw_send(data, sizeof(data), rskb->dev, pn_object(oph->pn_sdev, 0x00), - pn_object(oph->pn_rdev, oph->pn_robj), 0x10); + pn_object(oph->pn_rdev, oph->pn_robj), + PN_COMMGR); } @@ -372,10 +396,6 @@ static struct packet_type phonet_packet_type = { .func = phonet_rcv, }; -/* Transport protocol registration */ -static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly; -static DEFINE_SPINLOCK(proto_tab_lock); - int __init_or_module phonet_proto_register(int protocol, struct phonet_protocol *pp) { @@ -409,27 +429,6 @@ void phonet_proto_unregister(int protocol, struct phonet_protocol *pp) } EXPORT_SYMBOL(phonet_proto_unregister); -static struct phonet_protocol *phonet_proto_get(int protocol) -{ - struct phonet_protocol *pp; - - if (protocol >= PHONET_NPROTO) - return NULL; - - spin_lock(&proto_tab_lock); - pp = proto_tab[protocol]; - if (pp && !try_module_get(pp->prot->owner)) - pp = NULL; - spin_unlock(&proto_tab_lock); - - return pp; -} - -static inline void phonet_proto_put(struct phonet_protocol *pp) -{ - module_put(pp->prot->owner); -} - /* Module registration */ static int __init phonet_init(void) { diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index 53be9fc82aaa..f93ff8ef47d0 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c @@ -115,7 +115,7 @@ int phonet_address_del(struct net_device *dev, u8 addr) pnd = __phonet_get(dev); if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) err = -EADDRNOTAVAIL; - if (bitmap_empty(pnd->addrs, 64)) + else if (bitmap_empty(pnd->addrs, 64)) __phonet_device_free(pnd); spin_unlock_bh(&pndevs.lock); return err; diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c index b1770d66bc8d..242fe8f8c322 100644 --- a/net/phonet/pn_netlink.c +++ b/net/phonet/pn_netlink.c @@ -123,6 +123,7 @@ nla_put_failure: static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { + struct net *net = sock_net(skb->sk); struct phonet_device *pnd; int dev_idx = 0, dev_start_idx = cb->args[0]; int addr_idx = 0, addr_start_idx = cb->args[1]; @@ -131,6 +132,8 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) list_for_each_entry(pnd, &pndevs.list, list) { u8 addr; + if (!net_eq(dev_net(pnd->netdev), net)) + continue; if (dev_idx > dev_start_idx) addr_start_idx = 0; if (dev_idx++ < dev_start_idx) diff --git a/net/rfkill/rfkill-input.c b/net/rfkill/rfkill-input.c index 21124ec0a73d..bfdade72e066 100644 --- a/net/rfkill/rfkill-input.c +++ b/net/rfkill/rfkill-input.c @@ -256,6 +256,11 @@ static struct input_handler rfkill_handler = { static int __init rfkill_handler_init(void) { + unsigned long last_run = jiffies - msecs_to_jiffies(500); + rfkill_wlan.last = last_run; + rfkill_bt.last = last_run; + rfkill_uwb.last = last_run; + rfkill_wimax.last = last_run; return input_register_handler(&rfkill_handler); } diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c index f949a482b007..25ba3bd57e66 100644 --- a/net/rfkill/rfkill.c +++ b/net/rfkill/rfkill.c @@ -603,7 +603,7 @@ static int rfkill_check_duplicity(const struct 
rfkill *rfkill) } /* 0: first switch of its kind */ - return test_bit(rfkill->type, seen); + return (test_bit(rfkill->type, seen)) ? 1 : 0; } static int rfkill_add_switch(struct rfkill *rfkill) diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index a7f1ce11bc22..0c1cc7612800 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1072,6 +1072,10 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock, unsigned char *asmptr; int n, size, qbit = 0; + /* ROSE empty frame has no meaning : don't send */ + if (len == 0) + return 0; + if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT)) return -EINVAL; @@ -1265,6 +1269,12 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, skb_reset_transport_header(skb); copied = skb->len; + /* ROSE empty frame has no meaning : ignore it */ + if (copied == 0) { + skb_free_datagram(sk, skb); + return copied; + } + if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index b16ad2972c6b..6ab4a2f92ca0 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -417,6 +417,8 @@ static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab) struct nlattr *nest; nest = nla_nest_start(skb, TCA_STAB); + if (nest == NULL) + goto nla_put_failure; NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts); nla_nest_end(skb, nest); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 93cd30ce6501..cdcd16fcfeda 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -270,6 +270,8 @@ static void dev_watchdog_down(struct net_device *dev) void netif_carrier_on(struct net_device *dev) { if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) { + if (dev->reg_state == NETREG_UNINITIALIZED) + return; linkwatch_fire_event(dev); if (netif_running(dev)) __netdev_watchdog_up(dev); @@ -285,8 +287,11 @@ EXPORT_SYMBOL(netif_carrier_on); */ void netif_carrier_off(struct net_device *dev) { - if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) + if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) { + if (dev->reg_state == NETREG_UNINITIALIZED) + return; linkwatch_fire_event(dev); + } } EXPORT_SYMBOL(netif_carrier_off); diff --git a/net/socket.c b/net/socket.c index 2b7a4b5c9b72..92764d836891 100644 --- a/net/socket.c +++ b/net/socket.c @@ -990,7 +990,6 @@ static int sock_close(struct inode *inode, struct file *filp) printk(KERN_DEBUG "sock_close: NULL inode\n"); return 0; } - sock_fasync(-1, filp, 0); sock_release(SOCKET_I(inode)); return 0; } @@ -1427,8 +1426,8 @@ asmlinkage long sys_listen(int fd, int backlog) * clean when we restucture accept also. */ -long do_accept(int fd, struct sockaddr __user *upeer_sockaddr, - int __user *upeer_addrlen, int flags) +asmlinkage long sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, + int __user *upeer_addrlen, int flags) { struct socket *sock, *newsock; struct file *newfile; @@ -1511,66 +1510,10 @@ out_fd: goto out_put; } -#if 0 -#ifdef HAVE_SET_RESTORE_SIGMASK -asmlinkage long sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr, - int __user *upeer_addrlen, - const sigset_t __user *sigmask, - size_t sigsetsize, int flags) -{ - sigset_t ksigmask, sigsaved; - int ret; - - if (sigmask) { - /* XXX: Don't preclude handling different sized sigset_t's. 
*/ - if (sigsetsize != sizeof(sigset_t)) - return -EINVAL; - if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask))) - return -EFAULT; - - sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP)); - sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); - } - - ret = do_accept(fd, upeer_sockaddr, upeer_addrlen, flags); - - if (ret < 0 && signal_pending(current)) { - /* - * Don't restore the signal mask yet. Let do_signal() deliver - * the signal on the way back to userspace, before the signal - * mask is restored. - */ - if (sigmask) { - memcpy(&current->saved_sigmask, &sigsaved, - sizeof(sigsaved)); - set_restore_sigmask(); - } - } else if (sigmask) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); - - return ret; -} -#else -asmlinkage long sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr, - int __user *upeer_addrlen, - const sigset_t __user *sigmask, - size_t sigsetsize, int flags) -{ - /* The platform does not support restoring the signal mask in the - * return path. So we do not allow using paccept() with a signal - * mask. */ - if (sigmask) - return -EINVAL; - - return do_accept(fd, upeer_sockaddr, upeer_addrlen, flags); -} -#endif -#endif - asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen) { - return do_accept(fd, upeer_sockaddr, upeer_addrlen, 0); + return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0); } /* @@ -2097,7 +2040,7 @@ static const unsigned char nargs[19]={ AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), - AL(6) + AL(4) }; #undef AL @@ -2116,7 +2059,7 @@ asmlinkage long sys_socketcall(int call, unsigned long __user *args) unsigned long a0, a1; int err; - if (call < 1 || call > SYS_PACCEPT) + if (call < 1 || call > SYS_ACCEPT4) return -EINVAL; /* copy_from_user should be SMP safe.
*/ @@ -2144,9 +2087,8 @@ asmlinkage long sys_socketcall(int call, unsigned long __user *args) err = sys_listen(a0, a1); break; case SYS_ACCEPT: - err = - do_accept(a0, (struct sockaddr __user *)a1, - (int __user *)a[2], 0); + err = sys_accept4(a0, (struct sockaddr __user *)a1, + (int __user *)a[2], 0); break; case SYS_GETSOCKNAME: err = @@ -2193,12 +2135,9 @@ asmlinkage long sys_socketcall(int call, unsigned long __user *args) case SYS_RECVMSG: err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); break; - case SYS_PACCEPT: - err = - sys_paccept(a0, (struct sockaddr __user *)a1, - (int __user *)a[2], - (const sigset_t __user *) a[3], - a[4], a[5]); + case SYS_ACCEPT4: + err = sys_accept4(a0, (struct sockaddr __user *)a1, + (int __user *)a[2], a[3]); break; default: err = -EINVAL; diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 436bf1b4b76c..cb216b2df666 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -228,19 +228,21 @@ static int rpcauth_prune_expired(struct list_head *free, int nr_to_scan) { spinlock_t *cache_lock; - struct rpc_cred *cred; + struct rpc_cred *cred, *next; unsigned long expired = jiffies - RPC_AUTH_EXPIRY_MORATORIUM; - while (!list_empty(&cred_unused)) { - cred = list_entry(cred_unused.next, struct rpc_cred, cr_lru); + list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) { + + /* Enforce a 60 second garbage collection moratorium */ + if (time_in_range(cred->cr_expire, expired, jiffies) && + test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) + continue; + list_del_init(&cred->cr_lru); number_cred_unused--; if (atomic_read(&cred->cr_count) != 0) continue; - /* Enforce a 5 second garbage collection moratorium */ - if (time_in_range(cred->cr_expire, expired, jiffies) && - test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0) - continue; + cache_lock = &cred->cr_auth->au_credcache->lock; spin_lock(cache_lock); if (atomic_read(&cred->cr_count) == 0) { @@ -453,7 +455,7 @@ need_lock: } if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0) rpcauth_unhash_cred(cred); - else if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) { + if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) { cred->cr_expire = jiffies; list_add_tail(&cred->cr_lru, &cred_unused); number_cred_unused++; diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c index 744b79fdcb19..4028502f0528 100644 --- a/net/sunrpc/auth_generic.c +++ b/net/sunrpc/auth_generic.c @@ -133,13 +133,29 @@ static int generic_match(struct auth_cred *acred, struct rpc_cred *cred, int flags) { struct generic_cred *gcred = container_of(cred, struct generic_cred, gc_base); + int i; if (gcred->acred.uid != acred->uid || gcred->acred.gid != acred->gid || - gcred->acred.group_info != acred->group_info || gcred->acred.machine_cred != acred->machine_cred) - return 0; + goto out_nomatch; + + /* Optimisation in the case where pointers are identical... */ + if (gcred->acred.group_info == acred->group_info) + goto out_match; + + /* Slow path... 
*/ + if (gcred->acred.group_info->ngroups != acred->group_info->ngroups) + goto out_nomatch; + for (i = 0; i < gcred->acred.group_info->ngroups; i++) { + if (GROUP_AT(gcred->acred.group_info, i) != + GROUP_AT(acred->group_info, i)) + goto out_nomatch; + } +out_match: return 1; +out_nomatch: + return 0; } void __init rpc_init_generic_auth(void) diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 95293f549e9c..a1951dcc5776 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1183,7 +1183,11 @@ int svc_addsock(struct svc_serv *serv, else if (so->state > SS_UNCONNECTED) err = -EISCONN; else { - svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS); + if (!try_module_get(THIS_MODULE)) + err = -ENOENT; + else + svsk = svc_setup_socket(serv, so, &err, + SVC_SOCK_DEFAULTS); if (svsk) { struct sockaddr_storage addr; struct sockaddr *sin = (struct sockaddr *)&addr; @@ -1196,7 +1200,8 @@ int svc_addsock(struct svc_serv *serv, spin_unlock_bh(&serv->sv_lock); svc_xprt_received(&svsk->sk_xprt); err = 0; - } + } else + module_put(THIS_MODULE); } if (err) { sockfd_put(so); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 9a288d5eea64..0a50361e3d83 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -249,6 +249,7 @@ struct sock_xprt { void (*old_data_ready)(struct sock *, int); void (*old_state_change)(struct sock *); void (*old_write_space)(struct sock *); + void (*old_error_report)(struct sock *); }; /* @@ -698,8 +699,9 @@ static int xs_tcp_send_request(struct rpc_task *task) case -EAGAIN: xs_nospace(task); break; - case -ECONNREFUSED: case -ECONNRESET: + xs_tcp_shutdown(xprt); + case -ECONNREFUSED: case -ENOTCONN: case -EPIPE: status = -ENOTCONN; @@ -742,6 +744,22 @@ out_release: xprt_release_xprt(xprt, task); } +static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk) +{ + transport->old_data_ready = sk->sk_data_ready; + transport->old_state_change = sk->sk_state_change; + transport->old_write_space = sk->sk_write_space; + transport->old_error_report = sk->sk_error_report; +} + +static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk) +{ + sk->sk_data_ready = transport->old_data_ready; + sk->sk_state_change = transport->old_state_change; + sk->sk_write_space = transport->old_write_space; + sk->sk_error_report = transport->old_error_report; +} + /** * xs_close - close a socket * @xprt: transport @@ -765,9 +783,8 @@ static void xs_close(struct rpc_xprt *xprt) transport->sock = NULL; sk->sk_user_data = NULL; - sk->sk_data_ready = transport->old_data_ready; - sk->sk_state_change = transport->old_state_change; - sk->sk_write_space = transport->old_write_space; + + xs_restore_old_callbacks(transport, sk); write_unlock_bh(&sk->sk_callback_lock); sk->sk_no_check = 0; @@ -1180,6 +1197,28 @@ static void xs_tcp_state_change(struct sock *sk) } /** + * xs_tcp_error_report - callback mainly for catching RST events + * @sk: socket + */ +static void xs_tcp_error_report(struct sock *sk) +{ + struct rpc_xprt *xprt; + + read_lock(&sk->sk_callback_lock); + if (sk->sk_err != ECONNRESET || sk->sk_state != TCP_ESTABLISHED) + goto out; + if (!(xprt = xprt_from_sock(sk))) + goto out; + dprintk("RPC: %s client %p...\n" + "RPC: error %d\n", + __func__, xprt, sk->sk_err); + + xprt_force_disconnect(xprt); +out: + read_unlock(&sk->sk_callback_lock); +} + +/** * xs_udp_write_space - callback invoked when socket buffer space * becomes available * @sk: socket whose state has changed @@ -1454,10 +1493,9 @@ static void 
xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) write_lock_bh(&sk->sk_callback_lock); + xs_save_old_callbacks(transport, sk); + sk->sk_user_data = xprt; - transport->old_data_ready = sk->sk_data_ready; - transport->old_state_change = sk->sk_state_change; - transport->old_write_space = sk->sk_write_space; sk->sk_data_ready = xs_udp_data_ready; sk->sk_write_space = xs_udp_write_space; sk->sk_no_check = UDP_CSUM_NORCV; @@ -1589,13 +1627,13 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) write_lock_bh(&sk->sk_callback_lock); + xs_save_old_callbacks(transport, sk); + sk->sk_user_data = xprt; - transport->old_data_ready = sk->sk_data_ready; - transport->old_state_change = sk->sk_state_change; - transport->old_write_space = sk->sk_write_space; sk->sk_data_ready = xs_tcp_data_ready; sk->sk_state_change = xs_tcp_state_change; sk->sk_write_space = xs_tcp_write_space; + sk->sk_error_report = xs_tcp_error_report; sk->sk_allocation = GFP_ATOMIC; /* socket options */ diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index dc504d308ec0..66d5ac4773ab 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1302,14 +1302,23 @@ static void unix_destruct_fds(struct sk_buff *skb) sock_wfree(skb); } -static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) +static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) { int i; + + /* + * Need to duplicate file references for the sake of garbage + * collection. Otherwise a socket in the fps might become a + * candidate for GC while the skb is not yet queued. + */ + UNIXCB(skb).fp = scm_fp_dup(scm->fp); + if (!UNIXCB(skb).fp) + return -ENOMEM; + for (i=scm->fp->count-1; i>=0; i--) unix_inflight(scm->fp->fp[i]); - UNIXCB(skb).fp = scm->fp; skb->destructor = unix_destruct_fds; - scm->fp = NULL; + return 0; } /* @@ -1334,6 +1343,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, if (NULL == siocb->scm) siocb->scm = &tmp_scm; + wait_for_unix_gc(); err = scm_send(sock, msg, siocb->scm); if (err < 0) return err; @@ -1368,8 +1378,11 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, goto out; memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); - if (siocb->scm->fp) - unix_attach_fds(siocb->scm, skb); + if (siocb->scm->fp) { + err = unix_attach_fds(siocb->scm, skb); + if (err) + goto out_free; + } unix_get_secdata(siocb->scm, skb); skb_reset_transport_header(skb); @@ -1481,6 +1494,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, if (NULL == siocb->scm) siocb->scm = &tmp_scm; + wait_for_unix_gc(); err = scm_send(sock, msg, siocb->scm); if (err < 0) return err; @@ -1538,8 +1552,13 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, size = min_t(int, size, skb_tailroom(skb)); memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); - if (siocb->scm->fp) - unix_attach_fds(siocb->scm, skb); + if (siocb->scm->fp) { + err = unix_attach_fds(siocb->scm, skb); + if (err) { + kfree_skb(skb); + goto out_err; + } + } if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) { kfree_skb(skb); @@ -2213,7 +2232,7 @@ static int unix_net_init(struct net *net) #endif error = 0; out: - return 0; + return error; } static void unix_net_exit(struct net *net) diff --git a/net/unix/garbage.c b/net/unix/garbage.c index 2a27b84f740b..abb3ab34cb1e 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -80,6 +80,7 @@ #include <linux/file.h> #include <linux/proc_fs.h> 
#include <linux/mutex.h> +#include <linux/wait.h> #include <net/sock.h> #include <net/af_unix.h> @@ -91,6 +92,7 @@ static LIST_HEAD(gc_inflight_list); static LIST_HEAD(gc_candidates); static DEFINE_SPINLOCK(unix_gc_lock); +static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait); unsigned int unix_tot_inflight; @@ -186,8 +188,17 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), */ struct sock *sk = unix_get_socket(*fp++); if (sk) { - hit = true; - func(unix_sk(sk)); + struct unix_sock *u = unix_sk(sk); + + /* + * Ignore non-candidates, they could + * have been added to the queues after + * starting the garbage collection + */ + if (u->gc_candidate) { + hit = true; + func(u); + } } } if (hit && hitlist != NULL) { @@ -249,24 +260,29 @@ static void inc_inflight_move_tail(struct unix_sock *u) { atomic_long_inc(&u->inflight); /* - * If this is still a candidate, move it to the end of the - * list, so that it's checked even if it was already passed - * over + * If this still might be part of a cycle, move it to the end + * of the list, so that it's checked even if it was already + * passed over */ - if (u->gc_candidate) + if (u->gc_maybe_cycle) list_move_tail(&u->link, &gc_candidates); } -/* The external entry point: unix_gc() */ +static bool gc_in_progress = false; -void unix_gc(void) +void wait_for_unix_gc(void) { - static bool gc_in_progress = false; + wait_event(unix_gc_wait, gc_in_progress == false); +} +/* The external entry point: unix_gc() */ +void unix_gc(void) +{ struct unix_sock *u; struct unix_sock *next; struct sk_buff_head hitlist; struct list_head cursor; + LIST_HEAD(not_cycle_list); spin_lock(&unix_gc_lock); @@ -282,10 +298,14 @@ void unix_gc(void) * * Holding unix_gc_lock will protect these candidates from * being detached, and hence from gaining an external - * reference. This also means, that since there are no - * possible receivers, the receive queues of these sockets are - * static during the GC, even though the dequeue is done - * before the detach without atomicity guarantees. + * reference. Since there are no possible receivers, all + * buffers currently on the candidates' queues stay there + * during the garbage collection. + * + * We also know that no new candidate can be added onto the + * receive queues. Other, non candidate sockets _can_ be + * added to queue, so we must make sure only to touch + * candidates. */ list_for_each_entry_safe(u, next, &gc_inflight_list, link) { long total_refs; @@ -299,6 +319,7 @@ void unix_gc(void) if (total_refs == inflight_refs) { list_move_tail(&u->link, &gc_candidates); u->gc_candidate = 1; + u->gc_maybe_cycle = 1; } } @@ -325,14 +346,24 @@ void unix_gc(void) list_move(&cursor, &u->link); if (atomic_long_read(&u->inflight) > 0) { - list_move_tail(&u->link, &gc_inflight_list); - u->gc_candidate = 0; + list_move_tail(&u->link, ¬_cycle_list); + u->gc_maybe_cycle = 0; scan_children(&u->sk, inc_inflight_move_tail, NULL); } } list_del(&cursor); /* + * not_cycle_list contains those sockets which do not make up a + * cycle. Restore these to the inflight list. + */ + while (!list_empty(¬_cycle_list)) { + u = list_entry(not_cycle_list.next, struct unix_sock, link); + u->gc_candidate = 0; + list_move_tail(&u->link, &gc_inflight_list); + } + + /* * Now gc_candidates contains only garbage. Restore original * inflight counters for these as well, and remove the skbuffs * which are creating the cycle(s). @@ -351,6 +382,7 @@ void unix_gc(void) /* All candidates should have been detached by now. 
*/ BUG_ON(!list_empty(&gc_candidates)); gc_in_progress = false; + wake_up(&unix_gc_wait); out: spin_unlock(&unix_gc_lock); diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 626dbb688499..eb3b1a9f9b12 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -343,9 +343,9 @@ static int ignore_request(struct wiphy *wiphy, enum reg_set_by set_by, return 0; return -EALREADY; } - if (WARN_ON(!is_alpha2_set(alpha2) || !is_an_alpha2(alpha2)), + if (WARN(!is_alpha2_set(alpha2) || !is_an_alpha2(alpha2), "Invalid Country IE regulatory hint passed " - "to the wireless core\n") + "to the wireless core\n")) return -EINVAL; /* We ignore Country IE hints for now, as we haven't yet * added the dot11MultiDomainCapabilityEnabled flag diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 832b47c1de80..fb216c9adf86 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -315,9 +315,9 @@ static void xfrm_policy_kill(struct xfrm_policy *policy) return; } - spin_lock(&xfrm_policy_gc_lock); + spin_lock_bh(&xfrm_policy_gc_lock); hlist_add_head(&policy->bydst, &xfrm_policy_gc_list); - spin_unlock(&xfrm_policy_gc_lock); + spin_unlock_bh(&xfrm_policy_gc_lock); schedule_work(&xfrm_policy_gc_work); } @@ -817,6 +817,7 @@ int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info) continue; hlist_del(&pol->bydst); hlist_del(&pol->byidx); + list_del(&pol->walk.all); write_unlock_bh(&xfrm_policy_lock); xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, @@ -1251,6 +1252,8 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl, -EINVAL : -EAGAIN); xfrm_state_put(x); } + else if (error == -ESRCH) + error = -EAGAIN; if (!tmpl->optional) goto fail; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 4a8a1abb59ee..a278a6f3b991 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1816,7 +1816,7 @@ static int copy_to_user_kmaddress(struct xfrm_kmaddress *k, struct sk_buff *skb) uk.family = k->family; uk.reserved = k->reserved; memcpy(&uk.local, &k->local, sizeof(uk.local)); - memcpy(&uk.remote, &k->local, sizeof(uk.remote)); + memcpy(&uk.remote, &k->remote, sizeof(uk.remote)); return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk); } |
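The net/socket.c hunks above drop the experimental paccept() entry point and expose sys_accept4() instead, dispatching it through the socketcall table with four arguments (hence the AL(6) to AL(4) change). A minimal user-space sketch of how the resulting accept4() interface is typically used follows. This is an illustration only, not part of the patch: it assumes a libc that already wraps the syscall and uses the standard SOCK_NONBLOCK/SOCK_CLOEXEC flag names from <sys/socket.h>; the loopback address and port 12345 are arbitrary.

/* Sketch: accept a connection and set O_NONBLOCK + FD_CLOEXEC in one call. */
#define _GNU_SOURCE
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in addr;
	int lfd, cfd;

	lfd = socket(AF_INET, SOCK_STREAM, 0);
	if (lfd < 0)
		return 1;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	addr.sin_port = htons(12345);

	if (bind(lfd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(lfd, 8) < 0)
		return 1;

	/* The flags argument replaces the accept() + fcntl() sequence. */
	cfd = accept4(lfd, NULL, NULL, SOCK_NONBLOCK | SOCK_CLOEXEC);
	if (cfd < 0)
		perror("accept4");
	else
		close(cfd);

	close(lfd);
	return 0;
}

The point of the extra flags argument is that the descriptor becomes non-blocking and close-on-exec atomically with the accept itself, avoiding the separate fcntl() round trip and the fork/exec race it leaves open.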