author     Jiri Kosina <jkosina@suse.cz>  2010-06-16 18:08:13 +0200
committer  Jiri Kosina <jkosina@suse.cz>  2010-06-16 18:08:13 +0200
commit     f1bbbb6912662b9f6070c5bfc4ca9eb1f06a9d5b (patch)
tree       c2c130a74be25b0b2dff992e1a195e2728bdaadd /net
parent     fix typos concerning "instead" (diff)
parent     Linux 2.6.35-rc3 (diff)
download   linux-f1bbbb6912662b9f6070c5bfc4ca9eb1f06a9d5b.tar.xz
           linux-f1bbbb6912662b9f6070c5bfc4ca9eb1f06a9d5b.zip
Merge branch 'master' into for-next
Diffstat (limited to 'net')
74 files changed, 731 insertions, 398 deletions
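Editorial note: several hunks below (net/core/datagram.c, net/ipv4/udp.c, net/ipv6/udp.c) convert lock_sock_bh()/unlock_sock_bh() callers to the lock_sock_fast()/unlock_sock_fast() pair that this merge introduces in net/core/sock.c. A minimal sketch of the calling convention, assuming the kernel-doc shown in the sock.c hunk; the function name and reclaim body here are illustrative, not from the patch:

	#include <net/sock.h>

	/*
	 * Sketch only: lock_sock_fast() returns true when it had to take
	 * the slow path (full socket lock) because the socket was owned by
	 * a user context, false when it only took sk_lock.slock with BHs
	 * disabled. That bool must be handed back to unlock_sock_fast()
	 * so it releases whichever lock was actually taken.
	 */
	static void example_partial_reclaim(struct sock *sk)
	{
		bool slow = lock_sock_fast(sk);

		sk_mem_reclaim_partial(sk);	/* keep the critical section short */

		unlock_sock_fast(sk, slow);
	}

The udp_destroy_sock() and skb_free_datagram_locked() conversions below follow exactly this shape.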
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index bd537fc10254..50f58f5f1c34 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -12,7 +12,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, return NET_RX_DROP; if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) - goto drop; + skb->deliver_no_wcard = 1; skb->skb_iif = skb->dev->ifindex; __vlan_hwaccel_put_tag(skb, vlan_tci);
@@ -84,7 +84,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp, struct sk_buff *p; if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master))) - goto drop; + skb->deliver_no_wcard = 1; skb->skb_iif = skb->dev->ifindex; __vlan_hwaccel_put_tag(skb, vlan_tci);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 55be90826f5f..529842677817 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -708,7 +708,8 @@ static int vlan_dev_init(struct net_device *dev) netif_carrier_off(dev); /* IFF_BROADCAST|IFF_MULTICAST; ??? */ - dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI); + dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | + IFF_MASTER | IFF_SLAVE); dev->iflink = real_dev->ifindex; dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))) |
diff --git a/net/9p/client.c b/net/9p/client.c
index 0aa79faa9850..37c8da07a80b 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1321,7 +1321,8 @@ static int p9_client_statsize(struct p9_wstat *wst, int proto_version) if (wst->muid) ret += strlen(wst->muid); - if (proto_version == p9_proto_2000u) { + if ((proto_version == p9_proto_2000u) || + (proto_version == p9_proto_2000L)) { ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */ if (wst->extension) ret += strlen(wst->extension);
@@ -1364,3 +1365,70 @@ error: return err; } EXPORT_SYMBOL(p9_client_wstat); + +int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + + err = 0; + clnt = fid->clnt; + + P9_DPRINTK(P9_DEBUG_9P, ">>> TSTATFS fid %d\n", fid->fid); + + req = p9_client_rpc(clnt, P9_TSTATFS, "d", fid->fid); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "ddqqqqqqd", &sb->type, + &sb->bsize, &sb->blocks, &sb->bfree, &sb->bavail, + &sb->files, &sb->ffree, &sb->fsid, &sb->namelen); + if (err) { + p9pdu_dump(1, req->rc); + p9_free_req(clnt, req); + goto error; + } + + P9_DPRINTK(P9_DEBUG_9P, "<<< RSTATFS fid %d type 0x%lx bsize %ld " + "blocks %llu bfree %llu bavail %llu files %llu ffree %llu " + "fsid %llu namelen %ld\n", + fid->fid, (long unsigned int)sb->type, (long int)sb->bsize, + sb->blocks, sb->bfree, sb->bavail, sb->files, sb->ffree, + sb->fsid, (long int)sb->namelen); + + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_statfs); + +int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, char *name) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + + err = 0; + clnt = fid->clnt; + + P9_DPRINTK(P9_DEBUG_9P, ">>> TRENAME fid %d newdirfid %d name %s\n", + fid->fid, newdirfid->fid, name); + + req = p9_client_rpc(clnt, P9_TRENAME, "dds", fid->fid, + newdirfid->fid, name); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + P9_DPRINTK(P9_DEBUG_9P, "<<< RRENAME fid %d\n", fid->fid); + + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_rename); +
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index e7541d5b0118..149f82160130 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -341,7 +341,8 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt, } break; case '?': - if (proto_version != p9_proto_2000u) + if ((proto_version != p9_proto_2000u) && + (proto_version != p9_proto_2000L)) return 0; break; default:
@@ -393,7 +394,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, const char *sptr = va_arg(ap, const char *); int16_t len = 0; if (sptr) - len = MIN(strlen(sptr), USHORT_MAX); + len = MIN(strlen(sptr), USHRT_MAX); errcode = p9pdu_writef(pdu, proto_version, "w", len);
@@ -488,7 +489,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, } break; case '?': - if (proto_version != p9_proto_2000u) + if ((proto_version != p9_proto_2000u) && + (proto_version != p9_proto_2000L)) return 0; break; default:
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 7eb78ecc1618..dcfbe99ff81c 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -137,7 +137,7 @@ static void req_done(struct virtqueue *vq) P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n"); - while ((rc = chan->vq->vq_ops->get_buf(chan->vq, &len)) != NULL) { + while ((rc = virtqueue_get_buf(chan->vq, &len)) != NULL) { P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc); P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag); req = p9_tag_lookup(chan->client, rc->tag);
@@ -209,13 +209,13 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req) req->status = REQ_STATUS_SENT; - if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) { + if (virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) { P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio rpc add_buf returned failure"); return -EIO; } - chan->vq->vq_ops->kick(chan->vq); + virtqueue_kick(chan->vq); P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n"); return 0;
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
index cd1daf6008bd..ed651786f16b 100644
--- a/net/caif/Kconfig
+++ b/net/caif/Kconfig
@@ -2,10 +2,8 @@ # CAIF net configurations # -#menu "CAIF Support" -comment "CAIF Support" menuconfig CAIF - tristate "Enable CAIF support" + tristate "CAIF support" select CRC_CCITT default n ---help---
@@ -45,4 +43,3 @@ config CAIF_NETDEV If unsure say Y. endif -#endmenu
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index c3a70c5c893a..3d0e09584fae 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -60,7 +60,7 @@ struct debug_fs_counter { atomic_t num_rx_flow_off; atomic_t num_rx_flow_on; }; -struct debug_fs_counter cnt; +static struct debug_fs_counter cnt; #define dbfs_atomic_inc(v) atomic_inc(v) #define dbfs_atomic_dec(v) atomic_dec(v) #else
@@ -128,17 +128,17 @@ static void caif_read_unlock(struct sock *sk) mutex_unlock(&cf_sk->readlock); } -int sk_rcvbuf_lowwater(struct caifsock *cf_sk) +static int sk_rcvbuf_lowwater(struct caifsock *cf_sk) { /* A quarter of full buffer is used a low water mark */ return cf_sk->sk.sk_rcvbuf / 4; } -void caif_flow_ctrl(struct sock *sk, int mode) +static void caif_flow_ctrl(struct sock *sk, int mode) { struct caifsock *cf_sk; cf_sk = container_of(sk, struct caifsock, sk); - if (cf_sk->layer.dn) + if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode); }
@@ -146,7 +146,7 @@ void caif_flow_ctrl(struct sock *sk, int mode) * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are * not dropped, but CAIF is sending flow off instead. */ -int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) +static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int err; int skb_len;
@@ -162,9 +162,8 @@ int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) atomic_read(&cf_sk->sk.sk_rmem_alloc), sk_rcvbuf_lowwater(cf_sk)); set_rx_flow_off(cf_sk); - if (cf_sk->layer.dn) - cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, - CAIF_MODEMCMD_FLOW_OFF_REQ); + dbfs_atomic_inc(&cnt.num_rx_flow_off); + caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); } err = sk_filter(sk, skb);
@@ -175,9 +174,8 @@ int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) trace_printk("CAIF: %s():" " sending flow OFF due to rmem_schedule\n", __func__); - if (cf_sk->layer.dn) - cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, - CAIF_MODEMCMD_FLOW_OFF_REQ); + dbfs_atomic_inc(&cnt.num_rx_flow_off); + caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ); } skb->dev = NULL; skb_set_owner_r(skb, sk);
@@ -285,65 +283,51 @@ static void caif_check_flow_release(struct sock *sk) { struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); - if (cf_sk->layer.dn == NULL || cf_sk->layer.dn->modemcmd == NULL) - return; if (rx_flow_is_on(cf_sk)) return; if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) { dbfs_atomic_inc(&cnt.num_rx_flow_on); set_rx_flow_on(cf_sk); - cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, - CAIF_MODEMCMD_FLOW_ON_REQ); + caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ); } } + /* - * Copied from sock.c:sock_queue_rcv_skb(), and added check that user buffer - * has sufficient size. + * Copied from unix_dgram_recvmsg, but removed credit checks, + * changed locking, address handling and added MSG_TRUNC. */ - static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *m, size_t buf_len, int flags) + struct msghdr *m, size_t len, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; - int ret = 0; - int len; + int ret; + int copylen; - if (unlikely(!buf_len)) - return -EINVAL; + ret = -EOPNOTSUPP; + if (m->msg_flags&MSG_OOB) + goto read_error; skb = skb_recv_datagram(sk, flags, 0 , &ret); if (!skb) goto read_error; - - len = skb->len; - - if (skb && skb->len > buf_len && !(flags & MSG_PEEK)) { - len = buf_len; - /* - * Push skb back on receive queue if buffer too small. - * This has a built-in race where multi-threaded receive - * may get packet in wrong order, but multiple read does - * not really guarantee ordered delivery anyway. - * Let's optimize for speed without taking locks. - */ - - skb_queue_head(&sk->sk_receive_queue, skb); - ret = -EMSGSIZE; - goto read_error; + copylen = skb->len; + if (len < copylen) { + m->msg_flags |= MSG_TRUNC; + copylen = len; } - ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len); + ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen); if (ret) - goto read_error; + goto out_free; + ret = (flags & MSG_TRUNC) ? skb->len : copylen; +out_free: skb_free_datagram(sk, skb); - caif_check_flow_release(sk); - - return len; + return ret; read_error: return ret;
@@ -920,17 +904,17 @@ wait_connect: timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); release_sock(sk); - err = wait_event_interruptible_timeout(*sk_sleep(sk), + err = -ERESTARTSYS; + timeo = wait_event_interruptible_timeout(*sk_sleep(sk), sk->sk_state != CAIF_CONNECTING, timeo); lock_sock(sk); - if (err < 0) + if (timeo < 0) goto out; /* -ERESTARTSYS */ - if (err == 0 && sk->sk_state != CAIF_CONNECTED) { - err = -ETIMEDOUT; - goto out; - } + err = -ETIMEDOUT; + if (timeo == 0 && sk->sk_state != CAIF_CONNECTED) + goto out; if (sk->sk_state != CAIF_CONNECTED) { sock->state = SS_UNCONNECTED; err = sock_error(sk);
@@ -945,7 +929,6 @@ out: return err; } - /* * caif_release() - Disconnect a CAIF Socket * Copied and modified af_irda.c:irda_release().
@@ -1019,10 +1002,6 @@ static unsigned int caif_poll(struct file *file, (sk->sk_shutdown & RCV_SHUTDOWN)) mask |= POLLIN | POLLRDNORM; - /* Connection-based need to check for termination and startup */ - if (sk->sk_state == CAIF_DISCONNECTED) - mask |= POLLHUP; - /* * we set writable also when the other side has shut down the * connection. This prevents stuck sockets.
@@ -1194,7 +1173,7 @@ static struct net_proto_family caif_family_ops = { .owner = THIS_MODULE, }; -int af_caif_init(void) +static int af_caif_init(void) { int err = sock_register(&caif_family_ops); if (!err)
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 0ffe1e1ce901..fcfda98a5e6d 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -44,13 +44,14 @@ struct cflayer *cfctrl_create(void) dev_info.id = 0xff; memset(this, 0, sizeof(*this)); cfsrvl_init(&this->serv, 0, &dev_info); - spin_lock_init(&this->info_list_lock); atomic_set(&this->req_seq_no, 1); atomic_set(&this->rsp_seq_no, 1); this->serv.layer.receive = cfctrl_recv; sprintf(this->serv.layer.name, "ctrl"); this->serv.layer.ctrlcmd = cfctrl_ctrlcmd; spin_lock_init(&this->loop_linkid_lock); + spin_lock_init(&this->info_list_lock); + INIT_LIST_HEAD(&this->list); this->loop_linkid = 1; return &this->serv.layer; }
@@ -112,20 +113,10 @@ bool cfctrl_req_eq(struct cfctrl_request_info *r1, void cfctrl_insert_req(struct cfctrl *ctrl, struct cfctrl_request_info *req) { - struct cfctrl_request_info *p; spin_lock(&ctrl->info_list_lock); - req->next = NULL; atomic_inc(&ctrl->req_seq_no); req->sequence_no = atomic_read(&ctrl->req_seq_no); - if (ctrl->first_req == NULL) { - ctrl->first_req = req; - spin_unlock(&ctrl->info_list_lock); - return; - } - p = ctrl->first_req; - while (p->next != NULL) - p = p->next; - p->next = req; + list_add_tail(&req->list, &ctrl->list); spin_unlock(&ctrl->info_list_lock); }
@@ -133,46 +124,28 @@ void cfctrl_insert_req(struct cfctrl *ctrl, struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, struct cfctrl_request_info *req) { - struct cfctrl_request_info *p; - struct cfctrl_request_info *ret; + struct cfctrl_request_info *p, *tmp, *first; spin_lock(&ctrl->info_list_lock); - if (ctrl->first_req == NULL) { - spin_unlock(&ctrl->info_list_lock); - return NULL; - } - - if (cfctrl_req_eq(req, ctrl->first_req)) { - ret = ctrl->first_req; - caif_assert(ctrl->first_req); - atomic_set(&ctrl->rsp_seq_no, - ctrl->first_req->sequence_no); - ctrl->first_req = ctrl->first_req->next; - spin_unlock(&ctrl->info_list_lock); - return ret; - } + first = list_first_entry(&ctrl->list, struct cfctrl_request_info, list); - p = ctrl->first_req; - - while (p->next != NULL) { - if (cfctrl_req_eq(req, p->next)) { - pr_warning("CAIF: %s(): Requests are not " + list_for_each_entry_safe(p, tmp, &ctrl->list, list) { + if (cfctrl_req_eq(req, p)) { + if (p != first) + pr_warning("CAIF: %s(): Requests are not " "received in order\n", __func__); - ret = p->next; + atomic_set(&ctrl->rsp_seq_no, - p->next->sequence_no); - p->next = p->next->next; - spin_unlock(&ctrl->info_list_lock); - return ret; + p->sequence_no); + list_del(&p->list); + goto out; } - p = p->next; } + p = NULL; +out: spin_unlock(&ctrl->info_list_lock); - - pr_warning("CAIF: %s(): Request does not match\n", - __func__); - return NULL; + return p; } struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer)
@@ -388,31 +361,18 @@ void cfctrl_getstartreason_req(struct cflayer *layer) void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer) { - struct cfctrl_request_info *p, *req; + struct cfctrl_request_info *p, *tmp; struct cfctrl *ctrl = container_obj(layr); spin_lock(&ctrl->info_list_lock); - - if (ctrl->first_req == NULL) { - spin_unlock(&ctrl->info_list_lock); - return; - } - - if (ctrl->first_req->client_layer == adap_layer) { - - req = ctrl->first_req; - ctrl->first_req = ctrl->first_req->next; - kfree(req); - } - - p = ctrl->first_req; - while (p != NULL && p->next != NULL) { - if (p->next->client_layer == adap_layer) { - - req = p->next; - p->next = p->next->next; - kfree(p->next); + pr_warning("CAIF: %s(): enter\n", __func__); + + list_for_each_entry_safe(p, tmp, &ctrl->list, list) { + if (p->client_layer == adap_layer) { + pr_warning("CAIF: %s(): cancel req :%d\n", __func__, + p->sequence_no); + list_del(&p->list); + kfree(p); } - p = p->next; } spin_unlock(&ctrl->info_list_lock);
@@ -634,7 +594,7 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: case CAIF_CTRLCMD_FLOW_OFF_IND: spin_lock(&this->info_list_lock); - if (this->first_req != NULL) { + if (!list_empty(&this->list)) { pr_debug("CAIF: %s(): Received flow off in " "control layer", __func__); }
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 7372f27f1d32..80c8d332b258 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -174,10 +174,11 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id) spin_lock(&muxl->receive_lock); up = get_up(muxl, id); if (up == NULL) - return NULL; + goto out; memset(muxl->up_cache, 0, sizeof(muxl->up_cache)); list_del(&up->node); cfsrvl_put(up); +out: spin_unlock(&muxl->receive_lock); return up; }
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 83fff2ff6658..a6fdf899741a 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -238,6 +238,7 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len) struct sk_buff *lastskb; u8 *to; const u8 *data = data2; + int ret; if (unlikely(is_erronous(pkt))) return -EPROTO; if (unlikely(skb_headroom(skb) < len)) {
@@ -246,9 +247,10 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len) } /* Make sure data is writable */ - if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) { + ret = skb_cow_data(skb, 0, &lastskb); + if (unlikely(ret < 0)) { PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n"); - return -EPROTO; + return ret; } to = skb_push(skb, len);
@@ -316,6 +318,8 @@ EXPORT_SYMBOL(cfpkt_setlen); struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len) { struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); + if (!pkt) + return NULL; if (unlikely(data != NULL)) cfpkt_add_body(pkt, data, len); return pkt;
@@ -344,12 +348,13 @@ struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, if (dst->tail + neededtailspace > dst->end) { /* Create a dumplicate of 'dst' with more tail space */ + struct cfpkt *tmppkt; dstlen = skb_headlen(dst); createlen = dstlen + neededtailspace; - tmp = pkt_to_skb( - cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX)); - if (!tmp) + tmppkt = cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX); + if (tmppkt == NULL) return NULL; + tmp = pkt_to_skb(tmppkt); skb_set_tail_pointer(tmp, dstlen); tmp->len = dstlen; memcpy(tmp->data, dst->data, dstlen);
@@ -368,6 +373,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) { struct sk_buff *skb2; struct sk_buff *skb = pkt_to_skb(pkt); + struct cfpkt *tmppkt; u8 *split = skb->data + pos; u16 len2nd = skb_tail_pointer(skb) - split;
@@ -381,9 +387,12 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) } /* Create a new packet for the second part of the data */ - skb2 = pkt_to_skb( - cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX, - PKT_PREFIX)); + tmppkt = cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX, + PKT_PREFIX); + if (tmppkt == NULL) + return NULL; + skb2 = pkt_to_skb(tmppkt); + if (skb2 == NULL) return NULL;
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index cd2830fec935..fd27b172fb5d 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -83,7 +83,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt) if (!cfsrvl_ready(service, &ret)) return ret; - if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { + if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { pr_err("CAIF: %s():Packet too large - size=%d\n", __func__, cfpkt_getlen(pkt)); return -EOVERFLOW;
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 06029ea2da2f..965c5baace40 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -59,14 +59,18 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt) u8 stx = CFSERL_STX; int ret; u16 expectlen = 0; + caif_assert(newpkt != NULL); spin_lock(&layr->sync); if (layr->incomplete_frm != NULL) { - layr->incomplete_frm = cfpkt_append(layr->incomplete_frm, newpkt, expectlen); pkt = layr->incomplete_frm; + if (pkt == NULL) { + spin_unlock(&layr->sync); + return -ENOMEM; + } } else { pkt = newpkt; }
@@ -154,7 +158,6 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt) if (layr->usestx) { if (tail_pkt != NULL) pkt = cfpkt_append(pkt, tail_pkt, 0); - /* Start search for next STX if frame failed */ continue; } else {
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index aff31f34528f..6e5b7079a684 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -123,6 +123,12 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) struct caif_payload_info *info; u8 flow_off = SRVL_FLOW_OFF; pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE); + if (!pkt) { + pr_warning("CAIF: %s(): Out of memory\n", + __func__); + return -ENOMEM; + } + if (cfpkt_add_head(pkt, &flow_off, 1) < 0) { pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 0fd827f49491..e04f7d964e83 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -84,7 +84,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt) return ret; caif_assert(layr->dn != NULL); caif_assert(layr->dn->transmit != NULL); - if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { + if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) { pr_warning("CAIF: %s(): Packet too large - size=%d\n", __func__, cfpkt_getlen(pkt)); return -EOVERFLOW;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e0097531417a..f5b6f43a4c2e 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -229,15 +229,17 @@ EXPORT_SYMBOL(skb_free_datagram); void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb) { + bool slow; + if (likely(atomic_read(&skb->users) == 1)) smp_rmb(); else if (likely(!atomic_dec_and_test(&skb->users))) return; - lock_sock_bh(sk); + slow = lock_sock_fast(sk); skb_orphan(skb); sk_mem_reclaim_partial(sk); - unlock_sock_bh(sk); + unlock_sock_fast(sk, slow); /* skb is now orphaned, can be freed outside of locked section */ __kfree_skb(skb);
diff --git a/net/core/dev.c b/net/core/dev.c
index d273e4e3ecdc..2b3bf53bc687 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -954,18 +954,22 @@ int dev_alloc_name(struct net_device *dev, const char *name) } EXPORT_SYMBOL(dev_alloc_name); -static int dev_get_valid_name(struct net *net, const char *name, char *buf, - bool fmt) +static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt) { + struct net *net; + + BUG_ON(!dev_net(dev)); + net = dev_net(dev); + if (!dev_valid_name(name)) return -EINVAL; if (fmt && strchr(name, '%')) - return __dev_alloc_name(net, name, buf); + return dev_alloc_name(dev, name); else if (__dev_get_by_name(net, name)) return -EEXIST; - else if (buf != name) - strlcpy(buf, name, IFNAMSIZ); + else if (dev->name != name) + strlcpy(dev->name, name, IFNAMSIZ); return 0; }
@@ -997,7 +1001,7 @@ int dev_change_name(struct net_device *dev, const char *newname) memcpy(oldname, dev->name, IFNAMSIZ); - err = dev_get_valid_name(net, newname, dev->name, 1); + err = dev_get_valid_name(dev, newname, 1); if (err < 0) return err;
@@ -2249,11 +2253,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, if (skb_rx_queue_recorded(skb)) { u16 index = skb_get_rx_queue(skb); if (unlikely(index >= dev->num_rx_queues)) { - if (net_ratelimit()) { - pr_warning("%s received packet on queue " - "%u, but number of RX queues is %u\n", - dev->name, index, dev->num_rx_queues); - } + WARN_ONCE(dev->num_rx_queues > 1, "%s received packet " + "on queue %u, but number of RX queues is %u\n", + dev->name, index, dev->num_rx_queues); goto done; } rxqueue = dev->_rx + index;
@@ -2421,10 +2423,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, if (skb_queue_len(&sd->input_pkt_queue)) { enqueue: __skb_queue_tail(&sd->input_pkt_queue, skb); -#ifdef CONFIG_RPS - *qtail = sd->input_queue_head + - skb_queue_len(&sd->input_pkt_queue); -#endif + input_queue_tail_incr_save(sd, qtail); rps_unlock(sd); local_irq_restore(flags); return NET_RX_SUCCESS;
@@ -2794,7 +2793,7 @@ static int __netif_receive_skb(struct sk_buff *skb) struct net_device *orig_dev; struct net_device *master; struct net_device *null_or_orig; - struct net_device *null_or_bond; + struct net_device *orig_or_bond; int ret = NET_RX_DROP; __be16 type;
@@ -2811,13 +2810,24 @@ static int __netif_receive_skb(struct sk_buff *skb) if (!skb->skb_iif) skb->skb_iif = skb->dev->ifindex; + /* + * bonding note: skbs received on inactive slaves should only + * be delivered to pkt handlers that are exact matches. Also + * the deliver_no_wcard flag will be set. If packet handlers + * are sensitive to duplicate packets these skbs will need to + * be dropped at the handler. The vlan accel path may have + * already set the deliver_no_wcard flag. + */ null_or_orig = NULL; orig_dev = skb->dev; master = ACCESS_ONCE(orig_dev->master); - if (master) { - if (skb_bond_should_drop(skb, master)) + if (skb->deliver_no_wcard) + null_or_orig = orig_dev; + else if (master) { + if (skb_bond_should_drop(skb, master)) { + skb->deliver_no_wcard = 1; null_or_orig = orig_dev; /* deliver only exact match */ - else + } else skb->dev = master; }
@@ -2867,10 +2877,10 @@ ncls: * device that may have registered for a specific ptype. The * handler may have to adjust skb->dev and orig_dev. */ - null_or_bond = NULL; + orig_or_bond = orig_dev; if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) && (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) { - null_or_bond = vlan_dev_real_dev(skb->dev); + orig_or_bond = vlan_dev_real_dev(skb->dev); } type = skb->protocol;
@@ -2878,7 +2888,7 @@ ncls: &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { if (ptype->type == type && (ptype->dev == null_or_orig || ptype->dev == skb->dev || ptype->dev == orig_dev || - ptype->dev == null_or_bond)) { + ptype->dev == orig_or_bond)) { if (pt_prev) ret = deliver_skb(skb, pt_prev, orig_dev); pt_prev = ptype;
@@ -2959,7 +2969,7 @@ static void flush_backlog(void *arg) if (skb->dev == dev) { __skb_unlink(skb, &sd->input_pkt_queue); kfree_skb(skb); - input_queue_head_add(sd, 1); + input_queue_head_incr(sd); } } rps_unlock(sd);
@@ -2968,6 +2978,7 @@ static void flush_backlog(void *arg) if (skb->dev == dev) { __skb_unlink(skb, &sd->process_queue); kfree_skb(skb); + input_queue_head_incr(sd); } } }
@@ -3323,18 +3334,20 @@ static int process_backlog(struct napi_struct *napi, int quota) while ((skb = __skb_dequeue(&sd->process_queue))) { local_irq_enable(); __netif_receive_skb(skb); - if (++work >= quota) - return work; local_irq_disable(); + input_queue_head_incr(sd); + if (++work >= quota) { + local_irq_enable(); + return work; + } } rps_lock(sd); qlen = skb_queue_len(&sd->input_pkt_queue); - if (qlen) { - input_queue_head_add(sd, qlen); + if (qlen) skb_queue_splice_tail_init(&sd->input_pkt_queue, &sd->process_queue); - } + if (qlen < quota - work) { /* * Inline a custom version of __napi_complete().
@@ -4960,7 +4973,7 @@ int register_netdevice(struct net_device *dev) } } - ret = dev_get_valid_name(net, dev->name, dev->name, 0); + ret = dev_get_valid_name(dev, dev->name, 0); if (ret) goto err_uninit;
@@ -5558,7 +5571,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char /* We get here if we can't use the current device name */ if (!pat) goto out; - if (dev_get_valid_name(net, pat, dev->name, 1)) + if (dev_get_valid_name(dev, pat, 1)) goto out; }
@@ -5661,12 +5674,14 @@ static int dev_cpu_callback(struct notifier_block *nfb, local_irq_enable(); /* Process offline CPU's input_pkt_queue */ - while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { + while ((skb = __skb_dequeue(&oldsd->process_queue))) { netif_rx(skb); - input_queue_head_add(oldsd, 1); + input_queue_head_incr(oldsd); } - while ((skb = __skb_dequeue(&oldsd->process_queue))) + while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { netif_rx(skb); + input_queue_head_incr(oldsd); + } return NOTIFY_OK; }
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index cf208d8042b1..ad41529fb60f 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -172,12 +172,12 @@ out: return; } -static void trace_kfree_skb_hit(struct sk_buff *skb, void *location) +static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location) { trace_drop_common(skb, location); } -static void trace_napi_poll_hit(struct napi_struct *napi) +static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi) { struct dm_hw_stat_delta *new_stat;
@@ -225,12 +225,12 @@ static int set_all_monitor_traces(int state) switch (state) { case TRACE_ON: - rc |= register_trace_kfree_skb(trace_kfree_skb_hit); - rc |= register_trace_napi_poll(trace_napi_poll_hit); + rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL); + rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL); break; case TRACE_OFF: - rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit); - rc |= unregister_trace_napi_poll(trace_napi_poll_hit); + rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL); + rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL); tracepoint_synchronize_unregister();
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cf8e70392fe0..785e5276a300 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock); /* Protects against soft lockup during large deletion */ static struct rb_root est_root = RB_ROOT; +static DEFINE_SPINLOCK(est_tree_lock); static void est_timer(unsigned long arg) {
@@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats * * Returns 0 on success or a negative error code. * - * NOTE: Called under rtnl_mutex */ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_rate_est *rate_est,
@@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, est->last_packets = bstats->packets; est->avpps = rate_est->pps<<10; + spin_lock(&est_tree_lock); if (!elist[idx].timer.function) { INIT_LIST_HEAD(&elist[idx].list); setup_timer(&elist[idx].timer, est_timer, idx); }
@@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, list_add_rcu(&est->list, &elist[idx].list); gen_add_node(est); + spin_unlock(&est_tree_lock); return 0; }
@@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head) * * Removes the rate estimator specified by &bstats and &rate_est. * - * NOTE: Called under rtnl_mutex */ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_rate_est *rate_est) { struct gen_estimator *e; + spin_lock(&est_tree_lock); while ((e = gen_find_node(bstats, rate_est))) { rb_erase(&e->node, &est_root);
@@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, list_del_rcu(&e->list); call_rcu(&e->e_rcu, __gen_kill_estimator); } + spin_unlock(&est_tree_lock); } EXPORT_SYMBOL(gen_kill_estimator);
@@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator); bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats, const struct gnet_stats_rate_est *rate_est) { + bool res; + ASSERT_RTNL(); - return gen_find_node(bstats, rate_est) != NULL; + spin_lock(&est_tree_lock); + res = gen_find_node(bstats, rate_est) != NULL; + spin_unlock(&est_tree_lock); + + return res; } EXPORT_SYMBOL(gen_estimator_active);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index bff37908bd55..6ba1c0eece03 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -934,6 +934,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) kfree_skb(buff); NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards); } + skb_dst_force(skb); __skb_queue_tail(&neigh->arp_queue, skb); } rc = 1;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2ad68da418df..1dacd7ba8dbb 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2170,7 +2170,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) end_time = ktime_now(); pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); - pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay); + pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); } static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e4b9870e4706..1a2af24e9e3d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -650,11 +650,12 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev) if (dev->dev.parent && dev_is_pci(dev->dev.parent)) { int num_vfs = dev_num_vf(dev->dev.parent); - size_t size = nlmsg_total_size(sizeof(struct nlattr)); - size += nlmsg_total_size(num_vfs * sizeof(struct nlattr)); - size += num_vfs * (sizeof(struct ifla_vf_mac) + - sizeof(struct ifla_vf_vlan) + - sizeof(struct ifla_vf_tx_rate)); + size_t size = nla_total_size(sizeof(struct nlattr)); + size += nla_total_size(num_vfs * sizeof(struct nlattr)); + size += num_vfs * + (nla_total_size(sizeof(struct ifla_vf_mac)) + + nla_total_size(sizeof(struct ifla_vf_vlan)) + + nla_total_size(sizeof(struct ifla_vf_tx_rate))); return size; } else return 0;
@@ -722,14 +723,13 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev) for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) { vf_port = nla_nest_start(skb, IFLA_VF_PORT); - if (!vf_port) { - nla_nest_cancel(skb, vf_ports); - return -EMSGSIZE; - } + if (!vf_port) + goto nla_put_failure; NLA_PUT_U32(skb, IFLA_PORT_VF, vf); err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb); + if (err == -EMSGSIZE) + goto nla_put_failure; if (err) { -nla_put_failure: nla_nest_cancel(skb, vf_port); continue; }
@@ -739,6 +739,10 @@ nla_put_failure: nla_nest_end(skb, vf_ports); return 0; + +nla_put_failure: + nla_nest_cancel(skb, vf_ports); + return -EMSGSIZE; } static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
@@ -753,7 +757,7 @@ static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev) err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb); if (err) { nla_nest_cancel(skb, port_self); - return err; + return (err == -EMSGSIZE) ? err : 0; } nla_nest_end(skb, port_self);
@@ -1199,8 +1203,10 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, struct nlattr *attr; int rem; nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { - if (nla_type(attr) != IFLA_VF_INFO) + if (nla_type(attr) != IFLA_VF_INFO) { + err = -EINVAL; goto errout; + } err = do_setvfinfo(dev, attr); if (err < 0) goto errout;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c543dd252433..9f07e749d7b1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -482,22 +482,22 @@ EXPORT_SYMBOL(consume_skb); * reference count dropping and cleans up the skbuff as if it * just came from __alloc_skb(). */ -int skb_recycle_check(struct sk_buff *skb, int skb_size) +bool skb_recycle_check(struct sk_buff *skb, int skb_size) { struct skb_shared_info *shinfo; if (irqs_disabled()) - return 0; + return false; if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) - return 0; + return false; skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); if (skb_end_pointer(skb) - skb->head < skb_size) - return 0; + return false; if (skb_shared(skb) || skb_cloned(skb)) - return 0; + return false; skb_release_head_state(skb);
@@ -509,7 +509,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size) skb->data = skb->head + NET_SKB_PAD; skb_reset_tail_pointer(skb); - return 1; + return true; } EXPORT_SYMBOL(skb_recycle_check);
@@ -1406,12 +1406,13 @@ new_page: /* * Fill page/offset/length into spd, if it can hold more pages. */ -static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page, +static inline int spd_fill_page(struct splice_pipe_desc *spd, + struct pipe_inode_info *pipe, struct page *page, unsigned int *len, unsigned int offset, struct sk_buff *skb, int linear, struct sock *sk) { - if (unlikely(spd->nr_pages == PIPE_BUFFERS)) + if (unlikely(spd->nr_pages == pipe->buffers)) return 1; if (linear) {
@@ -1447,7 +1448,8 @@ static inline int __splice_segment(struct page *page, unsigned int poff, unsigned int plen, unsigned int *off, unsigned int *len, struct sk_buff *skb, struct splice_pipe_desc *spd, int linear, - struct sock *sk) + struct sock *sk, + struct pipe_inode_info *pipe) { if (!*len) return 1;
@@ -1470,7 +1472,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff, /* the linear region may spread across several pages */ flen = min_t(unsigned int, flen, PAGE_SIZE - poff); - if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk)) + if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk)) return 1; __segment_seek(&page, &poff, &plen, flen);
@@ -1485,9 +1487,9 @@ static inline int __splice_segment(struct page *page, unsigned int poff, * Map linear and fragment data from the skb to spd. It reports failure if the * pipe is full or if we already spliced the requested length. */ -static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, - unsigned int *len, struct splice_pipe_desc *spd, - struct sock *sk) +static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, + unsigned int *offset, unsigned int *len, + struct splice_pipe_desc *spd, struct sock *sk) { int seg;
@@ -1497,7 +1499,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, if (__splice_segment(virt_to_page(skb->data), (unsigned long) skb->data & (PAGE_SIZE - 1), skb_headlen(skb), - offset, len, skb, spd, 1, sk)) + offset, len, skb, spd, 1, sk, pipe)) return 1; /*
@@ -1507,7 +1509,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset, const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; if (__splice_segment(f->page, f->page_offset, f->size, - offset, len, skb, spd, 0, sk)) + offset, len, skb, spd, 0, sk, pipe)) return 1; }
@@ -1524,8 +1526,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, struct pipe_inode_info *pipe, unsigned int tlen, unsigned int flags) { - struct partial_page partial[PIPE_BUFFERS]; - struct page *pages[PIPE_BUFFERS]; + struct partial_page partial[PIPE_DEF_BUFFERS]; + struct page *pages[PIPE_DEF_BUFFERS]; struct splice_pipe_desc spd = { .pages = pages, .partial = partial,
@@ -1535,12 +1537,16 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, }; struct sk_buff *frag_iter; struct sock *sk = skb->sk; + int ret = 0; + + if (splice_grow_spd(pipe, &spd)) + return -ENOMEM; /* * __skb_splice_bits() only fails if the output has no room left, * so no point in going over the frag_list for the error case. */ - if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk)) + if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) goto done; else if (!tlen) goto done;
@@ -1551,14 +1557,12 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, skb_walk_frags(skb, frag_iter) { if (!tlen) break; - if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk)) + if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk)) break; } done: if (spd.nr_pages) { - int ret; - /* * Drop the socket lock, otherwise we have reverse * locking dependencies between sk_lock and i_mutex
@@ -1571,10 +1575,10 @@ done: release_sock(sk); ret = splice_to_pipe(pipe, &spd); lock_sock(sk); - return ret; } - return 0; + splice_shrink_spd(pipe, &spd); + return ret; } /**
@@ -2718,6 +2722,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p); skb_shinfo(nskb)->frag_list = p; skb_shinfo(nskb)->gso_size = pinfo->gso_size; + pinfo->gso_size = 0; skb_header_release(p); nskb->prev = p;
@@ -2960,6 +2965,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) } EXPORT_SYMBOL_GPL(skb_cow_data); +static void sock_rmem_free(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + + atomic_sub(skb->truesize, &sk->sk_rmem_alloc); +} + +/* + * Note: We dont mem charge error packets (no sk_forward_alloc changes) + */ +int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) +{ + if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= + (unsigned)sk->sk_rcvbuf) + return -ENOMEM; + + skb_orphan(skb); + skb->sk = sk; + skb->destructor = sock_rmem_free; + atomic_add(skb->truesize, &sk->sk_rmem_alloc); + + skb_queue_tail(&sk->sk_error_queue, skb); + if (!sock_flag(sk, SOCK_DEAD)) + sk->sk_data_ready(sk, skb->len); + return 0; +} +EXPORT_SYMBOL(sock_queue_err_skb); + void skb_tstamp_tx(struct sk_buff *orig_skb, struct skb_shared_hwtstamps *hwtstamps) {
@@ -2991,7 +3024,9 @@ void skb_tstamp_tx(struct sk_buff *orig_skb, memset(serr, 0, sizeof(*serr)); serr->ee.ee_errno = ENOMSG; serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; + err = sock_queue_err_skb(sk, skb); + if (err) kfree_skb(skb); }
diff --git a/net/core/sock.c b/net/core/sock.c
index bf88a167c8f2..2cf7f9f7e775 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -123,6 +123,7 @@ #include <linux/net_tstamp.h> #include <net/xfrm.h> #include <linux/ipsec.h> +#include <net/cls_cgroup.h> #include <linux/filter.h>
@@ -217,6 +218,11 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); EXPORT_SYMBOL(sysctl_optmem_max); +#if defined(CONFIG_CGROUPS) && !defined(CONFIG_NET_CLS_CGROUP) +int net_cls_subsys_id = -1; +EXPORT_SYMBOL_GPL(net_cls_subsys_id); +#endif + static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen) { struct timeval tv;
@@ -1050,6 +1056,17 @@ static void sk_prot_free(struct proto *prot, struct sock *sk) module_put(owner); } +#ifdef CONFIG_CGROUPS +void sock_update_classid(struct sock *sk) +{ + u32 classid = task_cls_classid(current); + + if (classid && classid != sk->sk_classid) + sk->sk_classid = classid; +} +EXPORT_SYMBOL(sock_update_classid); +#endif + /** * sk_alloc - All socket objects are allocated here * @net: the applicable net namespace
@@ -1073,6 +1090,8 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, sock_lock_init(sk); sock_net_set(sk, get_net(net)); atomic_set(&sk->sk_wmem_alloc, 1); + + sock_update_classid(sk); } return sk;
@@ -1988,6 +2007,39 @@ void release_sock(struct sock *sk) } EXPORT_SYMBOL(release_sock); +/** + * lock_sock_fast - fast version of lock_sock + * @sk: socket + * + * This version should be used for very small section, where process wont block + * return false if fast path is taken + * sk_lock.slock locked, owned = 0, BH disabled + * return true if slow path is taken + * sk_lock.slock unlocked, owned = 1, BH enabled + */ +bool lock_sock_fast(struct sock *sk) +{ + might_sleep(); + spin_lock_bh(&sk->sk_lock.slock); + + if (!sk->sk_lock.owned) + /* + * Note : We must disable BH + */ + return false; + + __lock_sock(sk); + sk->sk_lock.owned = 1; + spin_unlock(&sk->sk_lock.slock); + /* + * The sk_lock has mutex_lock() semantics here: + */ + mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_); + local_bh_enable(); + return true; +} +EXPORT_SYMBOL(lock_sock_fast); + int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) { struct timeval tv;
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 58f7bc156850..6beb6a7d6fba 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -124,9 +124,9 @@ static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb) return queued; } -static u8 dccp_reset_code_convert(const u8 code) +static u16 dccp_reset_code_convert(const u8 code) { - const u8 error_code[] = { + const u16 error_code[] = { [DCCP_RESET_CODE_CLOSED] = 0, /* normal termination */ [DCCP_RESET_CODE_UNSPECIFIED] = 0, /* nothing known */ [DCCP_RESET_CODE_ABORTED] = ECONNRESET,
@@ -148,7 +148,7 @@ static u8 dccp_reset_code_convert(const u8 code) static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb) { - u8 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code); + u16 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code); sk->sk_err = err;
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 1b08cae9c65b..07395f861d35 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -296,7 +296,7 @@ static inline u8 dccp_ndp_len(const u64 ndp) { if (likely(ndp <= 0xFF)) return 1; - return likely(ndp <= USHORT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6); + return likely(ndp <= USHRT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6); } int dccp_insert_option(struct sock *sk, struct sk_buff *skb,
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c
index 3d803a1b9fb6..1627ef2e8522 100644
--- a/net/ieee802154/wpan-class.c
+++ b/net/ieee802154/wpan-class.c
@@ -147,13 +147,15 @@ struct wpan_phy *wpan_phy_alloc(size_t priv_size) struct wpan_phy *phy = kzalloc(sizeof(*phy) + priv_size, GFP_KERNEL); + if (!phy) + goto out; mutex_lock(&wpan_phy_mutex); phy->idx = wpan_phy_idx++; if (unlikely(!wpan_phy_idx_valid(phy->idx))) { wpan_phy_idx--; mutex_unlock(&wpan_phy_mutex); kfree(phy); - return NULL; + goto out; } mutex_unlock(&wpan_phy_mutex);
@@ -168,6 +170,9 @@ struct wpan_phy *wpan_phy_alloc(size_t priv_size) phy->current_page = 0; /* for compatibility */ return phy; + +out: + return NULL; } EXPORT_SYMBOL(wpan_phy_alloc);
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 8e3a1fd938ab..7c3a7d191249 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -303,7 +303,7 @@ config ARPD If unsure, say N. config SYN_COOKIES - bool "IP: TCP syncookie support (disabled per default)" + bool "IP: TCP syncookie support" ---help--- Normal TCP/IP networking is open to an attack known as "SYN flooding". This denial-of-service attack prevents legitimate remote
@@ -328,13 +328,13 @@ config SYN_COOKIES server is really overloaded. If this happens frequently better turn them off. - If you say Y here, note that SYN cookies aren't enabled by default; - you can enable them by saying Y to "/proc file system support" and + If you say Y here, you can disable SYN cookies at run time by + saying Y to "/proc file system support" and "Sysctl support" below and executing the command - echo 1 >/proc/sys/net/ipv4/tcp_syncookies + echo 0 > /proc/sys/net/ipv4/tcp_syncookies - at boot time after the /proc file system has been mounted. + after the /proc file system has been mounted. If unsure, say N.
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 45889103b3e2..757f25eb9b4b 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -267,8 +267,10 @@ static void __net_exit ipmr_rules_exit(struct net *net) { struct mr_table *mrt, *next; - list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) + list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { + list_del(&mrt->list); kfree(mrt); + } fib_rules_unregister(net->ipv4.mr_rules_ops); } #else @@ -1911,7 +1913,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct rtattr *mp_head; /* If cache is unresolved, don't try to parse IIF and OIF */ - if (c->mfc_parent > MAXVIFS) + if (c->mfc_parent >= MAXVIFS) return -ENOENT; if (VIF_EXISTS(mrt, c->mfc_parent)) diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 63958f3394a5..4b6c5ca610fc 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -336,7 +336,7 @@ ipt_do_table(struct sk_buff *skb, cpu = smp_processor_id(); table_base = private->entries[cpu]; jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; - stackptr = &private->stackptr[cpu]; + stackptr = per_cpu_ptr(private->stackptr, cpu); origptr = *stackptr; e = get_entry(table_base, private->hook_entry[hook]); diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 5c24db4a3c91..9f6b22206c52 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -347,7 +347,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, { .sport = th->dest, .dport = th->source } } }; security_req_classify_flow(req, &fl); - if (ip_route_output_key(&init_net, &rt, &fl)) { + if (ip_route_output_key(sock_net(sk), &rt, &fl)) { reqsk_free(req); goto out; } diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c index c209e054a634..377bc9349371 100644 --- a/net/ipv4/tcp_hybla.c +++ b/net/ipv4/tcp_hybla.c @@ -126,8 +126,8 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) * calculate 2^fract in a <<7 value. 
*/ is_slowstart = 1; - increment = ((1 << ca->rho) * hybla_fraction(rho_fractions)) - - 128; + increment = ((1 << min(ca->rho, 16U)) * + hybla_fraction(rho_fractions)) - 128; } else { /* * congestion avoidance diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3e6dafcb1071..548d575e6cc6 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2639,7 +2639,7 @@ static void DBGUNDO(struct sock *sk, const char *msg) if (sk->sk_family == AF_INET) { printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", msg, - &inet->daddr, ntohs(inet->dport), + &inet->inet_daddr, ntohs(inet->inet_dport), tp->snd_cwnd, tcp_left_out(tp), tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); @@ -2649,7 +2649,7 @@ static void DBGUNDO(struct sock *sk, const char *msg) struct ipv6_pinfo *np = inet6_sk(sk); printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", msg, - &np->daddr, ntohs(inet->dport), + &np->daddr, ntohs(inet->inet_dport), tp->snd_cwnd, tcp_left_out(tp), tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 202cf09c4cd4..fe193e53af44 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1555,6 +1555,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) #endif if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ + sock_rps_save_rxhash(sk, skb->rxhash); TCP_CHECK_TIMER(sk); if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { rsk = sk; @@ -1579,7 +1580,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) } return 0; } - } + } else + sock_rps_save_rxhash(sk, skb->rxhash); + TCP_CHECK_TIMER(sk); if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) { @@ -1672,8 +1675,6 @@ process: skb->dev = NULL; - sock_rps_save_rxhash(sk, skb->rxhash); - bh_lock_sock_nested(sk); ret = 0; if (!sock_owned_by_user(sk)) { diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 9de6a698f91d..eec4ff456e33 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -633,9 +633,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) if (!inet->recverr) { if (!harderr || sk->sk_state != TCP_ESTABLISHED) goto out; - } else { + } else ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); - } + sk->sk_err = err; sk->sk_error_report(sk); out: @@ -1063,10 +1063,11 @@ static unsigned int first_packet_length(struct sock *sk) spin_unlock_bh(&rcvq->lock); if (!skb_queue_empty(&list_kill)) { - lock_sock_bh(sk); + bool slow = lock_sock_fast(sk); + __skb_queue_purge(&list_kill); sk_mem_reclaim_partial(sk); - unlock_sock_bh(sk); + unlock_sock_fast(sk, slow); } return res; } @@ -1123,6 +1124,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, int peeked; int err; int is_udplite = IS_UDPLITE(sk); + bool slow; /* * Check any passed addresses @@ -1197,10 +1199,10 @@ out: return err; csum_copy_err: - lock_sock_bh(sk); + slow = lock_sock_fast(sk); if (!skb_kill_datagram(sk, skb, flags)) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); - unlock_sock_bh(sk); + unlock_sock_fast(sk, slow); if (noblock) return -EAGAIN; @@ -1625,9 +1627,9 @@ int udp_rcv(struct sk_buff *skb) void udp_destroy_sock(struct sock *sk) { - lock_sock_bh(sk); + bool slow = lock_sock_fast(sk); udp_flush_pending_frames(sk); - unlock_sock_bh(sk); + unlock_sock_fast(sk, slow); } /* @@ -1686,8 +1688,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, return -ENOPROTOOPT; if (val != 0 && val < 8) /* Illegal coverage: use default (8) */ val = 8; - else if (val > USHORT_MAX) - val 
= USHORT_MAX; + else if (val > USHRT_MAX) + val = USHRT_MAX; up->pcslen = val; up->pcflag |= UDPLITE_SEND_CC; break; @@ -1700,8 +1702,8 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, return -ENOPROTOOPT; if (val != 0 && val < 8) /* Avoid silly minimal values. */ val = 8; - else if (val > USHORT_MAX) - val = USHORT_MAX; + else if (val > USHRT_MAX) + val = USHRT_MAX; up->pcrlen = val; up->pcflag |= UDPLITE_RECV_CC; break; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index ce7992982557..03e62f94ff8e 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -483,7 +483,7 @@ route_done: np->tclass, NULL, &fl, (struct rt6_info*)dst, MSG_DONTWAIT, np->dontfrag); if (err) { - ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); + ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); goto out_put; } @@ -565,7 +565,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) np->dontfrag); if (err) { - ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS); + ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); goto out_put; } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index cd963f64e27c..89425af0684c 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -507,7 +507,7 @@ int ip6_forward(struct sk_buff *skb) if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; - if (skb->len > mtu) { + if (skb->len > mtu && !skb_is_gso(skb)) { /* Again, force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index bd9e7d3e9c8e..66078dad7fe8 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -120,7 +120,7 @@ static void mroute_clean_tables(struct mr6_table *mrt); static void ipmr_expire_process(unsigned long arg); #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES -#define ip6mr_for_each_table(mrt, met) \ +#define ip6mr_for_each_table(mrt, net) \ list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list) static struct mr6_table *ip6mr_get_table(struct net *net, u32 id) @@ -254,8 +254,10 @@ static void __net_exit ip6mr_rules_exit(struct net *net) { struct mr6_table *mrt, *next; - list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) + list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) { + list_del(&mrt->list); ip6mr_free_table(mrt); + } fib_rules_unregister(net->ipv6.mr6_rules_ops); } #else @@ -2017,7 +2019,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, struct rtattr *mp_head; /* If cache is unresolved, don't try to parse IIF and OIF */ - if (c->mf6c_parent > MAXMIFS) + if (c->mf6c_parent >= MAXMIFS) return -ENOENT; if (MIF_EXISTS(mrt, c->mf6c_parent)) diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 59f1881968c7..ab1622d7d409 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1356,7 +1356,10 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) IPV6_TLV_PADN, 0 }; /* we assume size > sizeof(ra) here */ - skb = sock_alloc_send_skb(sk, size + LL_ALLOCATED_SPACE(dev), 1, &err); + size += LL_ALLOCATED_SPACE(dev); + /* limit our allocations to order-0 page */ + size = min_t(int, size, SKB_MAX_ORDER(0, 0)); + skb = sock_alloc_send_skb(sk, size, 1, &err); if (!skb) return NULL; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 6f517bd83692..9d2d68f0e605 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -363,7 +363,7 @@ ip6t_do_table(struct sk_buff *skb, cpu = smp_processor_id(); table_base = 
private->entries[cpu]; jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; - stackptr = &private->stackptr[cpu]; + stackptr = per_cpu_ptr(private->stackptr, cpu); origptr = *stackptr; e = get_entry(table_base, private->hook_entry[hook]); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 294cbe8b0725..252d76199c41 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -814,7 +814,7 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk, { int flags = 0; - if (fl->oif || rt6_need_strict(&fl->fl6_dst)) + if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl->fl6_dst)) flags |= RT6_LOOKUP_F_IFACE; if (!ipv6_addr_any(&fl->fl6_src)) diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 3d7a2c0b836a..87be58673b55 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -328,6 +328,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, int err; int is_udplite = IS_UDPLITE(sk); int is_udp4; + bool slow; if (addr_len) *addr_len=sizeof(struct sockaddr_in6); @@ -424,7 +425,7 @@ out: return err; csum_copy_err: - lock_sock_bh(sk); + slow = lock_sock_fast(sk); if (!skb_kill_datagram(sk, skb, flags)) { if (is_udp4) UDP_INC_STATS_USER(sock_net(sk), @@ -433,7 +434,7 @@ csum_copy_err: UDP6_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } - unlock_sock_bh(sk); + unlock_sock_fast(sk, slow); if (flags & MSG_DONTWAIT) return -EAGAIN; diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index c8b4599a752e..9637e45744fa 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1619,7 +1619,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) save_message: save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); if (!save_msg) - return; + goto out_unlock; save_msg->path = path; save_msg->msg = *msg; diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index fd8b28361a64..f28ad2cc8428 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -632,13 +632,14 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); if (!iucv_irq_data[cpu]) - return NOTIFY_BAD; + return notifier_from_errno(-ENOMEM); + iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); if (!iucv_param[cpu]) { kfree(iucv_irq_data[cpu]); iucv_irq_data[cpu] = NULL; - return NOTIFY_BAD; + return notifier_from_errno(-ENOMEM); } iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); @@ -647,7 +648,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, iucv_param[cpu] = NULL; kfree(iucv_irq_data[cpu]); iucv_irq_data[cpu] = NULL; - return NOTIFY_BAD; + return notifier_from_errno(-ENOMEM); } break; case CPU_UP_CANCELED: @@ -677,7 +678,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, cpu_clear(cpu, cpumask); if (cpus_empty(cpumask)) /* Can't offline last IUCV enabled cpu. 
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index c8b4599a752e..9637e45744fa 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1619,7 +1619,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 save_message:
 	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
 	if (!save_msg)
-		return;
+		goto out_unlock;
 	save_msg->path = path;
 	save_msg->msg = *msg;
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index fd8b28361a64..f28ad2cc8428 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -632,13 +632,14 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 		iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
 					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
 		if (!iucv_irq_data[cpu])
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
+
 		iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
 				     GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
 		if (!iucv_param[cpu]) {
 			kfree(iucv_irq_data[cpu]);
 			iucv_irq_data[cpu] = NULL;
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
 		}
 		iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
 					GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
@@ -647,7 +648,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 			iucv_param[cpu] = NULL;
 			kfree(iucv_irq_data[cpu]);
 			iucv_irq_data[cpu] = NULL;
-			return NOTIFY_BAD;
+			return notifier_from_errno(-ENOMEM);
 		}
 		break;
 	case CPU_UP_CANCELED:
@@ -677,7 +678,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 		cpu_clear(cpu, cpumask);
 		if (cpus_empty(cpumask))
 			/* Can't offline last IUCV enabled cpu. */
-			return NOTIFY_BAD;
+			return notifier_from_errno(-EINVAL);
 		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
 		if (cpus_empty(iucv_irq_cpumask))
 			smp_call_function_single(first_cpu(iucv_buffer_cpumask),
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index c163d0a149f4..98258b7341e3 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -332,14 +332,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 					IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
 	spin_unlock(&local->ampdu_lock);
-	spin_unlock_bh(&sta->lock);
-	/* send an addBA request */
+	/* prepare tid data */
 	sta->ampdu_mlme.dialog_token_allocator++;
 	sta->ampdu_mlme.tid_tx[tid]->dialog_token =
 			sta->ampdu_mlme.dialog_token_allocator;
 	sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
+	spin_unlock_bh(&sta->lock);
+
+	/* send AddBA request */
 	ieee80211_send_addba_request(sdata, pubsta->addr, tid,
 			 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
 			 sta->ampdu_mlme.tid_tx[tid]->ssn,
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 5d218c530a4e..32be11e4c4d9 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -5,7 +5,7 @@
 #include <linux/nl80211.h>
 #include "ieee80211_i.h"
-enum ieee80211_chan_mode
+static enum ieee80211_chan_mode
 __ieee80211_get_channel_mode(struct ieee80211_local *local,
 			     struct ieee80211_sub_if_data *ignore)
 {
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 4f2271316650..9c1da0809160 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -349,7 +349,7 @@ static inline int drv_get_survey(struct ieee80211_local *local, int idx,
 				struct survey_info *survey)
 {
 	int ret = -EOPNOTSUPP;
-	if (local->ops->conf_tx)
+	if (local->ops->get_survey)
 		ret = local->ops->get_survey(&local->hw, idx, survey);
 	/* trace_drv_get_survey(local, idx, survey, ret); */
 	return ret;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 8d4b41787dcf..e8f6e3b252d8 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -140,7 +140,6 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 				struct ieee80211_sub_if_data,
 				u.ap);
-	key->conf.ap_addr = sdata->dev->dev_addr;
 	ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf);
 	if (!ret) {
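[Editor's note] The drv_get_survey() change above fixes a copy-and-paste guard: the code tested ops->conf_tx but then called ops->get_survey, so a driver implementing one hook and not the other would dereference NULL. A minimal sketch of the corrected pattern (types invented for illustration):

```c
#include <stddef.h>
#include <stdio.h>

struct driver_ops {
	int (*conf_tx)(int queue);	/* unrelated hook */
	int (*get_survey)(int idx);	/* may be NULL if unimplemented */
};

static int drv_get_survey(const struct driver_ops *ops, int idx)
{
	/* the guard must name the same hook that is then called */
	if (ops->get_survey)
		return ops->get_survey(idx);
	return -95;	/* stands in for -EOPNOTSUPP */
}

static int survey_impl(int idx) { return idx * 10; }

int main(void)
{
	struct driver_ops full = { NULL, survey_impl };
	struct driver_ops bare = { NULL, NULL };

	printf("%d %d\n", drv_get_survey(&full, 3), drv_get_survey(&bare, 3));
	return 0;	/* prints: 30 -95 */
}
```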
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0839c4e8fd2e..f803f8b72a93 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1692,14 +1692,52 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 			rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
 			break;
 		case IEEE80211_STYPE_ACTION:
-			if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
+			switch (mgmt->u.action.category) {
+			case WLAN_CATEGORY_BACK: {
+				struct ieee80211_local *local = sdata->local;
+				int len = skb->len;
+				struct sta_info *sta;
+
+				rcu_read_lock();
+				sta = sta_info_get(sdata, mgmt->sa);
+				if (!sta) {
+					rcu_read_unlock();
+					break;
+				}
+
+				local_bh_disable();
+
+				switch (mgmt->u.action.u.addba_req.action_code) {
+				case WLAN_ACTION_ADDBA_REQ:
+					if (len < (IEEE80211_MIN_ACTION_SIZE +
+						   sizeof(mgmt->u.action.u.addba_req)))
+						break;
+					ieee80211_process_addba_request(local, sta, mgmt, len);
+					break;
+				case WLAN_ACTION_ADDBA_RESP:
+					if (len < (IEEE80211_MIN_ACTION_SIZE +
+						   sizeof(mgmt->u.action.u.addba_resp)))
+						break;
+					ieee80211_process_addba_resp(local, sta, mgmt, len);
+					break;
+				case WLAN_ACTION_DELBA:
+					if (len < (IEEE80211_MIN_ACTION_SIZE +
+						   sizeof(mgmt->u.action.u.delba)))
+						break;
+					ieee80211_process_delba(sdata, sta, mgmt, len);
+					break;
+				}
+
+				local_bh_enable();
+				rcu_read_unlock();
 				break;
-
-			ieee80211_sta_process_chanswitch(sdata,
-					&mgmt->u.action.u.chan_switch.sw_elem,
-					(void *)ifmgd->associated->priv,
-					rx_status->mactime);
-			break;
+			}
+			case WLAN_CATEGORY_SPECTRUM_MGMT:
+				ieee80211_sta_process_chanswitch(sdata,
+						&mgmt->u.action.u.chan_switch.sw_elem,
+						(void *)ifmgd->associated->priv,
+						rx_status->mactime);
+				break;
+			}
 	}
 	mutex_unlock(&ifmgd->mtx);
@@ -1722,9 +1760,45 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 	mutex_unlock(&ifmgd->mtx);
 	if (skb->len >= 24 + 2 /* mgmt + deauth reason */ &&
-	    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH)
-		cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+	    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) {
+		struct ieee80211_local *local = sdata->local;
+		struct ieee80211_work *wk;
+
+		mutex_lock(&local->work_mtx);
+		list_for_each_entry(wk, &local->work_list, list) {
+			if (wk->sdata != sdata)
+				continue;
+
+			if (wk->type != IEEE80211_WORK_ASSOC)
+				continue;
+
+			if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN))
+				continue;
+			if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN))
+				continue;
+			/*
+			 * Printing the message only here means we can't
+			 * spuriously print it, but it also means that it
+			 * won't be printed when the frame comes in before
+			 * we even tried to associate or in similar cases.
+			 *
+			 * Ultimately, I suspect cfg80211 should print the
+			 * messages instead.
+			 */
+			printk(KERN_DEBUG
+			       "%s: deauthenticated from %pM (Reason: %u)\n",
+			       sdata->name, mgmt->bssid,
+			       le16_to_cpu(mgmt->u.deauth.reason_code));
+
+			list_del_rcu(&wk->list);
+			free_work(wk);
+			break;
+		}
+		mutex_unlock(&local->work_mtx);
+
+		cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
+	}
 out:
 	kfree_skb(skb);
 }
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 6e2a7bcd8cb8..be9abc2e6348 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1818,17 +1818,26 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
 		return RX_CONTINUE;
 	if (ieee80211_is_back_req(bar->frame_control)) {
+		struct {
+			__le16 control, start_seq_num;
+		} __packed bar_data;
+
 		if (!rx->sta)
 			return RX_DROP_MONITOR;
+
+		if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
+				  &bar_data, sizeof(bar_data)))
+			return RX_DROP_MONITOR;
+
 		spin_lock(&rx->sta->lock);
-		tid = le16_to_cpu(bar->control) >> 12;
+		tid = le16_to_cpu(bar_data.control) >> 12;
 		if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) {
 			spin_unlock(&rx->sta->lock);
 			return RX_DROP_MONITOR;
 		}
 		tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
-		start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
+		start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
 		/* reset session timer */
 		if (tid_agg_rx->timeout)
@@ -1935,6 +1944,9 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 		if (len < IEEE80211_MIN_ACTION_SIZE + 1)
 			break;
+		if (sdata->vif.type == NL80211_IFTYPE_STATION)
+			return ieee80211_sta_rx_mgmt(sdata, rx->skb);
+
 		switch (mgmt->u.action.u.addba_req.action_code) {
 		case WLAN_ACTION_ADDBA_REQ:
 			if (len < (IEEE80211_MIN_ACTION_SIZE +
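[Editor's note] The ieee80211_rx_h_ctrl() hunk stops dereferencing the BAR header in place and instead copies the two fields it needs with skb_copy_bits(), dropping the frame if the copy cannot be satisfied. A userspace sketch of the same fail-closed pattern (frame layout and offsets are invented):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct bar_fields {		/* just the two fields we parse */
	uint16_t control;
	uint16_t start_seq_num;
};

/* Bounds-checked copy out of a possibly short buffer; mirrors the
 * skb_copy_bits() contract of failing instead of reading past the end. */
static int copy_bits(const uint8_t *pkt, size_t pkt_len,
		     size_t off, void *dst, size_t len)
{
	if (len > pkt_len || off > pkt_len - len)
		return -1;	/* truncated frame: caller drops it */
	memcpy(dst, pkt + off, len);
	return 0;
}

int main(void)
{
	uint8_t frame[8] = { 0, 0, 0, 0, 0x34, 0x12, 0x10, 0x00 };
	struct bar_fields bar;

	if (copy_bits(frame, sizeof(frame), 4, &bar, sizeof(bar)) == 0)
		printf("control 0x%04x\n", bar.control);
	else
		puts("drop");
	return 0;
}
```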
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 730197591ab5..ba9360a475b0 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -259,7 +259,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 	skb_queue_head_init(&sta->tx_filtered);
 	for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
-		sta->last_seq_ctrl[i] = cpu_to_le16(USHORT_MAX);
+		sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 	printk(KERN_DEBUG "%s: Allocated STA %pM\n",
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 48a5e80957f0..df9d45544ca5 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -145,7 +145,7 @@ enum plink_state {
 /**
  * struct sta_ampdu_mlme - STA aggregation information.
  *
- * @tid_state_rx: TID's state in Rx session state machine.
+ * @tid_active_rx: TID's state in Rx session state machine.
 * @tid_rx: aggregation info for Rx per TID
 * @tid_state_tx: TID's state in Tx session state machine.
 * @tid_tx: aggregation info for Tx per TID
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index b83c530c5e0a..eeeb8bc73982 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -424,6 +424,16 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	spin_lock_bh(&nf_conntrack_lock);
+	/* We have to check the DYING flag inside the lock to prevent
+	   a race against nf_ct_get_next_corpse() possibly called from
+	   user context, else we insert an already 'dead' hash, blocking
+	   further use of that particular connection -JM */
+
+	if (unlikely(nf_ct_is_dying(ct))) {
+		spin_unlock_bh(&nf_conntrack_lock);
+		return NF_ACCEPT;
+	}
+
 	/* See if there's one in the list already, including reverse:
 	   NAT could have grabbed it without realizing, since we're
 	   not in the hash.  If there is, we lost race. */
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index b20f4275893c..53d892210a04 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1393,10 +1393,8 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
 	nf_ct_refresh(ct, skb, sip_timeout * HZ);
-	if (skb_is_nonlinear(skb)) {
-		pr_debug("Copy of skbuff not supported yet.\n");
-		return NF_ACCEPT;
-	}
+	if (unlikely(skb_linearize(skb)))
+		return NF_DROP;
 	dptr = skb->data + dataoff;
 	datalen = skb->len - dataoff;
@@ -1455,10 +1453,8 @@ static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
 	nf_ct_refresh(ct, skb, sip_timeout * HZ);
-	if (skb_is_nonlinear(skb)) {
-		pr_debug("Copy of skbuff not supported yet.\n");
-		return NF_ACCEPT;
-	}
+	if (unlikely(skb_linearize(skb)))
+		return NF_DROP;
 	dptr = skb->data + dataoff;
 	datalen = skb->len - dataoff;
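[Editor's note] The __nf_conntrack_confirm() hunk is a classic re-check-under-the-lock fix: the DYING flag can be set from another context, so any test made before nf_conntrack_lock is taken can go stale. A pthread sketch of the pattern (names invented):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static bool dying;		/* may be set by another thread */
static int  hash_entries;

/* Insert only if the object is still alive *after* the lock is held;
 * an earlier unlocked test could race with the thread setting 'dying'. */
static bool confirm(void)
{
	pthread_mutex_lock(&table_lock);
	if (dying) {
		pthread_mutex_unlock(&table_lock);
		return false;	/* lost the race: do not insert */
	}
	hash_entries++;
	pthread_mutex_unlock(&table_lock);
	return true;
}

int main(void)
{
	printf("%d\n", confirm());	/* 1: inserted */
	dying = true;
	printf("%d\n", confirm());	/* 0: skipped */
	return 0;
}
```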
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 445de702b8b7..e34622fa0003 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -699,10 +699,8 @@ void xt_free_table_info(struct xt_table_info *info)
 		vfree(info->jumpstack);
 	else
 		kfree(info->jumpstack);
-	if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE)
-		vfree(info->stackptr);
-	else
-		kfree(info->stackptr);
+
+	free_percpu(info->stackptr);
 	kfree(info);
 }
@@ -753,14 +751,9 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
 	unsigned int size;
 	int cpu;
-	size = sizeof(unsigned int) * nr_cpu_ids;
-	if (size > PAGE_SIZE)
-		i->stackptr = vmalloc(size);
-	else
-		i->stackptr = kmalloc(size, GFP_KERNEL);
+	i->stackptr = alloc_percpu(unsigned int);
 	if (i->stackptr == NULL)
 		return -ENOMEM;
-	memset(i->stackptr, 0, size);
 	size = sizeof(void **) * nr_cpu_ids;
 	if (size > PAGE_SIZE)
@@ -844,10 +837,6 @@ struct xt_table *xt_register_table(struct net *net,
 	struct xt_table_info *private;
 	struct xt_table *t, *table;
-	ret = xt_jumpstack_alloc(newinfo);
-	if (ret < 0)
-		return ERR_PTR(ret);
-
 	/* Don't add one object to multiple lists. */
 	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
 	if (!table) {
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index d7920d9f49e9..859d9fd429c8 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -76,7 +76,7 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
 	if (ip_route_output_key(net, &rt, &fl) != 0)
 		return false;
-	dst_release(skb_dst(skb));
+	skb_dst_drop(skb);
 	skb_dst_set(skb, &rt->u.dst);
 	skb->dev      = rt->u.dst.dev;
 	skb->protocol = htons(ETH_P_IP);
@@ -157,7 +157,7 @@ tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
 	if (dst == NULL)
 		return false;
-	dst_release(skb_dst(skb));
+	skb_dst_drop(skb);
 	skb_dst_set(skb, dst);
 	skb->dev      = dst->dev;
 	skb->protocol = htons(ETH_P_IPV6);
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index af4d38bc3b22..94d72e85a475 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -626,6 +626,7 @@ static void pep_sock_close(struct sock *sk, long timeout)
 	struct pep_sock *pn = pep_sk(sk);
 	int ifindex = 0;
+	sock_hold(sk); /* keep a reference after sk_common_release() */
 	sk_common_release(sk);
 	lock_sock(sk);
@@ -644,6 +645,7 @@ static void pep_sock_close(struct sock *sk, long timeout)
 	if (ifindex)
 		gprs_detach(sk);
+	sock_put(sk);
 }
 static int pep_wait_connreq(struct sock *sk, int noblock)
@@ -1043,12 +1045,12 @@ static void pep_sock_unhash(struct sock *sk)
 	lock_sock(sk);
 	if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) {
 		skparent = pn->listener;
-		sk_del_node_init(sk);
 		release_sock(sk);
-		sk = skparent;
 		pn = pep_sk(skparent);
-		lock_sock(sk);
+		lock_sock(skparent);
+		sk_del_node_init(sk);
+		sk = skparent;
 	}
 	/* Unhash a listening sock only when it is closed
 	 * and all of its active connected pipes are closed. */
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 10ed0d55f759..f68832798db2 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -475,6 +475,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
 	err = rds_ib_setup_qp(conn);
 	if (err) {
 		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
+		mutex_unlock(&conn->c_cm_lock);
 		goto out;
 	}
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index a9d951b4fbae..b5dd6ac39be8 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -452,6 +452,7 @@ int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
 	err = rds_iw_setup_qp(conn);
 	if (err) {
 		rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err);
+		mutex_unlock(&conn->c_cm_lock);
 		goto out;
 	}
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index d885ba311564..570949417f38 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -159,6 +159,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 			iph->daddr = new_addr;
 		csum_replace4(&iph->check, addr, new_addr);
+	} else if ((iph->frag_off & htons(IP_OFFSET)) ||
+		   iph->protocol != IPPROTO_ICMP) {
+		goto out;
 	}
 	ihl = iph->ihl * 4;
@@ -247,6 +250,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 		break;
 	}
+out:
 	return action;
 drop:
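[Editor's note] The one-line rds_ib/rds_iw fixes above add the mutex_unlock() that the error path was missing; without it, a failed rds_ib_setup_qp() would leave c_cm_lock held forever. The usual way to make such paths hard to get wrong is a single unlock point, sketched here with pthreads (names invented):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cm_lock = PTHREAD_MUTEX_INITIALIZER;

static int setup_qp(int fail) { return fail ? -1 : 0; }

/* Every exit taken after the lock is acquired funnels through 'out',
 * so no early return can leak the mutex. */
static int handle_connect(int fail)
{
	int err;

	pthread_mutex_lock(&cm_lock);

	err = setup_qp(fail);
	if (err)
		goto out;	/* unlock still happens */

	/* ... more setup under the lock ... */

out:
	pthread_mutex_unlock(&cm_lock);
	return err;
}

int main(void)
{
	printf("%d %d\n", handle_connect(0), handle_connect(1));
	return 0;	/* prints: 0 -1 */
}
```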
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index fdbd0b7bd840..50e3d945e1f4 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -125,7 +125,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 {
 	struct tcf_pedit *p = a->priv;
 	int i, munged = 0;
-	u8 *pptr;
+	unsigned int off;
 	if (!(skb->tc_verd & TC_OK2MUNGE)) {
 		/* should we set skb->cloned? */
@@ -134,7 +134,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 	}
-	pptr = skb_network_header(skb);
+	off = skb_network_offset(skb);
 	spin_lock(&p->tcf_lock);
@@ -144,17 +144,17 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 		struct tc_pedit_key *tkey = p->tcfp_keys;
 		for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
-			u32 *ptr;
+			u32 *ptr, _data;
 			int offset = tkey->off;
 			if (tkey->offmask) {
-				if (skb->len > tkey->at) {
-					char *j = pptr + tkey->at;
-					offset += ((*j & tkey->offmask) >>
-						   tkey->shift);
-				} else {
+				char *d, _d;
+
+				d = skb_header_pointer(skb, off + tkey->at, 1,
+						       &_d);
+				if (!d)
 					goto bad;
-				}
+				offset += (*d & tkey->offmask) >> tkey->shift;
 			}
 			if (offset % 4) {
@@ -169,9 +169,13 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 				goto bad;
 			}
-			ptr = (u32 *)(pptr+offset);
+			ptr = skb_header_pointer(skb, off + offset, 4, &_data);
+			if (!ptr)
+				goto bad;
 			/* just do it, baby */
 			*ptr = ((*ptr & tkey->mask) ^ tkey->val);
+			if (ptr == &_data)
+				skb_store_bits(skb, off + offset, ptr, 4);
 			munged++;
 		}
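[Editor's note] tcf_pedit() now reads each 32-bit word through skb_header_pointer(), which returns either a pointer into the packet or a pointer to the caller's bounce buffer, and writes the edit back with skb_store_bits() only in the latter case. A userspace sketch of that read-modify-write contract (buffer layout invented; this toy always uses the bounce buffer to show the write-back branch):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Return a pointer to 'len' bytes at 'off': here always a copy in 'buf';
 * NULL means the request runs past the end of the packet. */
static void *header_pointer(uint8_t *pkt, size_t pkt_len,
			    size_t off, size_t len, void *buf)
{
	if (len > pkt_len || off > pkt_len - len)
		return NULL;
	memcpy(buf, pkt + off, len);
	return buf;
}

int main(void)
{
	uint8_t pkt[8] = { 0 };
	uint32_t _data, *ptr;
	size_t off = 4;

	ptr = header_pointer(pkt, sizeof(pkt), off, 4, &_data);
	if (!ptr)
		return 1;

	*ptr = (*ptr & 0xffff0000u) ^ 0x5aa5u;	/* the edit itself */
	if (ptr == &_data)			/* edited the bounce buffer? */
		memcpy(pkt + off, ptr, 4);	/* ...then write it back */

	printf("%02x %02x\n", pkt[4], pkt[5]);
	return 0;
}
```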
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 221180384fd7..78ef2c5e130b 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -16,14 +16,11 @@
 #include <linux/errno.h>
 #include <linux/skbuff.h>
 #include <linux/cgroup.h>
+#include <linux/rcupdate.h>
 #include <net/rtnetlink.h>
 #include <net/pkt_cls.h>
-
-struct cgroup_cls_state
-{
-	struct cgroup_subsys_state css;
-	u32 classid;
-};
+#include <net/sock.h>
+#include <net/cls_cgroup.h>
 static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
 					       struct cgroup *cgrp);
@@ -112,6 +109,10 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
 	struct cls_cgroup_head *head = tp->root;
 	u32 classid;
+	rcu_read_lock();
+	classid = task_cls_state(current)->classid;
+	rcu_read_unlock();
+
 	/*
 	 * Due to the nature of the classifier it is required to ignore all
 	 * packets originating from softirq context as accessing `current'
@@ -122,12 +123,12 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp,
 	 * calls by looking at the number of nested bh disable calls because
 	 * softirqs always disables bh.
 	 */
-	if (softirq_count() != SOFTIRQ_OFFSET)
-		return -1;
-
-	rcu_read_lock();
-	classid = task_cls_state(current)->classid;
-	rcu_read_unlock();
+	if (softirq_count() != SOFTIRQ_OFFSET) {
+		/* If there is an sk_classid we'll use that. */
+		if (!skb->sk)
+			return -1;
+		classid = skb->sk->sk_classid;
+	}
 	if (!classid)
 		return -1;
@@ -289,18 +290,35 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
 static int __init init_cgroup_cls(void)
 {
-	int ret = register_tcf_proto_ops(&cls_cgroup_ops);
-	if (ret)
-		return ret;
+	int ret;
+
 	ret = cgroup_load_subsys(&net_cls_subsys);
 	if (ret)
-		unregister_tcf_proto_ops(&cls_cgroup_ops);
+		goto out;
+
+#ifndef CONFIG_NET_CLS_CGROUP
+	/* We can't use rcu_assign_pointer because this is an int. */
+	smp_wmb();
+	net_cls_subsys_id = net_cls_subsys.subsys_id;
+#endif
+
+	ret = register_tcf_proto_ops(&cls_cgroup_ops);
+	if (ret)
+		cgroup_unload_subsys(&net_cls_subsys);
+
+out:
 	return ret;
 }
 static void __exit exit_cgroup_cls(void)
 {
 	unregister_tcf_proto_ops(&cls_cgroup_ops);
+
+#ifndef CONFIG_NET_CLS_CGROUP
+	net_cls_subsys_id = -1;
+	synchronize_rcu();
+#endif
+
+	cgroup_unload_subsys(&net_cls_subsys);
 }
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 96275422c619..4f522143811e 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -98,11 +98,11 @@ static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_re
 {
 	struct {
 		struct tc_u_knode *knode;
-		u8 *ptr;
+		unsigned int off;
 	} stack[TC_U32_MAXDEPTH];
 	struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
-	u8 *ptr = skb_network_header(skb);
+	unsigned int off = skb_network_offset(skb);
 	struct tc_u_knode *n;
 	int sdepth = 0;
 	int off2 = 0;
@@ -134,8 +134,14 @@ next_knode:
 #endif
 		for (i = n->sel.nkeys; i>0; i--, key++) {
-
-			if ((*(__be32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
+			unsigned int toff;
+			__be32 *data, _data;
+
+			toff = off + key->off + (off2 & key->offmask);
+			data = skb_header_pointer(skb, toff, 4, &_data);
+			if (!data)
+				goto out;
+			if ((*data ^ key->val) & key->mask) {
 				n = n->next;
 				goto next_knode;
 			}
@@ -174,29 +180,45 @@ check_terminal:
 		if (sdepth >= TC_U32_MAXDEPTH)
 			goto deadloop;
 		stack[sdepth].knode = n;
-		stack[sdepth].ptr = ptr;
+		stack[sdepth].off = off;
 		sdepth++;
 		ht = n->ht_down;
 		sel = 0;
-		if (ht->divisor)
-			sel = ht->divisor&u32_hash_fold(*(__be32*)(ptr+n->sel.hoff), &n->sel,n->fshift);
-
+		if (ht->divisor) {
+			__be32 *data, _data;
+
+			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
+						  &_data);
+			if (!data)
+				goto out;
+			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
+							  n->fshift);
+		}
 		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
 			goto next_ht;
 		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
 			off2 = n->sel.off + 3;
-			if (n->sel.flags&TC_U32_VAROFFSET)
-				off2 += ntohs(n->sel.offmask & *(__be16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
+			if (n->sel.flags & TC_U32_VAROFFSET) {
+				__be16 *data, _data;
+
+				data = skb_header_pointer(skb,
+							  off + n->sel.offoff,
+							  2, &_data);
+				if (!data)
+					goto out;
+				off2 += ntohs(n->sel.offmask & *data) >>
+					n->sel.offshift;
+			}
 			off2 &= ~3;
 		}
 		if (n->sel.flags&TC_U32_EAT) {
-			ptr += off2;
+			off += off2;
 			off2 = 0;
 		}
-		if (ptr < skb_tail_pointer(skb))
+		if (off < skb->len)
 			goto next_ht;
 	}
@@ -204,9 +226,10 @@ check_terminal:
 	if (sdepth--) {
 		n = stack[sdepth].knode;
 		ht = n->ht_up;
-		ptr = stack[sdepth].ptr;
+		off = stack[sdepth].off;
 		goto check_terminal;
 	}
+out:
 	return -1;
 deadloop:
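[Editor's note] init_cgroup_cls() above is reordered so the classifier is only registered once its cgroup subsystem is loaded, with each failure unwinding exactly the steps already done, in reverse. A sketch of that init/unwind shape (step names invented):

```c
#include <stdio.h>

static int load_subsys(void)	{ puts("subsys loaded"); return 0; }
static void unload_subsys(void)	{ puts("subsys unloaded"); }
static int register_ops(int f)	{ return f ? -1 : 0; }

/* Later steps may depend on earlier ones, so failures unwind in
 * reverse order of initialization. */
static int init_module_sketch(int fail_register)
{
	int ret;

	ret = load_subsys();
	if (ret)
		goto out;

	ret = register_ops(fail_register);
	if (ret)
		unload_subsys();	/* undo step 1 only */
out:
	return ret;
}

int main(void)
{
	printf("ok: %d\n", init_module_sketch(0));
	printf("fail: %d\n", init_module_sketch(1));
	return 0;
}
```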
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index fe35c1f338c2..b9e8c3b7d406 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1195,6 +1195,11 @@ nla_put_failure:
 	return -1;
 }
+static bool tc_qdisc_dump_ignore(struct Qdisc *q)
+{
+	return (q->flags & TCQ_F_BUILTIN) ? true : false;
+}
+
 static int qdisc_notify(struct net *net, struct sk_buff *oskb,
 			struct nlmsghdr *n, u32 clid,
 			struct Qdisc *old, struct Qdisc *new)
@@ -1206,11 +1211,11 @@ static int qdisc_notify(struct net *net, struct sk_buff *oskb,
 	if (!skb)
 		return -ENOBUFS;
-	if (old && old->handle) {
+	if (old && !tc_qdisc_dump_ignore(old)) {
 		if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq,
 				  0, RTM_DELQDISC) < 0)
 			goto err_out;
 	}
-	if (new) {
+	if (new && !tc_qdisc_dump_ignore(new)) {
 		if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq,
 				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
 			goto err_out;
 	}
@@ -1223,11 +1228,6 @@ err_out:
 	return -EINVAL;
 }
-static bool tc_qdisc_dump_ignore(struct Qdisc *q)
-{
-	return (q->flags & TCQ_F_BUILTIN) ? true : false;
-}
-
 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
 			      struct netlink_callback *cb,
 			      int *q_idx_p, int s_q_idx)
diff --git a/net/socket.c b/net/socket.c
index f9f7d0872cac..367d5477d00f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -94,6 +94,7 @@
 #include <net/compat.h>
 #include <net/wext.h>
+#include <net/cls_cgroup.h>
 #include <net/sock.h>
 #include <linux/netfilter.h>
@@ -558,6 +559,8 @@ static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 	struct sock_iocb *si = kiocb_to_siocb(iocb);
 	int err;
+	sock_update_classid(sock->sk);
+
 	si->sock = sock;
 	si->scm = NULL;
 	si->msg = msg;
@@ -684,6 +687,8 @@ static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock,
 {
 	struct sock_iocb *si = kiocb_to_siocb(iocb);
+	sock_update_classid(sock->sk);
+
 	si->sock = sock;
 	si->scm = NULL;
 	si->msg = msg;
@@ -777,6 +782,8 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
 	if (unlikely(!sock->ops->splice_read))
 		return -EINVAL;
+	sock_update_classid(sock->sk);
+
 	return sock->ops->splice_read(sock, ppos, pipe, len, flags);
 }
@@ -3069,6 +3076,8 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
 int kernel_sendpage(struct socket *sock, struct page *page, int offset,
 		    size_t size, int flags)
 {
+	sock_update_classid(sock->sk);
+
 	if (sock->ops->sendpage)
 		return sock->ops->sendpage(sock, page, offset, size, flags);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index c2173ebdb33c..58de76c8540c 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -34,6 +34,7 @@
 #include <linux/sunrpc/cache.h>
 #include <linux/sunrpc/stats.h>
 #include <linux/sunrpc/rpc_pipe_fs.h>
+#include <linux/smp_lock.h>
 #define RPCDBG_FACILITY RPCDBG_CACHE
@@ -1545,12 +1546,18 @@ static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
 	return cache_poll(filp, wait, cd);
 }
-static int cache_ioctl_pipefs(struct inode *inode, struct file *filp,
+static long cache_ioctl_pipefs(struct file *filp,
 			      unsigned int cmd, unsigned long arg)
 {
+	struct inode *inode = filp->f_dentry->d_inode;
 	struct cache_detail *cd = RPC_I(inode)->private;
+	long ret;
-	return cache_ioctl(inode, filp, cmd, arg, cd);
+	lock_kernel();
+	ret = cache_ioctl(inode, filp, cmd, arg, cd);
+	unlock_kernel();
+
+	return ret;
 }
 static int cache_open_pipefs(struct inode *inode, struct file *filp)
@@ -1573,7 +1580,7 @@ const struct file_operations cache_file_operations_pipefs = {
 	.read		= cache_read_pipefs,
 	.write		= cache_write_pipefs,
 	.poll		= cache_poll_pipefs,
-	.ioctl		= cache_ioctl_pipefs, /* for FIONREAD */
+	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
 	.open		= cache_open_pipefs,
 	.release	= cache_release_pipefs,
 };
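[Editor's note] The sunrpc/cache.c conversion above is part of the Big Kernel Lock push-down: the .ioctl file operation becomes .unlocked_ioctl, with the old handler wrapped in an explicit lock_kernel()/unlock_kernel() pair so behaviour stays identical while the locking becomes visible and local. The shape of such a wrapper, sketched with a pthread mutex standing in for the BKL:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* The pre-existing handler, unchanged. */
static long cache_ioctl_unlocked(unsigned int cmd, unsigned long arg)
{
	return (long)(cmd + arg);	/* stand-in for the real work */
}

/* New entry point: same semantics, but the locking is now explicit
 * instead of implied by the ioctl dispatch code. */
static long cache_ioctl_wrapper(unsigned int cmd, unsigned long arg)
{
	long ret;

	pthread_mutex_lock(&big_lock);
	ret = cache_ioctl_unlocked(cmd, arg);
	pthread_mutex_unlock(&big_lock);

	return ret;
}

int main(void)
{
	printf("%ld\n", cache_ioctl_wrapper(1, 2));	/* prints: 3 */
	return 0;
}
```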
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 20e30c6f8355..95ccbcf45d3e 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -27,6 +27,7 @@
 #include <linux/workqueue.h>
 #include <linux/sunrpc/rpc_pipe_fs.h>
 #include <linux/sunrpc/cache.h>
+#include <linux/smp_lock.h>
 static struct vfsmount *rpc_mount __read_mostly;
 static int rpc_mount_count;
@@ -309,8 +310,7 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
 }
 static int
-rpc_pipe_ioctl(struct inode *ino, struct file *filp,
-		unsigned int cmd, unsigned long arg)
+rpc_pipe_ioctl_unlocked(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
 	int len;
@@ -331,13 +331,25 @@ rpc_pipe_ioctl(struct inode *ino, struct file *filp,
 	}
 }
+static long
+rpc_pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	lock_kernel();
+	ret = rpc_pipe_ioctl_unlocked(filp, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
+
 static const struct file_operations rpc_pipe_fops = {
 	.owner		= THIS_MODULE,
 	.llseek		= no_llseek,
 	.read		= rpc_pipe_read,
 	.write		= rpc_pipe_write,
 	.poll		= rpc_pipe_poll,
-	.ioctl		= rpc_pipe_ioctl,
+	.unlocked_ioctl	= rpc_pipe_ioctl,
 	.open		= rpc_pipe_open,
 	.release	= rpc_pipe_release,
 };
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 121105355f60..dac219a56ae1 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -783,7 +783,7 @@ static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p,
 	port = ntohl(*p);
 	dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid,
 			task->tk_msg.rpc_proc->p_name, port);
-	if (unlikely(port > USHORT_MAX))
+	if (unlikely(port > USHRT_MAX))
 		return -EIO;
 	rpcb->r_port = port;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 3fc325399ee4..dcd0132396ba 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -166,7 +166,6 @@ EXPORT_SYMBOL_GPL(xprt_unregister_transport);
 int xprt_load_transport(const char *transport_name)
 {
 	struct xprt_class *t;
-	char module_name[sizeof t->name + 5];
 	int result;
 	result = 0;
@@ -178,9 +177,7 @@ int xprt_load_transport(const char *transport_name)
 		}
 	}
 	spin_unlock(&xprt_list_lock);
-	strcpy(module_name, "xprt");
-	strncat(module_name, transport_name, sizeof t->name);
-	result = request_module(module_name);
+	result = request_module("xprt%s", transport_name);
 out:
 	return result;
 }
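[Editor's note] xprt_load_transport() above drops a hand-rolled strcpy()/strncat() buffer in favour of request_module()'s printf-style interface. Where no such interface exists, snprintf() buys the same safety; a sketch:

```c
#include <stdio.h>
#include <string.h>

/* Build "xprt<name>" without a fixed strcpy/strncat dance; snprintf
 * bounds the write and reports truncation via its return value. */
static int make_module_name(char *buf, size_t buflen, const char *transport)
{
	int n = snprintf(buf, buflen, "xprt%s", transport);

	if (n < 0 || (size_t)n >= buflen)
		return -1;	/* would have truncated */
	return 0;
}

int main(void)
{
	char name[16];

	if (make_module_name(name, sizeof(name), "tcp") == 0)
		puts(name);	/* prints: xprttcp */
	if (make_module_name(name, sizeof(name), "much-too-long-name") != 0)
		puts("truncated");
	return 0;
}
```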
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index b7cd8cccbe72..2a9675136c68 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2293,6 +2293,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 	struct sockaddr *addr = args->dstaddr;
 	struct rpc_xprt *xprt;
 	struct sock_xprt *transport;
+	struct rpc_xprt *ret;
 	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries);
 	if (IS_ERR(xprt))
@@ -2330,8 +2331,8 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
 		break;
 	default:
-		kfree(xprt);
-		return ERR_PTR(-EAFNOSUPPORT);
+		ret = ERR_PTR(-EAFNOSUPPORT);
+		goto out_err;
 	}
 	if (xprt_bound(xprt))
@@ -2346,10 +2347,11 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 	if (try_module_get(THIS_MODULE))
 		return xprt;
-
+	ret = ERR_PTR(-EINVAL);
+out_err:
 	kfree(xprt->slot);
 	kfree(xprt);
-	return ERR_PTR(-EINVAL);
+	return ret;
 }
 static const struct rpc_timeout xs_tcp_default_timeout = {
@@ -2368,6 +2370,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 	struct sockaddr *addr = args->dstaddr;
 	struct rpc_xprt *xprt;
 	struct sock_xprt *transport;
+	struct rpc_xprt *ret;
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
 	if (IS_ERR(xprt))
@@ -2403,8 +2406,8 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
 		break;
 	default:
-		kfree(xprt);
-		return ERR_PTR(-EAFNOSUPPORT);
+		ret = ERR_PTR(-EAFNOSUPPORT);
+		goto out_err;
 	}
 	if (xprt_bound(xprt))
@@ -2420,10 +2423,11 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 	if (try_module_get(THIS_MODULE))
 		return xprt;
-
+	ret = ERR_PTR(-EINVAL);
+out_err:
 	kfree(xprt->slot);
 	kfree(xprt);
-	return ERR_PTR(-EINVAL);
+	return ret;
 }
 /**
@@ -2437,6 +2441,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 	struct rpc_xprt *xprt;
 	struct sock_xprt *transport;
 	struct svc_sock *bc_sock;
+	struct rpc_xprt *ret;
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
 	if (IS_ERR(xprt))
@@ -2476,8 +2481,8 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 					RPCBIND_NETID_TCP6);
 		break;
 	default:
-		kfree(xprt);
-		return ERR_PTR(-EAFNOSUPPORT);
+		ret = ERR_PTR(-EAFNOSUPPORT);
+		goto out_err;
 	}
 	if (xprt_bound(xprt))
@@ -2499,9 +2504,11 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
 	if (try_module_get(THIS_MODULE))
 		return xprt;
+	ret = ERR_PTR(-EINVAL);
+out_err:
 	kfree(xprt->slot);
 	kfree(xprt);
-	return ERR_PTR(-EINVAL);
+	return ret;
 }
 static struct xprt_class	xs_udp_transport = {
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index d92d088026bf..b01a6f6397d7 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -50,7 +50,7 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
 	struct ieee80211_channel *chan;
 	int result;
-	if (wdev->iftype == NL80211_IFTYPE_MONITOR)
+	if (wdev && wdev->iftype == NL80211_IFTYPE_MONITOR)
 		wdev = NULL;
 	if (wdev) {
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index aaa1aad566cd..db71150b8040 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4443,9 +4443,10 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
 		if (channel_type != NL80211_CHAN_NO_HT &&
 		    channel_type != NL80211_CHAN_HT20 &&
 		    channel_type != NL80211_CHAN_HT40PLUS &&
-		    channel_type != NL80211_CHAN_HT40MINUS)
+		    channel_type != NL80211_CHAN_HT40MINUS) {
 			err = -EINVAL;
 			goto out;
+		}
 	}
 	freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
@@ -4717,9 +4718,10 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
 		if (channel_type != NL80211_CHAN_NO_HT &&
 		    channel_type != NL80211_CHAN_HT20 &&
 		    channel_type != NL80211_CHAN_HT40PLUS &&
-		    channel_type != NL80211_CHAN_HT40MINUS)
+		    channel_type != NL80211_CHAN_HT40MINUS) {
 			err = -EINVAL;
 			goto out;
+		}
 	}
 	freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index a026c6d56bd3..58401d246bda 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -515,7 +515,7 @@ cfg80211_inform_bss(struct wiphy *wiphy,
 	privsz = wiphy->bss_priv_size;
-	if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC &&
+	if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
 			(signal < 0 || signal > 100)))
 		return NULL;
@@ -571,7 +571,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
 				      u.probe_resp.variable);
 	size_t privsz = wiphy->bss_priv_size;
-	if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC &&
+	if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC &&
 			(signal < 0 || signal > 100)))
 		return NULL;
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 6a329158bdfa..a3cca0a94346 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -95,13 +95,13 @@ resume:
 			goto error_nolock;
 		}
-		dst = dst_pop(dst);
+		dst = skb_dst_pop(skb);
 		if (!dst) {
 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
 			err = -EHOSTUNREACH;
 			goto error_nolock;
 		}
-		skb_dst_set(skb, dst);
+		skb_dst_set_noref(skb, dst);
 		x = dst->xfrm;
 	} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index d965a2bad8d3..4bf27d901333 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
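[Editor's note] The three xs_setup_*() functions above are refactored so every failure records its result in ret and jumps to one out_err label that frees the partially built transport. A sketch of the same shape with a toy err-pointer encoding (the kernel's real ERR_PTR()/IS_ERR() macros live in linux/err.h and check an address range instead):

```c
#include <stdio.h>
#include <stdlib.h>

/* Toy ERR_PTR()/IS_ERR(): smuggle a negative errno through a pointer.
 * Illustrative only, not the kernel's implementation. */
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(p)	((long)(p) < 0)
#define PTR_ERR(p)	((long)(p))

struct xprt { int slot; };

static struct xprt *setup_xprt(int family_ok, int module_ok)
{
	struct xprt *xprt = malloc(sizeof(*xprt));
	void *ret;

	if (!xprt)
		return ERR_PTR(-12);		/* -ENOMEM */

	if (!family_ok) {
		ret = ERR_PTR(-97);		/* -EAFNOSUPPORT */
		goto out_err;
	}
	if (module_ok)
		return xprt;			/* success path */

	ret = ERR_PTR(-22);			/* -EINVAL */
out_err:
	free(xprt);				/* single cleanup point */
	return ret;
}

int main(void)
{
	struct xprt *x = setup_xprt(0, 1);

	if (IS_ERR(x))
		printf("err %ld\n", PTR_ERR(x));	/* prints: err -97 */
	else
		free(x);
	return 0;
}
```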
@@ -2153,6 +2153,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
 		return 0;
 	}
+	skb_dst_force(skb);
 	dst = skb_dst(skb);
 	res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0;