Diffstat (limited to 'net')
-rw-r--r--  net/802/garp.c | 18
-rw-r--r--  net/802/stp.c | 4
-rw-r--r--  net/8021q/vlan.c | 6
-rw-r--r--  net/9p/client.c | 178
-rw-r--r--  net/9p/protocol.c | 5
-rw-r--r--  net/9p/trans_virtio.c | 76
-rw-r--r--  net/bluetooth/bnep/core.c | 1
-rw-r--r--  net/bluetooth/cmtp/core.c | 1
-rw-r--r--  net/bluetooth/hci_conn.c | 23
-rw-r--r--  net/bluetooth/hci_core.c | 66
-rw-r--r--  net/bluetooth/hci_event.c | 177
-rw-r--r--  net/bluetooth/hci_sock.c | 17
-rw-r--r--  net/bluetooth/hidp/core.c | 2
-rw-r--r--  net/bluetooth/l2cap.c | 94
-rw-r--r--  net/bluetooth/rfcomm/core.c | 8
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 24
-rw-r--r--  net/bluetooth/rfcomm/tty.c | 28
-rw-r--r--  net/bluetooth/sco.c | 22
-rw-r--r--  net/compat.c | 10
-rw-r--r--  net/core/dev.c | 38
-rw-r--r--  net/core/fib_rules.c | 21
-rw-r--r--  net/core/filter.c | 4
-rw-r--r--  net/core/iovec.c | 20
-rw-r--r--  net/core/net-sysfs.c | 20
-rw-r--r--  net/core/net_namespace.c | 4
-rw-r--r--  net/core/pktgen.c | 37
-rw-r--r--  net/core/sock.c | 2
-rw-r--r--  net/core/sysctl_net_core.c | 3
-rw-r--r--  net/dccp/ccid.h | 34
-rw-r--r--  net/dccp/ccids/ccid2.c | 23
-rw-r--r--  net/dccp/ccids/ccid2.h | 5
-rw-r--r--  net/dccp/ccids/ccid3.c | 12
-rw-r--r--  net/dccp/dccp.h | 5
-rw-r--r--  net/dccp/output.c | 209
-rw-r--r--  net/dccp/proto.c | 21
-rw-r--r--  net/dccp/timer.c | 27
-rw-r--r--  net/ipv4/fib_frontend.c | 2
-rw-r--r--  net/ipv4/fib_hash.c | 54
-rw-r--r--  net/ipv4/fib_trie.c | 5
-rw-r--r--  net/ipv4/gre.c | 5
-rw-r--r--  net/ipv4/inetpeer.c | 138
-rw-r--r--  net/ipv4/ip_gre.c | 7
-rw-r--r--  net/ipv4/ip_sockglue.c | 10
-rw-r--r--  net/ipv4/ipip.c | 1
-rw-r--r--  net/ipv4/protocol.c | 8
-rw-r--r--  net/ipv4/route.c | 75
-rw-r--r--  net/ipv4/tunnel4.c | 29
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 16
-rw-r--r--  net/ipv6/ip6_tunnel.c | 2
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 4
-rw-r--r--  net/ipv6/netfilter/Kconfig | 5
-rw-r--r--  net/ipv6/netfilter/Makefile | 5
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 5
-rw-r--r--  net/ipv6/proc.c | 4
-rw-r--r--  net/ipv6/protocol.c | 8
-rw-r--r--  net/ipv6/raw.c | 2
-rw-r--r--  net/ipv6/sit.c | 1
-rw-r--r--  net/ipv6/tunnel6.c | 24
-rw-r--r--  net/ipv6/udp.c | 2
-rw-r--r--  net/iucv/iucv.c | 3
-rw-r--r--  net/l2tp/l2tp_core.c | 53
-rw-r--r--  net/l2tp/l2tp_core.h | 33
-rw-r--r--  net/l2tp/l2tp_ip.c | 2
-rw-r--r--  net/mac80211/Kconfig | 1
-rw-r--r--  net/mac80211/aes_ccm.c | 3
-rw-r--r--  net/mac80211/aes_cmac.c | 3
-rw-r--r--  net/mac80211/agg-rx.c | 8
-rw-r--r--  net/mac80211/cfg.c | 190
-rw-r--r--  net/mac80211/debugfs.c | 60
-rw-r--r--  net/mac80211/debugfs.h | 2
-rw-r--r--  net/mac80211/debugfs_key.c | 19
-rw-r--r--  net/mac80211/debugfs_netdev.c | 2
-rw-r--r--  net/mac80211/debugfs_sta.c | 55
-rw-r--r--  net/mac80211/driver-ops.h | 37
-rw-r--r--  net/mac80211/driver-trace.h | 71
-rw-r--r--  net/mac80211/ibss.c | 2
-rw-r--r--  net/mac80211/ieee80211_i.h | 29
-rw-r--r--  net/mac80211/iface.c | 30
-rw-r--r--  net/mac80211/key.c | 9
-rw-r--r--  net/mac80211/main.c | 5
-rw-r--r--  net/mac80211/mesh.c | 36
-rw-r--r--  net/mac80211/mesh.h | 23
-rw-r--r--  net/mac80211/mesh_hwmp.c | 9
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 7
-rw-r--r--  net/mac80211/mlme.c | 227
-rw-r--r--  net/mac80211/rate.c | 18
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 19
-rw-r--r--  net/mac80211/rx.c | 25
-rw-r--r--  net/mac80211/sta_info.c | 19
-rw-r--r--  net/mac80211/sta_info.h | 35
-rw-r--r--  net/mac80211/status.c | 51
-rw-r--r--  net/mac80211/tx.c | 23
-rw-r--r--  net/mac80211/util.c | 40
-rw-r--r--  net/mac80211/wme.c | 11
-rw-r--r--  net/mac80211/work.c | 27
-rw-r--r--  net/netfilter/Kconfig | 2
-rw-r--r--  net/netfilter/xt_TPROXY.c | 10
-rw-r--r--  net/netfilter/xt_socket.c | 19
-rw-r--r--  net/netlink/af_netlink.c | 65
-rw-r--r--  net/rds/message.c | 5
-rw-r--r--  net/rds/rdma.c | 126
-rw-r--r--  net/rds/send.c | 4
-rw-r--r--  net/rfkill/core.c | 14
-rw-r--r--  net/socket.c | 20
-rw-r--r--  net/sunrpc/Kconfig | 19
-rw-r--r--  net/sunrpc/auth.c | 4
-rw-r--r--  net/sunrpc/auth_generic.c | 2
-rw-r--r--  net/sunrpc/auth_gss/Makefile | 5
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c | 2
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_mech.c | 247
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_seal.c | 186
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_token.c | 267
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_unseal.c | 127
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 51
-rw-r--r--  net/sunrpc/cache.c | 288
-rw-r--r--  net/sunrpc/clnt.c | 3
-rw-r--r--  net/sunrpc/netns.h | 19
-rw-r--r--  net/sunrpc/rpc_pipe.c | 19
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 60
-rw-r--r--  net/sunrpc/sched.c | 2
-rw-r--r--  net/sunrpc/stats.c | 43
-rw-r--r--  net/sunrpc/sunrpc_syms.c | 58
-rw-r--r--  net/sunrpc/svc.c | 3
-rw-r--r--  net/sunrpc/svc_xprt.c | 59
-rw-r--r--  net/sunrpc/svcauth_unix.c | 194
-rw-r--r--  net/sunrpc/svcsock.c | 27
-rw-r--r--  net/sunrpc/xdr.c | 61
-rw-r--r--  net/sunrpc/xprt.c | 39
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma.c | 11
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 19
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c | 82
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c | 49
-rw-r--r--  net/sunrpc/xprtrdma/transport.c | 25
-rw-r--r--  net/sunrpc/xprtsock.c | 358
-rw-r--r--  net/unix/af_unix.c | 14
-rw-r--r--  net/wireless/Makefile | 2
-rw-r--r--  net/wireless/core.c | 23
-rw-r--r--  net/wireless/core.h | 17
-rw-r--r--  net/wireless/lib80211.c | 8
-rw-r--r--  net/wireless/lib80211_crypt_tkip.c | 16
-rw-r--r--  net/wireless/mesh.c | 140
-rw-r--r--  net/wireless/mlme.c | 21
-rw-r--r--  net/wireless/nl80211.c | 374
-rw-r--r--  net/wireless/nl80211.h | 4
-rw-r--r--  net/wireless/reg.c | 259
-rw-r--r--  net/wireless/util.c | 12
-rw-r--r--  net/wireless/wext-core.c | 10
148 files changed, 3681 insertions, 2659 deletions
diff --git a/net/802/garp.c b/net/802/garp.c
index 941f2a324d3a..c1df2dad8c6b 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -346,8 +346,8 @@ int garp_request_join(const struct net_device *dev,
const struct garp_application *appl,
const void *data, u8 len, u8 type)
{
- struct garp_port *port = dev->garp_port;
- struct garp_applicant *app = port->applicants[appl->type];
+ struct garp_port *port = rtnl_dereference(dev->garp_port);
+ struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
struct garp_attr *attr;
spin_lock_bh(&app->lock);
@@ -366,8 +366,8 @@ void garp_request_leave(const struct net_device *dev,
const struct garp_application *appl,
const void *data, u8 len, u8 type)
{
- struct garp_port *port = dev->garp_port;
- struct garp_applicant *app = port->applicants[appl->type];
+ struct garp_port *port = rtnl_dereference(dev->garp_port);
+ struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
struct garp_attr *attr;
spin_lock_bh(&app->lock);
@@ -546,11 +546,11 @@ static int garp_init_port(struct net_device *dev)
static void garp_release_port(struct net_device *dev)
{
- struct garp_port *port = dev->garp_port;
+ struct garp_port *port = rtnl_dereference(dev->garp_port);
unsigned int i;
for (i = 0; i <= GARP_APPLICATION_MAX; i++) {
- if (port->applicants[i])
+ if (rtnl_dereference(port->applicants[i]))
return;
}
rcu_assign_pointer(dev->garp_port, NULL);
@@ -565,7 +565,7 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
ASSERT_RTNL();
- if (!dev->garp_port) {
+ if (!rtnl_dereference(dev->garp_port)) {
err = garp_init_port(dev);
if (err < 0)
goto err1;
@@ -601,8 +601,8 @@ EXPORT_SYMBOL_GPL(garp_init_applicant);
void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
{
- struct garp_port *port = dev->garp_port;
- struct garp_applicant *app = port->applicants[appl->type];
+ struct garp_port *port = rtnl_dereference(dev->garp_port);
+ struct garp_applicant *app = rtnl_dereference(port->applicants[appl->type]);
ASSERT_RTNL();
diff --git a/net/802/stp.c b/net/802/stp.c
index 53c8f77f0ccd..978c30b1b36b 100644
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -21,8 +21,8 @@
#define GARP_ADDR_MAX 0x2F
#define GARP_ADDR_RANGE (GARP_ADDR_MAX - GARP_ADDR_MIN)
-static const struct stp_proto *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
-static const struct stp_proto *stp_proto __read_mostly;
+static const struct stp_proto __rcu *garp_protos[GARP_ADDR_RANGE + 1] __read_mostly;
+static const struct stp_proto __rcu *stp_proto __read_mostly;
static struct llc_sap *sap __read_mostly;
static unsigned int sap_registered;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 05b867e43757..52077ca22072 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -112,7 +112,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
ASSERT_RTNL();
- grp = real_dev->vlgrp;
+ grp = rtnl_dereference(real_dev->vlgrp);
BUG_ON(!grp);
/* Take it out of our own structures, but be sure to interlock with
@@ -177,7 +177,7 @@ int register_vlan_dev(struct net_device *dev)
struct vlan_group *grp, *ngrp = NULL;
int err;
- grp = real_dev->vlgrp;
+ grp = rtnl_dereference(real_dev->vlgrp);
if (!grp) {
ngrp = grp = vlan_group_alloc(real_dev);
if (!grp)
@@ -385,7 +385,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
}
- grp = dev->vlgrp;
+ grp = rtnl_dereference(dev->vlgrp);
if (!grp)
goto out;
diff --git a/net/9p/client.c b/net/9p/client.c
index 83bf0541d66f..a848bca9fbff 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -450,32 +450,43 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
return err;
}
- if (type == P9_RERROR) {
+ if (type == P9_RERROR || type == P9_RLERROR) {
int ecode;
- char *ename;
- err = p9pdu_readf(req->rc, c->proto_version, "s?d",
- &ename, &ecode);
- if (err) {
- P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n",
- err);
- return err;
- }
+ if (!p9_is_proto_dotl(c)) {
+ char *ename;
- if (p9_is_proto_dotu(c) ||
- p9_is_proto_dotl(c))
- err = -ecode;
+ err = p9pdu_readf(req->rc, c->proto_version, "s?d",
+ &ename, &ecode);
+ if (err)
+ goto out_err;
+
+ if (p9_is_proto_dotu(c))
+ err = -ecode;
+
+ if (!err || !IS_ERR_VALUE(err)) {
+ err = p9_errstr2errno(ename, strlen(ename));
+
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode, ename);
- if (!err || !IS_ERR_VALUE(err))
- err = p9_errstr2errno(ename, strlen(ename));
+ kfree(ename);
+ }
+ } else {
+ err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
+ err = -ecode;
- P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode, ename);
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
+ }
- kfree(ename);
} else
err = 0;
return err;
+
+out_err:
+ P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error%d\n", err);
+
+ return err;
}
/**
@@ -568,11 +579,14 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
va_start(ap, fmt);
err = p9pdu_vwritef(req->tc, c->proto_version, fmt, ap);
va_end(ap);
+ if (err)
+ goto reterr;
p9pdu_finalize(req->tc);
err = c->trans_mod->request(c, req);
if (err < 0) {
- c->status = Disconnected;
+ if (err != -ERESTARTSYS)
+ c->status = Disconnected;
goto reterr;
}
@@ -1151,12 +1165,44 @@ int p9_client_link(struct p9_fid *dfid, struct p9_fid *oldfid, char *newname)
}
EXPORT_SYMBOL(p9_client_link);
+int p9_client_fsync(struct p9_fid *fid, int datasync)
+{
+ int err;
+ struct p9_client *clnt;
+ struct p9_req_t *req;
+
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TFSYNC fid %d datasync:%d\n",
+ fid->fid, datasync);
+ err = 0;
+ clnt = fid->clnt;
+
+ req = p9_client_rpc(clnt, P9_TFSYNC, "dd", fid->fid, datasync);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto error;
+ }
+
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RFSYNC fid %d\n", fid->fid);
+
+ p9_free_req(clnt, req);
+
+error:
+ return err;
+}
+EXPORT_SYMBOL(p9_client_fsync);
+
int p9_client_clunk(struct p9_fid *fid)
{
int err;
struct p9_client *clnt;
struct p9_req_t *req;
+ if (!fid) {
+ P9_EPRINTK(KERN_WARNING, "Trying to clunk with NULL fid\n");
+ dump_stack();
+ return 0;
+ }
+
P9_DPRINTK(P9_DEBUG_9P, ">>> TCLUNK fid %d\n", fid->fid);
err = 0;
clnt = fid->clnt;
@@ -1240,16 +1286,13 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
if (data) {
memmove(data, dataptr, count);
- }
-
- if (udata) {
+ } else {
err = copy_to_user(udata, dataptr, count);
if (err) {
err = -EFAULT;
goto free_and_error;
}
}
-
p9_free_req(clnt, req);
return count;
@@ -1761,3 +1804,96 @@ error:
}
EXPORT_SYMBOL(p9_client_mkdir_dotl);
+
+int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status)
+{
+ int err;
+ struct p9_client *clnt;
+ struct p9_req_t *req;
+
+ err = 0;
+ clnt = fid->clnt;
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TLOCK fid %d type %i flags %d "
+ "start %lld length %lld proc_id %d client_id %s\n",
+ fid->fid, flock->type, flock->flags, flock->start,
+ flock->length, flock->proc_id, flock->client_id);
+
+ req = p9_client_rpc(clnt, P9_TLOCK, "dbdqqds", fid->fid, flock->type,
+ flock->flags, flock->start, flock->length,
+ flock->proc_id, flock->client_id);
+
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ err = p9pdu_readf(req->rc, clnt->proto_version, "b", status);
+ if (err) {
+ p9pdu_dump(1, req->rc);
+ goto error;
+ }
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RLOCK status %i\n", *status);
+error:
+ p9_free_req(clnt, req);
+ return err;
+
+}
+EXPORT_SYMBOL(p9_client_lock_dotl);
+
+int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock)
+{
+ int err;
+ struct p9_client *clnt;
+ struct p9_req_t *req;
+
+ err = 0;
+ clnt = fid->clnt;
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TGETLOCK fid %d, type %i start %lld "
+ "length %lld proc_id %d client_id %s\n", fid->fid, glock->type,
+ glock->start, glock->length, glock->proc_id, glock->client_id);
+
+ req = p9_client_rpc(clnt, P9_TGETLOCK, "dbqqds", fid->fid, glock->type,
+ glock->start, glock->length, glock->proc_id, glock->client_id);
+
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ err = p9pdu_readf(req->rc, clnt->proto_version, "bqqds", &glock->type,
+ &glock->start, &glock->length, &glock->proc_id,
+ &glock->client_id);
+ if (err) {
+ p9pdu_dump(1, req->rc);
+ goto error;
+ }
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RGETLOCK type %i start %lld length %lld "
+ "proc_id %d client_id %s\n", glock->type, glock->start,
+ glock->length, glock->proc_id, glock->client_id);
+error:
+ p9_free_req(clnt, req);
+ return err;
+}
+EXPORT_SYMBOL(p9_client_getlock_dotl);
+
+int p9_client_readlink(struct p9_fid *fid, char **target)
+{
+ int err;
+ struct p9_client *clnt;
+ struct p9_req_t *req;
+
+ err = 0;
+ clnt = fid->clnt;
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TREADLINK fid %d\n", fid->fid);
+
+ req = p9_client_rpc(clnt, P9_TREADLINK, "d", fid->fid);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ err = p9pdu_readf(req->rc, clnt->proto_version, "s", target);
+ if (err) {
+ p9pdu_dump(1, req->rc);
+ goto error;
+ }
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RREADLINK target %s\n", *target);
+error:
+ p9_free_req(clnt, req);
+ return err;
+}
+EXPORT_SYMBOL(p9_client_readlink);
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 3acd3afb20c8..45c15f491401 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -122,9 +122,8 @@ static size_t
pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
{
size_t len = MIN(pdu->capacity - pdu->size, size);
- int err = copy_from_user(&pdu->sdata[pdu->size], udata, len);
- if (err)
- printk(KERN_WARNING "pdu_write_u returning: %d\n", err);
+ if (copy_from_user(&pdu->sdata[pdu->size], udata, len))
+ len = 0;
pdu->size += len;
return size - len;
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index b88515936e4b..c8f3f72ab20e 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -75,6 +75,8 @@ struct virtio_chan {
struct p9_client *client;
struct virtio_device *vdev;
struct virtqueue *vq;
+ int ring_bufs_avail;
+ wait_queue_head_t *vc_wq;
/* Scatterlist: can be too big for stack. */
struct scatterlist sg[VIRTQUEUE_NUM];
@@ -134,16 +136,30 @@ static void req_done(struct virtqueue *vq)
struct p9_fcall *rc;
unsigned int len;
struct p9_req_t *req;
+ unsigned long flags;
P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");
- while ((rc = virtqueue_get_buf(chan->vq, &len)) != NULL) {
- P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
- P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
- req = p9_tag_lookup(chan->client, rc->tag);
- req->status = REQ_STATUS_RCVD;
- p9_client_cb(chan->client, req);
- }
+ do {
+ spin_lock_irqsave(&chan->lock, flags);
+ rc = virtqueue_get_buf(chan->vq, &len);
+
+ if (rc != NULL) {
+ if (!chan->ring_bufs_avail) {
+ chan->ring_bufs_avail = 1;
+ wake_up(chan->vc_wq);
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+ P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
+ P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n",
+ rc->tag);
+ req = p9_tag_lookup(chan->client, rc->tag);
+ req->status = REQ_STATUS_RCVD;
+ p9_client_cb(chan->client, req);
+ } else {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ }
+ } while (rc != NULL);
}
/**
@@ -199,23 +215,43 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
int in, out;
struct virtio_chan *chan = client->trans;
char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
+ unsigned long flags;
+ int err;
P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
+req_retry:
+ req->status = REQ_STATUS_SENT;
+
+ spin_lock_irqsave(&chan->lock, flags);
out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata,
req->tc->size);
in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM-out, rdata,
client->msize);
- req->status = REQ_STATUS_SENT;
-
- if (virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
- P9_DPRINTK(P9_DEBUG_TRANS,
- "9p debug: virtio rpc add_buf returned failure");
- return -EIO;
+ err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
+ if (err < 0) {
+ if (err == -ENOSPC) {
+ chan->ring_bufs_avail = 0;
+ spin_unlock_irqrestore(&chan->lock, flags);
+ err = wait_event_interruptible(*chan->vc_wq,
+ chan->ring_bufs_avail);
+ if (err == -ERESTARTSYS)
+ return err;
+
+ P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
+ goto req_retry;
+ } else {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ P9_DPRINTK(P9_DEBUG_TRANS,
+ "9p debug: "
+ "virtio rpc add_buf returned failure");
+ return -EIO;
+ }
}
virtqueue_kick(chan->vq);
+ spin_unlock_irqrestore(&chan->lock, flags);
P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
return 0;
@@ -290,14 +326,23 @@ static int p9_virtio_probe(struct virtio_device *vdev)
chan->tag_len = tag_len;
err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
if (err) {
- kfree(tag);
- goto out_free_vq;
+ goto out_free_tag;
}
+ chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
+ if (!chan->vc_wq) {
+ err = -ENOMEM;
+ goto out_free_tag;
+ }
+ init_waitqueue_head(chan->vc_wq);
+ chan->ring_bufs_avail = 1;
+
mutex_lock(&virtio_9p_lock);
list_add_tail(&chan->chan_list, &virtio_chan_list);
mutex_unlock(&virtio_9p_lock);
return 0;
+out_free_tag:
+ kfree(tag);
out_free_vq:
vdev->config->del_vqs(vdev);
kfree(chan);
@@ -371,6 +416,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
mutex_unlock(&virtio_9p_lock);
sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
kfree(chan->tag);
+ kfree(chan->vc_wq);
kfree(chan);
}
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index f10b41fb05a0..5868597534e5 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -648,6 +648,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s)
{
+ memset(ci, 0, sizeof(*ci));
memcpy(ci->dst, s->eh.h_source, ETH_ALEN);
strcpy(ci->device, s->dev->name);
ci->flags = s->flags;
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index ec0a1347f933..8e5f292529ac 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -78,6 +78,7 @@ static void __cmtp_unlink_session(struct cmtp_session *session)
static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
{
+ memset(ci, 0, sizeof(*ci));
bacpy(&ci->bdaddr, &session->bdaddr);
ci->flags = session->flags;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 0b1e460fe440..6b90a4191734 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -39,7 +39,7 @@
#include <net/sock.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -66,7 +66,8 @@ void hci_acl_connect(struct hci_conn *conn)
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
- if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) {
+ ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
+ if (ie) {
if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
cp.pscan_rep_mode = ie->data.pscan_rep_mode;
cp.pscan_mode = ie->data.pscan_mode;
@@ -368,8 +369,10 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
BT_DBG("%s dst %s", hdev->name, batostr(dst));
- if (!(acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst))) {
- if (!(acl = hci_conn_add(hdev, ACL_LINK, dst)))
+ acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
+ if (!acl) {
+ acl = hci_conn_add(hdev, ACL_LINK, dst);
+ if (!acl)
return NULL;
}
@@ -389,8 +392,10 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
if (type == ACL_LINK)
return acl;
- if (!(sco = hci_conn_hash_lookup_ba(hdev, type, dst))) {
- if (!(sco = hci_conn_add(hdev, type, dst))) {
+ sco = hci_conn_hash_lookup_ba(hdev, type, dst);
+ if (!sco) {
+ sco = hci_conn_add(hdev, type, dst);
+ if (!sco) {
hci_conn_put(acl);
return NULL;
}
@@ -647,10 +652,12 @@ int hci_get_conn_list(void __user *arg)
size = sizeof(req) + req.conn_num * sizeof(*ci);
- if (!(cl = kmalloc(size, GFP_KERNEL)))
+ cl = kmalloc(size, GFP_KERNEL);
+ if (!cl)
return -ENOMEM;
- if (!(hdev = hci_dev_get(req.dev_id))) {
+ hdev = hci_dev_get(req.dev_id);
+ if (!hdev) {
kfree(cl);
return -ENODEV;
}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index bc2a052e518b..51c61f75a797 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -44,7 +44,7 @@
#include <net/sock.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -349,20 +349,23 @@ struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *b
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
struct inquiry_cache *cache = &hdev->inq_cache;
- struct inquiry_entry *e;
+ struct inquiry_entry *ie;
BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
- if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
+ ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
+ if (!ie) {
/* Entry not in the cache. Add new one. */
- if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
+ ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
+ if (!ie)
return;
- e->next = cache->list;
- cache->list = e;
+
+ ie->next = cache->list;
+ cache->list = ie;
}
- memcpy(&e->data, data, sizeof(*data));
- e->timestamp = jiffies;
+ memcpy(&ie->data, data, sizeof(*data));
+ ie->timestamp = jiffies;
cache->timestamp = jiffies;
}
@@ -422,16 +425,20 @@ int hci_inquiry(void __user *arg)
hci_dev_lock_bh(hdev);
if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
- inquiry_cache_empty(hdev) ||
- ir.flags & IREQ_CACHE_FLUSH) {
+ inquiry_cache_empty(hdev) ||
+ ir.flags & IREQ_CACHE_FLUSH) {
inquiry_cache_flush(hdev);
do_inquiry = 1;
}
hci_dev_unlock_bh(hdev);
timeo = ir.length * msecs_to_jiffies(2000);
- if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
- goto done;
+
+ if (do_inquiry) {
+ err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
+ if (err < 0)
+ goto done;
+ }
/* for unlimited number of responses we will use buffer with 255 entries */
max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
@@ -439,7 +446,8 @@ int hci_inquiry(void __user *arg)
/* cache_dump can't sleep. Therefore we allocate temp buffer and then
* copy it to the user space.
*/
- if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
+ buf = kmalloc(sizeof(struct inquiry_info) *max_rsp, GFP_KERNEL);
+ if (!buf) {
err = -ENOMEM;
goto done;
}
@@ -611,7 +619,8 @@ int hci_dev_close(__u16 dev)
struct hci_dev *hdev;
int err;
- if (!(hdev = hci_dev_get(dev)))
+ hdev = hci_dev_get(dev);
+ if (!hdev)
return -ENODEV;
err = hci_dev_do_close(hdev);
hci_dev_put(hdev);
@@ -623,7 +632,8 @@ int hci_dev_reset(__u16 dev)
struct hci_dev *hdev;
int ret = 0;
- if (!(hdev = hci_dev_get(dev)))
+ hdev = hci_dev_get(dev);
+ if (!hdev)
return -ENODEV;
hci_req_lock(hdev);
@@ -663,7 +673,8 @@ int hci_dev_reset_stat(__u16 dev)
struct hci_dev *hdev;
int ret = 0;
- if (!(hdev = hci_dev_get(dev)))
+ hdev = hci_dev_get(dev);
+ if (!hdev)
return -ENODEV;
memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
@@ -682,7 +693,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
if (copy_from_user(&dr, arg, sizeof(dr)))
return -EFAULT;
- if (!(hdev = hci_dev_get(dr.dev_id)))
+ hdev = hci_dev_get(dr.dev_id);
+ if (!hdev)
return -ENODEV;
switch (cmd) {
@@ -763,7 +775,8 @@ int hci_get_dev_list(void __user *arg)
size = sizeof(*dl) + dev_num * sizeof(*dr);
- if (!(dl = kzalloc(size, GFP_KERNEL)))
+ dl = kzalloc(size, GFP_KERNEL);
+ if (!dl)
return -ENOMEM;
dr = dl->dev_req;
@@ -797,7 +810,8 @@ int hci_get_dev_info(void __user *arg)
if (copy_from_user(&di, arg, sizeof(di)))
return -EFAULT;
- if (!(hdev = hci_dev_get(di.dev_id)))
+ hdev = hci_dev_get(di.dev_id);
+ if (!hdev)
return -ENODEV;
strcpy(di.name, hdev->name);
@@ -905,7 +919,7 @@ int hci_register_dev(struct hci_dev *hdev)
hdev->sniff_max_interval = 800;
hdev->sniff_min_interval = 80;
- tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
+ tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
@@ -1368,7 +1382,8 @@ void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
- if (!(list = skb_shinfo(skb)->frag_list)) {
+ list = skb_shinfo(skb)->frag_list;
+ if (!list) {
/* Non fragmented */
BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
@@ -1609,7 +1624,8 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_enter_active_mode(conn);
/* Send to upper protocol */
- if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
+ hp = hci_proto[HCI_PROTO_L2CAP];
+ if (hp && hp->recv_acldata) {
hp->recv_acldata(conn, skb, flags);
return;
}
@@ -1644,7 +1660,8 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
register struct hci_proto *hp;
/* Send to upper protocol */
- if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
+ hp = hci_proto[HCI_PROTO_SCO];
+ if (hp && hp->recv_scodata) {
hp->recv_scodata(conn, skb);
return;
}
@@ -1727,7 +1744,8 @@ static void hci_cmd_task(unsigned long arg)
if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
kfree_skb(hdev->sent_cmd);
- if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
+ hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
+ if (hdev->sent_cmd) {
atomic_dec(&hdev->cmd_cnt);
hci_send_frame(skb);
hdev->cmd_last_tx = jiffies;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 84093b0000b9..8923b36a67a2 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -39,7 +39,7 @@
#include <net/sock.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -677,9 +677,50 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
hci_dev_unlock(hdev);
}
+static int hci_outgoing_auth_needed(struct hci_dev *hdev,
+ struct hci_conn *conn)
+{
+ if (conn->state != BT_CONFIG || !conn->out)
+ return 0;
+
+ if (conn->sec_level == BT_SECURITY_SDP)
+ return 0;
+
+ /* Only request authentication for SSP connections or non-SSP
+ * devices with sec_level HIGH */
+ if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
+ conn->sec_level != BT_SECURITY_HIGH)
+ return 0;
+
+ return 1;
+}
+
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
+ struct hci_cp_remote_name_req *cp;
+ struct hci_conn *conn;
+
BT_DBG("%s status 0x%x", hdev->name, status);
+
+ /* If successful wait for the name req complete event before
+ * checking for the need to do authentication */
+ if (!status)
+ return;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
+ if (!cp)
+ return;
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+ if (conn && hci_outgoing_auth_needed(hdev, conn)) {
+ struct hci_cp_auth_requested cp;
+ cp.handle = __cpu_to_le16(conn->handle);
+ hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
+ }
+
+ hci_dev_unlock(hdev);
}
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
@@ -955,12 +996,14 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_dev_lock(hdev);
- if ((ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr)))
+ ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
+ if (ie)
memcpy(ie->data.dev_class, ev->dev_class, 3);
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
if (!conn) {
- if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) {
+ conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
+ if (!conn) {
BT_ERR("No memory for new connection");
hci_dev_unlock(hdev);
return;
@@ -1090,9 +1133,23 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
+ struct hci_ev_remote_name *ev = (void *) skb->data;
+ struct hci_conn *conn;
+
BT_DBG("%s", hdev->name);
hci_conn_check_pending(hdev);
+
+ hci_dev_lock(hdev);
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+ if (conn && hci_outgoing_auth_needed(hdev, conn)) {
+ struct hci_cp_auth_requested cp;
+ cp.handle = __cpu_to_le16(conn->handle);
+ hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
+ }
+
+ hci_dev_unlock(hdev);
}
static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1162,33 +1219,39 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
- if (conn) {
- if (!ev->status)
- memcpy(conn->features, ev->features, 8);
+ if (!conn)
+ goto unlock;
- if (conn->state == BT_CONFIG) {
- if (!ev->status && lmp_ssp_capable(hdev) &&
- lmp_ssp_capable(conn)) {
- struct hci_cp_read_remote_ext_features cp;
- cp.handle = ev->handle;
- cp.page = 0x01;
- hci_send_cmd(hdev,
- HCI_OP_READ_REMOTE_EXT_FEATURES,
- sizeof(cp), &cp);
- } else if (!ev->status && conn->out &&
- conn->sec_level == BT_SECURITY_HIGH) {
- struct hci_cp_auth_requested cp;
- cp.handle = ev->handle;
- hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
+ if (!ev->status)
+ memcpy(conn->features, ev->features, 8);
+
+ if (conn->state != BT_CONFIG)
+ goto unlock;
+
+ if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
+ struct hci_cp_read_remote_ext_features cp;
+ cp.handle = ev->handle;
+ cp.page = 0x01;
+ hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
sizeof(cp), &cp);
- } else {
- conn->state = BT_CONNECTED;
- hci_proto_connect_cfm(conn, ev->status);
- hci_conn_put(conn);
- }
- }
+ goto unlock;
+ }
+
+ if (!ev->status) {
+ struct hci_cp_remote_name_req cp;
+ memset(&cp, 0, sizeof(cp));
+ bacpy(&cp.bdaddr, &conn->dst);
+ cp.pscan_rep_mode = 0x02;
+ hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
+ if (!hci_outgoing_auth_needed(hdev, conn)) {
+ conn->state = BT_CONNECTED;
+ hci_proto_connect_cfm(conn, ev->status);
+ hci_conn_put(conn);
+ }
+
+unlock:
hci_dev_unlock(hdev);
}
@@ -1449,10 +1512,12 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
conn->sent -= count;
if (conn->type == ACL_LINK) {
- if ((hdev->acl_cnt += count) > hdev->acl_pkts)
+ hdev->acl_cnt += count;
+ if (hdev->acl_cnt > hdev->acl_pkts)
hdev->acl_cnt = hdev->acl_pkts;
} else {
- if ((hdev->sco_cnt += count) > hdev->sco_pkts)
+ hdev->sco_cnt += count;
+ if (hdev->sco_cnt > hdev->sco_pkts)
hdev->sco_cnt = hdev->sco_pkts;
}
}
@@ -1547,7 +1612,8 @@ static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *sk
if (conn && !ev->status) {
struct inquiry_entry *ie;
- if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) {
+ ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
+ if (ie) {
ie->data.clock_offset = ev->clock_offset;
ie->timestamp = jiffies;
}
@@ -1581,7 +1647,8 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *
hci_dev_lock(hdev);
- if ((ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr))) {
+ ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
+ if (ie) {
ie->data.pscan_rep_mode = ev->pscan_rep_mode;
ie->timestamp = jiffies;
}
@@ -1646,32 +1713,37 @@ static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_b
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
- if (conn) {
- if (!ev->status && ev->page == 0x01) {
- struct inquiry_entry *ie;
+ if (!conn)
+ goto unlock;
- if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst)))
- ie->data.ssp_mode = (ev->features[0] & 0x01);
+ if (!ev->status && ev->page == 0x01) {
+ struct inquiry_entry *ie;
- conn->ssp_mode = (ev->features[0] & 0x01);
- }
+ ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
+ if (ie)
+ ie->data.ssp_mode = (ev->features[0] & 0x01);
- if (conn->state == BT_CONFIG) {
- if (!ev->status && hdev->ssp_mode > 0 &&
- conn->ssp_mode > 0 && conn->out &&
- conn->sec_level != BT_SECURITY_SDP) {
- struct hci_cp_auth_requested cp;
- cp.handle = ev->handle;
- hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
- sizeof(cp), &cp);
- } else {
- conn->state = BT_CONNECTED;
- hci_proto_connect_cfm(conn, ev->status);
- hci_conn_put(conn);
- }
- }
+ conn->ssp_mode = (ev->features[0] & 0x01);
}
+ if (conn->state != BT_CONFIG)
+ goto unlock;
+
+ if (!ev->status) {
+ struct hci_cp_remote_name_req cp;
+ memset(&cp, 0, sizeof(cp));
+ bacpy(&cp.bdaddr, &conn->dst);
+ cp.pscan_rep_mode = 0x02;
+ hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
+ }
+
+ if (!hci_outgoing_auth_needed(hdev, conn)) {
+ conn->state = BT_CONNECTED;
+ hci_proto_connect_cfm(conn, ev->status);
+ hci_conn_put(conn);
+ }
+
+unlock:
hci_dev_unlock(hdev);
}
@@ -1821,7 +1893,8 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
hci_dev_lock(hdev);
- if ((ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr)))
+ ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
+ if (ie)
ie->data.ssp_mode = (ev->features[0] & 0x01);
hci_dev_unlock(hdev);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 83acd164d39e..b3753bad2a55 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -43,7 +43,7 @@
#include <net/sock.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -125,7 +125,8 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
continue;
}
- if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
continue;
/* Put type byte before the data */
@@ -370,7 +371,8 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
}
if (haddr->hci_dev != HCI_DEV_NONE) {
- if (!(hdev = hci_dev_get(haddr->hci_dev))) {
+ hdev = hci_dev_get(haddr->hci_dev);
+ if (!hdev) {
err = -ENODEV;
goto done;
}
@@ -457,7 +459,8 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (sk->sk_state == BT_CLOSED)
return 0;
- if (!(skb = skb_recv_datagram(sk, flags, noblock, &err)))
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
return err;
msg->msg_namelen = 0;
@@ -499,7 +502,8 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
lock_sock(sk);
- if (!(hdev = hci_pi(sk)->hdev)) {
+ hdev = hci_pi(sk)->hdev;
+ if (!hdev) {
err = -EBADFD;
goto done;
}
@@ -509,7 +513,8 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
goto done;
}
- if (!(skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err)))
+ skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
goto done;
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index c0ee8b3928ed..29544c21f4b5 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -107,6 +107,7 @@ static void __hidp_unlink_session(struct hidp_session *session)
static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci)
{
+ memset(ci, 0, sizeof(*ci));
bacpy(&ci->bdaddr, &session->bdaddr);
ci->flags = session->flags;
@@ -115,7 +116,6 @@ static void __hidp_copy_session(struct hidp_session *session, struct hidp_connin
ci->vendor = 0x0000;
ci->product = 0x0000;
ci->version = 0x0000;
- memset(ci->name, 0, 128);
if (session->input) {
ci->vendor = session->input->id.vendor;
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index cd8f6ea03841..c12eccfdfe01 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -57,7 +57,7 @@
#define VERSION "2.15"
-static int disable_ertm = 0;
+static int disable_ertm;
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };
@@ -83,6 +83,18 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
/* ---- L2CAP timers ---- */
+static void l2cap_sock_set_timer(struct sock *sk, long timeout)
+{
+ BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
+}
+
+static void l2cap_sock_clear_timer(struct sock *sk)
+{
+ BT_DBG("sock %p state %d", sk, sk->sk_state);
+ sk_stop_timer(sk, &sk->sk_timer);
+}
+
static void l2cap_sock_timeout(unsigned long arg)
{
struct sock *sk = (struct sock *) arg;
@@ -92,6 +104,14 @@ static void l2cap_sock_timeout(unsigned long arg)
bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
+ /* sk is owned by user. Try again later */
+ l2cap_sock_set_timer(sk, HZ / 5);
+ bh_unlock_sock(sk);
+ sock_put(sk);
+ return;
+ }
+
if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
reason = ECONNREFUSED;
else if (sk->sk_state == BT_CONNECT &&
@@ -108,18 +128,6 @@ static void l2cap_sock_timeout(unsigned long arg)
sock_put(sk);
}
-static void l2cap_sock_set_timer(struct sock *sk, long timeout)
-{
- BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
- sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
-}
-
-static void l2cap_sock_clear_timer(struct sock *sk)
-{
- BT_DBG("sock %p state %d", sk, sk->sk_state);
- sk_stop_timer(sk, &sk->sk_timer);
-}
-
/* ---- L2CAP channels ---- */
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
{
@@ -743,11 +751,13 @@ found:
/* Find socket with psm and source bdaddr.
* Returns closest match.
*/
-static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
+static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
struct sock *sk = NULL, *sk1 = NULL;
struct hlist_node *node;
+ read_lock(&l2cap_sk_list.lock);
+
sk_for_each(sk, node, &l2cap_sk_list.head) {
if (state && sk->sk_state != state)
continue;
@@ -762,20 +772,10 @@ static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src
sk1 = sk;
}
}
- return node ? sk : sk1;
-}
-/* Find socket with given address (psm, src).
- * Returns locked socket */
-static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
-{
- struct sock *s;
- read_lock(&l2cap_sk_list.lock);
- s = __l2cap_get_sock_by_psm(state, psm, src);
- if (s)
- bh_lock_sock(s);
read_unlock(&l2cap_sk_list.lock);
- return s;
+
+ return node ? sk : sk1;
}
static void l2cap_sock_destruct(struct sock *sk)
@@ -2926,6 +2926,8 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
goto sendresp;
}
+ bh_lock_sock(parent);
+
/* Check if the ACL is secure enough (if not SDP) */
if (psm != cpu_to_le16(0x0001) &&
!hci_conn_check_link_mode(conn->hcon)) {
@@ -3078,6 +3080,14 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
break;
default:
+ /* don't delete l2cap channel if sk is owned by user */
+ if (sock_owned_by_user(sk)) {
+ sk->sk_state = BT_DISCONN;
+ l2cap_sock_clear_timer(sk);
+ l2cap_sock_set_timer(sk, HZ / 5);
+ break;
+ }
+
l2cap_chan_del(sk, ECONNREFUSED);
break;
}
@@ -3283,6 +3293,15 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
sk->sk_shutdown = SHUTDOWN_MASK;
+ /* don't delete l2cap channel if sk is owned by user */
+ if (sock_owned_by_user(sk)) {
+ sk->sk_state = BT_DISCONN;
+ l2cap_sock_clear_timer(sk);
+ l2cap_sock_set_timer(sk, HZ / 5);
+ bh_unlock_sock(sk);
+ return 0;
+ }
+
l2cap_chan_del(sk, ECONNRESET);
bh_unlock_sock(sk);
@@ -3305,6 +3324,15 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
if (!sk)
return 0;
+ /* don't delete l2cap channel if sk is owned by user */
+ if (sock_owned_by_user(sk)) {
+ sk->sk_state = BT_DISCONN;
+ l2cap_sock_clear_timer(sk);
+ l2cap_sock_set_timer(sk, HZ / 5);
+ bh_unlock_sock(sk);
+ return 0;
+ }
+
l2cap_chan_del(sk, 0);
bh_unlock_sock(sk);
@@ -4134,11 +4162,10 @@ static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
__mod_retrans_timer();
pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
- if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
+ if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
l2cap_send_ack(pi);
- } else {
+ else
l2cap_ertm_send(sk);
- }
}
}
@@ -4430,6 +4457,8 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
if (!sk)
goto drop;
+ bh_lock_sock(sk);
+
BT_DBG("sk %p, len %d", sk, skb->len);
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
@@ -4841,8 +4870,10 @@ static int __init l2cap_init(void)
return err;
_busy_wq = create_singlethread_workqueue("l2cap");
- if (!_busy_wq)
- goto error;
+ if (!_busy_wq) {
+ proto_unregister(&l2cap_proto);
+ return -ENOMEM;
+ }
err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
if (err < 0) {
@@ -4870,6 +4901,7 @@ static int __init l2cap_init(void)
return 0;
error:
+ destroy_workqueue(_busy_wq);
proto_unregister(&l2cap_proto);
return err;
}
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index fa642aa652bd..c1e2bbafb549 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -41,7 +41,7 @@
#include <linux/slab.h>
#include <net/sock.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -51,10 +51,10 @@
#define VERSION "1.11"
-static int disable_cfc = 0;
+static int disable_cfc;
+static int l2cap_ertm;
static int channel_mtu = -1;
static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;
-static int l2cap_ertm = 0;
static struct task_struct *rfcomm_thread;
@@ -1901,7 +1901,7 @@ static inline void rfcomm_check_connection(struct rfcomm_session *s)
BT_DBG("%p state %ld", s, s->state);
- switch(sk->sk_state) {
+ switch (sk->sk_state) {
case BT_CONNECTED:
s->state = BT_CONNECT;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index aec505f934df..66cc1f0c3df8 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -45,7 +45,7 @@
#include <net/sock.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -140,11 +140,13 @@ static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
/* Find socket with channel and source bdaddr.
* Returns closest match.
*/
-static struct sock *__rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src)
+static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src)
{
struct sock *sk = NULL, *sk1 = NULL;
struct hlist_node *node;
+ read_lock(&rfcomm_sk_list.lock);
+
sk_for_each(sk, node, &rfcomm_sk_list.head) {
if (state && sk->sk_state != state)
continue;
@@ -159,19 +161,10 @@ static struct sock *__rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t
sk1 = sk;
}
}
- return node ? sk : sk1;
-}
-/* Find socket with given address (channel, src).
- * Returns locked socket */
-static inline struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src)
-{
- struct sock *s;
- read_lock(&rfcomm_sk_list.lock);
- s = __rfcomm_get_sock_by_channel(state, channel, src);
- if (s) bh_lock_sock(s);
read_unlock(&rfcomm_sk_list.lock);
- return s;
+
+ return node ? sk : sk1;
}
static void rfcomm_sock_destruct(struct sock *sk)
@@ -895,7 +888,8 @@ static int rfcomm_sock_shutdown(struct socket *sock, int how)
BT_DBG("sock %p, sk %p", sock, sk);
- if (!sk) return 0;
+ if (!sk)
+ return 0;
lock_sock(sk);
if (!sk->sk_shutdown) {
@@ -945,6 +939,8 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
if (!parent)
return 0;
+ bh_lock_sock(parent);
+
/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
BT_DBG("backlog full %d", parent->sk_ack_backlog);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index a9b81f5dacd1..2575c2db6404 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -58,9 +58,9 @@ struct rfcomm_dev {
bdaddr_t src;
bdaddr_t dst;
- u8 channel;
+ u8 channel;
- uint modem_status;
+ uint modem_status;
struct rfcomm_dlc *dlc;
struct tty_struct *tty;
@@ -69,7 +69,7 @@ struct rfcomm_dev {
struct device *tty_dev;
- atomic_t wmem_alloc;
+ atomic_t wmem_alloc;
struct sk_buff_head pending;
};
@@ -431,7 +431,8 @@ static int rfcomm_release_dev(void __user *arg)
BT_DBG("dev_id %d flags 0x%x", req.dev_id, req.flags);
- if (!(dev = rfcomm_dev_get(req.dev_id)))
+ dev = rfcomm_dev_get(req.dev_id);
+ if (!dev)
return -ENODEV;
if (dev->flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) {
@@ -470,7 +471,8 @@ static int rfcomm_get_dev_list(void __user *arg)
size = sizeof(*dl) + dev_num * sizeof(*di);
- if (!(dl = kmalloc(size, GFP_KERNEL)))
+ dl = kmalloc(size, GFP_KERNEL);
+ if (!dl)
return -ENOMEM;
di = dl->dev_info;
@@ -513,7 +515,8 @@ static int rfcomm_get_dev_info(void __user *arg)
if (copy_from_user(&di, arg, sizeof(di)))
return -EFAULT;
- if (!(dev = rfcomm_dev_get(di.id)))
+ dev = rfcomm_dev_get(di.id);
+ if (!dev)
return -ENODEV;
di.flags = dev->flags;
@@ -561,7 +564,8 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
return;
}
- if (!(tty = dev->tty) || !skb_queue_empty(&dev->pending)) {
+ tty = dev->tty;
+ if (!tty || !skb_queue_empty(&dev->pending)) {
skb_queue_tail(&dev->pending, skb);
return;
}
@@ -796,7 +800,8 @@ static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, in
memcpy(skb_put(skb, size), buf + sent, size);
- if ((err = rfcomm_dlc_send(dlc, skb)) < 0) {
+ err = rfcomm_dlc_send(dlc, skb);
+ if (err < 0) {
kfree_skb(skb);
break;
}
@@ -892,7 +897,7 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
/* Parity on/off and when on, odd/even */
if (((old->c_cflag & PARENB) != (new->c_cflag & PARENB)) ||
- ((old->c_cflag & PARODD) != (new->c_cflag & PARODD)) ) {
+ ((old->c_cflag & PARODD) != (new->c_cflag & PARODD))) {
changes |= RFCOMM_RPN_PM_PARITY;
BT_DBG("Parity change detected.");
}
@@ -937,11 +942,10 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
/* POSIX does not support 1.5 stop bits and RFCOMM does not
* support 2 stop bits. So a request for 2 stop bits gets
* translated to 1.5 stop bits */
- if (new->c_cflag & CSTOPB) {
+ if (new->c_cflag & CSTOPB)
stop_bits = RFCOMM_RPN_STOP_15;
- } else {
+ else
stop_bits = RFCOMM_RPN_STOP_1;
- }
/* Handle number of data bits [5-8] */
if ((old->c_cflag & CSIZE) != (new->c_cflag & CSIZE))
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 66b9e5c0523a..960c6d1637da 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -44,7 +44,7 @@
#include <net/sock.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -52,7 +52,7 @@
#define VERSION "0.6"
-static int disable_esco = 0;
+static int disable_esco;
static const struct proto_ops sco_sock_ops;
@@ -138,16 +138,17 @@ static inline struct sock *sco_chan_get(struct sco_conn *conn)
static int sco_conn_del(struct hci_conn *hcon, int err)
{
- struct sco_conn *conn;
+ struct sco_conn *conn = hcon->sco_data;
struct sock *sk;
- if (!(conn = hcon->sco_data))
+ if (!conn)
return 0;
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Kill socket */
- if ((sk = sco_chan_get(conn))) {
+ sk = sco_chan_get(conn);
+ if (sk) {
bh_lock_sock(sk);
sco_sock_clear_timer(sk);
sco_chan_del(sk, err);
@@ -185,7 +186,8 @@ static int sco_connect(struct sock *sk)
BT_DBG("%s -> %s", batostr(src), batostr(dst));
- if (!(hdev = hci_get_route(dst, src)))
+ hdev = hci_get_route(dst, src);
+ if (!hdev)
return -EHOSTUNREACH;
hci_dev_lock_bh(hdev);
@@ -510,7 +512,8 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
/* Set destination address and psm */
bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
- if ((err = sco_connect(sk)))
+ err = sco_connect(sk);
+ if (err)
goto done;
err = bt_sock_wait_state(sk, BT_CONNECTED,
@@ -828,13 +831,14 @@ static void sco_chan_del(struct sock *sk, int err)
static void sco_conn_ready(struct sco_conn *conn)
{
- struct sock *parent, *sk;
+ struct sock *parent;
+ struct sock *sk = conn->sk;
BT_DBG("conn %p", conn);
sco_conn_lock(conn);
- if ((sk = conn->sk)) {
+ if (sk) {
sco_sock_clear_timer(sk);
bh_lock_sock(sk);
sk->sk_state = BT_CONNECTED;
diff --git a/net/compat.c b/net/compat.c
index 63d260e81472..3649d5895361 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -41,10 +41,12 @@ static inline int iov_from_user_compat_to_kern(struct iovec *kiov,
compat_size_t len;
if (get_user(len, &uiov32->iov_len) ||
- get_user(buf, &uiov32->iov_base)) {
- tot_len = -EFAULT;
- break;
- }
+ get_user(buf, &uiov32->iov_base))
+ return -EFAULT;
+
+ if (len > INT_MAX - tot_len)
+ len = INT_MAX - tot_len;
+
tot_len += len;
kiov->iov_base = compat_ptr(buf);
kiov->iov_len = (__kernel_size_t) len;
diff --git a/net/core/dev.c b/net/core/dev.c
index 78b5a89b0f40..35dfb8318483 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1685,10 +1685,10 @@ EXPORT_SYMBOL(netif_device_attach);
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
- return ((features & NETIF_F_GEN_CSUM) ||
- ((features & NETIF_F_IP_CSUM) &&
+ return ((features & NETIF_F_NO_CSUM) ||
+ ((features & NETIF_F_V4_CSUM) &&
protocol == htons(ETH_P_IP)) ||
- ((features & NETIF_F_IPV6_CSUM) &&
+ ((features & NETIF_F_V6_CSUM) &&
protocol == htons(ETH_P_IPV6)) ||
((features & NETIF_F_FCOE_CRC) &&
protocol == htons(ETH_P_FCOE)));
@@ -1696,22 +1696,18 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
+ __be16 protocol = skb->protocol;
int features = dev->features;
- if (vlan_tx_tag_present(skb))
+ if (vlan_tx_tag_present(skb)) {
features &= dev->vlan_features;
-
- if (can_checksum_protocol(features, skb->protocol))
- return true;
-
- if (skb->protocol == htons(ETH_P_8021Q)) {
+ } else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
- if (can_checksum_protocol(dev->features & dev->vlan_features,
- veh->h_vlan_encapsulated_proto))
- return true;
+ protocol = veh->h_vlan_encapsulated_proto;
+ features &= dev->vlan_features;
}
- return false;
+ return can_checksum_protocol(features, protocol);
}
/**
@@ -2213,7 +2209,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
}
static DEFINE_PER_CPU(int, xmit_recursion);
-#define RECURSION_LIMIT 3
+#define RECURSION_LIMIT 10
/**
* dev_queue_xmit - transmit a buffer
@@ -2413,7 +2409,7 @@ EXPORT_SYMBOL(__skb_get_rxhash);
#ifdef CONFIG_RPS
/* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
/*
@@ -2425,7 +2421,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow **rflowp)
{
struct netdev_rx_queue *rxqueue;
- struct rps_map *map = NULL;
+ struct rps_map *map;
struct rps_dev_flow_table *flow_table;
struct rps_sock_flow_table *sock_flow_table;
int cpu = -1;
@@ -2444,15 +2440,15 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
} else
rxqueue = dev->_rx;
- if (rxqueue->rps_map) {
- map = rcu_dereference(rxqueue->rps_map);
- if (map && map->len == 1) {
+ map = rcu_dereference(rxqueue->rps_map);
+ if (map) {
+ if (map->len == 1) {
tcpu = map->cpus[0];
if (cpu_online(tcpu))
cpu = tcpu;
goto done;
}
- } else if (!rxqueue->rps_flow_table) {
+ } else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
goto done;
}
@@ -5416,7 +5412,7 @@ void netdev_run_todo(void)
/* paranoia */
BUG_ON(netdev_refcnt_read(dev));
WARN_ON(rcu_dereference_raw(dev->ip_ptr));
- WARN_ON(dev->ip6_ptr);
+ WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
WARN_ON(dev->dn_ptr);
if (dev->destructor)
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 1bc3f253ba6c..82a4369ae150 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -351,12 +351,12 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
list_for_each_entry(r, &ops->rules_list, list) {
if (r->pref == rule->target) {
- rule->ctarget = r;
+ RCU_INIT_POINTER(rule->ctarget, r);
break;
}
}
- if (rule->ctarget == NULL)
+ if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
unresolved = 1;
} else if (rule->action == FR_ACT_GOTO)
goto errout_free;
@@ -373,6 +373,11 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
fib_rule_get(rule);
+ if (last)
+ list_add_rcu(&rule->list, &last->list);
+ else
+ list_add_rcu(&rule->list, &ops->rules_list);
+
if (ops->unresolved_rules) {
/*
* There are unresolved goto rules in the list, check if
@@ -381,7 +386,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
list_for_each_entry(r, &ops->rules_list, list) {
if (r->action == FR_ACT_GOTO &&
r->target == rule->pref) {
- BUG_ON(r->ctarget != NULL);
+ BUG_ON(rtnl_dereference(r->ctarget) != NULL);
rcu_assign_pointer(r->ctarget, rule);
if (--ops->unresolved_rules == 0)
break;
@@ -395,11 +400,6 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
if (unresolved)
ops->unresolved_rules++;
- if (last)
- list_add_rcu(&rule->list, &last->list);
- else
- list_add_rcu(&rule->list, &ops->rules_list);
-
notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
flush_route_cache(ops);
rules_ops_put(ops);
@@ -487,7 +487,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
*/
if (ops->nr_goto_rules > 0) {
list_for_each_entry(tmp, &ops->rules_list, list) {
- if (tmp->ctarget == rule) {
+ if (rtnl_dereference(tmp->ctarget) == rule) {
rcu_assign_pointer(tmp->ctarget, NULL);
ops->unresolved_rules++;
}
@@ -545,7 +545,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
frh->action = rule->action;
frh->flags = rule->flags;
- if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
+ if (rule->action == FR_ACT_GOTO &&
+ rcu_dereference_raw(rule->ctarget) == NULL)
frh->flags |= FIB_RULE_UNRESOLVED;
if (rule->iifname[0]) {
diff --git a/net/core/filter.c b/net/core/filter.c
index 7adf50352918..7beaec36b541 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -89,8 +89,8 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
rcu_read_lock_bh();
filter = rcu_dereference_bh(sk->sk_filter);
if (filter) {
- unsigned int pkt_len = sk_run_filter(skb, filter->insns,
- filter->len);
+ unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len);
+
err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
}
rcu_read_unlock_bh();
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 72aceb1fe4fa..c40f27e7d208 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -35,10 +35,9 @@
* in any case.
*/
-long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
{
- int size, ct;
- long err;
+ int size, ct, err;
if (m->msg_namelen) {
if (mode == VERIFY_READ) {
@@ -62,14 +61,13 @@ long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
err = 0;
for (ct = 0; ct < m->msg_iovlen; ct++) {
- err += iov[ct].iov_len;
- /*
- * Goal is not to verify user data, but to prevent returning
- * negative value, which is interpreted as errno.
- * Overflow is still possible, but it is harmless.
- */
- if (err < 0)
- return -EMSGSIZE;
+ size_t len = iov[ct].iov_len;
+
+ if (len > INT_MAX - err) {
+ len = INT_MAX - err;
+ iov[ct].iov_len = len;
+ }
+ err += len;
}
return err;
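
The loop above clamps rather than rejects: the summed iovec lengths are forced to stay representable as a non-negative int, so the return value can never be mistaken for an errno. A standalone sketch of the same arithmetic, using a hypothetical helper and a plain size_t array instead of struct iovec:

    #include <limits.h>
    #include <stddef.h>

    /* Hypothetical helper, not part of the patch: sum lengths while
     * truncating any element that would push the total past INT_MAX. */
    static int total_iov_len(size_t *lens, int n)
    {
    	int total = 0;
    	int i;

    	for (i = 0; i < n; i++) {
    		size_t len = lens[i];

    		if (len > (size_t)(INT_MAX - total)) {
    			len = INT_MAX - total;
    			lens[i] = len;	/* truncate, as the patch does to iov_len */
    		}
    		total += len;
    	}
    	return total;
    }
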
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index b143173e3eb2..a5ff5a89f376 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -598,7 +598,8 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
}
spin_lock(&rps_map_lock);
- old_map = queue->rps_map;
+ old_map = rcu_dereference_protected(queue->rps_map,
+ lockdep_is_held(&rps_map_lock));
rcu_assign_pointer(queue->rps_map, map);
spin_unlock(&rps_map_lock);
@@ -677,7 +678,8 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
table = NULL;
spin_lock(&rps_dev_flow_lock);
- old_table = queue->rps_flow_table;
+ old_table = rcu_dereference_protected(queue->rps_flow_table,
+ lockdep_is_held(&rps_dev_flow_lock));
rcu_assign_pointer(queue->rps_flow_table, table);
spin_unlock(&rps_dev_flow_lock);
@@ -705,13 +707,17 @@ static void rx_queue_release(struct kobject *kobj)
{
struct netdev_rx_queue *queue = to_rx_queue(kobj);
struct netdev_rx_queue *first = queue->first;
+ struct rps_map *map;
+ struct rps_dev_flow_table *flow_table;
- if (queue->rps_map)
- call_rcu(&queue->rps_map->rcu, rps_map_release);
- if (queue->rps_flow_table)
- call_rcu(&queue->rps_flow_table->rcu,
- rps_dev_flow_table_release);
+ map = rcu_dereference_raw(queue->rps_map);
+ if (map)
+ call_rcu(&map->rcu, rps_map_release);
+
+ flow_table = rcu_dereference_raw(queue->rps_flow_table);
+ if (flow_table)
+ call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
if (atomic_dec_and_test(&first->count))
kfree(first);
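
These hunks follow the update-side idiom applied throughout this series to __rcu-annotated pointers: writers holding the relevant lock read the current value with rcu_dereference_protected() plus a lockdep expression, publish the replacement with rcu_assign_pointer(), and defer freeing until readers are done. A minimal self-contained sketch with hypothetical names (struct foo, example_lock, example_head):

    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/rcupdate.h>

    struct foo {
    	int val;
    };

    static DEFINE_SPINLOCK(example_lock);
    static struct foo __rcu *example_head;	/* hypothetical __rcu pointer */

    static void example_replace(struct foo *new)
    {
    	struct foo *old;

    	spin_lock(&example_lock);
    	old = rcu_dereference_protected(example_head,
    					lockdep_is_held(&example_lock));
    	rcu_assign_pointer(example_head, new);	/* publish to lockless readers */
    	spin_unlock(&example_lock);

    	if (old) {
    		synchronize_rcu();	/* wait for pre-existing readers */
    		kfree(old);
    	}
    }
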
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index c988e685433a..3f860261c5ee 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -42,7 +42,9 @@ static int net_assign_generic(struct net *net, int id, void *data)
BUG_ON(!mutex_is_locked(&net_mutex));
BUG_ON(id == 0);
- ng = old_ng = net->gen;
+ old_ng = rcu_dereference_protected(net->gen,
+ lockdep_is_held(&net_mutex));
+ ng = old_ng;
if (old_ng->len >= id)
goto assign;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 2c0df0f95b3d..fbce4b05a53e 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -771,10 +771,10 @@ done:
static unsigned long num_arg(const char __user * user_buffer,
unsigned long maxlen, unsigned long *num)
{
- int i = 0;
+ int i;
*num = 0;
- for (; i < maxlen; i++) {
+ for (i = 0; i < maxlen; i++) {
char c;
if (get_user(c, &user_buffer[i]))
return -EFAULT;
@@ -789,9 +789,9 @@ static unsigned long num_arg(const char __user * user_buffer,
static int strn_len(const char __user * user_buffer, unsigned int maxlen)
{
- int i = 0;
+ int i;
- for (; i < maxlen; i++) {
+ for (i = 0; i < maxlen; i++) {
char c;
if (get_user(c, &user_buffer[i]))
return -EFAULT;
@@ -846,7 +846,7 @@ static ssize_t pktgen_if_write(struct file *file,
{
struct seq_file *seq = file->private_data;
struct pktgen_dev *pkt_dev = seq->private;
- int i = 0, max, len;
+ int i, max, len;
char name[16], valstr[32];
unsigned long value = 0;
char *pg_result = NULL;
@@ -860,13 +860,13 @@ static ssize_t pktgen_if_write(struct file *file,
return -EINVAL;
}
- max = count - i;
- tmp = count_trail_chars(&user_buffer[i], max);
+ max = count;
+ tmp = count_trail_chars(user_buffer, max);
if (tmp < 0) {
pr_warning("illegal format\n");
return tmp;
}
- i += tmp;
+ i = tmp;
/* Read variable name */
@@ -887,10 +887,11 @@ static ssize_t pktgen_if_write(struct file *file,
i += len;
if (debug) {
- char tb[count + 1];
- if (copy_from_user(tb, user_buffer, count))
+ size_t copy = min_t(size_t, count, 1023);
+ char tb[copy + 1];
+ if (copy_from_user(tb, user_buffer, copy))
return -EFAULT;
- tb[count] = 0;
+ tb[copy] = 0;
printk(KERN_DEBUG "pktgen: %s,%lu buffer -:%s:-\n", name,
(unsigned long)count, tb);
}
@@ -1764,7 +1765,7 @@ static ssize_t pktgen_thread_write(struct file *file,
{
struct seq_file *seq = file->private_data;
struct pktgen_thread *t = seq->private;
- int i = 0, max, len, ret;
+ int i, max, len, ret;
char name[40];
char *pg_result;
@@ -1773,12 +1774,12 @@ static ssize_t pktgen_thread_write(struct file *file,
return -EINVAL;
}
- max = count - i;
- len = count_trail_chars(&user_buffer[i], max);
+ max = count;
+ len = count_trail_chars(user_buffer, max);
if (len < 0)
return len;
- i += len;
+ i = len;
/* Read variable name */
@@ -1975,7 +1976,7 @@ static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev,
const char *ifname)
{
char b[IFNAMSIZ+5];
- int i = 0;
+ int i;
for (i = 0; ifname[i] != '@'; i++) {
if (i == IFNAMSIZ)
@@ -2519,8 +2520,8 @@ static void free_SAs(struct pktgen_dev *pkt_dev)
{
if (pkt_dev->cflows) {
/* let go of the SAs if we have them */
- int i = 0;
- for (; i < pkt_dev->cflows; i++) {
+ int i;
+ for (i = 0; i < pkt_dev->cflows; i++) {
struct xfrm_state *x = pkt_dev->flows[i].x;
if (x) {
xfrm_state_put(x);
diff --git a/net/core/sock.c b/net/core/sock.c
index 11db43632df8..3eed5424e659 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1225,7 +1225,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
sock_reset_flag(newsk, SOCK_DONE);
skb_queue_head_init(&newsk->sk_error_queue);
- filter = newsk->sk_filter;
+ filter = rcu_dereference_protected(newsk->sk_filter, 1);
if (filter != NULL)
sk_filter_charge(newsk, filter);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 01eee5d984be..385b6095fdc4 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -34,7 +34,8 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
mutex_lock(&sock_flow_mutex);
- orig_sock_table = rps_sock_flow_table;
+ orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
+ lockdep_is_held(&sock_flow_mutex));
size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 117fb093dcaf..75c3582a7678 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -134,13 +134,41 @@ static inline int ccid_get_current_tx_ccid(struct dccp_sock *dp)
extern void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk);
extern void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk);
+/*
+ * Congestion control of queued data packets via CCID decision.
+ *
+ * The TX CCID performs its congestion-control by indicating whether and when a
+ * queued packet may be sent, using the return code of ccid_hc_tx_send_packet().
+ * The following modes are supported via the symbolic constants below:
+ * - timer-based pacing (CCID returns a delay value in milliseconds);
+ * - autonomous dequeueing (CCID internally schedules dccps_xmitlet).
+ */
+
+enum ccid_dequeueing_decision {
+ CCID_PACKET_SEND_AT_ONCE = 0x00000, /* "green light": no delay */
+ CCID_PACKET_DELAY_MAX = 0x0FFFF, /* maximum delay in msecs */
+ CCID_PACKET_DELAY = 0x10000, /* CCID msec-delay mode */
+ CCID_PACKET_WILL_DEQUEUE_LATER = 0x20000, /* CCID autonomous mode */
+ CCID_PACKET_ERR = 0xF0000, /* error condition */
+};
+
+static inline int ccid_packet_dequeue_eval(const int return_code)
+{
+ if (return_code < 0)
+ return CCID_PACKET_ERR;
+ if (return_code == 0)
+ return CCID_PACKET_SEND_AT_ONCE;
+ if (return_code <= CCID_PACKET_DELAY_MAX)
+ return CCID_PACKET_DELAY;
+ return return_code;
+}
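
The convention is compact: a TX CCID returns 0 to send at once, a positive millisecond delay (at most CCID_PACKET_DELAY_MAX) to have the caller arm dccps_xmit_timer, CCID_PACKET_WILL_DEQUEUE_LATER to schedule dccps_xmitlet itself, and a negative value to have the packet dropped. A minimal sketch of a hypothetical rate-based CCID hook using this convention (next_send_time_ms() is an invented helper, not part of the patch):

    /* Hypothetical TX CCID hook; only the return-code convention matters. */
    static int example_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
    {
    	unsigned long delay_ms = next_send_time_ms(sk);	/* invented pacing helper */

    	if (delay_ms == 0)
    		return CCID_PACKET_SEND_AT_ONCE;	/* caller transmits immediately */
    	if (delay_ms > CCID_PACKET_DELAY_MAX)
    		delay_ms = CCID_PACKET_DELAY_MAX;
    	return delay_ms;	/* ccid_packet_dequeue_eval() maps this to CCID_PACKET_DELAY */
    }
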
+
static inline int ccid_hc_tx_send_packet(struct ccid *ccid, struct sock *sk,
struct sk_buff *skb)
{
- int rc = 0;
if (ccid->ccid_ops->ccid_hc_tx_send_packet != NULL)
- rc = ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb);
- return rc;
+ return ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb);
+ return CCID_PACKET_SEND_AT_ONCE;
}
static inline void ccid_hc_tx_packet_sent(struct ccid *ccid, struct sock *sk,
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index d850e291f87c..6576eae9e779 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -78,12 +78,9 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
{
- struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
-
- if (hc->tx_pipe < hc->tx_cwnd)
- return 0;
-
- return 1; /* XXX CCID should dequeue when ready instead of polling */
+ if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk)))
+ return CCID_PACKET_WILL_DEQUEUE_LATER;
+ return CCID_PACKET_SEND_AT_ONCE;
}
static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
@@ -115,6 +112,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
{
struct sock *sk = (struct sock *)data;
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+ const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
@@ -129,8 +127,6 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
if (hc->tx_rto > DCCP_RTO_MAX)
hc->tx_rto = DCCP_RTO_MAX;
- sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
-
/* adjust pipe, cwnd etc */
hc->tx_ssthresh = hc->tx_cwnd / 2;
if (hc->tx_ssthresh < 2)
@@ -146,6 +142,12 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
hc->tx_rpseq = 0;
hc->tx_rpdupack = -1;
ccid2_change_l_ack_ratio(sk, 1);
+
+ /* if we were blocked before, we may now send cwnd=1 packet */
+ if (sender_was_blocked)
+ tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+ /* restart backed-off timer */
+ sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
out:
bh_unlock_sock(sk);
sock_put(sk);
@@ -434,6 +436,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
struct dccp_sock *dp = dccp_sk(sk);
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+ const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
u64 ackno, seqno;
struct ccid2_seq *seqp;
unsigned char *vector;
@@ -631,6 +634,10 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
sk_stop_timer(sk, &hc->tx_rtotimer);
else
sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
+
+ /* check if incoming Acks allow pending packets to be sent */
+ if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
+ tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
}
static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index 9731c2dc1487..25cb6b216eda 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -81,6 +81,11 @@ struct ccid2_hc_tx_sock {
u64 tx_high_ack;
};
+static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hc)
+{
+ return hc->tx_pipe >= hc->tx_cwnd;
+}
+
struct ccid2_hc_rx_sock {
int rx_data;
};
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 3060a60ed5ab..3d604e1349c0 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -268,11 +268,11 @@ out:
sock_put(sk);
}
-/*
- * returns
- * > 0: delay (in msecs) that should pass before actually sending
- * = 0: can send immediately
- * < 0: error condition; do not send packet
+/**
+ * ccid3_hc_tx_send_packet - Delay-based dequeueing of TX packets
+ * @skb: next packet candidate to send on @sk
+ * This function uses the convention of ccid_packet_dequeue_eval() and
+ * returns a millisecond-delay value between 0 and t_mbi = 64000 msec.
*/
static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
{
@@ -348,7 +348,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
/* set the nominal send time for the next following packet */
hc->tx_t_nom = ktime_add_us(hc->tx_t_nom, hc->tx_t_ipi);
- return 0;
+ return CCID_PACKET_SEND_AT_ONCE;
}
static void ccid3_hc_tx_packet_sent(struct sock *sk, unsigned int len)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 3eb264b60823..a8ed459508b2 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -243,8 +243,9 @@ extern void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
extern void dccp_send_sync(struct sock *sk, const u64 seq,
const enum dccp_pkt_type pkt_type);
-extern void dccp_write_xmit(struct sock *sk, int block);
-extern void dccp_write_space(struct sock *sk);
+extern void dccp_write_xmit(struct sock *sk);
+extern void dccp_write_space(struct sock *sk);
+extern void dccp_flush_write_queue(struct sock *sk, long *time_budget);
extern void dccp_init_xmit_timers(struct sock *sk);
static inline void dccp_clear_xmit_timers(struct sock *sk)
diff --git a/net/dccp/output.c b/net/dccp/output.c
index a988fe9ffcba..45b91853f5ae 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -209,108 +209,150 @@ void dccp_write_space(struct sock *sk)
}
/**
- * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
+ * dccp_wait_for_ccid - Await CCID send permission
* @sk: socket to wait for
- * @skb: current skb to pass on for waiting
- * @delay: sleep timeout in milliseconds (> 0)
- * This function is called by default when the socket is closed, and
- * when a non-zero linger time is set on the socket. For consistency
+ * @delay: timeout in jiffies
+ * This is used by CCIDs which need to delay the send time in process context.
*/
-static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay)
+static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
- struct dccp_sock *dp = dccp_sk(sk);
DEFINE_WAIT(wait);
- unsigned long jiffdelay;
- int rc;
+ long remaining;
+
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ sk->sk_write_pending++;
+ release_sock(sk);
+
+ remaining = schedule_timeout(delay);
+
+ lock_sock(sk);
+ sk->sk_write_pending--;
+ finish_wait(sk_sleep(sk), &wait);
+
+ if (signal_pending(current) || sk->sk_err)
+ return -1;
+ return remaining;
+}
+
+/**
+ * dccp_xmit_packet - Send data packet under control of CCID
+ * Transmits next-queued payload and informs CCID to account for the packet.
+ */
+static void dccp_xmit_packet(struct sock *sk)
+{
+ int err, len;
+ struct dccp_sock *dp = dccp_sk(sk);
+ struct sk_buff *skb = skb_dequeue(&sk->sk_write_queue);
- do {
- dccp_pr_debug("delayed send by %d msec\n", delay);
- jiffdelay = msecs_to_jiffies(delay);
+ if (unlikely(skb == NULL))
+ return;
+ len = skb->len;
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ if (sk->sk_state == DCCP_PARTOPEN) {
+ const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
+ /*
+ * See 8.1.5 - Handshake Completion.
+ *
+ * For robustness we resend Confirm options until the client has
+ * entered OPEN. During the initial feature negotiation, the MPS
+ * is smaller than usual, reduced by the Change/Confirm options.
+ */
+ if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
+ DCCP_WARN("Payload too large (%d) for featneg.\n", len);
+ dccp_send_ack(sk);
+ dccp_feat_list_purge(&dp->dccps_featneg);
+ }
- sk->sk_write_pending++;
- release_sock(sk);
- schedule_timeout(jiffdelay);
- lock_sock(sk);
- sk->sk_write_pending--;
+ inet_csk_schedule_ack(sk);
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+ inet_csk(sk)->icsk_rto,
+ DCCP_RTO_MAX);
+ DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
+ } else if (dccp_ack_pending(sk)) {
+ DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
+ } else {
+ DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
+ }
+
+ err = dccp_transmit_skb(sk, skb);
+ if (err)
+ dccp_pr_debug("transmit_skb() returned err=%d\n", err);
+ /*
+ * Register this one as sent even if an error occurred. To the remote
+ * end a local packet drop is indistinguishable from network loss, i.e.
+ * any local drop will eventually be reported via receiver feedback.
+ */
+ ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
+}
- if (sk->sk_err)
- goto do_error;
- if (signal_pending(current))
- goto do_interrupted;
+/**
+ * dccp_flush_write_queue - Drain queue at end of connection
+ * Since dccp_sendmsg queues packets without waiting for them to be sent, it may
+ * happen that the TX queue is not empty at the end of a connection. We give the
+ * HC-sender CCID a grace period of up to @time_budget jiffies. If this function
+ * returns with a non-empty write queue, it will be purged later.
+ */
+void dccp_flush_write_queue(struct sock *sk, long *time_budget)
+{
+ struct dccp_sock *dp = dccp_sk(sk);
+ struct sk_buff *skb;
+ long delay, rc;
+ while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
- } while ((delay = rc) > 0);
-out:
- finish_wait(sk_sleep(sk), &wait);
- return rc;
-
-do_error:
- rc = -EPIPE;
- goto out;
-do_interrupted:
- rc = -EINTR;
- goto out;
+
+ switch (ccid_packet_dequeue_eval(rc)) {
+ case CCID_PACKET_WILL_DEQUEUE_LATER:
+ /*
+ * If the CCID determines when to send, the next sending
+ * time is unknown or the CCID may not even send again
+ * (e.g. remote host crashes or lost Ack packets).
+ */
+ DCCP_WARN("CCID did not manage to send all packets\n");
+ return;
+ case CCID_PACKET_DELAY:
+ delay = msecs_to_jiffies(rc);
+ if (delay > *time_budget)
+ return;
+ rc = dccp_wait_for_ccid(sk, delay);
+ if (rc < 0)
+ return;
+ *time_budget -= (delay - rc);
+ /* check again if we can send now */
+ break;
+ case CCID_PACKET_SEND_AT_ONCE:
+ dccp_xmit_packet(sk);
+ break;
+ case CCID_PACKET_ERR:
+ skb_dequeue(&sk->sk_write_queue);
+ kfree_skb(skb);
+ dccp_pr_debug("packet discarded due to err=%ld\n", rc);
+ }
+ }
}
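
dccp_close() is the intended caller of dccp_flush_write_queue(): it passes its close timeout as the budget and purges whatever the CCID could not release in time. A usage sketch with an illustrative 3-second budget (the real call site appears in the net/dccp/proto.c hunk below):

    long budget = msecs_to_jiffies(3000);	/* illustrative close timeout */

    dccp_flush_write_queue(sk, &budget);	/* may sleep; decrements the budget */
    if (!skb_queue_empty(&sk->sk_write_queue))
    	__skb_queue_purge(&sk->sk_write_queue);	/* drop what was never released */
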
-void dccp_write_xmit(struct sock *sk, int block)
+void dccp_write_xmit(struct sock *sk)
{
struct dccp_sock *dp = dccp_sk(sk);
struct sk_buff *skb;
while ((skb = skb_peek(&sk->sk_write_queue))) {
- int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
-
- if (err > 0) {
- if (!block) {
- sk_reset_timer(sk, &dp->dccps_xmit_timer,
- msecs_to_jiffies(err)+jiffies);
- break;
- } else
- err = dccp_wait_for_ccid(sk, skb, err);
- if (err && err != -EINTR)
- DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
- }
+ int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
- skb_dequeue(&sk->sk_write_queue);
- if (err == 0) {
- struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
- const int len = skb->len;
-
- if (sk->sk_state == DCCP_PARTOPEN) {
- const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
- /*
- * See 8.1.5 - Handshake Completion.
- *
- * For robustness we resend Confirm options until the client has
- * entered OPEN. During the initial feature negotiation, the MPS
- * is smaller than usual, reduced by the Change/Confirm options.
- */
- if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
- DCCP_WARN("Payload too large (%d) for featneg.\n", len);
- dccp_send_ack(sk);
- dccp_feat_list_purge(&dp->dccps_featneg);
- }
-
- inet_csk_schedule_ack(sk);
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
- inet_csk(sk)->icsk_rto,
- DCCP_RTO_MAX);
- dcb->dccpd_type = DCCP_PKT_DATAACK;
- } else if (dccp_ack_pending(sk))
- dcb->dccpd_type = DCCP_PKT_DATAACK;
- else
- dcb->dccpd_type = DCCP_PKT_DATA;
-
- err = dccp_transmit_skb(sk, skb);
- ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
- if (err)
- DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
- err);
- } else {
- dccp_pr_debug("packet discarded due to err=%d\n", err);
+ switch (ccid_packet_dequeue_eval(rc)) {
+ case CCID_PACKET_WILL_DEQUEUE_LATER:
+ return;
+ case CCID_PACKET_DELAY:
+ sk_reset_timer(sk, &dp->dccps_xmit_timer,
+ jiffies + msecs_to_jiffies(rc));
+ return;
+ case CCID_PACKET_SEND_AT_ONCE:
+ dccp_xmit_packet(sk);
+ break;
+ case CCID_PACKET_ERR:
+ skb_dequeue(&sk->sk_write_queue);
kfree_skb(skb);
+ dccp_pr_debug("packet discarded due to err=%d\n", rc);
}
}
}
@@ -622,7 +664,6 @@ void dccp_send_close(struct sock *sk, const int active)
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;
if (active) {
- dccp_write_xmit(sk, 1);
dccp_skb_entail(sk, skb);
dccp_transmit_skb(sk, skb_clone(skb, prio));
/*
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 7e5fc04eb6d1..ef343d53fcea 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -726,7 +726,13 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
goto out_discard;
skb_queue_tail(&sk->sk_write_queue, skb);
- dccp_write_xmit(sk,0);
+ /*
+ * The xmit_timer is set if the TX CCID is rate-based and will expire
+ * when congestion control permits releasing further packets into the
+ * network. Window-based CCIDs do not use this timer.
+ */
+ if (!timer_pending(&dp->dccps_xmit_timer))
+ dccp_write_xmit(sk);
out_release:
release_sock(sk);
return rc ? : len;
@@ -951,9 +957,22 @@ void dccp_close(struct sock *sk, long timeout)
/* Check zero linger _after_ checking for unread data. */
sk->sk_prot->disconnect(sk, 0);
} else if (sk->sk_state != DCCP_CLOSED) {
+ /*
+ * Normal connection termination. May need to wait if there are
+ * still packets in the TX queue that are delayed by the CCID.
+ */
+ dccp_flush_write_queue(sk, &timeout);
dccp_terminate_connection(sk);
}
+ /*
+ * Flush write queue. This may be necessary in several cases:
+ * - we have been closed by the peer but still have application data;
+ * - abortive termination (unread data or zero linger time);
+ * - normal termination, but the queue could not be flushed within the time limit.
+ */
+ __skb_queue_purge(&sk->sk_write_queue);
+
sk_stream_wait_close(sk, timeout);
adjudge_to_death:
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 1a9aa05d4dc4..7587870b7040 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -237,32 +237,35 @@ out:
sock_put(sk);
}
-/* Transmit-delay timer: used by the CCIDs to delay actual send time */
-static void dccp_write_xmit_timer(unsigned long data)
+/**
+ * dccp_write_xmitlet - Workhorse for CCID packet dequeueing interface
+ * See the comments above %ccid_dequeueing_decision for supported modes.
+ */
+static void dccp_write_xmitlet(unsigned long data)
{
struct sock *sk = (struct sock *)data;
- struct dccp_sock *dp = dccp_sk(sk);
bh_lock_sock(sk);
if (sock_owned_by_user(sk))
- sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies+1);
+ sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1);
else
- dccp_write_xmit(sk, 0);
+ dccp_write_xmit(sk);
bh_unlock_sock(sk);
- sock_put(sk);
}
-static void dccp_init_write_xmit_timer(struct sock *sk)
+static void dccp_write_xmit_timer(unsigned long data)
{
- struct dccp_sock *dp = dccp_sk(sk);
-
- setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer,
- (unsigned long)sk);
+ dccp_write_xmitlet(data);
+ sock_put((struct sock *)data);
}
void dccp_init_xmit_timers(struct sock *sk)
{
- dccp_init_write_xmit_timer(sk);
+ struct dccp_sock *dp = dccp_sk(sk);
+
+ tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk);
+ setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer,
+ (unsigned long)sk);
inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
&dccp_keepalive_timer);
}
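
The split keeps the two entry points distinct: dccp_write_xmitlet() runs whenever a CCID schedules the tasklet and holds no extra socket reference, whereas the timer path still owns the reference taken by sk_reset_timer() and releases it through sock_put() in dccp_write_xmit_timer(). The two trigger sites, as used elsewhere in this patch (rc being the CCID's millisecond delay):

    /* From CCID context, once a previously blocked packet may be sent: */
    tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);

    /* From dccp_write_xmit() in CCID_PACKET_DELAY mode; sk_reset_timer()
     * takes a socket reference that the timer callback drops again: */
    sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer,
    	       jiffies + msecs_to_jiffies(rc));
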
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 36e27c2107de..eb6f69a8f27a 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1052,7 +1052,7 @@ static void ip_fib_net_exit(struct net *net)
hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) {
hlist_del(node);
fib_table_flush(tb);
- kfree(tb);
+ fib_free_table(tb);
}
}
kfree(net->ipv4.fib_table_hash);
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 43e1c594ce8f..b3acb0417b21 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -120,11 +120,12 @@ static inline void fn_rebuild_zone(struct fn_zone *fz,
struct fib_node *f;
hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) {
- struct hlist_head __rcu *new_head;
+ struct hlist_head *new_head;
hlist_del_rcu(&f->fn_hash);
- new_head = &fz->fz_hash[fn_hash(f->fn_key, fz)];
+ new_head = rcu_dereference_protected(fz->fz_hash, 1) +
+ fn_hash(f->fn_key, fz);
hlist_add_head_rcu(&f->fn_hash, new_head);
}
}
@@ -179,8 +180,8 @@ static void fn_rehash_zone(struct fn_zone *fz)
memcpy(&nfz, fz, sizeof(nfz));
write_seqlock_bh(&fz->fz_lock);
- old_ht = fz->fz_hash;
- nfz.fz_hash = ht;
+ old_ht = rcu_dereference_protected(fz->fz_hash, 1);
+ RCU_INIT_POINTER(nfz.fz_hash, ht);
nfz.fz_hashmask = new_hashmask;
nfz.fz_divisor = new_divisor;
fn_rebuild_zone(&nfz, old_ht, old_divisor);
@@ -236,7 +237,7 @@ fn_new_zone(struct fn_hash *table, int z)
seqlock_init(&fz->fz_lock);
fz->fz_divisor = z ? EMBEDDED_HASH_SIZE : 1;
fz->fz_hashmask = fz->fz_divisor - 1;
- fz->fz_hash = fz->fz_embedded_hash;
+ RCU_INIT_POINTER(fz->fz_hash, fz->fz_embedded_hash);
fz->fz_order = z;
fz->fz_revorder = 32 - z;
fz->fz_mask = inet_make_mask(z);
@@ -272,7 +273,7 @@ int fib_table_lookup(struct fib_table *tb,
for (fz = rcu_dereference(t->fn_zone_list);
fz != NULL;
fz = rcu_dereference(fz->fz_next)) {
- struct hlist_head __rcu *head;
+ struct hlist_head *head;
struct hlist_node *node;
struct fib_node *f;
__be32 k;
@@ -282,7 +283,7 @@ int fib_table_lookup(struct fib_table *tb,
seq = read_seqbegin(&fz->fz_lock);
k = fz_key(flp->fl4_dst, fz);
- head = &fz->fz_hash[fn_hash(k, fz)];
+ head = rcu_dereference(fz->fz_hash) + fn_hash(k, fz);
hlist_for_each_entry_rcu(f, node, head, fn_hash) {
if (f->fn_key != k)
continue;
@@ -311,6 +312,7 @@ void fib_table_select_default(struct fib_table *tb,
struct fib_info *last_resort;
struct fn_hash *t = (struct fn_hash *)tb->tb_data;
struct fn_zone *fz = t->fn_zones[0];
+ struct hlist_head *head;
if (fz == NULL)
return;
@@ -320,7 +322,8 @@ void fib_table_select_default(struct fib_table *tb,
order = -1;
rcu_read_lock();
- hlist_for_each_entry_rcu(f, node, &fz->fz_hash[0], fn_hash) {
+ head = rcu_dereference(fz->fz_hash);
+ hlist_for_each_entry_rcu(f, node, head, fn_hash) {
struct fib_alias *fa;
list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
@@ -374,7 +377,7 @@ out:
/* Insert node F to FZ. */
static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
{
- struct hlist_head *head = &fz->fz_hash[fn_hash(f->fn_key, fz)];
+ struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(f->fn_key, fz);
hlist_add_head_rcu(&f->fn_hash, head);
}
@@ -382,7 +385,7 @@ static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
/* Return the node in FZ matching KEY. */
static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key)
{
- struct hlist_head *head = &fz->fz_hash[fn_hash(key, fz)];
+ struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(key, fz);
struct hlist_node *node;
struct fib_node *f;
@@ -662,7 +665,7 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
static int fn_flush_list(struct fn_zone *fz, int idx)
{
- struct hlist_head *head = &fz->fz_hash[idx];
+ struct hlist_head *head = rtnl_dereference(fz->fz_hash) + idx;
struct hlist_node *node, *n;
struct fib_node *f;
int found = 0;
@@ -713,6 +716,24 @@ int fib_table_flush(struct fib_table *tb)
return found;
}
+void fib_free_table(struct fib_table *tb)
+{
+ struct fn_hash *table = (struct fn_hash *) tb->tb_data;
+ struct fn_zone *fz, *next;
+
+ next = table->fn_zone_list;
+ while (next != NULL) {
+ fz = next;
+ next = fz->fz_next;
+
+ if (fz->fz_hash != fz->fz_embedded_hash)
+ fz_hash_free(fz->fz_hash, fz->fz_divisor);
+
+ kfree(fz);
+ }
+
+ kfree(tb);
+}
static inline int
fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb,
@@ -761,14 +782,15 @@ fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb,
struct fn_zone *fz)
{
int h, s_h;
+ struct hlist_head *head = rcu_dereference(fz->fz_hash);
- if (fz->fz_hash == NULL)
+ if (head == NULL)
return skb->len;
s_h = cb->args[3];
for (h = s_h; h < fz->fz_divisor; h++) {
- if (hlist_empty(&fz->fz_hash[h]))
+ if (hlist_empty(head + h))
continue;
- if (fn_hash_dump_bucket(skb, cb, tb, fz, &fz->fz_hash[h]) < 0) {
+ if (fn_hash_dump_bucket(skb, cb, tb, fz, head + h) < 0) {
cb->args[3] = h;
return -1;
}
@@ -872,7 +894,7 @@ static struct fib_alias *fib_get_first(struct seq_file *seq)
if (!iter->zone->fz_nent)
continue;
- iter->hash_head = iter->zone->fz_hash;
+ iter->hash_head = rcu_dereference(iter->zone->fz_hash);
maxslot = iter->zone->fz_divisor;
for (iter->bucket = 0; iter->bucket < maxslot;
@@ -957,7 +979,7 @@ static struct fib_alias *fib_get_next(struct seq_file *seq)
goto out;
iter->bucket = 0;
- iter->hash_head = iter->zone->fz_hash;
+ iter->hash_head = rcu_dereference(iter->zone->fz_hash);
hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
list_for_each_entry(fa, &fn->fn_alias, fa_list) {
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index b14450895102..200eb538fbb3 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1797,6 +1797,11 @@ int fib_table_flush(struct fib_table *tb)
return found;
}
+void fib_free_table(struct fib_table *tb)
+{
+ kfree(tb);
+}
+
void fib_table_select_default(struct fib_table *tb,
const struct flowi *flp,
struct fib_result *res)
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index caea6885fdbd..c6933f2ea310 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -22,7 +22,7 @@
#include <net/gre.h>
-static const struct gre_protocol *gre_proto[GREPROTO_MAX] __read_mostly;
+static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
static DEFINE_SPINLOCK(gre_proto_lock);
int gre_add_protocol(const struct gre_protocol *proto, u8 version)
@@ -51,7 +51,8 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
goto err_out;
spin_lock(&gre_proto_lock);
- if (gre_proto[version] != proto)
+ if (rcu_dereference_protected(gre_proto[version],
+ lockdep_is_held(&gre_proto_lock)) != proto)
goto err_out_unlock;
rcu_assign_pointer(gre_proto[version], NULL);
spin_unlock(&gre_proto_lock);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 9ffa24b9a804..9e94d7cf4f8a 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -72,18 +72,19 @@ static struct kmem_cache *peer_cachep __read_mostly;
#define node_height(x) x->avl_height
#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
+#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
static const struct inet_peer peer_fake_node = {
- .avl_left = peer_avl_empty,
- .avl_right = peer_avl_empty,
+ .avl_left = peer_avl_empty_rcu,
+ .avl_right = peer_avl_empty_rcu,
.avl_height = 0
};
static struct {
- struct inet_peer *root;
+ struct inet_peer __rcu *root;
spinlock_t lock;
int total;
} peers = {
- .root = peer_avl_empty,
+ .root = peer_avl_empty_rcu,
.lock = __SPIN_LOCK_UNLOCKED(peers.lock),
.total = 0,
};
@@ -156,11 +157,14 @@ static void unlink_from_unused(struct inet_peer *p)
*/
#define lookup(_daddr, _stack) \
({ \
- struct inet_peer *u, **v; \
+ struct inet_peer *u; \
+ struct inet_peer __rcu **v; \
\
stackptr = _stack; \
*stackptr++ = &peers.root; \
- for (u = peers.root; u != peer_avl_empty; ) { \
+ for (u = rcu_dereference_protected(peers.root, \
+ lockdep_is_held(&peers.lock)); \
+ u != peer_avl_empty; ) { \
if (_daddr == u->v4daddr) \
break; \
if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \
@@ -168,7 +172,8 @@ static void unlink_from_unused(struct inet_peer *p)
else \
v = &u->avl_right; \
*stackptr++ = v; \
- u = *v; \
+ u = rcu_dereference_protected(*v, \
+ lockdep_is_held(&peers.lock)); \
} \
u; \
})
@@ -209,13 +214,17 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
/* Called with local BH disabled and the pool lock held. */
#define lookup_rightempty(start) \
({ \
- struct inet_peer *u, **v; \
+ struct inet_peer *u; \
+ struct inet_peer __rcu **v; \
*stackptr++ = &start->avl_left; \
v = &start->avl_left; \
- for (u = *v; u->avl_right != peer_avl_empty; ) { \
+ for (u = rcu_dereference_protected(*v, \
+ lockdep_is_held(&peers.lock)); \
+ u->avl_right != peer_avl_empty_rcu; ) { \
v = &u->avl_right; \
*stackptr++ = v; \
- u = *v; \
+ u = rcu_dereference_protected(*v, \
+ lockdep_is_held(&peers.lock)); \
} \
u; \
})
@@ -224,74 +233,86 @@ static struct inet_peer *lookup_rcu_bh(__be32 daddr)
* Variable names are the proof of operation correctness.
* Look into mm/map_avl.c for more detail description of the ideas.
*/
-static void peer_avl_rebalance(struct inet_peer **stack[],
- struct inet_peer ***stackend)
+static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
+ struct inet_peer __rcu ***stackend)
{
- struct inet_peer **nodep, *node, *l, *r;
+ struct inet_peer __rcu **nodep;
+ struct inet_peer *node, *l, *r;
int lh, rh;
while (stackend > stack) {
nodep = *--stackend;
- node = *nodep;
- l = node->avl_left;
- r = node->avl_right;
+ node = rcu_dereference_protected(*nodep,
+ lockdep_is_held(&peers.lock));
+ l = rcu_dereference_protected(node->avl_left,
+ lockdep_is_held(&peers.lock));
+ r = rcu_dereference_protected(node->avl_right,
+ lockdep_is_held(&peers.lock));
lh = node_height(l);
rh = node_height(r);
if (lh > rh + 1) { /* l: RH+2 */
struct inet_peer *ll, *lr, *lrl, *lrr;
int lrh;
- ll = l->avl_left;
- lr = l->avl_right;
+ ll = rcu_dereference_protected(l->avl_left,
+ lockdep_is_held(&peers.lock));
+ lr = rcu_dereference_protected(l->avl_right,
+ lockdep_is_held(&peers.lock));
lrh = node_height(lr);
if (lrh <= node_height(ll)) { /* ll: RH+1 */
- node->avl_left = lr; /* lr: RH or RH+1 */
- node->avl_right = r; /* r: RH */
+ RCU_INIT_POINTER(node->avl_left, lr); /* lr: RH or RH+1 */
+ RCU_INIT_POINTER(node->avl_right, r); /* r: RH */
node->avl_height = lrh + 1; /* RH+1 or RH+2 */
- l->avl_left = ll; /* ll: RH+1 */
- l->avl_right = node; /* node: RH+1 or RH+2 */
+ RCU_INIT_POINTER(l->avl_left, ll); /* ll: RH+1 */
+ RCU_INIT_POINTER(l->avl_right, node); /* node: RH+1 or RH+2 */
l->avl_height = node->avl_height + 1;
- *nodep = l;
+ RCU_INIT_POINTER(*nodep, l);
} else { /* ll: RH, lr: RH+1 */
- lrl = lr->avl_left; /* lrl: RH or RH-1 */
- lrr = lr->avl_right; /* lrr: RH or RH-1 */
- node->avl_left = lrr; /* lrr: RH or RH-1 */
- node->avl_right = r; /* r: RH */
+ lrl = rcu_dereference_protected(lr->avl_left,
+ lockdep_is_held(&peers.lock)); /* lrl: RH or RH-1 */
+ lrr = rcu_dereference_protected(lr->avl_right,
+ lockdep_is_held(&peers.lock)); /* lrr: RH or RH-1 */
+ RCU_INIT_POINTER(node->avl_left, lrr); /* lrr: RH or RH-1 */
+ RCU_INIT_POINTER(node->avl_right, r); /* r: RH */
node->avl_height = rh + 1; /* node: RH+1 */
- l->avl_left = ll; /* ll: RH */
- l->avl_right = lrl; /* lrl: RH or RH-1 */
+ RCU_INIT_POINTER(l->avl_left, ll); /* ll: RH */
+ RCU_INIT_POINTER(l->avl_right, lrl); /* lrl: RH or RH-1 */
l->avl_height = rh + 1; /* l: RH+1 */
- lr->avl_left = l; /* l: RH+1 */
- lr->avl_right = node; /* node: RH+1 */
+ RCU_INIT_POINTER(lr->avl_left, l); /* l: RH+1 */
+ RCU_INIT_POINTER(lr->avl_right, node); /* node: RH+1 */
lr->avl_height = rh + 2;
- *nodep = lr;
+ RCU_INIT_POINTER(*nodep, lr);
}
} else if (rh > lh + 1) { /* r: LH+2 */
struct inet_peer *rr, *rl, *rlr, *rll;
int rlh;
- rr = r->avl_right;
- rl = r->avl_left;
+ rr = rcu_dereference_protected(r->avl_right,
+ lockdep_is_held(&peers.lock));
+ rl = rcu_dereference_protected(r->avl_left,
+ lockdep_is_held(&peers.lock));
rlh = node_height(rl);
if (rlh <= node_height(rr)) { /* rr: LH+1 */
- node->avl_right = rl; /* rl: LH or LH+1 */
- node->avl_left = l; /* l: LH */
+ RCU_INIT_POINTER(node->avl_right, rl); /* rl: LH or LH+1 */
+ RCU_INIT_POINTER(node->avl_left, l); /* l: LH */
node->avl_height = rlh + 1; /* LH+1 or LH+2 */
- r->avl_right = rr; /* rr: LH+1 */
- r->avl_left = node; /* node: LH+1 or LH+2 */
+ RCU_INIT_POINTER(r->avl_right, rr); /* rr: LH+1 */
+ RCU_INIT_POINTER(r->avl_left, node); /* node: LH+1 or LH+2 */
r->avl_height = node->avl_height + 1;
- *nodep = r;
+ RCU_INIT_POINTER(*nodep, r);
} else { /* rr: RH, rl: RH+1 */
- rlr = rl->avl_right; /* rlr: LH or LH-1 */
- rll = rl->avl_left; /* rll: LH or LH-1 */
- node->avl_right = rll; /* rll: LH or LH-1 */
- node->avl_left = l; /* l: LH */
+ rlr = rcu_dereference_protected(rl->avl_right,
+ lockdep_is_held(&peers.lock)); /* rlr: LH or LH-1 */
+ rll = rcu_dereference_protected(rl->avl_left,
+ lockdep_is_held(&peers.lock)); /* rll: LH or LH-1 */
+ RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */
+ RCU_INIT_POINTER(node->avl_left, l); /* l: LH */
node->avl_height = lh + 1; /* node: LH+1 */
- r->avl_right = rr; /* rr: LH */
- r->avl_left = rlr; /* rlr: LH or LH-1 */
+ RCU_INIT_POINTER(r->avl_right, rr); /* rr: LH */
+ RCU_INIT_POINTER(r->avl_left, rlr); /* rlr: LH or LH-1 */
r->avl_height = lh + 1; /* r: LH+1 */
- rl->avl_right = r; /* r: LH+1 */
- rl->avl_left = node; /* node: LH+1 */
+ RCU_INIT_POINTER(rl->avl_right, r); /* r: LH+1 */
+ RCU_INIT_POINTER(rl->avl_left, node); /* node: LH+1 */
rl->avl_height = lh + 2;
- *nodep = rl;
+ RCU_INIT_POINTER(*nodep, rl);
}
} else {
node->avl_height = (lh > rh ? lh : rh) + 1;
@@ -303,10 +324,10 @@ static void peer_avl_rebalance(struct inet_peer **stack[],
#define link_to_pool(n) \
do { \
n->avl_height = 1; \
- n->avl_left = peer_avl_empty; \
- n->avl_right = peer_avl_empty; \
- smp_wmb(); /* lockless readers can catch us now */ \
- **--stackptr = n; \
+ n->avl_left = peer_avl_empty_rcu; \
+ n->avl_right = peer_avl_empty_rcu; \
+ /* lockless readers can catch us now */ \
+ rcu_assign_pointer(**--stackptr, n); \
peer_avl_rebalance(stack, stackptr); \
} while (0)
@@ -330,24 +351,25 @@ static void unlink_from_pool(struct inet_peer *p)
* We use refcnt=-1 to alert lockless readers this entry is deleted.
*/
if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
- struct inet_peer **stack[PEER_MAXDEPTH];
- struct inet_peer ***stackptr, ***delp;
+ struct inet_peer __rcu **stack[PEER_MAXDEPTH];
+ struct inet_peer __rcu ***stackptr, ***delp;
if (lookup(p->v4daddr, stack) != p)
BUG();
delp = stackptr - 1; /* *delp[0] == p */
- if (p->avl_left == peer_avl_empty) {
+ if (p->avl_left == peer_avl_empty_rcu) {
*delp[0] = p->avl_right;
--stackptr;
} else {
/* look for a node to insert instead of p */
struct inet_peer *t;
t = lookup_rightempty(p);
- BUG_ON(*stackptr[-1] != t);
+ BUG_ON(rcu_dereference_protected(*stackptr[-1],
+ lockdep_is_held(&peers.lock)) != t);
**--stackptr = t->avl_left;
/* t is removed, t->v4daddr > x->v4daddr for any
* x in p->avl_left subtree.
* Put t in the old place of p. */
- *delp[0] = t;
+ RCU_INIT_POINTER(*delp[0], t);
t->avl_left = p->avl_left;
t->avl_right = p->avl_right;
t->avl_height = p->avl_height;
@@ -414,7 +436,7 @@ static int cleanup_once(unsigned long ttl)
struct inet_peer *inet_getpeer(__be32 daddr, int create)
{
struct inet_peer *p;
- struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
+ struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
/* Look up for the address quickly, lockless.
* Because of a concurrent writer, we might not find an existing entry.
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d0ffcbe369b7..70ff77f02eee 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1072,6 +1072,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
break;
}
ipgre_tunnel_unlink(ign, t);
+ synchronize_net();
t->parms.iph.saddr = p.iph.saddr;
t->parms.iph.daddr = p.iph.daddr;
t->parms.i_key = p.i_key;
@@ -1324,7 +1325,6 @@ static void ipgre_fb_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
- struct ipgre_net *ign = net_generic(dev_net(dev), ipgre_net_id);
tunnel->dev = dev;
strcpy(tunnel->parms.name, dev->name);
@@ -1335,7 +1335,6 @@ static void ipgre_fb_tunnel_init(struct net_device *dev)
tunnel->hlen = sizeof(struct iphdr) + 4;
dev_hold(dev);
- rcu_assign_pointer(ign->tunnels_wc[0], tunnel);
}
@@ -1382,10 +1381,12 @@ static int __net_init ipgre_init_net(struct net *net)
if ((err = register_netdev(ign->fb_tunnel_dev)))
goto err_reg_dev;
+ rcu_assign_pointer(ign->tunnels_wc[0],
+ netdev_priv(ign->fb_tunnel_dev));
return 0;
err_reg_dev:
- free_netdev(ign->fb_tunnel_dev);
+ ipgre_dev_free(ign->fb_tunnel_dev);
err_alloc_dev:
return err;
}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 64b70ad162e3..3948c86e59ca 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -238,7 +238,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
but receiver should be enough clever f.e. to forward mtrace requests,
sent to multicast group to reach destination designated router.
*/
-struct ip_ra_chain *ip_ra_chain;
+struct ip_ra_chain __rcu *ip_ra_chain;
static DEFINE_SPINLOCK(ip_ra_lock);
@@ -253,7 +253,8 @@ static void ip_ra_destroy_rcu(struct rcu_head *head)
int ip_ra_control(struct sock *sk, unsigned char on,
void (*destructor)(struct sock *))
{
- struct ip_ra_chain *ra, *new_ra, **rap;
+ struct ip_ra_chain *ra, *new_ra;
+ struct ip_ra_chain __rcu **rap;
if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
return -EINVAL;
@@ -261,7 +262,10 @@ int ip_ra_control(struct sock *sk, unsigned char on,
new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
spin_lock_bh(&ip_ra_lock);
- for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) {
+ for (rap = &ip_ra_chain;
+ (ra = rcu_dereference_protected(*rap,
+ lockdep_is_held(&ip_ra_lock))) != NULL;
+ rap = &ra->next) {
if (ra->sk == sk) {
if (on) {
spin_unlock_bh(&ip_ra_lock);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index e9b816e6cd73..cd300aaee78f 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -676,6 +676,7 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
}
t = netdev_priv(dev);
ipip_tunnel_unlink(ipn, t);
+ synchronize_net();
t->parms.iph.saddr = p.iph.saddr;
t->parms.iph.daddr = p.iph.daddr;
memcpy(dev->dev_addr, &p.iph.saddr, 4);
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 65699c24411c..9ae5c01cd0b2 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -28,7 +28,7 @@
#include <linux/spinlock.h>
#include <net/protocol.h>
-const struct net_protocol *inet_protos[MAX_INET_PROTOS] __read_mostly;
+const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
/*
* Add a protocol handler to the hash tables
@@ -38,7 +38,8 @@ int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
{
int hash = protocol & (MAX_INET_PROTOS - 1);
- return !cmpxchg(&inet_protos[hash], NULL, prot) ? 0 : -1;
+ return !cmpxchg((const struct net_protocol **)&inet_protos[hash],
+ NULL, prot) ? 0 : -1;
}
EXPORT_SYMBOL(inet_add_protocol);
@@ -50,7 +51,8 @@ int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
{
int ret, hash = protocol & (MAX_INET_PROTOS - 1);
- ret = (cmpxchg(&inet_protos[hash], prot, NULL) == prot) ? 0 : -1;
+ ret = (cmpxchg((const struct net_protocol **)&inet_protos[hash],
+ prot, NULL) == prot) ? 0 : -1;
synchronize_net();
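
The explicit casts let cmpxchg() operate on the now __rcu-annotated array without sparse complaints; registration stays lock-free, and deregistration is followed by synchronize_net() so no softirq reader can still be running the old handler. A hedged usage sketch (my_rcv, my_proto and the protocol number 253, reserved for experimentation, are placeholders):

    /* Placeholder handler; only the register/unregister pattern matters. */
    static int my_rcv(struct sk_buff *skb)
    {
    	kfree_skb(skb);
    	return 0;
    }

    static const struct net_protocol my_proto = {
    	.handler	= my_rcv,
    };

    static int __init my_init(void)
    {
    	return inet_add_protocol(&my_proto, 253) ? -EBUSY : 0;
    }

    static void __exit my_exit(void)
    {
    	inet_del_protocol(&my_proto, 253);
    	/* inet_del_protocol() already ran synchronize_net(), so my_rcv()
    	 * cannot still be executing when this returns. */
    }
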
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d6cb2bfcd8e1..987bf9adb318 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -198,7 +198,7 @@ const __u8 ip_tos2prio[16] = {
*/
struct rt_hash_bucket {
- struct rtable *chain;
+ struct rtable __rcu *chain;
};
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
@@ -280,7 +280,7 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
struct rtable *r = NULL;
for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
- if (!rt_hash_table[st->bucket].chain)
+ if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
continue;
rcu_read_lock_bh();
r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
@@ -300,17 +300,17 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
{
struct rt_cache_iter_state *st = seq->private;
- r = r->dst.rt_next;
+ r = rcu_dereference_bh(r->dst.rt_next);
while (!r) {
rcu_read_unlock_bh();
do {
if (--st->bucket < 0)
return NULL;
- } while (!rt_hash_table[st->bucket].chain);
+ } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
rcu_read_lock_bh();
- r = rt_hash_table[st->bucket].chain;
+ r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
}
- return rcu_dereference_bh(r);
+ return r;
}
static struct rtable *rt_cache_get_next(struct seq_file *seq,
@@ -721,19 +721,23 @@ static void rt_do_flush(int process_context)
for (i = 0; i <= rt_hash_mask; i++) {
if (process_context && need_resched())
cond_resched();
- rth = rt_hash_table[i].chain;
+ rth = rcu_dereference_raw(rt_hash_table[i].chain);
if (!rth)
continue;
spin_lock_bh(rt_hash_lock_addr(i));
#ifdef CONFIG_NET_NS
{
- struct rtable ** prev, * p;
+ struct rtable __rcu **prev;
+ struct rtable *p;
- rth = rt_hash_table[i].chain;
+ rth = rcu_dereference_protected(rt_hash_table[i].chain,
+ lockdep_is_held(rt_hash_lock_addr(i)));
/* defer releasing the head of the list after spin_unlock */
- for (tail = rth; tail; tail = tail->dst.rt_next)
+ for (tail = rth; tail;
+ tail = rcu_dereference_protected(tail->dst.rt_next,
+ lockdep_is_held(rt_hash_lock_addr(i))))
if (!rt_is_expired(tail))
break;
if (rth != tail)
@@ -741,8 +745,12 @@ static void rt_do_flush(int process_context)
/* call rt_free on entries after the tail requiring flush */
prev = &rt_hash_table[i].chain;
- for (p = *prev; p; p = next) {
- next = p->dst.rt_next;
+ for (p = rcu_dereference_protected(*prev,
+ lockdep_is_held(rt_hash_lock_addr(i)));
+ p != NULL;
+ p = next) {
+ next = rcu_dereference_protected(p->dst.rt_next,
+ lockdep_is_held(rt_hash_lock_addr(i)));
if (!rt_is_expired(p)) {
prev = &p->dst.rt_next;
} else {
@@ -752,14 +760,15 @@ static void rt_do_flush(int process_context)
}
}
#else
- rth = rt_hash_table[i].chain;
- rt_hash_table[i].chain = NULL;
+ rth = rcu_dereference_protected(rt_hash_table[i].chain,
+ lockdep_is_held(rt_hash_lock_addr(i)));
+ rcu_assign_pointer(rt_hash_table[i].chain, NULL);
tail = NULL;
#endif
spin_unlock_bh(rt_hash_lock_addr(i));
for (; rth != tail; rth = next) {
- next = rth->dst.rt_next;
+ next = rcu_dereference_protected(rth->dst.rt_next, 1);
rt_free(rth);
}
}
@@ -790,7 +799,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
while (aux != rth) {
if (compare_hash_inputs(&aux->fl, &rth->fl))
return 0;
- aux = aux->dst.rt_next;
+ aux = rcu_dereference_protected(aux->dst.rt_next, 1);
}
return ONE;
}
@@ -799,7 +808,8 @@ static void rt_check_expire(void)
{
static unsigned int rover;
unsigned int i = rover, goal;
- struct rtable *rth, **rthp;
+ struct rtable *rth;
+ struct rtable __rcu **rthp;
unsigned long samples = 0;
unsigned long sum = 0, sum2 = 0;
unsigned long delta;
@@ -825,11 +835,12 @@ static void rt_check_expire(void)
samples++;
- if (*rthp == NULL)
+ if (rcu_dereference_raw(*rthp) == NULL)
continue;
length = 0;
spin_lock_bh(rt_hash_lock_addr(i));
- while ((rth = *rthp) != NULL) {
+ while ((rth = rcu_dereference_protected(*rthp,
+ lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
prefetch(rth->dst.rt_next);
if (rt_is_expired(rth)) {
*rthp = rth->dst.rt_next;
@@ -941,7 +952,8 @@ static int rt_garbage_collect(struct dst_ops *ops)
static unsigned long last_gc;
static int rover;
static int equilibrium;
- struct rtable *rth, **rthp;
+ struct rtable *rth;
+ struct rtable __rcu **rthp;
unsigned long now = jiffies;
int goal;
int entries = dst_entries_get_fast(&ipv4_dst_ops);
@@ -995,7 +1007,8 @@ static int rt_garbage_collect(struct dst_ops *ops)
k = (k + 1) & rt_hash_mask;
rthp = &rt_hash_table[k].chain;
spin_lock_bh(rt_hash_lock_addr(k));
- while ((rth = *rthp) != NULL) {
+ while ((rth = rcu_dereference_protected(*rthp,
+ lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
if (!rt_is_expired(rth) &&
!rt_may_expire(rth, tmo, expire)) {
tmo >>= 1;
@@ -1071,7 +1084,7 @@ static int slow_chain_length(const struct rtable *head)
while (rth) {
length += has_noalias(head, rth);
- rth = rth->dst.rt_next;
+ rth = rcu_dereference_protected(rth->dst.rt_next, 1);
}
return length >> FRACT_BITS;
}
@@ -1079,9 +1092,9 @@ static int slow_chain_length(const struct rtable *head)
static int rt_intern_hash(unsigned hash, struct rtable *rt,
struct rtable **rp, struct sk_buff *skb, int ifindex)
{
- struct rtable *rth, **rthp;
+ struct rtable *rth, *cand;
+ struct rtable __rcu **rthp, **candp;
unsigned long now;
- struct rtable *cand, **candp;
u32 min_score;
int chain_length;
int attempts = !in_softirq();
@@ -1128,7 +1141,8 @@ restart:
rthp = &rt_hash_table[hash].chain;
spin_lock_bh(rt_hash_lock_addr(hash));
- while ((rth = *rthp) != NULL) {
+ while ((rth = rcu_dereference_protected(*rthp,
+ lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
if (rt_is_expired(rth)) {
*rthp = rth->dst.rt_next;
rt_free(rth);
@@ -1324,12 +1338,14 @@ EXPORT_SYMBOL(__ip_select_ident);
static void rt_del(unsigned hash, struct rtable *rt)
{
- struct rtable **rthp, *aux;
+ struct rtable __rcu **rthp;
+ struct rtable *aux;
rthp = &rt_hash_table[hash].chain;
spin_lock_bh(rt_hash_lock_addr(hash));
ip_rt_put(rt);
- while ((aux = *rthp) != NULL) {
+ while ((aux = rcu_dereference_protected(*rthp,
+ lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
if (aux == rt || rt_is_expired(aux)) {
*rthp = aux->dst.rt_next;
rt_free(aux);
@@ -1346,7 +1362,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
{
int i, k;
struct in_device *in_dev = __in_dev_get_rcu(dev);
- struct rtable *rth, **rthp;
+ struct rtable *rth;
+ struct rtable __rcu **rthp;
__be32 skeys[2] = { saddr, 0 };
int ikeys[2] = { dev->ifindex, 0 };
struct netevent_redirect netevent;
@@ -1379,7 +1396,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
rt_genid(net));
- rthp=&rt_hash_table[hash].chain;
+ rthp = &rt_hash_table[hash].chain;
while ((rth = rcu_dereference(*rthp)) != NULL) {
struct rtable *rt;
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index 9a17bd2a0a37..ac3b3ee4b07c 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -14,27 +14,32 @@
#include <net/protocol.h>
#include <net/xfrm.h>
-static struct xfrm_tunnel *tunnel4_handlers __read_mostly;
-static struct xfrm_tunnel *tunnel64_handlers __read_mostly;
+static struct xfrm_tunnel __rcu *tunnel4_handlers __read_mostly;
+static struct xfrm_tunnel __rcu *tunnel64_handlers __read_mostly;
static DEFINE_MUTEX(tunnel4_mutex);
-static inline struct xfrm_tunnel **fam_handlers(unsigned short family)
+static inline struct xfrm_tunnel __rcu **fam_handlers(unsigned short family)
{
return (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers;
}
int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
{
- struct xfrm_tunnel **pprev;
+ struct xfrm_tunnel __rcu **pprev;
+ struct xfrm_tunnel *t;
+
int ret = -EEXIST;
int priority = handler->priority;
mutex_lock(&tunnel4_mutex);
- for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
- if ((*pprev)->priority > priority)
+ for (pprev = fam_handlers(family);
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&tunnel4_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t->priority > priority)
break;
- if ((*pprev)->priority == priority)
+ if (t->priority == priority)
goto err;
}
@@ -52,13 +57,17 @@ EXPORT_SYMBOL(xfrm4_tunnel_register);
int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
{
- struct xfrm_tunnel **pprev;
+ struct xfrm_tunnel __rcu **pprev;
+ struct xfrm_tunnel *t;
int ret = -ENOENT;
mutex_lock(&tunnel4_mutex);
- for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
- if (*pprev == handler) {
+ for (pprev = fam_handlers(family);
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&tunnel4_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t == handler) {
*pprev = handler->next;
ret = 0;
break;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index b3f7e8cf18ac..28cb2d733a3c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1413,7 +1413,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
}
- if (sk->sk_filter) {
+ if (rcu_dereference_raw(sk->sk_filter)) {
if (udp_lib_checksum_complete(skb))
goto drop;
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ec7a91d9e865..e048ec62d109 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -836,7 +836,7 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
{
struct inet6_dev *idev = ifp->idev;
struct in6_addr addr, *tmpaddr;
- unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp;
+ unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp, age;
unsigned long regen_advance;
int tmp_plen;
int ret = 0;
@@ -886,12 +886,13 @@ retry:
goto out;
}
memcpy(&addr.s6_addr[8], idev->rndid, 8);
+ age = (jiffies - ifp->tstamp) / HZ;
tmp_valid_lft = min_t(__u32,
ifp->valid_lft,
- idev->cnf.temp_valid_lft);
+ idev->cnf.temp_valid_lft + age);
tmp_prefered_lft = min_t(__u32,
ifp->prefered_lft,
- idev->cnf.temp_prefered_lft -
+ idev->cnf.temp_prefered_lft + age -
idev->cnf.max_desync_factor);
tmp_plen = ifp->prefix_len;
max_addresses = idev->cnf.max_addresses;
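
The "+ age" terms appear to compensate for the temporary address inheriting the public address's timestamps (tmp_cstamp/tmp_tstamp): the configured temporary lifetimes are meant to count from now, so the public address's age is added back before capping against the public lifetimes. A worked example with illustrative numbers only:

    /*
     * Illustrative numbers: the public address is one hour old (age = 3600 s),
     * temp_valid_lft is 7 days and the public valid_lft is 30 days:
     *
     *   tmp_valid_lft = min(30 * 86400, 7 * 86400 + 3600) = 608400 s
     *
     * i.e. the temporary address keeps its full 7-day budget measured from
     * now, even though its timestamps are copied from the older public one.
     */
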
@@ -1426,8 +1427,10 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
{
struct inet6_dev *idev = ifp->idev;
- if (addrconf_dad_end(ifp))
+ if (addrconf_dad_end(ifp)) {
+ in6_ifa_put(ifp);
return;
+ }
if (net_ratelimit())
printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n",
@@ -2021,10 +2024,11 @@ ok:
ipv6_ifa_notify(0, ift);
}
- if (create && in6_dev->cnf.use_tempaddr > 0) {
+ if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
/*
* When a new public address is created as described in [ADDRCONF],
- * also create a new temporary address.
+ * also create a new temporary address. Create one as well when
+ * temporary addresses are enabled but none currently exists.
*/
read_unlock_bh(&in6_dev->lock);
ipv6_create_tempaddr(ifp, NULL);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c2c0f89397b1..2a59610c2a58 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1284,6 +1284,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
t = netdev_priv(dev);
ip6_tnl_unlink(ip6n, t);
+ synchronize_net();
err = ip6_tnl_change(t, &p);
ip6_tnl_link(ip6n, t);
netdev_state_change(dev);
@@ -1371,6 +1372,7 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 0553867a317f..d1770e061c08 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -343,6 +343,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
break;
case IPV6_TRANSPARENT:
+ if (!capable(CAP_NET_ADMIN)) {
+ retv = -EPERM;
+ break;
+ }
if (optlen < sizeof(int))
goto e_inval;
/* we don't have a separate transparent bit for IPV6 we use the one in the IPv4 socket */
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 44d2eeac089b..448464844a25 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -5,10 +5,15 @@
menu "IPv6: Netfilter Configuration"
depends on INET && IPV6 && NETFILTER
+config NF_DEFRAG_IPV6
+ tristate
+ default n
+
config NF_CONNTRACK_IPV6
tristate "IPv6 connection tracking support"
depends on INET && IPV6 && NF_CONNTRACK
default m if NETFILTER_ADVANCED=n
+ select NF_DEFRAG_IPV6
---help---
Connection tracking keeps a record of what packets have passed
through your machine, in order to figure out how they are related
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 3f8e4a3d83ce..0a432c9b0795 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -12,11 +12,14 @@ obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
# objects for l3 independent conntrack
nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
-nf_defrag_ipv6-objs := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
# l3 independent conntrack
obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o
+# defrag
+nf_defrag_ipv6-objs := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
+obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
+
# matches
obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 489d71b844ac..3a3f129a44cb 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -625,21 +625,24 @@ int nf_ct_frag6_init(void)
inet_frags_init_net(&nf_init_frags);
inet_frags_init(&nf_frags);
+#ifdef CONFIG_SYSCTL
nf_ct_frag6_sysctl_header = register_sysctl_paths(nf_net_netfilter_sysctl_path,
nf_ct_frag6_sysctl_table);
if (!nf_ct_frag6_sysctl_header) {
inet_frags_fini(&nf_frags);
return -ENOMEM;
}
+#endif
return 0;
}
void nf_ct_frag6_cleanup(void)
{
+#ifdef CONFIG_SYSCTL
unregister_sysctl_table(nf_ct_frag6_sysctl_header);
nf_ct_frag6_sysctl_header = NULL;
-
+#endif
inet_frags_fini(&nf_frags);
nf_init_frags.low_thresh = 0;
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index d082eaeefa25..24b3558b8e67 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -126,6 +126,8 @@ static const struct snmp_mib snmp6_udp6_list[] = {
SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS),
SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS),
SNMP_MIB_ITEM("Udp6OutDatagrams", UDP_MIB_OUTDATAGRAMS),
+ SNMP_MIB_ITEM("Udp6RcvbufErrors", UDP_MIB_RCVBUFERRORS),
+ SNMP_MIB_ITEM("Udp6SndbufErrors", UDP_MIB_SNDBUFERRORS),
SNMP_MIB_SENTINEL
};
@@ -134,6 +136,8 @@ static const struct snmp_mib snmp6_udplite6_list[] = {
SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS),
SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS),
SNMP_MIB_ITEM("UdpLite6OutDatagrams", UDP_MIB_OUTDATAGRAMS),
+ SNMP_MIB_ITEM("UdpLite6RcvbufErrors", UDP_MIB_RCVBUFERRORS),
+ SNMP_MIB_ITEM("UdpLite6SndbufErrors", UDP_MIB_SNDBUFERRORS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 9bb936ae2452..9a7978fdc02a 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -25,13 +25,14 @@
#include <linux/spinlock.h>
#include <net/protocol.h>
-const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS] __read_mostly;
+const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly;
int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
{
int hash = protocol & (MAX_INET_PROTOS - 1);
- return !cmpxchg(&inet6_protos[hash], NULL, prot) ? 0 : -1;
+ return !cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
+ NULL, prot) ? 0 : -1;
}
EXPORT_SYMBOL(inet6_add_protocol);
@@ -43,7 +44,8 @@ int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol
{
int ret, hash = protocol & (MAX_INET_PROTOS - 1);
- ret = (cmpxchg(&inet6_protos[hash], prot, NULL) == prot) ? 0 : -1;
+ ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
+ prot, NULL) == prot) ? 0 : -1;
synchronize_net();
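The protocol array gains a __rcu annotation and the cmpxchg() casts keep sparse happy while preserving the lockless semantics: register only claims an empty slot, deregister only clears the slot if it still holds the same handler. A small user-space sketch of that pattern, using GCC's __sync_val_compare_and_swap; the handler type and names are illustrative, not the kernel's.

    #include <stdio.h>
    #include <stddef.h>

    #define MAX_PROTOS 256

    struct proto_handler { const char *name; };

    static struct proto_handler *protos[MAX_PROTOS];

    /* 0 if the slot was empty and is now claimed, -1 if already taken. */
    static int add_protocol(struct proto_handler *h, unsigned char protocol)
    {
            return __sync_val_compare_and_swap(&protos[protocol], NULL, h) == NULL ? 0 : -1;
    }

    /* 0 only if we removed exactly the handler we registered earlier. */
    static int del_protocol(struct proto_handler *h, unsigned char protocol)
    {
            return __sync_val_compare_and_swap(&protos[protocol], h, NULL) == h ? 0 : -1;
    }

    int main(void)
    {
            struct proto_handler udp = { "udp" }, other = { "other" };

            printf("add udp:   %d\n", add_protocol(&udp, 17));    /* 0  */
            printf("add other: %d\n", add_protocol(&other, 17));  /* -1 */
            printf("del other: %d\n", del_protocol(&other, 17));  /* -1 */
            printf("del udp:   %d\n", del_protocol(&udp, 17));    /* 0  */
            return 0;
    }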
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 45e6efb7f171..86c39526ba5e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -373,7 +373,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
- if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
+ if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 367a6cc584cc..d6bfaec3bbbf 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -963,6 +963,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
}
t = netdev_priv(dev);
ipip6_tunnel_unlink(sitn, t);
+ synchronize_net();
t->parms.iph.saddr = p.iph.saddr;
t->parms.iph.daddr = p.iph.daddr;
memcpy(dev->dev_addr, &p.iph.saddr, 4);
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index d9864725d0c6..4f3cec12aa85 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -30,23 +30,26 @@
#include <net/protocol.h>
#include <net/xfrm.h>
-static struct xfrm6_tunnel *tunnel6_handlers __read_mostly;
-static struct xfrm6_tunnel *tunnel46_handlers __read_mostly;
+static struct xfrm6_tunnel __rcu *tunnel6_handlers __read_mostly;
+static struct xfrm6_tunnel __rcu *tunnel46_handlers __read_mostly;
static DEFINE_MUTEX(tunnel6_mutex);
int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family)
{
- struct xfrm6_tunnel **pprev;
+ struct xfrm6_tunnel __rcu **pprev;
+ struct xfrm6_tunnel *t;
int ret = -EEXIST;
int priority = handler->priority;
mutex_lock(&tunnel6_mutex);
for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers;
- *pprev; pprev = &(*pprev)->next) {
- if ((*pprev)->priority > priority)
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&tunnel6_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t->priority > priority)
break;
- if ((*pprev)->priority == priority)
+ if (t->priority == priority)
goto err;
}
@@ -65,14 +68,17 @@ EXPORT_SYMBOL(xfrm6_tunnel_register);
int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
{
- struct xfrm6_tunnel **pprev;
+ struct xfrm6_tunnel __rcu **pprev;
+ struct xfrm6_tunnel *t;
int ret = -ENOENT;
mutex_lock(&tunnel6_mutex);
for (pprev = (family == AF_INET6) ? &tunnel6_handlers : &tunnel46_handlers;
- *pprev; pprev = &(*pprev)->next) {
- if (*pprev == handler) {
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&tunnel6_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t == handler) {
*pprev = handler->next;
ret = 0;
break;
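xfrm6_tunnel_register() keeps each handler list sorted by ascending priority and rejects a second handler at the same priority with -EEXIST; the rcu_dereference_protected() annotations added above simply document that the walk happens under tunnel6_mutex. A stand-alone sketch of the same ordered-insert logic, with illustrative struct and field names:

    #include <stdio.h>
    #include <stddef.h>

    struct tunnel_handler {
            struct tunnel_handler *next;
            int priority;
            const char *name;
    };

    static struct tunnel_handler *handlers;

    /* Insert sorted by ascending priority; refuse duplicates (-1 stands in for -EEXIST). */
    static int tunnel_register(struct tunnel_handler *h)
    {
            struct tunnel_handler **pprev, *t;

            for (pprev = &handlers; (t = *pprev) != NULL; pprev = &t->next) {
                    if (t->priority > h->priority)
                            break;
                    if (t->priority == h->priority)
                            return -1;
            }
            h->next = *pprev;
            *pprev = h;
            return 0;
    }

    int main(void)
    {
            struct tunnel_handler a = { NULL, 10, "a" };
            struct tunnel_handler b = { NULL, 5,  "b" };
            struct tunnel_handler c = { NULL, 10, "c" };
            struct tunnel_handler *t;

            tunnel_register(&a);
            tunnel_register(&b);
            printf("duplicate priority: %d\n", tunnel_register(&c)); /* -1 */
            for (t = handlers; t; t = t->next)
                    printf("%s (%d)\n", t->name, t->priority);       /* b, then a */
            return 0;
    }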
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index c84dad432114..91def93bec85 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -527,7 +527,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
}
}
- if (sk->sk_filter) {
+ if (rcu_dereference_raw(sk->sk_filter)) {
if (udp_lib_checksum_complete(skb))
goto drop;
}
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 499c045d6910..f7db676de77d 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1798,7 +1798,8 @@ static void iucv_work_fn(struct work_struct *work)
* Handles external interrupts coming in from CP.
* Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
*/
-static void iucv_external_interrupt(u16 code)
+static void iucv_external_interrupt(unsigned int ext_int_code,
+ unsigned int param32, unsigned long param64)
{
struct iucv_irq_data *p;
struct iucv_irq_list *work;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 1712af1c7b3f..c64ce0a0bb03 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -111,6 +111,10 @@ struct l2tp_net {
spinlock_t l2tp_session_hlist_lock;
};
+static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
+static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
+static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
+
static inline struct l2tp_net *l2tp_pernet(struct net *net)
{
BUG_ON(!net);
@@ -118,6 +122,34 @@ static inline struct l2tp_net *l2tp_pernet(struct net *net)
return net_generic(net, l2tp_net_id);
}
+
+/* Tunnel reference counts. Incremented per session that is added to
+ * the tunnel.
+ */
+static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
+{
+ atomic_inc(&tunnel->ref_count);
+}
+
+static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
+{
+ if (atomic_dec_and_test(&tunnel->ref_count))
+ l2tp_tunnel_free(tunnel);
+}
+#ifdef L2TP_REFCNT_DEBUG
+#define l2tp_tunnel_inc_refcount(_t) do { \
+ printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
+ l2tp_tunnel_inc_refcount_1(_t); \
+ } while (0)
+#define l2tp_tunnel_dec_refcount(_t) do { \
+ printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
+ l2tp_tunnel_dec_refcount_1(_t); \
+ } while (0)
+#else
+#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
+#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
+#endif
+
/* Session hash global list for L2TPv3.
* The session_id SHOULD be random according to RFC3931, but several
* L2TP implementations use incrementing session_ids. So we do a real
@@ -699,8 +731,8 @@ EXPORT_SYMBOL(l2tp_recv_common);
* Returns 1 if the packet was not a good data packet and could not be
* forwarded. All such packets are passed up to userspace to deal with.
*/
-int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
- int (*payload_hook)(struct sk_buff *skb))
+static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
+ int (*payload_hook)(struct sk_buff *skb))
{
struct l2tp_session *session = NULL;
unsigned char *ptr, *optr;
@@ -812,7 +844,6 @@ error:
return 1;
}
-EXPORT_SYMBOL_GPL(l2tp_udp_recv_core);
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
* Return codes:
@@ -922,7 +953,8 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
return bufp - optr;
}
-int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len)
+static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
+ size_t data_len)
{
struct l2tp_tunnel *tunnel = session->tunnel;
unsigned int len = skb->len;
@@ -970,7 +1002,6 @@ int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t dat
return 0;
}
-EXPORT_SYMBOL_GPL(l2tp_xmit_core);
/* Automatically called when the skb is freed.
*/
@@ -1089,7 +1120,7 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
* The tunnel context is deleted only when all session sockets have been
* closed.
*/
-void l2tp_tunnel_destruct(struct sock *sk)
+static void l2tp_tunnel_destruct(struct sock *sk)
{
struct l2tp_tunnel *tunnel;
@@ -1128,11 +1159,10 @@ void l2tp_tunnel_destruct(struct sock *sk)
end:
return;
}
-EXPORT_SYMBOL(l2tp_tunnel_destruct);
/* When the tunnel is closed, all the attached sessions need to go too.
*/
-void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
+static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
int hash;
struct hlist_node *walk;
@@ -1193,12 +1223,11 @@ again:
}
write_unlock_bh(&tunnel->hlist_lock);
}
-EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
/* Really kill the tunnel.
* Come here only when all sessions have been cleared from the tunnel.
*/
-void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
+static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
@@ -1217,7 +1246,6 @@ void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
atomic_dec(&l2tp_tunnel_count);
kfree(tunnel);
}
-EXPORT_SYMBOL_GPL(l2tp_tunnel_free);
/* Create a socket for the tunnel, if one isn't set up by
* userspace. This is used for static tunnels where there is no
@@ -1512,7 +1540,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_delete);
/* We come here whenever a session's send_seq, cookie_len or
* l2specific_len parameters are set.
*/
-void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+static void l2tp_session_set_header_len(struct l2tp_session *session, int version)
{
if (version == L2TP_HDR_VER_2) {
session->hdr_len = 6;
@@ -1525,7 +1553,6 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
}
}
-EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
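The tunnel refcount helpers above move from the header into l2tp_core.c and become file-local: every attached session takes a reference, and the tunnel is freed only when the last reference drops. A minimal sketch of that lifetime rule using C11 atomics; the function and field names are illustrative rather than the kernel API.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct tunnel {
            atomic_int ref_count;
            const char *name;
    };

    static void tunnel_free(struct tunnel *t)
    {
            printf("freeing %s\n", t->name);
            free(t);
    }

    static void tunnel_get(struct tunnel *t)
    {
            atomic_fetch_add(&t->ref_count, 1);
    }

    static void tunnel_put(struct tunnel *t)
    {
            /* fetch_sub returns the old value, so 1 means we dropped the last reference */
            if (atomic_fetch_sub(&t->ref_count, 1) == 1)
                    tunnel_free(t);
    }

    int main(void)
    {
            struct tunnel *t = calloc(1, sizeof(*t));

            t->name = "tunl0";
            atomic_init(&t->ref_count, 1);      /* creator's reference */
            tunnel_get(t);                      /* e.g. a session attaching */
            tunnel_put(t);                      /* session goes away */
            tunnel_put(t);                      /* last reference: frees the tunnel */
            return 0;
    }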
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index f0f318edd3f1..a16a48e79fab 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -231,48 +231,15 @@ extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_i
extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
extern int l2tp_session_delete(struct l2tp_session *session);
-extern void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
extern void l2tp_session_free(struct l2tp_session *session);
extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
-extern int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, int (*payload_hook)(struct sk_buff *skb));
extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
-extern int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len);
extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
-extern void l2tp_tunnel_destruct(struct sock *sk);
-extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
-extern void l2tp_session_set_header_len(struct l2tp_session *session, int version);
extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
-/* Tunnel reference counts. Incremented per session that is added to
- * the tunnel.
- */
-static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
-{
- atomic_inc(&tunnel->ref_count);
-}
-
-static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
-{
- if (atomic_dec_and_test(&tunnel->ref_count))
- l2tp_tunnel_free(tunnel);
-}
-#ifdef L2TP_REFCNT_DEBUG
-#define l2tp_tunnel_inc_refcount(_t) do { \
- printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
- l2tp_tunnel_inc_refcount_1(_t); \
- } while (0)
-#define l2tp_tunnel_dec_refcount(_t) do { \
- printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
- l2tp_tunnel_dec_refcount_1(_t); \
- } while (0)
-#else
-#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
-#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
-#endif
-
/* Session reference counts. Incremented when code obtains a reference
* to a session.
*/
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 1c770c0644d1..0bf6a59545ab 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -576,7 +576,7 @@ out:
return copied;
}
-struct proto l2tp_ip_prot = {
+static struct proto l2tp_ip_prot = {
.name = "L2TP/IP",
.owner = THIS_MODULE,
.init = l2tp_ip_open,
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 4d6f8653ec88..798d9b9462e2 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -6,6 +6,7 @@ config MAC80211
select CRYPTO_ARC4
select CRYPTO_AES
select CRC32
+ select AVERAGE
---help---
This option enables the hardware independent IEEE 802.11
networking stack.
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index d2b03e0851ef..4bd6ef0be380 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -147,6 +147,5 @@ struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[])
void ieee80211_aes_key_free(struct crypto_cipher *tfm)
{
- if (tfm)
- crypto_free_cipher(tfm);
+ crypto_free_cipher(tfm);
}
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index b4d66cca76d6..d502b2684a66 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -128,6 +128,5 @@ struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[])
void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm)
{
- if (tfm)
- crypto_free_cipher(tfm);
+ crypto_free_cipher(tfm);
}
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 720b7a84af59..f138b195d657 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -129,9 +129,7 @@ static void sta_rx_agg_reorder_timer_expired(unsigned long data)
timer_to_tid[0]);
rcu_read_lock();
- spin_lock(&sta->lock);
ieee80211_release_reorder_timeout(sta, *ptid);
- spin_unlock(&sta->lock);
rcu_read_unlock();
}
@@ -256,7 +254,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
}
/* prepare A-MPDU MLME for Rx aggregation */
- tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
+ tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL);
if (!tid_agg_rx) {
#ifdef CONFIG_MAC80211_HT_DEBUG
if (net_ratelimit())
@@ -280,9 +278,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
/* prepare reordering buffer */
tid_agg_rx->reorder_buf =
- kcalloc(buf_size, sizeof(struct sk_buff *), GFP_ATOMIC);
+ kcalloc(buf_size, sizeof(struct sk_buff *), GFP_KERNEL);
tid_agg_rx->reorder_time =
- kcalloc(buf_size, sizeof(unsigned long), GFP_ATOMIC);
+ kcalloc(buf_size, sizeof(unsigned long), GFP_KERNEL);
if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) {
#ifdef CONFIG_MAC80211_HT_DEBUG
if (net_ratelimit())
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 18bd0e550600..c30b8b72eedb 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -19,9 +19,10 @@
#include "rate.h"
#include "mesh.h"
-static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
- enum nl80211_iftype type, u32 *flags,
- struct vif_params *params)
+static struct net_device *ieee80211_add_iface(struct wiphy *wiphy, char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
struct net_device *dev;
@@ -29,12 +30,15 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
int err;
err = ieee80211_if_add(local, name, &dev, type, params);
- if (err || type != NL80211_IFTYPE_MONITOR || !flags)
- return err;
+ if (err)
+ return ERR_PTR(err);
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- sdata->u.mntr_flags = *flags;
- return 0;
+ if (type == NL80211_IFTYPE_MONITOR && flags) {
+ sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ sdata->u.mntr_flags = *flags;
+ }
+
+ return dev;
}
static int ieee80211_del_iface(struct wiphy *wiphy, struct net_device *dev)
@@ -56,11 +60,6 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
if (ret)
return ret;
- if (ieee80211_vif_is_mesh(&sdata->vif) && params->mesh_id_len)
- ieee80211_sdata_set_mesh_id(sdata,
- params->mesh_id_len,
- params->mesh_id);
-
if (type == NL80211_IFTYPE_AP_VLAN &&
params && params->use_4addr == 0)
rcu_assign_pointer(sdata->u.vlan.sta, NULL);
@@ -343,8 +342,9 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
(sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
- sinfo->filled |= STATION_INFO_SIGNAL;
+ sinfo->filled |= STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG;
sinfo->signal = (s8)sta->last_signal;
+ sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
}
sinfo->txrate.flags = 0;
@@ -999,9 +999,9 @@ static inline bool _chg_mesh_attr(enum nl80211_meshconf_params parm, u32 mask)
return (mask >> (parm-1)) & 0x1;
}
-static int ieee80211_set_mesh_params(struct wiphy *wiphy,
- struct net_device *dev,
- const struct mesh_config *nconf, u32 mask)
+static int ieee80211_update_mesh_params(struct wiphy *wiphy,
+ struct net_device *dev, u32 mask,
+ const struct mesh_config *nconf)
{
struct mesh_config *conf;
struct ieee80211_sub_if_data *sdata;
@@ -1024,6 +1024,8 @@ static int ieee80211_set_mesh_params(struct wiphy *wiphy,
conf->dot11MeshMaxRetries = nconf->dot11MeshMaxRetries;
if (_chg_mesh_attr(NL80211_MESHCONF_TTL, mask))
conf->dot11MeshTTL = nconf->dot11MeshTTL;
+ if (_chg_mesh_attr(NL80211_MESHCONF_ELEMENT_TTL, mask))
+ conf->dot11MeshTTL = nconf->element_ttl;
if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask))
conf->auto_open_plinks = nconf->auto_open_plinks;
if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, mask))
@@ -1050,6 +1052,30 @@ static int ieee80211_set_mesh_params(struct wiphy *wiphy,
return 0;
}
+static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
+ const struct mesh_config *conf,
+ const struct mesh_setup *setup)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+
+ memcpy(&sdata->u.mesh.mshcfg, conf, sizeof(struct mesh_config));
+ ifmsh->mesh_id_len = setup->mesh_id_len;
+ memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len);
+
+ ieee80211_start_mesh(sdata);
+
+ return 0;
+}
+
+static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ ieee80211_stop_mesh(sdata);
+
+ return 0;
+}
#endif
static int ieee80211_change_bss(struct wiphy *wiphy,
@@ -1108,6 +1134,12 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
sdata->flags &= ~IEEE80211_SDATA_DONT_BRIDGE_PACKETS;
}
+ if (params->ht_opmode >= 0) {
+ sdata->vif.bss_conf.ht_operation_mode =
+ (u16) params->ht_opmode;
+ changed |= BSS_CHANGED_HT;
+ }
+
ieee80211_bss_info_change_notify(sdata, changed);
return 0;
@@ -1299,6 +1331,13 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
struct ieee80211_local *local = wiphy_priv(wiphy);
int err;
+ if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
+ err = drv_set_frag_threshold(local, wiphy->frag_threshold);
+
+ if (err)
+ return err;
+ }
+
if (changed & WIPHY_PARAM_COVERAGE_CLASS) {
err = drv_set_coverage_class(local, wiphy->coverage_class);
@@ -1544,27 +1583,54 @@ static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
return ieee80211_wk_cancel_remain_on_channel(sdata, cookie);
}
+static enum work_done_result
+ieee80211_offchan_tx_done(struct ieee80211_work *wk, struct sk_buff *skb)
+{
+ /*
+ * Use the data embedded in the work struct for reporting
+ * here so if the driver mangled the SKB before dropping
+ * it (which is the only way we really should get here)
+ * then we don't report mangled data.
+ *
+ * If there was no wait time, then by the time we get here
+ * the driver will likely not have reported the status yet,
+ * so in that case userspace will have to deal with it.
+ */
+
+ if (wk->offchan_tx.wait && wk->offchan_tx.frame)
+ cfg80211_mgmt_tx_status(wk->sdata->dev,
+ (unsigned long) wk->offchan_tx.frame,
+ wk->ie, wk->ie_len, false, GFP_KERNEL);
+
+ return WORK_DONE_DESTROY;
+}
+
static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
- struct ieee80211_channel *chan,
+ struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
- bool channel_type_valid,
+ bool channel_type_valid, unsigned int wait,
const u8 *buf, size_t len, u64 *cookie)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct sta_info *sta;
+ struct ieee80211_work *wk;
const struct ieee80211_mgmt *mgmt = (void *)buf;
u32 flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
IEEE80211_TX_CTL_REQ_TX_STATUS;
+ bool is_offchan = false;
/* Check that we are on the requested channel for transmission */
if (chan != local->tmp_channel &&
chan != local->oper_channel)
- return -EBUSY;
+ is_offchan = true;
if (channel_type_valid &&
(channel_type != local->tmp_channel_type &&
channel_type != local->_oper_channel_type))
+ is_offchan = true;
+
+ if (is_offchan && !offchan)
return -EBUSY;
switch (sdata->vif.type) {
@@ -1598,12 +1664,70 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
IEEE80211_SKB_CB(skb)->flags = flags;
skb->dev = sdata->dev;
- ieee80211_tx_skb(sdata, skb);
*cookie = (unsigned long) skb;
+
+ /*
+ * Can transmit right away if the channel was the
+ * right one and there's no wait involved... If a
+ * wait is involved, we might otherwise not be on
+ * the right channel for long enough!
+ */
+ if (!is_offchan && !wait && !sdata->vif.bss_conf.idle) {
+ ieee80211_tx_skb(sdata, skb);
+ return 0;
+ }
+
+ wk = kzalloc(sizeof(*wk) + len, GFP_KERNEL);
+ if (!wk) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ wk->type = IEEE80211_WORK_OFFCHANNEL_TX;
+ wk->chan = chan;
+ wk->sdata = sdata;
+ wk->done = ieee80211_offchan_tx_done;
+ wk->offchan_tx.frame = skb;
+ wk->offchan_tx.wait = wait;
+ wk->ie_len = len;
+ memcpy(wk->ie, buf, len);
+
+ ieee80211_add_work(wk);
return 0;
}
+static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
+ struct net_device *dev,
+ u64 cookie)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_work *wk;
+ int ret = -ENOENT;
+
+ mutex_lock(&local->mtx);
+ list_for_each_entry(wk, &local->work_list, list) {
+ if (wk->sdata != sdata)
+ continue;
+
+ if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
+ continue;
+
+ if (cookie != (unsigned long) wk->offchan_tx.frame)
+ continue;
+
+ wk->timeout = jiffies;
+
+ ieee80211_queue_work(&local->hw, &local->work_work);
+ ret = 0;
+ break;
+ }
+ mutex_unlock(&local->mtx);
+
+ return ret;
+}
+
static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
struct net_device *dev,
u16 frame_type, bool reg)
@@ -1621,6 +1745,23 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
ieee80211_queue_work(&local->hw, &local->reconfig_filter);
}
+static int ieee80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+
+ if (local->started)
+ return -EOPNOTSUPP;
+
+ return drv_set_antenna(local, tx_ant, rx_ant);
+}
+
+static int ieee80211_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+
+ return drv_get_antenna(local, tx_ant, rx_ant);
+}
+
struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -1645,8 +1786,10 @@ struct cfg80211_ops mac80211_config_ops = {
.change_mpath = ieee80211_change_mpath,
.get_mpath = ieee80211_get_mpath,
.dump_mpath = ieee80211_dump_mpath,
- .set_mesh_params = ieee80211_set_mesh_params,
+ .update_mesh_params = ieee80211_update_mesh_params,
.get_mesh_params = ieee80211_get_mesh_params,
+ .join_mesh = ieee80211_join_mesh,
+ .leave_mesh = ieee80211_leave_mesh,
#endif
.change_bss = ieee80211_change_bss,
.set_txq_params = ieee80211_set_txq_params,
@@ -1671,6 +1814,9 @@ struct cfg80211_ops mac80211_config_ops = {
.remain_on_channel = ieee80211_remain_on_channel,
.cancel_remain_on_channel = ieee80211_cancel_remain_on_channel,
.mgmt_tx = ieee80211_mgmt_tx,
+ .mgmt_tx_cancel_wait = ieee80211_mgmt_tx_cancel_wait,
.set_cqm_rssi_config = ieee80211_set_cqm_rssi_config,
.mgmt_frame_register = ieee80211_mgmt_frame_register,
+ .set_antenna = ieee80211_set_antenna,
+ .get_antenna = ieee80211_get_antenna,
};
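The add_virtual_intf change at the top of this file makes the callback return the new net_device itself, with failures encoded as ERR_PTR(err) instead of a plain int. A user-space sketch of that pointer-or-errno encoding; the ERR_PTR/IS_ERR/PTR_ERR helpers below are simplified re-implementations for illustration, not the kernel headers.

    #include <errno.h>
    #include <stdio.h>

    /* Simplified re-implementation of the kernel's ERR_PTR helpers. */
    #define MAX_ERRNO 4095
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    struct net_device { const char *name; };

    static struct net_device wlan0 = { "wlan0" };

    /* Returns either a valid device pointer or an encoded negative errno. */
    static struct net_device *add_iface(int fail)
    {
            if (fail)
                    return ERR_PTR(-ENOMEM);
            return &wlan0;
    }

    int main(void)
    {
            struct net_device *dev = add_iface(0);

            if (IS_ERR(dev))
                    printf("error %ld\n", PTR_ERR(dev));
            else
                    printf("created %s\n", dev->name);

            dev = add_iface(1);
            if (IS_ERR(dev))
                    printf("error %ld\n", PTR_ERR(dev));   /* -12 (ENOMEM) */
            return 0;
    }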
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 18260aa99c56..1f02e599a318 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -21,16 +21,30 @@ int mac80211_open_file_generic(struct inode *inode, struct file *file)
return 0;
}
-#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \
+#define DEBUGFS_FORMAT_BUFFER_SIZE 100
+
+int mac80211_format_buffer(char __user *userbuf, size_t count,
+ loff_t *ppos, char *fmt, ...)
+{
+ va_list args;
+ char buf[DEBUGFS_FORMAT_BUFFER_SIZE];
+ int res;
+
+ va_start(args, fmt);
+ res = vscnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, res);
+}
+
+#define DEBUGFS_READONLY_FILE(name, fmt, value...) \
static ssize_t name## _read(struct file *file, char __user *userbuf, \
size_t count, loff_t *ppos) \
{ \
struct ieee80211_local *local = file->private_data; \
- char buf[buflen]; \
- int res; \
\
- res = scnprintf(buf, buflen, fmt "\n", ##value); \
- return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
+ return mac80211_format_buffer(userbuf, count, ppos, \
+ fmt "\n", ##value); \
} \
\
static const struct file_operations name## _ops = { \
@@ -46,13 +60,13 @@ static const struct file_operations name## _ops = { \
debugfs_create_file(#name, mode, phyd, local, &name## _ops);
-DEBUGFS_READONLY_FILE(frequency, 20, "%d",
+DEBUGFS_READONLY_FILE(frequency, "%d",
local->hw.conf.channel->center_freq);
-DEBUGFS_READONLY_FILE(total_ps_buffered, 20, "%d",
+DEBUGFS_READONLY_FILE(total_ps_buffered, "%d",
local->total_ps_buffered);
-DEBUGFS_READONLY_FILE(wep_iv, 20, "%#08x",
+DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
local->wep_iv & 0xffffff);
-DEBUGFS_READONLY_FILE(rate_ctrl_alg, 100, "%s",
+DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s",
local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
static ssize_t tsf_read(struct file *file, char __user *user_buf,
@@ -60,13 +74,11 @@ static ssize_t tsf_read(struct file *file, char __user *user_buf,
{
struct ieee80211_local *local = file->private_data;
u64 tsf;
- char buf[100];
tsf = drv_get_tsf(local);
- snprintf(buf, sizeof(buf), "0x%016llx\n", (unsigned long long) tsf);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, 19);
+ return mac80211_format_buffer(user_buf, count, ppos, "0x%016llx\n",
+ (unsigned long long) tsf);
}
static ssize_t tsf_write(struct file *file,
@@ -131,12 +143,9 @@ static ssize_t noack_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_local *local = file->private_data;
- int res;
- char buf[10];
- res = scnprintf(buf, sizeof(buf), "%d\n", local->wifi_wme_noack_test);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+ return mac80211_format_buffer(user_buf, count, ppos, "%d\n",
+ local->wifi_wme_noack_test);
}
static ssize_t noack_write(struct file *file,
@@ -168,12 +177,8 @@ static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_local *local = file->private_data;
- int res;
- char buf[10];
-
- res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_queues);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+ return mac80211_format_buffer(user_buf, count, ppos, "0x%x\n",
+ local->uapsd_queues);
}
static ssize_t uapsd_queues_write(struct file *file,
@@ -215,12 +220,9 @@ static ssize_t uapsd_max_sp_len_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_local *local = file->private_data;
- int res;
- char buf[10];
- res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_max_sp_len);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+ return mac80211_format_buffer(user_buf, count, ppos, "0x%x\n",
+ local->uapsd_max_sp_len);
}
static ssize_t uapsd_max_sp_len_write(struct file *file,
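The per-file stack buffers and scnprintf() calls in this file collapse into one varargs helper, mac80211_format_buffer(), which formats into a bounded scratch buffer before handing the bytes to simple_read_from_buffer(). A user-space sketch of the same shape using vsnprintf; the buffer size and helper name are illustrative only.

    #include <stdarg.h>
    #include <stdio.h>
    #include <string.h>

    #define FORMAT_BUFFER_SIZE 100

    /* Format into a bounded scratch buffer and copy the result to the caller,
     * roughly the shape of the new mac80211_format_buffer() helper. */
    static int format_buffer(char *out, size_t outlen, const char *fmt, ...)
    {
            char buf[FORMAT_BUFFER_SIZE];
            va_list args;
            int res;

            va_start(args, fmt);
            res = vsnprintf(buf, sizeof(buf), fmt, args);
            va_end(args);

            if (res < 0)
                    return res;
            strncpy(out, buf, outlen - 1);
            out[outlen - 1] = '\0';
            return (int)strlen(out);
    }

    int main(void)
    {
            char out[64];
            int n = format_buffer(out, sizeof(out), "0x%016llx\n", 0x1234ULL);

            printf("%d bytes: %s", n, out);
            return 0;
    }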
diff --git a/net/mac80211/debugfs.h b/net/mac80211/debugfs.h
index 09cc9be34796..7c87529630f5 100644
--- a/net/mac80211/debugfs.h
+++ b/net/mac80211/debugfs.h
@@ -4,6 +4,8 @@
#ifdef CONFIG_MAC80211_DEBUGFS
extern void debugfs_hw_add(struct ieee80211_local *local);
extern int mac80211_open_file_generic(struct inode *inode, struct file *file);
+extern int mac80211_format_buffer(char __user *userbuf, size_t count,
+ loff_t *ppos, char *fmt, ...);
#else
static inline void debugfs_hw_add(struct ieee80211_local *local)
{
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 1243d1db5c59..5822a6ce7671 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -15,18 +15,17 @@
#include "debugfs.h"
#include "debugfs_key.h"
-#define KEY_READ(name, prop, buflen, format_string) \
+#define KEY_READ(name, prop, format_string) \
static ssize_t key_##name##_read(struct file *file, \
char __user *userbuf, \
size_t count, loff_t *ppos) \
{ \
- char buf[buflen]; \
struct ieee80211_key *key = file->private_data; \
- int res = scnprintf(buf, buflen, format_string, key->prop); \
- return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
+ return mac80211_format_buffer(userbuf, count, ppos, \
+ format_string, key->prop); \
}
-#define KEY_READ_D(name) KEY_READ(name, name, 20, "%d\n")
-#define KEY_READ_X(name) KEY_READ(name, name, 20, "0x%x\n")
+#define KEY_READ_D(name) KEY_READ(name, name, "%d\n")
+#define KEY_READ_X(name) KEY_READ(name, name, "0x%x\n")
#define KEY_OPS(name) \
static const struct file_operations key_ ##name## _ops = { \
@@ -39,9 +38,9 @@ static const struct file_operations key_ ##name## _ops = { \
KEY_READ_##format(name) \
KEY_OPS(name)
-#define KEY_CONF_READ(name, buflen, format_string) \
- KEY_READ(conf_##name, conf.name, buflen, format_string)
-#define KEY_CONF_READ_D(name) KEY_CONF_READ(name, 20, "%d\n")
+#define KEY_CONF_READ(name, format_string) \
+ KEY_READ(conf_##name, conf.name, format_string)
+#define KEY_CONF_READ_D(name) KEY_CONF_READ(name, "%d\n")
#define KEY_CONF_OPS(name) \
static const struct file_operations key_ ##name## _ops = { \
@@ -59,7 +58,7 @@ KEY_CONF_FILE(keyidx, D);
KEY_CONF_FILE(hw_key_idx, D);
KEY_FILE(flags, X);
KEY_FILE(tx_rx_count, D);
-KEY_READ(ifindex, sdata->name, IFNAMSIZ + 2, "%s\n");
+KEY_READ(ifindex, sdata->name, "%s\n");
KEY_OPS(ifindex);
static ssize_t key_algorithm_read(struct file *file,
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index cbdf36d7841c..2dabdf7680d0 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -251,6 +251,7 @@ IEEE80211_IF_FILE(dot11MeshConfirmTimeout,
IEEE80211_IF_FILE(dot11MeshHoldingTimeout,
u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC);
+IEEE80211_IF_FILE(element_ttl, u.mesh.mshcfg.element_ttl, DEC);
IEEE80211_IF_FILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC);
IEEE80211_IF_FILE(dot11MeshMaxPeerLinks,
u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC);
@@ -355,6 +356,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
MESHPARAMS_ADD(dot11MeshConfirmTimeout);
MESHPARAMS_ADD(dot11MeshHoldingTimeout);
MESHPARAMS_ADD(dot11MeshTTL);
+ MESHPARAMS_ADD(element_ttl);
MESHPARAMS_ADD(auto_open_plinks);
MESHPARAMS_ADD(dot11MeshMaxPeerLinks);
MESHPARAMS_ADD(dot11MeshHWMPactivePathTimeout);
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 4601fea1784d..8bb5af85f469 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -17,20 +17,18 @@
/* sta attributtes */
-#define STA_READ(name, buflen, field, format_string) \
+#define STA_READ(name, field, format_string) \
static ssize_t sta_ ##name## _read(struct file *file, \
char __user *userbuf, \
size_t count, loff_t *ppos) \
{ \
- int res; \
struct sta_info *sta = file->private_data; \
- char buf[buflen]; \
- res = scnprintf(buf, buflen, format_string, sta->field); \
- return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
+ return mac80211_format_buffer(userbuf, count, ppos, \
+ format_string, sta->field); \
}
-#define STA_READ_D(name, field) STA_READ(name, 20, field, "%d\n")
-#define STA_READ_U(name, field) STA_READ(name, 20, field, "%u\n")
-#define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n")
+#define STA_READ_D(name, field) STA_READ(name, field, "%d\n")
+#define STA_READ_U(name, field) STA_READ(name, field, "%u\n")
+#define STA_READ_S(name, field) STA_READ(name, field, "%s\n")
#define STA_OPS(name) \
static const struct file_operations sta_ ##name## _ops = { \
@@ -79,22 +77,18 @@ static ssize_t sta_num_ps_buf_frames_read(struct file *file,
char __user *userbuf,
size_t count, loff_t *ppos)
{
- char buf[20];
struct sta_info *sta = file->private_data;
- int res = scnprintf(buf, sizeof(buf), "%u\n",
- skb_queue_len(&sta->ps_tx_buf));
- return simple_read_from_buffer(userbuf, count, ppos, buf, res);
+ return mac80211_format_buffer(userbuf, count, ppos, "%u\n",
+ skb_queue_len(&sta->ps_tx_buf));
}
STA_OPS(num_ps_buf_frames);
static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- char buf[20];
struct sta_info *sta = file->private_data;
- int res = scnprintf(buf, sizeof(buf), "%d\n",
- jiffies_to_msecs(jiffies - sta->last_rx));
- return simple_read_from_buffer(userbuf, count, ppos, buf, res);
+ return mac80211_format_buffer(userbuf, count, ppos, "%d\n",
+ jiffies_to_msecs(jiffies - sta->last_rx));
}
STA_OPS(inactive_ms);
@@ -118,34 +112,35 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
char buf[71 + STA_TID_NUM * 40], *p = buf;
int i;
struct sta_info *sta = file->private_data;
+ struct tid_ampdu_rx *tid_rx;
+ struct tid_ampdu_tx *tid_tx;
+
+ rcu_read_lock();
- spin_lock_bh(&sta->lock);
p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
sta->ampdu_mlme.dialog_token_allocator + 1);
p += scnprintf(p, sizeof(buf) + buf - p,
"TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");
+
for (i = 0; i < STA_TID_NUM; i++) {
+ tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]);
+ tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[i]);
+
p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i);
- p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
- !!sta->ampdu_mlme.tid_rx[i]);
+ p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", !!tid_rx);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
- sta->ampdu_mlme.tid_rx[i] ?
- sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
+ tid_rx ? tid_rx->dialog_token : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
- sta->ampdu_mlme.tid_rx[i] ?
- sta->ampdu_mlme.tid_rx[i]->ssn : 0);
+ tid_rx ? tid_rx->ssn : 0);
- p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
- !!sta->ampdu_mlme.tid_tx[i]);
+ p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", !!tid_tx);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
- sta->ampdu_mlme.tid_tx[i] ?
- sta->ampdu_mlme.tid_tx[i]->dialog_token : 0);
+ tid_tx ? tid_tx->dialog_token : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d",
- sta->ampdu_mlme.tid_tx[i] ?
- skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0);
+ tid_tx ? skb_queue_len(&tid_tx->pending) : 0);
p += scnprintf(p, sizeof(buf) + buf - p, "\n");
}
- spin_unlock_bh(&sta->lock);
+ rcu_read_unlock();
return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 16983825f8e8..4244554d218a 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -233,6 +233,20 @@ static inline void drv_get_tkip_seq(struct ieee80211_local *local,
trace_drv_get_tkip_seq(local, hw_key_idx, iv32, iv16);
}
+static inline int drv_set_frag_threshold(struct ieee80211_local *local,
+ u32 value)
+{
+ int ret = 0;
+
+ might_sleep();
+
+ trace_drv_set_frag_threshold(local, value);
+ if (local->ops->set_frag_threshold)
+ ret = local->ops->set_frag_threshold(&local->hw, value);
+ trace_drv_return_int(local, ret);
+ return ret;
+}
+
static inline int drv_set_rts_threshold(struct ieee80211_local *local,
u32 value)
{
@@ -428,4 +442,27 @@ static inline void drv_channel_switch(struct ieee80211_local *local,
trace_drv_return_void(local);
}
+
+static inline int drv_set_antenna(struct ieee80211_local *local,
+ u32 tx_ant, u32 rx_ant)
+{
+ int ret = -EOPNOTSUPP;
+ might_sleep();
+ if (local->ops->set_antenna)
+ ret = local->ops->set_antenna(&local->hw, tx_ant, rx_ant);
+ trace_drv_set_antenna(local, tx_ant, rx_ant, ret);
+ return ret;
+}
+
+static inline int drv_get_antenna(struct ieee80211_local *local,
+ u32 *tx_ant, u32 *rx_ant)
+{
+ int ret = -EOPNOTSUPP;
+ might_sleep();
+ if (local->ops->get_antenna)
+ ret = local->ops->get_antenna(&local->hw, tx_ant, rx_ant);
+ trace_drv_get_antenna(local, *tx_ant, *rx_ant, ret);
+ return ret;
+}
+
#endif /* __MAC80211_DRIVER_OPS */
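The new drv_set_antenna()/drv_get_antenna() wrappers above follow the usual optional-callback convention: default to -EOPNOTSUPP and only call into the driver when the op is provided. A small stand-alone sketch of that pattern; the ops struct and hard-coded errno value are illustrative.

    #include <stdio.h>
    #include <stddef.h>

    #define EOPNOTSUPP 95

    struct driver_ops {
            int (*set_antenna)(unsigned tx_ant, unsigned rx_ant);
    };

    /* Optional-callback pattern: report "not supported" unless the driver fills it in. */
    static int drv_set_antenna(const struct driver_ops *ops, unsigned tx, unsigned rx)
    {
            if (ops->set_antenna)
                    return ops->set_antenna(tx, rx);
            return -EOPNOTSUPP;
    }

    static int my_set_antenna(unsigned tx, unsigned rx)
    {
            printf("driver: tx mask %#x, rx mask %#x\n", tx, rx);
            return 0;
    }

    int main(void)
    {
            struct driver_ops none = { NULL };
            struct driver_ops full = { my_set_antenna };

            printf("no op:   %d\n", drv_set_antenna(&none, 0x3, 0x3));  /* -95 */
            printf("with op: %d\n", drv_set_antenna(&full, 0x3, 0x3));  /* 0   */
            return 0;
    }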
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 6831fb1641c8..c2772f23ac9c 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -531,6 +531,27 @@ TRACE_EVENT(drv_get_tkip_seq,
)
);
+TRACE_EVENT(drv_set_frag_threshold,
+ TP_PROTO(struct ieee80211_local *local, u32 value),
+
+ TP_ARGS(local, value),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u32, value)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->value = value;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " value:%d",
+ LOCAL_PR_ARG, __entry->value
+ )
+);
+
TRACE_EVENT(drv_set_rts_threshold,
TP_PROTO(struct ieee80211_local *local, u32 value),
@@ -862,6 +883,56 @@ TRACE_EVENT(drv_channel_switch,
)
);
+TRACE_EVENT(drv_set_antenna,
+ TP_PROTO(struct ieee80211_local *local, u32 tx_ant, u32 rx_ant, int ret),
+
+ TP_ARGS(local, tx_ant, rx_ant, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u32, tx_ant)
+ __field(u32, rx_ant)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->tx_ant = tx_ant;
+ __entry->rx_ant = rx_ant;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " tx_ant:%d rx_ant:%d ret:%d",
+ LOCAL_PR_ARG, __entry->tx_ant, __entry->rx_ant, __entry->ret
+ )
+);
+
+TRACE_EVENT(drv_get_antenna,
+ TP_PROTO(struct ieee80211_local *local, u32 tx_ant, u32 rx_ant, int ret),
+
+ TP_ARGS(local, tx_ant, rx_ant, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u32, tx_ant)
+ __field(u32, rx_ant)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->tx_ant = tx_ant;
+ __entry->rx_ant = rx_ant;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " tx_ant:%d rx_ant:%d ret:%d",
+ LOCAL_PR_ARG, __entry->tx_ant, __entry->rx_ant, __entry->ret
+ )
+);
+
/*
* Tracing for API calls that drivers call.
*/
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 239c4836a946..410d104b1347 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -915,6 +915,8 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
sdata->u.ibss.privacy = params->privacy;
sdata->u.ibss.basic_rates = params->basic_rates;
+ memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate,
+ sizeof(params->mcast_rate));
sdata->vif.bss_conf.beacon_int = params->beacon_interval;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index b80c38689927..72499fe5fc36 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -260,6 +260,7 @@ enum ieee80211_work_type {
IEEE80211_WORK_ASSOC_BEACON_WAIT,
IEEE80211_WORK_ASSOC,
IEEE80211_WORK_REMAIN_ON_CHANNEL,
+ IEEE80211_WORK_OFFCHANNEL_TX,
};
/**
@@ -320,6 +321,10 @@ struct ieee80211_work {
struct {
u32 duration;
} remain;
+ struct {
+ struct sk_buff *frame;
+ u32 wait;
+ } offchan_tx;
};
int ie_len;
@@ -349,8 +354,10 @@ struct ieee80211_if_managed {
struct work_struct chswitch_work;
struct work_struct beacon_connection_loss_work;
+ unsigned long beacon_timeout;
unsigned long probe_timeout;
int probe_send_count;
+ bool nullfunc_failed;
struct mutex mtx;
struct cfg80211_bss *associated;
@@ -602,19 +609,6 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
return container_of(p, struct ieee80211_sub_if_data, vif);
}
-static inline void
-ieee80211_sdata_set_mesh_id(struct ieee80211_sub_if_data *sdata,
- u8 mesh_id_len, u8 *mesh_id)
-{
-#ifdef CONFIG_MAC80211_MESH
- struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- ifmsh->mesh_id_len = mesh_id_len;
- memcpy(ifmsh->mesh_id, mesh_id, mesh_id_len);
-#else
- WARN_ON(1);
-#endif
-}
-
enum sdata_queue_type {
IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0,
IEEE80211_SDATA_QUEUE_AGG_START = 1,
@@ -1264,6 +1258,8 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
int powersave);
void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr);
+void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_hdr *hdr, bool ack);
void ieee80211_beacon_connection_loss_work(struct work_struct *work);
void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
@@ -1278,6 +1274,9 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
struct sk_buff *skb);
int ieee80211_add_pending_skbs(struct ieee80211_local *local,
struct sk_buff_head *skbs);
+int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
+ struct sk_buff_head *skbs,
+ void (*fn)(void *data), void *data);
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg,
@@ -1287,6 +1286,10 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
const u8 *ie, size_t ie_len,
enum ieee80211_band band, u32 rate_mask,
u8 channel);
+struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
+ u8 *dst,
+ const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len);
void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 7aa85591dbe7..f0f11bb794af 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -197,11 +197,6 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
sdata->bss = &sdata->u.ap;
break;
case NL80211_IFTYPE_MESH_POINT:
- if (!ieee80211_vif_is_mesh(&sdata->vif))
- break;
- /* mesh ifaces must set allmulti to forward mcast traffic */
- atomic_inc(&local->iff_allmultis);
- break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_ADHOC:
@@ -273,12 +268,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
goto err_stop;
}
- if (ieee80211_vif_is_mesh(&sdata->vif)) {
- local->fif_other_bss++;
- ieee80211_configure_filter(local);
-
- ieee80211_start_mesh(sdata);
- } else if (sdata->vif.type == NL80211_IFTYPE_AP) {
+ if (sdata->vif.type == NL80211_IFTYPE_AP) {
local->fif_pspoll++;
local->fif_probe_req++;
@@ -503,18 +493,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
ieee80211_adjust_monitor_flags(sdata, -1);
ieee80211_configure_filter(local);
break;
- case NL80211_IFTYPE_MESH_POINT:
- if (ieee80211_vif_is_mesh(&sdata->vif)) {
- /* other_bss and allmulti are always set on mesh
- * ifaces */
- local->fif_other_bss--;
- atomic_dec(&local->iff_allmultis);
-
- ieee80211_configure_filter(local);
-
- ieee80211_stop_mesh(sdata);
- }
- /* fall through */
default:
flush_work(&sdata->work);
/*
@@ -1204,12 +1182,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
if (ret)
goto fail;
- if (ieee80211_vif_is_mesh(&sdata->vif) &&
- params && params->mesh_id_len)
- ieee80211_sdata_set_mesh_id(sdata,
- params->mesh_id_len,
- params->mesh_id);
-
mutex_lock(&local->iflist_mtx);
list_add_tail_rcu(&sdata->list, &local->interfaces);
mutex_unlock(&local->iflist_mtx);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index ccd676b2f599..72df1ca7299b 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -84,10 +84,17 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
goto out_unsupported;
sdata = key->sdata;
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+ /*
+ * The driver doesn't know anything about VLAN interfaces.
+ * Hence, don't send GTKs for VLAN interfaces to the driver.
+ */
+ if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ goto out_unsupported;
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data,
u.ap);
+ }
ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 107a0cbe52ac..973fee9f7d69 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -245,9 +245,12 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.enable_beacon =
!!sdata->u.ibss.presp;
break;
+#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
- sdata->vif.bss_conf.enable_beacon = true;
+ sdata->vif.bss_conf.enable_beacon =
+ !!sdata->u.mesh.mesh_id_len;
break;
+#endif
default:
/* not reached */
WARN_ON(1);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index c8a4f19ed13b..63e1188d5062 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -513,6 +513,11 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_local *local = sdata->local;
+ local->fif_other_bss++;
+ /* mesh ifaces must set allmulti to forward mcast traffic */
+ atomic_inc(&local->iff_allmultis);
+ ieee80211_configure_filter(local);
+
set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
ieee80211_mesh_root_setup(ifmsh);
ieee80211_queue_work(&local->hw, &sdata->work);
@@ -524,6 +529,13 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+
+ ifmsh->mesh_id_len = 0;
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
+ sta_info_flush(local, NULL);
+
del_timer_sync(&sdata->u.mesh.housekeeping_timer);
del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
/*
@@ -534,6 +546,10 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
* it no longer is.
*/
cancel_work_sync(&sdata->work);
+
+ local->fif_other_bss--;
+ atomic_dec(&local->iff_allmultis);
+ ieee80211_configure_filter(local);
}
static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
@@ -663,26 +679,6 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
ieee80211_mesh_housekeeping_timer,
(unsigned long) sdata);
- ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T;
- ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T;
- ifmsh->mshcfg.dot11MeshHoldingTimeout = MESH_HOLD_T;
- ifmsh->mshcfg.dot11MeshMaxRetries = MESH_MAX_RETR;
- ifmsh->mshcfg.dot11MeshTTL = MESH_TTL;
- ifmsh->mshcfg.auto_open_plinks = true;
- ifmsh->mshcfg.dot11MeshMaxPeerLinks =
- MESH_MAX_ESTAB_PLINKS;
- ifmsh->mshcfg.dot11MeshHWMPactivePathTimeout =
- MESH_PATH_TIMEOUT;
- ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval =
- MESH_PREQ_MIN_INT;
- ifmsh->mshcfg.dot11MeshHWMPnetDiameterTraversalTime =
- MESH_DIAM_TRAVERSAL_TIME;
- ifmsh->mshcfg.dot11MeshHWMPmaxPREQretries =
- MESH_MAX_PREQ_RETRIES;
- ifmsh->mshcfg.path_refresh_time =
- MESH_PATH_REFRESH_TIME;
- ifmsh->mshcfg.min_discovery_timeout =
- MESH_MIN_DISCOVERY_TIMEOUT;
ifmsh->accepting_plinks = true;
ifmsh->preq_id = 0;
ifmsh->sn = 0;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 58e741128968..039d7fa0af74 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -175,33 +175,10 @@ struct mesh_rmc {
*/
#define MESH_CFG_CMP_LEN (IEEE80211_MESH_CONFIG_LEN - 2)
-/* Default values, timeouts in ms */
-#define MESH_TTL 31
-#define MESH_MAX_RETR 3
-#define MESH_RET_T 100
-#define MESH_CONF_T 100
-#define MESH_HOLD_T 100
-
-#define MESH_PATH_TIMEOUT 5000
-/* Minimum interval between two consecutive PREQs originated by the same
- * interface
- */
-#define MESH_PREQ_MIN_INT 10
-#define MESH_DIAM_TRAVERSAL_TIME 50
-/* A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds before
- * timing out. This way it will remain ACTIVE and no data frames will be
- * unnecesarily held in the pending queue.
- */
-#define MESH_PATH_REFRESH_TIME 1000
-#define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME)
#define MESH_DEFAULT_BEACON_INTERVAL 1000 /* in 1024 us units */
-#define MESH_MAX_PREQ_RETRIES 4
#define MESH_PATH_EXPIRE (600 * HZ)
-/* Default maximum number of established plinks per interface */
-#define MESH_MAX_ESTAB_PLINKS 32
-
/* Default maximum number of plinks per interface */
#define MESH_MAX_PLINKS 256
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 829e08a657d0..5bf64d7112b3 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -232,7 +232,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
*pos++ = WLAN_EID_PERR;
*pos++ = ie_len;
/* ttl */
- *pos++ = MESH_TTL;
+ *pos++ = ttl;
/* number of destinations */
*pos++ = 1;
/*
@@ -522,7 +522,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
if (reply) {
lifetime = PREQ_IE_LIFETIME(preq_elem);
- ttl = ifmsh->mshcfg.dot11MeshTTL;
+ ttl = ifmsh->mshcfg.element_ttl;
if (ttl != 0) {
mhwmp_dbg("replying to the PREQ\n");
mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr,
@@ -877,7 +877,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
sdata->u.mesh.last_sn_update = jiffies;
}
lifetime = default_lifetime(sdata);
- ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
+ ttl = sdata->u.mesh.mshcfg.element_ttl;
if (ttl == 0) {
sdata->u.mesh.mshstats.dropped_frames_ttl++;
spin_unlock_bh(&mpath->state_lock);
@@ -1013,5 +1013,6 @@ mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->vif.addr,
cpu_to_le32(++ifmsh->sn),
0, NULL, 0, broadcast_addr,
- 0, MESH_TTL, 0, 0, 0, sdata);
+ 0, sdata->u.mesh.mshcfg.element_ttl,
+ 0, 0, 0, sdata);
}
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 349e466cf08b..8d65b47d9837 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -467,8 +467,8 @@ void mesh_plink_broken(struct sta_info *sta)
mpath->flags &= ~MESH_PATH_ACTIVE;
++mpath->sn;
spin_unlock_bh(&mpath->state_lock);
- mesh_path_error_tx(MESH_TTL, mpath->dst,
- cpu_to_le32(mpath->sn),
+ mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
+ mpath->dst, cpu_to_le32(mpath->sn),
cpu_to_le16(PERR_RCODE_DEST_UNREACH),
bcast, sdata);
} else
@@ -614,7 +614,8 @@ void mesh_path_discard_frame(struct sk_buff *skb,
mpath = mesh_path_lookup(da, sdata);
if (mpath)
sn = ++mpath->sn;
- mesh_path_error_tx(MESH_TTL, skb->data, cpu_to_le32(sn),
+ mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
+ cpu_to_le32(sn),
cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a3a9421555af..45fbb9e33746 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -28,13 +28,19 @@
#include "rate.h"
#include "led.h"
+#define IEEE80211_MAX_NULLFUNC_TRIES 2
#define IEEE80211_MAX_PROBE_TRIES 5
/*
- * beacon loss detection timeout
- * XXX: should depend on beacon interval
+ * Beacon loss timeout is calculated as N frames times the
+ * advertised beacon interval. This may need to be somewhat
+ * higher than what hardware might detect to account for
+ * delays in the host processing frames. But since we also
+ * probe on beacon miss before declaring the connection lost
+ * default to what we want.
*/
-#define IEEE80211_BEACON_LOSS_TIME (2 * HZ)
+#define IEEE80211_BEACON_LOSS_COUNT 7
+
/*
* Time the connection can be idle before we probe
* it to see if we can still talk to the AP.
@@ -121,7 +127,7 @@ void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata)
return;
mod_timer(&sdata->u.mgd.bcn_mon_timer,
- round_jiffies_up(jiffies + IEEE80211_BEACON_LOSS_TIME));
+ round_jiffies_up(jiffies + sdata->u.mgd.beacon_timeout));
}
void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata)
@@ -619,11 +625,12 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
/*
* Go to full PSM if the user configures a very low
* latency requirement.
- * The 2 second value is there for compatibility until
- * the PM_QOS_NETWORK_LATENCY is configured with real
- * values.
+ * The 2000 second value is there for compatibility
+ * until the PM_QOS_NETWORK_LATENCY is configured
+ * with real values.
*/
- if (latency > 1900000000 && latency != 2000000000)
+ if (latency > (1900 * USEC_PER_MSEC) &&
+ latency != (2000 * USEC_PER_SEC))
timeout = 0;
else
timeout = 100;
@@ -871,6 +878,9 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
bss_info_changed |= ieee80211_handle_bss_capability(sdata,
cbss->capability, bss->has_erp_value, bss->erp_value);
+ sdata->u.mgd.beacon_timeout = usecs_to_jiffies(ieee80211_tu_to_usec(
+ IEEE80211_BEACON_LOSS_COUNT * bss_conf->beacon_int));
+
sdata->u.mgd.associated = cbss;
memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);
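With the hunk above, beacon loss is declared after IEEE80211_BEACON_LOSS_COUNT missed beacon intervals instead of a fixed 2 s. One 802.11 time unit is 1024 µs, so with a typical 100 TU beacon interval the timeout comes to roughly 717 ms. A short worked computation; the 100 TU interval is just an example value.

    #include <stdio.h>

    #define BEACON_LOSS_COUNT 7
    #define USEC_PER_TU 1024        /* one 802.11 time unit in microseconds */

    int main(void)
    {
            unsigned int beacon_int_tu = 100;   /* typical AP beacon interval */
            unsigned long usec = (unsigned long)BEACON_LOSS_COUNT * beacon_int_tu * USEC_PER_TU;

            /* 7 * 100 * 1024 us = 716800 us ~= 717 ms, vs. the old fixed 2000 ms */
            printf("beacon loss timeout: %lu us (%.1f ms)\n", usec, usec / 1000.0);
            return 0;
    }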
@@ -1026,6 +1036,54 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
ieee80211_sta_reset_conn_monitor(sdata);
}
+static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
+ if (!(ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
+ IEEE80211_STA_CONNECTION_POLL)))
+ return;
+
+ ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
+ IEEE80211_STA_BEACON_POLL);
+ mutex_lock(&sdata->local->iflist_mtx);
+ ieee80211_recalc_ps(sdata->local, -1);
+ mutex_unlock(&sdata->local->iflist_mtx);
+
+ if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+ return;
+
+ /*
+ * We've received a probe response, but are not sure whether
+ * we have or will be receiving any beacons or data, so let's
+ * schedule the timers again, just in case.
+ */
+ ieee80211_sta_reset_beacon_monitor(sdata);
+
+ mod_timer(&ifmgd->conn_mon_timer,
+ round_jiffies_up(jiffies +
+ IEEE80211_CONNECTION_IDLE_TIME));
+}
+
+void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_hdr *hdr, bool ack)
+{
+ if (!ieee80211_is_data(hdr->frame_control))
+ return;
+
+ if (ack)
+ ieee80211_sta_reset_conn_monitor(sdata);
+
+ if (ieee80211_is_nullfunc(hdr->frame_control) &&
+ sdata->u.mgd.probe_send_count > 0) {
+ if (ack)
+ sdata->u.mgd.probe_send_count = 0;
+ else
+ sdata->u.mgd.nullfunc_failed = true;
+ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+ }
+}
+
static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -1041,8 +1099,20 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
if (ifmgd->probe_send_count >= unicast_limit)
dst = NULL;
- ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
- ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0);
+ /*
+ * When the hardware reports an accurate Tx ACK status, it's
+ * better to send a nullfunc frame instead of a probe request,
+ * as it will kick us off the AP quickly if we aren't associated
+ * anymore. The timeout will be reset if the frame is ACKed by
+ * the AP.
+ */
+ if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
+ ifmgd->nullfunc_failed = false;
+ ieee80211_send_nullfunc(sdata->local, sdata, 0);
+ } else {
+ ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
+ ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0);
+ }
ifmgd->probe_send_count++;
ifmgd->probe_timeout = jiffies + IEEE80211_PROBE_WAIT;
@@ -1108,6 +1178,30 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
mutex_unlock(&ifmgd->mtx);
}
+struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct sk_buff *skb;
+ const u8 *ssid;
+
+ if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
+ return NULL;
+
+ ASSERT_MGD_MTX(ifmgd);
+
+ if (!ifmgd->associated)
+ return NULL;
+
+ ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
+ skb = ieee80211_build_probe_req(sdata, ifmgd->associated->bssid,
+ ssid + 2, ssid[1], NULL, 0);
+
+ return skb;
+}
+EXPORT_SYMBOL(ieee80211_ap_probereq_get);
+
static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
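The newly exported ieee80211_ap_probereq_get() hands drivers the same probe-request template mac80211 would send to the associated AP, e.g. for offloading connection monitoring to firmware. A hypothetical caller is sketched below; example_fw_load_probe_template() is made up, and a real driver must also satisfy mac80211's locking and context requirements:

/* Hypothetical driver-side use of the new export. */
static void example_program_fw_probe(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct sk_buff *skb = ieee80211_ap_probereq_get(hw, vif);

	if (!skb)
		return;	/* not a station vif, or not associated */

	/* example_fw_load_probe_template() is a made-up driver hook */
	example_fw_load_probe_template(hw->priv, skb->data, skb->len);
	dev_kfree_skb(skb);
}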
@@ -1485,29 +1579,8 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
if (ifmgd->associated &&
- memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN) == 0 &&
- ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
- IEEE80211_STA_CONNECTION_POLL)) {
- ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
- IEEE80211_STA_BEACON_POLL);
- mutex_lock(&sdata->local->iflist_mtx);
- ieee80211_recalc_ps(sdata->local, -1);
- mutex_unlock(&sdata->local->iflist_mtx);
-
- if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
- return;
-
- /*
- * We've received a probe response, but are not sure whether
- * we have or will be receiving any beacons or data, so let's
- * schedule the timers again, just in case.
- */
- ieee80211_sta_reset_beacon_monitor(sdata);
-
- mod_timer(&ifmgd->conn_mon_timer,
- round_jiffies_up(jiffies +
- IEEE80211_CONNECTION_IDLE_TIME));
- }
+ memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN) == 0)
+ ieee80211_reset_ap_probe(sdata);
}
/*
@@ -1845,6 +1918,31 @@ static void ieee80211_sta_timer(unsigned long data)
ieee80211_queue_work(&local->hw, &sdata->work);
}
+static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
+ u8 *bssid)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+
+ ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
+ IEEE80211_STA_BEACON_POLL);
+
+ ieee80211_set_disassoc(sdata, true, true);
+ mutex_unlock(&ifmgd->mtx);
+ mutex_lock(&local->mtx);
+ ieee80211_recalc_idle(local);
+ mutex_unlock(&local->mtx);
+ /*
+ * must be outside lock due to cfg80211,
+ * but that's not a problem.
+ */
+ ieee80211_send_deauth_disassoc(sdata, bssid,
+ IEEE80211_STYPE_DEAUTH,
+ WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
+ NULL, true);
+ mutex_lock(&ifmgd->mtx);
+}
+
void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
@@ -1857,12 +1955,49 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
IEEE80211_STA_CONNECTION_POLL) &&
ifmgd->associated) {
u8 bssid[ETH_ALEN];
+ int max_tries;
memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
- if (time_is_after_jiffies(ifmgd->probe_timeout))
- run_again(ifmgd, ifmgd->probe_timeout);
- else if (ifmgd->probe_send_count < IEEE80211_MAX_PROBE_TRIES) {
+ if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+ max_tries = IEEE80211_MAX_NULLFUNC_TRIES;
+ else
+ max_tries = IEEE80211_MAX_PROBE_TRIES;
+
+ /* ACK received for nullfunc probing frame */
+ if (!ifmgd->probe_send_count)
+ ieee80211_reset_ap_probe(sdata);
+ else if (ifmgd->nullfunc_failed) {
+ if (ifmgd->probe_send_count < max_tries) {
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ wiphy_debug(local->hw.wiphy,
+ "%s: No ack for nullfunc frame to"
+ " AP %pM, try %d\n",
+ sdata->name, bssid,
+ ifmgd->probe_send_count);
+#endif
+ ieee80211_mgd_probe_ap_send(sdata);
+ } else {
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ wiphy_debug(local->hw.wiphy,
+ "%s: No ack for nullfunc frame to"
+ " AP %pM, disconnecting.\n",
+ sdata->name, bssid);
+#endif
+ ieee80211_sta_connection_lost(sdata, bssid);
+ }
+ } else if (time_is_after_jiffies(ifmgd->probe_timeout))
+ run_again(ifmgd, ifmgd->probe_timeout);
+ else if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ wiphy_debug(local->hw.wiphy,
+ "%s: Failed to send nullfunc to AP %pM"
+ " after %dms, disconnecting.\n",
+ sdata->name,
+ bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
+#endif
+ ieee80211_sta_connection_lost(sdata, bssid);
+ } else if (ifmgd->probe_send_count < max_tries) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
wiphy_debug(local->hw.wiphy,
"%s: No probe response from AP %pM"
@@ -1877,27 +2012,13 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
* We actually lost the connection ... or did we?
* Let's make sure!
*/
- ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
- IEEE80211_STA_BEACON_POLL);
wiphy_debug(local->hw.wiphy,
"%s: No probe response from AP %pM"
" after %dms, disconnecting.\n",
sdata->name,
bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
- ieee80211_set_disassoc(sdata, true, true);
- mutex_unlock(&ifmgd->mtx);
- mutex_lock(&local->mtx);
- ieee80211_recalc_idle(local);
- mutex_unlock(&local->mtx);
- /*
- * must be outside lock due to cfg80211,
- * but that's not a problem.
- */
- ieee80211_send_deauth_disassoc(sdata, bssid,
- IEEE80211_STYPE_DEAUTH,
- WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
- NULL, true);
- mutex_lock(&ifmgd->mtx);
+
+ ieee80211_sta_connection_lost(sdata, bssid);
}
}
@@ -1988,6 +2109,8 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
add_timer(&ifmgd->timer);
if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
add_timer(&ifmgd->chswitch_timer);
+ ieee80211_sta_reset_beacon_monitor(sdata);
+ ieee80211_restart_sta_timer(sdata);
}
#endif
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 33f76993da08..3d5a2cb835c4 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -211,7 +211,8 @@ static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
return (info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc);
}
-static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, u8 max_rate_idx)
+static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
+ struct ieee80211_supported_band *sband)
{
u8 i;
@@ -222,7 +223,7 @@ static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, u8 max_rate_idx)
if (basic_rates & (1 << *idx))
return; /* selected rate is a basic rate */
- for (i = *idx + 1; i <= max_rate_idx; i++) {
+ for (i = *idx + 1; i <= sband->n_bitrates; i++) {
if (basic_rates & (1 << i)) {
*idx = i;
return;
@@ -237,16 +238,25 @@ bool rate_control_send_low(struct ieee80211_sta *sta,
struct ieee80211_tx_rate_control *txrc)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
+ struct ieee80211_supported_band *sband = txrc->sband;
+ int mcast_rate;
if (!sta || !priv_sta || rc_no_data_or_no_ack(txrc)) {
info->control.rates[0].idx = rate_lowest_index(txrc->sband, sta);
info->control.rates[0].count =
(info->flags & IEEE80211_TX_CTL_NO_ACK) ?
1 : txrc->hw->max_rate_tries;
- if (!sta && txrc->ap)
+ if (!sta && txrc->bss) {
+ mcast_rate = txrc->bss_conf->mcast_rate[sband->band];
+ if (mcast_rate > 0) {
+ info->control.rates[0].idx = mcast_rate - 1;
+ return true;
+ }
+
rc_send_low_broadcast(&info->control.rates[0].idx,
txrc->bss_conf->basic_rates,
- txrc->sband->n_bitrates);
+ sband);
+ }
return true;
}
return false;
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 2a18d6602d4a..4ad7a362fcc1 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -371,6 +371,9 @@ minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, stru
if (likely(sta->ampdu_mlme.tid_tx[tid]))
return;
+ if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
+ return;
+
ieee80211_start_tx_ba_session(pubsta, tid);
}
@@ -407,8 +410,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
mi->ampdu_len += info->status.ampdu_len;
if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) {
- mi->sample_wait = 4 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len);
- mi->sample_tries = 3;
+ mi->sample_wait = 16 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len);
+ mi->sample_tries = 2;
mi->sample_count--;
}
@@ -506,7 +509,9 @@ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
if (!mr->retry_updated)
minstrel_calc_retransmit(mp, mi, index);
- if (mr->probability < MINSTREL_FRAC(20, 100))
+ if (sample)
+ rate->count = 1;
+ else if (mr->probability < MINSTREL_FRAC(20, 100))
rate->count = 2;
else if (rtscts)
rate->count = mr->retry_count_rtscts;
@@ -562,7 +567,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
*/
if (minstrel_get_duration(sample_idx) >
minstrel_get_duration(mi->max_tp_rate)) {
- if (mr->sample_skipped < 10)
+ if (mr->sample_skipped < 20)
goto next;
if (mi->sample_slow++ > 2)
@@ -586,6 +591,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
struct minstrel_ht_sta *mi = &msp->ht;
struct minstrel_priv *mp = priv;
int sample_idx;
+ bool sample = false;
if (rate_control_send_low(sta, priv_sta, txrc))
return;
@@ -596,10 +602,11 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
info->flags |= mi->tx_flags;
sample_idx = minstrel_get_sample_rate(mp, mi);
if (sample_idx >= 0) {
+ sample = true;
minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
txrc, true, false);
minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
- txrc, false, true);
+ txrc, false, false);
info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
} else {
minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate,
@@ -607,7 +614,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
txrc, false, true);
}
- minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, txrc, false, true);
+ minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, txrc, false, !sample);
ar[3].count = 0;
ar[3].idx = -1;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 54fb4a0e76f0..2fe8f5f86499 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -538,6 +538,8 @@ static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
{
struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
+ lockdep_assert_held(&tid_agg_rx->reorder_lock);
+
if (!skb)
goto no_frame;
@@ -557,6 +559,8 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
{
int index;
+ lockdep_assert_held(&tid_agg_rx->reorder_lock);
+
while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
tid_agg_rx->buf_size;
@@ -581,6 +585,8 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
{
int index, j;
+ lockdep_assert_held(&tid_agg_rx->reorder_lock);
+
/* release the buffer until next missing frame */
index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
tid_agg_rx->buf_size;
@@ -683,10 +689,11 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
int index;
bool ret = true;
+ spin_lock(&tid_agg_rx->reorder_lock);
+
buf_size = tid_agg_rx->buf_size;
head_seq_num = tid_agg_rx->head_seq_num;
- spin_lock(&tid_agg_rx->reorder_lock);
/* frame with out of date sequence number */
if (seq_less(mpdu_seq_num, head_seq_num)) {
dev_kfree_skb(skb);
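The reorder-buffer changes follow a common locking convention: the caller takes reorder_lock and the helpers only assert it, so lockdep flags any path that reaches a helper unlocked. A generic sketch of that convention, with placeholder types and names:

/* Sketch only: placeholder state, same lock ownership pattern as above. */
struct reorder_state {
	spinlock_t lock;
	/* reorder buffer, head_seq_num, ... */
};

static void helper_release_one(struct reorder_state *st)
{
	lockdep_assert_held(&st->lock);	/* caller must hold the lock */
	/* ... operate on the reorder buffer ... */
}

static void caller(struct reorder_state *st)
{
	spin_lock(&st->lock);
	helper_release_one(st);
	spin_unlock(&st->lock);
}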
@@ -1102,8 +1109,6 @@ static void ap_sta_ps_end(struct sta_info *sta)
atomic_dec(&sdata->bss->num_sta_ps);
- clear_sta_flags(sta, WLAN_STA_PS_STA);
-
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
sdata->name, sta->sta.addr, sta->sta.aid);
@@ -1158,6 +1163,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
sta->rx_fragments++;
sta->rx_bytes += rx->skb->len;
sta->last_signal = status->signal;
+ ewma_add(&sta->avg_signal, -status->signal);
/*
* Change STA power saving mode only at the end of a frame
@@ -1872,9 +1878,8 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
dev->stats.rx_packets++;
dev->stats.rx_bytes += rx->skb->len;
- if (ieee80211_is_data(hdr->frame_control) &&
- !is_multicast_ether_addr(hdr->addr1) &&
- local->hw.conf.dynamic_ps_timeout > 0 && local->ps_sdata) {
+ if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
+ !is_multicast_ether_addr(((struct ethhdr *)rx->skb->data)->h_dest)) {
mod_timer(&local->dynamic_ps_timer, jiffies +
msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
}
@@ -1923,9 +1928,12 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
mod_timer(&tid_agg_rx->session_timer,
TU_TO_EXP_TIME(tid_agg_rx->timeout));
+ spin_lock(&tid_agg_rx->reorder_lock);
/* release stored frames up to start of BAR */
ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
frames);
+ spin_unlock(&tid_agg_rx->reorder_lock);
+
kfree_skb(skb);
return RX_QUEUED;
}
@@ -2521,9 +2529,8 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
}
/*
- * This function makes calls into the RX path. Therefore the
- * caller must hold the sta_info->lock and everything has to
- * be under rcu_read_lock protection as well.
+ * This function makes calls into the RX path, therefore
+ * it has to be invoked under RCU read lock.
*/
void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
{
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 6d8f897d8763..c426504ed1cf 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -199,8 +199,11 @@ static void sta_unblock(struct work_struct *wk)
if (!test_sta_flags(sta, WLAN_STA_PS_STA))
ieee80211_sta_ps_deliver_wakeup(sta);
- else if (test_and_clear_sta_flags(sta, WLAN_STA_PSPOLL))
+ else if (test_and_clear_sta_flags(sta, WLAN_STA_PSPOLL)) {
+ clear_sta_flags(sta, WLAN_STA_PS_DRIVER);
ieee80211_sta_ps_deliver_poll_response(sta);
+ } else
+ clear_sta_flags(sta, WLAN_STA_PS_DRIVER);
}
static int sta_prepare_rate_control(struct ieee80211_local *local,
@@ -241,6 +244,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta->local = local;
sta->sdata = sdata;
+ ewma_init(&sta->avg_signal, 1024, 8);
+
if (sta_prepare_rate_control(local, sta, gfp)) {
kfree(sta);
return NULL;
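avg_signal is a struct ewma from <linux/average.h>: ewma_init(&avg, factor, weight) sets the precision factor and decay weight, and the RX path feeds in -signal so the stored average stays positive. A small sketch of how a reader would typically recover the dBm value; the surrounding helper is illustrative, the field and accessors are as added above:

/* Illustrative reader of the new moving average (e.g. for station stats). */
static int example_get_avg_signal(struct sta_info *sta)
{
	/* RX stores -signal (signal is negative dBm), so negate on read */
	return -(int)ewma_read(&sta->avg_signal);
}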
@@ -880,6 +885,13 @@ struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
}
EXPORT_SYMBOL(ieee80211_find_sta);
+static void clear_sta_ps_flags(void *_sta)
+{
+ struct sta_info *sta = _sta;
+
+ clear_sta_flags(sta, WLAN_STA_PS_DRIVER | WLAN_STA_PS_STA);
+}
+
/* powersave support code */
void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
{
@@ -894,7 +906,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
/* Send all buffered frames to the station */
sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered);
- buffered = ieee80211_add_pending_skbs(local, &sta->ps_tx_buf);
+ buffered = ieee80211_add_pending_skbs_fn(local, &sta->ps_tx_buf,
+ clear_sta_ps_flags, sta);
sent += buffered;
local->total_ps_buffered -= buffered;
@@ -973,7 +986,7 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
if (block)
set_sta_flags(sta, WLAN_STA_PS_DRIVER);
- else
+ else if (test_sta_flags(sta, WLAN_STA_PS_DRIVER))
ieee80211_queue_work(hw, &sta->drv_unblock_wk);
}
EXPORT_SYMBOL(ieee80211_sta_block_awake);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 9265acadef32..fdca52cf88de 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/workqueue.h>
+#include <linux/average.h>
#include "key.h"
/**
@@ -81,13 +82,14 @@ enum ieee80211_sta_info_flags {
* @stop_initiator: initiator of a session stop
* @tx_stop: TX DelBA frame when stopping
*
- * This structure is protected by RCU and the per-station
- * spinlock. Assignments to the array holding it must hold
- * the spinlock, only the TX path can access it under RCU
- * lock-free if, and only if, the state has the flag
- * %HT_AGG_STATE_OPERATIONAL set. Otherwise, the TX path
- * must also acquire the spinlock and re-check the state,
- * see comments in the tx code touching it.
+ * This structure's lifetime is managed by RCU, assignments to
+ * the array holding it must hold the aggregation mutex.
+ *
+ * The TX path can access it under RCU lock-free if, and
+ * only if, the state has the flag %HT_AGG_STATE_OPERATIONAL
+ * set. Otherwise, the TX path must also acquire the spinlock
+ * and re-check the state, see comments in the tx code
+ * touching it.
*/
struct tid_ampdu_tx {
struct rcu_head rcu_head;
@@ -115,15 +117,13 @@ struct tid_ampdu_tx {
* @rcu_head: RCU head used for freeing this struct
* @reorder_lock: serializes access to reorder buffer, see below.
*
- * This structure is protected by RCU and the per-station
- * spinlock. Assignments to the array holding it must hold
- * the spinlock.
+ * This structure's lifetime is managed by RCU, assignments to
+ * the array holding it must hold the aggregation mutex.
*
- * The @reorder_lock is used to protect the variables and
- * arrays such as @reorder_buf, @reorder_time, @head_seq_num,
- * @stored_mpdu_num and @reorder_time from being corrupted by
- * concurrent access of the RX path and the expired frame
- * release timer.
+ * The @reorder_lock is used to protect the members of this
+ * struct, except for @timeout, @buf_size and @dialog_token,
+ * which are constant across the lifetime of the struct (the
+ * dialog token being used only for debugging).
*/
struct tid_ampdu_rx {
struct rcu_head rcu_head;
@@ -224,6 +224,7 @@ enum plink_state {
* @rx_fragments: number of received MPDUs
* @rx_dropped: number of dropped MPDUs from this STA
* @last_signal: signal of last received frame from this STA
+ * @avg_signal: moving average of signal of received frames from this STA
* @last_seq_ctrl: last received seq/frag number from this STA (per RX queue)
* @tx_filtered_count: number of frames the hardware filtered for this STA
* @tx_retry_failed: number of frames that failed retry
@@ -248,6 +249,7 @@ enum plink_state {
* @sta: station information we share with the driver
* @dead: set to true when sta is unlinked
* @uploaded: set to true when sta is uploaded to the driver
+ * @lost_packets: number of consecutive lost packets
*/
struct sta_info {
/* General information, mostly static */
@@ -291,6 +293,7 @@ struct sta_info {
unsigned long rx_fragments;
unsigned long rx_dropped;
int last_signal;
+ struct ewma avg_signal;
__le16 last_seq_ctrl[NUM_RX_DATA_QUEUES];
/* Updated from TX status path only, no locking requirements */
@@ -335,6 +338,8 @@ struct sta_info {
} debugfs;
#endif
+ unsigned int lost_packets;
+
/* keep last! */
struct ieee80211_sta sta;
};
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 3153c19893b8..38a797217a91 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -157,6 +157,15 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
}
}
+/*
+ * Use a static threshold for now; the best value is to be determined
+ * by testing.
+ * Should it depend on:
+ *  - the number of retransmissions,
+ *  - the current throughput (a higher value for higher throughput)?
+ */
+#define STA_LOST_PKT_THRESHOLD 50
+
void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct sk_buff *skb2;
@@ -173,6 +182,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
int retry_count = -1, i;
int rates_idx = -1;
bool send_to_cooked;
+ bool acked;
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
/* the HW cannot have attempted that rate */
@@ -198,8 +208,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN))
continue;
- if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
- test_sta_flags(sta, WLAN_STA_PS_STA)) {
+ acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ if (!acked && test_sta_flags(sta, WLAN_STA_PS_STA)) {
/*
* The STA is in power save mode, so assume
* that this TX packet failed because of that.
@@ -231,7 +241,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
rcu_read_unlock();
return;
} else {
- if (!(info->flags & IEEE80211_TX_STAT_ACK))
+ if (!acked)
sta->tx_retry_failed++;
sta->tx_retry_count += retry_count;
}
@@ -240,9 +250,25 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (ieee80211_vif_is_mesh(&sta->sdata->vif))
ieee80211s_update_metric(local, sta, skb);
- if (!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
- (info->flags & IEEE80211_TX_STAT_ACK))
+ if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
ieee80211_frame_acked(sta, skb);
+
+ if ((sta->sdata->vif.type == NL80211_IFTYPE_STATION) &&
+ (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS))
+ ieee80211_sta_tx_notify(sta->sdata, (void *) skb->data, acked);
+
+ if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
+ if (info->flags & IEEE80211_TX_STAT_ACK) {
+ if (sta->lost_packets)
+ sta->lost_packets = 0;
+ } else if (++sta->lost_packets >= STA_LOST_PKT_THRESHOLD) {
+ cfg80211_cqm_pktloss_notify(sta->sdata->dev,
+ sta->sta.addr,
+ sta->lost_packets,
+ GFP_ATOMIC);
+ sta->lost_packets = 0;
+ }
+ }
}
rcu_read_unlock();
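The lost-packet accounting above is purely consecutive: any ACK resets the counter, and crossing STA_LOST_PKT_THRESHOLD reports the streak via cfg80211_cqm_pktloss_notify() and starts over. A self-contained illustration of that bookkeeping, with a printf standing in for the notify call:

#include <stdio.h>

#define LOST_PKT_THRESHOLD 50	/* mirrors STA_LOST_PKT_THRESHOLD */

static unsigned int lost_packets;

static void tx_status(int acked)
{
	if (acked) {
		lost_packets = 0;	/* any ACK resets the streak */
		return;
	}
	if (++lost_packets >= LOST_PKT_THRESHOLD) {
		printf("notify: %u consecutive lost packets\n", lost_packets);
		lost_packets = 0;
	}
}

int main(void)
{
	for (int i = 0; i < 120; i++)
		tx_status(0);		/* 120 failures -> two notifications */
	tx_status(1);			/* an ACK resets the counter */
	return 0;
}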
@@ -295,10 +321,23 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
msecs_to_jiffies(10));
}
- if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX)
+ if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
+ struct ieee80211_work *wk;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(wk, &local->work_list, list) {
+ if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
+ continue;
+ if (wk->offchan_tx.frame != skb)
+ continue;
+ wk->offchan_tx.frame = NULL;
+ break;
+ }
+ rcu_read_unlock();
cfg80211_mgmt_tx_status(
skb->dev, (unsigned long) skb, skb->data, skb->len,
!!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
+ }
/* this was a transmitted frame, but now we want to reuse it */
skb_orphan(skb);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 7a637b80a62e..0ee56bb0ea7e 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -622,7 +622,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
txrc.max_rate_idx = -1;
else
txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
- txrc.ap = tx->sdata->vif.type == NL80211_IFTYPE_AP;
+ txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
+ tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
/* set up RTS protection if desired */
if (len > tx->local->hw.wiphy->rts_threshold) {
@@ -665,10 +666,11 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
if (unlikely(info->control.rates[0].idx < 0))
return TX_DROP;
- if (txrc.reported_rate.idx < 0)
+ if (txrc.reported_rate.idx < 0) {
txrc.reported_rate = info->control.rates[0];
-
- if (tx->sta)
+ if (tx->sta && ieee80211_is_data(hdr->frame_control))
+ tx->sta->last_tx_rate = txrc.reported_rate;
+ } else if (tx->sta)
tx->sta->last_tx_rate = txrc.reported_rate;
if (unlikely(!info->control.rates[0].count))
@@ -1033,6 +1035,7 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
struct ieee80211_radiotap_header *rthdr =
(struct ieee80211_radiotap_header *) skb->data;
struct ieee80211_supported_band *sband;
+ bool hw_frag;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
NULL);
@@ -1042,6 +1045,9 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
tx->flags &= ~IEEE80211_TX_FRAGMENTED;
+ /* packet is fragmented in HW if we have a non-NULL driver callback */
+ hw_frag = (tx->local->ops->set_frag_threshold != NULL);
+
/*
* for every radiotap entry that is present
* (ieee80211_radiotap_iterator_next returns -ENOENT when no more
@@ -1078,7 +1084,8 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
}
if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
- if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
+ if ((*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) &&
+ !hw_frag)
tx->flags |= IEEE80211_TX_FRAGMENTED;
break;
@@ -1181,8 +1188,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
/*
* Set this flag (used below to indicate "automatic fragmentation"),
* it will be cleared/left by radiotap as desired.
+ * Only valid when fragmentation is done by the stack.
*/
- tx->flags |= IEEE80211_TX_FRAGMENTED;
+ if (!local->ops->set_frag_threshold)
+ tx->flags |= IEEE80211_TX_FRAGMENTED;
/* process and remove the injection radiotap header */
if (unlikely(info->flags & IEEE80211_TX_INTFL_HAS_RADIOTAP)) {
@@ -2321,7 +2330,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
txrc.max_rate_idx = -1;
else
txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
- txrc.ap = true;
+ txrc.bss = true;
rate_control_get_rate(sdata, NULL, &txrc);
info->control.vif = vif;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0b6fc92bc0d7..e497476174ce 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -368,8 +368,9 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
-int ieee80211_add_pending_skbs(struct ieee80211_local *local,
- struct sk_buff_head *skbs)
+int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
+ struct sk_buff_head *skbs,
+ void (*fn)(void *data), void *data)
{
struct ieee80211_hw *hw = &local->hw;
struct sk_buff *skb;
@@ -394,6 +395,9 @@ int ieee80211_add_pending_skbs(struct ieee80211_local *local,
__skb_queue_tail(&local->pending[queue], skb);
}
+ if (fn)
+ fn(data);
+
for (i = 0; i < hw->queues; i++)
__ieee80211_wake_queue(hw, i,
IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
@@ -402,6 +406,12 @@ int ieee80211_add_pending_skbs(struct ieee80211_local *local,
return ret;
}
+int ieee80211_add_pending_skbs(struct ieee80211_local *local,
+ struct sk_buff_head *skbs)
+{
+ return ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
+}
+
void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
enum queue_stop_reason reason)
{
@@ -1011,9 +1021,10 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
return pos - buffer;
}
-void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
- const u8 *ssid, size_t ssid_len,
- const u8 *ie, size_t ie_len)
+struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
+ u8 *dst,
+ const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
@@ -1027,7 +1038,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
if (!buf) {
printk(KERN_DEBUG "%s: failed to allocate temporary IE "
"buffer\n", sdata->name);
- return;
+ return NULL;
}
chan = ieee80211_frequency_to_channel(
@@ -1050,8 +1061,20 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
}
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
- ieee80211_tx_skb(sdata, skb);
kfree(buf);
+
+ return skb;
+}
+
+void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
+ const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len)
+{
+ struct sk_buff *skb;
+
+ skb = ieee80211_build_probe_req(sdata, dst, ssid, ssid_len, ie, ie_len);
+ if (skb)
+ ieee80211_tx_skb(sdata, skb);
}
u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
@@ -1152,6 +1175,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
mutex_unlock(&local->sta_mtx);
+ /* setup fragmentation threshold */
+ drv_set_frag_threshold(local, hw->wiphy->frag_threshold);
+
/* setup RTS threshold */
drv_set_rts_threshold(local, hw->wiphy->rts_threshold);
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 34e6d02da779..58e75bbc1f91 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -21,7 +21,16 @@
/* Default mapping in classifier to work with default
* queue setup.
*/
-const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
+const int ieee802_1d_to_ac[8] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO
+};
static int wme_downgrade_ac(struct sk_buff *skb)
{
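The rewritten table is the standard 802.1d user-priority to WMM access-category mapping; skb->priority (0..7) indexes it directly. A stand-alone demo of the mapping, with enum values mirroring mac80211's IEEE80211_AC_VO..IEEE80211_AC_BK (0..3):

#include <stdio.h>

enum { AC_VO = 0, AC_VI = 1, AC_BE = 2, AC_BK = 3 };

static const int ieee802_1d_to_ac[8] = {
	AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO
};

int main(void)
{
	static const char *names[] = { "VO", "VI", "BE", "BK" };

	for (int prio = 0; prio < 8; prio++)	/* skb->priority & 7 in the kernel */
		printf("priority %d -> AC_%s\n", prio,
		       names[ieee802_1d_to_ac[prio]]);
	return 0;
}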
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index ae344d1ba056..de43753076d2 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -458,8 +458,9 @@ ieee80211_direct_probe(struct ieee80211_work *wk)
return WORK_ACT_TIMEOUT;
}
- printk(KERN_DEBUG "%s: direct probe to %pM (try %d)\n",
- sdata->name, wk->filter_ta, wk->probe_auth.tries);
+ printk(KERN_DEBUG "%s: direct probe to %pM (try %d/%d)\n",
+ sdata->name, wk->filter_ta, wk->probe_auth.tries,
+ IEEE80211_AUTH_MAX_TRIES);
/*
* Direct probe is sent to broadcast address as some APs
@@ -561,6 +562,25 @@ ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk)
}
static enum work_action __must_check
+ieee80211_offchannel_tx(struct ieee80211_work *wk)
+{
+ if (!wk->started) {
+ wk->timeout = jiffies + msecs_to_jiffies(wk->offchan_tx.wait);
+
+ /*
+ * After this, offchan_tx.frame remains but now is no
+ * longer a valid pointer -- we still need it as the
+ * cookie for canceling this work.
+ */
+ ieee80211_tx_skb(wk->sdata, wk->offchan_tx.frame);
+
+ return WORK_ACT_NONE;
+ }
+
+ return WORK_ACT_TIMEOUT;
+}
+
+static enum work_action __must_check
ieee80211_assoc_beacon_wait(struct ieee80211_work *wk)
{
if (wk->started)
@@ -955,6 +975,9 @@ static void ieee80211_work_work(struct work_struct *work)
case IEEE80211_WORK_REMAIN_ON_CHANNEL:
rma = ieee80211_remain_on_channel_timeout(wk);
break;
+ case IEEE80211_WORK_OFFCHANNEL_TX:
+ rma = ieee80211_offchannel_tx(wk);
+ break;
case IEEE80211_WORK_ASSOC_BEACON_WAIT:
rma = ieee80211_assoc_beacon_wait(wk);
break;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 43288259f4a1..1534f2b44caf 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -525,6 +525,7 @@ config NETFILTER_XT_TARGET_TPROXY
depends on NETFILTER_XTABLES
depends on NETFILTER_ADVANCED
select NF_DEFRAG_IPV4
+ select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
help
This option adds a `TPROXY' target, which is somewhat similar to
REDIRECT. It can only be used in the mangle table and is useful
@@ -927,6 +928,7 @@ config NETFILTER_XT_MATCH_SOCKET
depends on NETFILTER_ADVANCED
depends on !NF_CONNTRACK || NF_CONNTRACK
select NF_DEFRAG_IPV4
+ select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
help
This option adds a `socket' match, which can be used to match
packets for which a TCP or UDP socket lookup finds a valid socket.
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 19c482caf30b..640678f47a2a 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -21,7 +21,9 @@
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#define XT_TPROXY_HAVE_IPV6 1
#include <net/if_inet6.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
@@ -172,7 +174,7 @@ tproxy_tg4_v1(struct sk_buff *skb, const struct xt_action_param *par)
return tproxy_tg4(skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_TPROXY_HAVE_IPV6
static inline const struct in6_addr *
tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
@@ -372,7 +374,7 @@ static struct xt_target tproxy_tg_reg[] __read_mostly = {
.hooks = 1 << NF_INET_PRE_ROUTING,
.me = THIS_MODULE,
},
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_TPROXY_HAVE_IPV6
{
.name = "TPROXY",
.family = NFPROTO_IPV6,
@@ -391,7 +393,7 @@ static struct xt_target tproxy_tg_reg[] __read_mostly = {
static int __init tproxy_tg_init(void)
{
nf_defrag_ipv4_enable();
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_TPROXY_HAVE_IPV6
nf_defrag_ipv6_enable();
#endif
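Both TPROXY and the socket match (below) now gate their IPv6 code on CONFIG_IP6_NF_IPTABLES through a single locally defined HAVE_IPV6 macro, instead of repeating the CONFIG_IPV6 test in every #if. A generic sketch of that guard-macro idiom, with example names:

/* Sketch of the guard-macro idiom: test the Kconfig symbols once,
 * then use one local macro for every IPv6-only block. */
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
#define EXAMPLE_HAVE_IPV6 1
#endif

#ifdef EXAMPLE_HAVE_IPV6
static void example_ipv6_init(void) { /* IPv6-only setup */ }
#endif

static void example_init(void)
{
#ifdef EXAMPLE_HAVE_IPV6
	example_ipv6_init();
#endif
}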
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 2dbd4c857735..00d6ae838303 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -14,7 +14,6 @@
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/icmp.h>
@@ -22,7 +21,12 @@
#include <net/inet_sock.h>
#include <net/netfilter/nf_tproxy_core.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#define XT_SOCKET_HAVE_IPV6 1
+#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+#endif
#include <linux/netfilter/xt_socket.h>
@@ -186,12 +190,12 @@ socket_mt4_v1(const struct sk_buff *skb, struct xt_action_param *par)
return socket_match(skb, par, par->matchinfo);
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
static int
extract_icmp6_fields(const struct sk_buff *skb,
unsigned int outside_hdrlen,
- u8 *protocol,
+ int *protocol,
struct in6_addr **raddr,
struct in6_addr **laddr,
__be16 *rport,
@@ -248,8 +252,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
struct sock *sk;
struct in6_addr *daddr, *saddr;
__be16 dport, sport;
- int thoff;
- u8 tproto;
+ int thoff, tproto;
const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo;
tproto = ipv6_find_hdr(skb, &thoff, -1, NULL);
@@ -301,7 +304,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
sk = NULL;
}
- pr_debug("proto %hhu %pI6:%hu -> %pI6:%hu "
+ pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu "
"(orig %pI6:%hu) sock %p\n",
tproto, saddr, ntohs(sport),
daddr, ntohs(dport),
@@ -331,7 +334,7 @@ static struct xt_match socket_mt_reg[] __read_mostly = {
(1 << NF_INET_LOCAL_IN),
.me = THIS_MODULE,
},
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
{
.name = "socket",
.revision = 1,
@@ -348,7 +351,7 @@ static struct xt_match socket_mt_reg[] __read_mostly = {
static int __init socket_mt_init(void)
{
nf_defrag_ipv4_enable();
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#ifdef XT_SOCKET_HAVE_IPV6
nf_defrag_ipv6_enable();
#endif
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index cd96ed3ccee4..478181d53c55 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -83,9 +83,9 @@ struct netlink_sock {
struct module *module;
};
-struct listeners_rcu_head {
- struct rcu_head rcu_head;
- void *ptr;
+struct listeners {
+ struct rcu_head rcu;
+ unsigned long masks[0];
};
#define NETLINK_KERNEL_SOCKET 0x1
@@ -119,7 +119,7 @@ struct nl_pid_hash {
struct netlink_table {
struct nl_pid_hash hash;
struct hlist_head mc_list;
- unsigned long *listeners;
+ struct listeners __rcu *listeners;
unsigned int nl_nonroot;
unsigned int groups;
struct mutex *cb_mutex;
@@ -338,7 +338,7 @@ netlink_update_listeners(struct sock *sk)
if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
mask |= nlk_sk(sk)->groups[i];
}
- tbl->listeners[i] = mask;
+ tbl->listeners->masks[i] = mask;
}
/* this function is only called with the netlink table "grabbed", which
* makes sure updates are visible before bind or setsockopt return. */
@@ -936,7 +936,7 @@ EXPORT_SYMBOL(netlink_unicast);
int netlink_has_listeners(struct sock *sk, unsigned int group)
{
int res = 0;
- unsigned long *listeners;
+ struct listeners *listeners;
BUG_ON(!netlink_is_kernel(sk));
@@ -944,7 +944,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group)
listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
if (group - 1 < nl_table[sk->sk_protocol].groups)
- res = test_bit(group - 1, listeners);
+ res = test_bit(group - 1, listeners->masks);
rcu_read_unlock();
@@ -1498,7 +1498,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
struct socket *sock;
struct sock *sk;
struct netlink_sock *nlk;
- unsigned long *listeners = NULL;
+ struct listeners *listeners = NULL;
BUG_ON(!nl_table);
@@ -1523,8 +1523,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
if (groups < 32)
groups = 32;
- listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
- GFP_KERNEL);
+ listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
if (!listeners)
goto out_sock_release;
@@ -1541,7 +1540,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
netlink_table_grab();
if (!nl_table[unit].registered) {
nl_table[unit].groups = groups;
- nl_table[unit].listeners = listeners;
+ rcu_assign_pointer(nl_table[unit].listeners, listeners);
nl_table[unit].cb_mutex = cb_mutex;
nl_table[unit].module = module;
nl_table[unit].registered = 1;
@@ -1572,43 +1571,28 @@ netlink_kernel_release(struct sock *sk)
EXPORT_SYMBOL(netlink_kernel_release);
-static void netlink_free_old_listeners(struct rcu_head *rcu_head)
+static void listeners_free_rcu(struct rcu_head *head)
{
- struct listeners_rcu_head *lrh;
-
- lrh = container_of(rcu_head, struct listeners_rcu_head, rcu_head);
- kfree(lrh->ptr);
+ kfree(container_of(head, struct listeners, rcu));
}
int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
- unsigned long *listeners, *old = NULL;
- struct listeners_rcu_head *old_rcu_head;
+ struct listeners *new, *old;
struct netlink_table *tbl = &nl_table[sk->sk_protocol];
if (groups < 32)
groups = 32;
if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
- listeners = kzalloc(NLGRPSZ(groups) +
- sizeof(struct listeners_rcu_head),
- GFP_ATOMIC);
- if (!listeners)
+ new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
+ if (!new)
return -ENOMEM;
- old = tbl->listeners;
- memcpy(listeners, old, NLGRPSZ(tbl->groups));
- rcu_assign_pointer(tbl->listeners, listeners);
- /*
- * Free the old memory after an RCU grace period so we
- * don't leak it. We use call_rcu() here in order to be
- * able to call this function from atomic contexts. The
- * allocation of this memory will have reserved enough
- * space for struct listeners_rcu_head at the end.
- */
- old_rcu_head = (void *)(tbl->listeners +
- NLGRPLONGS(tbl->groups));
- old_rcu_head->ptr = old;
- call_rcu(&old_rcu_head->rcu_head, netlink_free_old_listeners);
+ old = rcu_dereference_raw(tbl->listeners);
+ memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
+ rcu_assign_pointer(tbl->listeners, new);
+
+ call_rcu(&old->rcu, listeners_free_rcu);
}
tbl->groups = groups;
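With the rcu_head embedded in struct listeners, growing the group bitmap becomes the standard RCU copy-update sequence: allocate a larger object, copy the old masks, publish it with rcu_assign_pointer(), and free the old object after a grace period with call_rcu(). A minimal sketch of that sequence, using placeholder names and sizes:

/* Sketch only: same copy-update idiom as __netlink_change_ngroups() above. */
struct listeners_example {
	struct rcu_head rcu;
	unsigned long masks[];
};

static void listeners_example_free(struct rcu_head *head)
{
	kfree(container_of(head, struct listeners_example, rcu));
}

static int grow_listeners(struct listeners_example __rcu **slot,
			  size_t old_sz, size_t new_sz)
{
	struct listeners_example *new, *old;

	new = kzalloc(sizeof(*new) + new_sz, GFP_ATOMIC);
	if (!new)
		return -ENOMEM;

	old = rcu_dereference_raw(*slot);	/* writer holds the table lock */
	memcpy(new->masks, old->masks, old_sz);
	rcu_assign_pointer(*slot, new);		/* publish to readers */
	call_rcu(&old->rcu, listeners_example_free);	/* free after grace period */
	return 0;
}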
@@ -2104,18 +2088,17 @@ static void __net_exit netlink_net_exit(struct net *net)
static void __init netlink_add_usersock_entry(void)
{
- unsigned long *listeners;
+ struct listeners *listeners;
int groups = 32;
- listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
- GFP_KERNEL);
+ listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
if (!listeners)
- panic("netlink_add_usersock_entry: Cannot allocate listneres\n");
+ panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
netlink_table_grab();
nl_table[NETLINK_USERSOCK].groups = groups;
- nl_table[NETLINK_USERSOCK].listeners = listeners;
+ rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
nl_table[NETLINK_USERSOCK].registered = 1;
diff --git a/net/rds/message.c b/net/rds/message.c
index a84545dae370..848cff45183b 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -224,6 +224,9 @@ struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
WARN_ON(!nents);
+ if (rm->m_used_sgs + nents > rm->m_total_sgs)
+ return NULL;
+
sg_ret = &sg_first[rm->m_used_sgs];
sg_init_table(sg_ret, nents);
rm->m_used_sgs += nents;
@@ -246,6 +249,8 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
rm->data.op_nents = ceil(total_len, PAGE_SIZE);
rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
+ if (!rm->data.op_sg)
+ return ERR_PTR(-ENOMEM);
for (i = 0; i < rm->data.op_nents; ++i) {
sg_set_page(&rm->data.op_sg[i],
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 1a41debca1ce..8920f2a83327 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -479,13 +479,38 @@ void rds_atomic_free_op(struct rm_atomic_op *ao)
/*
- * Count the number of pages needed to describe an incoming iovec.
+ * Count the number of pages needed to describe an incoming iovec array.
*/
-static int rds_rdma_pages(struct rds_rdma_args *args)
+static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
+{
+ int tot_pages = 0;
+ unsigned int nr_pages;
+ unsigned int i;
+
+ /* figure out the number of pages in the vector */
+ for (i = 0; i < nr_iovecs; i++) {
+ nr_pages = rds_pages_in_vec(&iov[i]);
+ if (nr_pages == 0)
+ return -EINVAL;
+
+ tot_pages += nr_pages;
+
+ /*
+ * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
+ * so tot_pages cannot overflow without first going negative.
+ */
+ if (tot_pages < 0)
+ return -EINVAL;
+ }
+
+ return tot_pages;
+}
+
+int rds_rdma_extra_size(struct rds_rdma_args *args)
{
struct rds_iovec vec;
struct rds_iovec __user *local_vec;
- unsigned int tot_pages = 0;
+ int tot_pages = 0;
unsigned int nr_pages;
unsigned int i;
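rds_rdma_pages() now works on the already-copied iovec array and keeps the running total signed: since one entry contributes at most (UINT_MAX >> PAGE_SHIFT) + 1 pages, any overflow of the sum shows up as a negative total before it can wrap. A user-space illustration of the per-entry page count and the signed-total guard; PAGE_SHIFT and the sample vectors are assumptions for the demo:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct demo_iovec { uint64_t addr; uint64_t bytes; };

static unsigned int pages_in_vec(const struct demo_iovec *v)
{
	if (v->bytes == 0 || v->addr + v->bytes <= v->addr ||
	    v->bytes > (uint64_t)UINT32_MAX)
		return 0;	/* reject empty or overflowing entries */

	return ((v->addr + v->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
	       (v->addr >> PAGE_SHIFT);
}

int main(void)
{
	struct demo_iovec iov[] = { { 0x1ff8, 16 }, { 0x3000, 8192 } };
	int tot_pages = 0;

	for (unsigned int i = 0; i < sizeof(iov) / sizeof(iov[0]); i++) {
		unsigned int nr = pages_in_vec(&iov[i]);

		if (nr == 0)
			return 1;
		tot_pages += nr;
		if (tot_pages < 0)	/* signed total catches overflow */
			return 1;
	}
	printf("total pages: %d\n", tot_pages);	/* prints 4: 2 + 2 */
	return 0;
}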
@@ -502,14 +527,16 @@ static int rds_rdma_pages(struct rds_rdma_args *args)
return -EINVAL;
tot_pages += nr_pages;
- }
- return tot_pages;
-}
+ /*
+ * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
+ * so tot_pages cannot overflow without first going negative.
+ */
+ if (tot_pages < 0)
+ return -EINVAL;
+ }
-int rds_rdma_extra_size(struct rds_rdma_args *args)
-{
- return rds_rdma_pages(args) * sizeof(struct scatterlist);
+ return tot_pages * sizeof(struct scatterlist);
}
/*
@@ -520,13 +547,12 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg)
{
struct rds_rdma_args *args;
- struct rds_iovec vec;
struct rm_rdma_op *op = &rm->rdma;
int nr_pages;
unsigned int nr_bytes;
struct page **pages = NULL;
- struct rds_iovec __user *local_vec;
- unsigned int nr;
+ struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
+ int iov_size;
unsigned int i, j;
int ret = 0;
@@ -546,9 +572,26 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
goto out;
}
- nr_pages = rds_rdma_pages(args);
- if (nr_pages < 0)
+ /* Check whether to allocate the iovec area */
+ iov_size = args->nr_local * sizeof(struct rds_iovec);
+ if (args->nr_local > UIO_FASTIOV) {
+ iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
+ if (!iovs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ nr_pages = rds_rdma_pages(iovs, args->nr_local);
+ if (nr_pages < 0) {
+ ret = -EINVAL;
goto out;
+ }
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
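Copying the whole user iovec once, into an on-stack array for up to UIO_FASTIOV entries and into sock_kmalloc() memory otherwise, removes the per-entry copy_from_user() and stops user space from changing the vector between the page-count calculation and the pinning loop. A user-space sketch of the stack-or-heap idiom; FASTIOV and the types are illustrative:

#include <stdlib.h>
#include <string.h>

#define FASTIOV 8	/* stands in for UIO_FASTIOV */

struct demo_iovec { unsigned long addr; unsigned long bytes; };

static int process_iovecs(const struct demo_iovec *user, unsigned long nr)
{
	struct demo_iovec stackbuf[FASTIOV], *iovs = stackbuf;
	int ret = 0;

	if (nr > FASTIOV) {
		iovs = malloc(nr * sizeof(*iovs));	/* sock_kmalloc() in the kernel */
		if (!iovs)
			return -1;
	}
	memcpy(iovs, user, nr * sizeof(*iovs));	/* stands in for copy_from_user() */

	/* ... operate on the stable snapshot in iovs[] ... */

	if (iovs != stackbuf)
		free(iovs);
	return ret;
}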
@@ -564,6 +607,10 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
op->op_recverr = rs->rs_recverr;
WARN_ON(!nr_pages);
op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
+ if (!op->op_sg) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (op->op_notify || op->op_recverr) {
/* We allocate an uninitialized notifier here, because
@@ -597,50 +644,40 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
(unsigned long long)args->remote_vec.addr,
op->op_rkey);
- local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
-
for (i = 0; i < args->nr_local; i++) {
- if (copy_from_user(&vec, &local_vec[i],
- sizeof(struct rds_iovec))) {
- ret = -EFAULT;
- goto out;
- }
-
- nr = rds_pages_in_vec(&vec);
- if (nr == 0) {
- ret = -EINVAL;
- goto out;
- }
+ struct rds_iovec *iov = &iovs[i];
+ /* no need to check: rds_rdma_pages() already verified nr is nonzero */
+ unsigned int nr = rds_pages_in_vec(iov);
- rs->rs_user_addr = vec.addr;
- rs->rs_user_bytes = vec.bytes;
+ rs->rs_user_addr = iov->addr;
+ rs->rs_user_bytes = iov->bytes;
/* If it's a WRITE operation, we want to pin the pages for reading.
* If it's a READ operation, we need to pin the pages for writing.
*/
- ret = rds_pin_pages(vec.addr, nr, pages, !op->op_write);
+ ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
if (ret < 0)
goto out;
- rdsdebug("RDS: nr_bytes %u nr %u vec.bytes %llu vec.addr %llx\n",
- nr_bytes, nr, vec.bytes, vec.addr);
+ rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
+ nr_bytes, nr, iov->bytes, iov->addr);
- nr_bytes += vec.bytes;
+ nr_bytes += iov->bytes;
for (j = 0; j < nr; j++) {
- unsigned int offset = vec.addr & ~PAGE_MASK;
+ unsigned int offset = iov->addr & ~PAGE_MASK;
struct scatterlist *sg;
sg = &op->op_sg[op->op_nents + j];
sg_set_page(sg, pages[j],
- min_t(unsigned int, vec.bytes, PAGE_SIZE - offset),
+ min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
offset);
- rdsdebug("RDS: sg->offset %x sg->len %x vec.addr %llx vec.bytes %llu\n",
- sg->offset, sg->length, vec.addr, vec.bytes);
+ rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
+ sg->offset, sg->length, iov->addr, iov->bytes);
- vec.addr += sg->length;
- vec.bytes -= sg->length;
+ iov->addr += sg->length;
+ iov->bytes -= sg->length;
}
op->op_nents += nr;
@@ -655,13 +692,14 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
}
op->op_bytes = nr_bytes;
- ret = 0;
out:
+ if (iovs != iovstack)
+ sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
kfree(pages);
if (ret)
rds_rdma_free_op(op);
-
- rds_stats_inc(s_send_rdma);
+ else
+ rds_stats_inc(s_send_rdma);
return ret;
}
@@ -773,6 +811,10 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
rm->atomic.op_active = 1;
rm->atomic.op_recverr = rs->rs_recverr;
rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
+ if (!rm->atomic.op_sg) {
+ ret = -ENOMEM;
+ goto err;
+ }
/* verify 8 byte-aligned */
if (args->local_addr & 0x7) {
diff --git a/net/rds/send.c b/net/rds/send.c
index 0bc9db17a87d..35b9c2e9caf1 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -973,6 +973,10 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
/* Attach data to the rm */
if (payload_len) {
rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
+ if (!rm->data.op_sg) {
+ ret = -ENOMEM;
+ goto out;
+ }
ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
if (ret)
goto out;
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 04f599089e6d..0198191b756d 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -149,20 +149,6 @@ static void rfkill_led_trigger_activate(struct led_classdev *led)
rfkill_led_trigger_event(rfkill);
}
-const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
-{
- return rfkill->led_trigger.name;
-}
-EXPORT_SYMBOL(rfkill_get_led_trigger_name);
-
-void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
-{
- BUG_ON(!rfkill);
-
- rfkill->ledtrigname = name;
-}
-EXPORT_SYMBOL(rfkill_set_led_trigger_name);
-
static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
rfkill->led_trigger.name = rfkill->ledtrigname
diff --git a/net/socket.c b/net/socket.c
index abf3e2561521..3ca2fd9e3720 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -305,19 +305,17 @@ static const struct super_operations sockfs_ops = {
.statfs = simple_statfs,
};
-static int sockfs_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data,
- struct vfsmount *mnt)
+static struct dentry *sockfs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
- return get_sb_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC,
- mnt);
+ return mount_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC);
}
static struct vfsmount *sock_mnt __read_mostly;
static struct file_system_type sock_fs_type = {
.name = "sockfs",
- .get_sb = sockfs_get_sb,
+ .mount = sockfs_mount,
.kill_sb = kill_anon_super,
};
@@ -377,7 +375,7 @@ static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
&socket_file_ops);
if (unlikely(!file)) {
/* drop dentry, keep inode */
- atomic_inc(&path.dentry->d_inode->i_count);
+ ihold(path.dentry->d_inode);
path_put(&path);
put_unused_fd(fd);
return -ENFILE;
@@ -480,6 +478,7 @@ static struct socket *sock_alloc(void)
sock = SOCKET_I(inode);
kmemcheck_annotate_bitfield(sock, type);
+ inode->i_ino = get_next_ino();
inode->i_mode = S_IFSOCK | S_IRWXUGO;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
@@ -1145,7 +1144,7 @@ call_kill:
}
EXPORT_SYMBOL(sock_wake_async);
-static int __sock_create(struct net *net, int family, int type, int protocol,
+int __sock_create(struct net *net, int family, int type, int protocol,
struct socket **res, int kern)
{
int err;
@@ -1257,6 +1256,7 @@ out_release:
rcu_read_unlock();
goto out_sock_release;
}
+EXPORT_SYMBOL(__sock_create);
int sock_create(int family, int type, int protocol, struct socket **res)
{
@@ -1652,6 +1652,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
struct iovec iov;
int fput_needed;
+ if (len > INT_MAX)
+ len = INT_MAX;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
@@ -1709,6 +1711,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
int err, err2;
int fput_needed;
+ if (size > INT_MAX)
+ size = INT_MAX;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index 3376d7657185..8873fd8ddacd 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -36,22 +36,3 @@ config RPCSEC_GSS_KRB5
Kerberos support should be installed.
If unsure, say Y.
-
-config RPCSEC_GSS_SPKM3
- tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)"
- depends on SUNRPC && EXPERIMENTAL
- select SUNRPC_GSS
- select CRYPTO
- select CRYPTO_MD5
- select CRYPTO_DES
- select CRYPTO_CAST5
- select CRYPTO_CBC
- help
- Choose Y here to enable Secure RPC using the SPKM3 public key
- GSS-API mechanism (RFC 2025).
-
- Secure RPC calls with SPKM3 require an auxiliary userspace
- daemon which may be found in the Linux nfs-utils package
- available from http://linux-nfs.org/.
-
- If unsure, say N.
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index e9eaaf7d43c1..afe67849269f 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -595,7 +595,7 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
int
rpcauth_refreshcred(struct rpc_task *task)
{
- struct rpc_cred *cred = task->tk_rqstp->rq_cred;
+ struct rpc_cred *cred;
int err;
cred = task->tk_rqstp->rq_cred;
@@ -658,7 +658,7 @@ out1:
return err;
}
-void __exit rpcauth_remove_module(void)
+void rpcauth_remove_module(void)
{
rpc_destroy_authunix();
rpc_destroy_generic_auth();
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 43162bb3b78f..e010a015d996 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -158,7 +158,7 @@ int __init rpc_init_generic_auth(void)
return rpcauth_init_credcache(&generic_auth);
}
-void __exit rpc_destroy_generic_auth(void)
+void rpc_destroy_generic_auth(void)
{
rpcauth_destroy_credcache(&generic_auth);
}
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile
index 74a231735f67..7350d86a32ee 100644
--- a/net/sunrpc/auth_gss/Makefile
+++ b/net/sunrpc/auth_gss/Makefile
@@ -11,8 +11,3 @@ obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o
-
-obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o
-
-rpcsec_gss_spkm3-objs := gss_spkm3_mech.o gss_spkm3_seal.o gss_spkm3_unseal.o \
- gss_spkm3_token.o
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 778e5dfc5144..f375decc024b 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -427,7 +427,7 @@ static int
context_derive_keys_rc4(struct krb5_ctx *ctx)
{
struct crypto_hash *hmac;
- char sigkeyconstant[] = "signaturekey";
+ static const char sigkeyconstant[] = "signaturekey";
int slen = strlen(sigkeyconstant) + 1; /* include null terminator */
struct hash_desc desc;
struct scatterlist sg[1];
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
deleted file mode 100644
index adade3d313f2..000000000000
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * linux/net/sunrpc/gss_spkm3_mech.c
- *
- * Copyright (c) 2003 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Andy Adamson <andros@umich.edu>
- * J. Bruce Fields <bfields@umich.edu>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/sunrpc/auth.h>
-#include <linux/in.h>
-#include <linux/sunrpc/svcauth_gss.h>
-#include <linux/sunrpc/gss_spkm3.h>
-#include <linux/sunrpc/xdr.h>
-#include <linux/crypto.h>
-
-#ifdef RPC_DEBUG
-# define RPCDBG_FACILITY RPCDBG_AUTH
-#endif
-
-static const void *
-simple_get_bytes(const void *p, const void *end, void *res, int len)
-{
- const void *q = (const void *)((const char *)p + len);
- if (unlikely(q > end || q < p))
- return ERR_PTR(-EFAULT);
- memcpy(res, p, len);
- return q;
-}
-
-static const void *
-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
-{
- const void *q;
- unsigned int len;
- p = simple_get_bytes(p, end, &len, sizeof(len));
- if (IS_ERR(p))
- return p;
- res->len = len;
- if (len == 0) {
- res->data = NULL;
- return p;
- }
- q = (const void *)((const char *)p + len);
- if (unlikely(q > end || q < p))
- return ERR_PTR(-EFAULT);
- res->data = kmemdup(p, len, GFP_NOFS);
- if (unlikely(res->data == NULL))
- return ERR_PTR(-ENOMEM);
- return q;
-}
-
-static int
-gss_import_sec_context_spkm3(const void *p, size_t len,
- struct gss_ctx *ctx_id,
- gfp_t gfp_mask)
-{
- const void *end = (const void *)((const char *)p + len);
- struct spkm3_ctx *ctx;
- int version;
-
- if (!(ctx = kzalloc(sizeof(*ctx), gfp_mask)))
- goto out_err;
-
- p = simple_get_bytes(p, end, &version, sizeof(version));
- if (IS_ERR(p))
- goto out_err_free_ctx;
- if (version != 1) {
- dprintk("RPC: unknown spkm3 token format: "
- "obsolete nfs-utils?\n");
- p = ERR_PTR(-EINVAL);
- goto out_err_free_ctx;
- }
-
- p = simple_get_netobj(p, end, &ctx->ctx_id);
- if (IS_ERR(p))
- goto out_err_free_ctx;
-
- p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
- if (IS_ERR(p))
- goto out_err_free_ctx_id;
-
- p = simple_get_netobj(p, end, &ctx->mech_used);
- if (IS_ERR(p))
- goto out_err_free_ctx_id;
-
- p = simple_get_bytes(p, end, &ctx->ret_flags, sizeof(ctx->ret_flags));
- if (IS_ERR(p))
- goto out_err_free_mech;
-
- p = simple_get_netobj(p, end, &ctx->conf_alg);
- if (IS_ERR(p))
- goto out_err_free_mech;
-
- p = simple_get_netobj(p, end, &ctx->derived_conf_key);
- if (IS_ERR(p))
- goto out_err_free_conf_alg;
-
- p = simple_get_netobj(p, end, &ctx->intg_alg);
- if (IS_ERR(p))
- goto out_err_free_conf_key;
-
- p = simple_get_netobj(p, end, &ctx->derived_integ_key);
- if (IS_ERR(p))
- goto out_err_free_intg_alg;
-
- if (p != end) {
- p = ERR_PTR(-EFAULT);
- goto out_err_free_intg_key;
- }
-
- ctx_id->internal_ctx_id = ctx;
-
- dprintk("RPC: Successfully imported new spkm context.\n");
- return 0;
-
-out_err_free_intg_key:
- kfree(ctx->derived_integ_key.data);
-out_err_free_intg_alg:
- kfree(ctx->intg_alg.data);
-out_err_free_conf_key:
- kfree(ctx->derived_conf_key.data);
-out_err_free_conf_alg:
- kfree(ctx->conf_alg.data);
-out_err_free_mech:
- kfree(ctx->mech_used.data);
-out_err_free_ctx_id:
- kfree(ctx->ctx_id.data);
-out_err_free_ctx:
- kfree(ctx);
-out_err:
- return PTR_ERR(p);
-}
-
-static void
-gss_delete_sec_context_spkm3(void *internal_ctx)
-{
- struct spkm3_ctx *sctx = internal_ctx;
-
- kfree(sctx->derived_integ_key.data);
- kfree(sctx->intg_alg.data);
- kfree(sctx->derived_conf_key.data);
- kfree(sctx->conf_alg.data);
- kfree(sctx->mech_used.data);
- kfree(sctx->ctx_id.data);
- kfree(sctx);
-}
-
-static u32
-gss_verify_mic_spkm3(struct gss_ctx *ctx,
- struct xdr_buf *signbuf,
- struct xdr_netobj *checksum)
-{
- u32 maj_stat = 0;
- struct spkm3_ctx *sctx = ctx->internal_ctx_id;
-
- maj_stat = spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK);
-
- dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat);
- return maj_stat;
-}
-
-static u32
-gss_get_mic_spkm3(struct gss_ctx *ctx,
- struct xdr_buf *message_buffer,
- struct xdr_netobj *message_token)
-{
- u32 err = 0;
- struct spkm3_ctx *sctx = ctx->internal_ctx_id;
-
- err = spkm3_make_token(sctx, message_buffer,
- message_token, SPKM_MIC_TOK);
- dprintk("RPC: gss_get_mic_spkm3 returning %d\n", err);
- return err;
-}
-
-static const struct gss_api_ops gss_spkm3_ops = {
- .gss_import_sec_context = gss_import_sec_context_spkm3,
- .gss_get_mic = gss_get_mic_spkm3,
- .gss_verify_mic = gss_verify_mic_spkm3,
- .gss_delete_sec_context = gss_delete_sec_context_spkm3,
-};
-
-static struct pf_desc gss_spkm3_pfs[] = {
- {RPC_AUTH_GSS_SPKM, RPC_GSS_SVC_NONE, "spkm3"},
- {RPC_AUTH_GSS_SPKMI, RPC_GSS_SVC_INTEGRITY, "spkm3i"},
-};
-
-static struct gss_api_mech gss_spkm3_mech = {
- .gm_name = "spkm3",
- .gm_owner = THIS_MODULE,
- .gm_oid = {7, "\053\006\001\005\005\001\003"},
- .gm_ops = &gss_spkm3_ops,
- .gm_pf_num = ARRAY_SIZE(gss_spkm3_pfs),
- .gm_pfs = gss_spkm3_pfs,
-};
-
-static int __init init_spkm3_module(void)
-{
- int status;
-
- status = gss_mech_register(&gss_spkm3_mech);
- if (status)
- printk("Failed to register spkm3 gss mechanism!\n");
- return status;
-}
-
-static void __exit cleanup_spkm3_module(void)
-{
- gss_mech_unregister(&gss_spkm3_mech);
-}
-
-MODULE_LICENSE("GPL");
-module_init(init_spkm3_module);
-module_exit(cleanup_spkm3_module);
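
The context importer above walks a length-prefixed blob handed down from user space; simple_get_bytes() and simple_get_netobj() each return the advanced cursor or an ERR_PTR on overflow, so a short or corrupt blob fails at the first field that would overrun the buffer. A minimal sketch of that bounded-parse pattern, with invented names and outside any kernel context, looks roughly like this:

	#include <stddef.h>
	#include <string.h>

	/* Return the advanced cursor, or NULL if the read would run past
	 * 'end' or the pointer arithmetic wraps (the kernel code returns
	 * ERR_PTR(-EFAULT) instead of NULL). */
	static const void *get_bytes(const void *p, const void *end,
				     void *res, size_t len)
	{
		const void *q = (const char *)p + len;

		if (q > end || q < p)
			return NULL;
		memcpy(res, p, len);
		return q;	/* caller continues parsing from here */
	}
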
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
deleted file mode 100644
index 5a3a65a0e2b4..000000000000
--- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * linux/net/sunrpc/gss_spkm3_seal.c
- *
- * Copyright (c) 2003 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Andy Adamson <andros@umich.edu>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/jiffies.h>
-#include <linux/sunrpc/gss_spkm3.h>
-#include <linux/random.h>
-#include <linux/crypto.h>
-#include <linux/pagemap.h>
-#include <linux/scatterlist.h>
-#include <linux/sunrpc/xdr.h>
-
-#ifdef RPC_DEBUG
-# define RPCDBG_FACILITY RPCDBG_AUTH
-#endif
-
-const struct xdr_netobj hmac_md5_oid = { 8, "\x2B\x06\x01\x05\x05\x08\x01\x01"};
-const struct xdr_netobj cast5_cbc_oid = {9, "\x2A\x86\x48\x86\xF6\x7D\x07\x42\x0A"};
-
-/*
- * spkm3_make_token()
- *
- * Only SPKM_MIC_TOK with md5 intg-alg is supported
- */
-
-u32
-spkm3_make_token(struct spkm3_ctx *ctx,
- struct xdr_buf * text, struct xdr_netobj * token,
- int toktype)
-{
- s32 checksum_type;
- char tokhdrbuf[25];
- char cksumdata[16];
- struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
- struct xdr_netobj mic_hdr = {.len = 0, .data = tokhdrbuf};
- int tokenlen = 0;
- unsigned char *ptr;
- s32 now;
- int ctxelen = 0, ctxzbit = 0;
- int md5elen = 0, md5zbit = 0;
-
- now = jiffies;
-
- if (ctx->ctx_id.len != 16) {
- dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n",
- ctx->ctx_id.len);
- goto out_err;
- }
-
- if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) {
- dprintk("RPC: gss_spkm3_seal: unsupported I-ALG "
- "algorithm. only support hmac-md5 I-ALG.\n");
- goto out_err;
- } else
- checksum_type = CKSUMTYPE_HMAC_MD5;
-
- if (!g_OID_equal(&ctx->conf_alg, &cast5_cbc_oid)) {
- dprintk("RPC: gss_spkm3_seal: unsupported C-ALG "
- "algorithm\n");
- goto out_err;
- }
-
- if (toktype == SPKM_MIC_TOK) {
- /* Calculate checksum over the mic-header */
- asn1_bitstring_len(&ctx->ctx_id, &ctxelen, &ctxzbit);
- spkm3_mic_header(&mic_hdr.data, &mic_hdr.len, ctx->ctx_id.data,
- ctxelen, ctxzbit);
- if (make_spkm3_checksum(checksum_type, &ctx->derived_integ_key,
- (char *)mic_hdr.data, mic_hdr.len,
- text, 0, &md5cksum))
- goto out_err;
-
- asn1_bitstring_len(&md5cksum, &md5elen, &md5zbit);
- tokenlen = 10 + ctxelen + 1 + md5elen + 1;
-
- /* Create token header using generic routines */
- token->len = g_token_size(&ctx->mech_used, tokenlen + 2);
-
- ptr = token->data;
- g_make_token_header(&ctx->mech_used, tokenlen + 2, &ptr);
-
- spkm3_make_mic_token(&ptr, tokenlen, &mic_hdr, &md5cksum, md5elen, md5zbit);
- } else if (toktype == SPKM_WRAP_TOK) { /* Not Supported */
- dprintk("RPC: gss_spkm3_seal: SPKM_WRAP_TOK "
- "not supported\n");
- goto out_err;
- }
-
- /* XXX need to implement sequence numbers, and ctx->expired */
-
- return GSS_S_COMPLETE;
-out_err:
- token->data = NULL;
- token->len = 0;
- return GSS_S_FAILURE;
-}
-
-static int
-spkm3_checksummer(struct scatterlist *sg, void *data)
-{
- struct hash_desc *desc = data;
-
- return crypto_hash_update(desc, sg, sg->length);
-}
-
-/* checksum the plaintext data and hdrlen bytes of the token header */
-s32
-make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header,
- unsigned int hdrlen, struct xdr_buf *body,
- unsigned int body_offset, struct xdr_netobj *cksum)
-{
- char *cksumname;
- struct hash_desc desc; /* XXX add to ctx? */
- struct scatterlist sg[1];
- int err;
-
- switch (cksumtype) {
- case CKSUMTYPE_HMAC_MD5:
- cksumname = "hmac(md5)";
- break;
- default:
- dprintk("RPC: spkm3_make_checksum:"
- " unsupported checksum %d", cksumtype);
- return GSS_S_FAILURE;
- }
-
- if (key->data == NULL || key->len <= 0) return GSS_S_FAILURE;
-
- desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(desc.tfm))
- return GSS_S_FAILURE;
- cksum->len = crypto_hash_digestsize(desc.tfm);
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
- err = crypto_hash_setkey(desc.tfm, key->data, key->len);
- if (err)
- goto out;
-
- err = crypto_hash_init(&desc);
- if (err)
- goto out;
-
- sg_init_one(sg, header, hdrlen);
- crypto_hash_update(&desc, sg, sg->length);
-
- xdr_process_buf(body, body_offset, body->len - body_offset,
- spkm3_checksummer, &desc);
- crypto_hash_final(&desc, cksum->data);
-
-out:
- crypto_free_hash(desc.tfm);
-
- return err ? GSS_S_FAILURE : 0;
-}
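
make_spkm3_checksum() above drives the old synchronous hash API of this era (struct hash_desc plus the crypto_hash_* calls) to compute an HMAC-MD5 over the mic-header and the XDR body. Purely as a point of reference, a one-shot digest over a single flat buffer with that same legacy API would look roughly like the sketch below; the function name is invented and nothing here is part of the patch:

	#include <linux/crypto.h>
	#include <linux/scatterlist.h>
	#include <linux/err.h>

	static int hmac_md5_digest(const u8 *key, unsigned int keylen,
				   const u8 *data, unsigned int len, u8 *out)
	{
		struct hash_desc desc;
		struct scatterlist sg[1];
		int err;

		desc.tfm = crypto_alloc_hash("hmac(md5)", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(desc.tfm))
			return PTR_ERR(desc.tfm);
		desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

		err = crypto_hash_setkey(desc.tfm, key, keylen);
		if (!err) {
			sg_init_one(sg, data, len);
			err = crypto_hash_digest(&desc, sg, len, out);
		}
		crypto_free_hash(desc.tfm);
		return err;
	}

The deleted code cannot use the one-shot form because the message body lives in an xdr_buf spread over head, pages and tail, hence the xdr_process_buf() walk with spkm3_checksummer() feeding crypto_hash_update() piecewise.
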
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
deleted file mode 100644
index a99825d7caa0..000000000000
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * linux/net/sunrpc/gss_spkm3_token.c
- *
- * Copyright (c) 2003 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Andy Adamson <andros@umich.edu>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/sunrpc/gss_spkm3.h>
-#include <linux/random.h>
-#include <linux/crypto.h>
-
-#ifdef RPC_DEBUG
-# define RPCDBG_FACILITY RPCDBG_AUTH
-#endif
-
-/*
- * asn1_bitstring_len()
- *
- * calculate the asn1 bitstring length of the xdr_netobject
- */
-void
-asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits)
-{
- int i, zbit = 0,elen = in->len;
- char *ptr;
-
- ptr = &in->data[in->len -1];
-
- /* count trailing 0's */
- for(i = in->len; i > 0; i--) {
- if (*ptr == 0) {
- ptr--;
- elen--;
- } else
- break;
- }
-
- /* count number of 0 bits in final octet */
- ptr = &in->data[elen - 1];
- for(i = 0; i < 8; i++) {
- short mask = 0x01;
-
- if (!((mask << i) & *ptr))
- zbit++;
- else
- break;
- }
- *enclen = elen;
- *zerobits = zbit;
-}
-
-/*
- * decode_asn1_bitstring()
- *
- * decode a bitstring into a buffer of the expected length.
- * enclen = bit string length
- * explen = expected length (define in rfc)
- */
-int
-decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
-{
- if (!(out->data = kzalloc(explen,GFP_NOFS)))
- return 0;
- out->len = explen;
- memcpy(out->data, in, enclen);
- return 1;
-}
-
-/*
- * SPKMInnerContextToken choice SPKM_MIC asn1 token layout
- *
- * contextid is always 16 bytes plain data. max asn1 bitstring len = 17.
- *
- * tokenlen = pos[0] to end of token (max pos[45] with MD5 cksum)
- *
- * pos value
- * ----------
- * [0] a4 SPKM-MIC tag
- * [1] ?? innertoken length (max 44)
- *
- *
- * tok_hdr piece of checksum data starts here
- *
- * the maximum mic-header len = 9 + 17 = 26
- * mic-header
- * ----------
- * [2] 30 SEQUENCE tag
- * [3] ?? mic-header length: (max 23) = TokenID + ContextID
- *
- * TokenID - all fields constant and can be hardcoded
- * -------
- * [4] 02 Type 2
- * [5] 02 Length 2
- * [6][7] 01 01 TokenID (SPKM_MIC_TOK)
- *
- * ContextID - encoded length not constant, calculated
- * ---------
- * [8] 03 Type 3
- * [9] ?? encoded length
- * [10] ?? ctxzbit
- * [11] contextid
- *
- * mic_header piece of checksum data ends here.
- *
- * int-cksum - encoded length not constant, calculated
- * ---------
- * [??] 03 Type 3
- * [??] ?? encoded length
- * [??] ?? md5zbit
- * [??] int-cksum (NID_md5 = 16)
- *
- * maximum SPKM-MIC innercontext token length =
- * 10 + encoded contextid_size(17 max) + 2 + encoded
- * cksum_size (17 max for NID_md5) = 46
- */
-
-/*
- * spkm3_mic_header()
- *
- * Prepare the SPKM_MIC_TOK mic-header for check-sum calculation
- * elen: 16 byte context id asn1 bitstring encoded length
- */
-void
-spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, unsigned char *ctxdata, int elen, int zbit)
-{
- char *hptr = *hdrbuf;
- char *top = *hdrbuf;
-
- *(u8 *)hptr++ = 0x30;
- *(u8 *)hptr++ = elen + 7; /* on the wire header length */
-
- /* tokenid */
- *(u8 *)hptr++ = 0x02;
- *(u8 *)hptr++ = 0x02;
- *(u8 *)hptr++ = 0x01;
- *(u8 *)hptr++ = 0x01;
-
- /* contextid */
- *(u8 *)hptr++ = 0x03;
- *(u8 *)hptr++ = elen + 1; /* add 1 to include zbit */
- *(u8 *)hptr++ = zbit;
- memcpy(hptr, ctxdata, elen);
- hptr += elen;
- *hdrlen = hptr - top;
-}
-
-/*
- * spkm3_make_mic_token()
- *
- * *tokp points to the beginning of the SPKM_MIC token described
- * in rfc 2025, section 3.2.1:
- *
- * toklen is the inner token length
- */
-void
-spkm3_make_mic_token(unsigned char **tokp, int toklen, struct xdr_netobj *mic_hdr, struct xdr_netobj *md5cksum, int md5elen, int md5zbit)
-{
- unsigned char *ict = *tokp;
-
- *(u8 *)ict++ = 0xa4;
- *(u8 *)ict++ = toklen;
- memcpy(ict, mic_hdr->data, mic_hdr->len);
- ict += mic_hdr->len;
-
- *(u8 *)ict++ = 0x03;
- *(u8 *)ict++ = md5elen + 1; /* add 1 to include zbit */
- *(u8 *)ict++ = md5zbit;
- memcpy(ict, md5cksum->data, md5elen);
-}
-
-u32
-spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **cksum)
-{
- struct xdr_netobj spkm3_ctx_id = {.len =0, .data = NULL};
- unsigned char *ptr = *tokp;
- int ctxelen;
- u32 ret = GSS_S_DEFECTIVE_TOKEN;
-
- /* spkm3 innercontext token preamble */
- if ((ptr[0] != 0xa4) || (ptr[2] != 0x30)) {
- dprintk("RPC: BAD SPKM ictoken preamble\n");
- goto out;
- }
-
- *mic_hdrlen = ptr[3];
-
- /* token type */
- if ((ptr[4] != 0x02) || (ptr[5] != 0x02)) {
- dprintk("RPC: BAD asn1 SPKM3 token type\n");
- goto out;
- }
-
- /* only support SPKM_MIC_TOK */
- if((ptr[6] != 0x01) || (ptr[7] != 0x01)) {
- dprintk("RPC: ERROR unsupported SPKM3 token\n");
- goto out;
- }
-
- /* contextid */
- if (ptr[8] != 0x03) {
- dprintk("RPC: BAD SPKM3 asn1 context-id type\n");
- goto out;
- }
-
- ctxelen = ptr[9];
- if (ctxelen > 17) { /* length includes asn1 zbit octet */
- dprintk("RPC: BAD SPKM3 contextid len %d\n", ctxelen);
- goto out;
- }
-
- /* ignore ptr[10] */
-
- if(!decode_asn1_bitstring(&spkm3_ctx_id, &ptr[11], ctxelen - 1, 16))
- goto out;
-
- /*
- * in the current implementation, the optional int-alg is not present,
- * so the default int-alg (md5) is used; the optional snd-seq field is
- * also not present
- */
-
- if (*mic_hdrlen != 6 + ctxelen) {
- dprintk("RPC: BAD SPKM_ MIC_TOK header len %d: we only "
- "support default int-alg (should be absent) "
- "and do not support snd-seq\n", *mic_hdrlen);
- goto out;
- }
- /* checksum */
- *cksum = (&ptr[10] + ctxelen); /* ctxelen includes ptr[10] */
-
- ret = GSS_S_COMPLETE;
-out:
- kfree(spkm3_ctx_id.data);
- return ret;
-}
-
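
Putting the layout comment and spkm3_mic_header() together, a concrete example may help: for a full 16-byte context id with no trailing zero bits (elen = 16, zbit = 0) the emitted mic-header is 9 + 16 = 25 bytes,

	30 17             SEQUENCE, content length 23 (= elen + 7)
	02 02 01 01       TokenID (SPKM_MIC_TOK)
	03 11 00          BIT STRING, length 17 (= elen + 1), zbit = 0
	xx xx ... xx      the 16 context-id bytes

which matches the "max 23" mic-header length and the "max asn1 bitstring len = 17" noted in the layout comment above.
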
diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
deleted file mode 100644
index cc21ee860bb6..000000000000
--- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * linux/net/sunrpc/gss_spkm3_unseal.c
- *
- * Copyright (c) 2003 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Andy Adamson <andros@umich.edu>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
-#include <linux/sunrpc/gss_spkm3.h>
-#include <linux/crypto.h>
-
-#ifdef RPC_DEBUG
-# define RPCDBG_FACILITY RPCDBG_AUTH
-#endif
-
-/*
- * spkm3_read_token()
- *
- * only SPKM_MIC_TOK with md5 intg-alg is supported
- */
-u32
-spkm3_read_token(struct spkm3_ctx *ctx,
- struct xdr_netobj *read_token, /* checksum */
- struct xdr_buf *message_buffer, /* signbuf */
- int toktype)
-{
- s32 checksum_type;
- s32 code;
- struct xdr_netobj wire_cksum = {.len =0, .data = NULL};
- char cksumdata[16];
- struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
- unsigned char *ptr = (unsigned char *)read_token->data;
- unsigned char *cksum;
- int bodysize, md5elen;
- int mic_hdrlen;
- u32 ret = GSS_S_DEFECTIVE_TOKEN;
-
- if (g_verify_token_header((struct xdr_netobj *) &ctx->mech_used,
- &bodysize, &ptr, read_token->len))
- goto out;
-
- /* decode the token */
-
- if (toktype != SPKM_MIC_TOK) {
- dprintk("RPC: BAD SPKM3 token type: %d\n", toktype);
- goto out;
- }
-
- if ((ret = spkm3_verify_mic_token(&ptr, &mic_hdrlen, &cksum)))
- goto out;
-
- if (*cksum++ != 0x03) {
- dprintk("RPC: spkm3_read_token BAD checksum type\n");
- goto out;
- }
- md5elen = *cksum++;
- cksum++; /* move past the zbit */
-
- if (!decode_asn1_bitstring(&wire_cksum, cksum, md5elen - 1, 16))
- goto out;
-
- /* HARD CODED FOR MD5 */
-
- /* compute the checksum of the message.
- * ptr + 2 = start of header piece of checksum
- * mic_hdrlen + 2 = length of header piece of checksum
- */
- ret = GSS_S_DEFECTIVE_TOKEN;
- if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) {
- dprintk("RPC: gss_spkm3_seal: unsupported I-ALG "
- "algorithm\n");
- goto out;
- }
-
- checksum_type = CKSUMTYPE_HMAC_MD5;
-
- code = make_spkm3_checksum(checksum_type,
- &ctx->derived_integ_key, ptr + 2, mic_hdrlen + 2,
- message_buffer, 0, &md5cksum);
-
- if (code)
- goto out;
-
- ret = GSS_S_BAD_SIG;
- code = memcmp(md5cksum.data, wire_cksum.data, wire_cksum.len);
- if (code) {
- dprintk("RPC: bad MIC checksum\n");
- goto out;
- }
-
-
- /* XXX: need to add expiration and sequencing */
- ret = GSS_S_COMPLETE;
-out:
- kfree(wire_cksum.data);
- return ret;
-}
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index cc385b3a59c2..dec2a6fc7c12 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -964,7 +964,7 @@ svcauth_gss_set_client(struct svc_rqst *rqstp)
if (rqstp->rq_gssclient == NULL)
return SVC_DENIED;
stat = svcauth_unix_set_client(rqstp);
- if (stat == SVC_DROP)
+ if (stat == SVC_DROP || stat == SVC_CLOSE)
return stat;
return SVC_OK;
}
@@ -1018,7 +1018,7 @@ static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
return SVC_DENIED;
memset(&rsikey, 0, sizeof(rsikey));
if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
- return SVC_DROP;
+ return SVC_CLOSE;
*authp = rpc_autherr_badverf;
if (svc_safe_getnetobj(argv, &tmpobj)) {
kfree(rsikey.in_handle.data);
@@ -1026,38 +1026,35 @@ static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
}
if (dup_netobj(&rsikey.in_token, &tmpobj)) {
kfree(rsikey.in_handle.data);
- return SVC_DROP;
+ return SVC_CLOSE;
}
/* Perform upcall, or find upcall result: */
rsip = rsi_lookup(&rsikey);
rsi_free(&rsikey);
if (!rsip)
- return SVC_DROP;
- switch (cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle)) {
- case -EAGAIN:
- case -ETIMEDOUT:
- case -ENOENT:
+ return SVC_CLOSE;
+ if (cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle) < 0)
/* No upcall result: */
- return SVC_DROP;
- case 0:
- ret = SVC_DROP;
- /* Got an answer to the upcall; use it: */
- if (gss_write_init_verf(rqstp, rsip))
- goto out;
- if (resv->iov_len + 4 > PAGE_SIZE)
- goto out;
- svc_putnl(resv, RPC_SUCCESS);
- if (svc_safe_putnetobj(resv, &rsip->out_handle))
- goto out;
- if (resv->iov_len + 3 * 4 > PAGE_SIZE)
- goto out;
- svc_putnl(resv, rsip->major_status);
- svc_putnl(resv, rsip->minor_status);
- svc_putnl(resv, GSS_SEQ_WIN);
- if (svc_safe_putnetobj(resv, &rsip->out_token))
- goto out;
- }
+ return SVC_CLOSE;
+
+ ret = SVC_CLOSE;
+ /* Got an answer to the upcall; use it: */
+ if (gss_write_init_verf(rqstp, rsip))
+ goto out;
+ if (resv->iov_len + 4 > PAGE_SIZE)
+ goto out;
+ svc_putnl(resv, RPC_SUCCESS);
+ if (svc_safe_putnetobj(resv, &rsip->out_handle))
+ goto out;
+ if (resv->iov_len + 3 * 4 > PAGE_SIZE)
+ goto out;
+ svc_putnl(resv, rsip->major_status);
+ svc_putnl(resv, rsip->minor_status);
+ svc_putnl(resv, GSS_SEQ_WIN);
+ if (svc_safe_putnetobj(resv, &rsip->out_token))
+ goto out;
+
ret = SVC_COMPLETE;
out:
cache_put(&rsip->h, &rsi_cache);
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 7dce81a926c5..e433e7580e27 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -33,15 +33,16 @@
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
+#include "netns.h"
#define RPCDBG_FACILITY RPCDBG_CACHE
-static int cache_defer_req(struct cache_req *req, struct cache_head *item);
+static void cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);
static void cache_init(struct cache_head *h)
{
- time_t now = get_seconds();
+ time_t now = seconds_since_boot();
h->next = NULL;
h->flags = 0;
kref_init(&h->ref);
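
Throughout this file the series swaps get_seconds(), which follows the wall clock, for seconds_since_boot(), so cache expiry keeps working when the system time is stepped. The helpers themselves live in the sunrpc cache header, which is outside this net/-only diff; they are presumably defined along these lines (a sketch, not an actual hunk from the patch):

	#include <linux/time.h>

	static inline time_t seconds_since_boot(void)
	{
		struct timespec boot;
		getboottime(&boot);
		return get_seconds() - boot.tv_sec;
	}

	static inline time_t convert_to_wallclock(time_t sinceboot)
	{
		struct timespec boot;
		getboottime(&boot);
		return boot.tv_sec + sinceboot;
	}

convert_to_wallclock() is what the c_show() and read_flush() hunks further down use, so the times exported to user space remain wall-clock values.
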
@@ -51,7 +52,7 @@ static void cache_init(struct cache_head *h)
static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
{
- return (h->expiry_time < get_seconds()) ||
+ return (h->expiry_time < seconds_since_boot()) ||
(detail->flush_time > h->last_refresh);
}
@@ -126,7 +127,7 @@ static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
static void cache_fresh_locked(struct cache_head *head, time_t expiry)
{
head->expiry_time = expiry;
- head->last_refresh = get_seconds();
+ head->last_refresh = seconds_since_boot();
set_bit(CACHE_VALID, &head->flags);
}
@@ -237,7 +238,7 @@ int cache_check(struct cache_detail *detail,
/* now see if we want to start an upcall */
refresh_age = (h->expiry_time - h->last_refresh);
- age = get_seconds() - h->last_refresh;
+ age = seconds_since_boot() - h->last_refresh;
if (rqstp == NULL) {
if (rv == -EAGAIN)
@@ -252,7 +253,7 @@ int cache_check(struct cache_detail *detail,
cache_revisit_request(h);
if (rv == -EAGAIN) {
set_bit(CACHE_NEGATIVE, &h->flags);
- cache_fresh_locked(h, get_seconds()+CACHE_NEW_EXPIRY);
+ cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY);
cache_fresh_unlocked(h, detail);
rv = -ENOENT;
}
@@ -267,7 +268,8 @@ int cache_check(struct cache_detail *detail,
}
if (rv == -EAGAIN) {
- if (cache_defer_req(rqstp, h) < 0) {
+ cache_defer_req(rqstp, h);
+ if (!test_bit(CACHE_PENDING, &h->flags)) {
/* Request is not deferred */
rv = cache_is_valid(detail, h);
if (rv == -EAGAIN)
@@ -387,11 +389,11 @@ static int cache_clean(void)
return -1;
}
current_detail = list_entry(next, struct cache_detail, others);
- if (current_detail->nextcheck > get_seconds())
+ if (current_detail->nextcheck > seconds_since_boot())
current_index = current_detail->hash_size;
else {
current_index = 0;
- current_detail->nextcheck = get_seconds()+30*60;
+ current_detail->nextcheck = seconds_since_boot()+30*60;
}
}
@@ -476,7 +478,7 @@ EXPORT_SYMBOL_GPL(cache_flush);
void cache_purge(struct cache_detail *detail)
{
detail->flush_time = LONG_MAX;
- detail->nextcheck = get_seconds();
+ detail->nextcheck = seconds_since_boot();
cache_flush();
detail->flush_time = 1;
}
@@ -505,81 +507,155 @@ EXPORT_SYMBOL_GPL(cache_purge);
static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
-static struct list_head cache_defer_hash[DFR_HASHSIZE];
+static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;
-static int cache_defer_req(struct cache_req *req, struct cache_head *item)
+static void __unhash_deferred_req(struct cache_deferred_req *dreq)
+{
+ hlist_del_init(&dreq->hash);
+ if (!list_empty(&dreq->recent)) {
+ list_del_init(&dreq->recent);
+ cache_defer_cnt--;
+ }
+}
+
+static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
- struct cache_deferred_req *dreq, *discard;
int hash = DFR_HASH(item);
- if (cache_defer_cnt >= DFR_MAX) {
- /* too much in the cache, randomly drop this one,
- * or continue and drop the oldest below
- */
- if (net_random()&1)
- return -ENOMEM;
- }
- dreq = req->defer(req);
- if (dreq == NULL)
- return -ENOMEM;
+ INIT_LIST_HEAD(&dreq->recent);
+ hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
+}
+
+static void setup_deferral(struct cache_deferred_req *dreq,
+ struct cache_head *item,
+ int count_me)
+{
dreq->item = item;
spin_lock(&cache_defer_lock);
- list_add(&dreq->recent, &cache_defer_list);
-
- if (cache_defer_hash[hash].next == NULL)
- INIT_LIST_HEAD(&cache_defer_hash[hash]);
- list_add(&dreq->hash, &cache_defer_hash[hash]);
+ __hash_deferred_req(dreq, item);
- /* it is in, now maybe clean up */
- discard = NULL;
- if (++cache_defer_cnt > DFR_MAX) {
- discard = list_entry(cache_defer_list.prev,
- struct cache_deferred_req, recent);
- list_del_init(&discard->recent);
- list_del_init(&discard->hash);
- cache_defer_cnt--;
+ if (count_me) {
+ cache_defer_cnt++;
+ list_add(&dreq->recent, &cache_defer_list);
}
+
spin_unlock(&cache_defer_lock);
+}
+
+struct thread_deferred_req {
+ struct cache_deferred_req handle;
+ struct completion completion;
+};
+
+static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
+{
+ struct thread_deferred_req *dr =
+ container_of(dreq, struct thread_deferred_req, handle);
+ complete(&dr->completion);
+}
+
+static void cache_wait_req(struct cache_req *req, struct cache_head *item)
+{
+ struct thread_deferred_req sleeper;
+ struct cache_deferred_req *dreq = &sleeper.handle;
+
+ sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
+ dreq->revisit = cache_restart_thread;
+
+ setup_deferral(dreq, item, 0);
+
+ if (!test_bit(CACHE_PENDING, &item->flags) ||
+ wait_for_completion_interruptible_timeout(
+ &sleeper.completion, req->thread_wait) <= 0) {
+ /* The completion wasn't completed, so we need
+ * to clean up
+ */
+ spin_lock(&cache_defer_lock);
+ if (!hlist_unhashed(&sleeper.handle.hash)) {
+ __unhash_deferred_req(&sleeper.handle);
+ spin_unlock(&cache_defer_lock);
+ } else {
+ /* cache_revisit_request already removed
+ * this from the hash table, but hasn't
+ * called ->revisit yet. It will very soon
+ * and we need to wait for it.
+ */
+ spin_unlock(&cache_defer_lock);
+ wait_for_completion(&sleeper.completion);
+ }
+ }
+}
+
+static void cache_limit_defers(void)
+{
+ /* Make sure we haven't exceeded the limit of allowed deferred
+ * requests.
+ */
+ struct cache_deferred_req *discard = NULL;
+
+ if (cache_defer_cnt <= DFR_MAX)
+ return;
+
+ spin_lock(&cache_defer_lock);
+
+ /* Consider removing either the first or the last */
+ if (cache_defer_cnt > DFR_MAX) {
+ if (net_random() & 1)
+ discard = list_entry(cache_defer_list.next,
+ struct cache_deferred_req, recent);
+ else
+ discard = list_entry(cache_defer_list.prev,
+ struct cache_deferred_req, recent);
+ __unhash_deferred_req(discard);
+ }
+ spin_unlock(&cache_defer_lock);
if (discard)
- /* there was one too many */
discard->revisit(discard, 1);
+}
- if (!test_bit(CACHE_PENDING, &item->flags)) {
- /* must have just been validated... */
- cache_revisit_request(item);
- return -EAGAIN;
+static void cache_defer_req(struct cache_req *req, struct cache_head *item)
+{
+ struct cache_deferred_req *dreq;
+
+ if (req->thread_wait) {
+ cache_wait_req(req, item);
+ if (!test_bit(CACHE_PENDING, &item->flags))
+ return;
}
- return 0;
+ dreq = req->defer(req);
+ if (dreq == NULL)
+ return;
+ setup_deferral(dreq, item, 1);
+ if (!test_bit(CACHE_PENDING, &item->flags))
+ /* Bit could have been cleared before we managed to
+ * set up the deferral, so need to revisit just in case
+ */
+ cache_revisit_request(item);
+
+ cache_limit_defers();
}
static void cache_revisit_request(struct cache_head *item)
{
struct cache_deferred_req *dreq;
struct list_head pending;
-
- struct list_head *lp;
+ struct hlist_node *lp, *tmp;
int hash = DFR_HASH(item);
INIT_LIST_HEAD(&pending);
spin_lock(&cache_defer_lock);
- lp = cache_defer_hash[hash].next;
- if (lp) {
- while (lp != &cache_defer_hash[hash]) {
- dreq = list_entry(lp, struct cache_deferred_req, hash);
- lp = lp->next;
- if (dreq->item == item) {
- list_del_init(&dreq->hash);
- list_move(&dreq->recent, &pending);
- cache_defer_cnt--;
- }
+ hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
+ if (dreq->item == item) {
+ __unhash_deferred_req(dreq);
+ list_add(&dreq->recent, &pending);
}
- }
+
spin_unlock(&cache_defer_lock);
while (!list_empty(&pending)) {
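
The rewritten deferral path has one subtle lifetime rule: cache_wait_req() publishes a completion that lives on the calling thread's stack, so when the wait times out it must either unhash the entry itself or, if cache_revisit_request() has already dequeued it, block until the revisit callback fires before letting the frame go away. The same pattern in isolation, with invented names and none of the cache machinery, is roughly:

	#include <linux/completion.h>

	struct stack_waiter {
		struct completion done;
	};

	static void waiter_example(long timeout)
	{
		struct stack_waiter w;

		w.done = COMPLETION_INITIALIZER_ONSTACK(w.done);

		/* publish &w somewhere another context can find it and
		 * call complete(&w.done) */

		if (wait_for_completion_interruptible_timeout(&w.done,
							      timeout) <= 0) {
			/* timed out or interrupted: unpublish &w under the
			 * relevant lock; if it was already taken, wait for
			 * the completion before returning, because the other
			 * context still holds a pointer onto this stack */
		}
	}
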
@@ -600,9 +676,8 @@ void cache_clean_deferred(void *owner)
list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
if (dreq->owner == owner) {
- list_del_init(&dreq->hash);
- list_move(&dreq->recent, &pending);
- cache_defer_cnt--;
+ __unhash_deferred_req(dreq);
+ list_add(&dreq->recent, &pending);
}
}
spin_unlock(&cache_defer_lock);
@@ -901,7 +976,7 @@ static int cache_release(struct inode *inode, struct file *filp,
filp->private_data = NULL;
kfree(rp);
- cd->last_close = get_seconds();
+ cd->last_close = seconds_since_boot();
atomic_dec(&cd->readers);
}
module_put(cd->owner);
@@ -1014,6 +1089,23 @@ static void warn_no_listener(struct cache_detail *detail)
}
}
+static bool cache_listeners_exist(struct cache_detail *detail)
+{
+ if (atomic_read(&detail->readers))
+ return true;
+ if (detail->last_close == 0)
+ /* This cache was never opened */
+ return false;
+ if (detail->last_close < seconds_since_boot() - 30)
+ /*
+ * We allow for the possibility that someone might
+ * restart a userspace daemon without restarting the
+ * server; but after 30 seconds, we give up.
+ */
+ return false;
+ return true;
+}
+
/*
* register an upcall request to user-space and queue it up for read() by the
* upcall daemon.
@@ -1032,10 +1124,9 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
char *bp;
int len;
- if (atomic_read(&detail->readers) == 0 &&
- detail->last_close < get_seconds() - 30) {
- warn_no_listener(detail);
- return -EINVAL;
+ if (!cache_listeners_exist(detail)) {
+ warn_no_listener(detail);
+ return -EINVAL;
}
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -1094,13 +1185,19 @@ int qword_get(char **bpp, char *dest, int bufsize)
if (bp[0] == '\\' && bp[1] == 'x') {
/* HEX STRING */
bp += 2;
- while (isxdigit(bp[0]) && isxdigit(bp[1]) && len < bufsize) {
- int byte = isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
- bp++;
- byte <<= 4;
- byte |= isdigit(*bp) ? *bp-'0' : toupper(*bp)-'A'+10;
- *dest++ = byte;
- bp++;
+ while (len < bufsize) {
+ int h, l;
+
+ h = hex_to_bin(bp[0]);
+ if (h < 0)
+ break;
+
+ l = hex_to_bin(bp[1]);
+ if (l < 0)
+ break;
+
+ *dest++ = (h << 4) | l;
+ bp += 2;
len++;
}
} else {
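
qword_get()'s open-coded isxdigit()/toupper() arithmetic is replaced here by hex_to_bin(), which returns the nibble value for a hex digit and a negative number for anything else, so the loop stops cleanly at the first non-hex character. In isolation the decode step looks roughly like this sketch (function name invented):

	#include <linux/kernel.h>
	#include <linux/types.h>

	static int decode_hex_pairs(const char *bp, u8 *dest, int bufsize)
	{
		int len = 0;

		while (len < bufsize) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;
			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;
			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
		return len;	/* number of decoded bytes */
	}
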
@@ -1218,7 +1315,8 @@ static int c_show(struct seq_file *m, void *p)
ifdebug(CACHE)
seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
- cp->expiry_time, atomic_read(&cp->ref.refcount), cp->flags);
+ convert_to_wallclock(cp->expiry_time),
+ atomic_read(&cp->ref.refcount), cp->flags);
cache_get(cp);
if (cache_check(cd, cp, NULL))
/* cache_check does a cache_put on failure */
@@ -1284,7 +1382,7 @@ static ssize_t read_flush(struct file *file, char __user *buf,
unsigned long p = *ppos;
size_t len;
- sprintf(tbuf, "%lu\n", cd->flush_time);
+ sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time));
len = strlen(tbuf);
if (p >= len)
return 0;
@@ -1302,19 +1400,20 @@ static ssize_t write_flush(struct file *file, const char __user *buf,
struct cache_detail *cd)
{
char tbuf[20];
- char *ep;
- long flushtime;
+ char *bp, *ep;
+
if (*ppos || count > sizeof(tbuf)-1)
return -EINVAL;
if (copy_from_user(tbuf, buf, count))
return -EFAULT;
tbuf[count] = 0;
- flushtime = simple_strtoul(tbuf, &ep, 0);
+ simple_strtoul(tbuf, &ep, 0);
if (*ep && *ep != '\n')
return -EINVAL;
- cd->flush_time = flushtime;
- cd->nextcheck = get_seconds();
+ bp = tbuf;
+ cd->flush_time = get_expiry(&bp);
+ cd->nextcheck = seconds_since_boot();
cache_flush();
*ppos += count;
@@ -1438,8 +1537,10 @@ static const struct file_operations cache_flush_operations_procfs = {
.llseek = no_llseek,
};
-static void remove_cache_proc_entries(struct cache_detail *cd)
+static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
+ struct sunrpc_net *sn;
+
if (cd->u.procfs.proc_ent == NULL)
return;
if (cd->u.procfs.flush_ent)
@@ -1449,15 +1550,18 @@ static void remove_cache_proc_entries(struct cache_detail *cd)
if (cd->u.procfs.content_ent)
remove_proc_entry("content", cd->u.procfs.proc_ent);
cd->u.procfs.proc_ent = NULL;
- remove_proc_entry(cd->name, proc_net_rpc);
+ sn = net_generic(net, sunrpc_net_id);
+ remove_proc_entry(cd->name, sn->proc_net_rpc);
}
#ifdef CONFIG_PROC_FS
-static int create_cache_proc_entries(struct cache_detail *cd)
+static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
struct proc_dir_entry *p;
+ struct sunrpc_net *sn;
- cd->u.procfs.proc_ent = proc_mkdir(cd->name, proc_net_rpc);
+ sn = net_generic(net, sunrpc_net_id);
+ cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
if (cd->u.procfs.proc_ent == NULL)
goto out_nomem;
cd->u.procfs.channel_ent = NULL;
@@ -1488,11 +1592,11 @@ static int create_cache_proc_entries(struct cache_detail *cd)
}
return 0;
out_nomem:
- remove_cache_proc_entries(cd);
+ remove_cache_proc_entries(cd, net);
return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
-static int create_cache_proc_entries(struct cache_detail *cd)
+static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
return 0;
}
@@ -1503,23 +1607,33 @@ void __init cache_initialize(void)
INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean);
}
-int cache_register(struct cache_detail *cd)
+int cache_register_net(struct cache_detail *cd, struct net *net)
{
int ret;
sunrpc_init_cache_detail(cd);
- ret = create_cache_proc_entries(cd);
+ ret = create_cache_proc_entries(cd, net);
if (ret)
sunrpc_destroy_cache_detail(cd);
return ret;
}
+
+int cache_register(struct cache_detail *cd)
+{
+ return cache_register_net(cd, &init_net);
+}
EXPORT_SYMBOL_GPL(cache_register);
-void cache_unregister(struct cache_detail *cd)
+void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
- remove_cache_proc_entries(cd);
+ remove_cache_proc_entries(cd, net);
sunrpc_destroy_cache_detail(cd);
}
+
+void cache_unregister(struct cache_detail *cd)
+{
+ cache_unregister_net(cd, &init_net);
+}
EXPORT_SYMBOL_GPL(cache_unregister);
static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index fa5549079d79..9dab9573be41 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -284,6 +284,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
struct rpc_xprt *xprt;
struct rpc_clnt *clnt;
struct xprt_create xprtargs = {
+ .net = args->net,
.ident = args->protocol,
.srcaddr = args->saddress,
.dstaddr = args->address,
@@ -1675,7 +1676,7 @@ rpc_verify_header(struct rpc_task *task)
rpcauth_invalcred(task);
/* Ensure we obtain a new XID! */
xprt_release(task);
- task->tk_action = call_refresh;
+ task->tk_action = call_reserve;
goto out_retry;
case RPC_AUTH_BADCRED:
case RPC_AUTH_BADVERF:
diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h
new file mode 100644
index 000000000000..d013bf211cae
--- /dev/null
+++ b/net/sunrpc/netns.h
@@ -0,0 +1,19 @@
+#ifndef __SUNRPC_NETNS_H__
+#define __SUNRPC_NETNS_H__
+
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+struct cache_detail;
+
+struct sunrpc_net {
+ struct proc_dir_entry *proc_net_rpc;
+ struct cache_detail *ip_map_cache;
+};
+
+extern int sunrpc_net_id;
+
+int ip_map_cache_create(struct net *);
+void ip_map_cache_destroy(struct net *);
+
+#endif
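
struct sunrpc_net is the per-network-namespace state this series introduces; sunrpc_syms.c below registers it with .id/.size so the core allocates one instance per namespace, and the stats.c and svcauth_unix.c hunks fetch it with net_generic(). The general pattern, sketched with invented names rather than the sunrpc specifics, is:

	#include <net/net_namespace.h>
	#include <net/netns/generic.h>

	static int example_net_id;

	struct example_net {
		int users;		/* per-namespace state */
	};

	static __net_init int example_init_net(struct net *net)
	{
		struct example_net *en = net_generic(net, example_net_id);

		en->users = 0;		/* set up namespace-local data */
		return 0;
	}

	static __net_exit void example_exit_net(struct net *net)
	{
		/* tear down namespace-local data */
	}

	static struct pernet_operations example_net_ops = {
		.init = example_init_net,
		.exit = example_exit_net,
		.id   = &example_net_id,
		.size = sizeof(struct example_net),
	};

register_pernet_subsys(&example_net_ops) then runs the init hook for every existing and future namespace, which is how sunrpc_init_net() gets wired up further down.
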
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 52f252432144..10a17a37ec4e 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -28,7 +28,7 @@
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/cache.h>
-static struct vfsmount *rpc_mount __read_mostly;
+static struct vfsmount *rpc_mnt __read_mostly;
static int rpc_mount_count;
static struct file_system_type rpc_pipe_fs_type;
@@ -417,16 +417,16 @@ struct vfsmount *rpc_get_mount(void)
{
int err;
- err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mount, &rpc_mount_count);
+ err = simple_pin_fs(&rpc_pipe_fs_type, &rpc_mnt, &rpc_mount_count);
if (err != 0)
return ERR_PTR(err);
- return rpc_mount;
+ return rpc_mnt;
}
EXPORT_SYMBOL_GPL(rpc_get_mount);
void rpc_put_mount(void)
{
- simple_release_fs(&rpc_mount, &rpc_mount_count);
+ simple_release_fs(&rpc_mnt, &rpc_mount_count);
}
EXPORT_SYMBOL_GPL(rpc_put_mount);
@@ -445,6 +445,7 @@ rpc_get_inode(struct super_block *sb, umode_t mode)
struct inode *inode = new_inode(sb);
if (!inode)
return NULL;
+ inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
switch(mode & S_IFMT) {
@@ -1017,17 +1018,17 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
return 0;
}
-static int
-rpc_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+static struct dentry *
+rpc_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
- return get_sb_single(fs_type, flags, data, rpc_fill_super, mnt);
+ return mount_single(fs_type, flags, data, rpc_fill_super);
}
static struct file_system_type rpc_pipe_fs_type = {
.owner = THIS_MODULE,
.name = "rpc_pipefs",
- .get_sb = rpc_get_sb,
+ .mount = rpc_mount,
.kill_sb = kill_litter_super,
};
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index dac219a56ae1..fa6d7ca2c851 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -177,6 +177,7 @@ static DEFINE_MUTEX(rpcb_create_local_mutex);
static int rpcb_create_local(void)
{
struct rpc_create_args args = {
+ .net = &init_net,
.protocol = XPRT_TRANSPORT_TCP,
.address = (struct sockaddr *)&rpcb_inaddr_loopback,
.addrsize = sizeof(rpcb_inaddr_loopback),
@@ -211,8 +212,9 @@ static int rpcb_create_local(void)
*/
clnt4 = rpc_bind_new_program(clnt, &rpcb_program, RPCBVERS_4);
if (IS_ERR(clnt4)) {
- dprintk("RPC: failed to create local rpcbind v4 "
- "cleint (errno %ld).\n", PTR_ERR(clnt4));
+ dprintk("RPC: failed to bind second program to "
+ "rpcbind v4 client (errno %ld).\n",
+ PTR_ERR(clnt4));
clnt4 = NULL;
}
@@ -228,6 +230,7 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
size_t salen, int proto, u32 version)
{
struct rpc_create_args args = {
+ .net = &init_net,
.protocol = proto,
.address = srvaddr,
.addrsize = salen,
@@ -247,7 +250,7 @@ static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
((struct sockaddr_in6 *)srvaddr)->sin6_port = htons(RPCBIND_PORT);
break;
default:
- return NULL;
+ return ERR_PTR(-EAFNOSUPPORT);
}
return rpc_create(&args);
@@ -475,57 +478,6 @@ int rpcb_v4_register(const u32 program, const u32 version,
return -EAFNOSUPPORT;
}
-/**
- * rpcb_getport_sync - obtain the port for an RPC service on a given host
- * @sin: address of remote peer
- * @prog: RPC program number to bind
- * @vers: RPC version number to bind
- * @prot: transport protocol to use to make this request
- *
- * Return value is the requested advertised port number,
- * or a negative errno value.
- *
- * Called from outside the RPC client in a synchronous task context.
- * Uses default timeout parameters specified by underlying transport.
- *
- * XXX: Needs to support IPv6
- */
-int rpcb_getport_sync(struct sockaddr_in *sin, u32 prog, u32 vers, int prot)
-{
- struct rpcbind_args map = {
- .r_prog = prog,
- .r_vers = vers,
- .r_prot = prot,
- .r_port = 0,
- };
- struct rpc_message msg = {
- .rpc_proc = &rpcb_procedures2[RPCBPROC_GETPORT],
- .rpc_argp = &map,
- .rpc_resp = &map,
- };
- struct rpc_clnt *rpcb_clnt;
- int status;
-
- dprintk("RPC: %s(%pI4, %u, %u, %d)\n",
- __func__, &sin->sin_addr.s_addr, prog, vers, prot);
-
- rpcb_clnt = rpcb_create(NULL, (struct sockaddr *)sin,
- sizeof(*sin), prot, RPCBVERS_2);
- if (IS_ERR(rpcb_clnt))
- return PTR_ERR(rpcb_clnt);
-
- status = rpc_call_sync(rpcb_clnt, &msg, 0);
- rpc_shutdown_client(rpcb_clnt);
-
- if (status >= 0) {
- if (map.r_port != 0)
- return map.r_port;
- status = -EACCES;
- }
- return status;
-}
-EXPORT_SYMBOL_GPL(rpcb_getport_sync);
-
static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt, struct rpcbind_args *map, struct rpc_procinfo *proc)
{
struct rpc_message msg = {
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index aa5dbda6608c..243fc09b164e 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -908,7 +908,7 @@ static int rpciod_start(void)
* Create the rpciod thread and wait for it to start.
*/
dprintk("RPC: creating workqueue rpciod\n");
- wq = create_workqueue("rpciod");
+ wq = alloc_workqueue("rpciod", WQ_RESCUER, 0);
rpciod_workqueue = wq;
return rpciod_workqueue != NULL;
}
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index ea1046f3f9a3..f71a73107ae9 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -22,11 +22,10 @@
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/metrics.h>
-#include <net/net_namespace.h>
-#define RPCDBG_FACILITY RPCDBG_MISC
+#include "netns.h"
-struct proc_dir_entry *proc_net_rpc = NULL;
+#define RPCDBG_FACILITY RPCDBG_MISC
/*
* Get RPC client stats
@@ -218,10 +217,11 @@ EXPORT_SYMBOL_GPL(rpc_print_iostats);
static inline struct proc_dir_entry *
do_register(const char *name, void *data, const struct file_operations *fops)
{
- rpc_proc_init();
- dprintk("RPC: registering /proc/net/rpc/%s\n", name);
+ struct sunrpc_net *sn;
- return proc_create_data(name, 0, proc_net_rpc, fops, data);
+ dprintk("RPC: registering /proc/net/rpc/%s\n", name);
+ sn = net_generic(&init_net, sunrpc_net_id);
+ return proc_create_data(name, 0, sn->proc_net_rpc, fops, data);
}
struct proc_dir_entry *
@@ -234,7 +234,10 @@ EXPORT_SYMBOL_GPL(rpc_proc_register);
void
rpc_proc_unregister(const char *name)
{
- remove_proc_entry(name, proc_net_rpc);
+ struct sunrpc_net *sn;
+
+ sn = net_generic(&init_net, sunrpc_net_id);
+ remove_proc_entry(name, sn->proc_net_rpc);
}
EXPORT_SYMBOL_GPL(rpc_proc_unregister);
@@ -248,25 +251,29 @@ EXPORT_SYMBOL_GPL(svc_proc_register);
void
svc_proc_unregister(const char *name)
{
- remove_proc_entry(name, proc_net_rpc);
+ struct sunrpc_net *sn;
+
+ sn = net_generic(&init_net, sunrpc_net_id);
+ remove_proc_entry(name, sn->proc_net_rpc);
}
EXPORT_SYMBOL_GPL(svc_proc_unregister);
-void
-rpc_proc_init(void)
+int rpc_proc_init(struct net *net)
{
+ struct sunrpc_net *sn;
+
dprintk("RPC: registering /proc/net/rpc\n");
- if (!proc_net_rpc)
- proc_net_rpc = proc_mkdir("rpc", init_net.proc_net);
+ sn = net_generic(net, sunrpc_net_id);
+ sn->proc_net_rpc = proc_mkdir("rpc", net->proc_net);
+ if (sn->proc_net_rpc == NULL)
+ return -ENOMEM;
+
+ return 0;
}
-void
-rpc_proc_exit(void)
+void rpc_proc_exit(struct net *net)
{
dprintk("RPC: unregistering /proc/net/rpc\n");
- if (proc_net_rpc) {
- proc_net_rpc = NULL;
- remove_proc_entry("rpc", init_net.proc_net);
- }
+ remove_proc_entry("rpc", net->proc_net);
}
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index c0d085013a2b..9d0809160994 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -22,7 +22,44 @@
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/xprtsock.h>
-extern struct cache_detail ip_map_cache, unix_gid_cache;
+#include "netns.h"
+
+int sunrpc_net_id;
+
+static __net_init int sunrpc_init_net(struct net *net)
+{
+ int err;
+
+ err = rpc_proc_init(net);
+ if (err)
+ goto err_proc;
+
+ err = ip_map_cache_create(net);
+ if (err)
+ goto err_ipmap;
+
+ return 0;
+
+err_ipmap:
+ rpc_proc_exit(net);
+err_proc:
+ return err;
+}
+
+static __net_exit void sunrpc_exit_net(struct net *net)
+{
+ ip_map_cache_destroy(net);
+ rpc_proc_exit(net);
+}
+
+static struct pernet_operations sunrpc_net_ops = {
+ .init = sunrpc_init_net,
+ .exit = sunrpc_exit_net,
+ .id = &sunrpc_net_id,
+ .size = sizeof(struct sunrpc_net),
+};
+
+extern struct cache_detail unix_gid_cache;
extern void cleanup_rpcb_clnt(void);
@@ -38,18 +75,22 @@ init_sunrpc(void)
err = rpcauth_init_module();
if (err)
goto out3;
+
+ cache_initialize();
+
+ err = register_pernet_subsys(&sunrpc_net_ops);
+ if (err)
+ goto out4;
#ifdef RPC_DEBUG
rpc_register_sysctl();
#endif
-#ifdef CONFIG_PROC_FS
- rpc_proc_init();
-#endif
- cache_initialize();
- cache_register(&ip_map_cache);
cache_register(&unix_gid_cache);
svc_init_xprt_sock(); /* svc sock transport */
init_socket_xprt(); /* clnt sock transport */
return 0;
+
+out4:
+ rpcauth_remove_module();
out3:
rpc_destroy_mempool();
out2:
@@ -67,14 +108,11 @@ cleanup_sunrpc(void)
svc_cleanup_xprt_sock();
unregister_rpc_pipefs();
rpc_destroy_mempool();
- cache_unregister(&ip_map_cache);
cache_unregister(&unix_gid_cache);
+ unregister_pernet_subsys(&sunrpc_net_ops);
#ifdef RPC_DEBUG
rpc_unregister_sysctl();
#endif
-#ifdef CONFIG_PROC_FS
- rpc_proc_exit();
-#endif
rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
MODULE_LICENSE("GPL");
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index d9017d64597e..6359c42c4941 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1055,6 +1055,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
goto err_bad;
case SVC_DENIED:
goto err_bad_auth;
+ case SVC_CLOSE:
+ if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
+ svc_close_xprt(rqstp->rq_xprt);
case SVC_DROP:
goto dropit;
case SVC_COMPLETE:
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index cbc084939dd8..c82fe739fbdc 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -100,16 +100,14 @@ EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);
*/
int svc_print_xprts(char *buf, int maxlen)
{
- struct list_head *le;
+ struct svc_xprt_class *xcl;
char tmpstr[80];
int len = 0;
buf[0] = '\0';
spin_lock(&svc_xprt_class_lock);
- list_for_each(le, &svc_xprt_class_list) {
+ list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
int slen;
- struct svc_xprt_class *xcl =
- list_entry(le, struct svc_xprt_class, xcl_list);
sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
slen = strlen(tmpstr);
@@ -128,9 +126,9 @@ static void svc_xprt_free(struct kref *kref)
struct svc_xprt *xprt =
container_of(kref, struct svc_xprt, xpt_ref);
struct module *owner = xprt->xpt_class->xcl_owner;
- if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags) &&
- xprt->xpt_auth_cache != NULL)
- svcauth_unix_info_release(xprt->xpt_auth_cache);
+ if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
+ svcauth_unix_info_release(xprt);
+ put_net(xprt->xpt_net);
xprt->xpt_ops->xpo_free(xprt);
module_put(owner);
}
@@ -156,15 +154,18 @@ void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
INIT_LIST_HEAD(&xprt->xpt_list);
INIT_LIST_HEAD(&xprt->xpt_ready);
INIT_LIST_HEAD(&xprt->xpt_deferred);
+ INIT_LIST_HEAD(&xprt->xpt_users);
mutex_init(&xprt->xpt_mutex);
spin_lock_init(&xprt->xpt_lock);
set_bit(XPT_BUSY, &xprt->xpt_flags);
rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
+ xprt->xpt_net = get_net(&init_net);
}
EXPORT_SYMBOL_GPL(svc_xprt_init);
static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
struct svc_serv *serv,
+ struct net *net,
const int family,
const unsigned short port,
int flags)
@@ -199,12 +200,12 @@ static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl,
return ERR_PTR(-EAFNOSUPPORT);
}
- return xcl->xcl_ops->xpo_create(serv, sap, len, flags);
+ return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags);
}
int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
- const int family, const unsigned short port,
- int flags)
+ struct net *net, const int family,
+ const unsigned short port, int flags)
{
struct svc_xprt_class *xcl;
@@ -220,7 +221,7 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
goto err;
spin_unlock(&svc_xprt_class_lock);
- newxprt = __svc_xpo_create(xcl, serv, family, port, flags);
+ newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags);
if (IS_ERR(newxprt)) {
module_put(xcl->xcl_owner);
return PTR_ERR(newxprt);
@@ -329,12 +330,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
"svc_xprt_enqueue: "
"threads and transports both waiting??\n");
- if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
- /* Don't enqueue dead transports */
- dprintk("svc: transport %p is dead, not enqueued\n", xprt);
- goto out_unlock;
- }
-
pool->sp_stats.packets++;
/* Mark transport as busy. It will remain in this state until
@@ -651,6 +646,11 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
if (signalled() || kthread_should_stop())
return -EINTR;
+ /* Normally we will wait up to 5 seconds for any required
+ * cache information to be provided.
+ */
+ rqstp->rq_chandle.thread_wait = 5*HZ;
+
spin_lock_bh(&pool->sp_lock);
xprt = svc_xprt_dequeue(pool);
if (xprt) {
@@ -658,6 +658,12 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
svc_xprt_get(xprt);
rqstp->rq_reserved = serv->sv_max_mesg;
atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
+
+ /* As there is a shortage of threads and this request
+ * had to be queued, don't allow the thread to wait so
+ * long for cache updates.
+ */
+ rqstp->rq_chandle.thread_wait = 1*HZ;
} else {
/* No data pending. Go to sleep */
svc_thread_enqueue(pool, rqstp);
@@ -868,6 +874,19 @@ static void svc_age_temp_xprts(unsigned long closure)
mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}
+static void call_xpt_users(struct svc_xprt *xprt)
+{
+ struct svc_xpt_user *u;
+
+ spin_lock(&xprt->xpt_lock);
+ while (!list_empty(&xprt->xpt_users)) {
+ u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
+ list_del(&u->list);
+ u->callback(u);
+ }
+ spin_unlock(&xprt->xpt_lock);
+}
+
/*
* Remove a dead transport
*/
@@ -878,7 +897,7 @@ void svc_delete_xprt(struct svc_xprt *xprt)
/* Only do this once */
if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
- return;
+ BUG();
dprintk("svc: svc_delete_xprt(%p)\n", xprt);
xprt->xpt_ops->xpo_detach(xprt);
@@ -900,6 +919,7 @@ void svc_delete_xprt(struct svc_xprt *xprt)
while ((dr = svc_deferred_dequeue(xprt)) != NULL)
kfree(dr);
+ call_xpt_users(xprt);
svc_xprt_put(xprt);
}
@@ -910,10 +930,7 @@ void svc_close_xprt(struct svc_xprt *xprt)
/* someone else will have to effect the close */
return;
- svc_xprt_get(xprt);
svc_delete_xprt(xprt);
- clear_bit(XPT_BUSY, &xprt->xpt_flags);
- svc_xprt_put(xprt);
}
EXPORT_SYMBOL_GPL(svc_close_xprt);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 207311610988..560677d187f1 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -18,6 +18,8 @@
#include <linux/sunrpc/clnt.h>
+#include "netns.h"
+
/*
* AUTHUNIX and AUTHNULL credentials are both handled here.
* AUTHNULL is treated just like AUTHUNIX except that the uid/gid
@@ -92,7 +94,6 @@ struct ip_map {
struct unix_domain *m_client;
int m_add_change;
};
-static struct cache_head *ip_table[IP_HASHMAX];
static void ip_map_put(struct kref *kref)
{
@@ -178,8 +179,8 @@ static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
return sunrpc_cache_pipe_upcall(cd, h, ip_map_request);
}
-static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr);
-static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
+static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr);
+static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
static int ip_map_parse(struct cache_detail *cd,
char *mesg, int mlen)
@@ -219,10 +220,9 @@ static int ip_map_parse(struct cache_detail *cd,
switch (address.sa.sa_family) {
case AF_INET:
/* Form a mapped IPv4 address in sin6 */
- memset(&sin6, 0, sizeof(sin6));
sin6.sin6_family = AF_INET6;
- sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
- sin6.sin6_addr.s6_addr32[3] = address.s4.sin_addr.s_addr;
+ ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
+ &sin6.sin6_addr);
break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
@@ -249,9 +249,9 @@ static int ip_map_parse(struct cache_detail *cd,
dom = NULL;
/* IPv6 scope IDs are ignored for now */
- ipmp = ip_map_lookup(class, &sin6.sin6_addr);
+ ipmp = __ip_map_lookup(cd, class, &sin6.sin6_addr);
if (ipmp) {
- err = ip_map_update(ipmp,
+ err = __ip_map_update(cd, ipmp,
container_of(dom, struct unix_domain, h),
expiry);
} else
@@ -294,29 +294,15 @@ static int ip_map_show(struct seq_file *m,
}
-struct cache_detail ip_map_cache = {
- .owner = THIS_MODULE,
- .hash_size = IP_HASHMAX,
- .hash_table = ip_table,
- .name = "auth.unix.ip",
- .cache_put = ip_map_put,
- .cache_upcall = ip_map_upcall,
- .cache_parse = ip_map_parse,
- .cache_show = ip_map_show,
- .match = ip_map_match,
- .init = ip_map_init,
- .update = update,
- .alloc = ip_map_alloc,
-};
-
-static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr)
+static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
+ struct in6_addr *addr)
{
struct ip_map ip;
struct cache_head *ch;
strcpy(ip.m_class, class);
ipv6_addr_copy(&ip.m_addr, addr);
- ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h,
+ ch = sunrpc_cache_lookup(cd, &ip.h,
hash_str(class, IP_HASHBITS) ^
hash_ip6(*addr));
@@ -326,7 +312,17 @@ static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr)
return NULL;
}
-static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry)
+static inline struct ip_map *ip_map_lookup(struct net *net, char *class,
+ struct in6_addr *addr)
+{
+ struct sunrpc_net *sn;
+
+ sn = net_generic(net, sunrpc_net_id);
+ return __ip_map_lookup(sn->ip_map_cache, class, addr);
+}
+
+static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
+ struct unix_domain *udom, time_t expiry)
{
struct ip_map ip;
struct cache_head *ch;
@@ -344,17 +340,25 @@ static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t ex
ip.m_add_change++;
}
ip.h.expiry_time = expiry;
- ch = sunrpc_cache_update(&ip_map_cache,
- &ip.h, &ipm->h,
+ ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
hash_str(ipm->m_class, IP_HASHBITS) ^
hash_ip6(ipm->m_addr));
if (!ch)
return -ENOMEM;
- cache_put(ch, &ip_map_cache);
+ cache_put(ch, cd);
return 0;
}
-int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom)
+static inline int ip_map_update(struct net *net, struct ip_map *ipm,
+ struct unix_domain *udom, time_t expiry)
+{
+ struct sunrpc_net *sn;
+
+ sn = net_generic(net, sunrpc_net_id);
+ return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
+}
+
+int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom)
{
struct unix_domain *udom;
struct ip_map *ipmp;
@@ -362,10 +366,10 @@ int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom)
if (dom->flavour != &svcauth_unix)
return -EINVAL;
udom = container_of(dom, struct unix_domain, h);
- ipmp = ip_map_lookup("nfsd", addr);
+ ipmp = ip_map_lookup(net, "nfsd", addr);
if (ipmp)
- return ip_map_update(ipmp, udom, NEVER);
+ return ip_map_update(net, ipmp, udom, NEVER);
else
return -ENOMEM;
}
@@ -383,16 +387,18 @@ int auth_unix_forget_old(struct auth_domain *dom)
}
EXPORT_SYMBOL_GPL(auth_unix_forget_old);
-struct auth_domain *auth_unix_lookup(struct in6_addr *addr)
+struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr)
{
struct ip_map *ipm;
struct auth_domain *rv;
+ struct sunrpc_net *sn;
- ipm = ip_map_lookup("nfsd", addr);
+ sn = net_generic(net, sunrpc_net_id);
+ ipm = ip_map_lookup(net, "nfsd", addr);
if (!ipm)
return NULL;
- if (cache_check(&ip_map_cache, &ipm->h, NULL))
+ if (cache_check(sn->ip_map_cache, &ipm->h, NULL))
return NULL;
if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) {
@@ -403,22 +409,29 @@ struct auth_domain *auth_unix_lookup(struct in6_addr *addr)
rv = &ipm->m_client->h;
kref_get(&rv->ref);
}
- cache_put(&ipm->h, &ip_map_cache);
+ cache_put(&ipm->h, sn->ip_map_cache);
return rv;
}
EXPORT_SYMBOL_GPL(auth_unix_lookup);
void svcauth_unix_purge(void)
{
- cache_purge(&ip_map_cache);
+ struct net *net;
+
+ for_each_net(net) {
+ struct sunrpc_net *sn;
+
+ sn = net_generic(net, sunrpc_net_id);
+ cache_purge(sn->ip_map_cache);
+ }
}
EXPORT_SYMBOL_GPL(svcauth_unix_purge);
static inline struct ip_map *
-ip_map_cached_get(struct svc_rqst *rqstp)
+ip_map_cached_get(struct svc_xprt *xprt)
{
struct ip_map *ipm = NULL;
- struct svc_xprt *xprt = rqstp->rq_xprt;
+ struct sunrpc_net *sn;
if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
spin_lock(&xprt->xpt_lock);
@@ -430,9 +443,10 @@ ip_map_cached_get(struct svc_rqst *rqstp)
* remembered, e.g. by a second mount from the
* same IP address.
*/
+ sn = net_generic(xprt->xpt_net, sunrpc_net_id);
xprt->xpt_auth_cache = NULL;
spin_unlock(&xprt->xpt_lock);
- cache_put(&ipm->h, &ip_map_cache);
+ cache_put(&ipm->h, sn->ip_map_cache);
return NULL;
}
cache_get(&ipm->h);
@@ -443,10 +457,8 @@ ip_map_cached_get(struct svc_rqst *rqstp)
}
static inline void
-ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
+ip_map_cached_put(struct svc_xprt *xprt, struct ip_map *ipm)
{
- struct svc_xprt *xprt = rqstp->rq_xprt;
-
if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
spin_lock(&xprt->xpt_lock);
if (xprt->xpt_auth_cache == NULL) {
@@ -456,15 +468,26 @@ ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
}
spin_unlock(&xprt->xpt_lock);
}
- if (ipm)
- cache_put(&ipm->h, &ip_map_cache);
+ if (ipm) {
+ struct sunrpc_net *sn;
+
+ sn = net_generic(xprt->xpt_net, sunrpc_net_id);
+ cache_put(&ipm->h, sn->ip_map_cache);
+ }
}
void
-svcauth_unix_info_release(void *info)
+svcauth_unix_info_release(struct svc_xprt *xpt)
{
- struct ip_map *ipm = info;
- cache_put(&ipm->h, &ip_map_cache);
+ struct ip_map *ipm;
+
+ ipm = xpt->xpt_auth_cache;
+ if (ipm != NULL) {
+ struct sunrpc_net *sn;
+
+ sn = net_generic(xpt->xpt_net, sunrpc_net_id);
+ cache_put(&ipm->h, sn->ip_map_cache);
+ }
}
/****************************************************************************
@@ -674,6 +697,8 @@ static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
switch (ret) {
case -ENOENT:
return ERR_PTR(-ENOENT);
+ case -ETIMEDOUT:
+ return ERR_PTR(-ESHUTDOWN);
case 0:
gi = get_group_info(ug->gi);
cache_put(&ug->h, &unix_gid_cache);
@@ -691,6 +716,9 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
struct ip_map *ipm;
struct group_info *gi;
struct svc_cred *cred = &rqstp->rq_cred;
+ struct svc_xprt *xprt = rqstp->rq_xprt;
+ struct net *net = xprt->xpt_net;
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
switch (rqstp->rq_addr.ss_family) {
case AF_INET:
@@ -709,26 +737,27 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
if (rqstp->rq_proc == 0)
return SVC_OK;
- ipm = ip_map_cached_get(rqstp);
+ ipm = ip_map_cached_get(xprt);
if (ipm == NULL)
- ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class,
+ ipm = __ip_map_lookup(sn->ip_map_cache, rqstp->rq_server->sv_program->pg_class,
&sin6->sin6_addr);
if (ipm == NULL)
return SVC_DENIED;
- switch (cache_check(&ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
+ switch (cache_check(sn->ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
default:
BUG();
- case -EAGAIN:
case -ETIMEDOUT:
+ return SVC_CLOSE;
+ case -EAGAIN:
return SVC_DROP;
case -ENOENT:
return SVC_DENIED;
case 0:
rqstp->rq_client = &ipm->m_client->h;
kref_get(&rqstp->rq_client->ref);
- ip_map_cached_put(rqstp, ipm);
+ ip_map_cached_put(xprt, ipm);
break;
}
@@ -736,6 +765,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
switch (PTR_ERR(gi)) {
case -EAGAIN:
return SVC_DROP;
+ case -ESHUTDOWN:
+ return SVC_CLOSE;
case -ENOENT:
break;
default:
@@ -776,7 +807,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
cred->cr_gid = (gid_t) -1;
cred->cr_group_info = groups_alloc(0);
if (cred->cr_group_info == NULL)
- return SVC_DROP; /* kmalloc failure - client must retry */
+ return SVC_CLOSE; /* kmalloc failure - client must retry */
/* Put NULL verifier */
svc_putnl(resv, RPC_AUTH_NULL);
@@ -840,7 +871,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
goto badcred;
cred->cr_group_info = groups_alloc(slen);
if (cred->cr_group_info == NULL)
- return SVC_DROP;
+ return SVC_CLOSE;
for (i = 0; i < slen; i++)
GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
@@ -886,3 +917,56 @@ struct auth_ops svcauth_unix = {
.set_client = svcauth_unix_set_client,
};
+int ip_map_cache_create(struct net *net)
+{
+ int err = -ENOMEM;
+ struct cache_detail *cd;
+ struct cache_head **tbl;
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+
+ cd = kzalloc(sizeof(struct cache_detail), GFP_KERNEL);
+ if (cd == NULL)
+ goto err_cd;
+
+ tbl = kzalloc(IP_HASHMAX * sizeof(struct cache_head *), GFP_KERNEL);
+ if (tbl == NULL)
+ goto err_tbl;
+
+ cd->owner = THIS_MODULE,
+ cd->hash_size = IP_HASHMAX,
+ cd->hash_table = tbl,
+ cd->name = "auth.unix.ip",
+ cd->cache_put = ip_map_put,
+ cd->cache_upcall = ip_map_upcall,
+ cd->cache_parse = ip_map_parse,
+ cd->cache_show = ip_map_show,
+ cd->match = ip_map_match,
+ cd->init = ip_map_init,
+ cd->update = update,
+ cd->alloc = ip_map_alloc,
+
+ err = cache_register_net(cd, net);
+ if (err)
+ goto err_reg;
+
+ sn->ip_map_cache = cd;
+ return 0;
+
+err_reg:
+ kfree(tbl);
+err_tbl:
+ kfree(cd);
+err_cd:
+ return err;
+}
+
+void ip_map_cache_destroy(struct net *net)
+{
+ struct sunrpc_net *sn;
+
+ sn = net_generic(net, sunrpc_net_id);
+ cache_purge(sn->ip_map_cache);
+ cache_unregister_net(sn->ip_map_cache, net);
+ kfree(sn->ip_map_cache->hash_table);
+ kfree(sn->ip_map_cache);
+}
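
The create/destroy pair above is intended to be driven from network-namespace setup and teardown. A minimal sketch of that wiring, assuming the usual pernet_operations hookup (the registration site is not part of this hunk, so the function and variable names are illustrative; sunrpc_net_id and struct sunrpc_net are the existing sunrpc per-net pieces):

#include <net/net_namespace.h>
#include <net/netns/generic.h>

static __net_init int example_sunrpc_net_init(struct net *net)
{
	/* one auth.unix.ip cache per namespace */
	return ip_map_cache_create(net);
}

static __net_exit void example_sunrpc_net_exit(struct net *net)
{
	ip_map_cache_destroy(net);
}

static struct pernet_operations example_sunrpc_net_ops = {
	.init = example_sunrpc_net_init,
	.exit = example_sunrpc_net_exit,
	.id   = &sunrpc_net_id,
	.size = sizeof(struct sunrpc_net),
};

/* registered once at module load with register_pernet_subsys(&example_sunrpc_net_ops) */
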
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 7e534dd09077..07919e16be3e 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -64,7 +64,8 @@ static void svc_tcp_sock_detach(struct svc_xprt *);
static void svc_sock_free(struct svc_xprt *);
static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
- struct sockaddr *, int, int);
+ struct net *, struct sockaddr *,
+ int, int);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];
@@ -657,10 +658,11 @@ static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt)
}
static struct svc_xprt *svc_udp_create(struct svc_serv *serv,
+ struct net *net,
struct sockaddr *sa, int salen,
int flags)
{
- return svc_create_socket(serv, IPPROTO_UDP, sa, salen, flags);
+ return svc_create_socket(serv, IPPROTO_UDP, net, sa, salen, flags);
}
static struct svc_xprt_ops svc_udp_ops = {
@@ -1133,9 +1135,6 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
reclen = htonl(0x80000000|((xbufp->len ) - 4));
memcpy(xbufp->head[0].iov_base, &reclen, 4);
- if (test_bit(XPT_DEAD, &rqstp->rq_xprt->xpt_flags))
- return -ENOTCONN;
-
sent = svc_sendto(rqstp, &rqstp->rq_res);
if (sent != xbufp->len) {
printk(KERN_NOTICE
@@ -1178,10 +1177,11 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt)
}
static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
+ struct net *net,
struct sockaddr *sa, int salen,
int flags)
{
- return svc_create_socket(serv, IPPROTO_TCP, sa, salen, flags);
+ return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
}
static struct svc_xprt_ops svc_tcp_ops = {
@@ -1258,19 +1258,13 @@ void svc_sock_update_bufs(struct svc_serv *serv)
* The number of server threads has changed. Update
* rcvbuf and sndbuf accordingly on all sockets
*/
- struct list_head *le;
+ struct svc_sock *svsk;
spin_lock_bh(&serv->sv_lock);
- list_for_each(le, &serv->sv_permsocks) {
- struct svc_sock *svsk =
- list_entry(le, struct svc_sock, sk_xprt.xpt_list);
+ list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list)
set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
- }
- list_for_each(le, &serv->sv_tempsocks) {
- struct svc_sock *svsk =
- list_entry(le, struct svc_sock, sk_xprt.xpt_list);
+ list_for_each_entry(svsk, &serv->sv_tempsocks, sk_xprt.xpt_list)
set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
- }
spin_unlock_bh(&serv->sv_lock);
}
EXPORT_SYMBOL_GPL(svc_sock_update_bufs);
@@ -1385,6 +1379,7 @@ EXPORT_SYMBOL_GPL(svc_addsock);
*/
static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
int protocol,
+ struct net *net,
struct sockaddr *sin, int len,
int flags)
{
@@ -1421,7 +1416,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
return ERR_PTR(-EINVAL);
}
- error = sock_create_kern(family, type, protocol, &sock);
+ error = __sock_create(net, family, type, protocol, &sock, 1);
if (error < 0)
return ERR_PTR(error);
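
The one functional change in svc_create_socket() is the switch from sock_create_kern() to __sock_create(): at this point in the tree sock_create_kern() always creates the socket in init_net, while __sock_create() takes the namespace explicitly, so a service started inside a container listens in that container's namespace. A minimal sketch of the two forms, error handling elided:

static int example_make_listener(struct net *net, struct socket **sockp)
{
	/* old: the socket always ends up in init_net */
	/* return sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, sockp); */

	/* new: create it in the caller's namespace; the final 1 marks a kernel socket */
	return __sock_create(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, sockp, 1);
}
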
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index a1f82a87d34d..cd9e841e7492 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -111,6 +111,23 @@ xdr_decode_string_inplace(__be32 *p, char **sp,
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
+/**
+ * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
+ * @buf: XDR buffer where string resides
+ * @len: length of string, in bytes
+ *
+ */
+void
+xdr_terminate_string(struct xdr_buf *buf, const u32 len)
+{
+ char *kaddr;
+
+ kaddr = kmap_atomic(buf->pages[0], KM_USER0);
+ kaddr[buf->page_base + len] = '\0';
+ kunmap_atomic(kaddr, KM_USER0);
+}
+EXPORT_SYMBOL(xdr_terminate_string);
+
void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
unsigned int len)
@@ -395,24 +412,29 @@ xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
struct kvec *tail;
size_t copy;
- char *p;
unsigned int pglen = buf->page_len;
+ unsigned int tailbuf_len;
tail = buf->tail;
BUG_ON (len > pglen);
+ tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;
+
/* Shift the tail first */
- if (tail->iov_len != 0) {
- p = (char *)tail->iov_base + len;
+ if (tailbuf_len != 0) {
+ unsigned int free_space = tailbuf_len - tail->iov_len;
+
+ if (len < free_space)
+ free_space = len;
+ tail->iov_len += free_space;
+
+ copy = len;
if (tail->iov_len > len) {
- copy = tail->iov_len - len;
- memmove(p, tail->iov_base, copy);
+ char *p = (char *)tail->iov_base + len;
+ memmove(p, tail->iov_base, tail->iov_len - len);
} else
- buf->buflen -= len;
- /* Copy from the inlined pages into the tail */
- copy = len;
- if (copy > tail->iov_len)
copy = tail->iov_len;
+ /* Copy from the inlined pages into the tail */
_copy_from_pages((char *)tail->iov_base,
buf->pages, buf->page_base + pglen - len,
copy);
@@ -551,6 +573,27 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
EXPORT_SYMBOL_GPL(xdr_init_decode);
/**
+ * xdr_inline_peek - Allow read-ahead in the XDR data stream
+ * @xdr: pointer to xdr_stream struct
+ * @nbytes: number of bytes of data to decode
+ *
+ * Check if the input buffer is long enough to enable us to decode
+ * 'nbytes' more bytes of data starting at the current position.
+ * If so return the current pointer without updating the current
+ * pointer position.
+ */
+__be32 * xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes)
+{
+ __be32 *p = xdr->p;
+ __be32 *q = p + XDR_QUADLEN(nbytes);
+
+ if (unlikely(q > xdr->end || q < p))
+ return NULL;
+ return p;
+}
+EXPORT_SYMBOL_GPL(xdr_inline_peek);
+
+/**
* xdr_inline_decode - Retrieve non-page XDR data to decode
* @xdr: pointer to xdr_stream struct
* @nbytes: number of bytes of data to decode
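
xdr_inline_peek() mirrors xdr_inline_decode() but leaves the stream position untouched, which lets a decoder look at the next word (an opcode, a length) before committing to a decode path. A small usage sketch; the opcode handling is invented for illustration:

#include <linux/sunrpc/xdr.h>

static int example_peek_op(struct xdr_stream *xdr)
{
	__be32 *p;
	u32 op;

	p = xdr_inline_peek(xdr, 4);	/* look, don't consume */
	if (p == NULL)
		return -EIO;		/* not enough data buffered */
	op = be32_to_cpup(p);

	p = xdr_inline_decode(xdr, 4);	/* now actually consume the word */
	if (p == NULL)
		return -EIO;
	return op;
}
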
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 970fb00f388c..4c8f18aff7c3 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -199,8 +199,6 @@ int xprt_reserve_xprt(struct rpc_task *task)
if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
if (task == xprt->snd_task)
return 1;
- if (task == NULL)
- return 0;
goto out_sleep;
}
xprt->snd_task = task;
@@ -757,13 +755,11 @@ static void xprt_connect_status(struct rpc_task *task)
*/
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
- struct list_head *pos;
+ struct rpc_rqst *entry;
- list_for_each(pos, &xprt->recv) {
- struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list);
+ list_for_each_entry(entry, &xprt->recv, rq_list)
if (entry->rq_xid == xid)
return entry;
- }
dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
ntohl(xid));
@@ -962,6 +958,37 @@ static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
spin_unlock(&xprt->reserve_lock);
}
+struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req)
+{
+ struct rpc_xprt *xprt;
+
+ xprt = kzalloc(size, GFP_KERNEL);
+ if (xprt == NULL)
+ goto out;
+
+ xprt->max_reqs = max_req;
+ xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL);
+ if (xprt->slot == NULL)
+ goto out_free;
+
+ xprt->xprt_net = get_net(net);
+ return xprt;
+
+out_free:
+ kfree(xprt);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(xprt_alloc);
+
+void xprt_free(struct rpc_xprt *xprt)
+{
+ put_net(xprt->xprt_net);
+ kfree(xprt->slot);
+ kfree(xprt);
+}
+EXPORT_SYMBOL_GPL(xprt_free);
+
/**
* xprt_reserve - allocate an RPC request slot
* @task: RPC task requesting a slot allocation
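
xprt_alloc() and xprt_free() pull the duplicated slot-table and namespace handling out of the individual transports: the transport allocates its container structure (with the rpc_xprt embedded at offset 0) through xprt_alloc() and undoes everything, including the netns reference, with one xprt_free(). A rough sketch of the pattern the RDMA and socket transports switch to further down; the transport structure and slot count are made up:

struct example_xprt {
	struct rpc_xprt	xprt;		/* must be the first member */
	/* transport-private state ... */
};

static struct rpc_xprt *example_setup(struct xprt_create *args)
{
	struct rpc_xprt *xprt;

	xprt = xprt_alloc(args->net, sizeof(struct example_xprt), 16);
	if (xprt == NULL)
		return ERR_PTR(-ENOMEM);
	/* ... fill in ops, timeouts, destination address ... */
	return xprt;
}

static void example_destroy(struct rpc_xprt *xprt)
{
	/* frees the slot table and drops the netns reference taken in xprt_alloc() */
	xprt_free(xprt);
}
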
diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
index d718b8fa9525..09af4fab1a45 100644
--- a/net/sunrpc/xprtrdma/svc_rdma.c
+++ b/net/sunrpc/xprtrdma/svc_rdma.c
@@ -43,6 +43,7 @@
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
+#include <linux/workqueue.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/svc_rdma.h>
@@ -74,6 +75,8 @@ atomic_t rdma_stat_sq_prod;
struct kmem_cache *svc_rdma_map_cachep;
struct kmem_cache *svc_rdma_ctxt_cachep;
+struct workqueue_struct *svc_rdma_wq;
+
/*
* This function implements reading and resetting an atomic_t stat
* variable through read/write to a proc file. Any write to the file
@@ -231,7 +234,7 @@ static ctl_table svcrdma_root_table[] = {
void svc_rdma_cleanup(void)
{
dprintk("SVCRDMA Module Removed, deregister RPC RDMA transport\n");
- flush_scheduled_work();
+ destroy_workqueue(svc_rdma_wq);
if (svcrdma_table_header) {
unregister_sysctl_table(svcrdma_table_header);
svcrdma_table_header = NULL;
@@ -249,6 +252,11 @@ int svc_rdma_init(void)
dprintk("\tsq_depth : %d\n",
svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT);
dprintk("\tmax_inline : %d\n", svcrdma_max_req_size);
+
+ svc_rdma_wq = alloc_workqueue("svc_rdma", 0, 0);
+ if (!svc_rdma_wq)
+ return -ENOMEM;
+
if (!svcrdma_table_header)
svcrdma_table_header =
register_sysctl_table(svcrdma_root_table);
@@ -283,6 +291,7 @@ int svc_rdma_init(void)
kmem_cache_destroy(svc_rdma_map_cachep);
err0:
unregister_sysctl_table(svcrdma_table_header);
+ destroy_workqueue(svc_rdma_wq);
return -ENOMEM;
}
MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
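
Moving from the shared system workqueue to a private one is what allows the plain destroy_workqueue() on module unload: the module only has to drain work it queued itself, instead of flushing (and potentially deadlocking against) everything else on the global queue via flush_scheduled_work(). The lifecycle in compressed form, with illustrative names:

static struct workqueue_struct *example_wq;

static int example_module_init(void)
{
	example_wq = alloc_workqueue("example", 0, 0);
	return example_wq ? 0 : -ENOMEM;
}

static void example_module_exit(void)
{
	/* waits for outstanding items queued with queue_work(example_wq, ...) */
	destroy_workqueue(example_wq);
}
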
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 0194de814933..df67211c4baf 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -263,9 +263,9 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
frmr->page_list_len = PAGE_ALIGN(byte_count) >> PAGE_SHIFT;
for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
frmr->page_list->page_list[page_no] =
- ib_dma_map_single(xprt->sc_cm_id->device,
- page_address(rqstp->rq_arg.pages[page_no]),
- PAGE_SIZE, DMA_FROM_DEVICE);
+ ib_dma_map_page(xprt->sc_cm_id->device,
+ rqstp->rq_arg.pages[page_no], 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
if (ib_dma_mapping_error(xprt->sc_cm_id->device,
frmr->page_list->page_list[page_no]))
goto fatal_err;
@@ -309,17 +309,21 @@ static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
int count)
{
int i;
+ unsigned long off;
ctxt->count = count;
ctxt->direction = DMA_FROM_DEVICE;
for (i = 0; i < count; i++) {
ctxt->sge[i].length = 0; /* in case map fails */
if (!frmr) {
+ BUG_ON(0 == virt_to_page(vec[i].iov_base));
+ off = (unsigned long)vec[i].iov_base & ~PAGE_MASK;
ctxt->sge[i].addr =
- ib_dma_map_single(xprt->sc_cm_id->device,
- vec[i].iov_base,
- vec[i].iov_len,
- DMA_FROM_DEVICE);
+ ib_dma_map_page(xprt->sc_cm_id->device,
+ virt_to_page(vec[i].iov_base),
+ off,
+ vec[i].iov_len,
+ DMA_FROM_DEVICE);
if (ib_dma_mapping_error(xprt->sc_cm_id->device,
ctxt->sge[i].addr))
return -EINVAL;
@@ -491,6 +495,7 @@ next_sge:
printk(KERN_ERR "svcrdma: Error %d posting RDMA_READ\n",
err);
set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
+ svc_rdma_unmap_dma(ctxt);
svc_rdma_put_context(ctxt, 0);
goto out;
}
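
All of the receive-side mappings above move from ib_dma_map_single() to ib_dma_map_page(). The map_single() form needs a lowmem kernel virtual address (hence the page_address() calls being removed), while mapping the struct page with an explicit offset works for any page the server owns, highmem included. In sketch form:

static u64 example_map(struct ib_device *dev, struct page *page)
{
	/* old: ib_dma_map_single(dev, page_address(page), PAGE_SIZE, DMA_FROM_DEVICE)
	 *      - only valid when page_address() gives a real mapping
	 * new: page + offset, no kernel virtual address needed
	 */
	return ib_dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
}
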
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index b15e1ebb2bfa..249a835b703f 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -70,8 +70,8 @@
* on extra page for the RPCRMDA header.
*/
static int fast_reg_xdr(struct svcxprt_rdma *xprt,
- struct xdr_buf *xdr,
- struct svc_rdma_req_map *vec)
+ struct xdr_buf *xdr,
+ struct svc_rdma_req_map *vec)
{
int sge_no;
u32 sge_bytes;
@@ -96,21 +96,25 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
vec->count = 2;
sge_no++;
- /* Build the FRMR */
+ /* Map the XDR head */
frmr->kva = frva;
frmr->direction = DMA_TO_DEVICE;
frmr->access_flags = 0;
frmr->map_len = PAGE_SIZE;
frmr->page_list_len = 1;
+ page_off = (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
frmr->page_list->page_list[page_no] =
- ib_dma_map_single(xprt->sc_cm_id->device,
- (void *)xdr->head[0].iov_base,
- PAGE_SIZE, DMA_TO_DEVICE);
+ ib_dma_map_page(xprt->sc_cm_id->device,
+ virt_to_page(xdr->head[0].iov_base),
+ page_off,
+ PAGE_SIZE - page_off,
+ DMA_TO_DEVICE);
if (ib_dma_mapping_error(xprt->sc_cm_id->device,
frmr->page_list->page_list[page_no]))
goto fatal_err;
atomic_inc(&xprt->sc_dma_used);
+ /* Map the XDR page list */
page_off = xdr->page_base;
page_bytes = xdr->page_len + page_off;
if (!page_bytes)
@@ -128,9 +132,9 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
page_bytes -= sge_bytes;
frmr->page_list->page_list[page_no] =
- ib_dma_map_single(xprt->sc_cm_id->device,
- page_address(page),
- PAGE_SIZE, DMA_TO_DEVICE);
+ ib_dma_map_page(xprt->sc_cm_id->device,
+ page, page_off,
+ sge_bytes, DMA_TO_DEVICE);
if (ib_dma_mapping_error(xprt->sc_cm_id->device,
frmr->page_list->page_list[page_no]))
goto fatal_err;
@@ -166,8 +170,10 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;
frmr->page_list->page_list[page_no] =
- ib_dma_map_single(xprt->sc_cm_id->device, va, PAGE_SIZE,
- DMA_TO_DEVICE);
+ ib_dma_map_page(xprt->sc_cm_id->device, virt_to_page(va),
+ page_off,
+ PAGE_SIZE,
+ DMA_TO_DEVICE);
if (ib_dma_mapping_error(xprt->sc_cm_id->device,
frmr->page_list->page_list[page_no]))
goto fatal_err;
@@ -245,6 +251,35 @@ static int map_xdr(struct svcxprt_rdma *xprt,
return 0;
}
+static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
+ struct xdr_buf *xdr,
+ u32 xdr_off, size_t len, int dir)
+{
+ struct page *page;
+ dma_addr_t dma_addr;
+ if (xdr_off < xdr->head[0].iov_len) {
+ /* This offset is in the head */
+ xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
+ page = virt_to_page(xdr->head[0].iov_base);
+ } else {
+ xdr_off -= xdr->head[0].iov_len;
+ if (xdr_off < xdr->page_len) {
+ /* This offset is in the page list */
+ page = xdr->pages[xdr_off >> PAGE_SHIFT];
+ xdr_off &= ~PAGE_MASK;
+ } else {
+ /* This offset is in the tail */
+ xdr_off -= xdr->page_len;
+ xdr_off += (unsigned long)
+ xdr->tail[0].iov_base & ~PAGE_MASK;
+ page = virt_to_page(xdr->tail[0].iov_base);
+ }
+ }
+ dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
+ min_t(size_t, PAGE_SIZE, len), dir);
+ return dma_addr;
+}
+
/* Assumptions:
* - We are using FRMR
* - or -
@@ -293,10 +328,9 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
sge[sge_no].length = sge_bytes;
if (!vec->frmr) {
sge[sge_no].addr =
- ib_dma_map_single(xprt->sc_cm_id->device,
- (void *)
- vec->sge[xdr_sge_no].iov_base + sge_off,
- sge_bytes, DMA_TO_DEVICE);
+ dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
+ sge_bytes, DMA_TO_DEVICE);
+ xdr_off += sge_bytes;
if (ib_dma_mapping_error(xprt->sc_cm_id->device,
sge[sge_no].addr))
goto err;
@@ -333,6 +367,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
goto err;
return 0;
err:
+ svc_rdma_unmap_dma(ctxt);
+ svc_rdma_put_frmr(xprt, vec->frmr);
svc_rdma_put_context(ctxt, 0);
/* Fatal error, close transport */
return -EIO;
@@ -494,7 +530,8 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
* In all three cases, this function prepares the RPCRDMA header in
* sge[0], the 'type' parameter indicates the type to place in the
* RPCRDMA header, and the 'byte_count' field indicates how much of
- * the XDR to include in this RDMA_SEND.
+ * the XDR to include in this RDMA_SEND. NB: The offset of the payload
+ * to send is zero in the XDR.
*/
static int send_reply(struct svcxprt_rdma *rdma,
struct svc_rqst *rqstp,
@@ -536,23 +573,24 @@ static int send_reply(struct svcxprt_rdma *rdma,
ctxt->sge[0].lkey = rdma->sc_dma_lkey;
ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
ctxt->sge[0].addr =
- ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
- ctxt->sge[0].length, DMA_TO_DEVICE);
+ ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
+ ctxt->sge[0].length, DMA_TO_DEVICE);
if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
goto err;
atomic_inc(&rdma->sc_dma_used);
ctxt->direction = DMA_TO_DEVICE;
- /* Determine how many of our SGE are to be transmitted */
+ /* Map the payload indicated by 'byte_count' */
for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
+ int xdr_off = 0;
sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
byte_count -= sge_bytes;
if (!vec->frmr) {
ctxt->sge[sge_no].addr =
- ib_dma_map_single(rdma->sc_cm_id->device,
- vec->sge[sge_no].iov_base,
- sge_bytes, DMA_TO_DEVICE);
+ dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
+ sge_bytes, DMA_TO_DEVICE);
+ xdr_off += sge_bytes;
if (ib_dma_mapping_error(rdma->sc_cm_id->device,
ctxt->sge[sge_no].addr))
goto err;
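
dma_map_xdr() above is the piece that lets the send path map straight out of the rq_res xdr_buf: it turns a flat offset into the reply into a (page, in-page offset) pair before handing it to ib_dma_map_page(). A worked example under assumed sizes:

/* Assuming PAGE_SIZE == 4096, head[0].iov_len == 100, page_len == 8192:
 *
 *   xdr_off = 40    -> still in the head: page = virt_to_page(head.iov_base),
 *                      offset = 40 + (head.iov_base & ~PAGE_MASK)
 *   xdr_off = 4196  -> 4196 - 100 = 4096, which is < page_len:
 *                      page = xdr->pages[1], offset = 0
 *   xdr_off = 8400  -> 8400 - 100 - 8192 = 108 into the tail buffer
 */
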
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index edea15a54e51..9df1eadc912a 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -45,6 +45,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
@@ -52,6 +53,7 @@
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
+ struct net *net,
struct sockaddr *sa, int salen,
int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
@@ -89,6 +91,9 @@ struct svc_xprt_class svc_rdma_class = {
/* WR context cache. Created in svc_rdma.c */
extern struct kmem_cache *svc_rdma_ctxt_cachep;
+/* Workqueue created in svc_rdma.c */
+extern struct workqueue_struct *svc_rdma_wq;
+
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
struct svc_rdma_op_ctxt *ctxt;
@@ -120,7 +125,7 @@ void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
*/
if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
atomic_dec(&xprt->sc_dma_used);
- ib_dma_unmap_single(xprt->sc_cm_id->device,
+ ib_dma_unmap_page(xprt->sc_cm_id->device,
ctxt->sge[i].addr,
ctxt->sge[i].length,
ctxt->direction);
@@ -502,8 +507,8 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
BUG_ON(sge_no >= xprt->sc_max_sge);
page = svc_rdma_get_page();
ctxt->pages[sge_no] = page;
- pa = ib_dma_map_single(xprt->sc_cm_id->device,
- page_address(page), PAGE_SIZE,
+ pa = ib_dma_map_page(xprt->sc_cm_id->device,
+ page, 0, PAGE_SIZE,
DMA_FROM_DEVICE);
if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
goto err_put_ctxt;
@@ -511,9 +516,9 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
ctxt->sge[sge_no].addr = pa;
ctxt->sge[sge_no].length = PAGE_SIZE;
ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
+ ctxt->count = sge_no + 1;
buflen += PAGE_SIZE;
}
- ctxt->count = sge_no;
recv_wr.next = NULL;
recv_wr.sg_list = &ctxt->sge[0];
recv_wr.num_sge = ctxt->count;
@@ -529,6 +534,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
return ret;
err_put_ctxt:
+ svc_rdma_unmap_dma(ctxt);
svc_rdma_put_context(ctxt, 1);
return -ENOMEM;
}
@@ -670,6 +676,7 @@ static int rdma_cma_handler(struct rdma_cm_id *cma_id,
* Create a listening RDMA service endpoint.
*/
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
+ struct net *net,
struct sockaddr *sa, int salen,
int flags)
{
@@ -798,8 +805,8 @@ static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
if (ib_dma_mapping_error(frmr->mr->device, addr))
continue;
atomic_dec(&xprt->sc_dma_used);
- ib_dma_unmap_single(frmr->mr->device, addr, PAGE_SIZE,
- frmr->direction);
+ ib_dma_unmap_page(frmr->mr->device, addr, PAGE_SIZE,
+ frmr->direction);
}
}
@@ -1184,7 +1191,7 @@ static void svc_rdma_free(struct svc_xprt *xprt)
struct svcxprt_rdma *rdma =
container_of(xprt, struct svcxprt_rdma, sc_xprt);
INIT_WORK(&rdma->sc_work, __svc_rdma_free);
- schedule_work(&rdma->sc_work);
+ queue_work(svc_rdma_wq, &rdma->sc_work);
}
static int svc_rdma_has_wspace(struct svc_xprt *xprt)
@@ -1274,7 +1281,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
atomic_read(&xprt->sc_sq_count) <
xprt->sc_sq_depth);
if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
- return 0;
+ return -ENOTCONN;
continue;
}
/* Take a transport ref for each WR posted */
@@ -1306,7 +1313,6 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
enum rpcrdma_errcode err)
{
struct ib_send_wr err_wr;
- struct ib_sge sge;
struct page *p;
struct svc_rdma_op_ctxt *ctxt;
u32 *va;
@@ -1319,26 +1325,27 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
/* XDR encode error */
length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
+ ctxt = svc_rdma_get_context(xprt);
+ ctxt->direction = DMA_FROM_DEVICE;
+ ctxt->count = 1;
+ ctxt->pages[0] = p;
+
/* Prepare SGE for local address */
- sge.addr = ib_dma_map_single(xprt->sc_cm_id->device,
- page_address(p), PAGE_SIZE, DMA_FROM_DEVICE);
- if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
+ ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
+ p, 0, length, DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
put_page(p);
return;
}
atomic_inc(&xprt->sc_dma_used);
- sge.lkey = xprt->sc_dma_lkey;
- sge.length = length;
-
- ctxt = svc_rdma_get_context(xprt);
- ctxt->count = 1;
- ctxt->pages[0] = p;
+ ctxt->sge[0].lkey = xprt->sc_dma_lkey;
+ ctxt->sge[0].length = length;
/* Prepare SEND WR */
memset(&err_wr, 0, sizeof err_wr);
ctxt->wr_op = IB_WR_SEND;
err_wr.wr_id = (unsigned long)ctxt;
- err_wr.sg_list = &sge;
+ err_wr.sg_list = ctxt->sge;
err_wr.num_sge = 1;
err_wr.opcode = IB_WR_SEND;
err_wr.send_flags = IB_SEND_SIGNALED;
@@ -1348,9 +1355,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
if (ret) {
dprintk("svcrdma: Error %d posting send for protocol error\n",
ret);
- ib_dma_unmap_single(xprt->sc_cm_id->device,
- sge.addr, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ svc_rdma_unmap_dma(ctxt);
svc_rdma_put_context(ctxt, 1);
}
}
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index a85e866a77f7..0867070bb5ca 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -237,8 +237,7 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
dprintk("RPC: %s: called\n", __func__);
- cancel_delayed_work(&r_xprt->rdma_connect);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&r_xprt->rdma_connect);
xprt_clear_connected(xprt);
@@ -251,9 +250,7 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
xprt_rdma_free_addresses(xprt);
- kfree(xprt->slot);
- xprt->slot = NULL;
- kfree(xprt);
+ xprt_free(xprt);
dprintk("RPC: %s: returning\n", __func__);
@@ -285,23 +282,14 @@ xprt_setup_rdma(struct xprt_create *args)
return ERR_PTR(-EBADF);
}
- xprt = kzalloc(sizeof(struct rpcrdma_xprt), GFP_KERNEL);
+ xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt),
+ xprt_rdma_slot_table_entries);
if (xprt == NULL) {
dprintk("RPC: %s: couldn't allocate rpcrdma_xprt\n",
__func__);
return ERR_PTR(-ENOMEM);
}
- xprt->max_reqs = xprt_rdma_slot_table_entries;
- xprt->slot = kcalloc(xprt->max_reqs,
- sizeof(struct rpc_rqst), GFP_KERNEL);
- if (xprt->slot == NULL) {
- dprintk("RPC: %s: couldn't allocate %d slots\n",
- __func__, xprt->max_reqs);
- kfree(xprt);
- return ERR_PTR(-ENOMEM);
- }
-
/* 60 second timeout, no retries */
xprt->timeout = &xprt_rdma_default_timeout;
xprt->bind_timeout = (60U * HZ);
@@ -410,8 +398,7 @@ out3:
out2:
rpcrdma_ia_close(&new_xprt->rx_ia);
out1:
- kfree(xprt->slot);
- kfree(xprt);
+ xprt_free(xprt);
return ERR_PTR(rc);
}
@@ -460,7 +447,7 @@ xprt_rdma_connect(struct rpc_task *task)
} else {
schedule_delayed_work(&r_xprt->rdma_connect, 0);
if (!RPC_IS_ASYNC(task))
- flush_scheduled_work();
+ flush_delayed_work(&r_xprt->rdma_connect);
}
}
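
The work-item-specific calls are the point here: flush_scheduled_work() drains the entire shared queue, so it can stall on (or deadlock against) unrelated work, whereas cancel_delayed_work_sync() and flush_delayed_work() act only on this transport's own connect worker. The two idioms in isolation:

/* teardown: guarantee the connect worker is not running and will not run again */
cancel_delayed_work_sync(&r_xprt->rdma_connect);

/* synchronous connect: wait only for this one item to finish */
flush_delayed_work(&r_xprt->rdma_connect);
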
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index fe9306bf10cc..dfcab5ac65af 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -774,8 +774,7 @@ static void xs_destroy(struct rpc_xprt *xprt)
xs_close(xprt);
xs_free_peer_addresses(xprt);
- kfree(xprt->slot);
- kfree(xprt);
+ xprt_free(xprt);
module_put(THIS_MODULE);
}
@@ -1516,7 +1515,7 @@ static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
xs_update_peer_port(xprt);
}
-static unsigned short xs_get_srcport(struct sock_xprt *transport, struct socket *sock)
+static unsigned short xs_get_srcport(struct sock_xprt *transport)
{
unsigned short port = transport->srcport;
@@ -1525,7 +1524,7 @@ static unsigned short xs_get_srcport(struct sock_xprt *transport, struct socket
return port;
}
-static unsigned short xs_next_srcport(struct sock_xprt *transport, struct socket *sock, unsigned short port)
+static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
{
if (transport->srcport != 0)
transport->srcport = 0;
@@ -1535,23 +1534,18 @@ static unsigned short xs_next_srcport(struct sock_xprt *transport, struct socket
return xprt_max_resvport;
return --port;
}
-
-static int xs_bind4(struct sock_xprt *transport, struct socket *sock)
+static int xs_bind(struct sock_xprt *transport, struct socket *sock)
{
- struct sockaddr_in myaddr = {
- .sin_family = AF_INET,
- };
- struct sockaddr_in *sa;
+ struct sockaddr_storage myaddr;
int err, nloop = 0;
- unsigned short port = xs_get_srcport(transport, sock);
+ unsigned short port = xs_get_srcport(transport);
unsigned short last;
- sa = (struct sockaddr_in *)&transport->srcaddr;
- myaddr.sin_addr = sa->sin_addr;
+ memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
do {
- myaddr.sin_port = htons(port);
- err = kernel_bind(sock, (struct sockaddr *) &myaddr,
- sizeof(myaddr));
+ rpc_set_port((struct sockaddr *)&myaddr, port);
+ err = kernel_bind(sock, (struct sockaddr *)&myaddr,
+ transport->xprt.addrlen);
if (port == 0)
break;
if (err == 0) {
@@ -1559,48 +1553,23 @@ static int xs_bind4(struct sock_xprt *transport, struct socket *sock)
break;
}
last = port;
- port = xs_next_srcport(transport, sock, port);
+ port = xs_next_srcport(transport, port);
if (port > last)
nloop++;
} while (err == -EADDRINUSE && nloop != 2);
- dprintk("RPC: %s %pI4:%u: %s (%d)\n",
- __func__, &myaddr.sin_addr,
- port, err ? "failed" : "ok", err);
- return err;
-}
-
-static int xs_bind6(struct sock_xprt *transport, struct socket *sock)
-{
- struct sockaddr_in6 myaddr = {
- .sin6_family = AF_INET6,
- };
- struct sockaddr_in6 *sa;
- int err, nloop = 0;
- unsigned short port = xs_get_srcport(transport, sock);
- unsigned short last;
- sa = (struct sockaddr_in6 *)&transport->srcaddr;
- myaddr.sin6_addr = sa->sin6_addr;
- do {
- myaddr.sin6_port = htons(port);
- err = kernel_bind(sock, (struct sockaddr *) &myaddr,
- sizeof(myaddr));
- if (port == 0)
- break;
- if (err == 0) {
- transport->srcport = port;
- break;
- }
- last = port;
- port = xs_next_srcport(transport, sock, port);
- if (port > last)
- nloop++;
- } while (err == -EADDRINUSE && nloop != 2);
- dprintk("RPC: xs_bind6 %pI6:%u: %s (%d)\n",
- &myaddr.sin6_addr, port, err ? "failed" : "ok", err);
+ if (myaddr.ss_family == AF_INET)
+ dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
+ &((struct sockaddr_in *)&myaddr)->sin_addr,
+ port, err ? "failed" : "ok", err);
+ else
+ dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
+ &((struct sockaddr_in6 *)&myaddr)->sin6_addr,
+ port, err ? "failed" : "ok", err);
return err;
}
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key xs_key[2];
static struct lock_class_key xs_slock_key[2];
@@ -1622,6 +1591,18 @@ static inline void xs_reclassify_socket6(struct socket *sock)
sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
&xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
}
+
+static inline void xs_reclassify_socket(int family, struct socket *sock)
+{
+ switch (family) {
+ case AF_INET:
+ xs_reclassify_socket4(sock);
+ break;
+ case AF_INET6:
+ xs_reclassify_socket6(sock);
+ break;
+ }
+}
#else
static inline void xs_reclassify_socket4(struct socket *sock)
{
@@ -1630,8 +1611,36 @@ static inline void xs_reclassify_socket4(struct socket *sock)
static inline void xs_reclassify_socket6(struct socket *sock)
{
}
+
+static inline void xs_reclassify_socket(int family, struct socket *sock)
+{
+}
#endif
+static struct socket *xs_create_sock(struct rpc_xprt *xprt,
+ struct sock_xprt *transport, int family, int type, int protocol)
+{
+ struct socket *sock;
+ int err;
+
+ err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
+ if (err < 0) {
+ dprintk("RPC: can't create %d transport socket (%d).\n",
+ protocol, -err);
+ goto out;
+ }
+ xs_reclassify_socket(family, sock);
+
+ if (xs_bind(transport, sock)) {
+ sock_release(sock);
+ goto out;
+ }
+
+ return sock;
+out:
+ return ERR_PTR(err);
+}
+
static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
@@ -1661,82 +1670,23 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
xs_udp_do_set_buffer_size(xprt);
}
-/**
- * xs_udp_connect_worker4 - set up a UDP socket
- * @work: RPC transport to connect
- *
- * Invoked by a work queue tasklet.
- */
-static void xs_udp_connect_worker4(struct work_struct *work)
+static void xs_udp_setup_socket(struct work_struct *work)
{
struct sock_xprt *transport =
container_of(work, struct sock_xprt, connect_worker.work);
struct rpc_xprt *xprt = &transport->xprt;
struct socket *sock = transport->sock;
- int err, status = -EIO;
+ int status = -EIO;
if (xprt->shutdown)
goto out;
/* Start by resetting any existing state */
xs_reset_transport(transport);
-
- err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
- if (err < 0) {
- dprintk("RPC: can't create UDP transport socket (%d).\n", -err);
+ sock = xs_create_sock(xprt, transport,
+ xs_addr(xprt)->sa_family, SOCK_DGRAM, IPPROTO_UDP);
+ if (IS_ERR(sock))
goto out;
- }
- xs_reclassify_socket4(sock);
-
- if (xs_bind4(transport, sock)) {
- sock_release(sock);
- goto out;
- }
-
- dprintk("RPC: worker connecting xprt %p via %s to "
- "%s (port %s)\n", xprt,
- xprt->address_strings[RPC_DISPLAY_PROTO],
- xprt->address_strings[RPC_DISPLAY_ADDR],
- xprt->address_strings[RPC_DISPLAY_PORT]);
-
- xs_udp_finish_connecting(xprt, sock);
- status = 0;
-out:
- xprt_clear_connecting(xprt);
- xprt_wake_pending_tasks(xprt, status);
-}
-
-/**
- * xs_udp_connect_worker6 - set up a UDP socket
- * @work: RPC transport to connect
- *
- * Invoked by a work queue tasklet.
- */
-static void xs_udp_connect_worker6(struct work_struct *work)
-{
- struct sock_xprt *transport =
- container_of(work, struct sock_xprt, connect_worker.work);
- struct rpc_xprt *xprt = &transport->xprt;
- struct socket *sock = transport->sock;
- int err, status = -EIO;
-
- if (xprt->shutdown)
- goto out;
-
- /* Start by resetting any existing state */
- xs_reset_transport(transport);
-
- err = sock_create_kern(PF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
- if (err < 0) {
- dprintk("RPC: can't create UDP transport socket (%d).\n", -err);
- goto out;
- }
- xs_reclassify_socket6(sock);
-
- if (xs_bind6(transport, sock) < 0) {
- sock_release(sock);
- goto out;
- }
dprintk("RPC: worker connecting xprt %p via %s to "
"%s (port %s)\n", xprt,
@@ -1755,12 +1705,12 @@ out:
* We need to preserve the port number so the reply cache on the server can
* find our cached RPC replies when we get around to reconnecting.
*/
-static void xs_abort_connection(struct rpc_xprt *xprt, struct sock_xprt *transport)
+static void xs_abort_connection(struct sock_xprt *transport)
{
int result;
struct sockaddr any;
- dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt);
+ dprintk("RPC: disconnecting xprt %p to reuse port\n", transport);
/*
* Disconnect the transport socket by doing a connect operation
@@ -1770,13 +1720,13 @@ static void xs_abort_connection(struct rpc_xprt *xprt, struct sock_xprt *transpo
any.sa_family = AF_UNSPEC;
result = kernel_connect(transport->sock, &any, sizeof(any), 0);
if (!result)
- xs_sock_mark_closed(xprt);
+ xs_sock_mark_closed(&transport->xprt);
else
dprintk("RPC: AF_UNSPEC connect return code %d\n",
result);
}
-static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *transport)
+static void xs_tcp_reuse_connection(struct sock_xprt *transport)
{
unsigned int state = transport->inet->sk_state;
@@ -1799,7 +1749,7 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *tra
"sk_shutdown set to %d\n",
__func__, transport->inet->sk_shutdown);
}
- xs_abort_connection(xprt, transport);
+ xs_abort_connection(transport);
}
static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
@@ -1852,12 +1802,12 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
*
* Invoked by a work queue tasklet.
*/
-static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
- struct sock_xprt *transport,
- struct socket *(*create_sock)(struct rpc_xprt *,
- struct sock_xprt *))
+static void xs_tcp_setup_socket(struct work_struct *work)
{
+ struct sock_xprt *transport =
+ container_of(work, struct sock_xprt, connect_worker.work);
struct socket *sock = transport->sock;
+ struct rpc_xprt *xprt = &transport->xprt;
int status = -EIO;
if (xprt->shutdown)
@@ -1865,7 +1815,8 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
if (!sock) {
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
- sock = create_sock(xprt, transport);
+ sock = xs_create_sock(xprt, transport,
+ xs_addr(xprt)->sa_family, SOCK_STREAM, IPPROTO_TCP);
if (IS_ERR(sock)) {
status = PTR_ERR(sock);
goto out;
@@ -1876,7 +1827,7 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT,
&xprt->state);
/* "close" the socket, preserving the local port */
- xs_tcp_reuse_connection(xprt, transport);
+ xs_tcp_reuse_connection(transport);
if (abort_and_exit)
goto out_eagain;
@@ -1925,84 +1876,6 @@ out:
xprt_wake_pending_tasks(xprt, status);
}
-static struct socket *xs_create_tcp_sock4(struct rpc_xprt *xprt,
- struct sock_xprt *transport)
-{
- struct socket *sock;
- int err;
-
- /* start from scratch */
- err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
- if (err < 0) {
- dprintk("RPC: can't create TCP transport socket (%d).\n",
- -err);
- goto out_err;
- }
- xs_reclassify_socket4(sock);
-
- if (xs_bind4(transport, sock) < 0) {
- sock_release(sock);
- goto out_err;
- }
- return sock;
-out_err:
- return ERR_PTR(-EIO);
-}
-
-/**
- * xs_tcp_connect_worker4 - connect a TCP socket to a remote endpoint
- * @work: RPC transport to connect
- *
- * Invoked by a work queue tasklet.
- */
-static void xs_tcp_connect_worker4(struct work_struct *work)
-{
- struct sock_xprt *transport =
- container_of(work, struct sock_xprt, connect_worker.work);
- struct rpc_xprt *xprt = &transport->xprt;
-
- xs_tcp_setup_socket(xprt, transport, xs_create_tcp_sock4);
-}
-
-static struct socket *xs_create_tcp_sock6(struct rpc_xprt *xprt,
- struct sock_xprt *transport)
-{
- struct socket *sock;
- int err;
-
- /* start from scratch */
- err = sock_create_kern(PF_INET6, SOCK_STREAM, IPPROTO_TCP, &sock);
- if (err < 0) {
- dprintk("RPC: can't create TCP transport socket (%d).\n",
- -err);
- goto out_err;
- }
- xs_reclassify_socket6(sock);
-
- if (xs_bind6(transport, sock) < 0) {
- sock_release(sock);
- goto out_err;
- }
- return sock;
-out_err:
- return ERR_PTR(-EIO);
-}
-
-/**
- * xs_tcp_connect_worker6 - connect a TCP socket to a remote endpoint
- * @work: RPC transport to connect
- *
- * Invoked by a work queue tasklet.
- */
-static void xs_tcp_connect_worker6(struct work_struct *work)
-{
- struct sock_xprt *transport =
- container_of(work, struct sock_xprt, connect_worker.work);
- struct rpc_xprt *xprt = &transport->xprt;
-
- xs_tcp_setup_socket(xprt, transport, xs_create_tcp_sock6);
-}
-
/**
* xs_connect - connect a socket to a remote endpoint
* @task: address of RPC task that manages state of connect request
@@ -2262,6 +2135,31 @@ static struct rpc_xprt_ops bc_tcp_ops = {
.print_stats = xs_tcp_print_stats,
};
+static int xs_init_anyaddr(const int family, struct sockaddr *sap)
+{
+ static const struct sockaddr_in sin = {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = htonl(INADDR_ANY),
+ };
+ static const struct sockaddr_in6 sin6 = {
+ .sin6_family = AF_INET6,
+ .sin6_addr = IN6ADDR_ANY_INIT,
+ };
+
+ switch (family) {
+ case AF_INET:
+ memcpy(sap, &sin, sizeof(sin));
+ break;
+ case AF_INET6:
+ memcpy(sap, &sin6, sizeof(sin6));
+ break;
+ default:
+ dprintk("RPC: %s: Bad address family\n", __func__);
+ return -EAFNOSUPPORT;
+ }
+ return 0;
+}
+
static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
unsigned int slot_table_size)
{
@@ -2273,27 +2171,25 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
return ERR_PTR(-EBADF);
}
- new = kzalloc(sizeof(*new), GFP_KERNEL);
- if (new == NULL) {
+ xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size);
+ if (xprt == NULL) {
dprintk("RPC: xs_setup_xprt: couldn't allocate "
"rpc_xprt\n");
return ERR_PTR(-ENOMEM);
}
- xprt = &new->xprt;
-
- xprt->max_reqs = slot_table_size;
- xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL);
- if (xprt->slot == NULL) {
- kfree(xprt);
- dprintk("RPC: xs_setup_xprt: couldn't allocate slot "
- "table\n");
- return ERR_PTR(-ENOMEM);
- }
+ new = container_of(xprt, struct sock_xprt, xprt);
memcpy(&xprt->addr, args->dstaddr, args->addrlen);
xprt->addrlen = args->addrlen;
if (args->srcaddr)
memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
+ else {
+ int err;
+ err = xs_init_anyaddr(args->dstaddr->sa_family,
+ (struct sockaddr *)&new->srcaddr);
+ if (err != 0)
+ return ERR_PTR(err);
+ }
return xprt;
}
@@ -2341,7 +2237,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
xprt_set_bound(xprt);
INIT_DELAYED_WORK(&transport->connect_worker,
- xs_udp_connect_worker4);
+ xs_udp_setup_socket);
xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
break;
case AF_INET6:
@@ -2349,7 +2245,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
xprt_set_bound(xprt);
INIT_DELAYED_WORK(&transport->connect_worker,
- xs_udp_connect_worker6);
+ xs_udp_setup_socket);
xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
break;
default:
@@ -2371,8 +2267,7 @@ static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
return xprt;
ret = ERR_PTR(-EINVAL);
out_err:
- kfree(xprt->slot);
- kfree(xprt);
+ xprt_free(xprt);
return ret;
}
@@ -2416,7 +2311,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
xprt_set_bound(xprt);
INIT_DELAYED_WORK(&transport->connect_worker,
- xs_tcp_connect_worker4);
+ xs_tcp_setup_socket);
xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
break;
case AF_INET6:
@@ -2424,7 +2319,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
xprt_set_bound(xprt);
INIT_DELAYED_WORK(&transport->connect_worker,
- xs_tcp_connect_worker6);
+ xs_tcp_setup_socket);
xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
break;
default:
@@ -2447,8 +2342,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
return xprt;
ret = ERR_PTR(-EINVAL);
out_err:
- kfree(xprt->slot);
- kfree(xprt);
+ xprt_free(xprt);
return ret;
}
@@ -2507,15 +2401,10 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
goto out_err;
}
- if (xprt_bound(xprt))
- dprintk("RPC: set up xprt to %s (port %s) via %s\n",
- xprt->address_strings[RPC_DISPLAY_ADDR],
- xprt->address_strings[RPC_DISPLAY_PORT],
- xprt->address_strings[RPC_DISPLAY_PROTO]);
- else
- dprintk("RPC: set up xprt to %s (autobind) via %s\n",
- xprt->address_strings[RPC_DISPLAY_ADDR],
- xprt->address_strings[RPC_DISPLAY_PROTO]);
+ dprintk("RPC: set up xprt to %s (port %s) via %s\n",
+ xprt->address_strings[RPC_DISPLAY_ADDR],
+ xprt->address_strings[RPC_DISPLAY_PORT],
+ xprt->address_strings[RPC_DISPLAY_PROTO]);
/*
* Since we don't want connections for the backchannel, we set
@@ -2528,8 +2417,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
return xprt;
ret = ERR_PTR(-EINVAL);
out_err:
- kfree(xprt->slot);
- kfree(xprt);
+ xprt_free(xprt);
return ret;
}
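
The merged xs_bind() works for both families because the source address is kept in a sockaddr_storage and the only per-family difference in the retry loop is where the port lives; rpc_set_port() hides that. Roughly what that helper does for the two families used here (a sketch; the real helper is part of the existing sunrpc headers):

static void example_set_port(struct sockaddr *sap, unsigned short port)
{
	switch (sap->sa_family) {
	case AF_INET:
		((struct sockaddr_in *)sap)->sin_port = htons(port);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)sap)->sin6_port = htons(port);
		break;
	}
}
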
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0ebc777a6660..3c95304a0817 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -117,7 +117,7 @@
static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
static DEFINE_SPINLOCK(unix_table_lock);
-static atomic_t unix_nr_socks = ATOMIC_INIT(0);
+static atomic_long_t unix_nr_socks;
#define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
@@ -360,13 +360,13 @@ static void unix_sock_destructor(struct sock *sk)
if (u->addr)
unix_release_addr(u->addr);
- atomic_dec(&unix_nr_socks);
+ atomic_long_dec(&unix_nr_socks);
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
- printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk,
- atomic_read(&unix_nr_socks));
+ printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
+ atomic_long_read(&unix_nr_socks));
#endif
}
@@ -606,8 +606,8 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
struct sock *sk = NULL;
struct unix_sock *u;
- atomic_inc(&unix_nr_socks);
- if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
+ atomic_long_inc(&unix_nr_socks);
+ if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
goto out;
sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
@@ -632,7 +632,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
unix_insert_socket(unix_sockets_unbound, sk);
out:
if (sk == NULL)
- atomic_dec(&unix_nr_socks);
+ atomic_long_dec(&unix_nr_socks);
else {
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index e77e508126fa..55a28ab21db9 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_WEXT_SPY) += wext-spy.o
obj-$(CONFIG_WEXT_PRIV) += wext-priv.o
cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
-cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o
+cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o
cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 9c21ebf9780e..79772fcc37bc 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -4,6 +4,8 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/if.h>
#include <linux/module.h>
#include <linux/err.h>
@@ -216,8 +218,7 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
rdev->wiphy.debugfsdir,
rdev->wiphy.debugfsdir->d_parent,
newname))
- printk(KERN_ERR "cfg80211: failed to rename debugfs dir to %s!\n",
- newname);
+ pr_err("failed to rename debugfs dir to %s!\n", newname);
nl80211_notify_dev_rename(rdev);
@@ -331,6 +332,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
WARN_ON(ops->add_virtual_intf && !ops->del_virtual_intf);
WARN_ON(ops->add_station && !ops->del_station);
WARN_ON(ops->add_mpath && !ops->del_mpath);
+ WARN_ON(ops->join_mesh && !ops->leave_mesh);
alloc_size = sizeof(*rdev) + sizeof_priv;
@@ -699,8 +701,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj,
"phy80211")) {
- printk(KERN_ERR "wireless: failed to add phy80211 "
- "symlink to netdev!\n");
+ pr_err("failed to add phy80211 symlink to netdev!\n");
}
wdev->netdev = dev;
wdev->sme_state = CFG80211_SME_IDLE;
@@ -752,6 +753,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
cfg80211_mlme_down(rdev, dev);
wdev_unlock(wdev);
break;
+ case NL80211_IFTYPE_MESH_POINT:
+ cfg80211_leave_mesh(rdev, dev);
+ break;
default:
break;
}
@@ -775,20 +779,27 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
}
cfg80211_lock_rdev(rdev);
mutex_lock(&rdev->devlist_mtx);
-#ifdef CONFIG_CFG80211_WEXT
wdev_lock(wdev);
switch (wdev->iftype) {
+#ifdef CONFIG_CFG80211_WEXT
case NL80211_IFTYPE_ADHOC:
cfg80211_ibss_wext_join(rdev, wdev);
break;
case NL80211_IFTYPE_STATION:
cfg80211_mgd_wext_connect(rdev, wdev);
break;
+#endif
+ case NL80211_IFTYPE_MESH_POINT:
+ /* backward compat code ... */
+ if (wdev->mesh_id_up_len)
+ __cfg80211_join_mesh(rdev, dev, wdev->ssid,
+ wdev->mesh_id_up_len,
+ &default_mesh_config);
+ break;
default:
break;
}
wdev_unlock(wdev);
-#endif
rdev->opencount++;
mutex_unlock(&rdev->devlist_mtx);
cfg80211_unlock_rdev(rdev);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 6583cca0e2ee..743203bb61ac 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -285,6 +285,19 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid);
int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
+/* mesh */
+extern const struct mesh_config default_mesh_config;
+int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ const u8 *mesh_id, u8 mesh_id_len,
+ const struct mesh_config *conf);
+int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ const u8 *mesh_id, u8 mesh_id_len,
+ const struct mesh_config *conf);
+int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
+ struct net_device *dev);
+
/* MLME */
int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
struct net_device *dev,
@@ -341,9 +354,9 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid);
void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev);
int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- struct ieee80211_channel *chan,
+ struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
- bool channel_type_valid,
+ bool channel_type_valid, unsigned int wait,
const u8 *buf, size_t len, u64 *cookie);
/* SME */
diff --git a/net/wireless/lib80211.c b/net/wireless/lib80211.c
index 97d411f74507..3268fac5ab22 100644
--- a/net/wireless/lib80211.c
+++ b/net/wireless/lib80211.c
@@ -13,6 +13,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/ieee80211.h>
@@ -224,8 +226,8 @@ int lib80211_unregister_crypto_ops(struct lib80211_crypto_ops *ops)
return -EINVAL;
found:
- printk(KERN_DEBUG "lib80211_crypt: unregistered algorithm "
- "'%s'\n", ops->name);
+ printk(KERN_DEBUG "lib80211_crypt: unregistered algorithm '%s'\n",
+ ops->name);
list_del(&alg->list);
spin_unlock_irqrestore(&lib80211_crypto_lock, flags);
kfree(alg);
@@ -270,7 +272,7 @@ static struct lib80211_crypto_ops lib80211_crypt_null = {
static int __init lib80211_init(void)
{
- printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION "\n");
+ pr_info(DRV_DESCRIPTION "\n");
return lib80211_register_crypto_ops(&lib80211_crypt_null);
}
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index 0fe40510e2cb..7ea4f2b0770e 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -10,6 +10,8 @@
* more details.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -99,8 +101,7 @@ static void *lib80211_tkip_init(int key_idx)
priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_arc4)) {
- printk(KERN_DEBUG "lib80211_crypt_tkip: could not allocate "
- "crypto API arc4\n");
+ printk(KERN_DEBUG pr_fmt("could not allocate crypto API arc4\n"));
priv->tx_tfm_arc4 = NULL;
goto fail;
}
@@ -108,8 +109,7 @@ static void *lib80211_tkip_init(int key_idx)
priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_michael)) {
- printk(KERN_DEBUG "lib80211_crypt_tkip: could not allocate "
- "crypto API michael_mic\n");
+ printk(KERN_DEBUG pr_fmt("could not allocate crypto API michael_mic\n"));
priv->tx_tfm_michael = NULL;
goto fail;
}
@@ -117,8 +117,7 @@ static void *lib80211_tkip_init(int key_idx)
priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm_arc4)) {
- printk(KERN_DEBUG "lib80211_crypt_tkip: could not allocate "
- "crypto API arc4\n");
+ printk(KERN_DEBUG pr_fmt("could not allocate crypto API arc4\n"));
priv->rx_tfm_arc4 = NULL;
goto fail;
}
@@ -126,8 +125,7 @@ static void *lib80211_tkip_init(int key_idx)
priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm_michael)) {
- printk(KERN_DEBUG "lib80211_crypt_tkip: could not allocate "
- "crypto API michael_mic\n");
+ printk(KERN_DEBUG pr_fmt("could not allocate crypto API michael_mic\n"));
priv->rx_tfm_michael = NULL;
goto fail;
}
@@ -536,7 +534,7 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr,
struct scatterlist sg[2];
if (tfm_michael == NULL) {
- printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
+ pr_warn("%s(): tfm_michael == NULL\n", __func__);
return -1;
}
sg_init_table(sg, 2);
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
new file mode 100644
index 000000000000..e0b9747fe50a
--- /dev/null
+++ b/net/wireless/mesh.c
@@ -0,0 +1,140 @@
+#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
+#include "core.h"
+
+/* Default values, timeouts in ms */
+#define MESH_TTL 31
+#define MESH_DEFAULT_ELEMENT_TTL 31
+#define MESH_MAX_RETR 3
+#define MESH_RET_T 100
+#define MESH_CONF_T 100
+#define MESH_HOLD_T 100
+
+#define MESH_PATH_TIMEOUT 5000
+
+/*
+ * Minimum interval between two consecutive PREQs originated by the same
+ * interface
+ */
+#define MESH_PREQ_MIN_INT 10
+#define MESH_DIAM_TRAVERSAL_TIME 50
+
+/*
+ * A path will be refreshed if it is used PATH_REFRESH_TIME milliseconds
+ * before timing out. This way it will remain ACTIVE and no data frames
+ * will be unnecessarily held in the pending queue.
+ */
+#define MESH_PATH_REFRESH_TIME 1000
+#define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME)
+
+/* Default maximum number of established plinks per interface */
+#define MESH_MAX_ESTAB_PLINKS 32
+
+#define MESH_MAX_PREQ_RETRIES 4
+
+
+const struct mesh_config default_mesh_config = {
+ .dot11MeshRetryTimeout = MESH_RET_T,
+ .dot11MeshConfirmTimeout = MESH_CONF_T,
+ .dot11MeshHoldingTimeout = MESH_HOLD_T,
+ .dot11MeshMaxRetries = MESH_MAX_RETR,
+ .dot11MeshTTL = MESH_TTL,
+ .element_ttl = MESH_DEFAULT_ELEMENT_TTL,
+ .auto_open_plinks = true,
+ .dot11MeshMaxPeerLinks = MESH_MAX_ESTAB_PLINKS,
+ .dot11MeshHWMPactivePathTimeout = MESH_PATH_TIMEOUT,
+ .dot11MeshHWMPpreqMinInterval = MESH_PREQ_MIN_INT,
+ .dot11MeshHWMPnetDiameterTraversalTime = MESH_DIAM_TRAVERSAL_TIME,
+ .dot11MeshHWMPmaxPREQretries = MESH_MAX_PREQ_RETRIES,
+ .path_refresh_time = MESH_PATH_REFRESH_TIME,
+ .min_discovery_timeout = MESH_MIN_DISCOVERY_TIMEOUT,
+};
+
+
+int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ const u8 *mesh_id, u8 mesh_id_len,
+ const struct mesh_config *conf)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct mesh_setup setup = {
+ .mesh_id = mesh_id,
+ .mesh_id_len = mesh_id_len,
+ };
+ int err;
+
+ BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN != IEEE80211_MAX_MESH_ID_LEN);
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+ return -EOPNOTSUPP;
+
+ if (wdev->mesh_id_len)
+ return -EALREADY;
+
+ if (!mesh_id_len)
+ return -EINVAL;
+
+ if (!rdev->ops->join_mesh)
+ return -EOPNOTSUPP;
+
+ err = rdev->ops->join_mesh(&rdev->wiphy, dev, conf, &setup);
+ if (!err) {
+ memcpy(wdev->ssid, mesh_id, mesh_id_len);
+ wdev->mesh_id_len = mesh_id_len;
+ }
+
+ return err;
+}
+
+int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ const u8 *mesh_id, u8 mesh_id_len,
+ const struct mesh_config *conf)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ wdev_lock(wdev);
+ err = __cfg80211_join_mesh(rdev, dev, mesh_id, mesh_id_len, conf);
+ wdev_unlock(wdev);
+
+ return err;
+}
+
+static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
+ struct net_device *dev)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+ return -EOPNOTSUPP;
+
+ if (!rdev->ops->leave_mesh)
+ return -EOPNOTSUPP;
+
+ if (!wdev->mesh_id_len)
+ return -ENOTCONN;
+
+ err = rdev->ops->leave_mesh(&rdev->wiphy, dev);
+ if (!err)
+ wdev->mesh_id_len = 0;
+ return err;
+}
+
+int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
+ struct net_device *dev)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ wdev_lock(wdev);
+ err = __cfg80211_leave_mesh(rdev, dev);
+ wdev_unlock(wdev);
+
+ return err;
+}
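For orientation, here is a minimal sketch of the driver side of the new join/leave path. The my_wifi_* names are hypothetical; only the callback shapes are inferred from the rdev->ops->join_mesh() and rdev->ops->leave_mesh() calls above.

#include <net/cfg80211.h>

/* Hypothetical driver callbacks; signatures inferred from the calls in
 * __cfg80211_join_mesh() and __cfg80211_leave_mesh() above. */
static int my_wifi_join_mesh(struct wiphy *wiphy, struct net_device *dev,
			     const struct mesh_config *conf,
			     const struct mesh_setup *setup)
{
	/* program the device with setup->mesh_id / setup->mesh_id_len and
	 * the per-mesh parameters carried in *conf */
	return 0;
}

static int my_wifi_leave_mesh(struct wiphy *wiphy, struct net_device *dev)
{
	/* tear down the mesh BSS on the device */
	return 0;
}

static struct cfg80211_ops my_wifi_cfg80211_ops = {
	/* ... other callbacks ... */
	.join_mesh = my_wifi_join_mesh,
	.leave_mesh = my_wifi_leave_mesh,
};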
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 26838d903b9a..d7680f2a4c5b 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -864,9 +864,9 @@ void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- struct ieee80211_channel *chan,
+ struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
- bool channel_type_valid,
+ bool channel_type_valid, unsigned int wait,
const u8 *buf, size_t len, u64 *cookie)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -946,8 +946,9 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
return -EINVAL;
/* Transmit the Action frame as requested by user space */
- return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, channel_type,
- channel_type_valid, buf, len, cookie);
+ return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, offchan,
+ channel_type, channel_type_valid,
+ wait, buf, len, cookie);
}
bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
@@ -1028,3 +1029,15 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
nl80211_send_cqm_rssi_notify(rdev, dev, rssi_event, gfp);
}
EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
+
+void cfg80211_cqm_pktloss_notify(struct net_device *dev,
+ const u8 *peer, u32 num_packets, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ /* Indicate roaming trigger event to user space */
+ nl80211_send_cqm_pktloss_notify(rdev, dev, peer, num_packets, gfp);
+}
+EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify);
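A hedged illustration of how a driver might feed the new export; the threshold, the my_wifi_sta structure and the surrounding function are made up for the example, only cfg80211_cqm_pktloss_notify() and its argument order come from the code added above.

#include <net/cfg80211.h>

#define MY_PKTLOSS_THRESHOLD 50 /* made-up value */

struct my_wifi_sta { /* made-up driver-private peer state */
	struct net_device *dev;
	u8 addr[ETH_ALEN];
	u32 consecutive_losses;
};

/* Hypothetical TX-status hook: once enough consecutive frames to a peer go
 * unacknowledged, let userspace know so it can decide to roam or re-peer. */
static void my_wifi_tx_status(struct my_wifi_sta *sta, bool acked)
{
	if (acked) {
		sta->consecutive_losses = 0;
		return;
	}

	if (++sta->consecutive_losses == MY_PKTLOSS_THRESHOLD)
		cfg80211_cqm_pktloss_notify(sta->dev, sta->addr,
					    sta->consecutive_losses,
					    GFP_ATOMIC);
}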
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4e78e3f26798..c3f80e565365 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -121,6 +121,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_BSS_SHORT_SLOT_TIME] = { .type = NLA_U8 },
[NL80211_ATTR_BSS_BASIC_RATES] = { .type = NLA_BINARY,
.len = NL80211_MAX_SUPP_RATES },
+ [NL80211_ATTR_BSS_HT_OPMODE] = { .type = NLA_U16 },
[NL80211_ATTR_MESH_PARAMS] = { .type = NLA_NESTED },
@@ -163,10 +164,13 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_CQM] = { .type = NLA_NESTED, },
[NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG },
[NL80211_ATTR_AP_ISOLATE] = { .type = NLA_U8 },
-
[NL80211_ATTR_WIPHY_TX_POWER_SETTING] = { .type = NLA_U32 },
[NL80211_ATTR_WIPHY_TX_POWER_LEVEL] = { .type = NLA_U32 },
[NL80211_ATTR_FRAME_TYPE] = { .type = NLA_U16 },
+ [NL80211_ATTR_WIPHY_ANTENNA_TX] = { .type = NLA_U32 },
+ [NL80211_ATTR_WIPHY_ANTENNA_RX] = { .type = NLA_U32 },
+ [NL80211_ATTR_MCAST_RATE] = { .type = NLA_U32 },
+ [NL80211_ATTR_OFFCHANNEL_TX_OK] = { .type = NLA_FLAG },
};
/* policy for the key attributes */
@@ -526,7 +530,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
dev->wiphy.rts_threshold);
NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
dev->wiphy.coverage_class);
-
NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
dev->wiphy.max_scan_ssids);
NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
@@ -545,6 +548,16 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
if (dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL)
NLA_PUT_FLAG(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE);
+ if (dev->ops->get_antenna) {
+ u32 tx_ant = 0, rx_ant = 0;
+ int res;
+ res = dev->ops->get_antenna(&dev->wiphy, &tx_ant, &rx_ant);
+ if (!res) {
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, tx_ant);
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX, rx_ant);
+ }
+ }
+
nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES);
if (!nl_modes)
goto nla_put_failure;
@@ -649,19 +662,21 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
CMD(add_beacon, NEW_BEACON);
CMD(add_station, NEW_STATION);
CMD(add_mpath, NEW_MPATH);
- CMD(set_mesh_params, SET_MESH_PARAMS);
+ CMD(update_mesh_params, SET_MESH_PARAMS);
CMD(change_bss, SET_BSS);
CMD(auth, AUTHENTICATE);
CMD(assoc, ASSOCIATE);
CMD(deauth, DEAUTHENTICATE);
CMD(disassoc, DISASSOCIATE);
CMD(join_ibss, JOIN_IBSS);
+ CMD(join_mesh, JOIN_MESH);
CMD(set_pmksa, SET_PMKSA);
CMD(del_pmksa, DEL_PMKSA);
CMD(flush_pmksa, FLUSH_PMKSA);
CMD(remain_on_channel, REMAIN_ON_CHANNEL);
CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
CMD(mgmt_tx, FRAME);
+ CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
i++;
NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
@@ -683,6 +698,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
nla_nest_end(msg, nl_cmds);
+ /* for now at least assume all drivers have it */
+ if (dev->ops->mgmt_tx)
+ NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK);
+
if (mgmt_stypes) {
u16 stypes;
struct nlattr *nl_ftypes, *nl_ifs;
@@ -1024,6 +1043,22 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
goto bad_res;
}
+ if (info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX] &&
+ info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]) {
+ u32 tx_ant, rx_ant;
+ if (!rdev->ops->set_antenna) {
+ result = -EOPNOTSUPP;
+ goto bad_res;
+ }
+
+ tx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX]);
+ rx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]);
+
+ result = rdev->ops->set_antenna(&rdev->wiphy, tx_ant, rx_ant);
+ if (result)
+ goto bad_res;
+ }
+
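The TX/RX antenna attributes are bitmaps of radio chains (bit 0 for the first antenna, bit 1 for the second, and so on). A hedged sketch of a set_antenna callback for a hypothetical 2x2 device, with the signature inferred from the rdev->ops->set_antenna() call above:

/* Hypothetical 2x2 driver: reject masks that select unavailable chains or
 * leave one direction without any antenna. */
static int my_wifi_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
{
	if (!tx_ant || !rx_ant || (tx_ant & ~0x3) || (rx_ant & ~0x3))
		return -EINVAL;

	/* push the selected chain masks to the hardware here */
	return 0;
}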
changed = 0;
if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) {
@@ -1291,11 +1326,21 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
}
if (info->attrs[NL80211_ATTR_MESH_ID]) {
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
if (ntype != NL80211_IFTYPE_MESH_POINT)
return -EINVAL;
- params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]);
- params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
- change = true;
+ if (netif_running(dev))
+ return -EBUSY;
+
+ wdev_lock(wdev);
+ BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
+ IEEE80211_MAX_MESH_ID_LEN);
+ wdev->mesh_id_up_len =
+ nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
+ memcpy(wdev->ssid, nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
+ wdev->mesh_id_up_len);
+ wdev_unlock(wdev);
}
if (info->attrs[NL80211_ATTR_4ADDR]) {
@@ -1335,6 +1380,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct vif_params params;
+ struct net_device *dev;
int err;
enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
u32 flags;
@@ -1354,12 +1400,6 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
!(rdev->wiphy.interface_modes & (1 << type)))
return -EOPNOTSUPP;
- if (type == NL80211_IFTYPE_MESH_POINT &&
- info->attrs[NL80211_ATTR_MESH_ID]) {
- params.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]);
- params.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
- }
-
if (info->attrs[NL80211_ATTR_4ADDR]) {
params.use_4addr = !!nla_get_u8(info->attrs[NL80211_ATTR_4ADDR]);
err = nl80211_valid_4addr(rdev, NULL, params.use_4addr, type);
@@ -1370,11 +1410,27 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
&flags);
- err = rdev->ops->add_virtual_intf(&rdev->wiphy,
+ dev = rdev->ops->add_virtual_intf(&rdev->wiphy,
nla_data(info->attrs[NL80211_ATTR_IFNAME]),
type, err ? NULL : &flags, &params);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
- return err;
+ if (type == NL80211_IFTYPE_MESH_POINT &&
+ info->attrs[NL80211_ATTR_MESH_ID]) {
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ wdev_lock(wdev);
+ BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
+ IEEE80211_MAX_MESH_ID_LEN);
+ wdev->mesh_id_up_len =
+ nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
+ memcpy(wdev->ssid, nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
+ wdev->mesh_id_up_len);
+ wdev_unlock(wdev);
+ }
+
+ return 0;
}
static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
@@ -1841,6 +1897,9 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
if (sinfo->filled & STATION_INFO_SIGNAL)
NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL,
sinfo->signal);
+ if (sinfo->filled & STATION_INFO_SIGNAL_AVG)
+ NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG,
+ sinfo->signal_avg);
if (sinfo->filled & STATION_INFO_TX_BITRATE) {
txrate = nla_nest_start(msg, NL80211_STA_INFO_TX_BITRATE);
if (!txrate)
@@ -2404,6 +2463,7 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
params.use_short_preamble = -1;
params.use_short_slot_time = -1;
params.ap_isolate = -1;
+ params.ht_opmode = -1;
if (info->attrs[NL80211_ATTR_BSS_CTS_PROT])
params.use_cts_prot =
@@ -2422,6 +2482,9 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
}
if (info->attrs[NL80211_ATTR_AP_ISOLATE])
params.ap_isolate = !!nla_get_u8(info->attrs[NL80211_ATTR_AP_ISOLATE]);
+ if (info->attrs[NL80211_ATTR_BSS_HT_OPMODE])
+ params.ht_opmode =
+ nla_get_u16(info->attrs[NL80211_ATTR_BSS_HT_OPMODE]);
if (!rdev->ops->change_bss)
return -EOPNOTSUPP;
@@ -2507,21 +2570,32 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
}
static int nl80211_get_mesh_params(struct sk_buff *skb,
- struct genl_info *info)
+ struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct mesh_config cur_params;
- int err;
struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct mesh_config cur_params;
+ int err = 0;
void *hdr;
struct nlattr *pinfoattr;
struct sk_buff *msg;
+ if (wdev->iftype != NL80211_IFTYPE_MESH_POINT)
+ return -EOPNOTSUPP;
+
if (!rdev->ops->get_mesh_params)
return -EOPNOTSUPP;
- /* Get the mesh params */
- err = rdev->ops->get_mesh_params(&rdev->wiphy, dev, &cur_params);
+ wdev_lock(wdev);
+ /* If not connected, get default parameters */
+ if (!wdev->mesh_id_len)
+ memcpy(&cur_params, &default_mesh_config, sizeof(cur_params));
+ else
+ err = rdev->ops->get_mesh_params(&rdev->wiphy, dev,
+ &cur_params);
+ wdev_unlock(wdev);
+
if (err)
return err;
@@ -2549,6 +2623,8 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
cur_params.dot11MeshMaxRetries);
NLA_PUT_U8(msg, NL80211_MESHCONF_TTL,
cur_params.dot11MeshTTL);
+ NLA_PUT_U8(msg, NL80211_MESHCONF_ELEMENT_TTL,
+ cur_params.element_ttl);
NLA_PUT_U8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
cur_params.auto_open_plinks);
NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
@@ -2575,14 +2651,6 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
return -ENOBUFS;
}
-#define FILL_IN_MESH_PARAM_IF_SET(table, cfg, param, mask, attr_num, nla_fn) \
-do {\
- if (table[attr_num]) {\
- cfg.param = nla_fn(table[attr_num]); \
- mask |= (1 << (attr_num - 1)); \
- } \
-} while (0);\
-
static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = {
[NL80211_MESHCONF_RETRY_TIMEOUT] = { .type = NLA_U16 },
[NL80211_MESHCONF_CONFIRM_TIMEOUT] = { .type = NLA_U16 },
@@ -2590,6 +2658,7 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
[NL80211_MESHCONF_MAX_PEER_LINKS] = { .type = NLA_U16 },
[NL80211_MESHCONF_MAX_RETRIES] = { .type = NLA_U8 },
[NL80211_MESHCONF_TTL] = { .type = NLA_U8 },
+ [NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 },
[NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 },
[NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 },
@@ -2600,31 +2669,34 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
[NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = { .type = NLA_U16 },
};
-static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info)
+static int nl80211_parse_mesh_params(struct genl_info *info,
+ struct mesh_config *cfg,
+ u32 *mask_out)
{
- u32 mask;
- struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
- struct mesh_config cfg;
struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1];
- struct nlattr *parent_attr;
+ u32 mask = 0;
+
+#define FILL_IN_MESH_PARAM_IF_SET(table, cfg, param, mask, attr_num, nla_fn) \
+do {\
+ if (table[attr_num]) {\
+ cfg->param = nla_fn(table[attr_num]); \
+ mask |= (1 << (attr_num - 1)); \
+ } \
+} while (0);\
+
- parent_attr = info->attrs[NL80211_ATTR_MESH_PARAMS];
- if (!parent_attr)
+ if (!info->attrs[NL80211_ATTR_MESH_PARAMS])
return -EINVAL;
if (nla_parse_nested(tb, NL80211_MESHCONF_ATTR_MAX,
- parent_attr, nl80211_meshconf_params_policy))
+ info->attrs[NL80211_ATTR_MESH_PARAMS],
+ nl80211_meshconf_params_policy))
return -EINVAL;
- if (!rdev->ops->set_mesh_params)
- return -EOPNOTSUPP;
-
/* This makes sure that there aren't more than 32 mesh config
* parameters (otherwise our bitfield scheme would not work.) */
BUILD_BUG_ON(NL80211_MESHCONF_ATTR_MAX > 32);
/* Fill in the params struct */
- mask = 0;
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout,
mask, NL80211_MESHCONF_RETRY_TIMEOUT, nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout,
@@ -2637,6 +2709,8 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info)
mask, NL80211_MESHCONF_MAX_RETRIES, nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL,
mask, NL80211_MESHCONF_TTL, nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl,
+ mask, NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks,
mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries,
@@ -2662,11 +2736,45 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info)
NL80211_MESHCONF_HWMP_ROOTMODE,
nla_get_u8);
- /* Apply changes */
- return rdev->ops->set_mesh_params(&rdev->wiphy, dev, &cfg, mask);
-}
+ if (mask_out)
+ *mask_out = mask;
+ return 0;
#undef FILL_IN_MESH_PARAM_IF_SET
+}
+
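To make the bitfield scheme concrete, this is roughly what a single FILL_IN_MESH_PARAM_IF_SET() invocation expands to; the mask bit is attr_num - 1, which is why the BUILD_BUG_ON above caps the attribute space at 32 (attribute numbering is assumed to start at 1).

/* Approximate expansion of
 * FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, mask,
 *                           NL80211_MESHCONF_RETRY_TIMEOUT, nla_get_u16); */
if (tb[NL80211_MESHCONF_RETRY_TIMEOUT]) {
	cfg->dot11MeshRetryTimeout =
		nla_get_u16(tb[NL80211_MESHCONF_RETRY_TIMEOUT]);
	mask |= (1 << (NL80211_MESHCONF_RETRY_TIMEOUT - 1));
}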
+static int nl80211_update_mesh_params(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct mesh_config cfg;
+ u32 mask;
+ int err;
+
+ if (wdev->iftype != NL80211_IFTYPE_MESH_POINT)
+ return -EOPNOTSUPP;
+
+ if (!rdev->ops->update_mesh_params)
+ return -EOPNOTSUPP;
+
+ err = nl80211_parse_mesh_params(info, &cfg, &mask);
+ if (err)
+ return err;
+
+ wdev_lock(wdev);
+ if (!wdev->mesh_id_len)
+ err = -ENOLINK;
+
+ if (!err)
+ err = rdev->ops->update_mesh_params(&rdev->wiphy, dev,
+ mask, &cfg);
+
+ wdev_unlock(wdev);
+
+ return err;
+}
static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
{
@@ -3569,6 +3677,34 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
local_state_change);
}
+static bool
+nl80211_parse_mcast_rate(struct cfg80211_registered_device *rdev,
+ int mcast_rate[IEEE80211_NUM_BANDS],
+ int rateval)
+{
+ struct wiphy *wiphy = &rdev->wiphy;
+ bool found = false;
+ int band, i;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ struct ieee80211_supported_band *sband;
+
+ sband = wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (sband->bitrates[i].bitrate == rateval) {
+ mcast_rate[band] = i + 1;
+ found = true;
+ break;
+ }
+ }
+ }
+
+ return found;
+}
+
static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -3653,6 +3789,11 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
}
}
+ if (info->attrs[NL80211_ATTR_MCAST_RATE] &&
+ !nl80211_parse_mcast_rate(rdev, ibss.mcast_rate,
+ nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE])))
+ return -EINVAL;
+
if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) {
connkeys = nl80211_parse_connkeys(rdev,
info->attrs[NL80211_ATTR_KEYS]);
@@ -4180,6 +4321,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
void *hdr;
u64 cookie;
struct sk_buff *msg;
+ unsigned int wait = 0;
+ bool offchan;
if (!info->attrs[NL80211_ATTR_FRAME] ||
!info->attrs[NL80211_ATTR_WIPHY_FREQ])
@@ -4196,6 +4339,12 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
return -EOPNOTSUPP;
+ if (info->attrs[NL80211_ATTR_DURATION]) {
+ if (!rdev->ops->mgmt_tx_cancel_wait)
+ return -EINVAL;
+ wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
+ }
+
if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
channel_type = nla_get_u32(
info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
@@ -4207,6 +4356,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
channel_type_valid = true;
}
+ offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK];
+
freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
chan = rdev_freq_to_chan(rdev, freq, channel_type);
if (chan == NULL)
@@ -4223,8 +4374,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
err = PTR_ERR(hdr);
goto free_msg;
}
- err = cfg80211_mlme_mgmt_tx(rdev, dev, chan, channel_type,
- channel_type_valid,
+ err = cfg80211_mlme_mgmt_tx(rdev, dev, chan, offchan, channel_type,
+ channel_type_valid, wait,
nla_data(info->attrs[NL80211_ATTR_FRAME]),
nla_len(info->attrs[NL80211_ATTR_FRAME]),
&cookie);
@@ -4243,6 +4394,31 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
return err;
}
+static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ u64 cookie;
+
+ if (!info->attrs[NL80211_ATTR_COOKIE])
+ return -EINVAL;
+
+ if (!rdev->ops->mgmt_tx_cancel_wait)
+ return -EOPNOTSUPP;
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ return -EOPNOTSUPP;
+
+ cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
+
+ return rdev->ops->mgmt_tx_cancel_wait(&rdev->wiphy, dev, cookie);
+}
+
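For completeness, a hedged sketch of the driver side of the new FRAME_WAIT_CANCEL command; the callback shape is inferred from the rdev->ops->mgmt_tx_cancel_wait() call above, and the my_wifi_* names plus the cookie bookkeeping are hypothetical.

struct my_wifi_priv { /* made-up driver-private data */
	u64 mgmt_tx_cookie;
};

/* Hypothetical implementation: abort the post-TX wait period that an earlier
 * mgmt_tx(..., wait, ...) request identified by this cookie started. */
static int my_wifi_mgmt_tx_cancel_wait(struct wiphy *wiphy,
				       struct net_device *dev, u64 cookie)
{
	struct my_wifi_priv *priv = wiphy_priv(wiphy);

	if (priv->mgmt_tx_cookie != cookie)
		return -ENOENT;

	/* tell the firmware to return to the operating channel now */
	return 0;
}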
static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -4381,6 +4557,41 @@ out:
return err;
}
+static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct mesh_config cfg;
+ int err;
+
+ /* start with default */
+ memcpy(&cfg, &default_mesh_config, sizeof(cfg));
+
+ if (info->attrs[NL80211_ATTR_MESH_PARAMS]) {
+ /* and parse parameters if given */
+ err = nl80211_parse_mesh_params(info, &cfg, NULL);
+ if (err)
+ return err;
+ }
+
+ if (!info->attrs[NL80211_ATTR_MESH_ID] ||
+ !nla_len(info->attrs[NL80211_ATTR_MESH_ID]))
+ return -EINVAL;
+
+ return cfg80211_join_mesh(rdev, dev,
+ nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
+ nla_len(info->attrs[NL80211_ATTR_MESH_ID]),
+ &cfg);
+}
+
+static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+
+ return cfg80211_leave_mesh(rdev, dev);
+}
+
#define NL80211_FLAG_NEED_WIPHY 0x01
#define NL80211_FLAG_NEED_NETDEV 0x02
#define NL80211_FLAG_NEED_RTNL 0x04
@@ -4645,10 +4856,10 @@ static struct genl_ops nl80211_ops[] = {
},
{
.cmd = NL80211_CMD_SET_MESH_PARAMS,
- .doit = nl80211_set_mesh_params,
+ .doit = nl80211_update_mesh_params,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -4816,6 +5027,14 @@ static struct genl_ops nl80211_ops[] = {
NL80211_FLAG_NEED_RTNL,
},
{
+ .cmd = NL80211_CMD_FRAME_WAIT_CANCEL,
+ .doit = nl80211_tx_mgmt_cancel_wait,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
.cmd = NL80211_CMD_SET_POWER_SAVE,
.doit = nl80211_set_power_save,
.policy = nl80211_policy,
@@ -4855,6 +5074,22 @@ static struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_NETDEV |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_JOIN_MESH,
+ .doit = nl80211_join_mesh,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_LEAVE_MESH,
+ .doit = nl80211_leave_mesh,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -5651,6 +5886,51 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
nlmsg_free(msg);
}
+void
+nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, const u8 *peer,
+ u32 num_packets, gfp_t gfp)
+{
+ struct sk_buff *msg;
+ struct nlattr *pinfoattr;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+ NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, peer);
+
+ pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
+ if (!pinfoattr)
+ goto nla_put_failure;
+
+ NLA_PUT_U32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets);
+
+ nla_nest_end(msg, pinfoattr);
+
+ if (genlmsg_end(msg, hdr) < 0) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+}
+
static int nl80211_netlink_notify(struct notifier_block * nb,
unsigned long state,
void *_notify)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 30d2f939150d..16c2f7190768 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -87,5 +87,9 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
enum nl80211_cqm_rssi_threshold_event rssi_event,
gfp_t gfp);
+void
+nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, const u8 *peer,
+ u32 num_packets, gfp_t gfp);
#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 4b9f8912526c..5ed615f94e0c 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -32,6 +32,9 @@
* rely on some SHA1 checksum of the regdomain for example.
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -48,7 +51,7 @@
#ifdef CONFIG_CFG80211_REG_DEBUG
#define REG_DBG_PRINT(format, args...) \
do { \
- printk(KERN_DEBUG format , ## args); \
+ printk(KERN_DEBUG pr_fmt(format), ##args); \
} while (0)
#else
#define REG_DBG_PRINT(args...)
@@ -96,6 +99,9 @@ struct reg_beacon {
struct ieee80211_channel chan;
};
+static void reg_todo(struct work_struct *work);
+static DECLARE_WORK(reg_work, reg_todo);
+
/* We keep a static world regulatory domain in case of the absence of CRDA */
static const struct ieee80211_regdomain world_regdom = {
.n_reg_rules = 5,
@@ -367,11 +373,10 @@ static int call_crda(const char *alpha2)
};
if (!is_world_regdom((char *) alpha2))
- printk(KERN_INFO "cfg80211: Calling CRDA for country: %c%c\n",
+ pr_info("Calling CRDA for country: %c%c\n",
alpha2[0], alpha2[1]);
else
- printk(KERN_INFO "cfg80211: Calling CRDA to update world "
- "regulatory domain\n");
+ pr_info("Calling CRDA to update world regulatory domain\n");
/* query internal regulatory database (if it exists) */
reg_regdb_query(alpha2);
@@ -711,6 +716,60 @@ int freq_reg_info(struct wiphy *wiphy,
}
EXPORT_SYMBOL(freq_reg_info);
+#ifdef CONFIG_CFG80211_REG_DEBUG
+static const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
+{
+ switch (initiator) {
+ case NL80211_REGDOM_SET_BY_CORE:
+ return "Set by core";
+ case NL80211_REGDOM_SET_BY_USER:
+ return "Set by user";
+ case NL80211_REGDOM_SET_BY_DRIVER:
+ return "Set by driver";
+ case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+ return "Set by country IE";
+ default:
+ WARN_ON(1);
+ return "Set by bug";
+ }
+}
+
+static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
+ u32 desired_bw_khz,
+ const struct ieee80211_reg_rule *reg_rule)
+{
+ const struct ieee80211_power_rule *power_rule;
+ const struct ieee80211_freq_range *freq_range;
+ char max_antenna_gain[32];
+
+ power_rule = &reg_rule->power_rule;
+ freq_range = &reg_rule->freq_range;
+
+ if (!power_rule->max_antenna_gain)
+ snprintf(max_antenna_gain, 32, "N/A");
+ else
+ snprintf(max_antenna_gain, 32, "%d", power_rule->max_antenna_gain);
+
+ REG_DBG_PRINT("Updating information on frequency %d MHz "
+ "for %d a MHz width channel with regulatory rule:\n",
+ chan->center_freq,
+ KHZ_TO_MHZ(desired_bw_khz));
+
+ REG_DBG_PRINT("%d KHz - %d KHz @ KHz), (%s mBi, %d mBm)\n",
+ freq_range->start_freq_khz,
+ freq_range->end_freq_khz,
+ max_antenna_gain,
+ power_rule->max_eirp);
+}
+#else
+static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
+ u32 desired_bw_khz,
+ const struct ieee80211_reg_rule *reg_rule)
+{
+ return;
+}
+#endif
+
/*
* Note that right now we assume the desired channel bandwidth
* is always 20 MHz for each individual channel (HT40 uses 20 MHz
@@ -720,7 +779,9 @@ EXPORT_SYMBOL(freq_reg_info);
* on the wiphy with the target_bw specified. Then we can simply use
* that below for the desired_bw_khz below.
*/
-static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
+static void handle_channel(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator,
+ enum ieee80211_band band,
unsigned int chan_idx)
{
int r;
@@ -748,8 +809,27 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
desired_bw_khz,
&reg_rule);
- if (r)
+ if (r) {
+ /*
+ * We will disable all channels that do not match our
+ * received regulatory rule unless the hint is coming
+ * from a Country IE and the Country IE had no information
+ * about a band. The IEEE 802.11 spec allows for an AP
+ * to send only a subset of the regulatory rules allowed,
+ * so an AP in the US that only supports 2.4 GHz may only send
+ * a country IE with information for the 2.4 GHz band
+ * while 5 GHz is still supported.
+ */
+ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+ r == -ERANGE)
+ return;
+
+ REG_DBG_PRINT("Disabling freq %d MHz\n", chan->center_freq);
+ chan->flags = IEEE80211_CHAN_DISABLED;
return;
+ }
+
+ chan_reg_rule_print_dbg(chan, desired_bw_khz, reg_rule);
power_rule = &reg_rule->power_rule;
freq_range = &reg_rule->freq_range;
@@ -784,7 +864,9 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
}
-static void handle_band(struct wiphy *wiphy, enum ieee80211_band band)
+static void handle_band(struct wiphy *wiphy,
+ enum ieee80211_band band,
+ enum nl80211_reg_initiator initiator)
{
unsigned int i;
struct ieee80211_supported_band *sband;
@@ -793,24 +875,42 @@ static void handle_band(struct wiphy *wiphy, enum ieee80211_band band)
sband = wiphy->bands[band];
for (i = 0; i < sband->n_channels; i++)
- handle_channel(wiphy, band, i);
+ handle_channel(wiphy, initiator, band, i);
}
static bool ignore_reg_update(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator)
{
- if (!last_request)
+ if (!last_request) {
+ REG_DBG_PRINT("Ignoring regulatory request %s since "
+ "last_request is not set\n",
+ reg_initiator_name(initiator));
return true;
+ }
+
if (initiator == NL80211_REGDOM_SET_BY_CORE &&
- wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY)
+ wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) {
+ REG_DBG_PRINT("Ignoring regulatory request %s "
+ "since the driver uses its own custom "
+ "regulatory domain ",
+ reg_initiator_name(initiator));
return true;
+ }
+
/*
* wiphy->regd will be set once the device has its own
* desired regulatory domain set
*/
if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd &&
- !is_world_regdom(last_request->alpha2))
+ initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+ !is_world_regdom(last_request->alpha2)) {
+ REG_DBG_PRINT("Ignoring regulatory request %s "
+ "since the driver requires its own regulaotry "
+ "domain to be set first",
+ reg_initiator_name(initiator));
return true;
+ }
+
return false;
}
@@ -1030,7 +1130,7 @@ void wiphy_update_regulatory(struct wiphy *wiphy,
goto out;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
if (wiphy->bands[band])
- handle_band(wiphy, band);
+ handle_band(wiphy, band, initiator);
}
out:
reg_process_beacons(wiphy);
@@ -1066,10 +1166,17 @@ static void handle_channel_custom(struct wiphy *wiphy,
regd);
if (r) {
+ REG_DBG_PRINT("Disabling freq %d MHz as custom "
+ "regd has no rule that fits a %d MHz "
+ "wide channel\n",
+ chan->center_freq,
+ KHZ_TO_MHZ(desired_bw_khz));
chan->flags = IEEE80211_CHAN_DISABLED;
return;
}
+ chan_reg_rule_print_dbg(chan, desired_bw_khz, reg_rule);
+
power_rule = &reg_rule->power_rule;
freq_range = &reg_rule->freq_range;
@@ -1215,6 +1322,21 @@ static int ignore_request(struct wiphy *wiphy,
return -EINVAL;
}
+static void reg_set_request_processed(void)
+{
+ bool need_more_processing = false;
+
+ last_request->processed = true;
+
+ spin_lock(&reg_requests_lock);
+ if (!list_empty(&reg_requests_list))
+ need_more_processing = true;
+ spin_unlock(&reg_requests_lock);
+
+ if (need_more_processing)
+ schedule_work(&reg_work);
+}
+
/**
* __regulatory_hint - hint to the wireless core a regulatory domain
* @wiphy: if the hint comes from country information from an AP, this
@@ -1290,8 +1412,10 @@ new_request:
* have applied the requested regulatory domain before we just
* inform userspace we have processed the request
*/
- if (r == -EALREADY)
+ if (r == -EALREADY) {
nl80211_send_reg_change_event(last_request);
+ reg_set_request_processed();
+ }
return r;
}
@@ -1307,16 +1431,13 @@ static void reg_process_hint(struct regulatory_request *reg_request)
BUG_ON(!reg_request->alpha2);
- mutex_lock(&cfg80211_mutex);
- mutex_lock(&reg_mutex);
-
if (wiphy_idx_valid(reg_request->wiphy_idx))
wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
if (reg_request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
!wiphy) {
kfree(reg_request);
- goto out;
+ return;
}
r = __regulatory_hint(wiphy, reg_request);
@@ -1324,28 +1445,46 @@ static void reg_process_hint(struct regulatory_request *reg_request)
if (r == -EALREADY && wiphy &&
wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY)
wiphy_update_regulatory(wiphy, initiator);
-out:
- mutex_unlock(&reg_mutex);
- mutex_unlock(&cfg80211_mutex);
}
-/* Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_* */
+/*
+ * Processes regulatory hints; this covers all of the NL80211_REGDOM_SET_BY_*
+ * initiators. Regulatory hints are handled on a first-come, first-served
+ * basis and each one must be processed atomically.
+ */
static void reg_process_pending_hints(void)
- {
+{
struct regulatory_request *reg_request;
+ mutex_lock(&cfg80211_mutex);
+ mutex_lock(&reg_mutex);
+
+ /* When last_request->processed becomes true this will be rescheduled */
+ if (last_request && !last_request->processed) {
+ REG_DBG_PRINT("Pending regulatory request, waiting "
+ "for it to be processed...");
+ goto out;
+ }
+
spin_lock(&reg_requests_lock);
- while (!list_empty(&reg_requests_list)) {
- reg_request = list_first_entry(&reg_requests_list,
- struct regulatory_request,
- list);
- list_del_init(&reg_request->list);
+ if (list_empty(&reg_requests_list)) {
spin_unlock(&reg_requests_lock);
- reg_process_hint(reg_request);
- spin_lock(&reg_requests_lock);
+ goto out;
}
+
+ reg_request = list_first_entry(&reg_requests_list,
+ struct regulatory_request,
+ list);
+ list_del_init(&reg_request->list);
+
spin_unlock(&reg_requests_lock);
+
+ reg_process_hint(reg_request);
+
+out:
+ mutex_unlock(&reg_mutex);
+ mutex_unlock(&cfg80211_mutex);
}
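To summarize the serialization these hunks introduce, a rough sketch of the intended request lifecycle (not literal code from the patch):

/*
 * queue_regulatory_request(req)
 *     appends the request to reg_requests_list and schedules reg_work.
 *
 * reg_todo() -> reg_process_pending_hints()
 *     pops exactly one request, or bails out early while last_request has
 *     not been marked processed yet.
 *
 * set_regdom()  (once CRDA has answered)
 *     applies the regulatory domain, sends the change event and calls
 *     reg_set_request_processed(), which marks last_request done and
 *     reschedules reg_work if further requests are queued.
 */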
/* Processes beacon hints -- this has nothing to do with country IEs */
@@ -1392,8 +1531,6 @@ static void reg_todo(struct work_struct *work)
reg_process_pending_beacon_hints();
}
-static DECLARE_WORK(reg_work, reg_todo);
-
static void queue_regulatory_request(struct regulatory_request *request)
{
if (isalpha(request->alpha2[0]))
@@ -1428,12 +1565,7 @@ static int regulatory_hint_core(const char *alpha2)
request->alpha2[1] = alpha2[1];
request->initiator = NL80211_REGDOM_SET_BY_CORE;
- /*
- * This ensures last_request is populated once modules
- * come swinging in and calling regulatory hints and
- * wiphy_apply_custom_regulatory().
- */
- reg_process_hint(request);
+ queue_regulatory_request(request);
return 0;
}
@@ -1559,7 +1691,7 @@ static void restore_alpha2(char *alpha2, bool reset_user)
if (is_user_regdom_saved()) {
/* Unless we're asked to ignore it and reset it */
if (reset_user) {
- REG_DBG_PRINT("cfg80211: Restoring regulatory settings "
+ REG_DBG_PRINT("Restoring regulatory settings "
"including user preference\n");
user_alpha2[0] = '9';
user_alpha2[1] = '7';
@@ -1570,7 +1702,7 @@ static void restore_alpha2(char *alpha2, bool reset_user)
* back as they were for a full restore.
*/
if (!is_world_regdom(ieee80211_regdom)) {
- REG_DBG_PRINT("cfg80211: Keeping preference on "
+ REG_DBG_PRINT("Keeping preference on "
"module parameter ieee80211_regdom: %c%c\n",
ieee80211_regdom[0],
ieee80211_regdom[1]);
@@ -1578,7 +1710,7 @@ static void restore_alpha2(char *alpha2, bool reset_user)
alpha2[1] = ieee80211_regdom[1];
}
} else {
- REG_DBG_PRINT("cfg80211: Restoring regulatory settings "
+ REG_DBG_PRINT("Restoring regulatory settings "
"while preserving user preference for: %c%c\n",
user_alpha2[0],
user_alpha2[1]);
@@ -1586,14 +1718,14 @@ static void restore_alpha2(char *alpha2, bool reset_user)
alpha2[1] = user_alpha2[1];
}
} else if (!is_world_regdom(ieee80211_regdom)) {
- REG_DBG_PRINT("cfg80211: Keeping preference on "
+ REG_DBG_PRINT("Keeping preference on "
"module parameter ieee80211_regdom: %c%c\n",
ieee80211_regdom[0],
ieee80211_regdom[1]);
alpha2[0] = ieee80211_regdom[0];
alpha2[1] = ieee80211_regdom[1];
} else
- REG_DBG_PRINT("cfg80211: Restoring regulatory settings\n");
+ REG_DBG_PRINT("Restoring regulatory settings\n");
}
/*
@@ -1661,7 +1793,7 @@ static void restore_regulatory_settings(bool reset_user)
void regulatory_hint_disconnect(void)
{
- REG_DBG_PRINT("cfg80211: All devices are disconnected, going to "
+ REG_DBG_PRINT("All devices are disconnected, going to "
"restore regulatory settings\n");
restore_regulatory_settings(false);
}
@@ -1691,7 +1823,7 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
if (!reg_beacon)
return -ENOMEM;
- REG_DBG_PRINT("cfg80211: Found new beacon on "
+ REG_DBG_PRINT("Found new beacon on "
"frequency: %d MHz (Ch %d) on %s\n",
beacon_chan->center_freq,
ieee80211_frequency_to_channel(beacon_chan->center_freq),
@@ -1721,8 +1853,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
const struct ieee80211_freq_range *freq_range = NULL;
const struct ieee80211_power_rule *power_rule = NULL;
- printk(KERN_INFO " (start_freq - end_freq @ bandwidth), "
- "(max_antenna_gain, max_eirp)\n");
+ pr_info(" (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp)\n");
for (i = 0; i < rd->n_reg_rules; i++) {
reg_rule = &rd->reg_rules[i];
@@ -1734,16 +1865,14 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
* in certain regions
*/
if (power_rule->max_antenna_gain)
- printk(KERN_INFO " (%d KHz - %d KHz @ %d KHz), "
- "(%d mBi, %d mBm)\n",
+ pr_info(" (%d KHz - %d KHz @ %d KHz), (%d mBi, %d mBm)\n",
freq_range->start_freq_khz,
freq_range->end_freq_khz,
freq_range->max_bandwidth_khz,
power_rule->max_antenna_gain,
power_rule->max_eirp);
else
- printk(KERN_INFO " (%d KHz - %d KHz @ %d KHz), "
- "(N/A, %d mBm)\n",
+ pr_info(" (%d KHz - %d KHz @ %d KHz), (N/A, %d mBm)\n",
freq_range->start_freq_khz,
freq_range->end_freq_khz,
freq_range->max_bandwidth_khz,
@@ -1762,27 +1891,20 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
rdev = cfg80211_rdev_by_wiphy_idx(
last_request->wiphy_idx);
if (rdev) {
- printk(KERN_INFO "cfg80211: Current regulatory "
- "domain updated by AP to: %c%c\n",
+ pr_info("Current regulatory domain updated by AP to: %c%c\n",
rdev->country_ie_alpha2[0],
rdev->country_ie_alpha2[1]);
} else
- printk(KERN_INFO "cfg80211: Current regulatory "
- "domain intersected:\n");
+ pr_info("Current regulatory domain intersected:\n");
} else
- printk(KERN_INFO "cfg80211: Current regulatory "
- "domain intersected:\n");
+ pr_info("Current regulatory domain intersected:\n");
} else if (is_world_regdom(rd->alpha2))
- printk(KERN_INFO "cfg80211: World regulatory "
- "domain updated:\n");
+ pr_info("World regulatory domain updated:\n");
else {
if (is_unknown_alpha2(rd->alpha2))
- printk(KERN_INFO "cfg80211: Regulatory domain "
- "changed to driver built-in settings "
- "(unknown country)\n");
+ pr_info("Regulatory domain changed to driver built-in settings (unknown country)\n");
else
- printk(KERN_INFO "cfg80211: Regulatory domain "
- "changed to country: %c%c\n",
+ pr_info("Regulatory domain changed to country: %c%c\n",
rd->alpha2[0], rd->alpha2[1]);
}
print_rd_rules(rd);
@@ -1790,8 +1912,7 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
static void print_regdomain_info(const struct ieee80211_regdomain *rd)
{
- printk(KERN_INFO "cfg80211: Regulatory domain: %c%c\n",
- rd->alpha2[0], rd->alpha2[1]);
+ pr_info("Regulatory domain: %c%c\n", rd->alpha2[0], rd->alpha2[1]);
print_rd_rules(rd);
}
@@ -1842,8 +1963,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
return -EINVAL;
if (!is_valid_rd(rd)) {
- printk(KERN_ERR "cfg80211: Invalid "
- "regulatory domain detected:\n");
+ pr_err("Invalid regulatory domain detected:\n");
print_regdomain_info(rd);
return -EINVAL;
}
@@ -1959,6 +2079,8 @@ int set_regdom(const struct ieee80211_regdomain *rd)
nl80211_send_reg_change_event(last_request);
+ reg_set_request_processed();
+
mutex_unlock(&reg_mutex);
return r;
@@ -2015,8 +2137,7 @@ int __init regulatory_init(void)
* early boot for call_usermodehelper(). For now treat these
* errors as non-fatal.
*/
- printk(KERN_ERR "cfg80211: kobject_uevent_env() was unable "
- "to call CRDA during init");
+ pr_err("kobject_uevent_env() was unable to call CRDA during init\n");
#ifdef CONFIG_CFG80211_REG_DEBUG
/* We want to find out exactly why when debugging */
WARN_ON(err);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 76120aeda57d..4de624ca4c63 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -502,7 +502,7 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
skb_orphan(skb);
if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC)) {
- printk(KERN_ERR "failed to reallocate Tx buffer\n");
+ pr_err("failed to reallocate Tx buffer\n");
return -ENOMEM;
}
skb->truesize += head_need;
@@ -685,20 +685,17 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
continue;
if (rdev->ops->add_key(wdev->wiphy, dev, i, false, NULL,
&wdev->connect_keys->params[i])) {
- printk(KERN_ERR "%s: failed to set key %d\n",
- dev->name, i);
+ netdev_err(dev, "failed to set key %d\n", i);
continue;
}
if (wdev->connect_keys->def == i)
if (rdev->ops->set_default_key(wdev->wiphy, dev, i)) {
- printk(KERN_ERR "%s: failed to set defkey %d\n",
- dev->name, i);
+ netdev_err(dev, "failed to set defkey %d\n", i);
continue;
}
if (wdev->connect_keys->defmgmt == i)
if (rdev->ops->set_default_mgmt_key(wdev->wiphy, dev, i))
- printk(KERN_ERR "%s: failed to set mgtdef %d\n",
- dev->name, i);
+ netdev_err(dev, "failed to set mgtdef %d\n", i);
}
kfree(wdev->connect_keys);
@@ -795,6 +792,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
if (ntype != otype) {
dev->ieee80211_ptr->use_4addr = false;
+ dev->ieee80211_ptr->mesh_id_up_len = 0;
switch (otype) {
case NL80211_IFTYPE_ADHOC:
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index dc675a3daa3d..fdbc23c10d8c 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -467,8 +467,8 @@ void wireless_send_event(struct net_device * dev,
* The best the driver could do is to log an error message.
* We will do it ourselves instead...
*/
- printk(KERN_ERR "%s (WE) : Invalid/Unknown Wireless Event (0x%04X)\n",
- dev->name, cmd);
+ netdev_err(dev, "(WE) : Invalid/Unknown Wireless Event (0x%04X)\n",
+ cmd);
return;
}
@@ -476,11 +476,13 @@ void wireless_send_event(struct net_device * dev,
if (descr->header_type == IW_HEADER_TYPE_POINT) {
/* Check if number of token fits within bounds */
if (wrqu->data.length > descr->max_tokens) {
- printk(KERN_ERR "%s (WE) : Wireless Event too big (%d)\n", dev->name, wrqu->data.length);
+ netdev_err(dev, "(WE) : Wireless Event too big (%d)\n",
+ wrqu->data.length);
return;
}
if (wrqu->data.length < descr->min_tokens) {
- printk(KERN_ERR "%s (WE) : Wireless Event too small (%d)\n", dev->name, wrqu->data.length);
+ netdev_err(dev, "(WE) : Wireless Event too small (%d)\n",
+ wrqu->data.length);
return;
}
/* Calculate extra_len - extra is NULL for restricted events */