author      David S. Miller <davem@davemloft.net>  2008-03-18 08:37:55 +0100
committer   David S. Miller <davem@davemloft.net>  2008-03-18 08:37:55 +0100
commit      577f99c1d08cf9cbdafd4e858dd13ff04d855090 (patch)
tree        0f726bbda9b18d311d4c95198bbd96cb7ac01db0 /net
parent      iwlwifi: fix bug to show hidden APs during scan (diff)
parent      Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torval... (diff)
download    linux-577f99c1d08cf9cbdafd4e858dd13ff04d855090.tar.xz
            linux-577f99c1d08cf9cbdafd4e858dd13ff04d855090.zip
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	drivers/net/wireless/rt2x00/rt2x00dev.c
	net/8021q/vlan_dev.c
Diffstat (limited to 'net')
30 files changed, 212 insertions, 136 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 556b2893b56b..1e5c9904571d 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -382,7 +382,7 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		vlan_dev_info(dev)->cnt_encap_on_xmit++;
 
 		pr_debug("%s: proto to encap: 0x%hx\n",
-			 __func__, htons(veth->h_vlan_proto));
+			 __func__, ntohs(veth->h_vlan_proto));
 		/* Construct the second two bytes. This field looks something
 		 * like:
 		 * usr_priority: 3 bits (high bits)
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index a2992280c3d1..e69244dd8de8 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -174,7 +174,7 @@ struct bnep_session {
 void bnep_net_setup(struct net_device *dev);
 int bnep_sock_init(void);
-int bnep_sock_cleanup(void);
+void bnep_sock_cleanup(void);
 
 static inline int bnep_mc_hash(__u8 *addr)
 {
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 81065e548a1f..201e5b1ce473 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -257,12 +257,10 @@ error:
 	return err;
 }
 
-int __exit bnep_sock_cleanup(void)
+void __exit bnep_sock_cleanup(void)
 {
 	if (bt_sock_unregister(BTPROTO_BNEP) < 0)
 		BT_ERR("Can't unregister BNEP socket");
 
 	proto_unregister(&bnep_proto);
-
-	return 0;
 }
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 930b58e7149a..aec6929f5c16 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -902,8 +902,6 @@ int hci_unregister_dev(struct hci_dev *hdev)
 	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
 
-	hci_unregister_sysfs(hdev);
-
 	write_lock_bh(&hci_dev_list_lock);
 	list_del(&hdev->list);
 	write_unlock_bh(&hci_dev_list_lock);
@@ -915,6 +913,8 @@ int hci_unregister_dev(struct hci_dev *hdev)
 
 	hci_notify(hdev, HCI_DEV_UNREG);
 
+	hci_unregister_sysfs(hdev);
+
 	__hci_dev_put(hdev);
 
 	return 0;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 14991323c273..b5d4019d3572 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -734,7 +734,7 @@ error:
 	return err;
 }
 
-int __exit hci_sock_cleanup(void)
+void __exit hci_sock_cleanup(void)
 {
 	if (bt_sock_unregister(BTPROTO_HCI) < 0)
 		BT_ERR("HCI socket unregistration failed");
@@ -742,6 +742,4 @@ int __exit hci_sock_cleanup(void)
 	hci_unregister_notifier(&hci_sock_nblock);
 
 	proto_unregister(&hci_sk_proto);
-
-	return 0;
 }
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 4cb8a1385539..44f5ce1fbfa4 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -458,7 +458,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	err = -EADDRNOTAVAIL;
 	if (!sysctl_ip_nonlocal_bind &&
 	    !inet->freebind &&
-	    addr->sin_addr.s_addr != INADDR_ANY &&
+	    addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
 	    chk_addr_ret != RTN_LOCAL &&
 	    chk_addr_ret != RTN_MULTICAST &&
 	    chk_addr_ret != RTN_BROADCAST)
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 091e6709f831..f3ceca31aa45 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -168,7 +168,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 		struct xfrm_encap_tmpl *encap = x->encap;
 		struct udphdr *uh;
 		__be32 *udpdata32;
-		unsigned int sport, dport;
+		__be16 sport, dport;
 		int encap_type;
 
 		spin_lock_bh(&x->lock);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index e7c9e4e72327..bb3cbe5ec36d 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -583,7 +583,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 		}
 
 		if (!mreq.imr_ifindex) {
-			if (mreq.imr_address.s_addr == INADDR_ANY) {
+			if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
 				inet->mc_index = 0;
 				inet->mc_addr = 0;
 				err = 0;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 4afce0572806..96138b128de8 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -103,6 +103,7 @@
    - '3' from resolv.h */
 
 #define NONE __constant_htonl(INADDR_NONE)
+#define ANY __constant_htonl(INADDR_ANY)
 
 /*
  * Public IP configuration
@@ -1476,19 +1477,19 @@ static int __init ip_auto_config_setup(char *addrs)
 			DBG(("IP-Config: Parameter #%d: `%s'\n", num, ip));
 			switch (num) {
 			case 0:
-				if ((ic_myaddr = in_aton(ip)) == INADDR_ANY)
+				if ((ic_myaddr = in_aton(ip)) == ANY)
 					ic_myaddr = NONE;
 				break;
 			case 1:
-				if ((ic_servaddr = in_aton(ip)) == INADDR_ANY)
+				if ((ic_servaddr = in_aton(ip)) == ANY)
 					ic_servaddr = NONE;
 				break;
 			case 2:
-				if ((ic_gateway = in_aton(ip)) == INADDR_ANY)
+				if ((ic_gateway = in_aton(ip)) == ANY)
 					ic_gateway = NONE;
 				break;
 			case 3:
-				if ((ic_netmask = in_aton(ip)) == INADDR_ANY)
+				if ((ic_netmask = in_aton(ip)) == ANY)
 					ic_netmask = NONE;
 				break;
 			case 4:
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index cbfef8b1f5e8..67f84f5035c4 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1035,6 +1035,13 @@ static void tcp_cwnd_validate(struct sock *sk)
  * introducing MSS oddities to segment boundaries. In rare cases where
  * mss_now != mss_cache, we will request caller to create a small skb
  * per input skb which could be mostly avoided here (if desired).
+ *
+ * We explicitly want to create a request for splitting write queue tail
+ * to a small skb for Nagle purposes while avoiding unnecessary modulos,
+ * thus all the complexity (cwnd_len is always MSS multiple which we
+ * return whenever allowed by the other factors). Basically we need the
+ * modulo only when the receiver window alone is the limiting factor or
+ * when we would be allowed to send the split-due-to-Nagle skb fully.
  */
 static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
 					unsigned int mss_now, unsigned int cwnd)
@@ -1048,10 +1055,11 @@ static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
 	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
 		return cwnd_len;
 
-	if (skb == tcp_write_queue_tail(sk) && cwnd_len <= skb->len)
+	needed = min(skb->len, window);
+
+	if (skb == tcp_write_queue_tail(sk) && cwnd_len <= needed)
 		return cwnd_len;
 
-	needed = min(skb->len, window);
 	return needed - needed % mss_now;
 }
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index e06bf0028bb1..684ec9c1ad38 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -381,7 +381,7 @@ int nf_ct_expect_related(struct nf_conntrack_expect *expect)
 	if (nf_ct_expect_count >= nf_ct_expect_max) {
 		if (net_ratelimit())
 			printk(KERN_WARNING
-			       "nf_conntrack: expectation table full");
+			       "nf_conntrack: expectation table full\n");
 		ret = -EMFILE;
 		goto out;
 	}
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 8b9be1e978cd..2bd9963b5b3e 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -19,14 +19,6 @@ static struct nf_ct_ext_type *nf_ct_ext_types[NF_CT_EXT_NUM];
 static DEFINE_MUTEX(nf_ct_ext_type_mutex);
 
-/* Horrible trick to figure out smallest amount worth kmallocing. */
-#define CACHE(x) (x) + 0 *
-enum {
-	NF_CT_EXT_MIN_SIZE =
-#include <linux/kmalloc_sizes.h>
-	1 };
-#undef CACHE
-
 void __nf_ct_ext_destroy(struct nf_conn *ct)
 {
 	unsigned int i;
@@ -53,7 +45,7 @@ EXPORT_SYMBOL(__nf_ct_ext_destroy);
 static void *
 nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
 {
-	unsigned int off, len, real_len;
+	unsigned int off, len;
 	struct nf_ct_ext_type *t;
 
 	rcu_read_lock();
@@ -61,16 +53,14 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
 	BUG_ON(t == NULL);
 	off = ALIGN(sizeof(struct nf_ct_ext), t->align);
 	len = off + t->len;
-	real_len = t->alloc_size;
 	rcu_read_unlock();
 
-	*ext = kzalloc(real_len, gfp);
+	*ext = kzalloc(t->alloc_size, gfp);
 	if (!*ext)
 		return NULL;
 
 	(*ext)->offset[id] = off;
 	(*ext)->len = len;
-	(*ext)->real_len = real_len;
 
 	return (void *)(*ext) + off;
 }
@@ -95,7 +85,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
 	newlen = newoff + t->len;
 	rcu_read_unlock();
 
-	if (newlen >= ct->ext->real_len) {
+	if (newlen >= ksize(ct->ext)) {
 		new = kmalloc(newlen, gfp);
 		if (!new)
 			return NULL;
@@ -114,7 +104,6 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
 			rcu_read_unlock();
 		}
 		kfree(ct->ext);
-		new->real_len = newlen;
 		ct->ext = new;
 	}
 
@@ -156,8 +145,6 @@ static void update_alloc_size(struct nf_ct_ext_type *type)
 			t1->alloc_size = ALIGN(t1->alloc_size, t2->align) + t2->len;
 		}
-		if (t1->alloc_size < NF_CT_EXT_MIN_SIZE)
-			t1->alloc_size = NF_CT_EXT_MIN_SIZE;
 	}
 }
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index bfc2928c1912..ddc80ea114cd 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -51,7 +51,7 @@ int nf_unregister_queue_handler(int pf, const struct nf_queue_handler *qh)
 		return -EINVAL;
 
 	mutex_lock(&queue_handler_mutex);
-	if (queue_handler[pf] != qh) {
+	if (queue_handler[pf] && queue_handler[pf] != qh) {
 		mutex_unlock(&queue_handler_mutex);
 		return -EINVAL;
 	}
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 7efa40d47393..bf3f19b21fe4 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -556,7 +556,7 @@ nfulnl_log_packet(unsigned int pf,
 	/* FIXME: do we want to make the size calculation conditional based on
 	 * what is actually present? way more branches and checks, but more
 	 * memory efficient... */
-	size = NLMSG_ALIGN(sizeof(struct nfgenmsg))
+	size = NLMSG_SPACE(sizeof(struct nfgenmsg))
 		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
 		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
@@ -702,20 +702,30 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
 	u_int16_t group_num = ntohs(nfmsg->res_id);
 	struct nfulnl_instance *inst;
+	struct nfulnl_msg_config_cmd *cmd = NULL;
 	int ret = 0;
 
+	if (nfula[NFULA_CFG_CMD]) {
+		u_int8_t pf = nfmsg->nfgen_family;
+		cmd = nla_data(nfula[NFULA_CFG_CMD]);
+
+		/* Commands without queue context */
+		switch (cmd->command) {
+		case NFULNL_CFG_CMD_PF_BIND:
+			return nf_log_register(pf, &nfulnl_logger);
+		case NFULNL_CFG_CMD_PF_UNBIND:
+			nf_log_unregister_pf(pf);
+			return 0;
+		}
+	}
+
 	inst = instance_lookup_get(group_num);
 	if (inst && inst->peer_pid != NETLINK_CB(skb).pid) {
 		ret = -EPERM;
 		goto out_put;
 	}
 
-	if (nfula[NFULA_CFG_CMD]) {
-		u_int8_t pf = nfmsg->nfgen_family;
-		struct nfulnl_msg_config_cmd *cmd;
-
-		cmd = nla_data(nfula[NFULA_CFG_CMD]);
-
+	if (cmd != NULL) {
 		switch (cmd->command) {
 		case NFULNL_CFG_CMD_BIND:
 			if (inst) {
@@ -738,14 +748,6 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 			instance_destroy(inst);
 			goto out;
-		case NFULNL_CFG_CMD_PF_BIND:
-			ret = nf_log_register(pf, &nfulnl_logger);
-			break;
-		case NFULNL_CFG_CMD_PF_UNBIND:
-			/* This is a bug and a feature. We cannot unregister
-			 * other handlers, like nfnetlink_inst can */
-			nf_log_unregister_pf(pf);
-			break;
 		default:
 			ret = -ENOTSUPP;
 			break;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 0043d3a9f87e..012cb6910820 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -224,7 +224,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 	struct net_device *indev;
 	struct net_device *outdev;
 
-	size = NLMSG_ALIGN(sizeof(struct nfgenmsg))
+	size = NLMSG_SPACE(sizeof(struct nfgenmsg))
 		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
 		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
@@ -703,19 +703,12 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 		/* Commands without queue context - might sleep */
 		switch (cmd->command) {
 		case NFQNL_CFG_CMD_PF_BIND:
-			ret = nf_register_queue_handler(ntohs(cmd->pf),
-							&nfqh);
-			break;
+			return nf_register_queue_handler(ntohs(cmd->pf),
+							 &nfqh);
 		case NFQNL_CFG_CMD_PF_UNBIND:
-			ret = nf_unregister_queue_handler(ntohs(cmd->pf),
-							  &nfqh);
-			break;
-		default:
-			break;
+			return nf_unregister_queue_handler(ntohs(cmd->pf),
+							   &nfqh);
 		}
-
-		if (ret < 0)
-			return ret;
 	}
 
 	rcu_read_lock();
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index e9a8794bc3ab..9fa2e0824708 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -95,8 +95,11 @@ static inline void localtime_2(struct xtm *r, time_t time)
 	 */
 	r->dse = time / 86400;
 
-	/* 1970-01-01 (w=0) was a Thursday (4). */
-	r->weekday = (4 + r->dse) % 7;
+	/*
+	 * 1970-01-01 (w=0) was a Thursday (4).
+	 * -1 and +1 map Sunday properly onto 7.
+	 */
+	r->weekday = (4 + r->dse - 1) % 7 + 1;
 }
 
 static void localtime_3(struct xtm *r, time_t time)
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index f19121d4795b..a39bf97f8830 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -143,7 +143,8 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
 		/* copy the peer address and timestamp */
 		if (!continue_call) {
 			if (msg->msg_name && msg->msg_namelen > 0)
-				memcpy(&msg->msg_name, &call->conn->trans->peer->srx,
+				memcpy(msg->msg_name,
+				       &call->conn->trans->peer->srx,
 				       sizeof(call->conn->trans->peer->srx));
 			sock_recv_timestamp(msg, &rx->sk, skb);
 		}
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index b18fa95ef248..c5c16b4b6e98 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -89,7 +89,7 @@ static const struct tcf_ext_map u32_ext_map = {
 
 static struct tc_u_common *u32_list;
 
-static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift)
+static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift)
 {
 	unsigned h = ntohl(key & sel->hmask)>>fshift;
 
@@ -137,7 +137,7 @@ next_knode:
 		for (i = n->sel.nkeys; i>0; i--, key++) {
 
-			if ((*(u32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
+			if ((*(__be32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
 				n = n->next;
 				goto next_knode;
 			}
@@ -182,7 +182,7 @@ check_terminal:
 		ht = n->ht_down;
 		sel = 0;
 		if (ht->divisor)
-			sel = ht->divisor&u32_hash_fold(*(u32*)(ptr+n->sel.hoff), &n->sel,n->fshift);
+			sel = ht->divisor&u32_hash_fold(*(__be32*)(ptr+n->sel.hoff), &n->sel,n->fshift);
 
 		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
 			goto next_ht;
@@ -190,7 +190,7 @@ check_terminal:
 		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
 			off2 = n->sel.off + 3;
 			if (n->sel.flags&TC_U32_VAROFFSET)
-				off2 += ntohs(n->sel.offmask & *(u16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
+				off2 += ntohs(n->sel.offmask & *(__be16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
 			off2 &= ~3;
 		}
 		if (n->sel.flags&TC_U32_EAT) {
diff --git a/net/sched/em_u32.c b/net/sched/em_u32.c
index 112796e4a7c4..953f1479f7da 100644
--- a/net/sched/em_u32.c
+++ b/net/sched/em_u32.c
@@ -35,7 +35,7 @@ static int em_u32_match(struct sk_buff *skb, struct tcf_ematch *em,
 	if (!tcf_valid_offset(skb, ptr, sizeof(u32)))
 		return 0;
 
-	return !(((*(u32*) ptr) ^ key->val) & key->mask);
+	return !(((*(__be32*) ptr) ^ key->val) & key->mask);
 }
 
 static struct tcf_ematch_ops em_u32_ops = {
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index a27511ebc4cb..ceefda025e2d 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -209,6 +209,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
 int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
 {
 	struct sctp_sockaddr_entry *addr, *temp;
+	int found = 0;
 
 	/* We hold the socket lock when calling this function,
 	 * and that acts as a writer synchronizing lock.
@@ -216,13 +217,14 @@ int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
 	list_for_each_entry_safe(addr, temp, &bp->address_list, list) {
 		if (sctp_cmp_addr_exact(&addr->a, del_addr)) {
 			/* Found the exact match. */
+			found = 1;
 			addr->valid = 0;
 			list_del_rcu(&addr->list);
 			break;
 		}
 	}
 
-	if (addr && !addr->valid) {
+	if (found) {
 		call_rcu(&addr->rcu, sctp_local_addr_free);
 		SCTP_DBG_OBJCNT_DEC(addr);
 		return 0;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 4db5a75e01ce..c1d7e3b5c4b4 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -944,7 +944,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb,
 static struct sctp_association *__sctp_rcv_asconf_lookup(
 					sctp_chunkhdr_t *ch,
 					const union sctp_addr *laddr,
-					__be32 peer_port,
+					__be16 peer_port,
 					struct sctp_transport **transportp)
 {
 	sctp_addip_chunk_t *asconf = (struct sctp_addip_chunk *)ch;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 2622215cd357..1937be583cd7 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -89,6 +89,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
 	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
 	struct sctp_sockaddr_entry *addr = NULL;
 	struct sctp_sockaddr_entry *temp;
+	int found = 0;
 
 	switch (ev) {
 	case NETDEV_UP:
@@ -111,13 +112,14 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
 				&sctp_local_addr_list, list) {
 			if (ipv6_addr_equal(&addr->a.v6.sin6_addr, &ifa->addr)) {
+				found = 1;
 				addr->valid = 0;
 				list_del_rcu(&addr->list);
 				break;
 			}
 		}
 		spin_unlock_bh(&sctp_local_addr_lock);
-		if (addr && !addr->valid)
+		if (found)
 			call_rcu(&addr->rcu, sctp_local_addr_free);
 		break;
 	}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 87512f14cf71..25be8f04de6e 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -337,14 +337,14 @@ static int sctp_v4_cmp_addr(const union sctp_addr *addr1,
 static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port)
 {
 	addr->v4.sin_family = AF_INET;
-	addr->v4.sin_addr.s_addr = INADDR_ANY;
+	addr->v4.sin_addr.s_addr = htonl(INADDR_ANY);
 	addr->v4.sin_port = port;
 }
 
 /* Is this a wildcard address? */
 static int sctp_v4_is_any(const union sctp_addr *addr)
 {
-	return INADDR_ANY == addr->v4.sin_addr.s_addr;
+	return htonl(INADDR_ANY) == addr->v4.sin_addr.s_addr;
 }
 
 /* This function checks if the address is a valid address to be used for
@@ -375,7 +375,7 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
 	int ret = inet_addr_type(&init_net, addr->v4.sin_addr.s_addr);
 
-	if (addr->v4.sin_addr.s_addr != INADDR_ANY &&
+	if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) &&
 	   ret != RTN_LOCAL &&
 	   !sp->inet.freebind &&
 	   !sysctl_ip_nonlocal_bind)
@@ -628,6 +628,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
 	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
 	struct sctp_sockaddr_entry *addr = NULL;
 	struct sctp_sockaddr_entry *temp;
+	int found = 0;
 
 	if (ifa->ifa_dev->dev->nd_net != &init_net)
 		return NOTIFY_DONE;
@@ -650,13 +651,14 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
 		list_for_each_entry_safe(addr, temp,
 					&sctp_local_addr_list, list) {
 			if (addr->a.v4.sin_addr.s_addr == ifa->ifa_local) {
+				found = 1;
 				addr->valid = 0;
 				list_del_rcu(&addr->list);
 				break;
 			}
 		}
 		spin_unlock_bh(&sctp_local_addr_lock);
-		if (addr && !addr->valid)
+		if (found)
 			call_rcu(&addr->rcu, sctp_local_addr_free);
 		break;
 	}
@@ -786,8 +788,8 @@ static int sctp_inet_cmp_addr(const union sctp_addr *addr1,
 	/* PF_INET only supports AF_INET addresses. */
 	if (addr1->sa.sa_family != addr2->sa.sa_family)
 		return 0;
-	if (INADDR_ANY == addr1->v4.sin_addr.s_addr ||
-	    INADDR_ANY == addr2->v4.sin_addr.s_addr)
+	if (htonl(INADDR_ANY) == addr1->v4.sin_addr.s_addr ||
+	    htonl(INADDR_ANY) == addr2->v4.sin_addr.s_addr)
 		return 1;
 	if (addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr)
 		return 1;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index e45be4e3f80d..578630e8e00d 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2375,6 +2375,14 @@ static int sctp_process_param(struct sctp_association *asoc,
 		asoc->peer.ipv4_address = 0;
 		asoc->peer.ipv6_address = 0;
 
+		/* Assume that peer supports the address family
+		 * by which it sends a packet.
+		 */
+		if (peer_addr->sa.sa_family == AF_INET6)
+			asoc->peer.ipv6_address = 1;
+		else if (peer_addr->sa.sa_family == AF_INET)
+			asoc->peer.ipv4_address = 1;
+
 		/* Cycle through address types; avoid divide by 0. */
 		sat = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
 		if (sat)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 1316694d0a21..a3138a0fe2c5 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2933,17 +2933,39 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
 					char __user *optval,
 					int optlen)
 {
+	struct sctp_assoc_value params;
+	struct sctp_sock *sp;
+	struct sctp_association *asoc;
 	int val;
+	int assoc_id = 0;
 
-	if (optlen != sizeof(int))
+	if (optlen < sizeof(int))
 		return -EINVAL;
-	if (get_user(val, (int __user *)optval))
-		return -EFAULT;
 
-	if (val < 0)
+	if (optlen == sizeof(int)) {
+		printk(KERN_WARNING
+		   "SCTP: Use of int in max_burst socket option deprecated\n");
+		printk(KERN_WARNING
+		   "SCTP: Use struct sctp_assoc_value instead\n");
+		if (copy_from_user(&val, optval, optlen))
+			return -EFAULT;
+	} else if (optlen == sizeof(struct sctp_assoc_value)) {
+		if (copy_from_user(&params, optval, optlen))
+			return -EFAULT;
+		val = params.assoc_value;
+		assoc_id = params.assoc_id;
+	} else
 		return -EINVAL;
 
-	sctp_sk(sk)->max_burst = val;
+	sp = sctp_sk(sk);
+
+	if (assoc_id != 0) {
+		asoc = sctp_id2assoc(sk, assoc_id);
+		if (!asoc)
+			return -EINVAL;
+		asoc->max_burst = val;
+	} else
+		sp->max_burst = val;
 
 	return 0;
 }
@@ -5005,20 +5027,45 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
 					char __user *optval,
 					int __user *optlen)
 {
-	int val;
+	struct sctp_assoc_value params;
+	struct sctp_sock *sp;
+	struct sctp_association *asoc;
 
 	if (len < sizeof(int))
 		return -EINVAL;
 
-	len = sizeof(int);
+	if (len == sizeof(int)) {
+		printk(KERN_WARNING
+		   "SCTP: Use of int in max_burst socket option deprecated\n");
+		printk(KERN_WARNING
+		   "SCTP: Use struct sctp_assoc_value instead\n");
+		params.assoc_id = 0;
+	} else if (len == sizeof (struct sctp_assoc_value)) {
+		if (copy_from_user(&params, optval, len))
+			return -EFAULT;
+	} else
+		return -EINVAL;
 
-	val = sctp_sk(sk)->max_burst;
-	if (put_user(len, optlen))
-		return -EFAULT;
-	if (copy_to_user(optval, &val, len))
-		return -EFAULT;
+	sp = sctp_sk(sk);
+
+	if (params.assoc_id != 0) {
+		asoc = sctp_id2assoc(sk, params.assoc_id);
+		if (!asoc)
+			return -EINVAL;
+		params.assoc_value = asoc->max_burst;
+	} else
+		params.assoc_value = sp->max_burst;
+
+	if (len == sizeof(int)) {
+		if (copy_to_user(optval, &params.assoc_value, len))
+			return -EFAULT;
+	} else {
+		if (copy_to_user(optval, &params, len))
+			return -EFAULT;
+	}
+
+	return 0;
 
-	return -ENOTSUPP;
 }
 
 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 61801a069ff0..bce9d527af08 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -317,7 +317,7 @@ gss_delete_sec_context(struct gss_ctx **context_handle)
 	if (!*context_handle)
 		return(GSS_S_NO_CONTEXT);
-	if ((*context_handle)->internal_ctx_id != 0)
+	if ((*context_handle)->internal_ctx_id)
 		(*context_handle)->mech_type->gm_ops
 			->gss_delete_sec_context((*context_handle)
 						->internal_ctx_id);
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index ea377e06afae..332eb47539e1 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -185,7 +185,7 @@ int svc_create_xprt(struct svc_serv *serv, char *xprt_name, unsigned short port,
 	struct svc_xprt_class *xcl;
 	struct sockaddr_in sin = {
 		.sin_family		= AF_INET,
-		.sin_addr.s_addr	= INADDR_ANY,
+		.sin_addr.s_addr	= htonl(INADDR_ANY),
 		.sin_port		= htons(port),
 	};
 	dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 0598b229c11d..981f190c1b39 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -156,7 +156,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
 	struct svc_rdma_op_ctxt *ctxt;
 	int ret = 0;
 
-	BUG_ON(sge_count >= 32);
+	BUG_ON(sge_count > RPCSVC_MAXPAGES);
 	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
 		"write_len=%d, xdr_sge=%p, sge_count=%d\n",
 		rmr, (unsigned long long)to, xdr_off,
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index f09444c451bc..16fd3f6718ff 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -54,7 +54,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
 					 int flags);
 static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
 static void svc_rdma_release_rqst(struct svc_rqst *);
-static void rdma_destroy_xprt(struct svcxprt_rdma *xprt);
 static void dto_tasklet_func(unsigned long data);
 static void svc_rdma_detach(struct svc_xprt *xprt);
 static void svc_rdma_free(struct svc_xprt *xprt);
@@ -247,6 +246,7 @@ static void dto_tasklet_func(unsigned long data)
 			sq_cq_reap(xprt);
 		}
 
+		svc_xprt_put(&xprt->sc_xprt);
 		spin_lock_irqsave(&dto_lock, flags);
 	}
 	spin_unlock_irqrestore(&dto_lock, flags);
@@ -275,8 +275,10 @@ static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
 	 * add it
 	 */
 	spin_lock_irqsave(&dto_lock, flags);
-	if (list_empty(&xprt->sc_dto_q))
+	if (list_empty(&xprt->sc_dto_q)) {
+		svc_xprt_get(&xprt->sc_xprt);
 		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
+	}
 	spin_unlock_irqrestore(&dto_lock, flags);
 
 	/* Tasklet does all the work to avoid irqsave locks. */
@@ -386,8 +388,10 @@ static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
 	 * add it
 	 */
 	spin_lock_irqsave(&dto_lock, flags);
-	if (list_empty(&xprt->sc_dto_q))
+	if (list_empty(&xprt->sc_dto_q)) {
+		svc_xprt_get(&xprt->sc_xprt);
 		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
+	}
 	spin_unlock_irqrestore(&dto_lock, flags);
 
 	/* Tasklet does all the work to avoid irqsave locks. */
@@ -611,6 +615,7 @@ static int rdma_cma_handler(struct rdma_cm_id *cma_id,
 	switch (event->event) {
 	case RDMA_CM_EVENT_ESTABLISHED:
 		/* Accept complete */
+		svc_xprt_get(xprt);
 		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
 			"cm_id=%p\n", xprt, cma_id);
 		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
@@ -661,15 +666,15 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
 	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
 	if (IS_ERR(listen_id)) {
-		rdma_destroy_xprt(cma_xprt);
+		svc_xprt_put(&cma_xprt->sc_xprt);
 		dprintk("svcrdma: rdma_create_id failed = %ld\n",
 			PTR_ERR(listen_id));
 		return (void *)listen_id;
 	}
 	ret = rdma_bind_addr(listen_id, sa);
 	if (ret) {
-		rdma_destroy_xprt(cma_xprt);
 		rdma_destroy_id(listen_id);
+		svc_xprt_put(&cma_xprt->sc_xprt);
 		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
 		return ERR_PTR(ret);
 	}
@@ -678,8 +683,9 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
 	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
 	if (ret) {
 		rdma_destroy_id(listen_id);
-		rdma_destroy_xprt(cma_xprt);
+		svc_xprt_put(&cma_xprt->sc_xprt);
 		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
+		return ERR_PTR(ret);
 	}
 
 	/*
@@ -820,6 +826,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 		newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
 		newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
 	}
+	svc_xprt_get(&newxprt->sc_xprt);
 	newxprt->sc_qp = newxprt->sc_cm_id->qp;
 
 	/* Register all of physical memory */
@@ -891,8 +898,15 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 
  errout:
 	dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
+	/* Take a reference in case the DTO handler runs */
+	svc_xprt_get(&newxprt->sc_xprt);
+	if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp)) {
+		ib_destroy_qp(newxprt->sc_qp);
+		svc_xprt_put(&newxprt->sc_xprt);
+	}
 	rdma_destroy_id(newxprt->sc_cm_id);
-	rdma_destroy_xprt(newxprt);
+	/* This call to put will destroy the transport */
+	svc_xprt_put(&newxprt->sc_xprt);
 	return NULL;
 }
@@ -919,54 +933,60 @@ static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
 	rqstp->rq_xprt_ctxt = NULL;
 }
 
-/* Disable data ready events for this connection */
+/*
+ * When connected, an svc_xprt has at least three references:
+ *
+ * - A reference held by the QP. We still hold that here because this
+ *   code deletes the QP and puts the reference.
+ *
+ * - A reference held by the cm_id between the ESTABLISHED and
+ *   DISCONNECTED events. If the remote peer disconnected first, this
+ *   reference could be gone.
+ *
+ * - A reference held by the svc_recv code that called this function
+ *   as part of close processing.
+ *
+ * At a minimum two references should still be held.
+ */
 static void svc_rdma_detach(struct svc_xprt *xprt)
 {
 	struct svcxprt_rdma *rdma =
 		container_of(xprt, struct svcxprt_rdma, sc_xprt);
-	unsigned long flags;
-
 	dprintk("svc: svc_rdma_detach(%p)\n", xprt);
-	/*
-	 * Shutdown the connection. This will ensure we don't get any
-	 * more events from the provider.
-	 */
+
+	/* Disconnect and flush posted WQE */
 	rdma_disconnect(rdma->sc_cm_id);
-	rdma_destroy_id(rdma->sc_cm_id);
 
-	/* We may already be on the DTO list */
-	spin_lock_irqsave(&dto_lock, flags);
-	if (!list_empty(&rdma->sc_dto_q))
-		list_del_init(&rdma->sc_dto_q);
-	spin_unlock_irqrestore(&dto_lock, flags);
+	/* Destroy the QP if present (not a listener) */
+	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp)) {
+		ib_destroy_qp(rdma->sc_qp);
+		svc_xprt_put(xprt);
+	}
+
+	/* Destroy the CM ID */
+	rdma_destroy_id(rdma->sc_cm_id);
 }
 
 static void svc_rdma_free(struct svc_xprt *xprt)
 {
 	struct svcxprt_rdma *rdma = (struct svcxprt_rdma *)xprt;
 	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);
-	rdma_destroy_xprt(rdma);
-	kfree(rdma);
-}
-
-static void rdma_destroy_xprt(struct svcxprt_rdma *xprt)
-{
-	if (xprt->sc_qp && !IS_ERR(xprt->sc_qp))
-		ib_destroy_qp(xprt->sc_qp);
-
-	if (xprt->sc_sq_cq && !IS_ERR(xprt->sc_sq_cq))
-		ib_destroy_cq(xprt->sc_sq_cq);
+	/* We should only be called from kref_put */
+	BUG_ON(atomic_read(&xprt->xpt_ref.refcount) != 0);
+	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
+		ib_destroy_cq(rdma->sc_sq_cq);
 
-	if (xprt->sc_rq_cq && !IS_ERR(xprt->sc_rq_cq))
-		ib_destroy_cq(xprt->sc_rq_cq);
+	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
+		ib_destroy_cq(rdma->sc_rq_cq);
 
-	if (xprt->sc_phys_mr && !IS_ERR(xprt->sc_phys_mr))
-		ib_dereg_mr(xprt->sc_phys_mr);
+	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
+		ib_dereg_mr(rdma->sc_phys_mr);
 
-	if (xprt->sc_pd && !IS_ERR(xprt->sc_pd))
-		ib_dealloc_pd(xprt->sc_pd);
+	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
+		ib_dealloc_pd(rdma->sc_pd);
 
-	destroy_context_cache(xprt->sc_ctxt_head);
+	destroy_context_cache(rdma->sc_ctxt_head);
+	kfree(rdma);
 }
 
 static int svc_rdma_has_wspace(struct svc_xprt *xprt)
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 02c522c17de5..a564c1a39ec5 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -614,7 +614,11 @@ xprt_rdma_free(void *buffer)
 		return;
 
 	req = container_of(buffer, struct rpcrdma_req, rl_xdr_buf[0]);
-	r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);
+	if (req->rl_iov.length == 0) {	/* see allocate above */
+		r_xprt = container_of(((struct rpcrdma_req *) req->rl_buffer)->rl_buffer,
+				      struct rpcrdma_xprt, rx_buf);
+	} else
+		r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);
 	rep = req->rl_reply;
 
 	dprintk("RPC: %s: called on 0x%p%s\n",