Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/cipso_ipv4.c                           | 18
-rw-r--r--  net/ipv4/icmp.c                                 |  3
-rw-r--r--  net/ipv4/ip_output.c                            | 22
-rw-r--r--  net/ipv4/ip_sockglue.c                          |  9
-rw-r--r--  net/ipv4/ipconfig.c                             |  8
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto.c                     |  2
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto_ah.c                  |  1
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto_esp.c                 |  1
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto_tcp.c                 |  1
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto_udp.c                 |  1
-rw-r--r--  net/ipv4/ipvs/ip_vs_sync.c                      | 80
-rw-r--r--  net/ipv4/netfilter/ip_queue.c                   |  5
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c              |  6
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c  |  2
-rw-r--r--  net/ipv4/route.c                                | 38
-rw-r--r--  net/ipv4/tcp_cong.c                             | 12
-rw-r--r--  net/ipv4/tcp_cubic.c                            |  4
-rw-r--r--  net/ipv4/tcp_hybla.c                            |  6
-rw-r--r--  net/ipv4/tcp_input.c                            | 18
-rw-r--r--  net/ipv4/tcp_ipv4.c                             | 10
-rw-r--r--  net/ipv4/tcp_vegas.c                            | 16
-rw-r--r--  net/ipv4/tcp_veno.c                             | 14
-rw-r--r--  net/ipv4/udp.c                                  |  7
23 files changed, 166 insertions, 118 deletions
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 4637ded3dba8..05afb576d935 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -983,7 +983,7 @@ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def,
return -EFAULT;
for (iter = 0; iter < enumcat_len; iter += 2) {
- cat = ntohs(get_unaligned((__be16 *)&enumcat[iter]));
+ cat = get_unaligned_be16(&enumcat[iter]);
if (cat <= cat_prev)
return -EFAULT;
cat_prev = cat;
@@ -1052,7 +1052,7 @@ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def,
for (iter = 0; iter < net_cat_len; iter += 2) {
ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat,
- ntohs(get_unaligned((__be16 *)&net_cat[iter])),
+ get_unaligned_be16(&net_cat[iter]),
GFP_ATOMIC);
if (ret_val != 0)
return ret_val;
@@ -1086,10 +1086,9 @@ static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def,
return -EFAULT;
for (iter = 0; iter < rngcat_len; iter += 4) {
- cat_high = ntohs(get_unaligned((__be16 *)&rngcat[iter]));
+ cat_high = get_unaligned_be16(&rngcat[iter]);
if ((iter + 4) <= rngcat_len)
- cat_low = ntohs(
- get_unaligned((__be16 *)&rngcat[iter + 2]));
+ cat_low = get_unaligned_be16(&rngcat[iter + 2]);
else
cat_low = 0;
@@ -1188,10 +1187,9 @@ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def,
u16 cat_high;
for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) {
- cat_high = ntohs(get_unaligned((__be16 *)&net_cat[net_iter]));
+ cat_high = get_unaligned_be16(&net_cat[net_iter]);
if ((net_iter + 4) <= net_cat_len)
- cat_low = ntohs(
- get_unaligned((__be16 *)&net_cat[net_iter + 2]));
+ cat_low = get_unaligned_be16(&net_cat[net_iter + 2]);
else
cat_low = 0;
@@ -1562,7 +1560,7 @@ int cipso_v4_validate(unsigned char **option)
}
rcu_read_lock();
- doi_def = cipso_v4_doi_search(ntohl(get_unaligned((__be32 *)&opt[2])));
+ doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
if (doi_def == NULL) {
err_offset = 2;
goto validate_return_locked;
@@ -1843,7 +1841,7 @@ static int cipso_v4_getattr(const unsigned char *cipso,
if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0)
return 0;
- doi = ntohl(get_unaligned((__be32 *)&cipso[2]));
+ doi = get_unaligned_be32(&cipso[2]);
rcu_read_lock();
doi_def = cipso_v4_doi_search(doi);
if (doi_def == NULL)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index c67d00e8c600..87397351ddac 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -691,7 +691,8 @@ static void icmp_unreach(struct sk_buff *skb)
NIPQUAD(iph->daddr));
} else {
info = ip_rt_frag_needed(net, iph,
- ntohs(icmph->un.frag.mtu));
+ ntohs(icmph->un.frag.mtu),
+ skb->dev);
if (!info)
goto out;
}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 08349267ceb4..e527628f56cf 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -753,23 +753,15 @@ static inline int ip_ufo_append_data(struct sock *sk,
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
sk->sk_sndmsg_off = 0;
- }
- err = skb_append_datato_frags(sk,skb, getfrag, from,
- (length - transhdrlen));
- if (!err) {
- /* specify the length of each IP datagram fragment*/
+ /* specify the length of each IP datagram fragment */
skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
__skb_queue_tail(&sk->sk_write_queue, skb);
-
- return 0;
}
- /* There is not enough support do UFO ,
- * so follow normal path
- */
- kfree_skb(skb);
- return err;
+
+ return skb_append_datato_frags(sk, skb, getfrag, from,
+ (length - transhdrlen));
}
/*
@@ -863,9 +855,9 @@ int ip_append_data(struct sock *sk,
csummode = CHECKSUM_PARTIAL;
inet->cork.length += length;
- if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
- (rt->u.dst.dev->features & NETIF_F_UFO)) {
-
+ if (((length> mtu) || !skb_queue_empty(&sk->sk_write_queue)) &&
+ (sk->sk_protocol == IPPROTO_UDP) &&
+ (rt->u.dst.dev->features & NETIF_F_UFO)) {
err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
fragheaderlen, transhdrlen, mtu,
flags);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 4d8d95404f45..e0514e82308e 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1186,7 +1186,14 @@ int ip_getsockopt(struct sock *sk, int level,
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
- int err = do_ip_getsockopt(sk, level, optname, optval, optlen);
+ int err;
+
+ if (optname == MCAST_MSFILTER)
+ return compat_mc_getsockopt(sk, level, optname, optval, optlen,
+ ip_getsockopt);
+
+ err = do_ip_getsockopt(sk, level, optname, optval, optlen);
+
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 0f42d1c1f690..89dee4346f60 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -412,12 +412,12 @@ static struct packet_type rarp_packet_type __initdata = {
.func = ic_rarp_recv,
};
-static inline void ic_rarp_init(void)
+static inline void __init ic_rarp_init(void)
{
dev_add_pack(&rarp_packet_type);
}
-static inline void ic_rarp_cleanup(void)
+static inline void __init ic_rarp_cleanup(void)
{
dev_remove_pack(&rarp_packet_type);
}
@@ -682,7 +682,7 @@ static void __init ic_bootp_init_ext(u8 *e)
/*
* Initialize the DHCP/BOOTP mechanism.
*/
-static inline void ic_bootp_init(void)
+static inline void __init ic_bootp_init(void)
{
int i;
@@ -696,7 +696,7 @@ static inline void ic_bootp_init(void)
/*
* DHCP/BOOTP cleanup.
*/
-static inline void ic_bootp_cleanup(void)
+static inline void __init ic_bootp_cleanup(void)
{
dev_remove_pack(&bootp_packet_type);
}
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c
index dde28a250d92..4b1c16cbb16b 100644
--- a/net/ipv4/ipvs/ip_vs_proto.c
+++ b/net/ipv4/ipvs/ip_vs_proto.c
@@ -148,7 +148,7 @@ const char * ip_vs_state_name(__u16 proto, int state)
struct ip_vs_protocol *pp = ip_vs_proto_get(proto);
if (pp == NULL || pp->state_name == NULL)
- return "ERR!";
+ return (IPPROTO_IP == proto) ? "NONE" : "ERR!";
return pp->state_name(state);
}
diff --git a/net/ipv4/ipvs/ip_vs_proto_ah.c b/net/ipv4/ipvs/ip_vs_proto_ah.c
index a842676e1c69..4bf835e1d86d 100644
--- a/net/ipv4/ipvs/ip_vs_proto_ah.c
+++ b/net/ipv4/ipvs/ip_vs_proto_ah.c
@@ -160,6 +160,7 @@ static void ah_exit(struct ip_vs_protocol *pp)
struct ip_vs_protocol ip_vs_protocol_ah = {
.name = "AH",
.protocol = IPPROTO_AH,
+ .num_states = 1,
.dont_defrag = 1,
.init = ah_init,
.exit = ah_exit,
diff --git a/net/ipv4/ipvs/ip_vs_proto_esp.c b/net/ipv4/ipvs/ip_vs_proto_esp.c
index aef0d3ee8e44..db6a6b7b1a0b 100644
--- a/net/ipv4/ipvs/ip_vs_proto_esp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_esp.c
@@ -159,6 +159,7 @@ static void esp_exit(struct ip_vs_protocol *pp)
struct ip_vs_protocol ip_vs_protocol_esp = {
.name = "ESP",
.protocol = IPPROTO_ESP,
+ .num_states = 1,
.dont_defrag = 1,
.init = esp_init,
.exit = esp_exit,
diff --git a/net/ipv4/ipvs/ip_vs_proto_tcp.c b/net/ipv4/ipvs/ip_vs_proto_tcp.c
index 620e40ff79a9..b83dc14b0a4d 100644
--- a/net/ipv4/ipvs/ip_vs_proto_tcp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_tcp.c
@@ -594,6 +594,7 @@ static void ip_vs_tcp_exit(struct ip_vs_protocol *pp)
struct ip_vs_protocol ip_vs_protocol_tcp = {
.name = "TCP",
.protocol = IPPROTO_TCP,
+ .num_states = IP_VS_TCP_S_LAST,
.dont_defrag = 0,
.appcnt = ATOMIC_INIT(0),
.init = ip_vs_tcp_init,
diff --git a/net/ipv4/ipvs/ip_vs_proto_udp.c b/net/ipv4/ipvs/ip_vs_proto_udp.c
index 1caa2908373f..75771cb3cd6f 100644
--- a/net/ipv4/ipvs/ip_vs_proto_udp.c
+++ b/net/ipv4/ipvs/ip_vs_proto_udp.c
@@ -409,6 +409,7 @@ static void udp_exit(struct ip_vs_protocol *pp)
struct ip_vs_protocol ip_vs_protocol_udp = {
.name = "UDP",
.protocol = IPPROTO_UDP,
+ .num_states = IP_VS_UDP_S_LAST,
.dont_defrag = 0,
.init = udp_init,
.exit = udp_exit,
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 69c56663cc9a..eff54efe0351 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -288,11 +288,16 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
char *p;
int i;
+ if (buflen < sizeof(struct ip_vs_sync_mesg)) {
+ IP_VS_ERR_RL("sync message header too short\n");
+ return;
+ }
+
/* Convert size back to host byte order */
m->size = ntohs(m->size);
if (buflen != m->size) {
- IP_VS_ERR("bogus message\n");
+ IP_VS_ERR_RL("bogus sync message size\n");
return;
}
@@ -307,9 +312,48 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
for (i=0; i<m->nr_conns; i++) {
unsigned flags, state;
- s = (struct ip_vs_sync_conn *)p;
+ if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
+ IP_VS_ERR_RL("bogus conn in sync message\n");
+ return;
+ }
+ s = (struct ip_vs_sync_conn *) p;
flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC;
+ flags &= ~IP_VS_CONN_F_HASHED;
+ if (flags & IP_VS_CONN_F_SEQ_MASK) {
+ opt = (struct ip_vs_sync_conn_options *)&s[1];
+ p += FULL_CONN_SIZE;
+ if (p > buffer+buflen) {
+ IP_VS_ERR_RL("bogus conn options in sync message\n");
+ return;
+ }
+ } else {
+ opt = NULL;
+ p += SIMPLE_CONN_SIZE;
+ }
+
state = ntohs(s->state);
+ if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
+ pp = ip_vs_proto_get(s->protocol);
+ if (!pp) {
+ IP_VS_ERR_RL("Unsupported protocol %u in sync msg\n",
+ s->protocol);
+ continue;
+ }
+ if (state >= pp->num_states) {
+ IP_VS_DBG(2, "Invalid %s state %u in sync msg\n",
+ pp->name, state);
+ continue;
+ }
+ } else {
+ /* protocol in templates is not used for state/timeout */
+ pp = NULL;
+ if (state > 0) {
+ IP_VS_DBG(2, "Invalid template state %u in sync msg\n",
+ state);
+ state = 0;
+ }
+ }
+
if (!(flags & IP_VS_CONN_F_TEMPLATE))
cp = ip_vs_conn_in_get(s->protocol,
s->caddr, s->cport,
@@ -345,14 +389,9 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
IP_VS_ERR("ip_vs_conn_new failed\n");
return;
}
- cp->state = state;
} else if (!cp->dest) {
dest = ip_vs_try_bind_dest(cp);
- if (!dest) {
- /* it is an unbound entry created by
- * synchronization */
- cp->flags = flags | IP_VS_CONN_F_HASHED;
- } else
+ if (dest)
atomic_dec(&dest->refcnt);
} else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
(cp->state != state)) {
@@ -371,23 +410,22 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
}
}
- if (flags & IP_VS_CONN_F_SEQ_MASK) {
- opt = (struct ip_vs_sync_conn_options *)&s[1];
+ if (opt)
memcpy(&cp->in_seq, opt, sizeof(*opt));
- p += FULL_CONN_SIZE;
- } else
- p += SIMPLE_CONN_SIZE;
-
atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
cp->state = state;
- pp = ip_vs_proto_get(s->protocol);
- cp->timeout = pp->timeout_table[cp->state];
+ cp->old_state = cp->state;
+ /*
+ * We can not recover the right timeout for templates
+ * in all cases, we can not find the right fwmark
+ * virtual service. If needed, we can do it for
+ * non-fwmark persistent services.
+ */
+ if (!(flags & IP_VS_CONN_F_TEMPLATE) && pp->timeout_table)
+ cp->timeout = pp->timeout_table[state];
+ else
+ cp->timeout = (3*60*HZ);
ip_vs_conn_put(cp);
-
- if (p > buffer+buflen) {
- IP_VS_ERR("bogus message\n");
- return;
- }
}
}
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 719be29f7506..26a37cedcf2e 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -296,9 +296,8 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
if (v->data_len > 0xFFFF)
return -EINVAL;
if (diff > skb_tailroom(e->skb)) {
- nskb = skb_copy_expand(e->skb, 0,
- diff - skb_tailroom(e->skb),
- GFP_ATOMIC);
+ nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
+ diff, GFP_ATOMIC);
if (!nskb) {
printk(KERN_WARNING "ip_queue: error "
"in mangle, dropping packet\n");
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 22d8e7cd9197..1819ad7ab910 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -169,14 +169,14 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
/* create proc dir entry */
sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(ip));
- c->pde = proc_create(buffer, S_IWUSR|S_IRUSR,
- clusterip_procdir, &clusterip_proc_fops);
+ c->pde = proc_create_data(buffer, S_IWUSR|S_IRUSR,
+ clusterip_procdir,
+ &clusterip_proc_fops, c);
if (!c->pde) {
kfree(c);
return NULL;
}
}
- c->pde->data = c;
#endif
write_lock_bh(&clusterip_lock);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index cacb9cb27dab..5a955c440364 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -303,7 +303,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
const struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_tuple tuple;
- NF_CT_TUPLE_U_BLANK(&tuple);
+ memset(&tuple, 0, sizeof(tuple));
tuple.src.u3.ip = inet->rcv_saddr;
tuple.src.u.tcp.port = inet->sport;
tuple.dst.u3.ip = inet->daddr;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ce25a13f3430..5e3685c5c407 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1430,11 +1430,13 @@ static inline unsigned short guess_mtu(unsigned short old_mtu)
}
unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
- unsigned short new_mtu)
+ unsigned short new_mtu,
+ struct net_device *dev)
{
- int i;
+ int i, k;
unsigned short old_mtu = ntohs(iph->tot_len);
struct rtable *rth;
+ int ikeys[2] = { dev->ifindex, 0 };
__be32 skeys[2] = { iph->saddr, 0, };
__be32 daddr = iph->daddr;
unsigned short est_mtu = 0;
@@ -1442,22 +1444,26 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
if (ipv4_config.no_pmtu_disc)
return 0;
- for (i = 0; i < 2; i++) {
- unsigned hash = rt_hash(daddr, skeys[i], 0);
+ for (k = 0; k < 2; k++) {
+ for (i = 0; i < 2; i++) {
+ unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]);
- rcu_read_lock();
- for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
- rth = rcu_dereference(rth->u.dst.rt_next)) {
- if (rth->fl.fl4_dst == daddr &&
- rth->fl.fl4_src == skeys[i] &&
- rth->rt_dst == daddr &&
- rth->rt_src == iph->saddr &&
- rth->fl.iif == 0 &&
- !(dst_metric_locked(&rth->u.dst, RTAX_MTU)) &&
- net_eq(dev_net(rth->u.dst.dev), net) &&
- rth->rt_genid == atomic_read(&rt_genid)) {
+ rcu_read_lock();
+ for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
+ rth = rcu_dereference(rth->u.dst.rt_next)) {
unsigned short mtu = new_mtu;
+ if (rth->fl.fl4_dst != daddr ||
+ rth->fl.fl4_src != skeys[i] ||
+ rth->rt_dst != daddr ||
+ rth->rt_src != iph->saddr ||
+ rth->fl.oif != ikeys[k] ||
+ rth->fl.iif != 0 ||
+ dst_metric_locked(&rth->u.dst, RTAX_MTU) ||
+ !net_eq(dev_net(rth->u.dst.dev), net) ||
+ rth->rt_genid != atomic_read(&rt_genid))
+ continue;
+
if (new_mtu < 68 || new_mtu >= old_mtu) {
/* BSD 4.2 compatibility hack :-( */
@@ -1483,8 +1489,8 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
est_mtu = mtu;
}
}
+ rcu_read_unlock();
}
- rcu_read_unlock();
}
return est_mtu ? : new_mtu;
}
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 3a6be23d222f..6a250828b767 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -285,14 +285,12 @@ int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
if (in_flight >= tp->snd_cwnd)
return 1;
- if (!sk_can_gso(sk))
- return 0;
-
left = tp->snd_cwnd - in_flight;
- if (sysctl_tcp_tso_win_divisor)
- return left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd;
- else
- return left <= tcp_max_burst(tp);
+ if (sk_can_gso(sk) &&
+ left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
+ left * tp->mss_cache < sk->sk_gso_max_size)
+ return 1;
+ return left <= tcp_max_burst(tp);
}
EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index eb5b9854c8c7..4a1221e5e8ee 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -15,8 +15,8 @@
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/math64.h>
#include <net/tcp.h>
-#include <asm/div64.h>
#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation
* max_cwnd = snd_cwnd * beta
@@ -128,7 +128,7 @@ static u32 cubic_root(u64 a)
* x = ( 2 * x + a / x ) / 3
* k+1 k k
*/
- x = (2 * x + (u32)div64_64(a, (u64)x * (u64)(x - 1)));
+ x = (2 * x + (u32)div64_u64(a, (u64)x * (u64)(x - 1)));
x = ((x * 341) >> 10);
return x;
}
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index 44618b675916..bfcbd148a89d 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -101,8 +101,10 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
if (!tcp_is_cwnd_limited(sk, in_flight))
return;
- if (!ca->hybla_en)
- return tcp_reno_cong_avoid(sk, ack, in_flight);
+ if (!ca->hybla_en) {
+ tcp_reno_cong_avoid(sk, ack, in_flight);
+ return;
+ }
if (ca->rho == 0)
hybla_recalc_param(sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0298f80681f2..eda4f4a233f3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1172,8 +1172,8 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
struct tcp_sack_block_wire *sp, int num_sacks,
u32 prior_snd_una)
{
- u32 start_seq_0 = ntohl(get_unaligned(&sp[0].start_seq));
- u32 end_seq_0 = ntohl(get_unaligned(&sp[0].end_seq));
+ u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
+ u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
int dup_sack = 0;
if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
@@ -1181,8 +1181,8 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
tcp_dsack_seen(tp);
NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
} else if (num_sacks > 1) {
- u32 end_seq_1 = ntohl(get_unaligned(&sp[1].end_seq));
- u32 start_seq_1 = ntohl(get_unaligned(&sp[1].start_seq));
+ u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
+ u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
if (!after(end_seq_0, end_seq_1) &&
!before(start_seq_0, start_seq_1)) {
@@ -1453,8 +1453,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
for (i = 0; i < num_sacks; i++) {
int dup_sack = !i && found_dup_sack;
- sp[used_sacks].start_seq = ntohl(get_unaligned(&sp_wire[i].start_seq));
- sp[used_sacks].end_seq = ntohl(get_unaligned(&sp_wire[i].end_seq));
+ sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
+ sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
if (!tcp_is_sackblock_valid(tp, dup_sack,
sp[used_sacks].start_seq,
@@ -3340,7 +3340,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
switch (opcode) {
case TCPOPT_MSS:
if (opsize == TCPOLEN_MSS && th->syn && !estab) {
- u16 in_mss = ntohs(get_unaligned((__be16 *)ptr));
+ u16 in_mss = get_unaligned_be16(ptr);
if (in_mss) {
if (opt_rx->user_mss &&
opt_rx->user_mss < in_mss)
@@ -3369,8 +3369,8 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
((estab && opt_rx->tstamp_ok) ||
(!estab && sysctl_tcp_timestamps))) {
opt_rx->saw_tstamp = 1;
- opt_rx->rcv_tsval = ntohl(get_unaligned((__be32 *)ptr));
- opt_rx->rcv_tsecr = ntohl(get_unaligned((__be32 *)(ptr+4)));
+ opt_rx->rcv_tsval = get_unaligned_be32(ptr);
+ opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
}
break;
case TCPOPT_SACK_PERM:
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0e9bc120707d..cd601a866c2f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2214,9 +2214,6 @@ static int tcp_seq_open(struct inode *inode, struct file *file)
struct tcp_iter_state *s;
int err;
- if (unlikely(afinfo == NULL))
- return -EINVAL;
-
err = seq_open_net(inode, file, &afinfo->seq_ops,
sizeof(struct tcp_iter_state));
if (err < 0)
@@ -2241,10 +2238,9 @@ int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
afinfo->seq_ops.next = tcp_seq_next;
afinfo->seq_ops.stop = tcp_seq_stop;
- p = proc_net_fops_create(net, afinfo->name, S_IRUGO, &afinfo->seq_fops);
- if (p)
- p->data = afinfo;
- else
+ p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
+ &afinfo->seq_fops, afinfo);
+ if (!p)
rc = -ENOMEM;
return rc;
}
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index be24d6ee34bd..14504dada116 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -167,8 +167,10 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
struct tcp_sock *tp = tcp_sk(sk);
struct vegas *vegas = inet_csk_ca(sk);
- if (!vegas->doing_vegas_now)
- return tcp_reno_cong_avoid(sk, ack, in_flight);
+ if (!vegas->doing_vegas_now) {
+ tcp_reno_cong_avoid(sk, ack, in_flight);
+ return;
+ }
/* The key players are v_beg_snd_una and v_beg_snd_nxt.
*
@@ -229,7 +231,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
*/
tcp_reno_cong_avoid(sk, ack, in_flight);
} else {
- u32 rtt, target_cwnd, diff;
+ u32 rtt, diff;
+ u64 target_cwnd;
/* We have enough RTT samples, so, using the Vegas
* algorithm, we determine if we should increase or
@@ -252,8 +255,9 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
* We keep it as a fixed point number with
* V_PARAM_SHIFT bits to the right of the binary point.
*/
- target_cwnd = ((old_wnd * vegas->baseRTT)
- << V_PARAM_SHIFT) / rtt;
+ target_cwnd = ((u64)old_wnd * vegas->baseRTT);
+ target_cwnd <<= V_PARAM_SHIFT;
+ do_div(target_cwnd, rtt);
/* Calculate the difference between the window we had,
* and the window we would like to have. This quantity
@@ -279,7 +283,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
* utilization.
*/
tp->snd_cwnd = min(tp->snd_cwnd,
- (target_cwnd >>
+ ((u32)target_cwnd >>
V_PARAM_SHIFT)+1);
} else if (tp->snd_cwnd <= tp->snd_ssthresh) {
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index d16689e98516..d08b2e855c22 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -119,8 +119,10 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
struct tcp_sock *tp = tcp_sk(sk);
struct veno *veno = inet_csk_ca(sk);
- if (!veno->doing_veno_now)
- return tcp_reno_cong_avoid(sk, ack, in_flight);
+ if (!veno->doing_veno_now) {
+ tcp_reno_cong_avoid(sk, ack, in_flight);
+ return;
+ }
/* limited by applications */
if (!tcp_is_cwnd_limited(sk, in_flight))
@@ -133,7 +135,8 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
*/
tcp_reno_cong_avoid(sk, ack, in_flight);
} else {
- u32 rtt, target_cwnd;
+ u64 target_cwnd;
+ u32 rtt;
/* We have enough rtt samples, so, using the Veno
* algorithm, we determine the state of the network.
@@ -141,8 +144,9 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
rtt = veno->minrtt;
- target_cwnd = ((tp->snd_cwnd * veno->basertt)
- << V_PARAM_SHIFT) / rtt;
+ target_cwnd = (tp->snd_cwnd * veno->basertt);
+ target_cwnd <<= V_PARAM_SHIFT;
+ do_div(target_cwnd, rtt);
veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1f535e315188..db1cb7c96d63 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1605,10 +1605,9 @@ int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
afinfo->seq_ops.next = udp_seq_next;
afinfo->seq_ops.stop = udp_seq_stop;
- p = proc_net_fops_create(net, afinfo->name, S_IRUGO, &afinfo->seq_fops);
- if (p)
- p->data = afinfo;
- else
+ p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
+ &afinfo->seq_fops, afinfo);
+ if (!p)
rc = -ENOMEM;
return rc;
}