Diffstat (limited to 'net/decnet/dn_route.c')
-rw-r--r--  net/decnet/dn_route.c | 206
1 file changed, 103 insertions(+), 103 deletions(-)
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 9881933167bd..c1b5502f195b 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -43,7 +43,7 @@
/******************************************************************************
(c) 1995-1998 E.M. Serrat emserrat@geocities.com
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
@@ -167,11 +167,11 @@ static void dn_dst_check_expire(unsigned long dummy)
while((rt=*rtp) != NULL) {
if (atomic_read(&rt->u.dst.__refcnt) ||
(now - rt->u.dst.lastuse) < expire) {
- rtp = &rt->u.rt_next;
+ rtp = &rt->u.dst.dn_next;
continue;
}
- *rtp = rt->u.rt_next;
- rt->u.rt_next = NULL;
+ *rtp = rt->u.dst.dn_next;
+ rt->u.dst.dn_next = NULL;
dnrt_free(rt);
}
spin_unlock(&dn_rt_hash_table[i].lock);
@@ -198,11 +198,11 @@ static int dn_dst_gc(void)
while((rt=*rtp) != NULL) {
if (atomic_read(&rt->u.dst.__refcnt) ||
(now - rt->u.dst.lastuse) < expire) {
- rtp = &rt->u.rt_next;
+ rtp = &rt->u.dst.dn_next;
continue;
}
- *rtp = rt->u.rt_next;
- rt->u.rt_next = NULL;
+ *rtp = rt->u.dst.dn_next;
+ rt->u.dst.dn_next = NULL;
dnrt_drop(rt);
break;
}
@@ -246,7 +246,7 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
}
}
-/*
+/*
* When a route has been marked obsolete. (e.g. routing cache flush)
*/
static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
@@ -286,8 +286,8 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
while((rth = *rthp) != NULL) {
if (compare_keys(&rth->fl, &rt->fl)) {
/* Put it first */
- *rthp = rth->u.rt_next;
- rcu_assign_pointer(rth->u.rt_next,
+ *rthp = rth->u.dst.dn_next;
+ rcu_assign_pointer(rth->u.dst.dn_next,
dn_rt_hash_table[hash].chain);
rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
@@ -300,12 +300,12 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
*rp = rth;
return 0;
}
- rthp = &rth->u.rt_next;
+ rthp = &rth->u.dst.dn_next;
}
- rcu_assign_pointer(rt->u.rt_next, dn_rt_hash_table[hash].chain);
+ rcu_assign_pointer(rt->u.dst.dn_next, dn_rt_hash_table[hash].chain);
rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
-
+
dst_hold(&rt->u.dst);
rt->u.dst.__use++;
rt->u.dst.lastuse = now;
@@ -326,8 +326,8 @@ void dn_run_flush(unsigned long dummy)
goto nothing_to_declare;
for(; rt; rt=next) {
- next = rt->u.rt_next;
- rt->u.rt_next = NULL;
+ next = rt->u.dst.dn_next;
+ rt->u.dst.dn_next = NULL;
dst_free((struct dst_entry *)rt);
}
@@ -506,23 +506,23 @@ static int dn_route_rx_long(struct sk_buff *skb)
skb_pull(skb, 20);
skb->h.raw = skb->data;
- /* Destination info */
- ptr += 2;
+ /* Destination info */
+ ptr += 2;
cb->dst = dn_eth2dn(ptr);
- if (memcmp(ptr, dn_hiord_addr, 4) != 0)
- goto drop_it;
- ptr += 6;
+ if (memcmp(ptr, dn_hiord_addr, 4) != 0)
+ goto drop_it;
+ ptr += 6;
- /* Source info */
- ptr += 2;
+ /* Source info */
+ ptr += 2;
cb->src = dn_eth2dn(ptr);
- if (memcmp(ptr, dn_hiord_addr, 4) != 0)
- goto drop_it;
- ptr += 6;
- /* Other junk */
- ptr++;
- cb->hops = *ptr++; /* Visit Count */
+ if (memcmp(ptr, dn_hiord_addr, 4) != 0)
+ goto drop_it;
+ ptr += 6;
+ /* Other junk */
+ ptr++;
+ cb->hops = *ptr++; /* Visit Count */
return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);
@@ -545,16 +545,16 @@ static int dn_route_rx_short(struct sk_buff *skb)
skb->h.raw = skb->data;
cb->dst = *(__le16 *)ptr;
- ptr += 2;
- cb->src = *(__le16 *)ptr;
- ptr += 2;
- cb->hops = *ptr & 0x3f;
+ ptr += 2;
+ cb->src = *(__le16 *)ptr;
+ ptr += 2;
+ cb->hops = *ptr & 0x3f;
return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);
drop_it:
- kfree_skb(skb);
- return NET_RX_DROP;
+ kfree_skb(skb);
+ return NET_RX_DROP;
}
static int dn_route_discard(struct sk_buff *skb)
@@ -626,20 +626,20 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
cb->rt_flags = flags;
if (decnet_debug_level & 1)
- printk(KERN_DEBUG
+ printk(KERN_DEBUG
"dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
- (int)flags, (dev) ? dev->name : "???", len, skb->len,
+ (int)flags, (dev) ? dev->name : "???", len, skb->len,
padlen);
- if (flags & DN_RT_PKT_CNTL) {
+ if (flags & DN_RT_PKT_CNTL) {
if (unlikely(skb_linearize(skb)))
goto dump_it;
- switch(flags & DN_RT_CNTL_MSK) {
- case DN_RT_PKT_INIT:
+ switch(flags & DN_RT_CNTL_MSK) {
+ case DN_RT_PKT_INIT:
dn_dev_init_pkt(skb);
break;
- case DN_RT_PKT_VERI:
+ case DN_RT_PKT_VERI:
dn_dev_veri_pkt(skb);
break;
}
@@ -648,31 +648,31 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
goto dump_it;
switch(flags & DN_RT_CNTL_MSK) {
- case DN_RT_PKT_HELO:
+ case DN_RT_PKT_HELO:
return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_route_ptp_hello);
- case DN_RT_PKT_L1RT:
- case DN_RT_PKT_L2RT:
- return NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard);
- case DN_RT_PKT_ERTH:
+ case DN_RT_PKT_L1RT:
+ case DN_RT_PKT_L2RT:
+ return NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard);
+ case DN_RT_PKT_ERTH:
return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_router_hello);
- case DN_RT_PKT_EEDH:
+ case DN_RT_PKT_EEDH:
return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_endnode_hello);
- }
- } else {
+ }
+ } else {
if (dn->parms.state != DN_DEV_S_RU)
goto dump_it;
skb_pull(skb, 1); /* Pull flags */
- switch(flags & DN_RT_PKT_MSK) {
- case DN_RT_PKT_LONG:
- return dn_route_rx_long(skb);
- case DN_RT_PKT_SHORT:
- return dn_route_rx_short(skb);
+ switch(flags & DN_RT_PKT_MSK) {
+ case DN_RT_PKT_LONG:
+ return dn_route_rx_long(skb);
+ case DN_RT_PKT_SHORT:
+ return dn_route_rx_short(skb);
}
- }
+ }
dump_it:
kfree_skb(skb);
@@ -815,8 +815,8 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
rt->u.dst.neighbour = n;
}
- if (rt->u.dst.metrics[RTAX_MTU-1] == 0 ||
- rt->u.dst.metrics[RTAX_MTU-1] > rt->u.dst.dev->mtu)
+ if (rt->u.dst.metrics[RTAX_MTU-1] == 0 ||
+ rt->u.dst.metrics[RTAX_MTU-1] > rt->u.dst.dev->mtu)
rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst));
if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0 ||
@@ -876,7 +876,7 @@ static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_re
static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
{
- struct flowi fl = { .nl_u = { .dn_u =
+ struct flowi fl = { .nl_u = { .dn_u =
{ .daddr = oldflp->fld_dst,
.saddr = oldflp->fld_src,
.scope = RT_SCOPE_UNIVERSE,
@@ -899,7 +899,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
"dn_route_output_slow: dst=%04x src=%04x mark=%d"
" iif=%d oif=%d\n", dn_ntohs(oldflp->fld_dst),
dn_ntohs(oldflp->fld_src),
- oldflp->mark, loopback_dev.ifindex, oldflp->oif);
+ oldflp->mark, loopback_dev.ifindex, oldflp->oif);
/* If we have an output interface, verify its a DECnet device */
if (oldflp->oif) {
@@ -982,19 +982,19 @@ source_ok:
if (err != -ESRCH)
goto out;
/*
- * Here the fallback is basically the standard algorithm for
+ * Here the fallback is basically the standard algorithm for
* routing in endnodes which is described in the DECnet routing
* docs
*
* If we are not trying hard, look in neighbour cache.
* The result is tested to ensure that if a specific output
- * device/source address was requested, then we honour that
+ * device/source address was requested, then we honour that
* here
*/
if (!try_hard) {
neigh = neigh_lookup_nodev(&dn_neigh_table, &fl.fld_dst);
if (neigh) {
- if ((oldflp->oif &&
+ if ((oldflp->oif &&
(neigh->dev->ifindex != oldflp->oif)) ||
(oldflp->fld_src &&
(!dn_dev_islocal(neigh->dev,
@@ -1044,7 +1044,7 @@ select_source:
if (fl.fld_src == 0) {
fl.fld_src = dnet_select_source(dev_out, gateway,
res.type == RTN_LOCAL ?
- RT_SCOPE_HOST :
+ RT_SCOPE_HOST :
RT_SCOPE_LINK);
if (fl.fld_src == 0 && res.type != RTN_LOCAL)
goto e_addr;
@@ -1074,14 +1074,14 @@ select_source:
if (res.fi->fib_nhs > 1 && fl.oif == 0)
dn_fib_select_multipath(&fl, &res);
- /*
+ /*
* We could add some logic to deal with default routes here and
* get rid of some of the special casing above.
*/
if (!fl.fld_src)
fl.fld_src = DN_FIB_RES_PREFSRC(res);
-
+
if (dev_out)
dev_put(dev_out);
dev_out = DN_FIB_RES_DEV(res);
@@ -1144,8 +1144,8 @@ out:
return err;
e_addr:
- err = -EADDRNOTAVAIL;
- goto done;
+ err = -EADDRNOTAVAIL;
+ goto done;
e_inval:
err = -EINVAL;
goto done;
@@ -1169,7 +1169,7 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
if (!(flags & MSG_TRYHARD)) {
rcu_read_lock_bh();
for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt;
- rt = rcu_dereference(rt->u.rt_next)) {
+ rt = rcu_dereference(rt->u.dst.dn_next)) {
if ((flp->fld_dst == rt->fl.fld_dst) &&
(flp->fld_src == rt->fl.fld_src) &&
(flp->mark == rt->fl.mark) &&
@@ -1223,7 +1223,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
int flags = 0;
__le16 gateway = 0;
__le16 local_src = 0;
- struct flowi fl = { .nl_u = { .dn_u =
+ struct flowi fl = { .nl_u = { .dn_u =
{ .daddr = cb->dst,
.saddr = cb->src,
.scope = RT_SCOPE_UNIVERSE,
@@ -1311,7 +1311,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
if (res.fi->fib_nhs > 1 && fl.oif == 0)
dn_fib_select_multipath(&fl, &res);
- /*
+ /*
* Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
* flag as a hint to set the intra-ethernet bit when
* forwarding. If we've got NAT in operation, we don't do
@@ -1443,9 +1443,9 @@ int dn_route_input(struct sk_buff *skb)
rcu_read_lock();
for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
- rt = rcu_dereference(rt->u.rt_next)) {
+ rt = rcu_dereference(rt->u.dst.dn_next)) {
if ((rt->fl.fld_src == cb->src) &&
- (rt->fl.fld_dst == cb->dst) &&
+ (rt->fl.fld_dst == cb->dst) &&
(rt->fl.oif == 0) &&
(rt->fl.mark == skb->mark) &&
(rt->fl.iif == cb->iif)) {
@@ -1514,8 +1514,8 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
nlmsg_failure:
rtattr_failure:
- skb_trim(skb, b - skb->data);
- return -1;
+ skb_trim(skb, b - skb->data);
+ return -1;
}
/*
@@ -1627,12 +1627,12 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock_bh();
for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0;
rt;
- rt = rcu_dereference(rt->u.rt_next), idx++) {
+ rt = rcu_dereference(rt->u.dst.dn_next), idx++) {
if (idx < s_idx)
continue;
skb->dst = dst_clone(&rt->u.dst);
if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, RTM_NEWROUTE,
+ cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1, NLM_F_MULTI) <= 0) {
dst_release(xchg(&skb->dst, NULL));
rcu_read_unlock_bh();
@@ -1673,7 +1673,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
{
struct dn_rt_cache_iter_state *s = rcu_dereference(seq->private);
- rt = rt->u.rt_next;
+ rt = rt->u.dst.dn_next;
while(!rt) {
rcu_read_unlock_bh();
if (--s->bucket < 0)
@@ -1721,7 +1721,7 @@ static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
rt->u.dst.__use,
(int) dst_metric(&rt->u.dst, RTAX_RTT));
return 0;
-}
+}
static struct seq_operations dn_rt_cache_seq_ops = {
.start = dn_rt_cache_seq_start,
@@ -1751,7 +1751,7 @@ out_kfree:
goto out;
}
-static struct file_operations dn_rt_cache_seq_fops = {
+static const struct file_operations dn_rt_cache_seq_fops = {
.owner = THIS_MODULE,
.open = dn_rt_cache_seq_open,
.read = seq_read,
@@ -1778,38 +1778,38 @@ void __init dn_route_init(void)
for(order = 0; (1UL << order) < goal; order++)
/* NOTHING */;
- /*
- * Only want 1024 entries max, since the table is very, very unlikely
- * to be larger than that.
- */
- while(order && ((((1UL << order) * PAGE_SIZE) /
- sizeof(struct dn_rt_hash_bucket)) >= 2048))
- order--;
-
- do {
- dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
- sizeof(struct dn_rt_hash_bucket);
- while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
- dn_rt_hash_mask--;
- dn_rt_hash_table = (struct dn_rt_hash_bucket *)
- __get_free_pages(GFP_ATOMIC, order);
- } while (dn_rt_hash_table == NULL && --order > 0);
+ /*
+ * Only want 1024 entries max, since the table is very, very unlikely
+ * to be larger than that.
+ */
+ while(order && ((((1UL << order) * PAGE_SIZE) /
+ sizeof(struct dn_rt_hash_bucket)) >= 2048))
+ order--;
+
+ do {
+ dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
+ sizeof(struct dn_rt_hash_bucket);
+ while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
+ dn_rt_hash_mask--;
+ dn_rt_hash_table = (struct dn_rt_hash_bucket *)
+ __get_free_pages(GFP_ATOMIC, order);
+ } while (dn_rt_hash_table == NULL && --order > 0);
if (!dn_rt_hash_table)
- panic("Failed to allocate DECnet route cache hash table\n");
+ panic("Failed to allocate DECnet route cache hash table\n");
- printk(KERN_INFO
- "DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
- dn_rt_hash_mask,
+ printk(KERN_INFO
+ "DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
+ dn_rt_hash_mask,
(long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);
dn_rt_hash_mask--;
- for(i = 0; i <= dn_rt_hash_mask; i++) {
- spin_lock_init(&dn_rt_hash_table[i].lock);
- dn_rt_hash_table[i].chain = NULL;
- }
+ for(i = 0; i <= dn_rt_hash_mask; i++) {
+ spin_lock_init(&dn_rt_hash_table[i].lock);
+ dn_rt_hash_table[i].chain = NULL;
+ }
- dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);
+ dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);
proc_net_fops_create("decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);
}
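
Aside from the whitespace cleanup, the substance of this patch is that the DECnet route cache chain is no longer linked through the protocol-private rt->u.rt_next member but through the dn_next field of the embedded struct dst_entry. The fragment below is an illustrative sketch, not part of the patch: it condenses the conversion applied throughout dn_route.c, using identifiers taken from the hunks above and assuming the surrounding 2.6-era DECnet definitions (struct dn_route, dn_rt_hash_table) from dn_route.c and <net/dn_route.h>.

/* Before: the hash chain was walked through the protocol-private member. */
for (rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt;
     rt = rcu_dereference(rt->u.rt_next)) {
        /* ... compare rt->fl against the flow key ... */
}

/* After: the chain pointer is the dn_next field of the embedded dst_entry,
 * so the same walk is spelled through rt->u.dst instead. */
for (rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt;
     rt = rcu_dereference(rt->u.dst.dn_next)) {
        /* ... same comparison; only the field name changes ... */
}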