author    Steven Whitehouse <swhiteho@redhat.com>  2006-07-17 15:25:26 +0200
committer Steven Whitehouse <swhiteho@redhat.com>  2006-07-17 15:25:26 +0200
commit    4bf311ddfbffe12d41ad1a3c311ab727db6f72cb
tree      9d19a2774e83637d86dc876f3af22af1dacf0bec /net
parent    [DLM] dlm: user locks
parent    Linux 2.6.18-rc2
Merge branch 'master'
Diffstat (limited to 'net')
48 files changed, 396 insertions, 644 deletions
diff --git a/net/atm/clip.c b/net/atm/clip.c index 121bf6f49148..2e62105d91bd 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -962,7 +962,6 @@ static struct file_operations arp_seq_fops = { static int __init atm_clip_init(void) { - struct proc_dir_entry *p; neigh_table_init_no_netlink(&clip_tbl); clip_tbl_hook = &clip_tbl; @@ -972,9 +971,15 @@ static int __init atm_clip_init(void) setup_timer(&idle_timer, idle_timer_check, 0); - p = create_proc_entry("arp", S_IRUGO, atm_proc_root); - if (p) - p->proc_fops = &arp_seq_fops; +#ifdef CONFIG_PROC_FS + { + struct proc_dir_entry *p; + + p = create_proc_entry("arp", S_IRUGO, atm_proc_root); + if (p) + p->proc_fops = &arp_seq_fops; + } +#endif return 0; } diff --git a/net/atm/ipcommon.c b/net/atm/ipcommon.c index 4b1faca5013f..1d3de42fada0 100644 --- a/net/atm/ipcommon.c +++ b/net/atm/ipcommon.c @@ -25,22 +25,27 @@ /* * skb_migrate appends the list at "from" to "to", emptying "from" in the * process. skb_migrate is atomic with respect to all other skb operations on - * "from" and "to". Note that it locks both lists at the same time, so beware - * of potential deadlocks. + * "from" and "to". Note that it locks both lists at the same time, so to deal + * with the lock ordering, the locks are taken in address order. * * This function should live in skbuff.c or skbuff.h. */ -void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to) +void skb_migrate(struct sk_buff_head *from, struct sk_buff_head *to) { unsigned long flags; struct sk_buff *skb_from = (struct sk_buff *) from; struct sk_buff *skb_to = (struct sk_buff *) to; struct sk_buff *prev; - spin_lock_irqsave(&from->lock,flags); - spin_lock(&to->lock); + if ((unsigned long) from < (unsigned long) to) { + spin_lock_irqsave(&from->lock, flags); + spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING); + } else { + spin_lock_irqsave(&to->lock, flags); + spin_lock_nested(&from->lock, SINGLE_DEPTH_NESTING); + } prev = from->prev; from->next->prev = to->prev; prev->next = skb_to; @@ -51,7 +56,7 @@ void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to) from->prev = skb_from; from->next = skb_from; from->qlen = 0; - spin_unlock_irqrestore(&from->lock,flags); + spin_unlock_irqrestore(&from->lock, flags); } diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 10a3c0aa8398..000695c48583 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -145,7 +145,7 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi, ax25_cb *s; struct hlist_node *node; - spin_lock_bh(&ax25_list_lock); + spin_lock(&ax25_list_lock); ax25_for_each(s, node, &ax25_list) { if ((s->iamdigi && !digi) || (!s->iamdigi && digi)) continue; @@ -154,12 +154,12 @@ struct sock *ax25_find_listener(ax25_address *addr, int digi, /* If device is null we match any device */ if (s->ax25_dev == NULL || s->ax25_dev->dev == dev) { sock_hold(s->sk); - spin_unlock_bh(&ax25_list_lock); + spin_unlock(&ax25_list_lock); return s->sk; } } } - spin_unlock_bh(&ax25_list_lock); + spin_unlock(&ax25_list_lock); return NULL; } @@ -174,7 +174,7 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr, ax25_cb *s; struct hlist_node *node; - spin_lock_bh(&ax25_list_lock); + spin_lock(&ax25_list_lock); ax25_for_each(s, node, &ax25_list) { if (s->sk && !ax25cmp(&s->source_addr, my_addr) && !ax25cmp(&s->dest_addr, dest_addr) && @@ -185,7 +185,7 @@ struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr, } } - spin_unlock_bh(&ax25_list_lock); + spin_unlock(&ax25_list_lock); return sk; } @@ 
-235,7 +235,7 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto) struct sk_buff *copy; struct hlist_node *node; - spin_lock_bh(&ax25_list_lock); + spin_lock(&ax25_list_lock); ax25_for_each(s, node, &ax25_list) { if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 && s->sk->sk_type == SOCK_RAW && @@ -248,7 +248,7 @@ void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto) kfree_skb(copy); } } - spin_unlock_bh(&ax25_list_lock); + spin_unlock(&ax25_list_lock); } /* @@ -486,10 +486,9 @@ ax25_cb *ax25_create_cb(void) { ax25_cb *ax25; - if ((ax25 = kmalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL) + if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL) return NULL; - memset(ax25, 0x00, sizeof(*ax25)); atomic_set(&ax25->refcount, 1); skb_queue_head_init(&ax25->write_queue); diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c index 47e6e790bd67..b787678220ff 100644 --- a/net/ax25/ax25_dev.c +++ b/net/ax25/ax25_dev.c @@ -55,15 +55,13 @@ void ax25_dev_device_up(struct net_device *dev) { ax25_dev *ax25_dev; - if ((ax25_dev = kmalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) { + if ((ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) { printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n"); return; } ax25_unregister_sysctl(); - memset(ax25_dev, 0x00, sizeof(*ax25_dev)); - dev->ax25_ptr = ax25_dev; ax25_dev->dev = dev; dev_hold(dev); diff --git a/net/ax25/ax25_ds_subr.c b/net/ax25/ax25_ds_subr.c index 1d4ab641f82b..4d22d4430ec8 100644 --- a/net/ax25/ax25_ds_subr.c +++ b/net/ax25/ax25_ds_subr.c @@ -80,7 +80,7 @@ void ax25_ds_enquiry_response(ax25_cb *ax25) ax25_start_t3timer(ax25); ax25_ds_set_timer(ax25->ax25_dev); - spin_lock_bh(&ax25_list_lock); + spin_lock(&ax25_list_lock); ax25_for_each(ax25o, node, &ax25_list) { if (ax25o == ax25) continue; @@ -106,7 +106,7 @@ void ax25_ds_enquiry_response(ax25_cb *ax25) if (ax25o->state != AX25_STATE_0) ax25_start_t3timer(ax25o); } - spin_unlock_bh(&ax25_list_lock); + spin_unlock(&ax25_list_lock); } void ax25_ds_establish_data_link(ax25_cb *ax25) @@ -162,13 +162,13 @@ static int ax25_check_dama_slave(ax25_dev *ax25_dev) int res = 0; struct hlist_node *node; - spin_lock_bh(&ax25_list_lock); + spin_lock(&ax25_list_lock); ax25_for_each(ax25, node, &ax25_list) if (ax25->ax25_dev == ax25_dev && (ax25->condition & AX25_COND_DAMA_MODE) && ax25->state > AX25_STATE_1) { res = 1; break; } - spin_unlock_bh(&ax25_list_lock); + spin_unlock(&ax25_list_lock); return res; } diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c index 5961459935eb..4f44185955c7 100644 --- a/net/ax25/ax25_ds_timer.c +++ b/net/ax25/ax25_ds_timer.c @@ -85,7 +85,7 @@ static void ax25_ds_timeout(unsigned long arg) return; } - spin_lock_bh(&ax25_list_lock); + spin_lock(&ax25_list_lock); ax25_for_each(ax25, node, &ax25_list) { if (ax25->ax25_dev != ax25_dev || !(ax25->condition & AX25_COND_DAMA_MODE)) continue; @@ -93,7 +93,7 @@ static void ax25_ds_timeout(unsigned long arg) ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); ax25_disconnect(ax25, ETIMEDOUT); } - spin_unlock_bh(&ax25_list_lock); + spin_unlock(&ax25_list_lock); ax25_dev_dama_off(ax25_dev); } diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c index 77ba07c67682..07ac0207eb69 100644 --- a/net/ax25/ax25_iface.c +++ b/net/ax25/ax25_iface.c @@ -66,10 +66,10 @@ int ax25_protocol_register(unsigned int pid, protocol->pid = pid; protocol->func = func; - write_lock(&protocol_list_lock); + write_lock_bh(&protocol_list_lock); protocol->next = protocol_list; 
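
The skb_migrate() rewrite above replaces the old "beware of potential deadlocks" comment with an actual rule: when two queue locks must be held at once, take them in address order, so any two threads agree on which lock comes first. A minimal pthreads sketch of the same discipline (queue_t and queue_migrate are illustrative names, not kernel API; the kernel version additionally informs lockdep via spin_lock_nested(SINGLE_DEPTH_NESTING)):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    pthread_mutex_t lock;
    int qlen;
} queue_t;

/* Migrate everything from "from" to "to" atomically with respect to
 * both queues.  Two concurrent migrations in opposite directions can
 * deadlock if each thread grabs its own "from" lock first; taking the
 * locks in address order makes both threads agree on the order. */
static void queue_migrate(queue_t *from, queue_t *to)
{
    queue_t *first  = (uintptr_t)from < (uintptr_t)to ? from : to;
    queue_t *second = (first == from) ? to : from;

    pthread_mutex_lock(&first->lock);
    pthread_mutex_lock(&second->lock);

    to->qlen += from->qlen;     /* list splicing elided */
    from->qlen = 0;

    pthread_mutex_unlock(&second->lock);
    pthread_mutex_unlock(&first->lock);
}

int main(void)
{
    queue_t a = { PTHREAD_MUTEX_INITIALIZER, 3 };
    queue_t b = { PTHREAD_MUTEX_INITIALIZER, 0 };

    queue_migrate(&a, &b);
    printf("a.qlen=%d b.qlen=%d\n", a.qlen, b.qlen);    /* 0 and 3 */
    return 0;
}
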
protocol_list = protocol; - write_unlock(&protocol_list_lock); + write_unlock_bh(&protocol_list_lock); return 1; } @@ -80,16 +80,16 @@ void ax25_protocol_release(unsigned int pid) { struct protocol_struct *s, *protocol; - write_lock(&protocol_list_lock); + write_lock_bh(&protocol_list_lock); protocol = protocol_list; if (protocol == NULL) { - write_unlock(&protocol_list_lock); + write_unlock_bh(&protocol_list_lock); return; } if (protocol->pid == pid) { protocol_list = protocol->next; - write_unlock(&protocol_list_lock); + write_unlock_bh(&protocol_list_lock); kfree(protocol); return; } @@ -98,14 +98,14 @@ void ax25_protocol_release(unsigned int pid) if (protocol->next->pid == pid) { s = protocol->next; protocol->next = protocol->next->next; - write_unlock(&protocol_list_lock); + write_unlock_bh(&protocol_list_lock); kfree(s); return; } protocol = protocol->next; } - write_unlock(&protocol_list_lock); + write_unlock_bh(&protocol_list_lock); } EXPORT_SYMBOL(ax25_protocol_release); @@ -266,13 +266,13 @@ int ax25_protocol_is_registered(unsigned int pid) struct protocol_struct *protocol; int res = 0; - read_lock(&protocol_list_lock); + read_lock_bh(&protocol_list_lock); for (protocol = protocol_list; protocol != NULL; protocol = protocol->next) if (protocol->pid == pid) { res = 1; break; } - read_unlock(&protocol_list_lock); + read_unlock_bh(&protocol_list_lock); return res; } diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c index 4cf87540fb3a..e9d94291581e 100644 --- a/net/ax25/ax25_in.c +++ b/net/ax25/ax25_in.c @@ -102,8 +102,8 @@ static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb) int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb) { int (*func)(struct sk_buff *, ax25_cb *); - volatile int queued = 0; unsigned char pid; + int queued = 0; if (skb == NULL) return 0; diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c index 6fb47e00e188..be04e9fb11f6 100644 --- a/net/bluetooth/cmtp/capi.c +++ b/net/bluetooth/cmtp/capi.c @@ -75,15 +75,13 @@ static struct cmtp_application *cmtp_application_add(struct cmtp_session *session, __u16 appl) { - struct cmtp_application *app = kmalloc(sizeof(*app), GFP_KERNEL); + struct cmtp_application *app = kzalloc(sizeof(*app), GFP_KERNEL); BT_DBG("session %p application %p appl %d", session, app, appl); if (!app) return NULL; - memset(app, 0, sizeof(*app)); - app->state = BT_OPEN; app->appl = appl; diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 182254a580e2..b81a01c64aea 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c @@ -335,10 +335,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) baswap(&src, &bt_sk(sock->sk)->src); baswap(&dst, &bt_sk(sock->sk)->dst); - session = kmalloc(sizeof(struct cmtp_session), GFP_KERNEL); + session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL); if (!session) return -ENOMEM; - memset(session, 0, sizeof(struct cmtp_session)); down_write(&cmtp_session_sem); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 54e8e5ea2154..5ed474277903 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -336,9 +336,8 @@ void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data) if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) { /* Entry not in the cache. Add new one. 
*/ - if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC))) + if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC))) return; - memset(e, 0, sizeof(struct inquiry_entry)); e->next = cache->list; cache->list = e; } @@ -800,12 +799,10 @@ struct hci_dev *hci_alloc_dev(void) { struct hci_dev *hdev; - hdev = kmalloc(sizeof(struct hci_dev), GFP_KERNEL); + hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL); if (!hdev) return NULL; - memset(hdev, 0, sizeof(struct hci_dev)); - skb_queue_head_init(&hdev->driver_init); return hdev; diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig index edfea772fb67..c6abf2a5a932 100644 --- a/net/bluetooth/hidp/Kconfig +++ b/net/bluetooth/hidp/Kconfig @@ -1,7 +1,6 @@ config BT_HIDP tristate "HIDP protocol support" - depends on BT && BT_L2CAP && (BROKEN || !S390) - select INPUT + depends on BT && BT_L2CAP && INPUT help HIDP (Human Interface Device Protocol) is a transport layer for HID reports. HIDP is required for the Bluetooth Human diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index b9c24a55425c..c6e3a2c27c6e 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -582,10 +582,9 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst)) return -ENOTUNIQ; - session = kmalloc(sizeof(struct hidp_session), GFP_KERNEL); + session = kzalloc(sizeof(struct hidp_session), GFP_KERNEL); if (!session) return -ENOMEM; - memset(session, 0, sizeof(struct hidp_session)); session->input = input_allocate_device(); if (!session->input) { diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index eaaad658d11d..d56f60b392ac 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -185,7 +185,7 @@ static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk) { struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c; - write_lock(&l->lock); + write_lock_bh(&l->lock); if (sk == l->head) l->head = next; @@ -193,7 +193,7 @@ static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk) l2cap_pi(next)->prev_c = prev; if (prev) l2cap_pi(prev)->next_c = next; - write_unlock(&l->lock); + write_unlock_bh(&l->lock); __sock_put(sk); } @@ -313,9 +313,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) { struct l2cap_chan_list *l = &conn->chan_list; - write_lock(&l->lock); + write_lock_bh(&l->lock); __l2cap_chan_add(conn, sk, parent); - write_unlock(&l->lock); + write_unlock_bh(&l->lock); } static inline u8 l2cap_get_ident(struct l2cap_conn *conn) @@ -328,14 +328,14 @@ static inline u8 l2cap_get_ident(struct l2cap_conn *conn) * 200 - 254 are used by utilities like l2ping, etc. 
*/ - spin_lock(&conn->lock); + spin_lock_bh(&conn->lock); if (++conn->tx_ident > 128) conn->tx_ident = 1; id = conn->tx_ident; - spin_unlock(&conn->lock); + spin_unlock_bh(&conn->lock); return id; } @@ -1416,11 +1416,11 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd if (!sk) goto response; - write_lock(&list->lock); + write_lock_bh(&list->lock); /* Check if we already have channel with that dcid */ if (__l2cap_get_chan_by_dcid(list, scid)) { - write_unlock(&list->lock); + write_unlock_bh(&list->lock); sock_set_flag(sk, SOCK_ZAPPED); l2cap_sock_kill(sk); goto response; @@ -1458,7 +1458,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd result = status = 0; done: - write_unlock(&list->lock); + write_unlock_bh(&list->lock); response: bh_unlock_sock(parent); diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 155a2b93760e..77eab8f4c7fd 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -273,10 +273,10 @@ static void rfcomm_dlc_clear_state(struct rfcomm_dlc *d) struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio) { - struct rfcomm_dlc *d = kmalloc(sizeof(*d), prio); + struct rfcomm_dlc *d = kzalloc(sizeof(*d), prio); + if (!d) return NULL; - memset(d, 0, sizeof(*d)); init_timer(&d->timer); d->timer.function = rfcomm_dlc_timeout; @@ -289,6 +289,7 @@ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio) rfcomm_dlc_clear_state(d); BT_DBG("%p", d); + return d; } @@ -522,10 +523,10 @@ int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig) /* ---- RFCOMM sessions ---- */ static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state) { - struct rfcomm_session *s = kmalloc(sizeof(*s), GFP_KERNEL); + struct rfcomm_session *s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) return NULL; - memset(s, 0, sizeof(*s)); BT_DBG("session %p sock %p", s, sock); diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 2ff2d5b87c93..bd8d671a0ba6 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -169,10 +169,9 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) BT_DBG("id %d channel %d", req->dev_id, req->channel); - dev = kmalloc(sizeof(struct rfcomm_dev), GFP_KERNEL); + dev = kzalloc(sizeof(struct rfcomm_dev), GFP_KERNEL); if (!dev) return -ENOMEM; - memset(dev, 0, sizeof(struct rfcomm_dev)); write_lock_bh(&rfcomm_dev_lock); diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 85defccc0287..7714a2ec3854 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -108,17 +108,14 @@ static void sco_sock_init_timer(struct sock *sk) static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status) { struct hci_dev *hdev = hcon->hdev; - struct sco_conn *conn; - - if ((conn = hcon->sco_data)) - return conn; + struct sco_conn *conn = hcon->sco_data; - if (status) + if (conn || status) return conn; - if (!(conn = kmalloc(sizeof(struct sco_conn), GFP_ATOMIC))) + conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC); + if (!conn) return NULL; - memset(conn, 0, sizeof(struct sco_conn)); spin_lock_init(&conn->lock); @@ -134,6 +131,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status) conn->mtu = 60; BT_DBG("hcon %p conn %p", hcon, conn); + return conn; } diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 8be9f2123e54..6ccd32b30809 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -35,7 +35,7 @@ static inline unsigned packet_length(const 
struct sk_buff *skb) int br_dev_queue_push_xmit(struct sk_buff *skb) { /* drop mtu oversized packets except gso */ - if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->gso_size) + if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb)) kfree_skb(skb); else { #ifdef CONFIG_BRIDGE_NETFILTER diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 8298a5179aef..cbc8a389a0a8 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -761,7 +761,7 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb) { if (skb->protocol == htons(ETH_P_IP) && skb->len > skb->dev->mtu && - !skb_shinfo(skb)->gso_size) + !skb_is_gso(skb)) return ip_fragment(skb, br_dev_queue_push_xmit); else return br_dev_queue_push_xmit(skb); diff --git a/net/core/dev.c b/net/core/dev.c index 066a60a75280..4d2b5167d7f5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1162,9 +1162,17 @@ int skb_checksum_help(struct sk_buff *skb, int inward) unsigned int csum; int ret = 0, offset = skb->h.raw - skb->data; - if (inward) { - skb->ip_summed = CHECKSUM_NONE; - goto out; + if (inward) + goto out_set_summed; + + if (unlikely(skb_shinfo(skb)->gso_size)) { + static int warned; + + WARN_ON(!warned); + warned = 1; + + /* Let GSO fix up the checksum. */ + goto out_set_summed; } if (skb_cloned(skb)) { @@ -1181,6 +1189,8 @@ int skb_checksum_help(struct sk_buff *skb, int inward) BUG_ON(skb->csum + 2 > offset); *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); + +out_set_summed: skb->ip_summed = CHECKSUM_NONE; out: return ret; @@ -1201,17 +1211,35 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); struct packet_type *ptype; int type = skb->protocol; + int err; BUG_ON(skb_shinfo(skb)->frag_list); - BUG_ON(skb->ip_summed != CHECKSUM_HW); skb->mac.raw = skb->data; skb->mac_len = skb->nh.raw - skb->data; __skb_pull(skb, skb->mac_len); + if (unlikely(skb->ip_summed != CHECKSUM_HW)) { + static int warned; + + WARN_ON(!warned); + warned = 1; + + if (skb_header_cloned(skb) && + (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) + return ERR_PTR(err); + } + rcu_read_lock(); list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) { if (ptype->type == type && !ptype->dev && ptype->gso_segment) { + if (unlikely(skb->ip_summed != CHECKSUM_HW)) { + err = ptype->gso_send_check(skb); + segs = ERR_PTR(err); + if (err || skb_gso_ok(skb, features)) + break; + __skb_push(skb, skb->data - skb->nh.raw); + } segs = ptype->gso_segment(skb, features); break; } @@ -1727,7 +1755,7 @@ static int ing_filter(struct sk_buff *skb) if (dev->qdisc_ingress) { __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd); if (MAX_RED_LOOP < ttl++) { - printk("Redir loop detected Dropping packet (%s->%s)\n", + printk(KERN_WARNING "Redir loop detected Dropping packet (%s->%s)\n", skb->input_dev->name, skb->dev->name); return TC_ACT_SHOT; } @@ -2922,7 +2950,7 @@ int register_netdevice(struct net_device *dev) /* Fix illegal SG+CSUM combinations. */ if ((dev->features & NETIF_F_SG) && !(dev->features & NETIF_F_ALL_CSUM)) { - printk("%s: Dropping NETIF_F_SG since no checksum feature.\n", + printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n", dev->name); dev->features &= ~NETIF_F_SG; } @@ -2930,7 +2958,7 @@ int register_netdevice(struct net_device *dev) /* TSO requires that SG is present as well. 
*/ if ((dev->features & NETIF_F_TSO) && !(dev->features & NETIF_F_SG)) { - printk("%s: Dropping NETIF_F_TSO since no SG feature.\n", + printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n", dev->name); dev->features &= ~NETIF_F_TSO; } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 44f6a181a754..476aa3978504 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -257,11 +257,11 @@ nodata: } -static void skb_drop_fraglist(struct sk_buff *skb) +static void skb_drop_list(struct sk_buff **listp) { - struct sk_buff *list = skb_shinfo(skb)->frag_list; + struct sk_buff *list = *listp; - skb_shinfo(skb)->frag_list = NULL; + *listp = NULL; do { struct sk_buff *this = list; @@ -270,6 +270,11 @@ static void skb_drop_fraglist(struct sk_buff *skb) } while (list); } +static inline void skb_drop_fraglist(struct sk_buff *skb) +{ + skb_drop_list(&skb_shinfo(skb)->frag_list); +} + static void skb_clone_fraglist(struct sk_buff *skb) { struct sk_buff *list; @@ -830,41 +835,75 @@ free_skb: int ___pskb_trim(struct sk_buff *skb, unsigned int len) { + struct sk_buff **fragp; + struct sk_buff *frag; int offset = skb_headlen(skb); int nfrags = skb_shinfo(skb)->nr_frags; int i; + int err; + + if (skb_cloned(skb) && + unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) + return err; for (i = 0; i < nfrags; i++) { int end = offset + skb_shinfo(skb)->frags[i].size; - if (end > len) { - if (skb_cloned(skb)) { - if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) - return -ENOMEM; - } - if (len <= offset) { - put_page(skb_shinfo(skb)->frags[i].page); - skb_shinfo(skb)->nr_frags--; - } else { - skb_shinfo(skb)->frags[i].size = len - offset; - } + + if (end < len) { + offset = end; + continue; } - offset = end; + + if (len > offset) + skb_shinfo(skb)->frags[i++].size = len - offset; + + skb_shinfo(skb)->nr_frags = i; + + for (; i < nfrags; i++) + put_page(skb_shinfo(skb)->frags[i].page); + + if (skb_shinfo(skb)->frag_list) + skb_drop_fraglist(skb); + break; } - if (offset < len) { + for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); + fragp = &frag->next) { + int end = offset + frag->len; + + if (skb_shared(frag)) { + struct sk_buff *nfrag; + + nfrag = skb_clone(frag, GFP_ATOMIC); + if (unlikely(!nfrag)) + return -ENOMEM; + + nfrag->next = frag->next; + frag = nfrag; + *fragp = frag; + } + + if (end < len) { + offset = end; + continue; + } + + if (end > len && + unlikely((err = pskb_trim(frag, len - offset)))) + return err; + + if (frag->next) + skb_drop_list(&frag->next); + break; + } + + if (len > skb_headlen(skb)) { skb->data_len -= skb->len - len; skb->len = len; } else { - if (len <= skb_headlen(skb)) { - skb->len = len; - skb->data_len = 0; - skb->tail = skb->data + len; - if (skb_shinfo(skb)->frag_list && !skb_cloned(skb)) - skb_drop_fraglist(skb); - } else { - skb->data_len -= skb->len - len; - skb->len = len; - } + skb->len = len; + skb->data_len = 0; + skb->tail = skb->data + len; } return 0; diff --git a/net/core/stream.c b/net/core/stream.c index e9489696f694..d1d7decf70b0 100644 --- a/net/core/stream.c +++ b/net/core/stream.c @@ -196,15 +196,13 @@ EXPORT_SYMBOL(sk_stream_error); void __sk_stream_mem_reclaim(struct sock *sk) { - if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM) { - atomic_sub(sk->sk_forward_alloc / SK_STREAM_MEM_QUANTUM, - sk->sk_prot->memory_allocated); - sk->sk_forward_alloc &= SK_STREAM_MEM_QUANTUM - 1; - if (*sk->sk_prot->memory_pressure && - (atomic_read(sk->sk_prot->memory_allocated) < - sk->sk_prot->sysctl_mem[0])) - *sk->sk_prot->memory_pressure = 0; 
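
The ___pskb_trim() rewrite above walks skb_shinfo(skb)->frag_list through a pointer-to-pointer (fragp) so a shared fragment can be replaced in place, shortens the fragment that straddles the new length, and drops everything after it via skb_drop_list(). A much-simplified userspace sketch of that walk over a plain buffer chain (buf_t, mk and buf_trim are inventions for illustration; sharing and cloning are omitted):

#include <stdio.h>
#include <stdlib.h>

typedef struct buf {
    struct buf *next;
    int len;
} buf_t;

/* Trim a chain of buffers so the total length becomes "len":
 * shorten the buffer that straddles the cut and free the tail,
 * mirroring the fragp/skb_drop_list() walk in ___pskb_trim(). */
static int buf_trim(buf_t **listp, int len)
{
    buf_t **bufp, *buf;
    int offset = 0;

    for (bufp = listp; (buf = *bufp) != NULL; bufp = &buf->next) {
        int end = offset + buf->len;

        if (end < len) {          /* entirely before the cut */
            offset = end;
            continue;
        }
        buf->len = len - offset;  /* straddles the cut: shorten */

        while (buf->next) {       /* drop everything after it */
            buf_t *dead = buf->next;
            buf->next = dead->next;
            free(dead);
        }
        return 0;
    }
    return -1;                    /* chain shorter than len */
}

static buf_t *mk(int len, buf_t *next)
{
    buf_t *b = malloc(sizeof(*b));
    b->len = len;
    b->next = next;
    return b;
}

int main(void)
{
    buf_t *list = mk(10, mk(20, mk(30, NULL)));

    buf_trim(&list, 25);                  /* keeps 10 + 15, frees the 30 */
    for (buf_t *b = list; b; b = b->next)
        printf("%d ", b->len);            /* prints: 10 15 */
    printf("\n");
    return 0;
}
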
- } + atomic_sub(sk->sk_forward_alloc / SK_STREAM_MEM_QUANTUM, + sk->sk_prot->memory_allocated); + sk->sk_forward_alloc &= SK_STREAM_MEM_QUANTUM - 1; + if (*sk->sk_prot->memory_pressure && + (atomic_read(sk->sk_prot->memory_allocated) < + sk->sk_prot->sysctl_mem[0])) + *sk->sk_prot->memory_pressure = 0; } EXPORT_SYMBOL(__sk_stream_mem_reclaim); diff --git a/net/dccp/proto.c b/net/dccp/proto.c index f4f0627ea41c..6f14bb5a28d4 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -484,7 +484,7 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname, err = -EINVAL; else err = dccp_setsockopt_change(sk, DCCPO_CHANGE_L, - (struct dccp_so_feat *) + (struct dccp_so_feat __user *) optval); break; @@ -493,7 +493,7 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname, err = -EINVAL; else err = dccp_setsockopt_change(sk, DCCPO_CHANGE_R, - (struct dccp_so_feat *) + (struct dccp_so_feat __user *) optval); break; diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index 06e785fe5757..22f321d9bf9d 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c @@ -399,9 +399,10 @@ int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) { if (idx < s_idx) - continue; + goto next; if (dn_fib_fill_rule(skb, r, cb, NLM_F_MULTI) < 0) break; +next: idx++; } rcu_read_unlock(); diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index da33393be45f..8514106761b0 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -572,16 +572,6 @@ config TCP_CONG_VENO loss packets. See http://www.ntu.edu.sg/home5/ZHOU0022/papers/CPFu03a.pdf -config TCP_CONG_COMPOUND - tristate "TCP Compound" - depends on EXPERIMENTAL - default n - ---help--- - TCP Compound is a sender-side only change to TCP that uses - a mixed Reno/Vegas approach to calculate the cwnd. 
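
The dn_fib_dump_rules() hunk just above (and the identical inet_dump_rules() fix that follows) repairs netlink dump pagination: "continue" jumped past the idx++ at the bottom of the loop, so entries skipped on resume never advanced the index. A toy reproduction of both forms (the rules array and s_idx stand in for the hlist walk and the callback's resume cursor):

#include <stdio.h>

static void dump(int s_idx, int buggy)
{
    const char *rules[] = { "r0", "r1", "r2", "r3" };
    int idx = 0;

    for (int i = 0; i < 4; i++) {
        if (idx < s_idx) {
            if (buggy)
                continue;   /* skips idx++: idx is stuck at 0 */
            goto next;      /* the patched form still counts */
        }
        printf("  emit %s (idx=%d)\n", rules[i], idx);
next:
        idx++;
    }
}

int main(void)
{
    printf("buggy, resume at 2:\n");
    dump(2, 1);             /* emits nothing: idx never advances */
    printf("fixed, resume at 2:\n");
    dump(2, 0);             /* emits r2 and r3 */
    return 0;
}
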
- For further details look here: - ftp://ftp.research.microsoft.com/pub/tr/TR-2005-86.pdf - endmenu config TCP_CONG_BIC diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 38b8039bdd55..4878fc5be85f 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -47,7 +47,6 @@ obj-$(CONFIG_TCP_CONG_VEGAS) += tcp_vegas.o obj-$(CONFIG_TCP_CONG_VENO) += tcp_veno.o obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o -obj-$(CONFIG_TCP_CONG_COMPOUND) += tcp_compound.o obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \ xfrm4_output.o diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 318d4674faa1..c84a32070f8d 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1097,6 +1097,40 @@ int inet_sk_rebuild_header(struct sock *sk) EXPORT_SYMBOL(inet_sk_rebuild_header); +static int inet_gso_send_check(struct sk_buff *skb) +{ + struct iphdr *iph; + struct net_protocol *ops; + int proto; + int ihl; + int err = -EINVAL; + + if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) + goto out; + + iph = skb->nh.iph; + ihl = iph->ihl * 4; + if (ihl < sizeof(*iph)) + goto out; + + if (unlikely(!pskb_may_pull(skb, ihl))) + goto out; + + skb->h.raw = __skb_pull(skb, ihl); + iph = skb->nh.iph; + proto = iph->protocol & (MAX_INET_PROTOS - 1); + err = -EPROTONOSUPPORT; + + rcu_read_lock(); + ops = rcu_dereference(inet_protos[proto]); + if (likely(ops && ops->gso_send_check)) + err = ops->gso_send_check(skb); + rcu_read_unlock(); + +out: + return err; +} + static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) { struct sk_buff *segs = ERR_PTR(-EINVAL); @@ -1162,6 +1196,7 @@ static struct net_protocol igmp_protocol = { static struct net_protocol tcp_protocol = { .handler = tcp_v4_rcv, .err_handler = tcp_v4_err, + .gso_send_check = tcp_v4_gso_send_check, .gso_segment = tcp_tso_segment, .no_policy = 1, }; @@ -1208,6 +1243,7 @@ static int ipv4_proc_init(void); static struct packet_type ip_packet_type = { .type = __constant_htons(ETH_P_IP), .func = ip_rcv, + .gso_send_check = inet_gso_send_check, .gso_segment = inet_gso_segment, }; diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 6c642d11d4ca..773b12ba4e3c 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -457,13 +457,13 @@ int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); hlist_for_each_entry(r, node, &fib_rules, hlist) { - if (idx < s_idx) - continue; + goto next; if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWRULE, NLM_F_MULTI) < 0) break; +next: idx++; } rcu_read_unlock(); diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 1cb65305e102..23fb9d9768e3 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1252,8 +1252,8 @@ fn_trie_insert(struct fib_table *tb, struct rtmsg *r, struct kern_rta *rta, */ if (!fa_head) { - fa_head = fib_insert_node(t, &err, key, plen); err = 0; + fa_head = fib_insert_node(t, &err, key, plen); if (err) goto out_free_new_fa; } diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index 2160874ce7aa..03ff62ebcfeb 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -86,7 +86,7 @@ static struct inet_peer *peer_root = peer_avl_empty; static DEFINE_RWLOCK(peer_pool_lock); #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */ -static volatile int peer_total; +static int peer_total; /* Exported for sysctl_net_ipv4. 
*/ int inet_peer_threshold = 65536 + 128; /* start to throw entries more * aggressively at this stage */ diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index e1a7dba2fa8a..184c78ca79e6 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -428,6 +428,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, goto drop; } + /* Remove any debris in the socket control block */ + memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); + return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL, ip_rcv_finish); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index ca0e714613fb..7c9f9a6421b8 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -209,7 +209,7 @@ static inline int ip_finish_output(struct sk_buff *skb) return dst_output(skb); } #endif - if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) + if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) return ip_fragment(skb, ip_finish_output2); else return ip_finish_output2(skb); @@ -1095,7 +1095,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, while (size > 0) { int i; - if (skb_shinfo(skb)->gso_size) + if (skb_is_gso(skb)) len = size; else { diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index 8e0374847532..8a8b5cf2f7fe 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c @@ -70,7 +70,8 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) if (err) goto out; - skb_put(skb, dlen - plen); + skb->truesize += dlen - plen; + __skb_put(skb, dlen - plen); memcpy(skb->data, scratch, dlen); out: put_cpu(); diff --git a/net/ipv4/tcp_compound.c b/net/ipv4/tcp_compound.c deleted file mode 100644 index bc54f7e9aea9..000000000000 --- a/net/ipv4/tcp_compound.c +++ /dev/null @@ -1,448 +0,0 @@ -/* - * TCP Vegas congestion control - * - * This is based on the congestion detection/avoidance scheme described in - * Lawrence S. Brakmo and Larry L. Peterson. - * "TCP Vegas: End to end congestion avoidance on a global internet." - * IEEE Journal on Selected Areas in Communication, 13(8):1465--1480, - * October 1995. Available from: - * ftp://ftp.cs.arizona.edu/xkernel/Papers/jsac.ps - * - * See http://www.cs.arizona.edu/xkernel/ for their implementation. - * The main aspects that distinguish this implementation from the - * Arizona Vegas implementation are: - * o We do not change the loss detection or recovery mechanisms of - * Linux in any way. Linux already recovers from losses quite well, - * using fine-grained timers, NewReno, and FACK. - * o To avoid the performance penalty imposed by increasing cwnd - * only every-other RTT during slow start, we increase during - * every RTT during slow start, just like Reno. - * o Largely to allow continuous cwnd growth during slow start, - * we use the rate at which ACKs come back as the "actual" - * rate, rather than the rate at which data is sent. - * o To speed convergence to the right rate, we set the cwnd - * to achieve the right ("actual") rate when we exit slow start. - * o To filter out the noise caused by delayed ACKs, we use the - * minimum RTT sample observed during the last RTT to calculate - * the actual rate. - * o When the sender re-starts from idle, it waits until it has - * received ACKs for an entire flight of new data before making - * a cwnd adjustment decision. The original Vegas implementation - * assumed senders never went idle. 
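
The tcp_compound.c removal begins here; its header comment (inherited from TCP Vegas) explains the two-level RTT filtering the algorithm relied on: min-filter the samples seen within one RTT to dodge delayed-ACK noise, and min-filter over the whole connection to estimate propagation delay (baseRTT). A standalone rendering of that sampling step, mirroring the deleted tcp_compound_rtt_calc():

#include <stdio.h>

#define INF 0x7fffffff

static int baseRTT = INF;  /* min over the connection's lifetime */
static int minRTT  = INF;  /* min within the current RTT (the kernel
                            * resets this once per RTT; this toy does not) */
static int cntRTT;

/* Per-ACK sampling as described in the deleted design notes above. */
static void rtt_sample(int usrtt)
{
    int vrtt = usrtt + 1;      /* never allow a zero RTT or baseRTT */

    if (vrtt < baseRTT)
        baseRTT = vrtt;
    if (vrtt < minRTT)
        minRTT = vrtt;
    cntRTT++;
}

int main(void)
{
    int samples[] = { 120, 80, 200, 95 };

    for (int i = 0; i < 4; i++)
        rtt_sample(samples[i]);
    printf("baseRTT=%d minRTT=%d over %d samples\n",
           baseRTT, minRTT, cntRTT);   /* 81, 81, 4 */
    return 0;
}
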
- * - * - * TCP Compound based on TCP Vegas - * - * further details can be found here: - * ftp://ftp.research.microsoft.com/pub/tr/TR-2005-86.pdf - */ - -#include <linux/config.h> -#include <linux/mm.h> -#include <linux/module.h> -#include <linux/skbuff.h> -#include <linux/inet_diag.h> - -#include <net/tcp.h> - -/* Default values of the Vegas variables, in fixed-point representation - * with V_PARAM_SHIFT bits to the right of the binary point. - */ -#define V_PARAM_SHIFT 1 - -#define TCP_COMPOUND_ALPHA 3U -#define TCP_COMPOUND_BETA 1U -#define TCP_COMPOUND_GAMMA 30 -#define TCP_COMPOUND_ZETA 1 - -/* TCP compound variables */ -struct compound { - u32 beg_snd_nxt; /* right edge during last RTT */ - u32 beg_snd_una; /* left edge during last RTT */ - u32 beg_snd_cwnd; /* saves the size of the cwnd */ - u8 doing_vegas_now; /* if true, do vegas for this RTT */ - u16 cntRTT; /* # of RTTs measured within last RTT */ - u32 minRTT; /* min of RTTs measured within last RTT (in usec) */ - u32 baseRTT; /* the min of all Vegas RTT measurements seen (in usec) */ - - u32 cwnd; - u32 dwnd; -}; - -/* There are several situations when we must "re-start" Vegas: - * - * o when a connection is established - * o after an RTO - * o after fast recovery - * o when we send a packet and there is no outstanding - * unacknowledged data (restarting an idle connection) - * - * In these circumstances we cannot do a Vegas calculation at the - * end of the first RTT, because any calculation we do is using - * stale info -- both the saved cwnd and congestion feedback are - * stale. - * - * Instead we must wait until the completion of an RTT during - * which we actually receive ACKs. - */ -static inline void vegas_enable(struct sock *sk) -{ - const struct tcp_sock *tp = tcp_sk(sk); - struct compound *vegas = inet_csk_ca(sk); - - /* Begin taking Vegas samples next time we send something. */ - vegas->doing_vegas_now = 1; - - /* Set the beginning of the next send window. */ - vegas->beg_snd_nxt = tp->snd_nxt; - - vegas->cntRTT = 0; - vegas->minRTT = 0x7fffffff; -} - -/* Stop taking Vegas samples for now. */ -static inline void vegas_disable(struct sock *sk) -{ - struct compound *vegas = inet_csk_ca(sk); - - vegas->doing_vegas_now = 0; -} - -static void tcp_compound_init(struct sock *sk) -{ - struct compound *vegas = inet_csk_ca(sk); - const struct tcp_sock *tp = tcp_sk(sk); - - vegas->baseRTT = 0x7fffffff; - vegas_enable(sk); - - vegas->dwnd = 0; - vegas->cwnd = tp->snd_cwnd; -} - -/* Do RTT sampling needed for Vegas. - * Basically we: - * o min-filter RTT samples from within an RTT to get the current - * propagation delay + queuing delay (we are min-filtering to try to - * avoid the effects of delayed ACKs) - * o min-filter RTT samples from a much longer window (forever for now) - * to find the propagation delay (baseRTT) - */ -static void tcp_compound_rtt_calc(struct sock *sk, u32 usrtt) -{ - struct compound *vegas = inet_csk_ca(sk); - u32 vrtt = usrtt + 1; /* Never allow zero rtt or baseRTT */ - - /* Filter to find propagation delay: */ - if (vrtt < vegas->baseRTT) - vegas->baseRTT = vrtt; - - /* Find the min RTT during the last RTT to find - * the current prop. delay + queuing delay: - */ - - vegas->minRTT = min(vegas->minRTT, vrtt); - vegas->cntRTT++; -} - -static void tcp_compound_state(struct sock *sk, u8 ca_state) -{ - - if (ca_state == TCP_CA_Open) - vegas_enable(sk); - else - vegas_disable(sk); -} - - -/* 64bit divisor, dividend and result. 
dynamic precision */ -static inline u64 div64_64(u64 dividend, u64 divisor) -{ - u32 d = divisor; - - if (divisor > 0xffffffffULL) { - unsigned int shift = fls(divisor >> 32); - - d = divisor >> shift; - dividend >>= shift; - } - - /* avoid 64 bit division if possible */ - if (dividend >> 32) - do_div(dividend, d); - else - dividend = (u32) dividend / d; - - return dividend; -} - -/* calculate the quartic root of "a" using Newton-Raphson */ -static u32 qroot(u64 a) -{ - u32 x, x1; - - /* Initial estimate is based on: - * qrt(x) = exp(log(x) / 4) - */ - x = 1u << (fls64(a) >> 2); - - /* - * Iteration based on: - * 3 - * x = ( 3 * x + a / x ) / 4 - * k+1 k k - */ - do { - u64 x3 = x; - - x1 = x; - x3 *= x; - x3 *= x; - - x = (3 * x + (u32) div64_64(a, x3)) / 4; - } while (abs(x1 - x) > 1); - - return x; -} - - -/* - * If the connection is idle and we are restarting, - * then we don't want to do any Vegas calculations - * until we get fresh RTT samples. So when we - * restart, we reset our Vegas state to a clean - * slate. After we get acks for this flight of - * packets, _then_ we can make Vegas calculations - * again. - */ -static void tcp_compound_cwnd_event(struct sock *sk, enum tcp_ca_event event) -{ - if (event == CA_EVENT_CWND_RESTART || event == CA_EVENT_TX_START) - tcp_compound_init(sk); -} - -static void tcp_compound_cong_avoid(struct sock *sk, u32 ack, - u32 seq_rtt, u32 in_flight, int flag) -{ - struct tcp_sock *tp = tcp_sk(sk); - struct compound *vegas = inet_csk_ca(sk); - u8 inc = 0; - - if (vegas->cwnd + vegas->dwnd > tp->snd_cwnd) { - if (vegas->cwnd > tp->snd_cwnd || vegas->dwnd > tp->snd_cwnd) { - vegas->cwnd = tp->snd_cwnd; - vegas->dwnd = 0; - } else - vegas->cwnd = tp->snd_cwnd - vegas->dwnd; - - } - - if (!tcp_is_cwnd_limited(sk, in_flight)) - return; - - if (vegas->cwnd <= tp->snd_ssthresh) - inc = 1; - else if (tp->snd_cwnd_cnt < tp->snd_cwnd) - tp->snd_cwnd_cnt++; - - if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { - inc = 1; - tp->snd_cwnd_cnt = 0; - } - - if (inc && tp->snd_cwnd < tp->snd_cwnd_clamp) - vegas->cwnd++; - - /* The key players are v_beg_snd_una and v_beg_snd_nxt. - * - * These are so named because they represent the approximate values - * of snd_una and snd_nxt at the beginning of the current RTT. More - * precisely, they represent the amount of data sent during the RTT. - * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt, - * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding - * bytes of data have been ACKed during the course of the RTT, giving - * an "actual" rate of: - * - * (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration) - * - * Unfortunately, v_beg_snd_una is not exactly equal to snd_una, - * because delayed ACKs can cover more than one segment, so they - * don't line up nicely with the boundaries of RTTs. - * - * Another unfortunate fact of life is that delayed ACKs delay the - * advance of the left edge of our send window, so that the number - * of bytes we send in an RTT is often less than our cwnd will allow. - * So we keep track of our cwnd separately, in v_beg_snd_cwnd. - */ - - if (after(ack, vegas->beg_snd_nxt)) { - /* Do the Vegas once-per-RTT cwnd adjustment. */ - u32 old_wnd, old_snd_cwnd; - - /* Here old_wnd is essentially the window of data that was - * sent during the previous RTT, and has all - * been acknowledged in the course of the RTT that ended - * with the ACK we just received. Likewise, old_snd_cwnd - * is the cwnd during the previous RTT. 
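
Also deleted with tcp_compound.c is qroot(), an integer quartic root: start from 2^(fls64(a)/4), following qrt(x) = exp(log(x)/4), then iterate the Newton-Raphson step x = (3x + a/x^3)/4 until successive estimates differ by at most 1. The same routine as a standalone program, with plain 64-bit division in place of the kernel's div64_64() helper (a must be positive):

#include <stdio.h>
#include <stdint.h>

/* Position of the highest set bit, 1-based, standing in for the
 * kernel's fls64(). */
static int fls64(uint64_t x)
{
    int n = 0;
    while (x) { n++; x >>= 1; }
    return n;
}

/* Integer quartic root by Newton-Raphson, as in the removed file. */
static uint32_t qroot(uint64_t a)
{
    uint32_t x, x1;

    /* Initial estimate: qrt(a) = exp(log(a) / 4). */
    x = 1u << (fls64(a) >> 2);

    do {
        uint64_t x3 = (uint64_t)x * x * x;

        x1 = x;
        x = (3 * x + (uint32_t)(a / x3)) / 4;
    } while (x > x1 ? x - x1 > 1 : x1 - x > 1);

    return x;
}

int main(void)
{
    /* 7^4 = 2401, 100^4 = 100000000 */
    printf("%u %u\n", qroot(2401), qroot(100000000));
    return 0;
}
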
- */ - if (!tp->mss_cache) - return; - - old_wnd = (vegas->beg_snd_nxt - vegas->beg_snd_una) / - tp->mss_cache; - old_snd_cwnd = vegas->beg_snd_cwnd; - - /* Save the extent of the current window so we can use this - * at the end of the next RTT. - */ - vegas->beg_snd_una = vegas->beg_snd_nxt; - vegas->beg_snd_nxt = tp->snd_nxt; - vegas->beg_snd_cwnd = tp->snd_cwnd; - - /* We do the Vegas calculations only if we got enough RTT - * samples that we can be reasonably sure that we got - * at least one RTT sample that wasn't from a delayed ACK. - * If we only had 2 samples total, - * then that means we're getting only 1 ACK per RTT, which - * means they're almost certainly delayed ACKs. - * If we have 3 samples, we should be OK. - */ - - if (vegas->cntRTT > 2) { - u32 rtt, target_cwnd, diff; - u32 brtt, dwnd; - - /* We have enough RTT samples, so, using the Vegas - * algorithm, we determine if we should increase or - * decrease cwnd, and by how much. - */ - - /* Pluck out the RTT we are using for the Vegas - * calculations. This is the min RTT seen during the - * last RTT. Taking the min filters out the effects - * of delayed ACKs, at the cost of noticing congestion - * a bit later. - */ - rtt = vegas->minRTT; - - /* Calculate the cwnd we should have, if we weren't - * going too fast. - * - * This is: - * (actual rate in segments) * baseRTT - * We keep it as a fixed point number with - * V_PARAM_SHIFT bits to the right of the binary point. - */ - if (!rtt) - return; - - brtt = vegas->baseRTT; - target_cwnd = ((old_wnd * brtt) - << V_PARAM_SHIFT) / rtt; - - /* Calculate the difference between the window we had, - * and the window we would like to have. This quantity - * is the "Diff" from the Arizona Vegas papers. - * - * Again, this is a fixed point number with - * V_PARAM_SHIFT bits to the right of the binary - * point. - */ - - diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd; - - dwnd = vegas->dwnd; - - if (diff < (TCP_COMPOUND_GAMMA << V_PARAM_SHIFT)) { - u64 v; - u32 x; - - /* - * The TCP Compound paper describes the choice - * of "k" determines the agressiveness, - * ie. slope of the response function. - * - * For same value as HSTCP would be 0.8 - * but for computaional reasons, both the - * original authors and this implementation - * use 0.75. - */ - v = old_wnd; - x = qroot(v * v * v) >> TCP_COMPOUND_ALPHA; - if (x > 1) - dwnd = x - 1; - else - dwnd = 0; - - dwnd += vegas->dwnd; - - } else if ((dwnd << V_PARAM_SHIFT) < - (diff * TCP_COMPOUND_BETA)) - dwnd = 0; - else - dwnd = - ((dwnd << V_PARAM_SHIFT) - - (diff * - TCP_COMPOUND_BETA)) >> V_PARAM_SHIFT; - - vegas->dwnd = dwnd; - - } - - /* Wipe the slate clean for the next RTT. */ - vegas->cntRTT = 0; - vegas->minRTT = 0x7fffffff; - } - - tp->snd_cwnd = vegas->cwnd + vegas->dwnd; -} - -/* Extract info for Tcp socket info provided via netlink. 
*/ -static void tcp_compound_get_info(struct sock *sk, u32 ext, struct sk_buff *skb) -{ - const struct compound *ca = inet_csk_ca(sk); - if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) { - struct tcpvegas_info *info; - - info = RTA_DATA(__RTA_PUT(skb, INET_DIAG_VEGASINFO, - sizeof(*info))); - - info->tcpv_enabled = ca->doing_vegas_now; - info->tcpv_rttcnt = ca->cntRTT; - info->tcpv_rtt = ca->baseRTT; - info->tcpv_minrtt = ca->minRTT; - rtattr_failure:; - } -} - -static struct tcp_congestion_ops tcp_compound = { - .init = tcp_compound_init, - .ssthresh = tcp_reno_ssthresh, - .cong_avoid = tcp_compound_cong_avoid, - .rtt_sample = tcp_compound_rtt_calc, - .set_state = tcp_compound_state, - .cwnd_event = tcp_compound_cwnd_event, - .get_info = tcp_compound_get_info, - - .owner = THIS_MODULE, - .name = "compound", -}; - -static int __init tcp_compound_register(void) -{ - BUG_ON(sizeof(struct compound) > ICSK_CA_PRIV_SIZE); - tcp_register_congestion_control(&tcp_compound); - return 0; -} - -static void __exit tcp_compound_unregister(void) -{ - tcp_unregister_congestion_control(&tcp_compound); -} - -module_init(tcp_compound_register); -module_exit(tcp_compound_unregister); - -MODULE_AUTHOR("Angelo P. Castellani, Stephen Hemminger"); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("TCP Compound"); diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c index aaa1538c0692..fa3e1aad660c 100644 --- a/net/ipv4/tcp_highspeed.c +++ b/net/ipv4/tcp_highspeed.c @@ -139,14 +139,19 @@ static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt, tp->snd_cwnd++; } } else { - /* Update AIMD parameters */ + /* Update AIMD parameters. + * + * We want to guarantee that: + * hstcp_aimd_vals[ca->ai-1].cwnd < + * snd_cwnd <= + * hstcp_aimd_vals[ca->ai].cwnd + */ if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) { while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && ca->ai < HSTCP_AIMD_MAX - 1) ca->ai++; - } else if (tp->snd_cwnd < hstcp_aimd_vals[ca->ai].cwnd) { - while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && - ca->ai > 0) + } else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) { + while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) ca->ai--; } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5a886e6efbbe..a891133f00e4 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -496,6 +496,24 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) } } +int tcp_v4_gso_send_check(struct sk_buff *skb) +{ + struct iphdr *iph; + struct tcphdr *th; + + if (!pskb_may_pull(skb, sizeof(*th))) + return -EINVAL; + + iph = skb->nh.iph; + th = skb->h.th; + + th->check = 0; + th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0); + skb->csum = offsetof(struct tcphdr, check); + skb->ip_summed = CHECKSUM_HW; + return 0; +} + /* * This routine will send an RST to the other tcp. 
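
The new tcp_v4_gso_send_check() above sets up deferred checksumming: it stores the folded pseudo-header sum in th->check, records the offset of the check field in skb->csum, and marks the skb CHECKSUM_HW so hardware (or skb_checksum_help()) finishes the job over the segment alone. A self-contained demonstration that the two-step arithmetic matches the classic one-pass checksum (csum_add and csum_fold are local stand-ins for csum_partial()/csum_fold(); the addresses and segment bytes are made up):

#include <stdio.h>
#include <stdint.h>

/* One's-complement sum over 16-bit big-endian words, 32-bit accumulator. */
static uint32_t csum_add(uint32_t sum, const uint8_t *p, size_t n)
{
    while (n > 1) { sum += (p[0] << 8) | p[1]; p += 2; n -= 2; }
    if (n) sum += p[0] << 8;
    return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

int main(void)
{
    /* saddr, daddr, zero, proto (TCP), TCP length */
    uint8_t pseudo[12] = { 10,0,0,1, 10,0,0,2, 0, 6, 0, 8 };
    /* sport, dport, check (cleared), 2 payload bytes */
    uint8_t seg[8] = { 0x12,0x34, 0x00,0x50, 0,0, 'h','i' };

    /* Classic software path: checksum pseudo-header + segment. */
    uint16_t sw = ~csum_fold(csum_add(csum_add(0, pseudo, 12), seg, 8)) & 0xffff;

    /* Offload path, as in tcp_v4_gso_send_check(): seed the check field
     * with the folded pseudo-header sum, then let "hardware" checksum
     * only the segment bytes and complement the result. */
    uint16_t seed = csum_fold(csum_add(0, pseudo, 12));
    seg[4] = seed >> 8;
    seg[5] = seed & 0xff;
    uint16_t hw = ~csum_fold(csum_add(0, seg, 8)) & 0xffff;

    printf("software %04x, offloaded %04x\n", sw, hw);  /* equal */
    return 0;
}
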
* diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 193363e22932..d16f863cf687 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -134,7 +134,7 @@ static int xfrm4_output_finish(struct sk_buff *skb) } #endif - if (!skb_shinfo(skb)->gso_size) + if (!skb_is_gso(skb)) return xfrm4_output_finish2(skb); skb->protocol = htons(ETH_P_IP); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index c250d0af10d7..2316a4315a18 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -508,6 +508,26 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) kfree(ifp); } +static void +ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp) +{ + struct inet6_ifaddr *ifa, **ifap; + int ifp_scope = ipv6_addr_src_scope(&ifp->addr); + + /* + * Each device address list is sorted in order of scope - + * global before linklocal. + */ + for (ifap = &idev->addr_list; (ifa = *ifap) != NULL; + ifap = &ifa->if_next) { + if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr)) + break; + } + + ifp->if_next = *ifap; + *ifap = ifp; +} + /* On success it returns ifp with increased reference count */ static struct inet6_ifaddr * @@ -573,8 +593,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen, write_lock(&idev->lock); /* Add to inet6_dev unicast addr list. */ - ifa->if_next = idev->addr_list; - idev->addr_list = ifa; + ipv6_link_dev_addr(idev, ifa); #ifdef CONFIG_IPV6_PRIVACY if (ifa->flags&IFA_F_TEMPORARY) { @@ -987,7 +1006,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev, continue; } else if (score.scope < hiscore.scope) { if (score.scope < daddr_scope) - continue; + break; /* addresses sorted by scope */ else { score.rule = 2; goto record_it; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 2c5b44575af0..3bc74ce78800 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *skb) int ip6_output(struct sk_buff *skb) { - if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) || + if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) || dst_allfrag(skb->dst)) return ip6_fragment(skb, ip6_output2); else @@ -229,7 +229,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, skb->priority = sk->sk_priority; mtu = dst_mtu(dst); - if ((skb->len <= mtu) || ipfragok || skb_shinfo(skb)->gso_size) { + if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) { IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output); diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index b285b0357084..7e4d1c17bfbc 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -109,7 +109,8 @@ static int ipcomp6_input(struct xfrm_state *x, struct sk_buff *skb) goto out_put_cpu; } - skb_put(skb, dlen - plen); + skb->truesize += dlen - plen; + __skb_put(skb, dlen - plen); memcpy(skb->data, scratch, dlen); err = ipch->nexthdr; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 0c17dec11c8d..43327264e69c 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -57,29 +57,11 @@ DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly; -static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) +static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb, + int proto) { - struct sk_buff *segs = ERR_PTR(-EINVAL); - struct ipv6hdr *ipv6h; - struct inet6_protocol *ops; - int proto; + struct inet6_protocol *ops = NULL; - if 
(unlikely(skb_shinfo(skb)->gso_type & - ~(SKB_GSO_UDP | - SKB_GSO_DODGY | - SKB_GSO_TCP_ECN | - SKB_GSO_TCPV6 | - 0))) - goto out; - - if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) - goto out; - - ipv6h = skb->nh.ipv6h; - proto = ipv6h->nexthdr; - __skb_pull(skb, sizeof(*ipv6h)); - - rcu_read_lock(); for (;;) { struct ipv6_opt_hdr *opth; int len; @@ -88,30 +70,80 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) ops = rcu_dereference(inet6_protos[proto]); if (unlikely(!ops)) - goto unlock; + break; if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) break; } if (unlikely(!pskb_may_pull(skb, 8))) - goto unlock; + break; opth = (void *)skb->data; len = opth->hdrlen * 8 + 8; if (unlikely(!pskb_may_pull(skb, len))) - goto unlock; + break; proto = opth->nexthdr; __skb_pull(skb, len); } - skb->h.raw = skb->data; - if (likely(ops->gso_segment)) - segs = ops->gso_segment(skb, features); + return ops; +} + +static int ipv6_gso_send_check(struct sk_buff *skb) +{ + struct ipv6hdr *ipv6h; + struct inet6_protocol *ops; + int err = -EINVAL; + + if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) + goto out; -unlock: + ipv6h = skb->nh.ipv6h; + __skb_pull(skb, sizeof(*ipv6h)); + err = -EPROTONOSUPPORT; + + rcu_read_lock(); + ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); + if (likely(ops && ops->gso_send_check)) { + skb->h.raw = skb->data; + err = ops->gso_send_check(skb); + } + rcu_read_unlock(); + +out: + return err; +} + +static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) +{ + struct sk_buff *segs = ERR_PTR(-EINVAL); + struct ipv6hdr *ipv6h; + struct inet6_protocol *ops; + + if (unlikely(skb_shinfo(skb)->gso_type & + ~(SKB_GSO_UDP | + SKB_GSO_DODGY | + SKB_GSO_TCP_ECN | + SKB_GSO_TCPV6 | + 0))) + goto out; + + if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) + goto out; + + ipv6h = skb->nh.ipv6h; + __skb_pull(skb, sizeof(*ipv6h)); + segs = ERR_PTR(-EPROTONOSUPPORT); + + rcu_read_lock(); + ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); + if (likely(ops && ops->gso_segment)) { + skb->h.raw = skb->data; + segs = ops->gso_segment(skb, features); + } rcu_read_unlock(); if (unlikely(IS_ERR(segs))) @@ -130,6 +162,7 @@ out: static struct packet_type ipv6_packet_type = { .type = __constant_htons(ETH_P_IPV6), .func = ipv6_rcv, + .gso_send_check = ipv6_gso_send_check, .gso_segment = ipv6_gso_segment, }; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 5bdcb9002cf7..923989d0520d 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -552,6 +552,24 @@ static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) } } +static int tcp_v6_gso_send_check(struct sk_buff *skb) +{ + struct ipv6hdr *ipv6h; + struct tcphdr *th; + + if (!pskb_may_pull(skb, sizeof(*th))) + return -EINVAL; + + ipv6h = skb->nh.ipv6h; + th = skb->h.th; + + th->check = 0; + th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, + IPPROTO_TCP, 0); + skb->csum = offsetof(struct tcphdr, check); + skb->ip_summed = CHECKSUM_HW; + return 0; +} static void tcp_v6_send_reset(struct sk_buff *skb) { @@ -1603,6 +1621,7 @@ struct proto tcpv6_prot = { static struct inet6_protocol tcpv6_protocol = { .handler = tcp_v6_rcv, .err_handler = tcp_v6_err, + .gso_send_check = tcp_v6_gso_send_check, .gso_segment = tcp_tso_segment, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, }; diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 48fccb1eca08..0eea60ea9ebc 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -122,7 +122,7 @@ static 
int xfrm6_output_finish(struct sk_buff *skb) { struct sk_buff *segs; - if (!skb_shinfo(skb)->gso_size) + if (!skb_is_gso(skb)) return xfrm6_output_finish2(skb); skb->protocol = htons(ETH_P_IP); diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 389a4119e1b4..1d50f801f181 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -66,6 +66,14 @@ static DEFINE_SPINLOCK(nr_list_lock); static const struct proto_ops nr_proto_ops; /* + * NETROM network devices are virtual network devices encapsulating NETROM + * frames into AX.25 which will be sent through an AX.25 device, so form a + * special "super class" of normal net devices; split their locks off into a + * separate class since they always nest. + */ +static struct lock_class_key nr_netdev_xmit_lock_key; + +/* * Socket removal during an interrupt is now safe. */ static void nr_remove_socket(struct sock *sk) @@ -986,18 +994,18 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev) nr_make->vl = 0; nr_make->state = NR_STATE_3; sk_acceptq_added(sk); - - nr_insert_socket(make); - skb_queue_head(&sk->sk_receive_queue, skb); - nr_start_heartbeat(make); - nr_start_idletimer(make); - if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb->len); bh_unlock_sock(sk); + + nr_insert_socket(make); + + nr_start_heartbeat(make); + nr_start_idletimer(make); + return 1; } @@ -1382,14 +1390,12 @@ static int __init nr_proto_init(void) return -1; } - dev_nr = kmalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL); + dev_nr = kzalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL); if (dev_nr == NULL) { printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n"); return -1; } - memset(dev_nr, 0x00, nr_ndevs * sizeof(struct net_device *)); - for (i = 0; i < nr_ndevs; i++) { char name[IFNAMSIZ]; struct net_device *dev; @@ -1407,6 +1413,7 @@ static int __init nr_proto_init(void) free_netdev(dev); goto fail; } + lockdep_set_class(&dev->_xmit_lock, &nr_netdev_xmit_lock_key); dev_nr[i] = dev; } diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c index 75b72d389ba9..ddba1c144260 100644 --- a/net/netrom/nr_timer.c +++ b/net/netrom/nr_timer.c @@ -138,8 +138,8 @@ static void nr_heartbeat_expiry(unsigned long param) if (sock_flag(sk, SOCK_DESTROY) || (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { sock_hold(sk); - nr_destroy_socket(sk); bh_unlock_sock(sk); + nr_destroy_socket(sk); sock_put(sk); return; } diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index d0a67bb31363..08a542855654 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -67,6 +67,14 @@ static struct proto_ops rose_proto_ops; ax25_address rose_callsign; /* + * ROSE network devices are virtual network devices encapsulating ROSE + * frames into AX.25 which will be sent through an AX.25 device, so form a + * special "super class" of normal net devices; split their locks off into a + * separate class since they always nest. + */ +static struct lock_class_key rose_netdev_xmit_lock_key; + +/* * Convert a ROSE address into text. 
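
The nr_heartbeat_expiry() hunk above moves nr_destroy_socket() after bh_unlock_sock(): destruction takes locks of its own, so running it while the socket lock is still held invites deadlock. The shape of the fix in a userspace sketch (sock_t, refs and heartbeat_expiry are illustrative, not NET/ROM structures):

#include <pthread.h>
#include <stdlib.h>

typedef struct {
    pthread_mutex_t lock;
    int refs;
} sock_t;

/* Timer-expiry path: decide under the lock, tear down outside it,
 * echoing the reordering of nr_destroy_socket() past bh_unlock_sock(). */
static void heartbeat_expiry(sock_t *sk)
{
    int dead;

    pthread_mutex_lock(&sk->lock);
    dead = (--sk->refs == 0);   /* the decision needs the lock */
    pthread_mutex_unlock(&sk->lock);

    if (dead) {                 /* the teardown must not hold it */
        pthread_mutex_destroy(&sk->lock);
        free(sk);
    }
}

int main(void)
{
    sock_t *sk = malloc(sizeof(*sk));

    pthread_mutex_init(&sk->lock, NULL);
    sk->refs = 1;
    heartbeat_expiry(sk);       /* last reference: frees sk safely */
    return 0;
}
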
*/ const char *rose2asc(const rose_address *addr) @@ -1490,14 +1498,13 @@ static int __init rose_proto_init(void) rose_callsign = null_ax25_address; - dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); + dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); if (dev_rose == NULL) { printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); rc = -ENOMEM; goto out_proto_unregister; } - memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device*)); for (i = 0; i < rose_ndevs; i++) { struct net_device *dev; char name[IFNAMSIZ]; @@ -1516,6 +1523,7 @@ static int __init rose_proto_init(void) free_netdev(dev); goto fail; } + lockdep_set_class(&dev->_xmit_lock, &rose_netdev_xmit_lock_key); dev_rose[i] = dev; } diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 599423cc9d0d..9affeeedf107 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -602,8 +602,8 @@ static int tca_action_flush(struct rtattr *rta, struct nlmsghdr *n, u32 pid) return err; rtattr_failure: - module_put(a->ops->owner); nlmsg_failure: + module_put(a->ops->owner); err_out: kfree_skb(skb); kfree(a); @@ -884,8 +884,6 @@ static int __init tc_action_init(void) link_p[RTM_GETACTION-RTM_BASE].dumpit = tc_dump_action; } - printk("TC classifier action (bugs to netdev@vger.kernel.org cc " - "hadi@cyberus.ca)\n"); return 0; } diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 34afe41fa2f3..cc5f339e6f91 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -196,7 +196,7 @@ struct htb_class struct qdisc_rate_table *rate; /* rate table of the class itself */ struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */ long buffer,cbuffer; /* token bucket depth/rate */ - long mbuffer; /* max wait time */ + psched_tdiff_t mbuffer; /* max wait time */ long tokens,ctokens; /* current number of tokens */ psched_time_t t_c; /* checkpoint time */ }; @@ -1601,7 +1601,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, /* set class to be in HTB_CAN_SEND state */ cl->tokens = hopt->buffer; cl->ctokens = hopt->cbuffer; - cl->mbuffer = 60000000; /* 1min */ + cl->mbuffer = PSCHED_JIFFIE2US(HZ*60); /* 1min */ PSCHED_GET_TIME(cl->t_c); cl->cmode = HTB_CAN_SEND; |
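
Both the NET/ROM and ROSE hunks pair their explanatory comment with the same two-line recipe: a static struct lock_class_key plus lockdep_set_class() on each virtual device's _xmit_lock, so lockdep sees virtual-over-real transmit nesting as two distinct lock classes instead of recursion on a single one. A kernel-style sketch of that recipe as applied in nr_proto_init()/rose_proto_init() (mydev_register and setup_mydev are invented stand-ins, not kernel symbols):

#include <linux/netdevice.h>
#include <linux/lockdep.h>

/* Virtual devices transmit by queueing onto a real device, so their
 * _xmit_lock is always taken inside the real device's _xmit_lock.
 * A separate class tells lockdep this nesting is expected. */
static struct lock_class_key mydev_xmit_lock_key;

static void setup_mydev(struct net_device *dev)
{
    /* driver init (hard_start_xmit, mtu, type, ...) would go here */
}

static int mydev_register(const char *name)
{
    struct net_device *dev;
    int err;

    dev = alloc_netdev(0, name, setup_mydev);
    if (!dev)
        return -ENOMEM;

    err = register_netdev(dev);
    if (err) {
        free_netdev(dev);
        return err;
    }

    /* Same call the patch adds for each nr%d and rose%d device. */
    lockdep_set_class(&dev->_xmit_lock, &mydev_xmit_lock_key);
    return 0;
}
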