Diffstat (limited to 'net')
34 files changed, 153 insertions, 100 deletions
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 4eb969518ee0..051432ea4f69 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -705,7 +705,6 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg,
 	case DEVLINK_PORT_FLAVOUR_PHYSICAL:
 	case DEVLINK_PORT_FLAVOUR_CPU:
 	case DEVLINK_PORT_FLAVOUR_DSA:
-	case DEVLINK_PORT_FLAVOUR_VIRTUAL:
 		if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER,
 				attrs->phys.port_number))
 			return -EMSGSIZE;
@@ -8631,7 +8630,6 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,

 	switch (attrs->flavour) {
 	case DEVLINK_PORT_FLAVOUR_PHYSICAL:
-	case DEVLINK_PORT_FLAVOUR_VIRTUAL:
 		if (!attrs->split)
 			n = snprintf(name, len, "p%u", attrs->phys.port_number);
 		else
@@ -8679,6 +8677,8 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
 		n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
 			     attrs->pci_sf.sf);
 		break;
+	case DEVLINK_PORT_FLAVOUR_VIRTUAL:
+		return -EOPNOTSUPP;
 	}

 	if (n >= len)
diff --git a/net/core/filter.c b/net/core/filter.c
index cae56d08a670..65ab4e21c087 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3784,6 +3784,7 @@ static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
 		__skb_push(skb, head_room);
 		memset(skb->data, 0, head_room);
 		skb_reset_mac_header(skb);
+		skb_reset_mac_len(skb);
 	}

 	return ret;
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 9ec1aa9640ad..3c4c4c7a0402 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -174,8 +174,10 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
 					  struct page *page,
 					  unsigned int dma_sync_size)
 {
+	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
+
 	dma_sync_size = min(dma_sync_size, pool->p.max_len);
-	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
+	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
 					 pool->p.offset, dma_sync_size,
 					 pool->p.dma_dir);
 }
@@ -195,7 +197,7 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
 	if (dma_mapping_error(pool->p.dev, dma))
 		return false;

-	page->dma_addr = dma;
+	page_pool_set_dma_addr(page, dma);

 	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
 		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
@@ -331,13 +333,13 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
 		 */
 		goto skip_dma_unmap;

-	dma = page->dma_addr;
+	dma = page_pool_get_dma_addr(page);

-	/* When page is unmapped, it cannot be returned our pool */
+	/* When page is unmapped, it cannot be returned to our pool */
 	dma_unmap_page_attrs(pool->p.dev, dma,
 			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
 			     DMA_ATTR_SKIP_CPU_SYNC);
-	page->dma_addr = 0;
+	page_pool_set_dma_addr(page, 0);
 skip_dma_unmap:
 	/* This may be the last page returned, releasing the pool, so
 	 * it is not safe to reference pool afterwards.
diff --git a/net/ethtool/stats.c b/net/ethtool/stats.c
index b7642dc96d50..ec07f5765e03 100644
--- a/net/ethtool/stats.c
+++ b/net/ethtool/stats.c
@@ -119,7 +119,7 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base,
 	 */
 	memset(&data->phy_stats, 0xff, sizeof(data->phy_stats));
 	memset(&data->mac_stats, 0xff, sizeof(data->mac_stats));
-	memset(&data->ctrl_stats, 0xff, sizeof(data->mac_stats));
+	memset(&data->ctrl_stats, 0xff, sizeof(data->ctrl_stats));
 	memset(&data->rmon_stats, 0xff, sizeof(data->rmon_stats));

 	if (test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask) &&
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index bfcdc75fc01e..26c32407f029 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -218,6 +218,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (master) {
 		skb->dev = master->dev;
 		skb_reset_mac_header(skb);
+		skb_reset_mac_len(skb);
 		hsr_forward_skb(skb, master);
 	} else {
 		atomic_long_inc(&dev->tx_dropped);
@@ -259,6 +260,7 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master)
 		goto out;

 	skb_reset_mac_header(skb);
+	skb_reset_mac_len(skb);
 	skb_reset_network_header(skb);
 	skb_reset_transport_header(skb);
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 6852e9bccf5b..ceb8afb2a62f 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -474,8 +474,8 @@ static void handle_std_frame(struct sk_buff *skb,
 	}
 }

-void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
-			 struct hsr_frame_info *frame)
+int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
+			struct hsr_frame_info *frame)
 {
 	struct hsr_port *port = frame->port_rcv;
 	struct hsr_priv *hsr = port->hsr;
@@ -483,20 +483,26 @@ void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
 	/* HSRv0 supervisory frames double as a tag so treat them as tagged.
 	 */
 	if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) ||
 	    proto == htons(ETH_P_HSR)) {
+		/* Check if skb contains hsr_ethhdr */
+		if (skb->mac_len < sizeof(struct hsr_ethhdr))
+			return -EINVAL;
+
 		/* HSR tagged frame :- Data or Supervision */
 		frame->skb_std = NULL;
 		frame->skb_prp = NULL;
 		frame->skb_hsr = skb;
 		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
-		return;
+		return 0;
 	}

 	/* Standard frame or PRP from master port */
 	handle_std_frame(skb, frame);
+
+	return 0;
 }

-void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
-			 struct hsr_frame_info *frame)
+int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+			struct hsr_frame_info *frame)
 {
 	/* Supervision frame */
 	struct prp_rct *rct = skb_get_PRP_rct(skb);
@@ -507,9 +513,11 @@
 		frame->skb_std = NULL;
 		frame->skb_prp = skb;
 		frame->sequence_nr = prp_get_skb_sequence_nr(rct);
-		return;
+		return 0;
 	}

 	handle_std_frame(skb, frame);
+
+	return 0;
 }

 static int fill_frame_info(struct hsr_frame_info *frame,
@@ -519,9 +527,10 @@
 	struct hsr_vlan_ethhdr *vlan_hdr;
 	struct ethhdr *ethhdr;
 	__be16 proto;
+	int ret;

-	/* Check if skb contains hsr_ethhdr */
-	if (skb->mac_len < sizeof(struct hsr_ethhdr))
+	/* Check if skb contains ethhdr */
+	if (skb->mac_len < sizeof(struct ethhdr))
 		return -EINVAL;

 	memset(frame, 0, sizeof(*frame));
@@ -548,7 +557,10 @@ static int fill_frame_info(struct hsr_frame_info *frame,
 	frame->is_from_san = false;
 	frame->port_rcv = port;
-	hsr->proto_ops->fill_frame_info(proto, skb, frame);
+	ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
+	if (ret)
+		return ret;
+
 	check_local_dest(port->hsr, skb, frame);

 	return 0;
diff --git a/net/hsr/hsr_forward.h b/net/hsr/hsr_forward.h
index b6acaafa83fc..206636750b30 100644
--- a/net/hsr/hsr_forward.h
+++ b/net/hsr/hsr_forward.h
@@ -24,8 +24,8 @@ struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
 				       struct hsr_port *port);
 bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port);
 bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port);
-void prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
-			 struct hsr_frame_info *frame);
-void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
-			 struct hsr_frame_info *frame);
+int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
+			struct hsr_frame_info *frame);
+int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
+			struct hsr_frame_info *frame);
 #endif /* __HSR_FORWARD_H */
diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
index 8f264672b70b..53d1f7a82463 100644
--- a/net/hsr/hsr_main.h
+++ b/net/hsr/hsr_main.h
@@ -186,8 +186,8 @@ struct hsr_proto_ops {
 					       struct hsr_port *port);
 	struct sk_buff * (*create_tagged_frame)(struct hsr_frame_info *frame,
 						 struct hsr_port *port);
-	void (*fill_frame_info)(__be16 proto, struct sk_buff *skb,
-				struct hsr_frame_info *frame);
+	int (*fill_frame_info)(__be16 proto, struct sk_buff *skb,
+			       struct hsr_frame_info *frame);
 	bool (*invalid_dan_ingress_frame)(__be16 protocol);
 	void (*update_san_info)(struct hsr_node *node, bool is_sup);
 };
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
index c5227d42faf5..b70e6bbf6021 100644
--- a/net/hsr/hsr_slave.c
+++ b/net/hsr/hsr_slave.c
@@ -60,12 +60,11 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
 		goto finish_pass;

 	skb_push(skb, ETH_HLEN);
-
-	if (skb_mac_header(skb) != skb->data) {
-		WARN_ONCE(1, "%s:%d: Malformed frame at source port %s)\n",
-			  __func__, __LINE__, port->dev->name);
-		goto finish_consume;
-	}
+	skb_reset_mac_header(skb);
+	if ((!hsr->prot_version && protocol == htons(ETH_P_PRP)) ||
+	    protocol == htons(ETH_P_HSR))
+		skb_set_network_header(skb, ETH_HLEN + HSR_HLEN);
+	skb_reset_mac_len(skb);

 	hsr_forward_skb(skb, port);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 47a0dc46cbdb..28e44782c94d 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -343,7 +343,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 	hdr = ipv6_hdr(skb);
 	fhdr = (struct frag_hdr *)skb_transport_header(skb);

-	if (!(fhdr->frag_off & htons(0xFFF9))) {
+	if (!(fhdr->frag_off & htons(IP6_OFFSET | IP6_MF))) {
 		/* It is not a fragmented frame */
 		skb->transport_header += sizeof(struct frag_hdr);
 		__IP6_INC_STATS(net,
@@ -351,6 +351,8 @@

 		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
 		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
+		IP6CB(skb)->frag_max_size = ntohs(hdr->payload_len) +
+					    sizeof(struct ipv6hdr);
 		return 1;
 	}
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 99fc21406168..6b825fb3fa83 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -130,7 +130,6 @@ static void mptcp_parse_option(const struct sk_buff *skb,
 			memcpy(mp_opt->hmac, ptr, MPTCPOPT_HMAC_LEN);
 			pr_debug("MP_JOIN hmac");
 		} else {
-			pr_warn("MP_JOIN bad option size");
 			mp_opt->mp_join = 0;
 		}
 		break;
@@ -1024,7 +1023,7 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR);
 		} else {
 			mptcp_pm_add_addr_echoed(msk, &mp_opt.addr);
-			mptcp_pm_del_add_timer(msk, &mp_opt.addr);
+			mptcp_pm_del_add_timer(msk, &mp_opt.addr, true);
 			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD);
 		}
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 6ba040897738..2469e06a3a9d 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -346,18 +346,18 @@ out:

 struct mptcp_pm_add_entry *
 mptcp_pm_del_add_timer(struct mptcp_sock *msk,
-		       struct mptcp_addr_info *addr)
+		       struct mptcp_addr_info *addr, bool check_id)
 {
 	struct mptcp_pm_add_entry *entry;
 	struct sock *sk = (struct sock *)msk;

 	spin_lock_bh(&msk->pm.lock);
 	entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
-	if (entry)
+	if (entry && (!check_id || entry->addr.id == addr->id))
 		entry->retrans_times = ADD_ADDR_RETRANS_MAX;
 	spin_unlock_bh(&msk->pm.lock);

-	if (entry)
+	if (entry && (!check_id || entry->addr.id == addr->id))
 		sk_stop_timer_sync(sk, &entry->add_timer);

 	return entry;
@@ -1064,7 +1064,7 @@ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
 {
 	struct mptcp_pm_add_entry *entry;

-	entry = mptcp_pm_del_add_timer(msk, addr);
+	entry = mptcp_pm_del_add_timer(msk, addr, false);
 	if (entry) {
 		list_del(&entry->list);
 		kfree(entry);
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 2d21a4793d9d..2bc199549a88 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2424,13 +2424,12 @@ static int __mptcp_init_sock(struct sock *sk)
 	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
 	timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0);

-	tcp_assign_congestion_control(sk);
-
 	return 0;
 }

 static int mptcp_init_sock(struct sock *sk)
 {
+	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct net *net = sock_net(sk);
 	int ret;

@@ -2448,6 +2447,16 @@ static int mptcp_init_sock(struct sock *sk)
 	if (ret)
 		return ret;

+	/* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
+	 * propagate the correct value
+	 */
+	tcp_assign_congestion_control(sk);
+	strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);
+
+	/* no need to keep a reference to the ops, the name will suffice */
+	tcp_cleanup_congestion_control(sk);
+	icsk->icsk_ca_ops = NULL;
+
 	sk_sockets_allocated_inc(sk);
 	sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
 	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
@@ -2622,7 +2631,6 @@ static void __mptcp_destroy_sock(struct sock *sk)
 	sk_stream_kill_queues(sk);
 	xfrm_sk_free_policy(sk);

-	tcp_cleanup_congestion_control(sk);
 	sk_refcnt_debug_release(sk);
 	mptcp_dispose_initial_subflow(msk);
 	sock_put(sk);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index edc0128730df..0c6f99c67345 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -258,6 +258,7 @@ struct mptcp_sock {
 	} rcvq_space;

 	u32 setsockopt_seq;
+	char ca_name[TCP_CA_NAME_MAX];
 };

 #define mptcp_lock_sock(___sk, cb) do {					\
@@ -671,7 +672,7 @@ void mptcp_pm_free_anno_list(struct mptcp_sock *msk);
 bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk);
 struct mptcp_pm_add_entry *
 mptcp_pm_del_add_timer(struct mptcp_sock *msk,
-		       struct mptcp_addr_info *addr);
+		       struct mptcp_addr_info *addr, bool check_id);
 struct mptcp_pm_add_entry *
 mptcp_lookup_anno_list_by_saddr(struct mptcp_sock *msk,
 				struct mptcp_addr_info *addr);
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 00d941b66c1e..a79798189599 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -547,7 +547,7 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t
 	}

 	if (ret == 0)
-		tcp_set_congestion_control(sk, name, false, cap_net_admin);
+		strcpy(msk->ca_name, name);

 	release_sock(sk);
 	return ret;
@@ -705,7 +705,7 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
 	sock_valbool_flag(ssk, SOCK_DBG, sock_flag(sk, SOCK_DBG));

 	if (inet_csk(sk)->icsk_ca_ops != inet_csk(ssk)->icsk_ca_ops)
-		tcp_set_congestion_control(ssk, inet_csk(sk)->icsk_ca_ops->name, false, true);
+		tcp_set_congestion_control(ssk, msk->ca_name, false, true);
 }

 static void __mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk)
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index a5ede357cfbc..bde6be77ea73 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -867,7 +867,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk,

 	data_len = mpext->data_len;
 	if (data_len == 0) {
-		pr_err("Infinite mapping not handled");
 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
 		return MAPPING_INVALID;
 	}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 3a62f97acf39..6133e412b948 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -461,11 +461,13 @@ void netlink_table_ungrab(void)
 static inline void
 netlink_lock_table(void)
 {
+	unsigned long flags;
+
 	/* read_lock() synchronizes us to netlink_table_grab */

-	read_lock(&nl_table_lock);
+	read_lock_irqsave(&nl_table_lock, flags);
 	atomic_inc(&nl_table_users);
-	read_unlock(&nl_table_lock);
+	read_unlock_irqrestore(&nl_table_lock, flags);
 }

 static inline void
diff --git a/net/rds/connection.c b/net/rds/connection.c
index f2fcab182095..a3bc4b54d491 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -240,12 +240,23 @@ static struct rds_connection *__rds_conn_create(struct net *net,
 	if (loop_trans) {
 		rds_trans_put(loop_trans);
 		conn->c_loopback = 1;
-		if (is_outgoing && trans->t_prefer_loopback) {
-			/* "outgoing" connection - and the transport
-			 * says it wants the connection handled by the
-			 * loopback transport. This is what TCP does.
-			 */
-			trans = &rds_loop_transport;
+		if (trans->t_prefer_loopback) {
+			if (likely(is_outgoing)) {
+				/* "outgoing" connection to local address.
+				 * Protocol says it wants the connection
+				 * handled by the loopback transport.
+				 * This is what TCP does.
+				 */
+				trans = &rds_loop_transport;
+			} else {
+				/* No transport currently in use
+				 * should end up here, but if it
+				 * does, reset/destroy the connection.
+				 */
+				kmem_cache_free(rds_conn_slab, conn);
+				conn = ERR_PTR(-EOPNOTSUPP);
+				goto out;
+			}
 		}
 	}
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 43db0eca911f..abf19c0e3ba0 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -313,8 +313,8 @@ out:
 }
 #endif

-static int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
-			       __u32 scope_id)
+int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
+			__u32 scope_id)
 {
 	struct net_device *dev = NULL;
 #if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index bad9cf49d565..dc8d745d6857 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -59,7 +59,8 @@ u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
 u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
 extern struct rds_transport rds_tcp_transport;
 void rds_tcp_accept_work(struct sock *sk);
-
+int rds_tcp_laddr_check(struct net *net, const struct in6_addr *addr,
+			__u32 scope_id);
 /* tcp_connect.c */
 int rds_tcp_conn_path_connect(struct rds_conn_path *cp);
 void rds_tcp_conn_path_shutdown(struct rds_conn_path *conn);
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 101cf14215a0..09cadd556d1e 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -167,6 +167,12 @@ int rds_tcp_accept_one(struct socket *sock)
 	}
 #endif

+	if (!rds_tcp_laddr_check(sock_net(sock->sk), peer_addr, dev_if)) {
+		/* local address connection is only allowed via loopback */
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+
 	conn = rds_conn_create(sock_net(sock->sk), my_addr, peer_addr,
 			       &rds_tcp_transport, 0, GFP_KERNEL, dev_if);
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index ec7a1c438df9..18edd9ad1410 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -984,7 +984,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
 	 */
 	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
 	if (!cached) {
-		if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
+		if (tcf_ct_flow_table_lookup(p, skb, family)) {
 			skip_add = true;
 			goto do_nat;
 		}
@@ -1022,10 +1022,11 @@ do_nat:
 		 * even if the connection is already confirmed.
 		 */
 		nf_conntrack_confirm(skb);
-	} else if (!skip_add) {
-		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
 	}

+	if (!skip_add)
+		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
+
 out_push:
 	skb_push_rcsum(skb, nh_ofs);

@@ -1202,9 +1203,6 @@ static int tcf_ct_fill_params(struct net *net,
 			   sizeof(p->zone));
 	}

-	if (p->zone == NF_CT_DEFAULT_ZONE_ID)
-		return 0;
-
 	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
 	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
 	if (!tmpl) {
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 40fbea626dfd..279f9e2a2319 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1624,7 +1624,7 @@ int tcf_classify_ingress(struct sk_buff *skb,

 	/* If we missed on some chain */
 	if (ret == TC_ACT_UNSPEC && last_executed_chain) {
-		ext = skb_ext_add(skb, TC_SKB_EXT);
+		ext = tc_skb_ext_alloc(skb);
 		if (WARN_ON_ONCE(!ext))
 			return TC_ACT_SHOT;
 		ext->chain = last_executed_chain;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index cd2748e2d4a2..d320bcfb2da2 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -407,7 +407,8 @@ static void dsmark_reset(struct Qdisc *sch)
 	struct dsmark_qdisc_data *p = qdisc_priv(sch);

 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
-	qdisc_reset(p->q);
+	if (p->q)
+		qdisc_reset(p->q);
 	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 }
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index 949163fe68af..cac684952edc 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -138,8 +138,15 @@ static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,

 	/* Classifies packet into corresponding flow */
 	idx = fq_pie_classify(skb, sch, &ret);
-	sel_flow = &q->flows[idx];
+	if (idx == 0) {
+		if (ret & __NET_XMIT_BYPASS)
+			qdisc_qstats_drop(sch);
+		__qdisc_drop(skb, to_free);
+		return ret;
+	}
+	idx--;

+	sel_flow = &q->flows[idx];
 	/* Checks whether adding a new packet would exceed memory limit */
 	get_pie_cb(skb)->mem_usage = skb->truesize;
 	memory_limited = q->memory_usage > q->memory_limit + skb->truesize;
@@ -297,9 +304,9 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
 			goto flow_error;
 		}
 		q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
-		if (!q->flows_cnt || q->flows_cnt >= 65536) {
+		if (!q->flows_cnt || q->flows_cnt > 65536) {
 			NL_SET_ERR_MSG_MOD(extack,
-					   "Number of flows must range in [1..65535]");
+					   "Number of flows must range in [1..65536]");
 			goto flow_error;
 		}
 	}
@@ -367,7 +374,7 @@ static void fq_pie_timer(struct timer_list *t)
 	struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
 	struct Qdisc *sch = q->sch;
 	spinlock_t *root_lock; /* to lock qdisc for probability calculations */
-	u16 idx;
+	u32 idx;

 	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
 	spin_lock(root_lock);
@@ -388,7 +395,7 @@ static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
 {
 	struct fq_pie_sched_data *q = qdisc_priv(sch);
 	int err;
-	u16 idx;
+	u32 idx;

 	pie_params_init(&q->p_params);
 	sch->limit = 10 * 1024;
@@ -500,7 +507,7 @@ static int fq_pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 static void fq_pie_reset(struct Qdisc *sch)
 {
 	struct fq_pie_sched_data *q = qdisc_priv(sch);
-	u16 idx;
+	u32 idx;

 	INIT_LIST_HEAD(&q->new_flows);
 	INIT_LIST_HEAD(&q->old_flows);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 40f9f6c4a0a1..a79d193ff872 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4473,6 +4473,7 @@ static int sctp_setsockopt_encap_port(struct sock *sk,
 				    transports)
 			t->encap_port = encap_port;

+	asoc->encap_port = encap_port;
 	return 0;
 }
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index e92df779af73..55871b277f47 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -307,7 +307,7 @@ static struct ctl_table sctp_net_table[] = {
 		.data = &init_net.sctp.encap_port,
 		.maxlen = sizeof(int),
 		.mode = 0644,
-		.proc_handler = proc_dointvec,
+		.proc_handler = proc_dointvec_minmax,
 		.extra1 = SYSCTL_ZERO,
 		.extra2 = &udp_port_max,
 	},
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index 9c6e95882553..967712ba52a0 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -402,6 +402,14 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
 		return NULL;
 	}

+	smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
+						 WQ_MEM_RECLAIM, name);
+	if (!smcd->event_wq) {
+		kfree(smcd->conn);
+		kfree(smcd);
+		return NULL;
+	}
+
 	smcd->dev.parent = parent;
 	smcd->dev.release = smcd_release;
 	device_initialize(&smcd->dev);
@@ -415,19 +423,14 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
 	INIT_LIST_HEAD(&smcd->vlan);
 	INIT_LIST_HEAD(&smcd->lgr_list);
 	init_waitqueue_head(&smcd->lgrs_deleted);
-	smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
-						 WQ_MEM_RECLAIM, name);
-	if (!smcd->event_wq) {
-		kfree(smcd->conn);
-		kfree(smcd);
-		return NULL;
-	}
 	return smcd;
 }
 EXPORT_SYMBOL_GPL(smcd_alloc_dev);

 int smcd_register_dev(struct smcd_dev *smcd)
 {
+	int rc;
+
 	mutex_lock(&smcd_dev_list.mutex);
 	if (list_empty(&smcd_dev_list.list)) {
 		u8 *system_eid = NULL;
@@ -447,7 +450,14 @@ int smcd_register_dev(struct smcd_dev *smcd)
 			dev_name(&smcd->dev), smcd->pnetid,
 			smcd->pnetid_by_user ? " (user defined)" : "");

-	return device_add(&smcd->dev);
+	rc = device_add(&smcd->dev);
+	if (rc) {
+		mutex_lock(&smcd_dev_list.mutex);
+		list_del(&smcd->list);
+		mutex_unlock(&smcd_dev_list.mutex);
+	}
+
+	return rc;
 }
 EXPORT_SYMBOL_GPL(smcd_register_dev);
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 72f3ac73779b..3f4542e0f065 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -60,7 +60,7 @@ static int __net_init tipc_init_net(struct net *net)
 	tn->trial_addr = 0;
 	tn->addr_trial_end = 0;
 	tn->capabilities = TIPC_NODE_CAPABILITIES;
-	INIT_WORK(&tn->final_work.work, tipc_net_finalize_work);
+	INIT_WORK(&tn->work, tipc_net_finalize_work);
 	memset(tn->node_id, 0, sizeof(tn->node_id));
 	memset(tn->node_id_string, 0, sizeof(tn->node_id_string));
 	tn->mon_threshold = TIPC_DEF_MON_THRESHOLD;
@@ -110,7 +110,7 @@ static void __net_exit tipc_exit_net(struct net *net)
 	tipc_detach_loopback(net);

 	/* Make sure the tipc_net_finalize_work() finished */
-	cancel_work_sync(&tn->final_work.work);
+	cancel_work_sync(&tn->work);
 	tipc_net_stop(net);

 	tipc_bcast_stop(net);
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 5741ae488bb5..0a3f7a70a50a 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -91,12 +91,6 @@ extern unsigned int tipc_net_id __read_mostly;
 extern int sysctl_tipc_rmem[3] __read_mostly;
 extern int sysctl_tipc_named_timeout __read_mostly;

-struct tipc_net_work {
-	struct work_struct work;
-	struct net *net;
-	u32 addr;
-};
-
 struct tipc_net {
 	u8 node_id[NODE_ID_LEN];
 	u32 node_addr;
@@ -148,7 +142,7 @@ struct tipc_net {
 	struct tipc_crypto *crypto_tx;
 #endif
 	/* Work item for net finalize */
-	struct tipc_net_work final_work;
+	struct work_struct work;
 	/* The numbers of work queues in schedule */
 	atomic_t wq_count;
 };
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 5380f605b851..da69e1abf68f 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -168,7 +168,7 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
 	/* Apply trial address if we just left trial period */
 	if (!trial && !self) {
-		tipc_sched_net_finalize(net, tn->trial_addr);
+		schedule_work(&tn->work);
 		msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
 		msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
 	}
@@ -308,7 +308,7 @@ static void tipc_disc_timeout(struct timer_list *t)
 	if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) {
 		mod_timer(&d->timer, jiffies + TIPC_DISC_INIT);
 		spin_unlock_bh(&d->lock);
-		tipc_sched_net_finalize(net, tn->trial_addr);
+		schedule_work(&tn->work);
 		return;
 	}
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 115109259430..c44b4bfaaee6 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -372,6 +372,11 @@ char tipc_link_plane(struct tipc_link *l)
 	return l->net_plane;
 }

+struct net *tipc_link_net(struct tipc_link *l)
+{
+	return l->net;
+}
+
 void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
 {
 	l->peer_caps = capabilities;
diff --git a/net/tipc/link.h b/net/tipc/link.h
index fc07232c9a12..a16f401fdabd 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -156,4 +156,5 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
 			  struct sk_buff_head *xmitq);
 bool tipc_link_too_silent(struct tipc_link *l);
+struct net *tipc_link_net(struct tipc_link *l);
 #endif
diff --git a/net/tipc/net.c b/net/tipc/net.c
index a130195af188..0e95572e56b4 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -41,6 +41,7 @@
 #include "socket.h"
 #include "node.h"
 #include "bcast.h"
+#include "link.h"
 #include "netlink.h"
 #include "monitor.h"

@@ -142,19 +143,9 @@ static void tipc_net_finalize(struct net *net, u32 addr)

 void tipc_net_finalize_work(struct work_struct *work)
 {
-	struct tipc_net_work *fwork;
+	struct tipc_net *tn = container_of(work, struct tipc_net, work);

-	fwork = container_of(work, struct tipc_net_work, work);
-	tipc_net_finalize(fwork->net, fwork->addr);
-}
-
-void tipc_sched_net_finalize(struct net *net, u32 addr)
-{
-	struct tipc_net *tn = tipc_net(net);
-
-	tn->final_work.net = net;
-	tn->final_work.addr = addr;
-	schedule_work(&tn->final_work.work);
+	tipc_net_finalize(tipc_link_net(tn->bcl), tn->trial_addr);
 }

 void tipc_net_stop(struct net *net)