Diffstat (limited to 'include/net')
-rw-r--r--  include/net/flow.h            | 10
-rw-r--r--  include/net/netprio_cgroup.h  | 48
-rw-r--r--  include/net/route.h           |  4
-rw-r--r--  include/net/sch_generic.h     |  9
-rw-r--r--  include/net/tcp.h             | 14
5 files changed, 72 insertions(+), 13 deletions(-)
diff --git a/include/net/flow.h b/include/net/flow.h
index 9b582437fbea..6c469dbdb917 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -93,6 +93,16 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
fl4->fl4_dport = dport;
fl4->fl4_sport = sport;
}
+
+/* Reset some input parameters after previous lookup */
+static inline void flowi4_update_output(struct flowi4 *fl4, int oif, __u8 tos,
+ __be32 daddr, __be32 saddr)
+{
+ fl4->flowi4_oif = oif;
+ fl4->flowi4_tos = tos;
+ fl4->daddr = daddr;
+ fl4->saddr = saddr;
+}
struct flowi6 {
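
The new flowi4_update_output() helper refreshes exactly the fields a previous route lookup may have rewritten (output interface, TOS, destination and source address) before the same flowi4 is handed to ip_route_output_flow() again, which is what the route.h hunks below do. A minimal sketch of that calling pattern; the wrapper name and its parameter list are illustrative, not part of the patch:

#include <net/flow.h>
#include <net/route.h>

/* Hypothetical caller: redo an output lookup after the first result
 * pinned down a source address or forced a different interface. */
static struct rtable *example_relookup(struct net *net, struct flowi4 *fl4,
				       struct sock *sk, int oif, __u8 tos,
				       __be32 daddr, __be32 saddr)
{
	/* Reset only the input fields the previous lookup may have changed. */
	flowi4_update_output(fl4, oif, tos, daddr, saddr);
	return ip_route_output_flow(net, fl4, sk);
}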
diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h
index 7b2d43139c8e..d58fdec47597 100644
--- a/include/net/netprio_cgroup.h
+++ b/include/net/netprio_cgroup.h
@@ -37,19 +37,51 @@ extern int net_prio_subsys_id;
extern void sock_update_netprioidx(struct sock *sk);
-static inline struct cgroup_netprio_state
- *task_netprio_state(struct task_struct *p)
+#if IS_BUILTIN(CONFIG_NETPRIO_CGROUP)
+
+static inline u32 task_netprioidx(struct task_struct *p)
{
-#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
- return container_of(task_subsys_state(p, net_prio_subsys_id),
- struct cgroup_netprio_state, css);
-#else
- return NULL;
-#endif
+ struct cgroup_netprio_state *state;
+ u32 idx;
+
+ rcu_read_lock();
+ state = container_of(task_subsys_state(p, net_prio_subsys_id),
+ struct cgroup_netprio_state, css);
+ idx = state->prioidx;
+ rcu_read_unlock();
+ return idx;
+}
+
+#elif IS_MODULE(CONFIG_NETPRIO_CGROUP)
+
+static inline u32 task_netprioidx(struct task_struct *p)
+{
+ struct cgroup_netprio_state *state;
+ int subsys_id;
+ u32 idx = 0;
+
+ rcu_read_lock();
+ subsys_id = rcu_dereference_index_check(net_prio_subsys_id,
+ rcu_read_lock_held());
+ if (subsys_id >= 0) {
+ state = container_of(task_subsys_state(p, subsys_id),
+ struct cgroup_netprio_state, css);
+ idx = state->prioidx;
+ }
+ rcu_read_unlock();
+ return idx;
}
#else
+static inline u32 task_netprioidx(struct task_struct *p)
+{
+ return 0;
+}
+
+#endif /* CONFIG_NETPRIO_CGROUP */
+
+#else
#define sock_update_netprioidx(sk)
#endif
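
task_netprioidx() now returns the cgroup priority index directly (0 when the controller is absent or not yet bound), so callers such as the declared sock_update_netprioidx() can tag a socket without touching cgroup state themselves. A sketch of such a consumer, assuming the socket stores the index in a field named sk_cgrp_prioidx (that field name is an assumption, not shown in this patch):

#include <linux/hardirq.h>
#include <linux/sched.h>
#include <net/netprio_cgroup.h>

/* Sketch of a consumer: copy the calling task's netprio index onto a
 * freshly created socket. */
static void example_update_netprioidx(struct sock *sk)
{
	if (in_interrupt())
		return;

	/* sk_cgrp_prioidx is assumed here as the per-socket storage. */
	sk->sk_cgrp_prioidx = task_netprioidx(current);
}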
diff --git a/include/net/route.h b/include/net/route.h
index 91855d185b53..b1c0d5b564c2 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -270,6 +270,7 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
if (IS_ERR(rt))
return rt;
ip_rt_put(rt);
+ flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr);
}
security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
return ip_route_output_flow(net, fl4, sk);
@@ -284,6 +285,9 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable
fl4->fl4_dport = dport;
fl4->fl4_sport = sport;
ip_rt_put(rt);
+ flowi4_update_output(fl4, sk->sk_bound_dev_if,
+ RT_CONN_FLAGS(sk), fl4->daddr,
+ fl4->saddr);
security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
return ip_route_output_flow(sock_net(sk), fl4, sk);
}
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index f6bb08b73ca4..55ce96b53b09 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -220,9 +220,16 @@ struct tcf_proto {
struct qdisc_skb_cb {
unsigned int pkt_len;
- long data[];
+ unsigned char data[24];
};
+static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
+{
+ struct qdisc_skb_cb *qcb;
+ BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz);
+ BUILD_BUG_ON(sizeof(qcb->data) < sz);
+}
+
static inline int qdisc_qlen(const struct Qdisc *q)
{
return q->q.qlen;
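
qdisc_skb_cb::data becomes a fixed 24-byte array, and qdisc_cb_private_validate() gives qdiscs a compile-time check that their private per-packet state still fits inside it (and inside skb->cb). A sketch of the usual accessor pattern a qdisc might use; the struct and helper names below are illustrative:

#include <linux/skbuff.h>
#include <net/sch_generic.h>

/* Hypothetical per-packet state a qdisc stashes in skb->cb. */
struct example_skb_cb {
	u64	enqueue_time;
};

static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
{
	/* Both BUILD_BUG_ONs fire at compile time if the private area
	 * outgrows qdisc_skb_cb::data or skb->cb itself. */
	qdisc_cb_private_validate(skb, sizeof(struct example_skb_cb));
	return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
}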
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d49db0113a06..42c29bfbcee3 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -273,6 +273,14 @@ static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
return seq3 - seq2 >= seq1 - seq2;
}
+static inline bool tcp_out_of_memory(struct sock *sk)
+{
+ if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
+ sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
+ return true;
+ return false;
+}
+
static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
struct percpu_counter *ocp = sk->sk_prot->orphan_count;
@@ -283,13 +291,11 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
if (orphans << shift > sysctl_tcp_max_orphans)
return true;
}
-
- if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
- sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
- return true;
return false;
}
+extern bool tcp_check_oom(struct sock *sk, int shift);
+
/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
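
The memory-pressure test is split out of tcp_too_many_orphans() into tcp_out_of_memory(), and a new out-of-line tcp_check_oom() is declared to combine the two conditions. A plausible sketch of that helper; the log messages and rate limiting are illustrative, not taken from this diff:

#include <linux/net.h>
#include <net/tcp.h>

bool tcp_check_oom(struct sock *sk, int shift)
{
	bool too_many_orphans = tcp_too_many_orphans(sk, shift);
	bool out_of_socket_memory = tcp_out_of_memory(sk);

	if (too_many_orphans && net_ratelimit())
		pr_info("too many orphaned sockets\n");
	if (out_of_socket_memory && net_ratelimit())
		pr_info("out of memory -- consider tuning tcp_mem\n");

	return too_many_orphans || out_of_socket_memory;
}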