author | Paolo Abeni <pabeni@redhat.com> | 2022-01-07 01:20:25 +0100 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2022-01-07 12:27:07 +0100 |
commit | 3e5014909b5661b3da59990d72a317a45ba3b284 | |
tree | b07bfc656dc6be35ca783f55db254a5d9fda65bf /net/mptcp/protocol.h | |
parent | selftests: mptcp: add tests for subflow creation failure | |
mptcp: cleanup MPJ subflow list handling
We can simplify the join list handling by leveraging
mptcp_release_cb(): if the msk socket lock can be
acquired at mptcp_finish_join() time, move the new subflow
directly into the conn_list; otherwise place it on the
join_list and let the release_cb process that list.
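
A minimal sketch of that lock-or-defer step, assuming the mptcp_data_lock()/mptcp_data_unlock() helpers and the identifiers visible in this patch (MPTCP_FLUSH_JOIN_LIST, conn_list, join_list); the function name is hypothetical and the real join validation is elided, so this is illustrative rather than the literal kernel code:

```c
/* Hypothetical sketch, not the literal patch: runs when an MPJ
 * subflow completes its handshake. Assumes the declarations from
 * net/mptcp/protocol.h are in scope.
 */
static bool mptcp_finish_join_sketch(struct mptcp_sock *msk,
				     struct mptcp_subflow_context *subflow)
{
	struct sock *sk = (struct sock *)msk;

	/* the MPTCP data lock also guards join_list after this change */
	mptcp_data_lock(sk);
	if (!sock_owned_by_user(sk)) {
		/* msk socket lock is free: link the new subflow
		 * straight into conn_list
		 */
		sock_hold(mptcp_subflow_tcp_sock(subflow));
		list_add_tail(&subflow->node, &msk->conn_list);
	} else {
		/* owner task is running: park the subflow on join_list
		 * and let mptcp_release_cb() promote it later
		 */
		sock_hold(mptcp_subflow_tcp_sock(subflow));
		list_add_tail(&subflow->node, &msk->join_list);
		set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->flags);
	}
	mptcp_data_unlock(sk);
	return true;
}
```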
Since pending MPJ connections are now always processed
in a timely way, we can avoid flushing the join list
every time we have to process all the current subflows.
Additionally, we can now use the MPTCP data lock to protect
the join_list, removing the dedicated join_list_lock spinlock.
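
For illustration, the deferred side could look roughly like the sketch below: invoked from mptcp_release_cb() while this task owns the msk socket lock, it drains join_list under the data lock and then promotes the parked subflows. The helper name and locals are hypothetical; only the identifiers shown in this diff (join_list, conn_list, mptcp_sockopt_sync_locked()) come from the source:

```c
/* Hypothetical sketch of the deferred flush driven by
 * mptcp_release_cb(). The data lock replaces the removed
 * join_list_lock spinlock for join_list accesses.
 */
static void mptcp_flush_join_list_sketch(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow, *tmp;
	LIST_HEAD(join_list);

	/* drain join_list under the MPTCP data lock */
	mptcp_data_lock(sk);
	list_splice_init(&msk->join_list, &join_list);
	mptcp_data_unlock(sk);

	list_for_each_entry_safe(subflow, tmp, &join_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow = lock_sock_fast(ssk);

		/* conn_list is stable here: the msk socket lock is
		 * owned by this task; also keep the new subflow's
		 * options in sync with the msk
		 */
		list_move_tail(&subflow->node, &msk->conn_list);
		mptcp_sockopt_sync_locked(msk, ssk);
		unlock_sock_fast(ssk, slow);
	}
}
```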
Finally, since the MPJ handshake is now always finalized under
the msk socket lock, we can drop the additional synchronization
between mptcp_finish_join() and mptcp_close().
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/mptcp/protocol.h')
-rw-r--r-- | net/mptcp/protocol.h | 15 |
1 file changed, 2 insertions, 13 deletions
```diff
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index a8eb32e29215..962f3b6b6a1d 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -120,7 +120,7 @@
 #define MPTCP_CLEAN_UNA		7
 #define MPTCP_ERROR_REPORT	8
 #define MPTCP_RETRANSMIT	9
-#define MPTCP_WORK_SYNC_SETSOCKOPT 10
+#define MPTCP_FLUSH_JOIN_LIST	10
 #define MPTCP_CONNECTED		11
 
 static inline bool before64(__u64 seq1, __u64 seq2)
@@ -261,7 +261,6 @@ struct mptcp_sock {
 	u8		recvmsg_inq:1,
 			cork:1,
 			nodelay:1;
-	spinlock_t	join_list_lock;
 	struct work_struct work;
 	struct sk_buff	*ooo_last_skb;
 	struct rb_root	out_of_order_queue;
@@ -509,15 +508,6 @@ mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow)
 	return subflow->map_seq + mptcp_subflow_get_map_offset(subflow);
 }
 
-static inline void mptcp_add_pending_subflow(struct mptcp_sock *msk,
-					     struct mptcp_subflow_context *subflow)
-{
-	sock_hold(mptcp_subflow_tcp_sock(subflow));
-	spin_lock_bh(&msk->join_list_lock);
-	list_add_tail(&subflow->node, &msk->join_list);
-	spin_unlock_bh(&msk->join_list_lock);
-}
-
 void mptcp_subflow_process_delegated(struct sock *ssk);
 
 static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow,
					  int action)
@@ -682,7 +672,6 @@ void __mptcp_data_acked(struct sock *sk);
 void __mptcp_error_report(struct sock *sk);
 void mptcp_subflow_eof(struct sock *sk);
 bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit);
-void __mptcp_flush_join_list(struct mptcp_sock *msk);
 static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
 {
 	return READ_ONCE(msk->snd_data_fin_enable) &&
@@ -842,7 +831,7 @@ unsigned int mptcp_pm_get_subflows_max(struct mptcp_sock *msk);
 unsigned int mptcp_pm_get_local_addr_max(struct mptcp_sock *msk);
 
 void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk);
-void mptcp_sockopt_sync_all(struct mptcp_sock *msk);
+void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
 
 static inline struct mptcp_ext *mptcp_get_ext(const struct sk_buff *skb)
 {
```