author    Ursula Braun <ubraun@linux.ibm.com>    2019-02-07 15:56:15 +0100
committer David S. Miller <davem@davemloft.net>  2019-02-08 03:06:18 +0100
commit    b03faa1fafc8018295401dc558bdc76362d860a4 (patch)
tree      43ddfd79f928b7b5fee75a78ccf4c080e2c7a2e0 /net/smc
parent    s390/net: move pnet constants (diff)
net/smc: postpone release of clcsock
According to RFC 7609 (http://www.rfc-editor.org/info/rfc7609), the SMC-R connection is shut down first, and normal TCP FIN processing then drives the cleanup of the internal TCP connection. The unconditional release of the clcsock during active socket closing therefore has to be postponed if the peer has not yet signalled socket closing.

Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
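To make that ordering concrete before the patch itself, here is a small hedged sketch: a plain user-space C model (none of these identifiers exist in the kernel; SMC_MODEL_*, smc_model_release and smc_model_peer_closed are invented for illustration) of "release the clcsock only once the SMC socket has reached the closed state, otherwise postpone it until the peer's close indication arrives", which is what the diff below arranges in smc_release() and in the passive-close worker in smc_close.c.

/*
 * Hypothetical user-space model of the close ordering described above;
 * it is not kernel code and none of these identifiers exist in net/smc.
 */
#include <stdbool.h>
#include <stdio.h>

enum smc_model_state { SMC_MODEL_ACTIVE, SMC_MODEL_CLOSED };

struct smc_model {
	enum smc_model_state state;
	bool clcsock_present;		/* models smc->clcsock != NULL */
};

/* Active close: only drop the clcsock once the SMC side is fully closed. */
static void smc_model_release(struct smc_model *smc)
{
	if (smc->state == SMC_MODEL_CLOSED && smc->clcsock_present) {
		smc->clcsock_present = false;	/* models sock_release(smc->clcsock) */
		printf("clcsock released during active close\n");
	} else {
		printf("clcsock release postponed, peer not done yet\n");
	}
}

/* The peer's close indication arrives later and finishes the cleanup. */
static void smc_model_peer_closed(struct smc_model *smc)
{
	smc->state = SMC_MODEL_CLOSED;
	if (smc->clcsock_present) {
		smc->clcsock_present = false;	/* models the deferred sock_release() */
		printf("clcsock released after peer signalled closing\n");
	}
}

int main(void)
{
	struct smc_model smc = { .state = SMC_MODEL_ACTIVE, .clcsock_present = true };

	smc_model_release(&smc);	/* postponed: peer has not signalled closing */
	smc_model_peer_closed(&smc);	/* peer closes: clcsock is released now */
	return 0;
}

Compiled and run, the model prints the postponed path first and the actual release only after the simulated peer close, mirroring the behaviour the patch enforces.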
Diffstat (limited to 'net/smc')
-rw-r--r--  net/smc/af_smc.c    | 33
-rw-r--r--  net/smc/smc_close.c |  7
2 files changed, 23 insertions, 17 deletions
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 369870b0ef79..60ccc8f50368 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -145,32 +145,33 @@ static int smc_release(struct socket *sock)
 		rc = smc_close_active(smc);
 		sock_set_flag(sk, SOCK_DEAD);
 		sk->sk_shutdown |= SHUTDOWN_MASK;
-	}
-
-	sk->sk_prot->unhash(sk);
-
-	if (smc->clcsock) {
-		if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
+	} else {
+		if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
+			sock_put(sk); /* passive closing */
+		if (sk->sk_state == SMC_LISTEN) {
 			/* wake up clcsock accept */
 			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
 		}
-		mutex_lock(&smc->clcsock_release_lock);
-		sock_release(smc->clcsock);
-		smc->clcsock = NULL;
-		mutex_unlock(&smc->clcsock_release_lock);
-	}
-	if (smc->use_fallback) {
-		if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
-			sock_put(sk); /* passive closing */
 		sk->sk_state = SMC_CLOSED;
 		sk->sk_state_change(sk);
 	}
+	sk->sk_prot->unhash(sk);
+
+	if (sk->sk_state == SMC_CLOSED) {
+		if (smc->clcsock) {
+			mutex_lock(&smc->clcsock_release_lock);
+			sock_release(smc->clcsock);
+			smc->clcsock = NULL;
+			mutex_unlock(&smc->clcsock_release_lock);
+		}
+		if (!smc->use_fallback)
+			smc_conn_free(&smc->conn);
+	}
+
 	/* detach socket */
 	sock_orphan(sk);
 	sock->sk = NULL;
-	if (!smc->use_fallback && sk->sk_state == SMC_CLOSED)
-		smc_conn_free(&smc->conn);
 	release_sock(sk);
 	sock_put(sk); /* final sock_put */
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index ea2b87f29469..0e60dd741698 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -405,8 +405,13 @@ wakeup:
 	if (old_state != sk->sk_state) {
 		sk->sk_state_change(sk);
 		if ((sk->sk_state == SMC_CLOSED) &&
-		    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket))
+		    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
 			smc_conn_free(conn);
+			if (smc->clcsock) {
+				sock_release(smc->clcsock);
+				smc->clcsock = NULL;
+			}
+		}
 	}
 	release_sock(sk);
 	sock_put(sk); /* sock_hold done by schedulers of close_work */