author | Jakub Kicinski <kuba@kernel.org> | 2023-06-13 22:50:06 +0200 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2023-06-15 10:01:05 +0200 |
commit | ed3c9a2fcab3b60b0766eb5d7566fd3b10df9a8e (patch) | |
tree | 76d8edce93760a8a518efaa9fb774ced54907009 /drivers | |
parent | Merge branch 'macb-partial-store-and-forward' (diff) | |
net: tls: make the offload check helper take skb not socket
All callers of tls_is_sk_tx_device_offloaded() currently do
an equivalent of:
if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
Have the helper accept skb and do the skb->sk check locally.
Two drivers have local static inlines with similar wrappers
already.
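
For reference, a minimal sketch of what the skb-taking helper could look like after this change. The real definition lives in include/net/tls.h, which is outside the drivers-only diff shown here, so the exact body below is an assumption modelled on the old socket-taking helper:

/* Sketch only: the actual helper is defined in include/net/tls.h and may
 * differ in detail. The point is that the skb->sk NULL check now lives
 * inside the helper instead of at every call site.
 */
static inline bool tls_is_skb_tx_device_offloaded(const struct sk_buff *skb)
{
#ifdef CONFIG_TLS_DEVICE
	struct sock *sk = skb->sk;

	/* A TX-offloaded TLS socket has tls_validate_xmit_skb() installed
	 * as its ->sk_validate_xmit_skb callback.
	 */
	return sk && sk_fullsock(sk) &&
	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
		&tls_validate_xmit_skb);
#else
	return false;
#endif
}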
While at it change the ifdef condition to TLS_DEVICE.
Only TLS_DEVICE selects SOCK_VALIDATE_XMIT, so the two are
equivalent. This makes removing the duplicated IS_ENABLED()
check in funeth more obviously correct.
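
Concretely, in funeth (see the hunk in the diff below) the open-coded IS_ENABLED()/skb->sk/offload test collapses into a single call; because the helper compiles to false when TLS_DEVICE is off, dropping the explicit config check is safe:

/* Before: the caller open-codes the config, socket and offload checks. */
if (IS_ENABLED(CONFIG_TLS_DEVICE) && skb->sk &&
    tls_is_sk_tx_device_offloaded(skb->sk)) {
	skb = fun_tls_tx(skb, q, &tls_len);
	if (unlikely(!skb))
		goto dropped;
}

/* After: one call; the helper does the skb->sk check and honours TLS_DEVICE. */
if (tls_is_skb_tx_device_offloaded(skb)) {
	skb = fun_tls_tx(skb, q, &tls_len);
	if (unlikely(!skb))
		goto dropped;
}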
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Acked-by: Maxim Mikityanskiy <maxtram95@gmail.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Acked-by: Tariq Toukan <tariqt@nvidia.com>
Acked-by: Dimitris Michailidis <dmichail@fungible.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
10 files changed, 10 insertions, 21 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 007cec23a92f..16405b84dc2f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5442,7 +5442,7 @@ static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *sk
 {
 	struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);
 
-	/* tls_netdev might become NULL, even if tls_is_sk_tx_device_offloaded
+	/* tls_netdev might become NULL, even if tls_is_skb_tx_device_offloaded
 	 * was true, if tls_device_down is running in parallel, but it's OK,
 	 * because bond_get_slave_by_dev has a NULL check.
 	 */
@@ -5461,7 +5461,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
 		return NETDEV_TX_OK;
 
 #if IS_ENABLED(CONFIG_TLS_DEVICE)
-	if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
+	if (tls_is_skb_tx_device_offloaded(skb))
 		return bond_tls_device_xmit(bond, skb, dev);
 #endif
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index f0bc7396ce2b..2eb33a727bba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1175,7 +1175,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
 		txq = netdev_pick_tx(dev, skb, sb_dev);
 		if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
 		    skb->encapsulation ||
-		    cxgb4_is_ktls_skb(skb) ||
+		    tls_is_skb_tx_device_offloaded(skb) ||
 		    (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
 			txq = txq % pi->nqsets;
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 34546f5312ee..a9599ba26975 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -497,11 +497,6 @@ struct cxgb4_uld_info {
 #endif
 };
 
-static inline bool cxgb4_is_ktls_skb(struct sk_buff *skb)
-{
-	return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
-}
-
 void cxgb4_uld_enable(struct adapter *adap);
 void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
 int cxgb4_unregister_uld(enum cxgb4_uld type);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 46809e2d94ee..98dd78551d89 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1530,7 +1530,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif /* CHELSIO_IPSEC_INLINE */
 
 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
-	if (cxgb4_is_ktls_skb(skb) &&
+	if (tls_is_skb_tx_device_offloaded(skb) &&
 	    (skb->len - skb_tcp_all_headers(skb)))
 		return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
 #endif /* CHELSIO_TLS_DEVICE */
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 1a5fdd755e9e..bcdc7fc2f427 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -1946,7 +1946,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	tls_ctx = tls_get_ctx(skb->sk);
 	tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
 	/* Don't quit on NULL: if tls_device_down is running in parallel,
-	 * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+	 * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was
 	 * true. Rather continue processing this packet.
 	 */
 	if (unlikely(tls_netdev && tls_netdev != dev))
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_tx.c b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
index 706d81e39a54..8ddefd3ec15b 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_tx.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
@@ -348,8 +348,7 @@ netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int tls_len = 0;
 	unsigned int ndesc;
 
-	if (IS_ENABLED(CONFIG_TLS_DEVICE) && skb->sk &&
-	    tls_is_sk_tx_device_offloaded(skb->sk)) {
+	if (tls_is_skb_tx_device_offloaded(skb)) {
 		skb = fun_tls_tx(skb, q, &tls_len);
 		if (unlikely(!skb))
 			goto dropped;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index c964644ee866..bac4717548c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -125,7 +125,7 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
 
 #ifdef CONFIG_MLX5_EN_TLS
 	/* May send WQEs. */
-	if (mlx5e_ktls_skb_offloaded(skb))
+	if (tls_is_skb_tx_device_offloaded(skb))
 		if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
 						       &state->tls)))
 			return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 0e4c0a093293..efb2cf74ad6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -846,7 +846,7 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
 	tls_ctx = tls_get_ctx(skb->sk);
 	tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
 	/* Don't WARN on NULL: if tls_device_down is running in parallel,
-	 * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+	 * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was
 	 * true. Rather continue processing this packet.
 	 */
 	if (WARN_ON_ONCE(tls_netdev && tls_netdev != netdev))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index 2dd78dd4ad65..f87b65c560ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -49,11 +49,6 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
 	return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state);
 }
 
-static inline bool mlx5e_ktls_skb_offloaded(struct sk_buff *skb)
-{
-	return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
-}
-
 static inline void
 mlx5e_ktls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
 			 struct mlx5e_accel_tx_tls_state *state)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index b7cce746b5c0..49f2f081ebb5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -598,7 +598,7 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
 	if (likely(!dp->ktls_tx))
 		return skb;
 
-	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
+	if (!tls_is_skb_tx_device_offloaded(skb))
 		return skb;
 
 	datalen = skb->len - skb_tcp_all_headers(skb);
@@ -666,7 +666,7 @@ void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
 	if (!tls_handle)
 		return;
 
-	if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
+	if (WARN_ON_ONCE(!tls_is_skb_tx_device_offloaded(skb)))
 		return;
 
 	datalen = skb->len - skb_tcp_all_headers(skb);