net/mlx5e: kTLS, Enforce HW TX csum offload with kTLS

Checksum calculation cannot be done in SW for TX kTLS HW offloaded
packets: the device encrypts the payload in flight, so a checksum
computed by the stack over the plaintext would be wrong on the wire.
Always offload the checksum calculation of kTLS packets to the device,
disregarding the declared state of the TX csum offload feature.
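
In short, the csum builder after this patch behaves as sketched below
(a condensed reconstruction from the hunks in this patch; the branches
this patch does not touch are abbreviated to comments):

static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			    struct mlx5e_accel_tx_state *accel,
			    struct mlx5_wqe_eth_seg *eseg)
{
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Stack requested csum offload: set L3 (and encap/L4)
		 * csum flags and bump stats, as before.
		 */
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		/* ... */
#ifdef CONFIG_MLX5_EN_TLS
	} else if (unlikely(accel && accel->tls.tls_tisn)) {
		/* kTLS TX: the device encrypts the payload, so it must
		 * also compute the csum, whatever the netdev feature
		 * state says.
		 */
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
		sq->stats->csum_partial++;
#endif
	} else {
		/* ... IPsec and csum_none fallbacks, unchanged ... */
	}
}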

Fixes: d2ead1f360 ("net/mlx5e: Add kTLS TX HW offload support")
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Reviewed-by: Boris Pismenny <borisp@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit b336e6b25e (parent 8a78a44010)
Author:    Tariq Toukan <tariqt@nvidia.com>
Date:      2020-12-02 20:39:45 -08:00
Committer: Jakub Kicinski <kuba@kernel.org>

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -161,7 +161,9 @@ ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 }
 
 static inline void
-mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+			    struct mlx5e_accel_tx_state *accel,
+			    struct mlx5_wqe_eth_seg *eseg)
 {
 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
 		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
@@ -173,6 +175,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
 			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
 			sq->stats->csum_partial++;
 		}
+#ifdef CONFIG_MLX5_EN_TLS
+	} else if (unlikely(accel && accel->tls.tls_tisn)) {
+		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+		sq->stats->csum_partial++;
+#endif
 	} else if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
 		ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
 	} else
@@ -607,12 +614,13 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
 }
 
 static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
-				   struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+				   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
+				   struct mlx5_wqe_eth_seg *eseg)
 {
 	if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
 		return false;
 
-	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+	mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
 
 	return true;
 }
@@ -639,7 +647,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
 		struct mlx5_wqe_eth_seg eseg = {};
 
-		if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &eseg)))
+		if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg)))
 			return NETDEV_TX_OK;
 
 		mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
@@ -656,7 +664,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* May update the WQE, but may not post other WQEs. */
 	mlx5e_accel_tx_finish(sq, wqe, &accel,
 			      (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
-	if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &wqe->eth)))
+	if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth)))
 		return NETDEV_TX_OK;
 
 	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
@@ -675,7 +683,7 @@ void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit
 
 	mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
 	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
 	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
-	mlx5e_txwqe_build_eseg_csum(sq, skb, &wqe->eth);
+	mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, &wqe->eth);
 	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, xmit_more);
 }
@@ -944,7 +952,7 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);
 
-	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+	mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);
 
 	eseg->mss = attr.mss;
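
Note that the callers converted to pass a NULL accel state
(mlx5e_sq_xmit_simple() and mlx5i_sq_xmit()) opt out of the new branch:
the "accel && accel->tls.tls_tisn" test short-circuits on NULL, so
their csum behavior is unchanged. For reference, the accel state
consulted above is the per-SKB TX acceleration context; a rough sketch,
assuming the en_accel.h layout of this era (not part of this patch):

struct mlx5e_accel_tx_tls_state {
	u32 tls_tisn;	/* TIS number; non-zero once the kTLS TX
			 * path has claimed the SKB
			 */
};

struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_accel_tx_tls_state tls;
#endif
};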