
Merge tag 'mlx5-updates-2022-05-03' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

mlx5-updates-2022-05-03

Leon Romanovsky Says:
=====================

Extra IPsec cleanup

After FPGA IPsec removal, we can go further and make sure that the flow
steering logic is aligned to the mlx5_core standard, together with a deep
cleanup of the whole IPsec path.

=====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2022-05-04 10:53:00 +01:00
commit 402f2d6b6b
15 changed files with 319 additions and 822 deletions
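
At a glance, the series replaces the old three-step SA setup (create an accel xfrm, create a hw context, then add the steering rule, each with its own alloc/free pair) with a single mlx5e_ipsec_sa_entry that carries its attributes, key id and steering rule inline. A condensed sketch of the new add-state path, assembled from the hunks below (locals, validation and error unwinding elided; an orientation aid, not compilable on its own):

static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
	struct mlx5e_priv *priv = netdev_priv(x->xso.real_dev);
	struct mlx5e_ipsec_sa_entry *sa_entry;
	int err;

	/* translate the xfrm state into driver attributes, kept inline */
	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);

	/* one call now creates both the DEK and the IPsec general object */
	err = mlx5_ipsec_create_sa_ctx(sa_entry);

	/* steering rule; direction is derived from sa_entry->attrs.action */
	err = mlx5e_accel_ipsec_fs_add_rule(priv, sa_entry);

	x->xso.offload_handle = (unsigned long)sa_entry;
	return err;
}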

View file

@@ -164,7 +164,6 @@ struct mlx5e_ptp_fs;
struct mlx5e_flow_steering {
struct mlx5_flow_namespace *ns;
struct mlx5_flow_namespace *egress_ns;
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_steering ethtool;
#endif

View file

@@ -5,7 +5,7 @@
#include "en/txrx.h"
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec_offload.h"
#include "en_accel/ipsec.h"
static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)

View file

@@ -37,23 +37,12 @@
#include <linux/netdevice.h>
#include "en.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec_fs.h"
#include "ipsec.h"
#include "ipsec_rxtx.h"
static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa;
if (!x)
return NULL;
sa = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
if (!sa)
return NULL;
WARN_ON(sa->x != x);
return sa;
return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
}
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
@@ -74,9 +63,9 @@ struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
return ret;
}
static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry,
unsigned int handle)
static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
{
unsigned int handle = sa_entry->ipsec_obj_id;
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_sa_entry *_sa_entry;
unsigned long flags;
@@ -148,7 +137,7 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct xfrm_state *x = sa_entry->x;
struct aes_gcm_keymat *aes_gcm = &attrs->keymat.aes_gcm;
struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
struct aead_geniv_ctx *geniv_ctx;
struct crypto_aead *aead;
unsigned int crypto_data_len, key_len;
@@ -182,12 +171,6 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
}
/* rx handle */
attrs->sa_handle = sa_entry->handle;
/* algo type */
attrs->keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
/* action */
attrs->action = (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) ?
MLX5_ACCEL_ESP_ACTION_ENCRYPT :
@@ -198,7 +181,7 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
MLX5_ACCEL_ESP_FLAGS_TUNNEL;
/* spi */
attrs->spi = x->id.spi;
attrs->spi = be32_to_cpu(x->id.spi);
/* source , destination ips */
memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
@@ -226,7 +209,7 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
return -EINVAL;
}
if (x->props.flags & XFRM_STATE_ESN &&
!(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_ESN)) {
!(mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_ESN)) {
netdev_info(netdev, "Cannot offload ESN xfrm states\n");
return -EINVAL;
}
@@ -273,39 +256,29 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
return -EINVAL;
}
if (x->props.family == AF_INET6 &&
!(mlx5_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_CAP_IPV6)) {
netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
return -EINVAL;
}
return 0;
}
static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv,
struct mlx5e_ipsec_sa_entry *sa_entry)
static void _update_xfrm_state(struct work_struct *work)
{
return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs,
sa_entry->ipsec_obj_id,
&sa_entry->ipsec_rule);
}
struct mlx5e_ipsec_modify_state_work *modify_work =
container_of(work, struct mlx5e_ipsec_modify_state_work, work);
struct mlx5e_ipsec_sa_entry *sa_entry = container_of(
modify_work, struct mlx5e_ipsec_sa_entry, modify_work);
static void mlx5e_xfrm_fs_del_rule(struct mlx5e_priv *priv,
struct mlx5e_ipsec_sa_entry *sa_entry)
{
mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs,
&sa_entry->ipsec_rule);
mlx5_accel_esp_modify_xfrm(sa_entry, &modify_work->attrs);
}
static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
struct net_device *netdev = x->xso.real_dev;
struct mlx5_accel_esp_xfrm_attrs attrs;
struct mlx5e_priv *priv;
unsigned int sa_handle;
int err;
priv = netdev_priv(netdev);
if (!priv->ipsec)
return -EOPNOTSUPP;
err = mlx5e_xfrm_validate_state(x);
if (err)
@@ -323,31 +296,18 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
/* check esn */
mlx5e_ipsec_update_esn_state(sa_entry);
/* create xfrm */
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
sa_entry->xfrm = mlx5_accel_esp_create_xfrm(priv->mdev, &attrs);
if (IS_ERR(sa_entry->xfrm)) {
err = PTR_ERR(sa_entry->xfrm);
goto err_sa_entry;
}
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);
/* create hw context */
sa_entry->hw_context =
mlx5_accel_esp_create_hw_context(priv->mdev,
sa_entry->xfrm,
&sa_handle);
if (IS_ERR(sa_entry->hw_context)) {
err = PTR_ERR(sa_entry->hw_context);
err = mlx5_ipsec_create_sa_ctx(sa_entry);
if (err)
goto err_xfrm;
}
sa_entry->ipsec_obj_id = sa_handle;
err = mlx5e_xfrm_fs_add_rule(priv, sa_entry);
err = mlx5e_accel_ipsec_fs_add_rule(priv, sa_entry);
if (err)
goto err_hw_ctx;
if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
err = mlx5e_ipsec_sadb_rx_add(sa_entry, sa_handle);
err = mlx5e_ipsec_sadb_rx_add(sa_entry);
if (err)
goto err_add_rule;
} else {
@@ -355,18 +315,16 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
}
INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state);
x->xso.offload_handle = (unsigned long)sa_entry;
goto out;
err_add_rule:
mlx5e_xfrm_fs_del_rule(priv, sa_entry);
mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
err_hw_ctx:
mlx5_accel_esp_free_hw_context(priv->mdev, sa_entry->hw_context);
mlx5_ipsec_free_sa_ctx(sa_entry);
err_xfrm:
mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
err_sa_entry:
kfree(sa_entry);
out:
return err;
}
@@ -375,9 +333,6 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
if (!sa_entry)
return;
if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
mlx5e_ipsec_sadb_rx_del(sa_entry);
}
@@ -387,22 +342,16 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_priv *priv = netdev_priv(x->xso.dev);
if (!sa_entry)
return;
if (sa_entry->hw_context) {
flush_workqueue(sa_entry->ipsec->wq);
mlx5e_xfrm_fs_del_rule(priv, sa_entry);
mlx5_accel_esp_free_hw_context(sa_entry->xfrm->mdev, sa_entry->hw_context);
mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
}
cancel_work_sync(&sa_entry->modify_work.work);
mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
mlx5_ipsec_free_sa_ctx(sa_entry);
kfree(sa_entry);
}
int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
struct mlx5e_ipsec *ipsec = NULL;
struct mlx5e_ipsec *ipsec;
int ret;
if (!mlx5_ipsec_device_caps(priv->mdev)) {
netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
@@ -415,18 +364,27 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
hash_init(ipsec->sadb_rx);
spin_lock_init(&ipsec->sadb_rx_lock);
ipsec->en_priv = priv;
ipsec->mdev = priv->mdev;
ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
priv->netdev->name);
if (!ipsec->wq) {
kfree(ipsec);
return -ENOMEM;
ret = -ENOMEM;
goto err_wq;
}
ret = mlx5e_accel_ipsec_fs_init(ipsec);
if (ret)
goto err_fs_init;
priv->ipsec = ipsec;
mlx5e_accel_ipsec_fs_init(priv);
netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
return 0;
err_fs_init:
destroy_workqueue(ipsec->wq);
err_wq:
kfree(ipsec);
return (ret != -EOPNOTSUPP) ? ret : 0;
}
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
@@ -436,9 +394,8 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
if (!ipsec)
return;
mlx5e_accel_ipsec_fs_cleanup(priv);
mlx5e_accel_ipsec_fs_cleanup(ipsec);
destroy_workqueue(ipsec->wq);
kfree(ipsec);
priv->ipsec = NULL;
}
@@ -458,50 +415,19 @@ static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
return true;
}
struct mlx5e_ipsec_modify_state_work {
struct work_struct work;
struct mlx5_accel_esp_xfrm_attrs attrs;
struct mlx5e_ipsec_sa_entry *sa_entry;
};
static void _update_xfrm_state(struct work_struct *work)
{
int ret;
struct mlx5e_ipsec_modify_state_work *modify_work =
container_of(work, struct mlx5e_ipsec_modify_state_work, work);
struct mlx5e_ipsec_sa_entry *sa_entry = modify_work->sa_entry;
ret = mlx5_accel_esp_modify_xfrm(sa_entry->xfrm,
&modify_work->attrs);
if (ret)
netdev_warn(sa_entry->ipsec->en_priv->netdev,
"Not an IPSec offload device\n");
kfree(modify_work);
}
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec_modify_state_work *modify_work;
struct mlx5e_ipsec_modify_state_work *modify_work =
&sa_entry->modify_work;
bool need_update;
if (!sa_entry)
return;
need_update = mlx5e_ipsec_update_esn_state(sa_entry);
if (!need_update)
return;
modify_work = kzalloc(sizeof(*modify_work), GFP_ATOMIC);
if (!modify_work)
return;
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &modify_work->attrs);
modify_work->sa_entry = sa_entry;
INIT_WORK(&modify_work->work, _update_xfrm_state);
WARN_ON(!queue_work(sa_entry->ipsec->wq, &modify_work->work));
queue_work(sa_entry->ipsec->wq, &modify_work->work);
}
static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
@@ -517,11 +443,8 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
struct net_device *netdev = priv->netdev;
if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
!MLX5_CAP_ETH(mdev, swp)) {
mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
if (!mlx5_ipsec_device_caps(mdev))
return;
}
mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
@@ -536,8 +459,7 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;
if (!(mlx5_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
!MLX5_CAP_ETH(mdev, swp_lso)) {
if (!MLX5_CAP_ETH(mdev, swp_lso)) {
mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
return;
}
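
Beyond the API reshuffle, this file fixes a real weakness: mlx5e_xfrm_advance_esn_state() used to kzalloc() a work item with GFP_ATOMIC on every ESN advance and simply dropped the update when the allocation failed. The work struct is now embedded in the SA entry, so queueing an ESN update can no longer fail. The resulting lifecycle, condensed from the hunks above:

/* add_state: one work item per SA, initialized once */
INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state);

/* advance_esn: no allocation, so no failure path */
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->modify_work.attrs);
queue_work(sa_entry->ipsec->wq, &sa_entry->modify_work.work);

/* free_state: wait for a possibly in-flight update before teardown */
cancel_work_sync(&sa_entry->modify_work.work);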

View file

@@ -40,11 +40,56 @@
#include <net/xfrm.h>
#include <linux/idr.h>
#include "ipsec_offload.h"
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
enum mlx5_accel_esp_flags {
MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */
MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0,
MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1,
MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2,
};
enum mlx5_accel_esp_action {
MLX5_ACCEL_ESP_ACTION_DECRYPT,
MLX5_ACCEL_ESP_ACTION_ENCRYPT,
};
struct aes_gcm_keymat {
u64 seq_iv;
u32 salt;
u32 icv_len;
u32 key_len;
u32 aes_key[256 / 32];
};
struct mlx5_accel_esp_xfrm_attrs {
enum mlx5_accel_esp_action action;
u32 esn;
u32 spi;
u32 flags;
struct aes_gcm_keymat aes_gcm;
union {
__be32 a4;
__be32 a6[4];
} saddr;
union {
__be32 a4;
__be32 a6[4];
} daddr;
u8 is_ipv6;
};
enum mlx5_ipsec_cap {
MLX5_IPSEC_CAP_CRYPTO = 1 << 0,
MLX5_IPSEC_CAP_ESN = 1 << 1,
};
struct mlx5e_priv;
struct mlx5e_ipsec_sw_stats {
@@ -61,7 +106,7 @@ struct mlx5e_accel_fs_esp;
struct mlx5e_ipsec_tx;
struct mlx5e_ipsec {
struct mlx5e_priv *en_priv;
struct mlx5_core_dev *mdev;
DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
spinlock_t sadb_rx_lock; /* Protects sadb_rx */
struct mlx5e_ipsec_sw_stats sw_stats;
@@ -81,18 +126,24 @@ struct mlx5e_ipsec_rule {
struct mlx5_modify_hdr *set_modify_hdr;
};
struct mlx5e_ipsec_modify_state_work {
struct work_struct work;
struct mlx5_accel_esp_xfrm_attrs attrs;
};
struct mlx5e_ipsec_sa_entry {
struct hlist_node hlist; /* Item in SADB_RX hashtable */
struct mlx5e_ipsec_esn_state esn_state;
unsigned int handle; /* Handle in SADB_RX */
struct xfrm_state *x;
struct mlx5e_ipsec *ipsec;
struct mlx5_accel_esp_xfrm *xfrm;
void *hw_context;
struct mlx5_accel_esp_xfrm_attrs attrs;
void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
u32 ipsec_obj_id;
u32 enc_key_id;
struct mlx5e_ipsec_rule ipsec_rule;
struct mlx5e_ipsec_modify_state_work modify_work;
};
int mlx5e_ipsec_init(struct mlx5e_priv *priv);
@@ -102,6 +153,26 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
unsigned int handle);
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev);
void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
const struct mlx5_accel_esp_xfrm_attrs *attrs);
static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
{
return sa_entry->ipsec->mdev;
}
#else
static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
@@ -116,6 +187,10 @@ static inline void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
}
static inline u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
return 0;
}
#endif
#endif /* __MLX5E_IPSEC_H__ */

View file

@@ -2,8 +2,9 @@
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include <linux/netdevice.h>
#include "ipsec_offload.h"
#include "ipsec_fs.h"
#include "en.h"
#include "en/fs.h"
#include "ipsec.h"
#include "fs_core.h"
#define NUM_IPSEC_FTE BIT(15)
@@ -35,6 +36,7 @@ struct mlx5e_accel_fs_esp {
};
struct mlx5e_ipsec_tx {
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *ft;
struct mutex mutex; /* Protect IPsec TX steering */
u32 refcnt;
@@ -58,7 +60,7 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_flow_handle *fte;
struct mlx5_flow_spec *spec;
int err = 0;
int err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
@@ -94,101 +96,27 @@
goto out;
}
kvfree(spec);
rx_err->rule = fte;
rx_err->copy_modify_hdr = modify_hdr;
return 0;
out:
if (err)
mlx5_modify_header_dealloc(mdev, modify_hdr);
mlx5_modify_header_dealloc(mdev, modify_hdr);
out_spec:
kvfree(spec);
return err;
}
static void rx_err_del_rule(struct mlx5e_priv *priv,
struct mlx5e_ipsec_rx_err *rx_err)
{
if (rx_err->rule) {
mlx5_del_flow_rules(rx_err->rule);
rx_err->rule = NULL;
}
if (rx_err->copy_modify_hdr) {
mlx5_modify_header_dealloc(priv->mdev, rx_err->copy_modify_hdr);
rx_err->copy_modify_hdr = NULL;
}
}
static void rx_err_destroy_ft(struct mlx5e_priv *priv, struct mlx5e_ipsec_rx_err *rx_err)
{
rx_err_del_rule(priv, rx_err);
if (rx_err->ft) {
mlx5_destroy_flow_table(rx_err->ft);
rx_err->ft = NULL;
}
}
static int rx_err_create_ft(struct mlx5e_priv *priv,
struct mlx5e_accel_fs_esp_prot *fs_prot,
struct mlx5e_ipsec_rx_err *rx_err)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;
int err;
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
netdev_err(priv->netdev, "fail to create ipsec rx inline ft err=%d\n", err);
return err;
}
rx_err->ft = ft;
err = rx_err_add_rule(priv, fs_prot, rx_err);
if (err)
goto out_err;
return 0;
out_err:
mlx5_destroy_flow_table(ft);
rx_err->ft = NULL;
return err;
}
static void rx_fs_destroy(struct mlx5e_accel_fs_esp_prot *fs_prot)
{
if (fs_prot->miss_rule) {
mlx5_del_flow_rules(fs_prot->miss_rule);
fs_prot->miss_rule = NULL;
}
if (fs_prot->miss_group) {
mlx5_destroy_flow_group(fs_prot->miss_group);
fs_prot->miss_group = NULL;
}
if (fs_prot->ft) {
mlx5_destroy_flow_table(fs_prot->ft);
fs_prot->ft = NULL;
}
}
static int rx_fs_create(struct mlx5e_priv *priv,
struct mlx5e_accel_fs_esp_prot *fs_prot)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft = fs_prot->ft;
struct mlx5_flow_group *miss_group;
struct mlx5_flow_handle *miss_rule;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
struct mlx5_flow_table *ft;
u32 *flow_group_in;
int err = 0;
@@ -199,20 +127,6 @@ static int rx_fs_create(struct mlx5e_priv *priv,
goto out;
}
/* Create FT */
ft_attr.max_fte = NUM_IPSEC_FTE;
ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft_attr.autogroup.num_reserved_entries = 1;
ft_attr.autogroup.max_num_groups = 1;
ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
netdev_err(priv->netdev, "fail to create ipsec rx ft err=%d\n", err);
goto out;
}
fs_prot->ft = ft;
/* Create miss_group */
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
@@ -227,19 +141,19 @@
/* Create miss rule */
miss_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
if (IS_ERR(miss_rule)) {
mlx5_destroy_flow_group(fs_prot->miss_group);
err = PTR_ERR(miss_rule);
netdev_err(priv->netdev, "fail to create ipsec rx miss_rule err=%d\n", err);
goto out;
}
fs_prot->miss_rule = miss_rule;
out:
kvfree(flow_group_in);
kvfree(spec);
return err;
}
static int rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
static void rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
@@ -249,17 +163,21 @@ static int rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
/* The netdev unreg already happened, so all offloaded rule are already removed */
fs_prot = &accel_esp->fs_prot[type];
rx_fs_destroy(fs_prot);
mlx5_del_flow_rules(fs_prot->miss_rule);
mlx5_destroy_flow_group(fs_prot->miss_group);
mlx5_destroy_flow_table(fs_prot->ft);
rx_err_destroy_ft(priv, &fs_prot->rx_err);
return 0;
mlx5_del_flow_rules(fs_prot->rx_err.rule);
mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
mlx5_destroy_flow_table(fs_prot->rx_err.ft);
}
static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
struct mlx5_flow_table *ft;
int err;
accel_esp = priv->ipsec->rx_fs;
@@ -268,14 +186,45 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
fs_prot->default_dest =
mlx5_ttc_get_default_dest(priv->fs.ttc, fs_esp2tt(type));
err = rx_err_create_ft(priv, fs_prot, &fs_prot->rx_err);
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
if (IS_ERR(ft))
return PTR_ERR(ft);
fs_prot->rx_err.ft = ft;
err = rx_err_add_rule(priv, fs_prot, &fs_prot->rx_err);
if (err)
return err;
goto err_add;
/* Create FT */
ft_attr.max_fte = NUM_IPSEC_FTE;
ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft_attr.autogroup.num_reserved_entries = 1;
ft_attr.autogroup.max_num_groups = 1;
ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
}
fs_prot->ft = ft;
err = rx_fs_create(priv, fs_prot);
if (err)
rx_destroy(priv, type);
goto err_fs;
return 0;
err_fs:
mlx5_destroy_flow_table(fs_prot->ft);
err_fs_ft:
mlx5_del_flow_rules(fs_prot->rx_err.rule);
mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
err_add:
mlx5_destroy_flow_table(fs_prot->rx_err.ft);
return err;
}
@@ -289,21 +238,21 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
accel_esp = priv->ipsec->rx_fs;
fs_prot = &accel_esp->fs_prot[type];
mutex_lock(&fs_prot->prot_mutex);
if (fs_prot->refcnt++)
goto out;
if (fs_prot->refcnt)
goto skip;
/* create FT */
err = rx_create(priv, type);
if (err) {
fs_prot->refcnt--;
if (err)
goto out;
}
/* connect */
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = fs_prot->ft;
mlx5_ttc_fwd_dest(priv->fs.ttc, fs_esp2tt(type), &dest);
skip:
fs_prot->refcnt++;
out:
mutex_unlock(&fs_prot->prot_mutex);
return err;
@@ -317,7 +266,8 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
accel_esp = priv->ipsec->rx_fs;
fs_prot = &accel_esp->fs_prot[type];
mutex_lock(&fs_prot->prot_mutex);
if (--fs_prot->refcnt)
fs_prot->refcnt--;
if (fs_prot->refcnt)
goto out;
/* disconnect */
@@ -338,15 +288,9 @@ static int tx_create(struct mlx5e_priv *priv)
struct mlx5_flow_table *ft;
int err;
priv->fs.egress_ns =
mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_EGRESS_KERNEL);
if (!priv->fs.egress_ns)
return -EOPNOTSUPP;
ft_attr.max_fte = NUM_IPSEC_FTE;
ft_attr.autogroup.max_num_groups = 1;
ft = mlx5_create_auto_grouped_flow_table(priv->fs.egress_ns, &ft_attr);
ft = mlx5_create_auto_grouped_flow_table(ipsec->tx_fs->ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
netdev_err(priv->netdev, "fail to create ipsec tx ft err=%d\n", err);
@@ -356,32 +300,20 @@ static int tx_create(struct mlx5e_priv *priv)
return 0;
}
static void tx_destroy(struct mlx5e_priv *priv)
{
struct mlx5e_ipsec *ipsec = priv->ipsec;
if (IS_ERR_OR_NULL(ipsec->tx_fs->ft))
return;
mlx5_destroy_flow_table(ipsec->tx_fs->ft);
ipsec->tx_fs->ft = NULL;
}
static int tx_ft_get(struct mlx5e_priv *priv)
{
struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
int err = 0;
mutex_lock(&tx_fs->mutex);
if (tx_fs->refcnt++)
goto out;
if (tx_fs->refcnt)
goto skip;
err = tx_create(priv);
if (err) {
tx_fs->refcnt--;
if (err)
goto out;
}
skip:
tx_fs->refcnt++;
out:
mutex_unlock(&tx_fs->mutex);
return err;
@@ -392,11 +324,11 @@ static void tx_ft_put(struct mlx5e_priv *priv)
struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
mutex_lock(&tx_fs->mutex);
if (--tx_fs->refcnt)
tx_fs->refcnt--;
if (tx_fs->refcnt)
goto out;
tx_destroy(priv);
mlx5_destroy_flow_table(tx_fs->ft);
out:
mutex_unlock(&tx_fs->mutex);
}
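
Both get/put pairs in this file (rx_ft_get()/rx_ft_put() above and tx_ft_get()/tx_ft_put() here) switch from optimistic refcounting, where the counter was incremented up front and decremented again on failure, to incrementing only once the table actually exists. The shared shape of the new pattern, using the TX names:

mutex_lock(&tx_fs->mutex);
if (tx_fs->refcnt)
	goto skip;	/* table already created by an earlier user */
err = tx_create(priv);
if (err)
	goto out;	/* refcnt stays untouched on failure */
skip:
	tx_fs->refcnt++;
out:
	mutex_unlock(&tx_fs->mutex);
return err;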
@@ -424,8 +356,8 @@ static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
/* SPI number */
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi,
be32_to_cpu(attrs->spi));
MLX5_SET(fte_match_param, spec->match_value,
misc_parameters.outer_esp_spi, attrs->spi);
if (ip_version == 4) {
memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -458,11 +390,12 @@
}
static int rx_add_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 ipsec_obj_id,
struct mlx5e_ipsec_rule *ipsec_rule)
struct mlx5e_ipsec_sa_entry *sa_entry)
{
u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
u32 ipsec_obj_id = sa_entry->ipsec_obj_id;
struct mlx5_modify_hdr *modify_hdr = NULL;
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5_flow_destination dest = {};
@@ -536,9 +469,7 @@ static int rx_add_rule(struct mlx5e_priv *priv,
}
static int tx_add_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 ipsec_obj_id,
struct mlx5e_ipsec_rule *ipsec_rule)
struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
@@ -555,7 +486,8 @@ static int tx_add_rule(struct mlx5e_priv *priv,
goto out;
}
setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);
setup_fte_common(&sa_entry->attrs, sa_entry->ipsec_obj_id, spec,
&flow_act);
/* Add IPsec indicator in metadata_reg_a */
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
@@ -570,11 +502,11 @@
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
attrs->action, err);
sa_entry->attrs.action, err);
goto out;
}
ipsec_rule->rule = rule;
sa_entry->ipsec_rule.rule = rule;
out:
kvfree(spec);
@@ -583,130 +515,88 @@ static int tx_add_rule(struct mlx5e_priv *priv,
return err;
}
static void rx_del_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
struct mlx5e_ipsec_rule *ipsec_rule)
{
mlx5_del_flow_rules(ipsec_rule->rule);
ipsec_rule->rule = NULL;
mlx5_modify_header_dealloc(priv->mdev, ipsec_rule->set_modify_hdr);
ipsec_rule->set_modify_hdr = NULL;
rx_ft_put(priv, attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
}
static void tx_del_rule(struct mlx5e_priv *priv,
struct mlx5e_ipsec_rule *ipsec_rule)
{
mlx5_del_flow_rules(ipsec_rule->rule);
ipsec_rule->rule = NULL;
tx_ft_put(priv);
}
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 ipsec_obj_id,
struct mlx5e_ipsec_rule *ipsec_rule)
struct mlx5e_ipsec_sa_entry *sa_entry)
{
if (!priv->ipsec->rx_fs)
return -EOPNOTSUPP;
if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT)
return tx_add_rule(priv, sa_entry);
if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
else
return tx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
return rx_add_rule(priv, sa_entry);
}
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
struct mlx5e_ipsec_rule *ipsec_rule)
struct mlx5e_ipsec_sa_entry *sa_entry)
{
if (!priv->ipsec->rx_fs)
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
mlx5_del_flow_rules(ipsec_rule->rule);
if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT) {
tx_ft_put(priv);
return;
}
if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
rx_del_rule(priv, attrs, ipsec_rule);
else
tx_del_rule(priv, ipsec_rule);
mlx5_modify_header_dealloc(mdev, ipsec_rule->set_modify_hdr);
rx_ft_put(priv,
sa_entry->attrs.is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
}
static void fs_cleanup_tx(struct mlx5e_priv *priv)
{
mutex_destroy(&priv->ipsec->tx_fs->mutex);
WARN_ON(priv->ipsec->tx_fs->refcnt);
kfree(priv->ipsec->tx_fs);
priv->ipsec->tx_fs = NULL;
}
static void fs_cleanup_rx(struct mlx5e_priv *priv)
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
{
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
enum accel_fs_esp_type i;
accel_esp = priv->ipsec->rx_fs;
if (!ipsec->rx_fs)
return;
mutex_destroy(&ipsec->tx_fs->mutex);
WARN_ON(ipsec->tx_fs->refcnt);
kfree(ipsec->tx_fs);
accel_esp = ipsec->rx_fs;
for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
fs_prot = &accel_esp->fs_prot[i];
mutex_destroy(&fs_prot->prot_mutex);
WARN_ON(fs_prot->refcnt);
}
kfree(priv->ipsec->rx_fs);
priv->ipsec->rx_fs = NULL;
kfree(ipsec->rx_fs);
}
static int fs_init_tx(struct mlx5e_priv *priv)
{
priv->ipsec->tx_fs =
kzalloc(sizeof(struct mlx5e_ipsec_tx), GFP_KERNEL);
if (!priv->ipsec->tx_fs)
return -ENOMEM;
mutex_init(&priv->ipsec->tx_fs->mutex);
return 0;
}
static int fs_init_rx(struct mlx5e_priv *priv)
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
struct mlx5e_accel_fs_esp_prot *fs_prot;
struct mlx5e_accel_fs_esp *accel_esp;
struct mlx5_flow_namespace *ns;
enum accel_fs_esp_type i;
int err = -ENOMEM;
priv->ipsec->rx_fs =
kzalloc(sizeof(struct mlx5e_accel_fs_esp), GFP_KERNEL);
if (!priv->ipsec->rx_fs)
ns = mlx5_get_flow_namespace(ipsec->mdev,
MLX5_FLOW_NAMESPACE_EGRESS_KERNEL);
if (!ns)
return -EOPNOTSUPP;
ipsec->tx_fs = kzalloc(sizeof(*ipsec->tx_fs), GFP_KERNEL);
if (!ipsec->tx_fs)
return -ENOMEM;
accel_esp = priv->ipsec->rx_fs;
ipsec->rx_fs = kzalloc(sizeof(*ipsec->rx_fs), GFP_KERNEL);
if (!ipsec->rx_fs)
goto err_rx;
mutex_init(&ipsec->tx_fs->mutex);
ipsec->tx_fs->ns = ns;
accel_esp = ipsec->rx_fs;
for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
fs_prot = &accel_esp->fs_prot[i];
mutex_init(&fs_prot->prot_mutex);
}
return 0;
}
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv)
{
if (!priv->ipsec->rx_fs)
return;
fs_cleanup_tx(priv);
fs_cleanup_rx(priv);
}
int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv)
{
int err;
err = fs_init_tx(priv);
if (err)
return err;
err = fs_init_rx(priv);
if (err)
fs_cleanup_tx(priv);
err_rx:
kfree(ipsec->tx_fs);
return err;
}

View file

@@ -9,8 +9,8 @@
#include "ipsec_offload.h"
#include "en/fs.h"
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv);
int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv);
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 ipsec_obj_id,

View file

@@ -2,29 +2,12 @@
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */
#include "mlx5_core.h"
#include "ipsec_offload.h"
#include "ipsec.h"
#include "lib/mlx5.h"
#include "en_accel/ipsec_fs.h"
struct mlx5_ipsec_sa_ctx {
struct rhash_head hash;
u32 enc_key_id;
u32 ipsec_obj_id;
/* hw ctx */
struct mlx5_core_dev *dev;
struct mlx5_ipsec_esp_xfrm *mxfrm;
};
struct mlx5_ipsec_esp_xfrm {
/* reference counter of SA ctx */
struct mlx5_ipsec_sa_ctx *sa_ctx;
struct mutex lock; /* protects mlx5_ipsec_esp_xfrm */
struct mlx5_accel_esp_xfrm accel_xfrm;
};
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
u32 caps;
u32 caps = 0;
if (!MLX5_CAP_GEN(mdev, ipsec_offload))
return 0;
@@ -36,23 +19,23 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
return 0;
if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) ||
!MLX5_CAP_ETH(mdev, insert_trailer))
return 0;
if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
return 0;
caps = MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 |
MLX5_ACCEL_IPSEC_CAP_LSO;
if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) ||
!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
return 0;
if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) &&
MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
caps |= MLX5_ACCEL_IPSEC_CAP_ESP;
if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
caps |= MLX5_IPSEC_CAP_CRYPTO;
if (!caps)
return 0;
if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
caps |= MLX5_ACCEL_IPSEC_CAP_ESN;
caps |= MLX5_IPSEC_CAP_ESN;
/* We can accommodate up to 2^24 different IPsec objects
* because we use up to 24 bit in flow table metadata
@@ -63,89 +46,11 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
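
The exported capability mask shrinks with the rest of the code: MLX5_ACCEL_IPSEC_CAP_DEVICE, _ESP, _IPV6 and _LSO collapse into a single MLX5_IPSEC_CAP_CRYPTO, reported only when the crypto, trailer-insertion and SWP prerequisites all hold, while LSO is now probed directly via MLX5_CAP_ETH(mdev, swp_lso) during netdev setup. Callers are left with the two query forms seen earlier in this series:

/* in mlx5e_ipsec_build_netdev(): any non-zero mask means usable offload */
if (!mlx5_ipsec_device_caps(mdev))
	return;

/* in mlx5e_xfrm_validate_state(): ESN stays a separate bit */
if (x->props.flags & XFRM_STATE_ESN &&
    !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_ESN))
	return -EINVAL;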
static int
mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay (replay_type = %d)\n",
attrs->replay_type);
return -EOPNOTSUPP;
}
if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
mlx5_core_err(mdev, "Only aes gcm keymat is supported (keymat_type = %d)\n",
attrs->keymat_type);
return -EOPNOTSUPP;
}
if (attrs->keymat.aes_gcm.iv_algo !=
MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
mlx5_core_err(mdev, "Only iv sequence algo is supported (iv_algo = %d)\n",
attrs->keymat.aes_gcm.iv_algo);
return -EOPNOTSUPP;
}
if (attrs->keymat.aes_gcm.key_len != 128 &&
attrs->keymat.aes_gcm.key_len != 256) {
mlx5_core_err(mdev, "Cannot offload xfrm states with key length other than 128/256 bit (key length = %d)\n",
attrs->keymat.aes_gcm.key_len);
return -EOPNOTSUPP;
}
if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
!MLX5_CAP_IPSEC(mdev, ipsec_esn)) {
mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered\n");
return -EOPNOTSUPP;
}
return 0;
}
static struct mlx5_accel_esp_xfrm *
mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_ipsec_esp_xfrm *mxfrm;
int err = 0;
err = mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs);
if (err)
return ERR_PTR(err);
mxfrm = kzalloc(sizeof(*mxfrm), GFP_KERNEL);
if (!mxfrm)
return ERR_PTR(-ENOMEM);
mutex_init(&mxfrm->lock);
memcpy(&mxfrm->accel_xfrm.attrs, attrs,
sizeof(mxfrm->accel_xfrm.attrs));
return &mxfrm->accel_xfrm;
}
static void mlx5_ipsec_offload_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
struct mlx5_ipsec_esp_xfrm *mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm,
accel_xfrm);
/* assuming no sa_ctx are connected to this xfrm_ctx */
WARN_ON(mxfrm->sa_ctx);
kfree(mxfrm);
}
struct mlx5_ipsec_obj_attrs {
const struct aes_gcm_keymat *aes_gcm;
u32 accel_flags;
u32 esn_msb;
u32 enc_key_id;
};
static int mlx5_create_ipsec_obj(struct mlx5_core_dev *mdev,
struct mlx5_ipsec_obj_attrs *attrs,
u32 *ipsec_id)
{
const struct aes_gcm_keymat *aes_gcm = attrs->aes_gcm;
struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
void *obj, *salt_p, *salt_iv_p;
@@ -157,33 +62,18 @@ static int mlx5_create_ipsec_obj(struct mlx5_core_dev *mdev,
salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));
switch (aes_gcm->icv_len) {
case 64:
MLX5_SET(ipsec_obj, obj, icv_length,
MLX5_IPSEC_OBJECT_ICV_LEN_8B);
break;
case 96:
MLX5_SET(ipsec_obj, obj, icv_length,
MLX5_IPSEC_OBJECT_ICV_LEN_12B);
break;
case 128:
MLX5_SET(ipsec_obj, obj, icv_length,
MLX5_IPSEC_OBJECT_ICV_LEN_16B);
break;
default:
return -EINVAL;
}
MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B);
salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
/* esn */
if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
MLX5_SET(ipsec_obj, obj, esn_en, 1);
MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb);
if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
}
MLX5_SET(ipsec_obj, obj, dekn, attrs->enc_key_id);
MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);
/* general object fields set */
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
@@ -193,13 +83,15 @@ static int mlx5_create_ipsec_obj(struct mlx5_core_dev *mdev,
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
*ipsec_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
sa_entry->ipsec_obj_id =
MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
return err;
}
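
The dropped 64- and 96-bit ICV cases were dead code rather than lost functionality: the driver's validation path only ever admits 128-bit ICVs, so the create path can hardcode MLX5_IPSEC_OBJECT_ICV_LEN_16B, and the final hunk of the series removes the now-unreachable 12B/8B enum values from mlx5_ifc.h. For reference, the pre-existing guard in mlx5e_xfrm_validate_state() (quoted from the driver; it is not part of this diff):

if (x->aead->alg_icv_len != 128) {
	netdev_info(netdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
	return -EINVAL;
}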
static void mlx5_destroy_ipsec_obj(struct mlx5_core_dev *mdev, u32 ipsec_id)
static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
@@ -207,86 +99,52 @@ static void mlx5_destroy_ipsec_obj(struct mlx5_core_dev *mdev, u32 ipsec_id)
MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_IPSEC);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
static void *mlx5_ipsec_offload_create_sa_ctx(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *accel_xfrm,
const __be32 saddr[4], const __be32 daddr[4],
const __be32 spi, bool is_ipv6, u32 *hw_handle)
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs = &accel_xfrm->attrs;
struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;
struct mlx5_ipsec_obj_attrs ipsec_attrs = {};
struct mlx5_ipsec_esp_xfrm *mxfrm;
struct mlx5_ipsec_sa_ctx *sa_ctx;
struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm;
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
int err;
/* alloc SA context */
sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
if (!sa_ctx)
return ERR_PTR(-ENOMEM);
sa_ctx->dev = mdev;
mxfrm = container_of(accel_xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm);
mutex_lock(&mxfrm->lock);
sa_ctx->mxfrm = mxfrm;
/* key */
err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
aes_gcm->key_len / BITS_PER_BYTE,
MLX5_ACCEL_OBJ_IPSEC_KEY,
&sa_ctx->enc_key_id);
&sa_entry->enc_key_id);
if (err) {
mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
goto err_sa_ctx;
return err;
}
ipsec_attrs.aes_gcm = aes_gcm;
ipsec_attrs.accel_flags = accel_xfrm->attrs.flags;
ipsec_attrs.esn_msb = accel_xfrm->attrs.esn;
ipsec_attrs.enc_key_id = sa_ctx->enc_key_id;
err = mlx5_create_ipsec_obj(mdev, &ipsec_attrs,
&sa_ctx->ipsec_obj_id);
err = mlx5_create_ipsec_obj(sa_entry);
if (err) {
mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
goto err_enc_key;
}
*hw_handle = sa_ctx->ipsec_obj_id;
mxfrm->sa_ctx = sa_ctx;
mutex_unlock(&mxfrm->lock);
return sa_ctx;
return 0;
err_enc_key:
mlx5_destroy_encryption_key(mdev, sa_ctx->enc_key_id);
err_sa_ctx:
mutex_unlock(&mxfrm->lock);
kfree(sa_ctx);
return ERR_PTR(err);
mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
return err;
}
static void mlx5_ipsec_offload_delete_sa_ctx(void *context)
void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
struct mlx5_ipsec_sa_ctx *sa_ctx = (struct mlx5_ipsec_sa_ctx *)context;
struct mlx5_ipsec_esp_xfrm *mxfrm = sa_ctx->mxfrm;
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
mutex_lock(&mxfrm->lock);
mlx5_destroy_ipsec_obj(sa_ctx->dev, sa_ctx->ipsec_obj_id);
mlx5_destroy_encryption_key(sa_ctx->dev, sa_ctx->enc_key_id);
kfree(sa_ctx);
mxfrm->sa_ctx = NULL;
mutex_unlock(&mxfrm->lock);
mlx5_destroy_ipsec_obj(sa_entry);
mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
}
static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev,
struct mlx5_ipsec_obj_attrs *attrs,
u32 ipsec_id)
static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
u64 modify_field_select = 0;
@@ -294,7 +152,7 @@ static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev,
void *obj;
int err;
if (!(attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED))
if (!(attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED))
return 0;
general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
@@ -304,11 +162,11 @@
/* general object fields set */
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (err) {
mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
ipsec_id, err);
sa_entry->ipsec_obj_id, err);
return err;
}
@@ -321,8 +179,11 @@
return -EOPNOTSUPP;
obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb);
if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
MLX5_SET64(ipsec_obj, obj, modify_field_select,
MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
/* general object fields set */
@@ -331,90 +192,14 @@
return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_ipsec_offload_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_ipsec_obj_attrs ipsec_attrs = {};
struct mlx5_core_dev *mdev = xfrm->mdev;
struct mlx5_ipsec_esp_xfrm *mxfrm;
int err;
int err = 0;
err = mlx5_modify_ipsec_obj(sa_entry, attrs);
if (err)
return;
if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
return 0;
if (mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs))
return -EOPNOTSUPP;
mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm);
mutex_lock(&mxfrm->lock);
if (!mxfrm->sa_ctx)
/* Not bound xfrm, change only sw attrs */
goto change_sw_xfrm_attrs;
/* need to add find and replace in ipsec_rhash_sa the sa_ctx */
/* modify device with new hw_sa */
ipsec_attrs.accel_flags = attrs->flags;
ipsec_attrs.esn_msb = attrs->esn;
err = mlx5_modify_ipsec_obj(mdev,
&ipsec_attrs,
mxfrm->sa_ctx->ipsec_obj_id);
change_sw_xfrm_attrs:
if (!err)
memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
mutex_unlock(&mxfrm->lock);
return err;
}
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
u32 *sa_handle)
{
__be32 saddr[4] = {}, daddr[4] = {};
if (!xfrm->attrs.is_ipv6) {
saddr[3] = xfrm->attrs.saddr.a4;
daddr[3] = xfrm->attrs.daddr.a4;
} else {
memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr));
memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
}
return mlx5_ipsec_offload_create_sa_ctx(mdev, xfrm, saddr, daddr,
xfrm->attrs.spi,
xfrm->attrs.is_ipv6, sa_handle);
}
void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context)
{
mlx5_ipsec_offload_delete_sa_ctx(context);
}
struct mlx5_accel_esp_xfrm *
mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct mlx5_accel_esp_xfrm *xfrm;
xfrm = mlx5_ipsec_offload_esp_create_xfrm(mdev, attrs);
if (IS_ERR(xfrm))
return xfrm;
xfrm->mdev = mdev;
return xfrm;
}
void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
mlx5_ipsec_offload_esp_destroy_xfrm(xfrm);
}
int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
return mlx5_ipsec_offload_esp_modify_xfrm(xfrm, attrs);
memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}

View file

@@ -1,14 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#ifndef __MLX5_IPSEC_OFFLOAD_H__
#define __MLX5_IPSEC_OFFLOAD_H__
#include <linux/mlx5/driver.h>
#include <linux/mlx5/accel.h>
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
u32 *sa_handle);
void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context);
#endif /* __MLX5_IPSEC_OFFLOAD_H__ */

View file

@@ -34,9 +34,8 @@
#include <crypto/aead.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include "ipsec_offload.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h"
#include "ipsec.h"
#include "ipsec_rxtx.h"
#include "en.h"
enum {
@@ -333,7 +332,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
return;
}
sp = skb_sec_path(skb);
sp->xvec[sp->len++] = xs;
sp->olen++;

View file

@@ -35,9 +35,7 @@
#include <net/sock.h>
#include "en.h"
#include "ipsec_offload.h"
#include "fpga/sdk.h"
#include "en_accel/ipsec.h"
#include "ipsec.h"
static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },

View file

@@ -48,7 +48,6 @@
#include "en_accel/ipsec.h"
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
#include "en_accel/ipsec_offload.h"
#include "lib/vxlan.h"
#include "lib/clock.h"
#include "en/port.h"

View file

@@ -48,7 +48,7 @@
#include "en_rep.h"
#include "en/rep/tc.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_offload.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ktls_txrx.h"
#include "en/xdp.h"

View file

@@ -62,7 +62,7 @@
#include "lib/mlx5.h"
#include "lib/tout.h"
#include "fpga/core.h"
#include "en_accel/ipsec_offload.h"
#include "en_accel/ipsec.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
#include "lib/geneve.h"

View file

@@ -1,153 +0,0 @@
/*
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef __MLX5_ACCEL_H__
#define __MLX5_ACCEL_H__
#include <linux/mlx5/driver.h>
enum mlx5_accel_esp_aes_gcm_keymat_iv_algo {
MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ,
};
enum mlx5_accel_esp_flags {
MLX5_ACCEL_ESP_FLAGS_TUNNEL = 0, /* Default */
MLX5_ACCEL_ESP_FLAGS_TRANSPORT = 1UL << 0,
MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED = 1UL << 1,
MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2,
};
enum mlx5_accel_esp_action {
MLX5_ACCEL_ESP_ACTION_DECRYPT,
MLX5_ACCEL_ESP_ACTION_ENCRYPT,
};
enum mlx5_accel_esp_keymats {
MLX5_ACCEL_ESP_KEYMAT_AES_NONE,
MLX5_ACCEL_ESP_KEYMAT_AES_GCM,
};
enum mlx5_accel_esp_replay {
MLX5_ACCEL_ESP_REPLAY_NONE,
MLX5_ACCEL_ESP_REPLAY_BMP,
};
struct aes_gcm_keymat {
u64 seq_iv;
enum mlx5_accel_esp_aes_gcm_keymat_iv_algo iv_algo;
u32 salt;
u32 icv_len;
u32 key_len;
u32 aes_key[256 / 32];
};
struct mlx5_accel_esp_xfrm_attrs {
enum mlx5_accel_esp_action action;
u32 esn;
__be32 spi;
u32 seq;
u32 tfc_pad;
u32 flags;
u32 sa_handle;
enum mlx5_accel_esp_replay replay_type;
union {
struct {
u32 size;
} bmp;
} replay;
enum mlx5_accel_esp_keymats keymat_type;
union {
struct aes_gcm_keymat aes_gcm;
} keymat;
union {
__be32 a4;
__be32 a6[4];
} saddr;
union {
__be32 a4;
__be32 a6[4];
} daddr;
u8 is_ipv6;
};
struct mlx5_accel_esp_xfrm {
struct mlx5_core_dev *mdev;
struct mlx5_accel_esp_xfrm_attrs attrs;
};
enum mlx5_accel_ipsec_cap {
MLX5_ACCEL_IPSEC_CAP_DEVICE = 1 << 0,
MLX5_ACCEL_IPSEC_CAP_ESP = 1 << 1,
MLX5_ACCEL_IPSEC_CAP_IPV6 = 1 << 2,
MLX5_ACCEL_IPSEC_CAP_LSO = 1 << 3,
MLX5_ACCEL_IPSEC_CAP_ESN = 1 << 4,
};
#ifdef CONFIG_MLX5_EN_IPSEC
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev);
struct mlx5_accel_esp_xfrm *
mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs);
void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm);
int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs);
#else
static inline u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
return 0;
}
static inline struct mlx5_accel_esp_xfrm *
mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline void
mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) {}
static inline int
mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; }
#endif /* CONFIG_MLX5_EN_IPSEC */
#endif /* __MLX5_ACCEL_H__ */

View file

@@ -11379,8 +11379,6 @@ enum {
enum {
MLX5_IPSEC_OBJECT_ICV_LEN_16B,
MLX5_IPSEC_OBJECT_ICV_LEN_12B,
MLX5_IPSEC_OBJECT_ICV_LEN_8B,
};
struct mlx5_ifc_ipsec_obj_bits {