qat: Add Intel® 4xxx Series VF driver support

Overview:
Intel(R) QuickAssist Technology (Intel(R) QAT) provides hardware
acceleration for offloading security, authentication and compression
services from the CPU, thus significantly increasing the performance and
efficiency of standard platform solutions.

This commit introduces:
- Intel® 4xxx Series VF driver support.
- Device configurability via sysctls.
- UIO support for Intel® 4xxx Series devices.

Patch co-authored by: Krzysztof Zdziarski <krzysztofx.zdziarski@intel.com>
Patch co-authored by: Michal Gulbicki <michalx.gulbicki@intel.com>
Patch co-authored by: Julian Grajkowski <julianx.grajkowski@intel.com>
Patch co-authored by: Piotr Kasierski <piotrx.kasierski@intel.com>
Patch co-authored by: Lukasz Kolodzinski <lukaszx.kolodzinski@intel.com>
Patch co-authored by: Karol Grzadziel <karolx.grzadziel@intel.com>

Sponsored by:	Intel Corporation
Differential Revision: https://reviews.freebsd.org/D39850
This commit is contained in:
Krzysztof Zdziarski 2023-06-12 13:10:03 -04:00 committed by Mark Johnston
parent 694f18d353
commit 266b0663c5
104 changed files with 18578 additions and 7764 deletions

View file

@ -1,7 +1,7 @@
.\" SPDX-License-Identifier: BSD-3-Clause
.\" Copyright(c) 2007-2022 Intel Corporation
.\" $FreeBSD$
.Dd September 1, 2022
.Dd May 4, 2023
.Dt QAT 4
.Os
.Sh NAME
@ -101,6 +101,61 @@ For details of usage and supported operations and algorithms refer to the
documentation mentioned above and
.Sx SEE ALSO
section.
.Sh SYSCTL VARIABLES
The following variables may be used to reconfigure the QAT device.\&
For configuration persistence, these variables may be set before loading the driver, either via
.Xr kenv 1
or
.Xr loader.conf 5 .
The device specific configuration options are prefixed with
.Va dev.qat.X\&.
where X is the device number.
The specific device needs to be in "down" state before changing the configuration.
.Bl -tag -width indent
.It Va state
Shows the current state of the device and allows overriding it. Possible values: "down", "up".
NOTE: If the symmetric services are used by the device, the qat_ocf driver needs to be disabled prior to the device
reconfiguration.
The following variable may be used to enable or disable the QAT cryptographic framework connectivity:
.Va dev.qat_ocf.0.enable\&.
Enabled by default.
.It Va cfg_services
Override the device services enabled: symmetric, asymmetric, data compression.
Possible values: "sym", "asym", "dc", "sym;dc", "asym;dc", "sym;asym".
Default services configured are "sym;asym" for even and "dc" for odd devices.
.It Va cfg_mode
Override the device mode configuration for kernel space and user space instances.
Possible values: "ks", "us", "ks;us".
Default value "ks;us".
.It Va num_user_processes
Override the number of uio user space processes that can connect to the QAT device.
Default: 2
.El
.Pp
The following
.Xr sysctl 8
variables are read-only:
.Bl -tag -width indent
.It Va frequency
QAT device frequency value.
.It Va mmp_version
QAT MMP Library revision number.
.It Va hw_version
QAT hardware revision number.
.It Va fw_version
QAT firmware revision number.
.It Va dev_cfg
Summary of device specific configuration.
.It Va heartbeat
QAT device heartbeat status. A value of '1' indicates that the device is operational.
A value of '0' means that the device is not responsive and requires a restart.
.It Va heartbeat_failed
Number of QAT heartbeat failures received.
.It Va heartbeat_sent
Number of QAT heartbeat requests sent.
.El
.Sh COMPATIBILITY
The
.Nm

Binary file not shown.

View file

@ -13,14 +13,14 @@
#define ADF_CFG_STATIC_CONF_DC_INTER_BUF_SIZE 64
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_ENABLED 1
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DC 1
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DH 0
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DRBG 0
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DSA 0
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_ECC 0
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_KEYGEN 0
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_LN 0
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_PRIME 0
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_RSA 0
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DH 1
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DRBG 1
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DSA 1
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_ECC 1
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_KEYGEN 1
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_LN 1
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_PRIME 1
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_RSA 1
#define ADF_CFG_STATIC_CONF_SAL_STATS_CFG_SYM 1
#define ADF_CFG_STATIC_CONF_POLL 1
#define ADF_CFG_STATIC_CONF_IRQ 0
@ -30,6 +30,14 @@
#define ADF_CFG_STATIC_CONF_INST_NUM_DC 2
#define ADF_CFG_STATIC_CONF_INST_NUM_CY_POLL 6
#define ADF_CFG_STATIC_CONF_INST_NUM_CY_IRQ 2
#define ADF_CFG_STATIC_CONF_USER_PROCESSES_NUM 2
#define ADF_CFG_STATIC_CONF_USER_INST_NUM_CY 6
#define ADF_CFG_STATIC_CONF_USER_INST_NUM_DC 2
#define ADF_CFG_STATIC_CONF_INST_NUM_CY_POLL_VF 1
#define ADF_CFG_STATIC_CONF_INST_NUM_CY_IRQ_VF 1
#define ADF_CFG_STATIC_CONF_INST_NUM_DC_VF 2
#define ADF_CFG_STATIC_CONF_USER_INST_NUM_CY_VF 2
#define ADF_CFG_STATIC_CONF_USER_INST_NUM_DC_VF 2
#define ADF_CFG_FW_STRING_TO_ID(str, acc, id) \
do { \

View file

@ -0,0 +1,27 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_GEN2_PFVF_H
#define ADF_GEN2_PFVF_H
#include <linux/types.h>
#include "adf_accel_devices.h"
/*
 * GEN2 error-source (ERRSOU) and error-mask (ERRMSK) register offsets,
 * expressed as a 0x3A000 base plus a register-specific offset.
 * NOTE(review): 0x3A000 presumably addresses the GEN2 PMISC BAR region —
 * confirm against the hardware reference before reuse.
 */
#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
#define ADF_GEN2_ERRMSK3 (0x3A000 + 0x1C)
#define ADF_GEN2_ERRMSK5 (0x3A000 + 0xDC)
/*
 * PF-side PF/VF ops init for GEN2: only enable_comms is populated, and it
 * is pointed at the "comms disabled" stub, i.e. no PF->VF messaging is
 * wired up through this ops table.
 */
static inline void
adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
{
pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
}
/*
 * VF-side PF/VF ops init for GEN2: identical to the PF side — the
 * enable_comms hook is stubbed to the "comms disabled" handler.
 */
static inline void
adf_gen2_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
{
pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
}
#endif /* ADF_GEN2_PFVF_H */

View file

@ -0,0 +1,16 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_GEN4_PFVF_H
#define ADF_GEN4_PFVF_H
#include "adf_accel_devices.h"
/* VF-side GEN4 PF/VF ops initializer; implemented out of line elsewhere. */
void adf_gen4_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops);
/*
 * PF-side GEN4 PF/VF ops init: only enable_comms is populated, pointed at
 * the "comms disabled" stub — PF->VF messaging is not enabled through this
 * ops table.
 */
static inline void
adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
{
pfvf_ops->enable_comms = adf_pfvf_comms_disabled;
}
#endif /* ADF_GEN4_PFVF_H */

View file

@ -0,0 +1,17 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_GEN4_TIMER_H_
#define ADF_GEN4_TIMER_H_
struct adf_accel_dev;
/*
 * Deferred-work context for the GEN4 interrupt timer: ties the owning
 * device to the work item queued by the timer.
 * NOTE(review): "hb" presumably stands for heartbeat — confirm against the
 * timer implementation; this header alone does not show it.
 */
struct adf_hb_timer_data {
struct adf_accel_dev *accel_dev;
struct work_struct hb_int_timer_work;
};
/* Set up / tear down the per-device interrupt timer. */
int adf_int_timer_init(struct adf_accel_dev *accel_dev);
void adf_int_timer_exit(struct adf_accel_dev *accel_dev);
#endif /* ADF_GEN4_TIMER_H_ */

View file

@ -0,0 +1,151 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_GEN4VF_HW_CSR_DATA_H_
#define ADF_GEN4VF_HW_CSR_DATA_H_
/*
 * Ring CSR accessors for GEN4 VF devices.
 *
 * Every ring register is addressed as:
 *   ADF_RING_CSR_ADDR_OFFSET_GEN4VF + bank * ADF_RING_BUNDLE_SIZE_GEN4 +
 *   <register offset> [+ ring * 4]
 */
#define ADF_RING_CSR_ADDR_OFFSET_GEN4VF 0x0
#define ADF_RING_BUNDLE_SIZE_GEN4 0x2000
#define ADF_RING_CSR_RING_HEAD 0x0C0
#define ADF_RING_CSR_RING_TAIL 0x100
#define ADF_RING_CSR_E_STAT 0x14C
#define ADF_RING_CSR_RING_CONFIG_GEN4 0x1000
#define ADF_RING_CSR_RING_LBASE_GEN4 0x1040
#define ADF_RING_CSR_RING_UBASE_GEN4 0x1080
#define ADF_RING_CSR_INT_FLAG 0x170
#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
#define ADF_RING_CSR_NEXT_INT_SRCSEL 0x4
#define ADF_RING_CSR_INT_SRCSEL 0x174
#define ADF_RING_CSR_INT_COL_EN 0x17C
#define ADF_RING_CSR_INT_COL_CTL 0x180
#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C
#define ADF_BANK_INT_SRC_SEL_MASK_GEN4 0x44UL
#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
#define ADF_BANK_INT_FLAG_CLEAR_MASK_GEN4 0x3
#define ADF_RINGS_PER_INT_SRCSEL_GEN4 2
/* Ring base addresses are 64-byte aligned; bits below log2(size) are zeroed. */
#define BUILD_RING_BASE_ADDR_GEN4(addr, size) \
	((((addr) >> 6) & (0xFFFFFFFFFFFFFFFFULL << (size))) << 6)
#define READ_CSR_RING_HEAD_GEN4VF(csr_base_addr, bank, ring) \
	ADF_CSR_RD((csr_base_addr), \
		   (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \
		    ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \
		       ADF_RING_CSR_RING_HEAD + ((ring) << 2))
#define READ_CSR_RING_TAIL_GEN4VF(csr_base_addr, bank, ring) \
	ADF_CSR_RD((csr_base_addr), \
		   (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \
		    ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \
		       ADF_RING_CSR_RING_TAIL + ((ring) << 2))
#define READ_CSR_E_STAT_GEN4VF(csr_base_addr, bank) \
	ADF_CSR_RD((csr_base_addr), \
		   (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \
		    ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \
		       ADF_RING_CSR_E_STAT)
#define WRITE_CSR_RING_CONFIG_GEN4VF(csr_base_addr, bank, ring, value) \
	ADF_CSR_WR((csr_base_addr), \
		   (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \
		    ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \
		       ADF_RING_CSR_RING_CONFIG_GEN4 + ((ring) << 2), \
		   (value))
/*
 * Program a 64-bit ring base as two 32-bit CSR writes (low then high).
 * Arguments are latched into locals once to avoid multiple evaluation.
 */
#define WRITE_CSR_RING_BASE_GEN4VF(csr_base_addr, bank, ring, value) \
	do { \
		struct resource *_csr_base_addr = csr_base_addr; \
		u32 _bank = bank; \
		u32 _ring = ring; \
		dma_addr_t _value = value; \
		u32 l_base = 0, u_base = 0; \
		l_base = (u32)((_value)&0xFFFFFFFF); \
		u_base = (u32)(((_value)&0xFFFFFFFF00000000ULL) >> 32); \
		ADF_CSR_WR((_csr_base_addr), \
			   (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \
			    ADF_RING_BUNDLE_SIZE_GEN4 * (_bank)) + \
			       ADF_RING_CSR_RING_LBASE_GEN4 + ((_ring) << 2), \
			   l_base); \
		ADF_CSR_WR((_csr_base_addr), \
			   (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \
			    ADF_RING_BUNDLE_SIZE_GEN4 * (_bank)) + \
			       ADF_RING_CSR_RING_UBASE_GEN4 + ((_ring) << 2), \
			   u_base); \
	} while (0)
/*
 * Read back the 64-bit ring base programmed by WRITE_CSR_RING_BASE_GEN4VF.
 *
 * Fix: include ADF_RING_CSR_ADDR_OFFSET_GEN4VF in the address calculation,
 * mirroring the write path and every other accessor in this header.  The
 * offset is currently 0x0, so this is a consistency fix with no behavioral
 * change, but it prevents a silent read/write mismatch if the offset ever
 * becomes non-zero.
 */
static inline u64
read_base_gen4vf(struct resource *csr_base_addr, u32 bank, u32 ring)
{
	u32 l_base, u_base;
	u64 addr;

	l_base = ADF_CSR_RD(csr_base_addr,
			    ADF_RING_CSR_ADDR_OFFSET_GEN4VF +
				(ADF_RING_BUNDLE_SIZE_GEN4 * bank) +
				ADF_RING_CSR_RING_LBASE_GEN4 + (ring << 2));
	u_base = ADF_CSR_RD(csr_base_addr,
			    ADF_RING_CSR_ADDR_OFFSET_GEN4VF +
				(ADF_RING_BUNDLE_SIZE_GEN4 * bank) +
				ADF_RING_CSR_RING_UBASE_GEN4 + (ring << 2));
	addr = (u64)l_base & 0x00000000FFFFFFFFULL;
	addr |= (u64)u_base << 32 & 0xFFFFFFFF00000000ULL;
	return addr;
}
#define WRITE_CSR_INT_SRCSEL_GEN4VF(csr_base_addr, bank) \
	ADF_CSR_WR((csr_base_addr), \
		   ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \
		       ADF_RING_BUNDLE_SIZE_GEN4 * (bank) + \
		       ADF_RING_CSR_INT_SRCSEL, \
		   ADF_BANK_INT_SRC_SEL_MASK_GEN4)
#define READ_CSR_RING_BASE_GEN4VF(csr_base_addr, bank, ring) \
	read_base_gen4vf((csr_base_addr), (bank), (ring))
#define WRITE_CSR_RING_HEAD_GEN4VF(csr_base_addr, bank, ring, value) \
	ADF_CSR_WR((csr_base_addr), \
		   (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \
		    ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \
		       ADF_RING_CSR_RING_HEAD + ((ring) << 2), \
		   (value))
#define WRITE_CSR_RING_TAIL_GEN4VF(csr_base_addr, bank, ring, value) \
	ADF_CSR_WR((csr_base_addr), \
		   (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \
		    ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \
		       ADF_RING_CSR_RING_TAIL + ((ring) << 2), \
		   (value))
#define WRITE_CSR_INT_FLAG_GEN4VF(csr_base_addr, bank, value) \
	ADF_CSR_WR((csr_base_addr), \
		   (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \
		    ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \
		       ADF_RING_CSR_INT_FLAG, \
		   (value))
#define WRITE_CSR_INT_COL_EN_GEN4VF(csr_base_addr, bank, value) \
	ADF_CSR_WR((csr_base_addr), \
		   (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \
		    ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \
		       ADF_RING_CSR_INT_COL_EN, \
		   (value))
#define WRITE_CSR_INT_COL_CTL_GEN4VF(csr_base_addr, bank, value) \
	ADF_CSR_WR((csr_base_addr), \
		   (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \
		    ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \
		       ADF_RING_CSR_INT_COL_CTL, \
		   (value))
#define WRITE_CSR_INT_FLAG_AND_COL_GEN4VF(csr_base_addr, bank, value) \
	ADF_CSR_WR((csr_base_addr), \
		   (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \
		    ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \
		       ADF_RING_CSR_INT_FLAG_AND_COL, \
		   (value))
#define READ_CSR_RING_SRV_ARB_EN_GEN4VF(csr_base_addr, bank) \
	ADF_CSR_RD((csr_base_addr), \
		   (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \
		    ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \
		       ADF_RING_CSR_RING_SRV_ARB_EN)
#define WRITE_CSR_RING_SRV_ARB_EN_GEN4VF(csr_base_addr, bank, value) \
	ADF_CSR_WR((csr_base_addr), \
		   (ADF_RING_CSR_ADDR_OFFSET_GEN4VF + \
		    ADF_RING_BUNDLE_SIZE_GEN4 * (bank)) + \
		       ADF_RING_CSR_RING_SRV_ARB_EN, \
		   (value))
struct adf_hw_csr_info;
void gen4vf_init_hw_csr_info(struct adf_hw_csr_info *csr_info);
#endif /* ADF_GEN4VF_HW_CSR_DATA_H_ */

View file

@ -1,182 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_PF2VF_MSG_H
#define ADF_PF2VF_MSG_H
/*
 * PF<->VF Messaging
 * The PF has an array of 32-bit PF2VF registers, one for each VF. The
 * PF can access all these registers; each VF can access only the one
 * register associated with that particular VF.
 *
 * The register functionally is split into two parts:
 * The bottom half is for PF->VF messages. In particular when the first
 * bit of this register (bit 0) gets set an interrupt will be triggered
 * in the respective VF.
 * The top half is for VF->PF messages. In particular when the first bit
 * of this half of register (bit 16) gets set an interrupt will be triggered
 * in the PF.
 *
 * The remaining bits within this register are available to encode messages.
 * and implement a collision control mechanism to prevent concurrent use of
 * the PF2VF register by both the PF and VF.
 *
 * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
 * _______________________________________________
 * | | | | | | | | | | | | | | | | |
 * +-----------------------------------------------+
 * \___________________________/ \_________/ ^ ^
 * ^ ^ | |
 * | | | VF2PF Int
 * | | Message Origin
 * | Message Type
 * Message-specific Data/Reserved
 *
 * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
 * _______________________________________________
 * | | | | | | | | | | | | | | | | |
 * +-----------------------------------------------+
 * \___________________________/ \_________/ ^ ^
 * ^ ^ | |
 * | | | PF2VF Int
 * | | Message Origin
 * | Message Type
 * Message-specific Data/Reserved
 *
 * Message Origin (Should always be 1)
 * A legacy out-of-tree QAT driver allowed for a set of messages not supported
 * by this driver; these had a Msg Origin of 0 and are ignored by this driver.
 *
 * When a PF or VF attempts to send a message in the lower or upper 16 bits,
 * respectively, the other 16 bits are written to first with a defined
 * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
 */
/* VF/PF compatibility version. */
/* ADF_PFVF_COMPATIBILITY_EXT_CAP: Support for extended capabilities */
#define ADF_PFVF_COMPATIBILITY_CAPABILITIES 2
/* ADF_PFVF_COMPATIBILITY_FAST_ACK: In-use pattern cleared by receiver */
#define ADF_PFVF_COMPATIBILITY_FAST_ACK 3
/* ADF_PFVF_COMPATIBILITY_RING_TO_SVC_MAP: Ring-to-service map exchange */
#define ADF_PFVF_COMPATIBILITY_RING_TO_SVC_MAP 4
#define ADF_PFVF_COMPATIBILITY_VERSION 4 /* PF<->VF compat */
/* PF->VF messages */
#define ADF_PF2VF_INT BIT(0)
#define ADF_PF2VF_MSGORIGIN_SYSTEM BIT(1)
#define ADF_PF2VF_MSGTYPE_MASK 0x0000003C
#define ADF_PF2VF_MSGTYPE_SHIFT 2
#define ADF_PF2VF_MSGTYPE_RESTARTING 0x01
#define ADF_PF2VF_MSGTYPE_VERSION_RESP 0x02
#define ADF_PF2VF_MSGTYPE_BLOCK_RESP 0x03
#define ADF_PF2VF_MSGTYPE_FATAL_ERROR 0x04
#define ADF_PF2VF_IN_USE_BY_PF 0x6AC20000
#define ADF_PF2VF_IN_USE_BY_PF_MASK 0xFFFE0000
/* PF->VF Version Response */
#define ADF_PF2VF_VERSION_RESP_VERS_MASK 0x00003FC0
#define ADF_PF2VF_VERSION_RESP_VERS_SHIFT 6
#define ADF_PF2VF_VERSION_RESP_RESULT_MASK 0x0000C000
#define ADF_PF2VF_VERSION_RESP_RESULT_SHIFT 14
#define ADF_PF2VF_MINORVERSION_SHIFT 6
#define ADF_PF2VF_MAJORVERSION_SHIFT 10
#define ADF_PF2VF_VF_COMPATIBLE 1
#define ADF_PF2VF_VF_INCOMPATIBLE 2
#define ADF_PF2VF_VF_COMPAT_UNKNOWN 3
/* PF->VF Block Request Type */
#define ADF_VF2PF_MIN_SMALL_MESSAGE_TYPE 0
#define ADF_VF2PF_MAX_SMALL_MESSAGE_TYPE (ADF_VF2PF_MIN_SMALL_MESSAGE_TYPE + 15)
#define ADF_VF2PF_MIN_MEDIUM_MESSAGE_TYPE (ADF_VF2PF_MAX_SMALL_MESSAGE_TYPE + 1)
#define ADF_VF2PF_MAX_MEDIUM_MESSAGE_TYPE \
(ADF_VF2PF_MIN_MEDIUM_MESSAGE_TYPE + 7)
#define ADF_VF2PF_MIN_LARGE_MESSAGE_TYPE (ADF_VF2PF_MAX_MEDIUM_MESSAGE_TYPE + 1)
#define ADF_VF2PF_MAX_LARGE_MESSAGE_TYPE (ADF_VF2PF_MIN_LARGE_MESSAGE_TYPE + 3)
#define ADF_VF2PF_SMALL_PAYLOAD_SIZE 30
#define ADF_VF2PF_MEDIUM_PAYLOAD_SIZE 62
#define ADF_VF2PF_LARGE_PAYLOAD_SIZE 126
#define ADF_VF2PF_MAX_BLOCK_TYPE 3
#define ADF_VF2PF_BLOCK_REQ_TYPE_SHIFT 22
#define ADF_VF2PF_LARGE_BLOCK_BYTE_NUM_SHIFT 24
#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_NUM_SHIFT 25
#define ADF_VF2PF_SMALL_BLOCK_BYTE_NUM_SHIFT 26
#define ADF_VF2PF_BLOCK_REQ_CRC_SHIFT 31
#define ADF_VF2PF_LARGE_BLOCK_BYTE_NUM_MASK 0x7F000000
#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_NUM_MASK 0x7E000000
#define ADF_VF2PF_SMALL_BLOCK_BYTE_NUM_MASK 0x7C000000
#define ADF_VF2PF_LARGE_BLOCK_REQ_TYPE_MASK 0xC00000
#define ADF_VF2PF_MEDIUM_BLOCK_REQ_TYPE_MASK 0x1C00000
#define ADF_VF2PF_SMALL_BLOCK_REQ_TYPE_MASK 0x3C00000
/* PF->VF Block Response Type */
#define ADF_PF2VF_BLOCK_RESP_TYPE_DATA 0x0
#define ADF_PF2VF_BLOCK_RESP_TYPE_CRC 0x1
#define ADF_PF2VF_BLOCK_RESP_TYPE_ERROR 0x2
#define ADF_PF2VF_BLOCK_RESP_TYPE_SHIFT 6
#define ADF_PF2VF_BLOCK_RESP_DATA_SHIFT 8
#define ADF_PF2VF_BLOCK_RESP_TYPE_MASK 0x000000C0
#define ADF_PF2VF_BLOCK_RESP_DATA_MASK 0x0000FF00
/* PF-VF block message header bytes */
#define ADF_VF2PF_BLOCK_VERSION_BYTE 0
#define ADF_VF2PF_BLOCK_LEN_BYTE 1
#define ADF_VF2PF_BLOCK_DATA 2
/* PF->VF Block Error Code */
#define ADF_PF2VF_INVALID_BLOCK_TYPE 0x0
#define ADF_PF2VF_INVALID_BYTE_NUM_REQ 0x1
#define ADF_PF2VF_PAYLOAD_TRUNCATED 0x2
#define ADF_PF2VF_UNSPECIFIED_ERROR 0x3
/* VF->PF messages */
#define ADF_VF2PF_IN_USE_BY_VF 0x00006AC2
#define ADF_VF2PF_IN_USE_BY_VF_MASK 0x0000FFFE
#define ADF_VF2PF_INT BIT(16)
#define ADF_VF2PF_MSGORIGIN_SYSTEM BIT(17)
#define ADF_VF2PF_MSGTYPE_MASK 0x003C0000
#define ADF_VF2PF_MSGTYPE_SHIFT 18
#define ADF_VF2PF_MSGTYPE_INIT 0x3
#define ADF_VF2PF_MSGTYPE_SHUTDOWN 0x4
#define ADF_VF2PF_MSGTYPE_VERSION_REQ 0x5
#define ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ 0x6
#define ADF_VF2PF_MSGTYPE_GET_LARGE_BLOCK_REQ 0x7
#define ADF_VF2PF_MSGTYPE_GET_MEDIUM_BLOCK_REQ 0x8
#define ADF_VF2PF_MSGTYPE_GET_SMALL_BLOCK_REQ 0x9
#define ADF_VF2PF_MSGTYPE_NOTIFY 0xa
#define ADF_VF2PF_MSGGENC_RESTARTING_COMPLETE 0x0
/* Block message types
 * 0..15 - 32 byte message
 * 16..23 - 64 byte message
 * 24..27 - 128 byte message
 * 2 - Get Capability Request message
 */
#define ADF_VF2PF_BLOCK_MSG_CAP_SUMMARY 2
#define ADF_VF2PF_BLOCK_MSG_GET_RING_TO_SVC_REQ 0x3
/* VF->PF Compatible Version Request */
#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT 22
/* How long to wait for far side to acknowledge receipt */
#define ADF_IOV_MSG_ACK_DELAY_US 5
#define ADF_IOV_MSG_ACK_EXP_MAX_DELAY_US (5 * 1000)
#define ADF_IOV_MSG_ACK_DELAY_MS 5
#define ADF_IOV_MSG_ACK_LIN_MAX_DELAY_US (2 * 1000 * 1000)
/* If CSR is busy, how long to delay before retrying */
#define ADF_IOV_MSG_RETRY_DELAY 5
#define ADF_IOV_MSG_MAX_RETRIES 10
/* How long to wait for a response from the other side */
#define ADF_IOV_MSG_RESP_TIMEOUT 100
/* How often to retry when there is no response */
#define ADF_IOV_MSG_RESP_RETRIES 5
#define ADF_IOV_RATELIMIT_INTERVAL 8
#define ADF_IOV_RATELIMIT_BURST 130
/* CRC Calculation */
#define ADF_CRC8_INIT_VALUE 0xFF
/* PF VF message byte shift */
#define ADF_PFVF_DATA_SHIFT 8
#define ADF_PFVF_DATA_MASK 0xFF
/* Fixed stale guard comment: the guard macro is ADF_PF2VF_MSG_H, not ADF_IOV_MSG_H. */
#endif /* ADF_PF2VF_MSG_H */

View file

@ -0,0 +1,13 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_PFVF_VF_MSG_H
#define ADF_PFVF_VF_MSG_H
/*
 * VF-side PF/VF messaging entry points: notifications sent from the VF to
 * the PF and requests for version, capability, and ring-to-service data.
 *
 * Fix: forward-declare struct adf_accel_dev so this header is
 * self-contained; previously the struct type first appeared inside the
 * prototypes below, giving it prototype-local scope when this header was
 * included before "adf_accel_devices.h".
 */
struct adf_accel_dev;
int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev);
int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev);
int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev);
#endif /* ADF_PFVF_VF_MSG_H */

View file

@ -6,6 +6,7 @@
#include "qat_freebsd.h"
#include "adf_cfg_common.h"
#include "adf_pfvf_msg.h"
#define ADF_CFG_NUM_SERVICES 4
@ -20,6 +21,7 @@
#define ADF_C4XXX_DEVICE_NAME "c4xxx"
#define ADF_C4XXXVF_DEVICE_NAME "c4xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_4XXXVF_DEVICE_NAME "4xxxvf"
#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
#define ADF_C62X_PCI_DEVICE_ID 0x37c8
@ -33,13 +35,17 @@
#define ADF_C4XXX_PCI_DEVICE_ID 0x18a0
#define ADF_C4XXXIOV_PCI_DEVICE_ID 0x18a1
#define ADF_4XXX_PCI_DEVICE_ID 0x4940
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
#define ADF_401XX_PCI_DEVICE_ID 0x4942
#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
#define IS_QAT_GEN3(ID) ({ (ID == ADF_C4XXX_PCI_DEVICE_ID); })
static inline bool
IS_QAT_GEN4(const unsigned int id)
{
return (id == ADF_4XXX_PCI_DEVICE_ID || id == ADF_401XX_PCI_DEVICE_ID);
return (id == ADF_4XXX_PCI_DEVICE_ID || id == ADF_401XX_PCI_DEVICE_ID ||
id == ADF_4XXXIOV_PCI_DEVICE_ID ||
id == ADF_401XXIOV_PCI_DEVICE_ID);
}
#define IS_QAT_GEN3_OR_GEN4(ID) (IS_QAT_GEN3(ID) || IS_QAT_GEN4(ID))
@ -85,7 +91,7 @@ IS_QAT_GEN4(const unsigned int id)
(((ena_srv_mask) >> (ADF_SRV_TYPE_BIT_LEN * (srv))) & ADF_SRV_TYPE_MASK)
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->csr_info.pfvf_ops)
#define ADF_DEFAULT_RING_TO_SRV_MAP \
(CRYPTO | CRYPTO << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
@ -266,6 +272,9 @@ struct adf_hw_csr_ops {
u32 bank,
u32 ring,
u32 value);
bus_addr_t (*read_csr_ring_base)(struct resource *csr_base_addr,
u32 bank,
u32 ring);
void (*write_csr_ring_base)(struct resource *csr_base_addr,
u32 bank,
u32 ring,
@ -288,15 +297,9 @@ struct adf_hw_csr_ops {
void (*write_csr_ring_srv_arb_en)(struct resource *csr_base_addr,
u32 bank,
u32 value);
};
struct adf_hw_csr_info {
struct adf_hw_csr_ops csr_ops;
u32 csr_addr_offset;
u32 ring_bundle_size;
u32 bank_int_flag_clear_mask;
u32 num_rings_per_int_srcsel;
u32 arb_enable_mask;
u32 (*get_src_sel_mask)(void);
u32 (*get_int_col_ctl_enable_mask)(void);
u32 (*get_bank_irq_mask)(u32 irq_mask);
};
struct adf_cfg_device_data;
@ -304,6 +307,33 @@ struct adf_accel_dev;
struct adf_etr_data;
struct adf_etr_ring_data;
struct adf_pfvf_ops {
int (*enable_comms)(struct adf_accel_dev *accel_dev);
u32 (*get_pf2vf_offset)(u32 i);
u32 (*get_vf2pf_offset)(u32 i);
void (*enable_vf2pf_interrupts)(struct resource *pmisc_addr,
u32 vf_mask);
void (*disable_all_vf2pf_interrupts)(struct resource *pmisc_addr);
u32 (*disable_pending_vf2pf_interrupts)(struct resource *pmisc_addr);
int (*send_msg)(struct adf_accel_dev *accel_dev,
struct pfvf_message msg,
u32 pfvf_offset,
struct mutex *csr_lock);
struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
u32 pfvf_offset,
u8 compat_ver);
};
struct adf_hw_csr_info {
struct adf_hw_csr_ops csr_ops;
struct adf_pfvf_ops pfvf_ops;
u32 csr_addr_offset;
u32 ring_bundle_size;
u32 bank_int_flag_clear_mask;
u32 num_rings_per_int_srcsel;
u32 arb_enable_mask;
};
struct adf_hw_device_data {
struct adf_hw_device_class *dev_class;
uint32_t (*get_accel_mask)(struct adf_accel_dev *accel_dev);
@ -315,9 +345,6 @@ struct adf_hw_device_data {
uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
void (*notify_and_wait_ethernet)(struct adf_accel_dev *accel_dev);
bool (*get_eth_doorbell_msg)(struct adf_accel_dev *accel_dev);
uint32_t (*get_pf2vf_offset)(uint32_t i);
uint32_t (*get_vintmsk_offset)(uint32_t i);
u32 (*get_vintsou_offset)(void);
void (*get_arb_info)(struct arb_info *arb_csrs_info);
void (*get_admin_info)(struct admin_info *admin_csrs_info);
void (*get_errsou_offset)(u32 *errsou3, u32 *errsou5);
@ -352,6 +379,8 @@ struct adf_hw_device_data {
const uint32_t **cfg);
int (*init_device)(struct adf_accel_dev *accel_dev);
int (*get_heartbeat_status)(struct adf_accel_dev *accel_dev);
int (*int_timer_init)(struct adf_accel_dev *accel_dev);
void (*int_timer_exit)(struct adf_accel_dev *accel_dev);
uint32_t (*get_ae_clock)(struct adf_hw_device_data *self);
uint32_t (*get_hb_clock)(struct adf_hw_device_data *self);
void (*disable_iov)(struct adf_accel_dev *accel_dev);
@ -360,8 +389,10 @@ struct adf_hw_device_data {
void (*enable_ints)(struct adf_accel_dev *accel_dev);
bool (*check_slice_hang)(struct adf_accel_dev *accel_dev);
int (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
int (*disable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
void (*enable_pf2vf_interrupt)(struct adf_accel_dev *accel_dev);
void (*disable_pf2vf_interrupt)(struct adf_accel_dev *accel_dev);
int (*interrupt_active_pf2vf)(struct adf_accel_dev *accel_dev);
int (*get_int_active_bundles)(struct adf_accel_dev *accel_dev);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*reset_hw_units)(struct adf_accel_dev *accel_dev);
int (*measure_clock)(struct adf_accel_dev *accel_dev);
@ -378,6 +409,11 @@ struct adf_hw_device_data {
char *aeidstr);
void (*remove_misc_error)(struct adf_accel_dev *accel_dev);
int (*configure_accel_units)(struct adf_accel_dev *accel_dev);
int (*ring_pair_reset)(struct adf_accel_dev *accel_dev,
u32 bank_number);
void (*config_ring_irq)(struct adf_accel_dev *accel_dev,
u32 bank_number,
u16 ring_mask);
uint32_t (*get_objs_num)(struct adf_accel_dev *accel_dev);
const char *(*get_obj_name)(struct adf_accel_dev *accel_dev,
enum adf_accel_unit_services services);
@ -411,7 +447,6 @@ struct adf_hw_device_data {
uint8_t num_accel;
uint8_t num_logical_accel;
uint8_t num_engines;
uint8_t min_iov_compat_ver;
int (*get_storage_enabled)(struct adf_accel_dev *accel_dev,
uint32_t *storage_enabled);
u8 query_storage_cap;
@ -419,6 +454,7 @@ struct adf_hw_device_data {
u8 storage_enable;
u32 extended_dc_capabilities;
int (*config_device)(struct adf_accel_dev *accel_dev);
u32 asym_ae_active_thd_mask;
u16 asym_rings_mask;
int (*get_fw_image_type)(struct adf_accel_dev *accel_dev,
enum adf_cfg_fw_image_type *fw_image_type);
@ -603,6 +639,15 @@ struct adf_fw_versions {
u8 mmp_version_patch;
};
struct adf_int_timer {
struct adf_accel_dev *accel_dev;
struct workqueue_struct *timer_irq_wq;
struct timer_list timer;
u32 timeout_val;
u32 int_cnt;
bool enabled;
};
#define ADF_COMPAT_CHECKER_MAX 8
typedef int (*adf_iov_compat_checker_t)(struct adf_accel_dev *accel_dev,
u8 vf_compat_ver);
@ -620,7 +665,9 @@ struct adf_accel_dev {
struct adf_cfg_device_data *cfg;
struct adf_fw_loader_data *fw_loader;
struct adf_admin_comms *admin;
struct adf_uio_control_accel *accel;
struct adf_heartbeat *heartbeat;
struct adf_int_timer *int_timer;
struct adf_fw_versions fw_versions;
unsigned int autoreset_on_error;
struct adf_fw_counters_data *fw_counters_data;
@ -648,17 +695,18 @@ struct adf_accel_dev {
int num_vfs;
} pf;
struct {
bool irq_enabled;
struct resource *irq;
void *cookie;
char *irq_name;
struct task pf2vf_bh_tasklet;
struct mutex vf2pf_lock; /* protect CSR access */
int iov_msg_completion;
uint8_t compatible;
uint8_t pf_version;
u8 pf2vf_block_byte;
u8 pf2vf_block_resp_type;
struct completion msg_received;
struct pfvf_message
response; /* temp field holding pf2vf response */
enum ring_reset_result rpreset_sts;
struct mutex rpreset_lock; /* protect rpreset_sts */
struct pfvf_stats pfvf_counters;
u8 pf_compat_ver;
} vf;
} u1;
bool is_vf;

View file

@ -9,6 +9,8 @@
#include "adf_cfg_common.h"
#include "adf_cfg_strings.h"
#define ADF_CFG_MAX_VAL 16
struct adf_cfg_key_val {
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
@ -29,6 +31,9 @@ struct adf_cfg_device_data {
struct list_head sec_list;
struct sysctl_oid *debug;
struct sx lock;
char cfg_services[ADF_CFG_MAX_VAL];
char cfg_mode[ADF_CFG_MAX_VAL];
u16 num_user_processes;
};
struct adf_cfg_depot_list {

View file

@ -88,7 +88,10 @@ enum adf_device_type {
DEV_200XXVF,
DEV_C4XXX,
DEV_C4XXXVF,
DEV_4XXX
DEV_D15XX,
DEV_D15XXVF,
DEV_4XXX,
DEV_4XXXVF
};
enum adf_cfg_fw_image_type {
@ -196,9 +199,23 @@ struct adf_cfg_instance {
#define ADF_CFG_DEF_ASYM_MASK 0x03
#define ADF_CFG_MAX_SERVICES 4
#define ADF_CTL_IOC_MAGIC 'a'
#define IOCTL_STATUS_ACCEL_DEV \
_IOWR(ADF_CTL_IOC_MAGIC, 3, struct adf_dev_status_info)
#define IOCTL_RESERVE_RING \
_IOWR(ADF_CTL_IOC_MAGIC, 10, struct adf_user_reserve_ring)
#define IOCTL_RELEASE_RING \
_IOWR(ADF_CTL_IOC_MAGIC, 11, struct adf_user_reserve_ring)
#define IOCTL_ENABLE_RING \
_IOWR(ADF_CTL_IOC_MAGIC, 12, struct adf_user_reserve_ring)
#define IOCTL_DISABLE_RING \
_IOWR(ADF_CTL_IOC_MAGIC, 13, struct adf_user_reserve_ring)
#define IOCTL_GET_NUM_DEVICES _IOR(ADF_CTL_IOC_MAGIC, 4, int32_t)
#define ADF_CFG_HB_DEFAULT_VALUE 500
#define ADF_CFG_HB_COUNT_THRESHOLD 3
#define ADF_MIN_HB_TIMER_MS 100
#define IOCTL_GET_CFG_VAL \
_IOW(ADF_CTL_IOC_MAGIC, 5, struct adf_user_cfg_ctl_data)
enum adf_device_heartbeat_status {
DEV_HB_UNRESPONSIVE = 0,
@ -210,4 +227,6 @@ struct adf_dev_heartbeat_status_ctl {
uint16_t device_id;
enum adf_device_heartbeat_status status;
};
#define IOCTL_HEARTBEAT_ACCEL_DEV \
_IOWR(ADF_CTL_IOC_MAGIC, 9, struct adf_dev_heartbeat_status_ctl)
#endif

View file

@ -7,6 +7,7 @@
#define ADF_GENERAL_SEC "GENERAL"
#define ADF_KERNEL_SEC "KERNEL"
#define ADF_ACCEL_SEC "Accelerator"
#define ADF_SAL_SEC "SSL"
#define ADF_NUM_CY "NumberCyInstances"
#define ADF_NUM_DC "NumberDcInstances"
#define ADF_RING_SYM_SIZE "NumConcurrentSymRequests"
@ -55,6 +56,11 @@
#define ADF_CFG_DC "dc"
#define ADF_CFG_ASYM "asym"
#define ADF_CFG_SYM "sym"
#define ADF_CFG_SYM_ASYM "sym;asym"
#define ADF_CFG_SYM_DC "sym;dc"
#define ADF_CFG_KERNEL_USER "ks;us"
#define ADF_CFG_KERNEL "ks"
#define ADF_CFG_USER "us"
#define ADF_SERVICE_INLINE "inline"
#define ADF_SERVICES_ENABLED "ServicesEnabled"
#define ADF_SERVICES_SEPARATOR ";"

View file

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2023 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_CFG_SYSCTL_H_
#define ADF_CFG_SYSCTL_H_
#include "adf_accel_devices.h"
/*
 * Create and remove the per-device configuration sysctl nodes for a QAT
 * accelerator device. adf_cfg_sysctl_add returns 0 on success; a non-zero
 * value presumably indicates failure — confirm against the implementation.
 */
int adf_cfg_sysctl_add(struct adf_accel_dev *accel_dev);
void adf_cfg_sysctl_remove(struct adf_accel_dev *accel_dev);
#endif /* ADF_CFG_SYSCTL_H_ */

View file

@ -9,7 +9,10 @@
#include "icp_qat_fw_loader_handle.h"
#include "icp_qat_hal.h"
#include "adf_cfg_user.h"
#include "adf_uio.h"
#include "adf_uio_control.h"
#define QAT_UIO_IOC_MAGIC 'b'
#define ADF_MAJOR_VERSION 0
#define ADF_MINOR_VERSION 6
#define ADF_BUILD_VERSION 0
@ -17,6 +20,10 @@
__stringify(ADF_MAJOR_VERSION) "." __stringify( \
ADF_MINOR_VERSION) "." __stringify(ADF_BUILD_VERSION)
#define IOCTL_GET_BUNDLE_SIZE _IOR(QAT_UIO_IOC_MAGIC, 0, int32_t)
#define IOCTL_ALLOC_BUNDLE _IOW(QAT_UIO_IOC_MAGIC, 1, int)
#define IOCTL_GET_ACCEL_TYPE _IOR(QAT_UIO_IOC_MAGIC, 2, uint32_t)
#define IOCTL_ADD_MEM_FD _IOW(QAT_UIO_IOC_MAGIC, 3, int)
#define ADF_STATUS_RESTARTING 0
#define ADF_STATUS_STARTING 1
#define ADF_STATUS_CONFIGURED 2
@ -81,43 +88,7 @@ int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
void adf_error_notifier(uintptr_t arg);
int adf_init_fatal_error_wq(void);
void adf_exit_fatal_error_wq(void);
int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
int adf_iov_notify(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
int adf_notify_fatal_error(struct adf_accel_dev *accel_dev);
void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev);
void adf_pf2vf_notify_uncorrectable_error(struct adf_accel_dev *accel_dev);
void adf_pf2vf_notify_heartbeat_error(struct adf_accel_dev *accel_dev);
typedef int (*adf_iov_block_provider)(struct adf_accel_dev *accel_dev,
u8 **buffer,
u8 *length,
u8 *block_version,
u8 compatibility,
u8 byte_num);
int adf_iov_block_provider_register(u8 block_type,
const adf_iov_block_provider provider);
u8 adf_iov_is_block_provider_registered(u8 block_type);
int adf_iov_block_provider_unregister(u8 block_type,
const adf_iov_block_provider provider);
int adf_iov_block_get(struct adf_accel_dev *accel_dev,
u8 block_type,
u8 *block_version,
u8 *buffer,
u8 *length);
u8 adf_pfvf_crc(u8 start_crc, u8 *buf, u8 len);
int adf_iov_init_compat_manager(struct adf_accel_dev *accel_dev,
struct adf_accel_compat_manager **cm);
int adf_iov_shutdown_compat_manager(struct adf_accel_dev *accel_dev,
struct adf_accel_compat_manager **cm);
int adf_iov_register_compat_checker(struct adf_accel_dev *accel_dev,
const adf_iov_compat_checker_t cc);
int adf_iov_unregister_compat_checker(struct adf_accel_dev *accel_dev,
const adf_iov_compat_checker_t cc);
int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
int adf_pf_disable_vf2pf_comms(struct adf_accel_dev *accel_dev);
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
int adf_disable_vf2pf_comms(struct adf_accel_dev *accel_dev);
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info);
void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
void adf_clean_vf_map(bool);
int adf_sysctl_add_fw_versions(struct adf_accel_dev *accel_dev);
@ -125,19 +96,12 @@ int adf_sysctl_remove_fw_versions(struct adf_accel_dev *accel_dev);
int adf_ctl_dev_register(void);
void adf_ctl_dev_unregister(void);
int adf_pf_vf_capabilities_init(struct adf_accel_dev *accel_dev);
int adf_pf_ext_dc_cap_msg_provider(struct adf_accel_dev *accel_dev,
u8 **buffer,
u8 *length,
u8 *block_version,
u8 compatibility);
int adf_pf_vf_ring_to_svc_init(struct adf_accel_dev *accel_dev);
int adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
u8 **buffer,
u8 *length,
u8 *block_version,
u8 compatibility,
u8 byte_num);
int adf_register_ctl_device_driver(void);
void adf_unregister_ctl_device_driver(void);
int adf_processes_dev_register(void);
void adf_processes_dev_unregister(void);
void adf_state_init(void);
void adf_state_destroy(void);
int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
struct adf_accel_dev *pf);
void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
@ -212,6 +176,7 @@ void adf_disable_ring_arb(struct adf_accel_dev *accel_dev,
unsigned int bank_nr,
unsigned int mask);
int adf_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
void adf_update_uio_ring_arb(struct adf_uio_control_bundle *bundle);
struct adf_accel_dev *adf_devmgr_get_dev_by_bdf(struct adf_pci_address *addr);
struct adf_accel_dev *adf_devmgr_get_dev_by_pci_bus(u8 bus);
int adf_get_vf_nr(struct adf_pci_address *vf_pci_addr, int *vf_nr);
@ -239,7 +204,7 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev);
int qat_hal_init(struct adf_accel_dev *accel_dev);
void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
int qat_hal_start(struct icp_qat_fw_loader_handle *handle);
@ -334,13 +299,13 @@ int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
void qat_hal_get_scs_neigh_ae(unsigned char ae, unsigned char *ae_neigh);
int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
unsigned int cfg_ae_mask);
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
int adf_init_vf_wq(void);
void adf_exit_vf_wq(void);
void adf_flush_vf_wq(void);
int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
void adf_flush_vf_wq(struct adf_accel_dev *accel_dev);
int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev);
int adf_pf2vf_handle_pf_rp_reset(struct adf_accel_dev *accel_dev,
struct pfvf_message msg);
bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev);
static inline int
adf_sriov_configure(device_t *pdev, int numvfs)
{

View file

@ -23,6 +23,7 @@
#define ADF_RING_CSR_INT_COL_CTL 0x180
#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
#define ADF_RING_CSR_ADDR_OFFSET 0x0
#define ADF_RING_BUNDLE_SIZE 0x1000
#define ADF_GEN2_RX_RINGS_OFFSET 8
#define ADF_GEN2_TX_RINGS_MASK 0xFF
@ -45,6 +46,29 @@
(ADF_RING_BUNDLE_SIZE * (bank)) + \
ADF_RING_CSR_RING_CONFIG + ((ring) << 2), \
value)
/*
 * Read back the 64-bit ring base address for the given bank/ring pair by
 * combining the LBASE (low 32 bits) and UBASE (high 32 bits) CSRs.
 */
static inline uint64_t
read_base(struct resource *csr_base_addr, u32 bank, u32 ring)
{
	u32 l_base, u_base;

	/* The low and high halves live in separate per-ring CSRs. */
	l_base = ADF_CSR_RD(csr_base_addr,
			    (ADF_RING_BUNDLE_SIZE * bank) +
				ADF_RING_CSR_RING_LBASE + (ring << 2));
	u_base = ADF_CSR_RD(csr_base_addr,
			    (ADF_RING_BUNDLE_SIZE * bank) +
				ADF_RING_CSR_RING_UBASE + (ring << 2));

	/*
	 * Widen before shifting.  The AND masks in the previous version were
	 * redundant: both halves are exactly 32 bits wide, so the low mask
	 * and the post-shift high mask were no-ops.
	 */
	return (((uint64_t)u_base << 32) | l_base);
}
#define READ_CSR_RING_BASE(csr_base_addr, bank, ring) \
read_base(csr_base_addr, bank, ring)
#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
do { \
u32 l_base = 0, u_base = 0; \

View file

@ -23,6 +23,22 @@
#define ADF_RING_CSR_ADDR_OFFSET 0x100000
#define ADF_RING_BUNDLE_SIZE 0x2000
/* Ring reset */
#define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
#define ADF_RPRESET_POLL_DELAY_US 20
#define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0)
#define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + ((bank) << 3))
#define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0)
#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
#define ADF_WQM_CSR_RPRESETCTL_SHIFT 0
#define ADF_WQM_CSR_RPRESETCTL_DRAIN_SHIFT 2
#define ADF_WQM_CSR_RPRESETCTL_MASK (BIT(3) - 1)
#define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + ((bank) << 3))
#define ADF_WQM_CSR_RPRESETSTS_SHIFT 0
#define ADF_WQM_CSR_RPRESETSTS_MASK (BIT(0))
#define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4)
#define BUILD_RING_BASE_ADDR(addr, size) \
((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6)
#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
@ -63,6 +79,30 @@
u_base); \
} while (0)
/*
 * Gen4 variant of read_base(): the ring CSRs sit behind an additional
 * ADF_RING_CSR_ADDR_OFFSET and use the Gen4 bundle stride.  Combines the
 * LBASE (low 32 bits) and UBASE (high 32 bits) CSRs into one 64-bit address.
 */
static inline u64
read_base_gen4(struct resource *csr_base_addr, u32 bank, u32 ring)
{
	u32 l_base, u_base;

	l_base = ADF_CSR_RD(csr_base_addr,
			    ADF_RING_CSR_ADDR_OFFSET +
				(ADF_RING_BUNDLE_SIZE * bank) +
				ADF_RING_CSR_RING_LBASE + (ring << 2));
	u_base = ADF_CSR_RD(csr_base_addr,
			    ADF_RING_CSR_ADDR_OFFSET +
				(ADF_RING_BUNDLE_SIZE * bank) +
				ADF_RING_CSR_RING_UBASE + (ring << 2));

	/*
	 * Widen before shifting.  The AND masks in the previous version were
	 * redundant since both CSR halves are exactly 32 bits wide.
	 */
	return (((u64)u_base << 32) | l_base);
}
#define READ_CSR_RING_BASE(csr_base_addr, bank, ring) \
read_base_gen4((csr_base_addr), (bank), (ring))
#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
ADF_CSR_WR((csr_base_addr), \
ADF_RING_CSR_ADDR_OFFSET + ADF_RING_BUNDLE_SIZE * (bank) + \
@ -129,4 +169,5 @@
int adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
void adf_gen4_init_hw_csr_info(struct adf_hw_csr_info *csr_info);
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
#endif

View file

@ -0,0 +1,260 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_PFVF_MSG_H
#define ADF_PFVF_MSG_H
/*
* PF<->VF Gen2 Messaging format
*
* The PF has an array of 32-bit PF2VF registers, one for each VF. The
* PF can access all these registers while each VF can access only the one
* register associated with that particular VF.
*
* The register functionally is split into two parts:
* The bottom half is for PF->VF messages. In particular when the first
* bit of this register (bit 0) gets set an interrupt will be triggered
* in the respective VF.
* The top half is for VF->PF messages. In particular when the first bit
* of this half of register (bit 16) gets set an interrupt will be triggered
* in the PF.
*
 * The remaining bits within this register are available to encode messages
 * and implement a collision control mechanism to prevent concurrent use of
* the PF2VF register by both the PF and VF.
*
* 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
* _______________________________________________
* | | | | | | | | | | | | | | | | |
* +-----------------------------------------------+
* \___________________________/ \_________/ ^ ^
* ^ ^ | |
* | | | VF2PF Int
* | | Message Origin
* | Message Type
* Message-specific Data/Reserved
*
* 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
* _______________________________________________
* | | | | | | | | | | | | | | | | |
* +-----------------------------------------------+
* \___________________________/ \_________/ ^ ^
* ^ ^ | |
* | | | PF2VF Int
* | | Message Origin
* | Message Type
* Message-specific Data/Reserved
*
* Message Origin (Should always be 1)
* A legacy out-of-tree QAT driver allowed for a set of messages not supported
* by this driver; these had a Msg Origin of 0 and are ignored by this driver.
*
* When a PF or VF attempts to send a message in the lower or upper 16 bits,
* respectively, the other 16 bits are written to first with a defined
* IN_USE_BY pattern as part of a collision control scheme (see function
* adf_gen2_pfvf_send() in adf_pf2vf_msg.c).
*
*
* PF<->VF Gen4 Messaging format
*
* Similarly to the gen2 messaging format, 32-bit long registers are used for
 * communication between PF and VFs. However, each VF and PF share a pair of
 * 32-bit registers to avoid collisions: one for PF to VF messages and one
* for VF to PF messages.
*
* Both the Interrupt bit and the Message Origin bit retain the same position
* and meaning, although non-system messages are now deprecated and not
* expected.
*
* 31 30 9 8 7 6 5 4 3 2 1 0
* _______________________________________________
* | | | . . . | | | | | | | | | | |
* +-----------------------------------------------+
* \_____________________/ \_______________/ ^ ^
* ^ ^ | |
* | | | PF/VF Int
* | | Message Origin
* | Message Type
* Message-specific Data/Reserved
*
* For both formats, the message reception is acknowledged by lowering the
* interrupt bit on the register where the message was sent.
*/
/* PFVF message common bits */
#define ADF_PFVF_INT BIT(0)
#define ADF_PFVF_MSGORIGIN_SYSTEM BIT(1)
/* Different generations have different CSR layouts, use this struct
* to abstract these differences away
*/
struct pfvf_message {
	u8 type;	/* Message type; see enum pf2vf_msgtype / vf2pf_msgtype */
	u32 data;	/* Message-specific payload, laid out per the CSR format */
};
/* PF->VF messages */
enum pf2vf_msgtype {
ADF_PF2VF_MSGTYPE_RESTARTING = 0x01,
ADF_PF2VF_MSGTYPE_VERSION_RESP = 0x02,
ADF_PF2VF_MSGTYPE_BLKMSG_RESP = 0x03,
/* Values from 0x10 are Gen4 specific, message type is only 4 bits in
Gen2 devices. */
ADF_PF2VF_MSGTYPE_RP_RESET_RESP = 0x10,
};
/* VF->PF messages */
enum vf2pf_msgtype {
ADF_VF2PF_MSGTYPE_INIT = 0x03,
ADF_VF2PF_MSGTYPE_SHUTDOWN = 0x04,
ADF_VF2PF_MSGTYPE_VERSION_REQ = 0x05,
ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ = 0x06,
ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ = 0x07,
ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ = 0x08,
ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ = 0x09,
/* Values from 0x10 are Gen4 specific, message type is only 4 bits in
Gen2 devices. */
ADF_VF2PF_MSGTYPE_RP_RESET = 0x10,
};
/* VF/PF compatibility version. */
enum pfvf_compatibility_version {
/* Support for extended capabilities */
ADF_PFVF_COMPAT_CAPABILITIES = 0x02,
/* In-use pattern cleared by receiver */
ADF_PFVF_COMPAT_FAST_ACK = 0x03,
/* Ring to service mapping support for non-standard mappings */
ADF_PFVF_COMPAT_RING_TO_SVC_MAP = 0x04,
/* Reference to the latest version */
ADF_PFVF_COMPAT_THIS_VERSION = 0x04,
};
/* PF->VF Version Response */
#define ADF_PF2VF_VERSION_RESP_VERS_MASK GENMASK(7, 0)
#define ADF_PF2VF_VERSION_RESP_RESULT_MASK GENMASK(9, 8)
enum pf2vf_compat_response {
ADF_PF2VF_VF_COMPATIBLE = 0x01,
ADF_PF2VF_VF_INCOMPATIBLE = 0x02,
ADF_PF2VF_VF_COMPAT_UNKNOWN = 0x03,
};
enum ring_reset_result {
RPRESET_SUCCESS = 0x00,
RPRESET_NOT_SUPPORTED = 0x01,
RPRESET_INVAL_BANK = 0x02,
RPRESET_TIMEOUT = 0x03,
};
#define ADF_VF2PF_RNG_RESET_RP_MASK GENMASK(1, 0)
#define ADF_VF2PF_RNG_RESET_RSVD_MASK GENMASK(25, 2)
/* PF->VF Block Responses */
#define ADF_PF2VF_BLKMSG_RESP_TYPE_MASK GENMASK(1, 0)
#define ADF_PF2VF_BLKMSG_RESP_DATA_MASK GENMASK(9, 2)
enum pf2vf_blkmsg_resp_type {
ADF_PF2VF_BLKMSG_RESP_TYPE_DATA = 0x00,
ADF_PF2VF_BLKMSG_RESP_TYPE_CRC = 0x01,
ADF_PF2VF_BLKMSG_RESP_TYPE_ERROR = 0x02,
};
/* PF->VF Block Error Code */
enum pf2vf_blkmsg_error {
ADF_PF2VF_INVALID_BLOCK_TYPE = 0x00,
ADF_PF2VF_INVALID_BYTE_NUM_REQ = 0x01,
ADF_PF2VF_PAYLOAD_TRUNCATED = 0x02,
ADF_PF2VF_UNSPECIFIED_ERROR = 0x03,
};
/* VF->PF Block Requests */
#define ADF_VF2PF_LARGE_BLOCK_TYPE_MASK GENMASK(1, 0)
#define ADF_VF2PF_LARGE_BLOCK_BYTE_MASK GENMASK(8, 2)
#define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK GENMASK(2, 0)
#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK GENMASK(8, 3)
#define ADF_VF2PF_SMALL_BLOCK_TYPE_MASK GENMASK(3, 0)
#define ADF_VF2PF_SMALL_BLOCK_BYTE_MASK GENMASK(8, 4)
#define ADF_VF2PF_BLOCK_CRC_REQ_MASK BIT(9)
/* PF->VF Block Request Types
* 0..15 - 32 byte message
* 16..23 - 64 byte message
* 24..27 - 128 byte message
*/
enum vf2pf_blkmsg_req_type {
ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY = 0x02,
ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP = 0x03,
};
#define ADF_VF2PF_SMALL_BLOCK_TYPE_MAX \
(FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK))
#define ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX \
(FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK) + \
ADF_VF2PF_SMALL_BLOCK_TYPE_MAX + 1)
#define ADF_VF2PF_LARGE_BLOCK_TYPE_MAX \
(FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK) + \
ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX)
#define ADF_VF2PF_SMALL_BLOCK_BYTE_MAX \
FIELD_MAX(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK)
#define ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX \
FIELD_MAX(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK)
#define ADF_VF2PF_LARGE_BLOCK_BYTE_MAX \
FIELD_MAX(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK)
struct pfvf_blkmsg_header {
u8 version;
u8 payload_size;
} __packed;
#define ADF_PFVF_BLKMSG_HEADER_SIZE (sizeof(struct pfvf_blkmsg_header))
#define ADF_PFVF_BLKMSG_PAYLOAD_SIZE(blkmsg) \
(sizeof(blkmsg) - ADF_PFVF_BLKMSG_HEADER_SIZE)
#define ADF_PFVF_BLKMSG_MSG_SIZE(blkmsg) \
(ADF_PFVF_BLKMSG_HEADER_SIZE + (blkmsg)->hdr.payload_size)
#define ADF_PFVF_BLKMSG_MSG_MAX_SIZE 128
/* PF->VF Block message header bytes */
#define ADF_PFVF_BLKMSG_VER_BYTE 0
#define ADF_PFVF_BLKMSG_LEN_BYTE 1
/* PF/VF Capabilities message values */
enum blkmsg_capabilities_versions {
ADF_PFVF_CAPABILITIES_V1_VERSION = 0x01,
ADF_PFVF_CAPABILITIES_V2_VERSION = 0x02,
ADF_PFVF_CAPABILITIES_V3_VERSION = 0x03,
};
/* Capability summary, version 1: extended compression capabilities only. */
struct capabilities_v1 {
	struct pfvf_blkmsg_header hdr;
	u32 ext_dc_caps;	/* Extended compression capabilities */
} __packed;
/* Version 2 adds the general device capability mask. */
struct capabilities_v2 {
	struct pfvf_blkmsg_header hdr;
	u32 ext_dc_caps;	/* Extended compression capabilities */
	u32 capabilities;	/* Device capability mask */
} __packed;
/* Version 3 additionally reports the device frequency. */
struct capabilities_v3 {
	struct pfvf_blkmsg_header hdr;
	u32 ext_dc_caps;	/* Extended compression capabilities */
	u32 capabilities;	/* Device capability mask */
	u32 frequency;		/* Device frequency -- units not shown here; TODO confirm */
} __packed;
/* PF/VF Ring to service mapping values */
enum blkmsg_ring_to_svc_versions {
ADF_PFVF_RING_TO_SVC_VERSION = 0x01,
};
struct ring_to_svc_map_v1 {
struct pfvf_blkmsg_header hdr;
u16 map;
} __packed;
#endif /* ADF_PFVF_MSG_H */

View file

@ -0,0 +1,44 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_PFVF_UTILS_H
#define ADF_PFVF_UTILS_H
#include <linux/types.h>
#include "adf_pfvf_msg.h"
/* How long to wait for far side to acknowledge receipt */
#define ADF_PFVF_MSG_ACK_DELAY_US 4
#define ADF_PFVF_MSG_ACK_MAX_DELAY_US (1 * USEC_PER_SEC)
u8 adf_pfvf_calc_blkmsg_crc(u8 const *buf, u8 buf_len);
struct pfvf_field_format {
u8 offset;
u32 mask;
};
struct pfvf_csr_format {
struct pfvf_field_format type;
struct pfvf_field_format data;
};
u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev,
struct pfvf_message msg,
const struct pfvf_csr_format *fmt);
struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev,
u32 raw_msg,
const struct pfvf_csr_format *fmt);
/* Return the mapped resource for the device's miscellaneous-register BAR. */
static inline struct resource *
adf_get_pmisc_base(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *misc_bar =
	    &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];

	return misc_bar->virt_addr;
}
#endif /* ADF_PFVF_UTILS_H */

View file

@ -0,0 +1,32 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_PFVF_VF_PROTO_H
#define ADF_PFVF_VF_PROTO_H
#include <linux/types.h>
#include "adf_accel_devices.h"
#define ADF_PFVF_MSG_COLLISION_DETECT_DELAY 10
#define ADF_PFVF_MSG_ACK_DELAY 2
#define ADF_PFVF_MSG_ACK_MAX_RETRY 100
/* How often to retry if there is no response */
#define ADF_PFVF_MSG_RESP_RETRIES 5
#define ADF_PFVF_MSG_RESP_TIMEOUT \
(ADF_PFVF_MSG_ACK_DELAY * ADF_PFVF_MSG_ACK_MAX_RETRY + \
ADF_PFVF_MSG_COLLISION_DETECT_DELAY)
int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev,
struct pfvf_message msg);
int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev,
struct pfvf_message msg,
struct pfvf_message *resp);
int adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev,
u8 type,
u8 *buffer,
unsigned int *buffer_len);
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
#endif /* ADF_PFVF_VF_PROTO_H */

View file

@ -0,0 +1,17 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2023 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_UIO_H
#define ADF_UIO_H
#include "adf_accel_devices.h"
/* Per-bundle UIO device state, linking a bundle back to its accelerator. */
struct qat_uio_bundle_dev {
	u8 hardware_bundle_number;		/* HW bundle index served by this node */
	struct adf_uio_control_bundle *bundle;	/* Back-pointer to owning bundle */
	struct adf_uio_control_accel *accel;	/* Back-pointer to owning accelerator */
};
int adf_uio_register(struct adf_accel_dev *accel_dev);
void adf_uio_remove(struct adf_accel_dev *accel_dev);
#endif /* end of include guard: ADF_UIO_H */

View file

@ -0,0 +1,11 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2023 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_UIO_CLEANUP_H
#define ADF_UIO_CLEANUP_H
void adf_uio_do_cleanup_orphan(int bank,
struct adf_uio_control_accel *accel);
#endif

View file

@ -0,0 +1,43 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2023 Intel Corporation */
/* $FreeBSD$ */
#ifndef QAT_UIO_CONTROL_H
#define QAT_UIO_CONTROL_H
#include <sys/condvar.h>
/* Rings claimed by one user process on a bundle. */
struct adf_uio_instance_rings {
	unsigned int user_pid;	/* presumably the owning process pid -- TODO confirm */
	u16 ring_mask;		/* Rings held by that process */
	struct list_head list;
};
/* State of a single UIO-exposed ring bundle. */
struct adf_uio_control_bundle {
	uint8_t hardware_bundle_number;	/* HW bundle index */
	bool used;
	struct list_head list;
	struct mutex list_lock; /* protects list struct */
	struct mutex lock; /* protects rings_used and csr_addr */
	u16 rings_used;		/* Mask of rings currently in use */
	u32 rings_enabled;
	void *csr_addr;		/* Mapped CSR region for this bundle */
	struct qat_uio_bundle_dev uio_priv;
	vm_object_t obj;	/* VM object backing user mappings */
};
/* Per-accelerator UIO state; allocated with a trailing bundle array. */
struct adf_uio_control_accel {
	struct adf_accel_dev *accel_dev;
	struct cdev *cdev;	/* Character device exposed to userspace */
	struct mtx lock;
	struct adf_bar *bar;
	unsigned int nb_bundles;
	unsigned int num_ker_bundles;
	unsigned int total_used_bundles;
	unsigned int num_handles;
	struct cv cleanup_ok;	/* Signalled when orphan cleanup may proceed */
	/* bundle[] must be last to allow dynamic size allocation. */
	/* NOTE(review): [0] is the GNU zero-length-array idiom; C99 would use
	 * a flexible array member (bundle[]) -- confirm before changing. */
	struct adf_uio_control_bundle bundle[0];
};
#endif /* end of include guard: QAT_UIO_CONTROL_H */

View file

@ -10,6 +10,7 @@
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
/* Cryptodev headers */
#include <opencrypto/cryptodev.h>
@ -44,6 +45,8 @@ MALLOC_DEFINE(M_QAT_OCF, "qat_ocf", "qat_ocf(4) memory allocations");
/* QAT OCF internal structures */
struct qat_ocf_softc {
device_t sc_dev;
struct sysctl_oid *rc;
uint32_t enabled;
int32_t cryptodev_id;
struct qat_ocf_instance cyInstHandles[QAT_OCF_MAX_INSTANCES];
int32_t numCyInstances;
@ -560,17 +563,22 @@ qat_ocf_newsession(device_t dev,
/* Create cryptodev session */
qat_softc = device_get_softc(dev);
qat_instance =
&qat_softc->cyInstHandles[cpu_id % qat_softc->numCyInstances];
qat_dsession = crypto_get_driver_session(cses);
if (NULL == qat_dsession) {
device_printf(dev, "Unable to create new session\n");
return (EINVAL);
}
if (qat_softc->numCyInstances > 0) {
qat_instance =
&qat_softc
->cyInstHandles[cpu_id % qat_softc->numCyInstances];
qat_dsession = crypto_get_driver_session(cses);
if (NULL == qat_dsession) {
device_printf(dev, "Unable to create new session\n");
return (EINVAL);
}
/* Add only instance at this point remaining operations moved to
* lazy session init */
qat_dsession->qatInstance = qat_instance;
/* Add only instance at this point remaining operations moved to
* lazy session init */
qat_dsession->qatInstance = qat_instance;
} else {
return ENXIO;
}
return 0;
}
@ -988,6 +996,10 @@ qat_ocf_get_irq_instances(CpaInstanceHandle *cyInstHandles,
if (NULL == baseAddr)
continue;
listTemp = baseAddr->sym_services;
if (NULL == listTemp) {
listTemp = baseAddr->crypto_services;
}
while (NULL != listTemp) {
cyInstHandle = SalList_getObject(listTemp);
status = cpaCyInstanceGetInfo2(cyInstHandle, &info);
@ -1023,8 +1035,6 @@ qat_ocf_start_instances(struct qat_ocf_softc *qat_softc, device_t dev)
&numInstances);
if (CPA_STATUS_SUCCESS != status)
return status;
if (0 == numInstances)
return CPA_STATUS_RESOURCE;
for (i = 0; i < numInstances; i++) {
struct qat_ocf_instance *qat_ocf_instance;
@ -1041,11 +1051,18 @@ qat_ocf_start_instances(struct qat_ocf_softc *qat_softc, device_t dev)
continue;
}
qat_ocf_instance = &qat_softc->cyInstHandles[startedInstances];
qat_ocf_instance->cyInstHandle = cyInstHandle;
mtx_init(&qat_ocf_instance->cyInstMtx,
"Instance MTX",
NULL,
MTX_DEF);
status =
cpaCySetAddressTranslation(cyInstHandle, qatVirtToPhys);
if (CPA_STATUS_SUCCESS != status) {
device_printf(qat_softc->sc_dev,
"unable to add virt to phys callback");
"unable to add virt to phys callback\n");
goto fail;
}
@ -1056,13 +1073,6 @@ qat_ocf_start_instances(struct qat_ocf_softc *qat_softc, device_t dev)
goto fail;
}
qat_ocf_instance = &qat_softc->cyInstHandles[startedInstances];
qat_ocf_instance->cyInstHandle = cyInstHandle;
mtx_init(&qat_ocf_instance->cyInstMtx,
"Instance MTX",
NULL,
MTX_DEF);
/* Initialize cookie pool */
status = qat_ocf_cookie_pool_init(qat_ocf_instance, dev);
if (CPA_STATUS_SUCCESS != status) {
@ -1085,19 +1095,16 @@ qat_ocf_start_instances(struct qat_ocf_softc *qat_softc, device_t dev)
startedInstances++;
continue;
fail:
mtx_destroy(&qat_ocf_instance->cyInstMtx);
/* Stop instance */
status = cpaCyStopInstance(cyInstHandle);
if (CPA_STATUS_SUCCESS != status)
device_printf(qat_softc->sc_dev,
"unable to stop the instance\n");
continue;
}
qat_softc->numCyInstances = startedInstances;
/* Success if at least one instance has been set */
if (!qat_softc->numCyInstances)
return CPA_STATUS_FAIL;
return CPA_STATUS_SUCCESS;
}
@ -1114,45 +1121,136 @@ qat_ocf_stop_instances(struct qat_ocf_softc *qat_softc)
status = cpaCyStopInstance(qat_instance->cyInstHandle);
if (CPA_STATUS_SUCCESS != status) {
pr_err("QAT: stopping instance id: %d failed\n", i);
mtx_unlock(&qat_instance->cyInstMtx);
continue;
}
qat_ocf_cookie_pool_deinit(qat_instance);
mtx_destroy(&qat_instance->cyInstMtx);
}
qat_softc->numCyInstances = 0;
return status;
}
static int
qat_ocf_deinit(struct qat_ocf_softc *qat_softc)
{
int status = 0;
CpaStatus cpaStatus;
if (qat_softc->cryptodev_id >= 0) {
crypto_unregister_all(qat_softc->cryptodev_id);
qat_softc->cryptodev_id = -1;
}
/* Stop QAT instances */
cpaStatus = qat_ocf_stop_instances(qat_softc);
if (CPA_STATUS_SUCCESS != cpaStatus) {
device_printf(qat_softc->sc_dev, "unable to stop instances\n");
status = EIO;
}
return status;
}
/*
 * Bring up OCF support: start the available QAT crypto instances and, if
 * any started, register this driver with the crypto framework.  Rolls back
 * partial state via qat_ocf_deinit() on failure and returns ENXIO.
 */
static int
qat_ocf_init(struct qat_ocf_softc *qat_softc)
{
	int32_t driver_id;

	/* Starting instances for OCF */
	if (qat_ocf_start_instances(qat_softc, qat_softc->sc_dev)) {
		device_printf(qat_softc->sc_dev,
			      "unable to get QAT IRQ instances\n");
		goto fail;
	}

	/* Register with cryptodev only when at least one instance started. */
	if (qat_softc->numCyInstances) {
		driver_id =
		    crypto_get_driverid(qat_softc->sc_dev,
					sizeof(struct qat_ocf_dsession),
					CRYPTOCAP_F_HARDWARE);
		if (driver_id < 0) {
			device_printf(qat_softc->sc_dev,
				      "cannot initialize!\n");
			goto fail;
		}
		qat_softc->cryptodev_id = driver_id;
	}

	return 0;
fail:
	qat_ocf_deinit(qat_softc);
	return ENXIO;
}
static int qat_ocf_sysctl_handle(SYSCTL_HANDLER_ARGS)
{
struct qat_ocf_softc *qat_softc = NULL;
int ret = 0;
device_t dev = arg1;
u_int enabled;
qat_softc = device_get_softc(dev);
enabled = qat_softc->enabled;
ret = sysctl_handle_int(oidp, &enabled, 0, req);
if (ret || !req->newptr)
return (ret);
if (qat_softc->enabled != enabled) {
if (enabled) {
ret = qat_ocf_init(qat_softc);
} else {
ret = qat_ocf_deinit(qat_softc);
}
if (!ret)
qat_softc->enabled = enabled;
}
return ret;
}
static int
qat_ocf_attach(device_t dev)
{
int status;
struct qat_ocf_softc *qat_softc;
int32_t cryptodev_id;
qat_softc = device_get_softc(dev);
qat_softc->sc_dev = dev;
qat_softc->cryptodev_id = -1;
qat_softc->enabled = 1;
cryptodev_id = crypto_get_driverid(dev,
sizeof(struct qat_ocf_dsession),
CRYPTOCAP_F_HARDWARE);
if (cryptodev_id < 0) {
device_printf(dev, "cannot initialize!\n");
goto fail;
}
qat_softc->cryptodev_id = cryptodev_id;
qat_softc->rc =
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO,
"enable",
CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
dev,
0,
qat_ocf_sysctl_handle,
"I",
"QAT OCF support enablement");
/* Starting instances for OCF */
status = qat_ocf_start_instances(qat_softc, dev);
if (status) {
device_printf(dev, "no QAT IRQ instances available\n");
goto fail;
if (!qat_softc->rc)
return ENOMEM;
if (qat_softc->enabled) {
status = qat_ocf_init(qat_softc);
if (status) {
device_printf(dev, "qat_ocf init failed\n");
goto fail;
}
}
return 0;
fail:
qat_ocf_detach(dev);
qat_ocf_deinit(qat_softc);
return (ENXIO);
}
@ -1160,27 +1258,9 @@ qat_ocf_attach(device_t dev)
static int
qat_ocf_detach(device_t dev)
{
struct qat_ocf_softc *qat_softc = NULL;
CpaStatus cpaStatus;
int status = 0;
struct qat_ocf_softc *qat_softc = device_get_softc(dev);
qat_softc = device_get_softc(dev);
if (qat_softc->cryptodev_id >= 0) {
status = crypto_unregister_all(qat_softc->cryptodev_id);
if (status)
device_printf(dev,
"unable to unregister QAt backend\n");
}
/* Stop QAT instances */
cpaStatus = qat_ocf_stop_instances(qat_softc);
if (CPA_STATUS_SUCCESS != cpaStatus) {
device_printf(dev, "unable to stop instances\n");
status = EIO;
}
return status;
return qat_ocf_deinit(qat_softc);
}
static device_method_t qat_ocf_methods[] =

View file

@ -172,3 +172,19 @@ cpaDcDeflateCompressBound(const CpaInstanceHandle dcInstance,
return dcDeflateBoundGen2(huffType, inputSize, outputSize);
}
}
/*
 * LZ4 compress-bound queries are not implemented on this platform; report
 * CPA_STATUS_UNSUPPORTED so callers can detect the missing feature.
 */
CpaStatus
cpaDcLZ4CompressBound(const CpaInstanceHandle dcInstance,
		      Cpa32U inputSize,
		      Cpa32U *outputSize)
{
	return CPA_STATUS_UNSUPPORTED;
}

/* LZ4S compress-bound query: likewise unsupported on this platform. */
CpaStatus
cpaDcLZ4SCompressBound(const CpaInstanceHandle dcInstance,
		       Cpa32U inputSize,
		       Cpa32U *outputSize)
{
	return CPA_STATUS_UNSUPPORTED;
}

View file

@ -0,0 +1,102 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
/**
*****************************************************************************
* @file dc_chain.c
*
* @ingroup Dc_Chaining
*
* @description
* Implementation of the chaining session operations.
*
*****************************************************************************/
/*
*******************************************************************************
* Include public/global header files
*******************************************************************************
*/
#include "cpa.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_comp.h"
#include "icp_qat_hw.h"
/*
*******************************************************************************
* Include private header files
*******************************************************************************
*/
#include "sal_types_compression.h"
#include "cpa_dc_chain.h"
#include "lac_session.h"
#include "dc_session.h"
#include "dc_datapath.h"
#include "dc_stats.h"
#include "lac_mem_pools.h"
#include "lac_log.h"
#include "sal_types_compression.h"
#include "lac_buffer_desc.h"
#include "sal_service_state.h"
#include "sal_qat_cmn_msg.h"
#include "lac_sym_qat_hash_defs_lookup.h"
#include "sal_string_parse.h"
#include "lac_sym.h"
#include "lac_session.h"
#include "lac_sym_qat.h"
#include "lac_sym_hash.h"
#include "lac_sym_alg_chain.h"
#include "lac_sym_auth_enc.h"
/*
 * DC chaining is not implemented on this platform.  Every entry point
 * below returns CPA_STATUS_UNSUPPORTED so callers can detect the missing
 * feature instead of silently misbehaving.
 */
CpaStatus
cpaDcChainGetSessionSize(CpaInstanceHandle dcInstance,
			 CpaDcChainOperations operation,
			 Cpa8U numSessions,
			 CpaDcChainSessionSetupData *pSessionData,
			 Cpa32U *pSessionSize)
{
	return CPA_STATUS_UNSUPPORTED;
}

/* Chaining session initialization: unsupported. */
CpaStatus
cpaDcChainInitSession(CpaInstanceHandle dcInstance,
		      CpaDcSessionHandle pSessionHandle,
		      CpaDcChainOperations operation,
		      Cpa8U numSessions,
		      CpaDcChainSessionSetupData *pSessionData,
		      CpaDcCallbackFn callbackFn)
{
	return CPA_STATUS_UNSUPPORTED;
}

/* Chaining session removal: unsupported. */
CpaStatus
cpaDcChainRemoveSession(const CpaInstanceHandle dcInstance,
			CpaDcSessionHandle pSessionHandle)
{
	return CPA_STATUS_UNSUPPORTED;
}

/* Chaining session reset: unsupported. */
CpaStatus
cpaDcChainResetSession(const CpaInstanceHandle dcInstance,
		       CpaDcSessionHandle pSessionHandle)
{
	return CPA_STATUS_UNSUPPORTED;
}

/* Chained compression/crypto operation: unsupported. */
CpaStatus
cpaDcChainPerformOp(CpaInstanceHandle dcInstance,
		    CpaDcSessionHandle pSessionHandle,
		    CpaBufferList *pSrcBuff,
		    CpaBufferList *pDestBuff,
		    CpaDcChainOperations operation,
		    Cpa8U numOpDatas,
		    CpaDcChainOpData *pChainOpData,
		    CpaDcChainRqResults *pResults,
		    void *callbackTag)
{
	return CPA_STATUS_UNSUPPORTED;
}

View file

@ -331,6 +331,12 @@ dcCompression_ProcessCallback(void *pRespMsg)
(ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET ==
ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(
opStatus));
} else {
/* Check if returned data is a stored block
* in compression direction
*/
pResults->dataUncompressed =
ICP_QAT_FW_COMN_HDR_ST_BLK_FLAG_GET(hdrFlags);
}
/* Save the checksum for the next request */

View file

@ -273,6 +273,14 @@ cpaDcDpRemoveSession(const CpaInstanceHandle dcInstance,
return cpaDcRemoveSession(dcInstance, pSessionHandle);
}
/* Data-plane session update is not supported on this platform. */
CpaStatus
cpaDcDpUpdateSession(const CpaInstanceHandle dcInstance,
		     CpaDcSessionHandle pSessionHandle,
		     CpaDcSessionUpdateData *pUpdateSessionData)
{
	return CPA_STATUS_UNSUPPORTED;
}
CpaStatus
cpaDcDpRegCbFunc(const CpaInstanceHandle dcInstance,
const CpaDcDpCallbackFn pNewCb)

View file

@ -0,0 +1,72 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
/**
*****************************************************************************
* @file dc_ns_datapath.c
*
* @defgroup Dc_DataCompression DC Data Compression
*
* @ingroup Dc_DataCompression
*
* @description
* Implementation of the Data Compression datapath operations.
*
*****************************************************************************/
/*
*******************************************************************************
* Include public/global header files
*******************************************************************************
*/
#include "cpa.h"
#include "cpa_dc.h"
#include "cpa_dc_dp.h"
/*
*******************************************************************************
* Include private header files
*******************************************************************************
*/
#include "dc_session.h"
#include "dc_datapath.h"
#include "sal_statistics.h"
#include "lac_common.h"
#include "lac_mem.h"
#include "lac_mem_pools.h"
#include "lac_log.h"
#include "sal_types_compression.h"
#include "dc_stats.h"
#include "lac_buffer_desc.h"
#include "lac_sal.h"
#include "lac_sync.h"
#include "sal_service_state.h"
#include "sal_qat_cmn_msg.h"
#include "dc_error_counter.h"
/*
 * No-session (NS) compression/decompression is not implemented on this
 * platform; both entry points report CPA_STATUS_UNSUPPORTED.
 */
CpaStatus
cpaDcNsDecompressData(CpaInstanceHandle dcInstance,
		      CpaDcNsSetupData *pSetupData,
		      CpaBufferList *pSrcBuff,
		      CpaBufferList *pDestBuff,
		      CpaDcOpData *pOpData,
		      CpaDcRqResults *pResults,
		      CpaDcCallbackFn callbackFn,
		      void *callbackTag)
{
	return CPA_STATUS_UNSUPPORTED;
}

/* No-session compression: unsupported. */
CpaStatus
cpaDcNsCompressData(CpaInstanceHandle dcInstance,
		    CpaDcNsSetupData *pSetupData,
		    CpaBufferList *pSrcBuff,
		    CpaBufferList *pDestBuff,
		    CpaDcOpData *pOpData,
		    CpaDcRqResults *pResults,
		    CpaDcCallbackFn callbackFn,
		    void *callbackTag)
{
	return CPA_STATUS_UNSUPPORTED;
}

View file

@ -0,0 +1,46 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
/**
*****************************************************************************
* @file dc_ns_header_footer.c
*
* @ingroup Dc_DataCompression
*
* @description
* Implementation of the Data Compression header and footer operations.
*
*****************************************************************************/
/*
*******************************************************************************
* Include public/global header files
*******************************************************************************
*/
#include "cpa.h"
#include "cpa_dc.h"
/*
*******************************************************************************
* Include private header files
*******************************************************************************
*/
#include "dc_session.h"
/*
 * cpaDcNsGenerateHeader: produce a compression stream header for a
 * session-less (No-Session) request.  Not implemented; always returns
 * CPA_STATUS_UNSUPPORTED and writes nothing to pDestBuff or count.
 */
CpaStatus
cpaDcNsGenerateHeader(CpaDcNsSetupData *pSetupData,
		      CpaFlatBuffer *pDestBuff,
		      Cpa32U *count)
{
	return CPA_STATUS_UNSUPPORTED;
}
/*
 * cpaDcNsGenerateFooter: produce a compression stream footer for a
 * session-less (No-Session) request.  Not implemented; always returns
 * CPA_STATUS_UNSUPPORTED and writes nothing to pDestBuff or pResults.
 */
CpaStatus
cpaDcNsGenerateFooter(CpaDcNsSetupData *pSetupData,
		      Cpa64U totalLength,
		      CpaFlatBuffer *pDestBuff,
		      CpaDcRqResults *pResults)
{
	return CPA_STATUS_UNSUPPORTED;
}

View file

@ -65,14 +65,13 @@ dcCheckSessionData(const CpaDcSessionSetupData *pSessionData,
cpaDcQueryCapabilities(dcInstance, &instanceCapabilities);
if ((pSessionData->compLevel < CPA_DC_L1) ||
(pSessionData->compLevel > CPA_DC_L9)) {
(pSessionData->compLevel > CPA_DC_L12)) {
QAT_UTILS_LOG("Invalid compLevel value\n");
return CPA_STATUS_INVALID_PARAM;
}
if ((pSessionData->autoSelectBestHuffmanTree < CPA_DC_ASB_DISABLED) ||
(pSessionData->autoSelectBestHuffmanTree >
CPA_DC_ASB_UNCOMP_STATIC_DYNAMIC_WITH_NO_HDRS)) {
(pSessionData->autoSelectBestHuffmanTree > CPA_DC_ASB_ENABLED)) {
QAT_UTILS_LOG("Invalid autoSelectBestHuffmanTree value\n");
return CPA_STATUS_INVALID_PARAM;
}
@ -869,6 +868,13 @@ dcInitSession(CpaInstanceHandle dcInstance,
disableType0EnhancedAutoSelectBest =
ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST;
break;
case CPA_DC_ASB_ENABLED:
if (pService->comp_device_data.asbEnableSupport == CPA_FALSE) {
autoSelectBest = ICP_QAT_FW_COMP_AUTO_SELECT_BEST;
enhancedAutoSelectBest =
ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST;
}
break;
default:
break;
}
@ -1088,6 +1094,21 @@ cpaDcResetSession(const CpaInstanceHandle dcInstance,
return status;
}
CpaStatus
cpaDcResetXXHashState(const CpaInstanceHandle dcInstance,
CpaDcSessionHandle pSessionHandle)
{
return CPA_STATUS_UNSUPPORTED;
}
CpaStatus
cpaDcUpdateSession(const CpaInstanceHandle dcInstance,
CpaDcSessionHandle pSessionHandle,
CpaDcSessionUpdateData *pUpdateSessionData)
{
return CPA_STATUS_UNSUPPORTED;
}
CpaStatus
cpaDcRemoveSession(const CpaInstanceHandle dcInstance,
CpaDcSessionHandle pSessionHandle)

View file

@ -208,8 +208,6 @@ typedef struct dc_session_desc_s {
/**< Session direction */
CpaDcSessionState sessState;
/**< Session state */
Cpa32U deflateWindowSize;
/**< Window size */
CpaDcCompLvl compLevel;
/**< Compression level */
CpaDcCallbackFn pCompressionCb;

View file

@ -2449,20 +2449,12 @@ LacSymKey_KeyGenSslTls(const CpaInstanceHandle instanceHandle_in,
{
CpaStatus status = CPA_STATUS_FAIL;
CpaInstanceHandle instanceHandle = LacKey_GetHandle(instanceHandle_in);
CpaCyCapabilitiesInfo cyCapInfo;
LAC_CHECK_INSTANCE_HANDLE(instanceHandle);
SAL_CHECK_INSTANCE_TYPE(instanceHandle,
(SAL_SERVICE_TYPE_CRYPTO |
SAL_SERVICE_TYPE_CRYPTO_SYM));
SAL_RUNNING_CHECK(instanceHandle);
SalCtrl_CyQueryCapabilities(instanceHandle, &cyCapInfo);
if (IS_HKDF_UNSUPPORTED(cmdId, cyCapInfo.hkdfSupported)) {
LAC_LOG_ERROR("The device does not support HKDF");
return CPA_STATUS_UNSUPPORTED;
}
status = LacSymKey_CheckParamSslTls(pKeyGenOpData,
hashAlgorithm,

View file

@ -1147,8 +1147,8 @@ LacAlgChain_SessionInit(const CpaInstanceHandle instanceHandle,
* build the message templates
* create two content descriptors in the case we can support using SHRAM
* constants and an optimised content descriptor. we have to do this in
*case of partials. 64 byte content descriptor is used in the SHRAM case
*for AES-128-HMAC-SHA1
* case of partials. 64 byte content descriptor is used in the SHRAM
* case for AES-128-HMAC-SHA1
*-----------------------------------------------------------------------*/
if (CPA_STATUS_SUCCESS == status) {
pSessionDesc->cipherSliceType =

View file

@ -126,7 +126,7 @@ SalCtrl_CompressionInit_CompData(icp_accel_dev_t *device,
sal_compression_service_t *pCompService)
{
int level = 0;
pCompService->comp_device_data.asbEnableSupport = CPA_FALSE;
pCompService->comp_device_data.uniqueCompressionLevels[0] = CPA_FALSE;
switch (device->deviceType) {
@ -154,6 +154,23 @@ SalCtrl_CompressionInit_CompData(icp_accel_dev_t *device,
pCompService->comp_device_data.windowSizeMask =
(1 << DC_8K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE);
pCompService->comp_device_data.cnvnrSupported = CPA_FALSE;
for (level = CPA_DC_L1; level <= CPA_DC_L12; level++) {
switch (level) {
case CPA_DC_L1:
case CPA_DC_L2:
case CPA_DC_L3:
case CPA_DC_L4:
pCompService->comp_device_data
.uniqueCompressionLevels[level] = CPA_TRUE;
break;
default:
pCompService->comp_device_data
.uniqueCompressionLevels[level] = CPA_FALSE;
break;
}
}
pCompService->comp_device_data.numCompressionLevels =
DC_NUM_COMPRESSION_LEVELS;
break;
case DEVICE_C3XXX:
case DEVICE_C3XXXVF:
@ -181,6 +198,24 @@ SalCtrl_CompressionInit_CompData(icp_accel_dev_t *device,
ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED;
pCompService->comp_device_data.cnvnrSupported = CPA_TRUE;
for (level = CPA_DC_L1; level <= CPA_DC_L12; level++) {
switch (level) {
case CPA_DC_L1:
case CPA_DC_L2:
case CPA_DC_L3:
case CPA_DC_L4:
pCompService->comp_device_data
.uniqueCompressionLevels[level] = CPA_TRUE;
break;
default:
pCompService->comp_device_data
.uniqueCompressionLevels[level] = CPA_FALSE;
break;
}
}
pCompService->comp_device_data.numCompressionLevels =
DC_NUM_COMPRESSION_LEVELS;
break;
case DEVICE_C62X:
case DEVICE_C62XVF:
@ -209,7 +244,7 @@ SalCtrl_CompressionInit_CompData(icp_accel_dev_t *device,
ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED;
pCompService->comp_device_data.cnvnrSupported = CPA_TRUE;
for (level = CPA_DC_L1; level <= CPA_DC_L9; level++) {
for (level = CPA_DC_L1; level <= CPA_DC_L12; level++) {
switch (level) {
case CPA_DC_L1:
case CPA_DC_L2:
@ -254,8 +289,28 @@ SalCtrl_CompressionInit_CompData(icp_accel_dev_t *device,
pCompService->comp_device_data.windowSizeMask =
(1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE);
pCompService->comp_device_data.cnvnrSupported = CPA_TRUE;
for (level = CPA_DC_L1; level <= CPA_DC_L12; level++) {
switch (level) {
case CPA_DC_L1:
case CPA_DC_L2:
case CPA_DC_L3:
case CPA_DC_L4:
case CPA_DC_L5:
pCompService->comp_device_data
.uniqueCompressionLevels[level] = CPA_TRUE;
break;
default:
pCompService->comp_device_data
.uniqueCompressionLevels[level] = CPA_FALSE;
break;
}
}
pCompService->comp_device_data.numCompressionLevels =
DC_NUM_COMPRESSION_LEVELS;
break;
case DEVICE_GEN4:
case DEVICE_4XXX:
case DEVICE_4XXXVF:
pCompService->generic_service_info.integrityCrcCheck = CPA_TRUE;
pCompService->numInterBuffs = 0;
pCompService->comp_device_data.minOutputBuffSize =
@ -277,7 +332,7 @@ SalCtrl_CompressionInit_CompData(icp_accel_dev_t *device,
pCompService->comp_device_data.windowSizeMask =
(1 << DC_4K_WINDOW_SIZE | 1 << DC_8K_WINDOW_SIZE |
1 << DC_16K_WINDOW_SIZE | 1 << DC_32K_WINDOW_SIZE);
for (level = CPA_DC_L1; level <= CPA_DC_L9; level++) {
for (level = CPA_DC_L1; level <= CPA_DC_L12; level++) {
switch (level) {
case CPA_DC_L1:
case CPA_DC_L6:

View file

@ -478,7 +478,8 @@ selectGeneration(device_type_t deviceType, sal_service_t *pInst)
pInst->gen = GEN3;
break;
case DEVICE_GEN4:
case DEVICE_4XXX:
case DEVICE_4XXXVF:
pInst->gen = GEN4;
break;
@ -719,6 +720,28 @@ SalCtrl_ServiceStop(icp_accel_dev_t *device, sal_list_t *services)
return status;
}
/*
 * Walk the given service-instance list and deliver a
 * CPA_INSTANCE_EVENT_FATAL_ERROR notification to every instance that has
 * registered a notification callback.  Always returns CPA_STATUS_SUCCESS.
 */
static CpaStatus
SalCtrl_ServiceError(icp_accel_dev_t *device, sal_list_t *services)
{
	sal_list_t *node;

	for (node = services; NULL != node; node = SalList_next(node)) {
		sal_service_t *inst =
		    (sal_service_t *)SalList_getObject(node);

		if (inst->notification_cb) {
			inst->notification_cb(inst,
					      inst->cb_tag,
					      CPA_INSTANCE_EVENT_FATAL_ERROR);
		}
	}

	return CPA_STATUS_SUCCESS;
}
/*
* @ingroup SalCtrl
* @description
@ -1164,6 +1187,78 @@ SalCtrl_ServiceEventStop(icp_accel_dev_t *device, Cpa32U enabled_services)
return ret_status;
}
/**************************************************************************
* @ingroup SalCtrl
* @description
* This function calls the error function on all the service instances.
*
* @context
* This function is called from the SalCtrl_ServiceEventHandler function.
*
* @assumptions
* None
* @sideEffects
* None
* @reentrant
* No
* @threadSafe
* Yes
*
* @param[in] device An icp_accel_dev_t* type
* @param[in] enabled_services Enabled services by user
*
**************************************************************************/
static CpaStatus
SalCtrl_ServiceEventError(icp_accel_dev_t *device, Cpa32U enabled_services)
{
	CpaStatus ret_status = CPA_STATUS_SUCCESS;
	CpaStatus status;
	sal_t *sal = device->pSalHandle;
	size_t idx;

	if (NULL == sal) {
		QAT_UTILS_LOG("Private data is NULL\n");
		return CPA_STATUS_FATAL;
	}

	{
		/* Service types paired with their instance lists, visited in
		 * the same order the notifications were issued before:
		 * asym, sym, crypto, compression. */
		const struct {
			Cpa32U type;
			sal_list_t *list;
		} targets[] = {
			{ SAL_SERVICE_TYPE_CRYPTO_ASYM, sal->asym_services },
			{ SAL_SERVICE_TYPE_CRYPTO_SYM, sal->sym_services },
			{ SAL_SERVICE_TYPE_CRYPTO, sal->crypto_services },
			{ SAL_SERVICE_TYPE_COMPRESSION,
			  sal->compression_services },
		};

		for (idx = 0; idx < sizeof(targets) / sizeof(targets[0]);
		     idx++) {
			if (!SalCtrl_IsServiceEnabled(enabled_services,
						      targets[idx].type))
				continue;
			status =
			    SalCtrl_ServiceError(device, targets[idx].list);
			if (CPA_STATUS_SUCCESS != status)
				ret_status = status;
		}
	}

	return ret_status;
}
/**************************************************************************
* @ingroup SalCtrl
* @description
@ -1307,6 +1402,10 @@ SalCtrl_ServiceEventHandler(icp_accel_dev_t *device,
}
break;
}
case ICP_ADF_EVENT_ERROR: {
status = SalCtrl_ServiceEventError(device, enabled_services);
break;
}
default:
status = CPA_STATUS_SUCCESS;
break;

View file

@ -0,0 +1,288 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
/**
*****************************************************************************
* @file sal_get_instances.c
*
* @defgroup SalCtrl Service Access Layer Controller
*
* @ingroup SalCtrl
*
* @description
* This file contains the main function to get SAL instances.
*
*****************************************************************************/
/*
*******************************************************************************
* Include public/global header files
*******************************************************************************
*/
/* QAT-API includes */
#include "cpa.h"
#include "cpa_cy_common.h"
#include "cpa_cy_im.h"
#include "cpa_dc.h"
/* ADF includes */
#include "icp_accel_devices.h"
#include "icp_adf_accel_mgr.h"
/* SAL includes */
#include "lac_mem.h"
#include "lac_list.h"
#include "lac_sal_types.h"
/**
******************************************************************************
* @ingroup SalCtrl
* @description
* Get either sym or asym instance number
*****************************************************************************/
static CpaStatus
Lac_GetSingleCyNumInstances(
    const CpaAccelerationServiceType accelerationServiceType,
    Cpa16U *pNumInstances)
{
	CpaStatus status = CPA_STATUS_SUCCESS;
	icp_accel_dev_t **pAdfInsts = NULL;
	icp_accel_dev_t *dev_addr = NULL;
	sal_t *base_addr = NULL;
	sal_list_t *list_temp = NULL;
	Cpa16U num_accel_dev = 0;
	Cpa16U num_inst = 0;
	Cpa16U i = 0;
	Cpa32U accel_capability = 0;
	char *service = NULL;
	LAC_CHECK_NULL_PARAM(pNumInstances);
	/* Report zero on every early-exit path below. */
	*pNumInstances = 0;
	/* Translate the service type into the capability bit used to
	 * filter devices; only sym and asym are handled by this helper. */
	switch (accelerationServiceType) {
	case CPA_ACC_SVC_TYPE_CRYPTO_ASYM:
		accel_capability = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
		service = "asym";
		break;
	case CPA_ACC_SVC_TYPE_CRYPTO_SYM:
		accel_capability = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
		service = "sym";
		break;
	default:
		QAT_UTILS_LOG("Invalid service type\n");
		return CPA_STATUS_INVALID_PARAM;
	}
	/* Get the number of accel_dev in the system */
	status = icp_amgr_getNumInstances(&num_accel_dev);
	LAC_CHECK_STATUS(status);
	/* Allocate memory to store addr of accel_devs.
	 * NOTE(review): FreeBSD kernel malloc(9) with M_WAITOK does not
	 * return NULL, so the check below appears to be defensive only —
	 * confirm against malloc(9). */
	pAdfInsts = malloc(num_accel_dev * sizeof(icp_accel_dev_t *),
			   M_QAT,
			   M_WAITOK | M_ZERO);
	if (NULL == pAdfInsts) {
		QAT_UTILS_LOG("Failed to allocate dev instance memory\n");
		return CPA_STATUS_RESOURCE;
	}
	/* Re-query: this time the array is filled with only those devices
	 * advertising the requested capability, and the count refreshed. */
	num_accel_dev = 0;
	status = icp_amgr_getAllAccelDevByCapabilities(accel_capability,
						       pAdfInsts,
						       &num_accel_dev);
	if (CPA_STATUS_SUCCESS != status) {
		QAT_UTILS_LOG("No support for service %s\n", service);
		free(pAdfInsts, M_QAT);
		return status;
	}
	/* Walk each device's per-service instance list and total them up. */
	for (i = 0; i < num_accel_dev; i++) {
		dev_addr = pAdfInsts[i];
		if (NULL == dev_addr || NULL == dev_addr->pSalHandle) {
			/* Skip devices with no SAL private data attached. */
			continue;
		}
		base_addr = dev_addr->pSalHandle;
		if (CPA_ACC_SVC_TYPE_CRYPTO_ASYM == accelerationServiceType) {
			list_temp = base_addr->asym_services;
		} else {
			list_temp = base_addr->sym_services;
		}
		while (NULL != list_temp) {
			num_inst++;
			list_temp = SalList_next(list_temp);
		}
	}
	*pNumInstances = num_inst;
	free(pAdfInsts, M_QAT);
	return status;
}
/**
******************************************************************************
* @ingroup SalCtrl
* @description
* Get either sym or asym instance
*****************************************************************************/
static CpaStatus
Lac_GetSingleCyInstances(
    const CpaAccelerationServiceType accelerationServiceType,
    Cpa16U numInstances,
    CpaInstanceHandle *pInstances)
{
	CpaStatus status = CPA_STATUS_SUCCESS;
	icp_accel_dev_t **pAdfInsts = NULL;
	icp_accel_dev_t *dev_addr = NULL;
	sal_t *base_addr = NULL;
	sal_list_t *list_temp = NULL;
	Cpa16U num_accel_dev = 0;
	Cpa16U num_allocated_instances = 0;
	Cpa16U index = 0;
	Cpa16U i = 0;
	Cpa32U accel_capability = 0;
	char *service = NULL;
	LAC_CHECK_NULL_PARAM(pInstances);
	if (0 == numInstances) {
		QAT_UTILS_LOG("NumInstances is 0\n");
		return CPA_STATUS_INVALID_PARAM;
	}
	/* Translate the service type into the capability bit used to
	 * filter devices; only sym and asym are handled by this helper. */
	switch (accelerationServiceType) {
	case CPA_ACC_SVC_TYPE_CRYPTO_ASYM:
		accel_capability = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
		service = "asym";
		break;
	case CPA_ACC_SVC_TYPE_CRYPTO_SYM:
		accel_capability = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
		service = "sym";
		break;
	default:
		QAT_UTILS_LOG("Invalid service type\n");
		return CPA_STATUS_INVALID_PARAM;
	}
	/* Get the number of instances */
	status = cpaGetNumInstances(accelerationServiceType,
				    &num_allocated_instances);
	if (CPA_STATUS_SUCCESS != status) {
		return status;
	}
	/* The caller must not request more handles than exist. */
	if (numInstances > num_allocated_instances) {
		QAT_UTILS_LOG("Only %d instances available\n",
			      num_allocated_instances);
		return CPA_STATUS_RESOURCE;
	}
	/* Get the number of accel devices in the system */
	status = icp_amgr_getNumInstances(&num_accel_dev);
	LAC_CHECK_STATUS(status);
	/* Allocate memory to store addr of accel_devs.
	 * NOTE(review): FreeBSD kernel malloc(9) with M_WAITOK does not
	 * return NULL, so the check below appears to be defensive only —
	 * confirm against malloc(9). */
	pAdfInsts = malloc(num_accel_dev * sizeof(icp_accel_dev_t *),
			   M_QAT,
			   M_WAITOK | M_ZERO);
	if (NULL == pAdfInsts) {
		QAT_UTILS_LOG("Failed to allocate dev instance memory\n");
		return CPA_STATUS_RESOURCE;
	}
	/* Re-query: this time the array is filled with only those devices
	 * advertising the requested capability, and the count refreshed. */
	num_accel_dev = 0;
	status = icp_amgr_getAllAccelDevByCapabilities(accel_capability,
						       pAdfInsts,
						       &num_accel_dev);
	if (CPA_STATUS_SUCCESS != status) {
		QAT_UTILS_LOG("No support for service %s\n", service);
		free(pAdfInsts, M_QAT);
		return status;
	}
	/* Copy out instance handles until the caller's quota is filled. */
	for (i = 0; i < num_accel_dev; i++) {
		dev_addr = pAdfInsts[i];
		/* Note dev_addr cannot be NULL here as numInstances = 0
		 * is not valid and if dev_addr = NULL then index = 0 (which
		 * is less than numInstances and status is set to _RESOURCE
		 * above)
		 */
		base_addr = dev_addr->pSalHandle;
		if (NULL == base_addr) {
			continue;
		}
		if (CPA_ACC_SVC_TYPE_CRYPTO_ASYM == accelerationServiceType)
			list_temp = base_addr->asym_services;
		else
			list_temp = base_addr->sym_services;
		while (NULL != list_temp) {
			/* Stop once the caller's buffer is full. */
			if (index > (numInstances - 1))
				break;
			pInstances[index] = SalList_getObject(list_temp);
			list_temp = SalList_next(list_temp);
			index++;
		}
	}
	free(pAdfInsts, M_QAT);
	return status;
}
/**
 ******************************************************************************
 * @ingroup SalCtrl
 *   Return in *pNumInstances the number of instances providing the requested
 *   acceleration service.  Sym/asym queries are handled by the local helper;
 *   combined crypto and compression queries are forwarded to the legacy
 *   cpaCy/cpaDc entry points.
 *
 *   Fix: validate pNumInstances before use.  The default (invalid-type)
 *   branch previously dereferenced it unconditionally, while every valid
 *   path validated it; a NULL pointer plus a bad service type would have
 *   dereferenced NULL instead of returning CPA_STATUS_INVALID_PARAM.
 *****************************************************************************/
CpaStatus
cpaGetNumInstances(const CpaAccelerationServiceType accelerationServiceType,
		   Cpa16U *pNumInstances)
{
	/* Guard the write in the default branch below; mirrors the check
	 * performed inside Lac_GetSingleCyNumInstances for valid types. */
	LAC_CHECK_NULL_PARAM(pNumInstances);

	switch (accelerationServiceType) {
	case CPA_ACC_SVC_TYPE_CRYPTO_ASYM:
	case CPA_ACC_SVC_TYPE_CRYPTO_SYM:
		return Lac_GetSingleCyNumInstances(accelerationServiceType,
						   pNumInstances);
	case CPA_ACC_SVC_TYPE_CRYPTO:
		return cpaCyGetNumInstances(pNumInstances);
	case CPA_ACC_SVC_TYPE_DATA_COMPRESSION:
		return cpaDcGetNumInstances(pNumInstances);
	default:
		QAT_UTILS_LOG("Invalid service type\n");
		*pNumInstances = 0;
		return CPA_STATUS_INVALID_PARAM;
	}
}
/**
 ******************************************************************************
 * @ingroup SalCtrl
 *   Populate pInstances with up to numInstances handles for the requested
 *   acceleration service.  Sym/asym requests go to the local helper; combined
 *   crypto and compression requests are forwarded to the legacy cpaCy/cpaDc
 *   entry points.
 *****************************************************************************/
CpaStatus
cpaGetInstances(const CpaAccelerationServiceType accelerationServiceType,
		Cpa16U numInstances,
		CpaInstanceHandle *pInstances)
{
	if (CPA_ACC_SVC_TYPE_CRYPTO_ASYM == accelerationServiceType ||
	    CPA_ACC_SVC_TYPE_CRYPTO_SYM == accelerationServiceType)
		return Lac_GetSingleCyInstances(accelerationServiceType,
						numInstances,
						pInstances);

	if (CPA_ACC_SVC_TYPE_CRYPTO == accelerationServiceType)
		return cpaCyGetInstances(numInstances, pInstances);

	if (CPA_ACC_SVC_TYPE_DATA_COMPRESSION == accelerationServiceType)
		return cpaDcGetInstances(numInstances, pInstances);

	QAT_UTILS_LOG("Invalid service type\n");
	return CPA_STATUS_INVALID_PARAM;
}

View file

@ -23,7 +23,7 @@
#include "icp_adf_transport.h"
#define DC_NUM_RX_RINGS (1)
#define DC_NUM_COMPRESSION_LEVELS (CPA_DC_L9)
#define DC_NUM_COMPRESSION_LEVELS (CPA_DC_L12)
/**
*****************************************************************************
@ -73,6 +73,9 @@ typedef struct sal_compression_device_data {
/* Flag to indicate CompressAndVerifyAndRecover feature support */
CpaBoolean cnvnrSupported;
/* When set, implies device supports ASB_ENABLE */
CpaBoolean asbEnableSupport;
} sal_compression_device_data_t;
/**

View file

@ -29,6 +29,9 @@
#include "cpa_cy_rsa.h"
#include "cpa_cy_ln.h"
#include "cpa_dc.h"
#include "cpa_dc_chain.h"
#include "cpa_cy_ecsm2.h"
#include "cpa_cy_kpt.h"
#include "icp_accel_devices.h"
#include "icp_adf_init.h"
#include "icp_adf_transport.h"
@ -283,6 +286,194 @@ cpaCyEcMontEdwdsPointMultiply(
return CPA_STATUS_UNSUPPORTED;
}
/*
 * Generic-curve EC point verify.  Not implemented by this driver; always
 * reports CPA_STATUS_UNSUPPORTED.
 */
CpaStatus
cpaCyEcGenericPointVerify(const CpaInstanceHandle instanceHandle,
			  const CpaCyEcPointVerifyCbFunc pCb,
			  void *pCallbackTag,
			  const CpaCyEcGenericPointVerifyOpData *pOpData,
			  CpaBoolean *pVerifyStatus)
{
	return CPA_STATUS_UNSUPPORTED;
}

/*
 * Generic-curve EC point multiply.  Not implemented by this driver; always
 * reports CPA_STATUS_UNSUPPORTED.
 */
CpaStatus
cpaCyEcGenericPointMultiply(const CpaInstanceHandle instanceHandle,
			    const CpaCyEcPointMultiplyCbFunc pCb,
			    void *pCallbackTag,
			    const CpaCyEcGenericPointMultiplyOpData *pOpData,
			    CpaBoolean *pMultiplyStatus,
			    CpaFlatBuffer *pOutX,
			    CpaFlatBuffer *pOutY)
{
	return CPA_STATUS_UNSUPPORTED;
}
/*
 * ECC SM2 point multiplication [k]P.  Not implemented by this driver;
 * always reports CPA_STATUS_UNSUPPORTED.
 */
CpaStatus
cpaCyEcsm2PointMultiply(
    const CpaInstanceHandle instanceHandle_in,
    const CpaCyEcPointMultiplyCbFunc pEcsm2PointMulCb,
    void *pCallbackTag,
    const CpaCyEcsm2PointMultiplyOpData *pEcsm2PointMulOpData,
    CpaBoolean *pMultiplyStatus,
    CpaFlatBuffer *pXk,
    CpaFlatBuffer *pYk)
{
	return CPA_STATUS_UNSUPPORTED;
}

/*
 * ECC SM2 generator multiplication [k]G.  Not implemented by this driver;
 * always reports CPA_STATUS_UNSUPPORTED.
 */
CpaStatus
cpaCyEcsm2GeneratorMultiply(
    const CpaInstanceHandle instanceHandle_in,
    const CpaCyEcPointMultiplyCbFunc pEcsm2GenMulCb,
    void *pCallbackTag,
    const CpaCyEcsm2GeneratorMultiplyOpData *pEcsm2GenMulOpData,
    CpaBoolean *pMultiplyStatus,
    CpaFlatBuffer *pXk,
    CpaFlatBuffer *pYk)
{
	return CPA_STATUS_UNSUPPORTED;
}
/*
 * ECC SM2 point-on-curve verification.  Not implemented by this driver;
 * always reports CPA_STATUS_UNSUPPORTED without inspecting its arguments.
 *
 * Fix: corrected the misspelled callback parameter identifier
 * ("pEcsm2PointVeirfyCb" -> "pEcsm2PointVerifyCb").  Parameter names in a
 * definition are local, so callers and the public prototype are unaffected.
 */
CpaStatus
cpaCyEcsm2PointVerify(
    const CpaInstanceHandle instanceHandle_in,
    const CpaCyEcPointVerifyCbFunc pEcsm2PointVerifyCb,
    void *pCallbackTag,
    const CpaCyEcsm2PointVerifyOpData *pEcsm2PointVerifyOpData,
    CpaBoolean *pPointVerifyStatus)
{
	return CPA_STATUS_UNSUPPORTED;
}
/*
 * ECC SM2 signature generation (R, S).  Not implemented by this driver;
 * always reports CPA_STATUS_UNSUPPORTED.
 */
CpaStatus
cpaCyEcsm2Sign(const CpaInstanceHandle instanceHandle_in,
	       const CpaCyEcsm2SignCbFunc pEcsm2SignCb,
	       void *pCallbackTag,
	       const CpaCyEcsm2SignOpData *pEcsm2SignOpData,
	       CpaBoolean *pSignStatus,
	       CpaFlatBuffer *pR,
	       CpaFlatBuffer *pS)
{
	return CPA_STATUS_UNSUPPORTED;
}

/*
 * ECC SM2 signature verification.  Not implemented by this driver; always
 * reports CPA_STATUS_UNSUPPORTED.
 */
CpaStatus
cpaCyEcsm2Verify(const CpaInstanceHandle instanceHandle_in,
		 const CpaCyEcsm2VerifyCbFunc pEcsm2VerifyCb,
		 void *pCallbackTag,
		 const CpaCyEcsm2VerifyOpData *pEcsm2VerifyOpData,
		 CpaBoolean *pVerifyStatus)
{
	return CPA_STATUS_UNSUPPORTED;
}

/*
 * ECC SM2 encryption.  Not implemented by this driver; always reports
 * CPA_STATUS_UNSUPPORTED.
 */
CpaStatus
cpaCyEcsm2Encrypt(const CpaInstanceHandle instanceHandle_in,
		  const CpaCyGenFlatBufCbFunc pEcsm2EncCb,
		  void *pCallbackTag,
		  const CpaCyEcsm2EncryptOpData *pEcsm2EncOpData,
		  CpaCyEcsm2EncryptOutputData *pEcsm2EncOutputData)
{
	return CPA_STATUS_UNSUPPORTED;
}

/*
 * ECC SM2 decryption.  Not implemented by this driver; always reports
 * CPA_STATUS_UNSUPPORTED.
 */
CpaStatus
cpaCyEcsm2Decrypt(const CpaInstanceHandle instanceHandle_in,
		  const CpaCyGenFlatBufCbFunc pEcsm2DecCb,
		  void *pCallbackTag,
		  const CpaCyEcsm2DecryptOpData *pEcsm2DecOpData,
		  CpaCyEcsm2DecryptOutputData *pEcsm2DecOutputData)
{
	return CPA_STATUS_UNSUPPORTED;
}

/*
 * ECC SM2 key-exchange phase 1.  Not implemented by this driver; always
 * reports CPA_STATUS_UNSUPPORTED.
 */
CpaStatus
cpaCyEcsm2KeyExPhase1(
    const CpaInstanceHandle instanceHandle_in,
    const CpaCyGenFlatBufCbFunc pEcsm2KeyExPhase1Cb,
    void *pCallbackTag,
    const CpaCyEcsm2KeyExPhase1OpData *pEcsm2KeyExPhase1OpData,
    CpaCyEcsm2KeyExOutputData *pEcsm2KeyExPhase1OutputData)
{
	return CPA_STATUS_UNSUPPORTED;
}

/*
 * ECC SM2 key-exchange phase 2.  Not implemented by this driver; always
 * reports CPA_STATUS_UNSUPPORTED.
 */
CpaStatus
cpaCyEcsm2KeyExPhase2(
    const CpaInstanceHandle instanceHandle_in,
    const CpaCyGenFlatBufCbFunc pEcsm2KeyExPhase2Cb,
    void *pCallbackTag,
    const CpaCyEcsm2KeyExPhase2OpData *pEcsm2KeyExPhase2OpData,
    CpaCyEcsm2KeyExOutputData *pEcsm2KeyExPhase2OutputData)
{
	return CPA_STATUS_UNSUPPORTED;
}

/*
 * ECC SM2 64-bit statistics query.  Not implemented by this driver; always
 * reports CPA_STATUS_UNSUPPORTED and writes nothing to pEcsm2Stats.
 */
CpaStatus
cpaCyEcsm2QueryStats64(const CpaInstanceHandle instanceHandle_in,
		       CpaCyEcsm2Stats64 *pEcsm2Stats)
{
	return CPA_STATUS_UNSUPPORTED;
}
/*
 * KPT (Key Protection Technology) ECDSA sign R,S.  Not implemented by this
 * driver; always reports CPA_STATUS_UNSUPPORTED.
 */
CpaStatus
cpaCyKptEcdsaSignRS(const CpaInstanceHandle instanceHandle,
		    const CpaCyEcdsaSignRSCbFunc pCb,
		    void *pCallbackTag,
		    const CpaCyKptEcdsaSignRSOpData *pOpData,
		    CpaBoolean *pSignStatus,
		    CpaFlatBuffer *pR,
		    CpaFlatBuffer *pS,
		    CpaCyKptUnwrapContext *pKptUnwrapContext)
{
	return CPA_STATUS_UNSUPPORTED;
}

/*
 * KPT RSA decrypt.  Not implemented by this driver; always reports
 * CPA_STATUS_UNSUPPORTED.
 */
CpaStatus
cpaCyKptRsaDecrypt(const CpaInstanceHandle instanceHandle,
		   const CpaCyGenFlatBufCbFunc pRsaDecryptCb,
		   void *pCallbackTag,
		   const CpaCyKptRsaDecryptOpData *pDecryptOpData,
		   CpaFlatBuffer *pOutputData,
		   CpaCyKptUnwrapContext *pKptUnwrapContext)
{
	return CPA_STATUS_UNSUPPORTED;
}

/*
 * KPT issuing-key query.  Not implemented by this driver; always reports
 * CPA_STATUS_UNSUPPORTED.
 */
CpaStatus
cpaCyKptQueryIssuingKeys(const CpaInstanceHandle instanceHandle_in,
			 CpaFlatBuffer *pPublicX509IssueCert,
			 CpaCyKptKeyManagementStatus *pKptStatus)
{
	return CPA_STATUS_UNSUPPORTED;
}

/*
 * KPT device-credential query.  Not implemented by this driver; always
 * reports CPA_STATUS_UNSUPPORTED.
 */
CpaStatus
cpaCyKptQueryDeviceCredentials(const CpaInstanceHandle instanceHandle,
			       CpaCyKptValidationKey *pDevCredential,
			       CpaCyKptKeyManagementStatus *pKptStatus)
{
	return CPA_STATUS_UNSUPPORTED;
}

/*
 * KPT wrapped-key load.  Not implemented by this driver; always reports
 * CPA_STATUS_UNSUPPORTED.
 */
CpaStatus
cpaCyKptLoadKey(CpaInstanceHandle instanceHandle,
		CpaCyKptLoadKey *pSWK,
		CpaCyKptHandle *keyHandle,
		CpaCyKptKeyManagementStatus *pKptStatus)
{
	return CPA_STATUS_UNSUPPORTED;
}

/*
 * KPT key delete.  Not implemented by this driver; always reports
 * CPA_STATUS_UNSUPPORTED.
 */
CpaStatus
cpaCyKptDeleteKey(CpaInstanceHandle instanceHandle,
		  CpaCyKptHandle keyHandle,
		  CpaCyKptKeyManagementStatus *pKptStatus)
{
	return CPA_STATUS_UNSUPPORTED;
}
/* Prime */
CpaStatus
cpaCyPrimeTest(const CpaInstanceHandle instanceHandle,

File diff suppressed because it is too large Load diff

View file

@ -1,6 +1,10 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
/* --- (Automatically generated (relocation v. 1.3), do not modify manually) --- */
/**
* @file icp_qat_fw_mmp_ids.h
* @ingroup icp_qat_fw_mmp
@ -14,7 +18,159 @@
#ifndef __ICP_QAT_FW_MMP_IDS__
#define __ICP_QAT_FW_MMP_IDS__
#define PKE_INIT 0x09061798
#define PKE_ECSM2_GENERATOR_MULTIPLICATION 0x220f16ae
/**< Functionality ID for ECC SM2 point multiply [k]G
* @li 1 input parameters : @link
* icp_qat_fw_mmp_ecsm2_generator_multiplication_input_s::k k @endlink
* @li 2 output parameters : @link
* icp_qat_fw_mmp_ecsm2_generator_multiplication_output_s::xd xd @endlink @link
* icp_qat_fw_mmp_ecsm2_generator_multiplication_output_s::yd yd @endlink
*/
#define PKE_ECSM2_POINT_MULTIPLICATION 0x211716ce
/**< Functionality ID for ECC SM2 point multiply [k]P
* @li 3 input parameters : @link
* icp_qat_fw_mmp_ecsm2_point_multiplication_input_s::k k @endlink @link
* icp_qat_fw_mmp_ecsm2_point_multiplication_input_s::x x @endlink @link
* icp_qat_fw_mmp_ecsm2_point_multiplication_input_s::y y @endlink
* @li 2 output parameters : @link
* icp_qat_fw_mmp_ecsm2_point_multiplication_output_s::xd xd @endlink @link
* icp_qat_fw_mmp_ecsm2_point_multiplication_output_s::yd yd @endlink
*/
#define PKE_ECSM2_POINT_VERIFY 0x1b0716a6
/**< Functionality ID for ECC SM2 point verify
* @li 2 input parameters : @link icp_qat_fw_mmp_ecsm2_point_verify_input_s::x x
* @endlink @link icp_qat_fw_mmp_ecsm2_point_verify_input_s::y y @endlink
* @li no output parameters
*/
#define PKE_ECSM2_SIGN_RS 0x222116fe
/**< Functionality ID for ECC SM2 Sign RS
* @li 3 input parameters : @link icp_qat_fw_mmp_ecsm2_sign_rs_input_s::k k
* @endlink @link icp_qat_fw_mmp_ecsm2_sign_rs_input_s::e e @endlink @link
* icp_qat_fw_mmp_ecsm2_sign_rs_input_s::d d @endlink
* @li 2 output parameters : @link icp_qat_fw_mmp_ecsm2_sign_rs_output_s::r r
* @endlink @link icp_qat_fw_mmp_ecsm2_sign_rs_output_s::s s @endlink
*/
#define PKE_ECSM2_VERIFY 0x29241743
/**< Functionality ID for ECC SM2 Signature Verify
* @li 5 input parameters : @link icp_qat_fw_mmp_ecsm2_verify_input_s::e e
* @endlink @link icp_qat_fw_mmp_ecsm2_verify_input_s::r r @endlink @link
* icp_qat_fw_mmp_ecsm2_verify_input_s::s s @endlink @link
* icp_qat_fw_mmp_ecsm2_verify_input_s::xp xp @endlink @link
* icp_qat_fw_mmp_ecsm2_verify_input_s::yp yp @endlink
* @li no output parameters
*/
#define PKE_ECSM2_ENCRYPTION 0x25221720
/**< Functionality ID for ECC SM2 encryption
* @li 3 input parameters : @link icp_qat_fw_mmp_ecsm2_encryption_input_s::k k
* @endlink @link icp_qat_fw_mmp_ecsm2_encryption_input_s::xp xp @endlink @link
* icp_qat_fw_mmp_ecsm2_encryption_input_s::yp yp @endlink
* @li 4 output parameters : @link icp_qat_fw_mmp_ecsm2_encryption_output_s::xc
* xc @endlink @link icp_qat_fw_mmp_ecsm2_encryption_output_s::yc yc @endlink
* @link icp_qat_fw_mmp_ecsm2_encryption_output_s::xpb xpb @endlink @link
* icp_qat_fw_mmp_ecsm2_encryption_output_s::ypb ypb @endlink
*/
#define PKE_ECSM2_DECRYPTION 0x201716e6
/**< Functionality ID for ECC SM2 decryption
* @li 3 input parameters : @link icp_qat_fw_mmp_ecsm2_decryption_input_s::d d
* @endlink @link icp_qat_fw_mmp_ecsm2_decryption_input_s::xpb xpb @endlink
* @link icp_qat_fw_mmp_ecsm2_decryption_input_s::ypb ypb @endlink
* @li 2 output parameters : @link icp_qat_fw_mmp_ecsm2_decryption_output_s::xd
* xd @endlink @link icp_qat_fw_mmp_ecsm2_decryption_output_s::yd yd @endlink
*/
#define PKE_ECSM2_KEYEX_P1 0x220f16be
/**< Functionality ID for ECC SM2 key exchange phase1
* @li 1 input parameters : @link icp_qat_fw_mmp_ecsm2_keyex_p1_input_s::k k
* @endlink
* @li 2 output parameters : @link icp_qat_fw_mmp_ecsm2_keyex_p1_output_s::xd xd
* @endlink @link icp_qat_fw_mmp_ecsm2_keyex_p1_output_s::yd yd @endlink
*/
#define PKE_ECSM2_KEYEX_P2 0x22361768
/**< Functionality ID for ECC SM2 key exchange phase2
* @li 7 input parameters : @link icp_qat_fw_mmp_ecsm2_keyex_p2_input_s::r r
* @endlink @link icp_qat_fw_mmp_ecsm2_keyex_p2_input_s::d d @endlink @link
* icp_qat_fw_mmp_ecsm2_keyex_p2_input_s::x1 x1 @endlink @link
* icp_qat_fw_mmp_ecsm2_keyex_p2_input_s::x2 x2 @endlink @link
* icp_qat_fw_mmp_ecsm2_keyex_p2_input_s::y2 y2 @endlink @link
* icp_qat_fw_mmp_ecsm2_keyex_p2_input_s::xp xp @endlink @link
* icp_qat_fw_mmp_ecsm2_keyex_p2_input_s::yp yp @endlink
* @li 2 output parameters : @link icp_qat_fw_mmp_ecsm2_keyex_p2_output_s::xus
* xus @endlink @link icp_qat_fw_mmp_ecsm2_keyex_p2_output_s::yus yus @endlink
*/
#define POINT_MULTIPLICATION_C25519 0x0a0634c6
/**< Functionality ID for ECC curve25519 Variable Point Multiplication [k]P(x),
* as specified in RFC7748
* @li 2 input parameters : @link
* icp_qat_fw_point_multiplication_c25519_input_s::xp xp @endlink @link
* icp_qat_fw_point_multiplication_c25519_input_s::k k @endlink
* @li 1 output parameters : @link
* icp_qat_fw_point_multiplication_c25519_output_s::xr xr @endlink
*/
#define GENERATOR_MULTIPLICATION_C25519 0x0a0634d6
/**< Functionality ID for ECC curve25519 Generator Point Multiplication [k]G(x),
* as specified in RFC7748
* @li 1 input parameters : @link
* icp_qat_fw_generator_multiplication_c25519_input_s::k k @endlink
* @li 1 output parameters : @link
* icp_qat_fw_generator_multiplication_c25519_output_s::xr xr @endlink
*/
#define POINT_MULTIPLICATION_ED25519 0x100b34e6
/**< Functionality ID for ECC edwards25519 Variable Point Multiplication [k]P,
* as specified in RFC8032
* @li 3 input parameters : @link
* icp_qat_fw_point_multiplication_ed25519_input_s::xp xp @endlink @link
* icp_qat_fw_point_multiplication_ed25519_input_s::yp yp @endlink @link
* icp_qat_fw_point_multiplication_ed25519_input_s::k k @endlink
* @li 2 output parameters : @link
* icp_qat_fw_point_multiplication_ed25519_output_s::xr xr @endlink @link
* icp_qat_fw_point_multiplication_ed25519_output_s::yr yr @endlink
*/
#define GENERATOR_MULTIPLICATION_ED25519 0x100a34f6
/**< Functionality ID for ECC edwards25519 Generator Point Multiplication [k]G,
* as specified in RFC8032
* @li 1 input parameters : @link
* icp_qat_fw_generator_multiplication_ed25519_input_s::k k @endlink
* @li 2 output parameters : @link
* icp_qat_fw_generator_multiplication_ed25519_output_s::xr xr @endlink @link
* icp_qat_fw_generator_multiplication_ed25519_output_s::yr yr @endlink
*/
#define POINT_MULTIPLICATION_C448 0x0c063506
/**< Functionality ID for ECC curve448 Variable Point Multiplication [k]P(x), as
* specified in RFC7748
* @li 2 input parameters : @link
* icp_qat_fw_point_multiplication_c448_input_s::xp xp @endlink @link
* icp_qat_fw_point_multiplication_c448_input_s::k k @endlink
* @li 1 output parameters : @link
* icp_qat_fw_point_multiplication_c448_output_s::xr xr @endlink
*/
#define GENERATOR_MULTIPLICATION_C448 0x0c063516
/**< Functionality ID for ECC curve448 Generator Point Multiplication [k]G(x),
* as specified in RFC7748
* @li 1 input parameters : @link
* icp_qat_fw_generator_multiplication_c448_input_s::k k @endlink
* @li 1 output parameters : @link
* icp_qat_fw_generator_multiplication_c448_output_s::xr xr @endlink
*/
#define POINT_MULTIPLICATION_ED448 0x1a0b3526
/**< Functionality ID for ECC edwards448 Variable Point Multiplication [k]P, as
* specified in RFC8032
* @li 3 input parameters : @link
* icp_qat_fw_point_multiplication_ed448_input_s::xp xp @endlink @link
* icp_qat_fw_point_multiplication_ed448_input_s::yp yp @endlink @link
* icp_qat_fw_point_multiplication_ed448_input_s::k k @endlink
* @li 2 output parameters : @link
* icp_qat_fw_point_multiplication_ed448_output_s::xr xr @endlink @link
* icp_qat_fw_point_multiplication_ed448_output_s::yr yr @endlink
*/
#define GENERATOR_MULTIPLICATION_ED448 0x1a0a3536
/**< Functionality ID for ECC edwards448 Generator Point Multiplication [k]P, as
* specified in RFC8032
* @li 1 input parameters : @link
* icp_qat_fw_generator_multiplication_ed448_input_s::k k @endlink
* @li 2 output parameters : @link
* icp_qat_fw_generator_multiplication_ed448_output_s::xr xr @endlink @link
* icp_qat_fw_generator_multiplication_ed448_output_s::yr yr @endlink
*/
#define PKE_INIT 0x0806169f
/**< Functionality ID for Initialisation sequence
* @li 1 input parameters : @link icp_qat_fw_mmp_init_input_s::z z @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_init_output_s::zz zz @endlink
@ -115,6 +271,22 @@
* icp_qat_fw_mmp_dh_4096_input_s::m m @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_dh_4096_output_s::r r @endlink
*/
#define PKE_DH_G2_8192 0x8d0b3626
/**< Functionality ID for Diffie-Hellman Modular exponentiation base 2 for
* 8192-bit numbers
* @li 2 input parameters : @link icp_qat_fw_mmp_dh_g2_8192_input_s::e e
* @endlink @link icp_qat_fw_mmp_dh_g2_8192_input_s::m m @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_dh_g2_8192_output_s::r r
* @endlink
*/
#define PKE_DH_8192 0xcd0d3636
/**< Functionality ID for Diffie-Hellman Modular exponentiation for 8192-bit
* numbers
* @li 3 input parameters : @link icp_qat_fw_mmp_dh_8192_input_s::g g @endlink
* @link icp_qat_fw_mmp_dh_8192_input_s::e e @endlink @link
* icp_qat_fw_mmp_dh_8192_input_s::m m @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_dh_8192_output_s::r r @endlink
*/
#define PKE_RSA_KP1_512 0x191d1a9a
/**< Functionality ID for RSA 512 key generation first form
* @li 3 input parameters : @link icp_qat_fw_mmp_rsa_kp1_512_input_s::p p
@ -391,6 +563,33 @@
* @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp2_4096_output_s::m m
* @endlink
*/
#define PKE_RSA_EP_8192 0xc31335c6
/**< Functionality ID for RSA 8192 Encryption
* @li 3 input parameters : @link icp_qat_fw_mmp_rsa_ep_8192_input_s::m m
* @endlink @link icp_qat_fw_mmp_rsa_ep_8192_input_s::e e @endlink @link
* icp_qat_fw_mmp_rsa_ep_8192_input_s::n n @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_rsa_ep_8192_output_s::c c
* @endlink
*/
#define PKE_RSA_DP1_8192 0xc31335e6
/**< Functionality ID for RSA 8192 Decryption
* @li 3 input parameters : @link icp_qat_fw_mmp_rsa_dp1_8192_input_s::c c
* @endlink @link icp_qat_fw_mmp_rsa_dp1_8192_input_s::d d @endlink @link
* icp_qat_fw_mmp_rsa_dp1_8192_input_s::n n @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp1_8192_output_s::m m
* @endlink
*/
#define PKE_RSA_DP2_8192 0xc9133606
/**< Functionality ID for RSA 8192 Decryption with CRT
* @li 6 input parameters : @link icp_qat_fw_mmp_rsa_dp2_8192_input_s::c c
* @endlink @link icp_qat_fw_mmp_rsa_dp2_8192_input_s::p p @endlink @link
* icp_qat_fw_mmp_rsa_dp2_8192_input_s::q q @endlink @link
* icp_qat_fw_mmp_rsa_dp2_8192_input_s::dp dp @endlink @link
* icp_qat_fw_mmp_rsa_dp2_8192_input_s::dq dq @endlink @link
* icp_qat_fw_mmp_rsa_dp2_8192_input_s::qinv qinv @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_rsa_dp2_8192_output_s::m m
* @endlink
*/
#define PKE_GCD_PT_192 0x19201fcd
/**< Functionality ID for GCD primality test for 192-bit numbers
* @li 1 input parameters : @link icp_qat_fw_mmp_gcd_pt_192_input_s::m m
@ -677,6 +876,14 @@
* @li 1 output parameters : @link icp_qat_fw_maths_modexp_l4096_output_s::r r
* @endlink
*/
#define MATHS_MODEXP_L8192 0xc50c3646
/**< Functionality ID for Modular exponentiation for numbers up to 8192 bits
* @li 3 input parameters : @link icp_qat_fw_maths_modexp_l8192_input_s::g g
* @endlink @link icp_qat_fw_maths_modexp_l8192_input_s::e e @endlink @link
* icp_qat_fw_maths_modexp_l8192_input_s::m m @endlink
* @li 1 output parameters : @link icp_qat_fw_maths_modexp_l8192_output_s::r r
* @endlink
*/
#define MATHS_MODINV_ODD_L128 0x090623f8
/**< Functionality ID for Modular multiplicative inverse for numbers less than
* 128 bits
@ -765,6 +972,14 @@
* @li 1 output parameters : @link icp_qat_fw_maths_modinv_odd_l4096_output_s::c
* c @endlink
*/
#define MATHS_MODINV_ODD_L8192 0x88073656
/**< Functionality ID for Modular multiplicative inverse for numbers up to 8192
* bits
* @li 2 input parameters : @link icp_qat_fw_maths_modinv_odd_l8192_input_s::a a
* @endlink @link icp_qat_fw_maths_modinv_odd_l8192_input_s::b b @endlink
* @li 1 output parameters : @link icp_qat_fw_maths_modinv_odd_l8192_output_s::c
* c @endlink
*/
#define MATHS_MODINV_EVEN_L128 0x0906243a
/**< Functionality ID for Modular multiplicative inverse for numbers less than
* 128 bits
@ -853,6 +1068,14 @@
* @li 1 output parameters : @link
* icp_qat_fw_maths_modinv_even_l4096_output_s::c c @endlink
*/
#define MATHS_MODINV_EVEN_L8192 0xc80d3666
/**< Functionality ID for Modular multiplicative inverse for numbers up to 8192
* bits
* @li 2 input parameters : @link icp_qat_fw_maths_modinv_even_l8192_input_s::a
* a @endlink @link icp_qat_fw_maths_modinv_even_l8192_input_s::b b @endlink
* @li 1 output parameters : @link
* icp_qat_fw_maths_modinv_even_l8192_output_s::c c @endlink
*/
#define PKE_DSA_GEN_P_1024_160 0x381824a4
/**< Functionality ID for DSA parameter generation P
* @li 2 input parameters : @link icp_qat_fw_mmp_dsa_gen_p_1024_160_input_s::x x
@ -1461,81 +1684,60 @@
* icp_qat_fw_maths_point_verify_gfp_521_input_s::b b @endlink
* @li no output parameters
*/
#define POINT_MULTIPLICATION_C25519 0x0a0634c6
/**< Functionality ID for ECC curve25519 Variable Point Multiplication [k]P(x),
* as specified in RFC7748
* @li 2 input parameters : @link
* icp_qat_fw_point_multiplication_c25519_input_s::xp xp @endlink @link
* icp_qat_fw_point_multiplication_c25519_input_s::k k @endlink
* @li 1 output parameters : @link
* icp_qat_fw_point_multiplication_c25519_output_s::xr xr @endlink
*/
#define GENERATOR_MULTIPLICATION_C25519 0x0a0634d6
/**< Functionality ID for ECC curve25519 Generator Point Multiplication [k]G(x),
* as specified in RFC7748
* @li 1 input parameters : @link
* icp_qat_fw_generator_multiplication_c25519_input_s::k k @endlink
* @li 1 output parameters : @link
* icp_qat_fw_generator_multiplication_c25519_output_s::xr xr @endlink
*/
#define POINT_MULTIPLICATION_ED25519 0x100b34e6
/**< Functionality ID for ECC edwards25519 Variable Point Multiplication [k]P,
* as specified in RFC8032
#define PKE_EC_POINT_MULTIPLICATION_P256 0x0a083546
/**< Functionality ID for ECC P256 Variable Point Multiplication [k]P(x)
* @li 3 input parameters : @link
* icp_qat_fw_point_multiplication_ed25519_input_s::xp xp @endlink @link
* icp_qat_fw_point_multiplication_ed25519_input_s::yp yp @endlink @link
* icp_qat_fw_point_multiplication_ed25519_input_s::k k @endlink
* icp_qat_fw_mmp_ec_p256_point_multiplication_input_s::xp xp @endlink @link
* icp_qat_fw_mmp_ec_p256_point_multiplication_input_s::yp yp @endlink @link
* icp_qat_fw_mmp_ec_p256_point_multiplication_input_s::k k @endlink
* @li 2 output parameters : @link
* icp_qat_fw_point_multiplication_ed25519_output_s::xr xr @endlink @link
* icp_qat_fw_point_multiplication_ed25519_output_s::yr yr @endlink
* icp_qat_fw_mmp_ec_p256_point_multiplication_output_s::xr xr @endlink @link
* icp_qat_fw_mmp_ec_p256_point_multiplication_output_s::yr yr @endlink
*/
#define GENERATOR_MULTIPLICATION_ED25519 0x100a34f6
/**< Functionality ID for ECC edwards25519 Generator Point Multiplication [k]G,
* as specified in RFC8032
#define PKE_EC_GENERATOR_MULTIPLICATION_P256 0x12073556
/**< Functionality ID for ECC P256 Generator Point Multiplication [k]G(x)
* @li 1 input parameters : @link
* icp_qat_fw_generator_multiplication_ed25519_input_s::k k @endlink
* icp_qat_fw_mmp_ec_p256_generator_multiplication_input_s::k k @endlink
* @li 2 output parameters : @link
* icp_qat_fw_generator_multiplication_ed25519_output_s::xr xr @endlink @link
* icp_qat_fw_generator_multiplication_ed25519_output_s::yr yr @endlink
* icp_qat_fw_mmp_ec_p256_generator_multiplication_output_s::xr xr @endlink
* @link icp_qat_fw_mmp_ec_p256_generator_multiplication_output_s::yr yr
* @endlink
*/
#define POINT_MULTIPLICATION_C448 0x0c063506
/**< Functionality ID for ECC curve448 Variable Point Multiplication [k]P(x), as
* specified in RFC7748
* @li 2 input parameters : @link
* icp_qat_fw_point_multiplication_c448_input_s::xp xp @endlink @link
* icp_qat_fw_point_multiplication_c448_input_s::k k @endlink
* @li 1 output parameters : @link
* icp_qat_fw_point_multiplication_c448_output_s::xr xr @endlink
#define PKE_ECDSA_SIGN_RS_P256 0x18133566
/**< Functionality ID for ECC P256 ECDSA Sign RS
* @li 3 input parameters : @link icp_qat_fw_mmp_ecdsa_sign_rs_p256_input_s::k k
* @endlink @link icp_qat_fw_mmp_ecdsa_sign_rs_p256_input_s::e e @endlink @link
* icp_qat_fw_mmp_ecdsa_sign_rs_p256_input_s::d d @endlink
* @li 2 output parameters : @link icp_qat_fw_mmp_ecdsa_sign_rs_p256_output_s::r
* r @endlink @link icp_qat_fw_mmp_ecdsa_sign_rs_p256_output_s::s s @endlink
*/
#define GENERATOR_MULTIPLICATION_C448 0x0c063516
/**< Functionality ID for ECC curve448 Generator Point Multiplication [k]G(x),
* as specified in RFC7748
* @li 1 input parameters : @link
* icp_qat_fw_generator_multiplication_c448_input_s::k k @endlink
* @li 1 output parameters : @link
* icp_qat_fw_generator_multiplication_c448_output_s::xr xr @endlink
*/
#define POINT_MULTIPLICATION_ED448 0x1a0b3526
/**< Functionality ID for ECC edwards448 Variable Point Multiplication [k]P, as
* specified in RFC8032
#define PKE_EC_POINT_MULTIPLICATION_P384 0x0b083586
/**< Functionality ID for ECC P384 Variable Point Multiplication [k]P(x)
* @li 3 input parameters : @link
* icp_qat_fw_point_multiplication_ed448_input_s::xp xp @endlink @link
* icp_qat_fw_point_multiplication_ed448_input_s::yp yp @endlink @link
* icp_qat_fw_point_multiplication_ed448_input_s::k k @endlink
* icp_qat_fw_mmp_ec_p384_point_multiplication_input_s::xp xp @endlink @link
* icp_qat_fw_mmp_ec_p384_point_multiplication_input_s::yp yp @endlink @link
* icp_qat_fw_mmp_ec_p384_point_multiplication_input_s::k k @endlink
* @li 2 output parameters : @link
* icp_qat_fw_point_multiplication_ed448_output_s::xr xr @endlink @link
* icp_qat_fw_point_multiplication_ed448_output_s::yr yr @endlink
* icp_qat_fw_mmp_ec_p384_point_multiplication_output_s::xr xr @endlink @link
* icp_qat_fw_mmp_ec_p384_point_multiplication_output_s::yr yr @endlink
*/
#define GENERATOR_MULTIPLICATION_ED448 0x1a0a3536
/**< Functionality ID for ECC edwards448 Generator Point Multiplication [k]P, as
* specified in RFC8032
#define PKE_EC_GENERATOR_MULTIPLICATION_P384 0x0b073596
/**< Functionality ID for ECC P384 Generator Point Multiplication [k]G(x)
* @li 1 input parameters : @link
* icp_qat_fw_generator_multiplication_ed448_input_s::k k @endlink
* icp_qat_fw_mmp_ec_p384_generator_multiplication_input_s::k k @endlink
* @li 2 output parameters : @link
* icp_qat_fw_generator_multiplication_ed448_output_s::xr xr @endlink @link
* icp_qat_fw_generator_multiplication_ed448_output_s::yr yr @endlink
* icp_qat_fw_mmp_ec_p384_generator_multiplication_output_s::xr xr @endlink
* @link icp_qat_fw_mmp_ec_p384_generator_multiplication_output_s::yr yr
* @endlink
*/
#define PKE_ECDSA_SIGN_RS_P384 0x1a1335a6
/**< Functionality ID for ECC P384 ECDSA Sign RS
* @li 3 input parameters : @link icp_qat_fw_mmp_ecdsa_sign_rs_p384_input_s::k k
* @endlink @link icp_qat_fw_mmp_ecdsa_sign_rs_p384_input_s::e e @endlink @link
* icp_qat_fw_mmp_ecdsa_sign_rs_p384_input_s::d d @endlink
* @li 2 output parameters : @link icp_qat_fw_mmp_ecdsa_sign_rs_p384_output_s::r
* r @endlink @link icp_qat_fw_mmp_ecdsa_sign_rs_p384_output_s::s s @endlink
*/
#define PKE_LIVENESS 0x00000001
/**< Functionality ID for PKE_LIVENESS
* @li 0 input parameter(s)
@ -1544,12 +1746,186 @@
#define PKE_INTERFACE_SIGNATURE 0x972ded54
/**< Encoded signature of the interface specifications
*/
#define PKE_INVALID_FUNC_ID 0xffffffff
#define PKE_KPT_ECDSA_SIGN_RS_P521 0xb6563896
/**< Functionality ID for ECC P521 ECDSA Sign RS
* @li 3 input parameters : @link
* icp_qat_fw_mmp_kpt_ecdsa_sign_rs_p521_input_s::kpt_wrapped kpt_wrapped
* @endlink @link
* icp_qat_fw_mmp_kpt_ecdsa_sign_rs_p521_input_s::kpt_wrapping_context
* kpt_wrapping_context @endlink @link
* icp_qat_fw_mmp_kpt_ecdsa_sign_rs_p521_input_s::e e @endlink
* @li 2 output parameters : @link
* icp_qat_fw_mmp_kpt_ecdsa_sign_rs_p521_output_s::r r @endlink @link
* icp_qat_fw_mmp_kpt_ecdsa_sign_rs_p521_output_s::s s @endlink
*/
#define PKE_KPT_ECDSA_SIGN_RS_P384 0x22143876
/**< Functionality ID for ECC P384 ECDSA Sign RS
* @li 3 input parameters : @link
* icp_qat_fw_mmp_kpt_ecdsa_sign_rs_p384_input_s::kpt_wrapped kpt_wrapped
* @endlink @link
* icp_qat_fw_mmp_kpt_ecdsa_sign_rs_p384_input_s::kpt_wrapping_context
* kpt_wrapping_context @endlink @link
* icp_qat_fw_mmp_kpt_ecdsa_sign_rs_p384_input_s::e e @endlink
* @li 2 output parameters : @link
* icp_qat_fw_mmp_kpt_ecdsa_sign_rs_p384_output_s::r r @endlink @link
* icp_qat_fw_mmp_kpt_ecdsa_sign_rs_p384_output_s::s s @endlink
*/
#define PKE_KPT_ECDSA_SIGN_RS_P256 0x8d153856
/**< Functionality ID for ECC KPT P256 ECDSA Sign RS
* @li 3 input parameters : @link
* icp_qat_fw_mmp_kpt_ecdsa_sign_rs_p256_input_s::kpt_wrapped kpt_wrapped
* @endlink @link
* icp_qat_fw_mmp_kpt_ecdsa_sign_rs_p256_input_s::key_unwrap_context
* key_unwrap_context @endlink @link
* icp_qat_fw_mmp_kpt_ecdsa_sign_rs_p256_input_s::e e @endlink
* @li 2 output parameters : @link
* icp_qat_fw_mmp_kpt_ecdsa_sign_rs_p256_output_s::r r @endlink @link
* icp_qat_fw_mmp_kpt_ecdsa_sign_rs_p256_output_s::s s @endlink
*/
#define PKE_KPT_RSA_DP1_512 0x1b1c3696
/**< Functionality ID for KPT RSA 512 Decryption
* @li 3 input parameters : @link icp_qat_fw_mmp_kpt_rsa_dp1_512_input_s::c c
* @endlink @link icp_qat_fw_mmp_kpt_rsa_dp1_512_input_s::kpt_wrapped
* kpt_wrapped @endlink @link
* icp_qat_fw_mmp_kpt_rsa_dp1_512_input_s::kpt_unwrap_context kpt_unwrap_context
* @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_kpt_rsa_dp1_512_output_s::m m
* @endlink
*/
#define PKE_KPT_RSA_DP1_1024 0x2d1d36b6
/**< Functionality ID for KPT RSA 1024 Decryption
* @li 3 input parameters : @link icp_qat_fw_mmp_kpt_rsa_dp1_1024_input_s::c c
* @endlink @link icp_qat_fw_mmp_kpt_rsa_dp1_1024_input_s::kpt_wrapped
* kpt_wrapped @endlink @link
* icp_qat_fw_mmp_kpt_rsa_dp1_1024_input_s::kpt_unwrap_context
* kpt_unwrap_context @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_kpt_rsa_dp1_1024_output_s::m m
* @endlink
*/
#define PKE_KPT_RSA_DP1_1536 0x451d36d6
/**< Functionality ID for KPT RSA 1536 Decryption
* @li 3 input parameters : @link icp_qat_fw_mmp_kpt_rsa_dp1_1536_input_s::c c
* @endlink @link icp_qat_fw_mmp_kpt_rsa_dp1_1536_input_s::kpt_wrapped
* kpt_wrapped @endlink @link
* icp_qat_fw_mmp_kpt_rsa_dp1_1536_input_s::kpt_unwrap_context
* kpt_unwrap_context @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_kpt_rsa_dp1_1536_output_s::m m
* @endlink
*/
#define PKE_KPT_RSA_DP1_2048 0x661936f6
/**< Functionality ID for KPT RSA 2048 Decryption
* @li 3 input parameters : @link icp_qat_fw_mmp_kpt_rsa_dp1_2048_input_s::c c
* @endlink @link icp_qat_fw_mmp_kpt_rsa_dp1_2048_input_s::kpt_wrapped
* kpt_wrapped @endlink @link
* icp_qat_fw_mmp_kpt_rsa_dp1_2048_input_s::kpt_unwrap_context
* kpt_unwrap_context @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_kpt_rsa_dp1_2048_output_s::m m
* @endlink
*/
#define PKE_KPT_RSA_DP1_3072 0x751d3716
/**< Functionality ID for KPT RSA 3072 Decryption
* @li 3 input parameters : @link icp_qat_fw_mmp_kpt_rsa_dp1_3072_input_s::c c
* @endlink @link icp_qat_fw_mmp_kpt_rsa_dp1_3072_input_s::kpt_wrapped
* kpt_wrapped @endlink @link
* icp_qat_fw_mmp_kpt_rsa_dp1_3072_input_s::kpt_unwrap_context
* kpt_unwrap_context @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_kpt_rsa_dp1_3072_output_s::m m
* @endlink
*/
#define PKE_KPT_RSA_DP1_4096 0x9d1d3736
/**< Functionality ID for KPT RSA 4096 Decryption
* @li 3 input parameters : @link icp_qat_fw_mmp_kpt_rsa_dp1_4096_input_s::c c
* @endlink @link icp_qat_fw_mmp_kpt_rsa_dp1_4096_input_s::kpt_wrapped
* kpt_wrapped @endlink @link
* icp_qat_fw_mmp_kpt_rsa_dp1_4096_input_s::kpt_unwrap_context
* kpt_unwrap_context @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_kpt_rsa_dp1_4096_output_s::m m
* @endlink
*/
#define PKE_KPT_RSA_DP1_8192 0xbe203756
/**< Functionality ID for KPT RSA 8192 Decryption
* @li 3 input parameters : @link icp_qat_fw_mmp_kpt_rsa_dp1_8192_input_s::c c
* @endlink @link icp_qat_fw_mmp_kpt_rsa_dp1_8192_input_s::kpt_wrapped
* kpt_wrapped @endlink @link
* icp_qat_fw_mmp_kpt_rsa_dp1_8192_input_s::kpt_unwrap_context
* kpt_unwrap_context @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_kpt_rsa_dp1_8192_output_s::m m
* @endlink
*/
#define PKE_KPT_RSA_DP2_512 0x241d3776
/**< Functionality ID for RSA 512 decryption second form
* @li 3 input parameters : @link icp_qat_fw_mmp_kpt_rsa_dp2_512_input_s::c c
* @endlink @link icp_qat_fw_mmp_kpt_rsa_dp2_512_input_s::kpt_wrapped
* kpt_wrapped @endlink @link
* icp_qat_fw_mmp_kpt_rsa_dp2_512_input_s::kpt_unwrap_context kpt_unwrap_context
* @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_kpt_rsa_dp2_512_output_s::m m
* @endlink
*/
#define PKE_KPT_RSA_DP2_1024 0x4e1d3796
/**< Functionality ID for RSA 1024 Decryption with CRT
* @li 3 input parameters : @link icp_qat_fw_mmp_kpt_rsa_dp2_1024_input_s::c c
* @endlink @link icp_qat_fw_mmp_kpt_rsa_dp2_1024_input_s::kpt_wrapped
* kpt_wrapped @endlink @link
* icp_qat_fw_mmp_kpt_rsa_dp2_1024_input_s::kpt_unwrap_context
* kpt_unwrap_context @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_kpt_rsa_dp2_1024_output_s::m m
* @endlink
*/
#define PKE_KPT_RSA_DP2_1536 0x762b37b6
/**< Functionality ID for KPT RSA 1536 Decryption with CRT
* @li 3 input parameters : @link icp_qat_fw_mmp_kpt_rsa_dp2_1536_input_s::c c
* @endlink @link icp_qat_fw_mmp_kpt_rsa_dp2_1536_input_s::kpt_wrapped
* kpt_wrapped @endlink @link
* icp_qat_fw_mmp_kpt_rsa_dp2_1536_input_s::kpt_unwrap_context
* kpt_unwrap_context @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_kpt_rsa_dp2_1536_output_s::m m
* @endlink
*/
#define PKE_KPT_RSA_DP2_2048 0xa41a37d6
/**< Functionality ID for RSA 2048 Decryption with CRT
* @li 3 input parameters : @link icp_qat_fw_mmp_kpt_rsa_dp2_2048_input_s::c c
* @endlink @link icp_qat_fw_mmp_kpt_rsa_dp2_2048_input_s::kpt_wrapped
* kpt_wrapped @endlink @link
* icp_qat_fw_mmp_kpt_rsa_dp2_2048_input_s::kpt_unwrap_context
* kpt_unwrap_context @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_kpt_rsa_dp2_2048_output_s::m m
* @endlink
*/
#define PKE_KPT_RSA_DP2_3072 0xd41a37f6
/**< Functionality ID for KPT RSA 3072 Decryption with CRT
* @li 3 input parameters : @link icp_qat_fw_mmp_kpt_rsa_dp2_3072_input_s::c c
* @endlink @link icp_qat_fw_mmp_kpt_rsa_dp2_3072_input_s::kpt_wrapped
* kpt_wrapped @endlink @link
* icp_qat_fw_mmp_kpt_rsa_dp2_3072_input_s::kpt_unwrap_context
* kpt_unwrap_context @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_kpt_rsa_dp2_3072_output_s::m m
* @endlink
*/
#define PKE_KPT_RSA_DP2_4096 0xd22a3816
/**< Functionality ID for RSA 4096 Decryption with CRT
* @li 3 input parameters : @link icp_qat_fw_mmp_kpt_rsa_dp2_4096_input_s::c c
* @endlink @link icp_qat_fw_mmp_kpt_rsa_dp2_4096_input_s::kpt_wrapped
* kpt_wrapped @endlink @link
* icp_qat_fw_mmp_kpt_rsa_dp2_4096_input_s::kpt_unwrap_context
* kpt_unwrap_context @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_kpt_rsa_dp2_4096_output_s::m m
* @endlink
*/
#define PKE_KPT_RSA_DP2_8192 0xae383836
/**< Functionality ID for RSA 8192 Decryption with CRT
* @li 3 input parameters : @link icp_qat_fw_mmp_kpt_rsa_dp2_8192_input_s::c c
* @endlink @link icp_qat_fw_mmp_kpt_rsa_dp2_8192_input_s::kpt_wrapped
* kpt_wrapped @endlink @link
* icp_qat_fw_mmp_kpt_rsa_dp2_8192_input_s::kpt_unwrap_context
* kpt_unwrap_context @endlink
* @li 1 output parameters : @link icp_qat_fw_mmp_kpt_rsa_dp2_8192_output_s::m m
* @endlink
*/
#endif /* __ICP_QAT_FW_MMP_IDS__ */
/* --- (Automatically generated (relocation v. 1.3), do not modify manually) ---
*/
/* --- (Automatically generated (relocation v. 1.3), do not modify manually) --- */
/* --- end of file --- */

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -418,8 +418,12 @@ typedef enum _CpaAccelerationServiceType
/**< RAID */
CPA_ACC_SVC_TYPE_XML = CPA_INSTANCE_TYPE_XML,
/**< XML */
CPA_ACC_SVC_TYPE_VIDEO_ANALYTICS
CPA_ACC_SVC_TYPE_VIDEO_ANALYTICS,
/**< Video Analytics */
CPA_ACC_SVC_TYPE_CRYPTO_ASYM,
/**< Cryptography - Asymmetric service */
CPA_ACC_SVC_TYPE_CRYPTO_SYM
/**< Cryptography - Symmetric service */
} CpaAccelerationServiceType;
/**
@ -586,7 +590,7 @@ typedef struct _CpaInstanceInfo2 {
CpaPhysicalInstanceId physInstId;
/**< Identifies the "physical instance" of the accelerator. */
#define CPA_MAX_CORES 256
#define CPA_MAX_CORES 4096
/**< Maximum number of cores to support in the coreAffinity bitmap. */
CPA_BITMAP(coreAffinity, CPA_MAX_CORES);
/**< A bitmap identifying the core or cores to which the instance
@ -670,6 +674,124 @@ typedef enum _CpaInstanceEvent
*/
} CpaInstanceEvent;
/*****************************************************************************/
/* CPA Instance Management Functions */
/*****************************************************************************/
/**
*****************************************************************************
* @file cpa.h
* @ingroup cpa
* Get the number of Acceleration Service instances that are supported by
* the API implementation.
*
* @description
* This function will get the number of instances that are supported
* for the required Acceleration Service by an implementation of the CPA
* API. This number is then used to determine the size of the array that
* must be passed to @ref cpaGetInstances().
*
* @context
* This function MUST NOT be called from an interrupt context as it MAY
* sleep.
* @assumptions
* None
* @sideEffects
* None
* @blocking
* This function is synchronous and blocking.
* @reentrant
* No
* @threadSafe
* Yes
*
* @param[in] accelerationServiceType Acceleration Service required
* @param[out] pNumInstances Pointer to where the number of
* instances will be written.
*
* @retval CPA_STATUS_SUCCESS Function executed successfully.
* @retval CPA_STATUS_FAIL Function failed.
* @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in.
* @retval CPA_STATUS_UNSUPPORTED Function is not supported.
*
* @pre
* None
* @post
* None
* @note
* This function operates in a synchronous manner and no asynchronous
* callback will be generated
*
* @see
* cpaGetInstances
*
*****************************************************************************/
CpaStatus
cpaGetNumInstances(
const CpaAccelerationServiceType accelerationServiceType,
Cpa16U *pNumInstances);
/**
*****************************************************************************
* @file cpa.h
* @ingroup cpa
* Get the handles to the required Acceleration Service instances that are
* supported by the API implementation.
*
* @description
* This function will return handles to the required Acceleration Service
* instances that are supported by an implementation of the CPA API. These
* instance handles can then be used as input parameters with other
* API functions.
*
* This function will populate an array that has been allocated by the
* caller. The size of this array will have been determined by the
* cpaGetNumInstances() function.
*
* @context
* This function MUST NOT be called from an interrupt context as it MAY
* sleep.
* @assumptions
* None
* @sideEffects
* None
* @blocking
* This function is synchronous and blocking.
* @reentrant
* No
* @threadSafe
* Yes
*
* @param[in] accelerationServiceType Acceleration Service requested
* @param[in] numInstances Size of the array. If the value is
* greater than the number of instances
* supported, then an error (@ref
* CPA_STATUS_INVALID_PARAM) is returned.
* @param[in,out] cpaInstances Pointer to where the instance
* handles will be written.
*
* @retval CPA_STATUS_SUCCESS Function executed successfully.
* @retval CPA_STATUS_FAIL Function failed.
* @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in.
* @retval CPA_STATUS_UNSUPPORTED Function is not supported.
*
* @pre
* None
* @post
* None
* @note
* This function operates in a synchronous manner and no asynchronous
* callback will be generated
*
* @see
* cpaGetNumInstances
*
*****************************************************************************/
CpaStatus
cpaGetInstances(
const CpaAccelerationServiceType accelerationServiceType,
Cpa16U numInstances,
CpaInstanceHandle *cpaInstances);
#ifdef __cplusplus
} /* close the extern "C" { */
#endif

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -132,21 +132,6 @@ typedef int64_t Cpa64S;
* NULL definition. */
#endif
#ifndef TRUE
#define TRUE (1==1)
/**<
* @file cpa_types.h
* @ingroup cpa_Types
* True value definition. */
#endif
#ifndef FALSE
#define FALSE (0==1)
/**<
* @file cpa_types.h
* @ingroup cpa_Types
* False value definition. */
#endif
/**
*****************************************************************************
* @ingroup cpa_Types
@ -159,8 +144,8 @@ typedef int64_t Cpa64S;
*****************************************************************************/
typedef enum _CpaBoolean
{
CPA_FALSE = FALSE, /**< False value */
CPA_TRUE = TRUE /**< True value */
CPA_FALSE = (0==1), /**< False value */
CPA_TRUE = (1==1) /**< True value */
} CpaBoolean;

File diff suppressed because it is too large Load diff

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View file

@ -0,0 +1,659 @@
/****************************************************************************
*
* BSD LICENSE
*
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
***************************************************************************/
/*
*****************************************************************************
* Doxygen group definitions
****************************************************************************/
/**
*****************************************************************************
* @file cpa_dc_chain.h
*
* @defgroup cpaDcChain Data Compression Chaining API
*
* @ingroup cpaDc
*
* @description
* These functions specify the API for Data Compression Chaining operations.
*
* @remarks
*
*
*****************************************************************************/
#ifndef CPA_DC_CHAIN_H
#define CPA_DC_CHAIN_H
#ifdef __cplusplus
extern"C" {
#endif
#include "cpa_dc.h"
#include "cpa_cy_sym.h"
/**
 *****************************************************************************
 * @ingroup cpaDcChain
 *      Supported operations for compression chaining
 *
 * @description
 *      This enumeration lists the supported operations for compression
 *      chaining. Each value names the chained operations in the order in
 *      which they are performed, and documents the number and order of
 *      entries required in the CpaDcChainSessionSetupData array passed at
 *      session initialization.
 *
 *****************************************************************************/
typedef enum _CpaDcChainOperations
{
CPA_DC_CHAIN_COMPRESS_THEN_HASH,
/**< 2 operations for chaining:
 * 1st operation is to perform compression on plain text
 * 2nd operation is to perform hash on compressed text
 *
 * 2 entries in CpaDcChainSessionSetupData array:
 * 1st entry is for compression setup data
 * 2nd entry is for hash setup data */
CPA_DC_CHAIN_COMPRESS_THEN_ENCRYPT,
/**< 2 operations for chaining:
 * 1st operation is to perform compression on plain text
 * 2nd operation is to perform encryption on compressed text
 *
 * 2 entries in CpaDcChainSessionSetupData array:
 * 1st entry is for compression setup data
 * 2nd entry is for encryption setup data */
CPA_DC_CHAIN_COMPRESS_THEN_HASH_ENCRYPT,
/**< 2 operations for chaining:
 * 1st operation is to perform compression on plain text
 * 2nd operation is to perform hash on compressed text and
 * encryption on compressed text
 *
 * 2 entries in CpaDcChainSessionSetupData array:
 * 1st entry is for compression setup data
 * 2nd entry is for hash and encryption setup data */
CPA_DC_CHAIN_COMPRESS_THEN_ENCRYPT_HASH,
/**< 2 operations for chaining:
 * 1st operation is to perform compression on plain text
 * 2nd operation is to perform encryption on compressed text and
 * hash on compressed & encrypted text
 *
 * 2 entries in CpaDcChainSessionSetupData array:
 * 1st entry is for compression setup data
 * 2nd entry is for encryption and hash setup data */
CPA_DC_CHAIN_COMPRESS_THEN_AEAD,
/**< 2 operations for chaining:
 * 1st operation is to perform compression on plain text
 * 2nd operation is to perform AEAD encryption on compressed text
 *
 * 2 entries in CpaDcChainSessionSetupData array:
 * 1st entry is for compression setup data
 * 2nd entry is for AEAD encryption setup data */
CPA_DC_CHAIN_HASH_THEN_COMPRESS,
/**< 2 operations for chaining:
 * 1st operation is to perform hash on plain text
 * 2nd operation is to perform compression on plain text
 *
 * 2 entries in CpaDcChainSessionSetupData array:
 * 1st entry is for hash setup data
 * 2nd entry is for compression setup data */
CPA_DC_CHAIN_HASH_VERIFY_THEN_DECOMPRESS,
/**< 2 operations for chaining:
 * 1st operation is to perform hash verify on compressed text
 * 2nd operation is to perform decompression on compressed text
 *
 * 2 entries in CpaDcChainSessionSetupData array:
 * 1st entry is for hash setup data
 * 2nd entry is for decompression setup data */
CPA_DC_CHAIN_DECRYPT_THEN_DECOMPRESS,
/**< 2 operations for chaining:
 * 1st operation is to perform decryption on compressed & encrypted text
 * 2nd operation is to perform decompression on compressed text
 *
 * 2 entries in CpaDcChainSessionSetupData array:
 * 1st entry is for decryption setup data
 * 2nd entry is for decompression setup data */
CPA_DC_CHAIN_HASH_VERIFY_DECRYPT_THEN_DECOMPRESS,
/**< 2 operations for chaining:
 * 1st operation is to perform hash verify on compressed & encrypted text
 * and decryption on compressed & encrypted text
 * 2nd operation is to perform decompression on compressed text
 *
 * 2 entries in CpaDcChainSessionSetupData array:
 * 1st entry is for hash and decryption setup data
 * 2nd entry is for decompression setup data */
CPA_DC_CHAIN_DECRYPT_HASH_VERIFY_THEN_DECOMPRESS,
/**< 2 operations for chaining:
 * 1st operation is to perform decryption on compressed & encrypted text
 * and hash verify on compressed text
 * 2nd operation is to perform decompression on compressed text
 *
 * 2 entries in CpaDcChainSessionSetupData array:
 * 1st entry is for decryption and hash setup data
 * 2nd entry is for decompression setup data */
CPA_DC_CHAIN_AEAD_THEN_DECOMPRESS,
/**< 2 operations for chaining:
 * 1st operation is to perform AEAD decryption on compressed & encrypted text
 * 2nd operation is to perform decompression on compressed text
 *
 * 2 entries in CpaDcChainSessionSetupData array:
 * 1st entry is for AEAD decryption setup data
 * 2nd entry is for decompression setup data */
CPA_DC_CHAIN_DECOMPRESS_THEN_HASH_VERIFY,
/**< 2 operations for chaining:
 * 1st operation is to perform decompression on compressed text
 * 2nd operation is to perform hash verify on plain text
 *
 * 2 entries in CpaDcChainSessionSetupData array:
 * 1st entry is for decompression setup data
 * 2nd entry is for hash setup data */
CPA_DC_CHAIN_COMPRESS_THEN_AEAD_THEN_HASH,
/**< 3 operations for chaining:
 * 1st operation is to perform compression on plain text
 * 2nd operation is to perform AEAD encryption on compressed text
 * 3rd operation is to perform hash on compressed & encrypted text
 *
 * 3 entries in CpaDcChainSessionSetupData array:
 * 1st entry is for compression setup data
 * 2nd entry is for AEAD encryption setup data
 * 3rd entry is for hash setup data */
} CpaDcChainOperations;
/**
 *****************************************************************************
 * @ingroup cpaDcChain
 *      Supported session types for data compression chaining.
 *
 * @description
 *      This enumeration lists the supported session types
 *      for data compression chaining. The type selects which member of the
 *      setup-data union in CpaDcChainSessionSetupData is valid.
 *****************************************************************************/
typedef enum _CpaDcChainSessionType
{
CPA_DC_CHAIN_COMPRESS_DECOMPRESS,
/**< Indicates the session is for compression or decompression */
CPA_DC_CHAIN_SYMMETRIC_CRYPTO,
/**< Indicates the session is for symmetric crypto */
} CpaDcChainSessionType;
/**
 *****************************************************************************
 * @ingroup cpaDcChain
 *      Chaining Session Setup Data.
 * @description
 *      This structure contains data relating to set up chaining sessions. The
 *      client needs to complete the information in this structure in order to
 *      setup chaining sessions. The sessType field selects which member of
 *      the union is valid.
 *
 ****************************************************************************/
typedef struct _CpaDcChainSessionSetupData {
CpaDcChainSessionType sessType;
/**< Indicates the type for this session */
union {
CpaDcSessionSetupData *pDcSetupData;
/**< Pointer to compression session setup data; valid when sessType is
 * CPA_DC_CHAIN_COMPRESS_DECOMPRESS */
CpaCySymSessionSetupData *pCySetupData;
/**< Pointer to symmetric crypto session setup data; valid when sessType
 * is CPA_DC_CHAIN_SYMMETRIC_CRYPTO */
};
} CpaDcChainSessionSetupData;
/**
 *****************************************************************************
 * @ingroup cpaDcChain
 *      Compression chaining request input parameters.
 * @description
 *      This structure contains the request information to use with
 *      compression chaining operations. The opType field selects which
 *      member of the union is valid.
 *
 ****************************************************************************/
typedef struct _CpaDcChainOpData {
CpaDcChainSessionType opType;
/**< Indicates the type for this operation */
union {
CpaDcOpData *pDcOp;
/**< Pointer to compression operation data; valid when opType is
 * CPA_DC_CHAIN_COMPRESS_DECOMPRESS */
CpaCySymOpData *pCySymOp;
/**< Pointer to symmetric crypto operation data; valid when opType is
 * CPA_DC_CHAIN_SYMMETRIC_CRYPTO */
};
} CpaDcChainOpData;
/**
 *****************************************************************************
 * @ingroup cpaDcChain
 *      Chaining request results data
 * @description
 *      This structure contains the results of a chaining request, combining
 *      status and output details from both the compression and the
 *      symmetric crypto accelerators.
 *
 ****************************************************************************/
typedef struct _CpaDcChainRqResults {
CpaDcReqStatus dcStatus;
/**< Additional status details from compression accelerator */
CpaStatus cyStatus;
/**< Additional status details from symmetric crypto accelerator */
CpaBoolean verifyResult;
/**< This parameter is valid when the verifyDigest option is set in the
 * CpaCySymSessionSetupData structure. A value of CPA_TRUE indicates
 * that the compare succeeded. A value of CPA_FALSE indicates that the
 * compare failed */
Cpa32U produced;
/**< Octets produced to the output buffer */
Cpa32U consumed;
/**< Octets consumed from the input buffer */
Cpa32U crc32;
/**< crc32 checksum produced by chaining operations */
Cpa32U adler32;
/**< adler32 checksum produced by chaining operations */
} CpaDcChainRqResults;
/**
 *****************************************************************************
 * @ingroup cpaDcChain
 *      Get the size of the memory required to hold the chaining sessions
 *      information.
 *
 * @description
 *      The client of the Data Compression API is responsible for
 *      allocating sufficient memory to hold chaining sessions information.
 *      This function provides a way for determining the size of chaining
 *      sessions.
 *
 * @context
 *      No restrictions
 * @assumptions
 *      None
 * @sideEffects
 *      None
 * @blocking
 *      No
 * @reentrant
 *      No
 * @threadSafe
 *      Yes
 *
 * @param[in]   dcInstance      Instance handle.
 * @param[in]   operation       The operation for chaining
 * @param[in]   numSessions     Number of sessions for the chaining
 * @param[in]   pSessionData    Pointer to an array of
 *                              CpaDcChainSessionSetupData structures.
 *                              There should be numSessions entries in
 *                              the array.
 * @param[out]  pSessionSize    On return, this parameter will be the size
 *                              of the memory that will be required by
 *                              cpaDcChainInitSession() for session data.
 *
 * @retval CPA_STATUS_SUCCESS        Function executed successfully.
 * @retval CPA_STATUS_FAIL           Function failed.
 * @retval CPA_STATUS_INVALID_PARAM  Invalid parameter passed in.
 * @retval CPA_STATUS_UNSUPPORTED    Function is not supported.
 *
 * @pre
 *      None
 * @post
 *      None
 * @note
 *      Only a synchronous version of this function is provided.
 *
 * @see
 *      cpaDcChainInitSession()
 *
 *****************************************************************************/
CpaStatus
cpaDcChainGetSessionSize(CpaInstanceHandle dcInstance,
                         CpaDcChainOperations operation,
                         Cpa8U numSessions,
                         CpaDcChainSessionSetupData *pSessionData,
                         Cpa32U *pSessionSize);
/**
 *****************************************************************************
 * @ingroup cpaDcChain
 *      Initialize data compression chaining session
 *
 * @description
 *      This function is used to initialize compression/decompression chaining
 *      sessions.
 *      This function returns a unique session handle each time this function
 *      is invoked.
 *      If the session has been configured with a callback function, then
 *      the order of the callbacks are guaranteed to be in the same order the
 *      compression or decompression requests were submitted for each session,
 *      so long as a single thread of execution is used for job submission.
 *
 * @context
 *      This is a synchronous function and it cannot sleep. It can be executed
 *      in a context that does not permit sleeping.
 * @assumptions
 *      None
 * @sideEffects
 *      None
 * @blocking
 *      No
 * @reentrant
 *      No
 * @threadSafe
 *      Yes
 *
 * @param[in]      dcInstance      Instance handle derived from discovery
 *                                 functions.
 * @param[in,out]  pSessionHandle  Pointer to a session handle.
 * @param[in]      operation       The operations for chaining
 * @param[in]      numSessions     Number of sessions for chaining
 * @param[in,out]  pSessionData    Pointer to an array of
 *                                 CpaDcChainSessionSetupData structures.
 *                                 There should be numSessions entries in
 *                                 the array.
 * @param[in]      callbackFn      For synchronous operation this callback
 *                                 shall be a null pointer.
 *
 * @retval CPA_STATUS_SUCCESS        Function executed successfully.
 * @retval CPA_STATUS_FAIL           Function failed.
 * @retval CPA_STATUS_INVALID_PARAM  Invalid parameter passed in.
 * @retval CPA_STATUS_RESOURCE       Error related to system resources.
 * @retval CPA_STATUS_RESTARTING     API implementation is restarting.
 *                                   Resubmit the request.
 * @retval CPA_STATUS_UNSUPPORTED    Function is not supported.
 *
 * @pre
 *      dcInstance has been started using cpaDcStartInstance.
 * @post
 *      None
 * @note
 *      Only a synchronous version of this function is provided.
 *
 *      pSessionData Setup Rules
 *      -# Each element in the CpaDcChainSessionSetupData structure array
 *         provides (de)compression or symmetric crypto session setup data.
 *
 *      -# The supported chaining operations are listed in
 *         CpaDcChainOperations. This enum indicates the number of operations
 *         in a chain and the order in which they are performed.
 *
 *      -# The order of entries in pSessionData[] should be consistent with
 *         the CpaDcChainOperations perform order.
 *         As an example, for CPA_DC_CHAIN_COMPRESS_THEN_ENCRYPT,
 *         pSessionData[0] holds the compression setup data and
 *         pSessionData[1] holds the encryption setup data.
 *
 *      -# The numSessions for each chaining operation are provided in
 *         the comments of enum CpaDcChainOperations.
 *
 *      -# For a (de)compression session, the corresponding
 *         pSessionData[]->sessType should be set to
 *         CPA_DC_CHAIN_COMPRESS_DECOMPRESS and pSessionData[]->pDcSetupData
 *         should point to a CpaDcSessionSetupData structure.
 *
 *      -# For a symmetric crypto session, the corresponding
 *         pSessionData[]->sessType should be set to
 *         CPA_DC_CHAIN_SYMMETRIC_CRYPTO and pSessionData[]->pCySetupData
 *         should point to a CpaCySymSessionSetupData structure.
 *
 *      -# Combined compression sessions are not supported for chaining.
 *
 *      -# Stateful compression is not supported for chaining.
 *
 *      -# Both CRC32 and Adler32 over the input data are supported for
 *         chaining.
 *
 * @see
 *      None
 *
 *****************************************************************************/
CpaStatus
cpaDcChainInitSession(CpaInstanceHandle dcInstance,
CpaDcSessionHandle pSessionHandle,
CpaDcChainOperations operation,
Cpa8U numSessions,
CpaDcChainSessionSetupData *pSessionData,
CpaDcCallbackFn callbackFn);
/**
 *****************************************************************************
 * @ingroup cpaDcChain
 *      Reset a compression chaining session.
 *
 * @description
 *      This function will reset a previously initialized session handle.
 *      Reset will fail if outstanding calls still exist for the initialized
 *      session handle.
 *      The client needs to retry the reset function at a later time.
 *
 * @context
 *      This is a synchronous function that cannot sleep. It can be
 *      executed in a context that does not permit sleeping.
 * @assumptions
 *      None
 * @sideEffects
 *      None
 * @blocking
 *      No
 * @reentrant
 *      No
 * @threadSafe
 *      Yes
 *
 * @param[in]      dcInstance      Instance handle.
 * @param[in,out]  pSessionHandle  Session handle.
 *
 * @retval CPA_STATUS_SUCCESS        Function executed successfully.
 * @retval CPA_STATUS_FAIL           Function failed.
 * @retval CPA_STATUS_RETRY          Resubmit the request.
 * @retval CPA_STATUS_INVALID_PARAM  Invalid parameter passed in.
 * @retval CPA_STATUS_UNSUPPORTED    Function is not supported.
 *
 * @pre
 *      The component has been initialized via the cpaDcStartInstance
 *      function.
 *      The session has been initialized via the cpaDcChainInitSession
 *      function.
 * @post
 *      None
 * @note
 *      This is a synchronous function and has no completion callback
 *      associated with it.
 *
 * @see
 *      cpaDcChainInitSession()
 *
 *****************************************************************************/
CpaStatus
cpaDcChainResetSession(const CpaInstanceHandle dcInstance,
CpaDcSessionHandle pSessionHandle);
/**
 *****************************************************************************
 * @ingroup cpaDcChain
 *      Remove a compression chaining session.
 *
 * @description
 *      This function will remove a previously initialized session handle
 *      and the installed callback handler function. Removal will fail if
 *      outstanding calls still exist for the initialized session handle.
 *      The client needs to retry the remove function at a later time.
 *      The memory for the session handle MUST NOT be freed until this call
 *      has completed successfully.
 *
 * @context
 *      This is a synchronous function that cannot sleep. It can be executed
 *      in a context that does not permit sleeping.
 * @assumptions
 *      None
 * @sideEffects
 *      None
 * @blocking
 *      No
 * @reentrant
 *      No
 * @threadSafe
 *      Yes
 *
 * @param[in]      dcInstance      Instance handle.
 * @param[in,out]  pSessionHandle  Session handle.
 *
 * @retval CPA_STATUS_SUCCESS        Function executed successfully.
 * @retval CPA_STATUS_FAIL           Function failed.
 * @retval CPA_STATUS_RETRY          Resubmit the request.
 * @retval CPA_STATUS_INVALID_PARAM  Invalid parameter passed in.
 * @retval CPA_STATUS_RESOURCE       Error related to system resources.
 * @retval CPA_STATUS_RESTARTING     API implementation is restarting.
 *                                   Resubmit the request.
 * @retval CPA_STATUS_UNSUPPORTED    Function is not supported.
 *
 * @pre
 *      The component has been initialized via the cpaDcStartInstance
 *      function.
 * @post
 *      None
 * @note
 *      This is a synchronous function and has no completion callback
 *      associated with it.
 *
 * @see
 *      cpaDcChainInitSession()
 *
 *****************************************************************************/
CpaStatus
cpaDcChainRemoveSession(const CpaInstanceHandle dcInstance,
CpaDcSessionHandle pSessionHandle);
/**
 *****************************************************************************
 * @ingroup cpaDcChain
 *      Submit a request to perform chaining operations.
 *
 * @description
 *      This function is used to perform chaining operations over data from
 *      the source buffer.
 *
 * @context
 *      When called as an asynchronous function it cannot sleep. It can be
 *      executed in a context that does not permit sleeping.
 *      When called as a synchronous function it may sleep. It MUST NOT be
 *      executed in a context that DOES NOT permit sleeping.
 * @assumptions
 *      None
 * @sideEffects
 *      None
 * @blocking
 *      Yes when configured to operate in synchronous mode.
 * @reentrant
 *      No
 * @threadSafe
 *      Yes
 *
 * @param[in]      dcInstance      Target service instance.
 * @param[in,out]  pSessionHandle  Session handle.
 * @param[in]      pSrcBuff        Pointer to input data buffer.
 * @param[out]     pDestBuff       Pointer to output data buffer.
 * @param[in]      operation       Operation for the chaining request
 * @param[in]      numOpDatas      Number of entries in the CpaDcChainOpData
 *                                 array.
 * @param[in]      pChainOpData    Pointer to an array of CpaDcChainOpData
 *                                 structures. There should be numOpDatas
 *                                 entries in the array.
 * @param[in,out]  pResults        Pointer to CpaDcChainRqResults structure.
 * @param[in]      callbackTag     User supplied value to help correlate
 *                                 the callback with its associated request.
 *
 * @retval CPA_STATUS_SUCCESS        Function executed successfully.
 * @retval CPA_STATUS_FAIL           Function failed.
 * @retval CPA_STATUS_RETRY          Resubmit the request.
 * @retval CPA_STATUS_INVALID_PARAM  Invalid parameter passed in.
 * @retval CPA_STATUS_RESOURCE       Error related to system resources.
 * @retval CPA_DC_BAD_DATA           The input data was not properly formed.
 * @retval CPA_STATUS_RESTARTING     API implementation is restarting.
 *                                   Resubmit the request.
 * @retval CPA_STATUS_UNSUPPORTED    Function is not supported.
 *
 * @pre
 *      pSessionHandle has been setup using cpaDcChainInitSession()
 * @post
 *      pSessionHandle has session related state information
 * @note
 *      This function passes control to the compression service for chaining
 *      processing, the supported chaining operations are described in
 *      CpaDcChainOperations.
 *
 *      pChainOpData Setup Rules
 *      -# Each element in the CpaDcChainOpData structure array holds either
 *         a (de)compression or a symmetric crypto operation data.
 *
 *      -# The order of entries in pChainOpData[] must be consistent with the
 *         order of operations described for the chaining operation in
 *         CpaDcChainOperations.
 *         As an example, for CPA_DC_CHAIN_COMPRESS_THEN_ENCRYPT,
 *         pChainOpData[0] must contain the compression operation data and
 *         pChainOpData[1] must contain the encryption operation data.
 *
 *      -# The numOpDatas for each chaining operation are specified in the
 *         comments for the operation in CpaDcChainOperations.
 *
 *      -# For a (de)compression operation, the corresponding
 *         pChainOpData[]->opType should be set to
 *         CPA_DC_CHAIN_COMPRESS_DECOMPRESS and pChainOpData[]->pDcOp should
 *         point to a CpaDcOpData structure.
 *
 *      -# For a symmetric crypto operation, the corresponding
 *         pChainOpData[]->opType should be set to
 *         CPA_DC_CHAIN_SYMMETRIC_CRYPTO and pChainOpData[]->pCySymOp should
 *         point to a CpaCySymOpData structure.
 *
 *      -# Stateful compression is not supported for chaining.
 *
 *      -# Partial packet processing is not supported.
 *
 *      This function has identical buffer processing rules as
 *      cpaDcCompressData().
 *
 *      This function has identical checksum processing rules as
 *      cpaDcCompressData(), except:
 *      -# pResults->crc32 is available to the application if
 *         CpaDcSessionSetupData->checksum is set to CPA_DC_CRC32
 *
 *      -# pResults->adler32 is available to the application if
 *         CpaDcSessionSetupData->checksum is set to CPA_DC_ADLER32
 *
 *      -# Both pResults->crc32 and pResults->adler32 are available if
 *         CpaDcSessionSetupData->checksum is set to CPA_DC_CRC32_ADLER32
 *
 *      Synchronous or asynchronous operation of the API is determined by
 *      the value of the callbackFn parameter passed to
 *      cpaDcChainInitSession() when the sessionHandle was setup. If a
 *      non-NULL value was specified then the supplied callback function
 *      will be invoked asynchronously with the response of this request.
 *
 *      This function has identical response ordering rules as
 *      cpaDcCompressData().
 *
 * @see
 *      cpaDcCompressData
 *
 *****************************************************************************/
CpaStatus
cpaDcChainPerformOp(CpaInstanceHandle dcInstance,
                    CpaDcSessionHandle pSessionHandle,
                    CpaBufferList *pSrcBuff,
                    CpaBufferList *pDestBuff,
                    CpaDcChainOperations operation,
                    Cpa8U numOpDatas,
                    CpaDcChainOpData *pChainOpData,
                    CpaDcChainRqResults *pResults,
                    void *callbackTag);
#ifdef __cplusplus
} /* close the extern "C" { */
#endif
#endif /* CPA_DC_CHAIN_H */

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -143,7 +143,11 @@ typedef struct _CpaDcDpOpData
/**< Instance to which the request is to be enqueued */
CpaDcSessionHandle pSessionHandle;
/**< DC Session associated with the stream of requests */
/**< DC Session associated with the stream of requests.
* This field is only valid when using the session based API functions.
* This field must be set to NULL if the application wishes to use
* the No-Session (Ns) API.
*/
CpaPhysicalAddr srcBuffer;
/**< Physical address of the source buffer on which to operate.
@ -215,8 +219,18 @@ typedef struct _CpaDcDpOpData
* It may be used to store information that might be useful when
* processing the response later.
*/
} CpaDcDpOpData;
CpaDcNsSetupData *pSetupData;
/**< Pointer to the No-session (Ns) Setup data for configuration of this
* request.
*
* This @ref CpaDcNsSetupData structure must be initialised when using the
* Data Plane No-Session (Ns) API. Otherwise it should be set to NULL.
* When initialized, the existing Data Plane API functions can be used
* as is.
*/
} CpaDcDpOpData;
/**
*****************************************************************************
@ -226,7 +240,7 @@ typedef struct _CpaDcDpOpData
* @description
* This is the callback function prototype. The callback function is
* registered by the application using the @ref cpaDcDpRegCbFunc
* function call, and called back on completion of asycnhronous
* function call, and called back on completion of asynchronous
* requests made via calls to @ref cpaDcDpEnqueueOp or @ref
* cpaDcDpEnqueueOpBatch.
*
@ -306,8 +320,8 @@ typedef void (*CpaDcDpCallbackFn)(CpaDcDpOpData *pOpData);
* Only a synchronous version of this function is provided.
*
* Session data is expected to include interim checksum values, various
* counters and other other session related data that needs to persist
* between invocations.
* counters and other session related data that needs to persist between
* invocations.
* For a given implementation of this API, it is safe to assume that
* cpaDcDpGetSessionSize() will always return the same session size and
* that the size will not be different for different setup data
@ -405,6 +419,65 @@ cpaDcDpInitSession( CpaInstanceHandle dcInstance,
CpaDcSessionSetupData *pSessionData );
/**
*****************************************************************************
* @ingroup cpaDc
* Compression Session Update Function.
*
* @description
* This function is used to modify some select compression parameters
* of a previously initialized session handle for a data plane session.
* The update will fail if resources required for the new session settings
* are not available. Specifically, this function may fail if no
* intermediate buffers are associated with the instance, and the
* intended change would require these buffers.
* This function can be called at any time after a successful call of
* cpaDcDpInitSession().
* This function does not change the parameters to compression request
* already in flight.
*
* @context
* This is a synchronous function that cannot sleep. It can be
* executed in a context that does not permit sleeping.
* @assumptions
* None
* @sideEffects
* None
* @blocking
* No.
* @reentrant
* No
* @threadSafe
* No
*
* @param[in] dcInstance Instance handle.
* @param[in,out] pSessionHandle Session handle.
* @param[in] pSessionUpdateData Session Data.
*
* @retval CPA_STATUS_SUCCESS Function executed successfully.
* @retval CPA_STATUS_FAIL Function failed.
* @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in.
* @retval CPA_STATUS_RESOURCE Error related to system resources.
* @retval CPA_STATUS_RESTARTING API implementation is restarting.
* Resubmit the request
*
* @pre
* The component has been initialized via cpaDcStartInstance function.
* The session has been initialized via cpaDcDpInitSession function.
* @post
* None
* @note
* This is a synchronous function and has no completion callback
* associated with it.
*
* @see
* cpaDcDpInitSession()
*
*****************************************************************************/
CpaStatus cpaDcDpUpdateSession( const CpaInstanceHandle dcInstance,
CpaDcSessionHandle pSessionHandle,
CpaDcSessionUpdateData *pSessionUpdateData );
/**
*****************************************************************************
* @ingroup cpaDc
@ -468,7 +541,7 @@ cpaDcDpRemoveSession(const CpaInstanceHandle dcInstance,
* @description
* This function allows a completion callback function to be registered.
* The registered callback function is invoked on completion of
* asycnhronous requests made via calls to @ref cpaDcDpEnqueueOp
* asynchronous requests made via calls to @ref cpaDcDpEnqueueOp
* or @ref cpaDcDpEnqueueOpBatch.
* @context
* This is a synchronous function and it cannot sleep. It can be
@ -569,7 +642,8 @@ CpaStatus cpaDcDpRegCbFunc(const CpaInstanceHandle dcInstance,
*
* @pre
* The session identified by pOpData->pSessionHandle was setup using
* @ref cpaDcDpInitSession.
* @ref cpaDcDpInitSession OR pOpData->pSetupData data structure was
* initialized for No-Session (Ns) usage.
* The instance identified by pOpData->dcInstance has had a
* callback function registered via @ref cpaDcDpRegCbFunc.
*
@ -584,8 +658,6 @@ CpaStatus cpaDcDpRegCbFunc(const CpaInstanceHandle dcInstance,
* @see
* @ref cpaDcDpPerformOpNow
*****************************************************************************/
CpaStatus
cpaDcDpEnqueueOp(CpaDcDpOpData *pOpData,
const CpaBoolean performOpNow);
@ -665,7 +737,8 @@ cpaDcDpEnqueueOp(CpaDcDpOpData *pOpData,
*
* @pre
* The session identified by pOpData[i]->pSessionHandle was setup using
* @ref cpaDcDpInitSession.
* @ref cpaDcDpInitSession OR pOpData[i]->pSetupData data structure was
* initialized for No-Session (Ns) usage.
* The instance identified by pOpData[i]->dcInstance has had a
* callback function registered via @ref cpaDcDpRegCbFunc.
*
@ -694,7 +767,7 @@ cpaDcDpEnqueueOpBatch(const Cpa32U numberRequests,
* compression data plane API.
*
* @description
* This function triggers processing of previously enqueed requests on the
* This function triggers processing of previously enqueued requests on the
* referenced instance.
*
*

View file

@ -27,7 +27,7 @@
/* Part name and number of the accelerator device */
#define SAL_INFO2_DRIVER_SW_VERSION_MAJ_NUMBER 3
#define SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER 12
#define SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER 13
#define SAL_INFO2_DRIVER_SW_VERSION_PATCH_NUMBER 0
/**

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -85,7 +85,7 @@ extern "C" {
* for this interface.
*
*****************************************************************************/
#define CPA_CY_API_VERSION_NUM_MAJOR (2)
#define CPA_CY_API_VERSION_NUM_MAJOR (3)
/**
*****************************************************************************
@ -98,10 +98,43 @@ extern "C" {
* this interface.
*
*****************************************************************************/
#define CPA_CY_API_VERSION_NUM_MINOR (3)
#define CPA_CY_API_VERSION_NUM_MINOR (0)
/**
*****************************************************************************
* @file cpa_cy_common.h
* @ingroup cpa_cyCommon
* CPA CY API version at least
* @description
* The minimal supported CPA_CY API version. Allows checking whether the API
* version is equal to or above a given version, to avoid compilation issues
* with an older API version.
*
*****************************************************************************/
#define CPA_CY_API_VERSION_AT_LEAST(major, minor) \
(CPA_CY_API_VERSION_NUM_MAJOR > major || \
(CPA_CY_API_VERSION_NUM_MAJOR == major && \
CPA_CY_API_VERSION_NUM_MINOR >= minor))
/**
*****************************************************************************
* @file cpa_cy_common.h
* @ingroup cpa_cyCommon
* CPA CY API version less than
* @description
* The maximum supported CPA_CY API version. Allows checking whether the API
* version is below a given version, to avoid compilation issues with a newer
* API version.
*
*****************************************************************************/
#define CPA_CY_API_VERSION_LESS_THAN(major, minor) \
(CPA_CY_API_VERSION_NUM_MAJOR < major || \
(CPA_CY_API_VERSION_NUM_MAJOR == major && \
CPA_CY_API_VERSION_NUM_MINOR < minor))
/**
*****************************************************************************
* @file cpa_cy_common.h
* @ingroup cpaCyCommon
* Request priority
* @description

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -89,7 +89,7 @@ typedef struct _CpaCyDhPhase1KeyGenOpData {
CpaFlatBuffer primeP;
/**< Flat buffer containing a pointer to the random odd prime number (p).
* The bit-length of this number may be one of 768, 1024, 1536, 2048,
* 3072 or 4096.
* 3072, 4096 or 8192.
*/
CpaFlatBuffer baseG;
/**< Flat buffer containing a pointer to base (g). This MUST comply with
@ -131,7 +131,7 @@ typedef struct _CpaCyDhPhase2SecretKeyGenOpData {
CpaFlatBuffer primeP;
/**< Flat buffer containing a pointer to the random odd prime number (p).
* The bit-length of this number may be one of 768, 1024, 1536, 2048,
* 3072 or 4096.
* 3072, 4096 or 8192.
* This SHOULD be same prime number as was used in the phase 1 key
* generation operation. */
CpaFlatBuffer remoteOctetStringPV;
@ -230,7 +230,7 @@ typedef struct _CpaCyDhStats64 {
* operations as defined in the PKCS #3 standard. It may be used to
* generate the (local) octet string public value (PV) key.
* The prime number sizes specified in RFC 2409, 4306, and part of
* RFC 3526 are supported (bit sizes 6144 and 8192 from RFC 3536 are not
* RFC 3526 are supported (bit size 6144 from RFC 3526 is not
* supported).
*
* @context

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -66,7 +66,7 @@
* 1. Montgomery 25519 Curve | scalar point Multiplication
* Input: Montgomery affine coordinate X of point P
* Scalar k
* Output: Montgomery affine coordinate X of point [k/P
* Output: Montgomery affine coordinate X of point [k]P
* Decode: Scalar k always decoded by implementation
*
* 2. Montgomery 25519 Curve | generator point Multiplication
@ -80,13 +80,17 @@
* Scalar k
* Output: Twisted Edwards affine coordinate X of point [k]P
* Twisted Edwards affine coordinate Y of point [k]P
* Decode: Caller must specify if decoding is required
* Decode: Caller must supply parameters in MSB order, the
* implementation will not explicitly decode according
* to RFC#7748 Section 5
*
* 4. Twisted Edwards 25519 Curve | generator point Multiplication
* Input: Scalar k
* Output: Twisted Edwards affine coordinate X of point [k]G
* Twisted Edwards affine coordinate Y of point [k]G
* Decode: Caller must specify if decoding is required
* Decode: Caller must supply parameters in MSB order, the
* implementation will not explicitly decode according
* to RFC#7748 Section 5
*
* 5. Montgomery 448 Curve | scalar point Multiplication
* Input: Montgomery affine coordinate X of point P
@ -105,13 +109,17 @@
* Scalar k
* Output: Edwards affine coordinate X of point [k]P
* Edwards affine coordinate Y of point [k]P
* Decode: Caller must specify if decoding is required
* Decode: Caller must supply parameters in MSB order, the
* implementation will not explicitly decode according
* to RFC#7748 Section 5
*
* 8. Edwards 448 Curve | generator point Multiplication
* Input: Scalar k
* Output: Edwards affine coordinate X of point [k]G
* Edwards affine coordinate Y of point [k]G
* Decode: Caller must specify if decoding is required
* Decode: Caller must supply parameters in MSB order, the
* implementation will not explicitly decode according
* to RFC#7748 Section 5
*
* @note
* Large numbers are represented on the QuickAssist API as described
@ -158,6 +166,35 @@ typedef enum _CpaCyEcFieldType
/**< A binary field, GF(2^m) */
} CpaCyEcFieldType;
/**
*****************************************************************************
* @ingroup cpaCyEc
* Enumeration listing curve types to use with generic multiplication
* and verification routines.
*
* @description
* This structure contains a list of different elliptic curve types.
* EC Point multiplication and other operations depend on the type of
* the curve.
*
* @see
* cpaCyEcGenericPointMultiply()
* cpaCyEcGenericPointVerify()
*
*****************************************************************************/
typedef enum _CpaCyEcCurveType
{
CPA_CY_EC_CURVE_TYPE_WEIERSTRASS_PRIME = 1,
/**< A Weierstrass curve with arithmetic in terms of the
* arithmetic of integers modulo p over a prime field. */
CPA_CY_EC_CURVE_TYPE_WEIERSTRASS_BINARY,
/**< A Weierstrass curve with arithmetic in terms of operations on bits
* over a binary field. */
CPA_CY_EC_CURVE_TYPE_WEIERSTRASS_KOBLITZ_BINARY,
/**< A Weierstrass-koblitz curve with arithmetic in terms of operations on
* the bits over a binary field. */
} CpaCyEcCurveType;
/**
*****************************************************************************
* @ingroup cpaCyEc
@ -175,16 +212,112 @@ typedef enum _CpaCyEcMontEdwdsCurveType
CPA_CY_EC_MONTEDWDS_CURVE25519_TYPE = 1,
/**< Montgomery 25519 curve */
CPA_CY_EC_MONTEDWDS_ED25519_TYPE,
/**< Twisted Edwards 25519 curve */
/**< Edwards 25519 curve */
CPA_CY_EC_MONTEDWDS_CURVE448_TYPE,
/**< Montgomery 448 curve */
CPA_CY_EC_MONTEDWDS_ED448_TYPE,
/**< Twisted Edwards 448 curve */
/**< Edwards 448 curve */
} CpaCyEcMontEdwdsCurveType;
/**
*****************************************************************************
* @file cpa_cy_ec.h
* @ingroup cpaCyEc
* Curve parameters for a Weierstrass type curve.
*
* @description
* This structure contains curve parameters for Weierstrass type
* curve: y^2 = x^3 + ax + b
* The client MUST allocate the memory for this structure
* When the structure is passed into the function, ownership of the memory
* passes to the function. Ownership of the memory returns to the client
* when this structure is returned in the callback function.
*
* For optimal performance all data buffers SHOULD be 8-byte aligned.
* The legend used in this structure is borrowed from RFC7748
*
* @note
* If the client modifies or frees the memory referenced in this
* structure after it has been submitted to the function, and before it
* has been returned in the callback, undefined behavior will result.
*
* @see
* CpaCyEcCurveParameters
* CpaCyEcFieldType
*
*****************************************************************************/
typedef struct _CpaCyEcCurveParametersWeierstrass
{
CpaCyEcFieldType fieldType;
/**< Prime or Binary */
CpaFlatBuffer p;
/**< Prime modulus or irreducible polynomial over GF(2^m) */
CpaFlatBuffer a;
/**< a coefficient */
CpaFlatBuffer b;
/**< b coefficient */
CpaFlatBuffer h;
/**< Cofactor */
} CpaCyEcCurveParametersWeierstrass;
/**
*****************************************************************************
* @ingroup cpaCyEc
* Union characterised by a specific curve.
*
* @description
* This union allows for the characterisation of different curve types
* encapsulted in one data type. The intention is that new curve types
* will be added in the future.
*
* @note
*
* @see
* CpaCyEcCurveParametersWeierstrass
*
*****************************************************************************/
typedef union _CpaCyEcCurveParameters
{
CpaCyEcCurveParametersWeierstrass weierstrassParameters;
} CpaCyEcCurveParameters;
/**
*****************************************************************************
* @ingroup cpaCyEc
* Unified curve parameters.
*
* @description
* This structure provides a single data type that can describe a number
* of different curve types. The intention is to add further
* curve types in the future, thus the union field will allow for that
* expansion.
*
* The client MUST allocate the memory for this structure and the
* items pointed to by this structure. When the structure is passed into
* the function, ownership of the memory passes to the function. Ownership
* of the memory returns to the client when this structure is returned in
* the callback function.
*
* For optimal performance all data buffers SHOULD be 8-byte aligned.
*
* @note
* If the client modifies or frees the memory referenced in this
* structure after it has been submitted to the function, and before it
* has been returned in the callback, undefined behavior will result.
*
* @see
* CpaCyEcCurveParameters
* cpaCyEcGenericPointMultiply
* cpaCyEcGenericPointVerify
*
*****************************************************************************/
typedef struct _CpaCyEcCurve
{
CpaCyEcCurveType curveType;
CpaCyEcCurveParameters parameters;
} CpaCyEcCurve;
/**
*****************************************************************************
* @ingroup cpaCyEc
* EC Point Multiplication Operation Data.
*
@ -230,8 +363,144 @@ typedef struct _CpaCyEcPointMultiplyOpData {
* data pointer of the Flat Buffer to NULL. */
CpaCyEcFieldType fieldType;
/**< field type for the operation */
} CpaCyEcPointMultiplyOpData;
} CpaCyEcPointMultiplyOpData CPA_DEPRECATED;
/**
*****************************************************************************
* @ingroup cpaCyEc
* Generic EC Point Multiplication Operation Data.
*
* @description
* This structure contains a generic EC point and a multiplier for use with
* cpaCyEcGenericPointMultiply. This is common for representing all EC
* points, irrespective of curve type: Weierstrass, Montgomery and Twisted
* Edwards (at this time only Weierstrass are supported). The same
* point + multiplier format can be used when performing generator
* multiplication, in which case the xP, yP supplied in this structure will
* be ignored by QAT API library & a generator point will be inserted in
* their place.
*
* For optimal performance all data buffers SHOULD be 8-byte aligned.
*
* All values in this structure are required to be in Most Significant Byte
* first order, e.g. a.pData[0] = MSB.
*
* @note
* If the client modifies or frees the memory referenced in this
* structure after it has been submitted to the cpaCyEcGenericPointMultiply
* function, and before it has been returned in the callback, undefined
* behavior will result.
*
* @see
* cpaCyEcGenericPointMultiply()
*
*****************************************************************************/
typedef struct _CpaCyEcGenericPointMultiplyOpData {
CpaFlatBuffer k;
/** <scalar multiplier (k > 0 and k < n) */
CpaFlatBuffer xP;
/** <x coordinate of public key */
CpaFlatBuffer yP;
/** <y coordinate of public key */
CpaCyEcCurve *pCurve;
/** <curve type specific parameters */
CpaBoolean generator;
/** <if TRUE xP and yP are the generator points */
} CpaCyEcGenericPointMultiplyOpData;
/**
*****************************************************************************
* @ingroup cpaCyEc
* Generic EC Point Verify Operation Data.
*
* @description
* This structure contains the operation data for the
* cpaCyEcGenericPointVerify function. This is common for representing
* all EC points, irrespective of curve type: Weierstrass, Montgomery and
* Twisted Edwards (at this time only Weierstrass are supported).
*
* This structure contains a generic EC point, irrespective of curve type.
* It is used to verify when the <x,y> pair specified in the structure
* lies on the curve indicated in the cpaCyEcGenericPointVerify API.
*
* For optimal performance all data buffers SHOULD be 8-byte aligned.
*
* All values in this structure are required to be in Most Significant Byte
* first order, e.g. a.pData[0] = MSB.
*
* @note
* If the client modifies or frees the memory referenced in this
* structure after it has been submitted to the cpaCyEcGenericPointVerify
* function, and before it has been returned in the callback, undefined
* behavior will result.
*
* @see
* cpaCyEcGenericPointVerify()
*
*****************************************************************************/
typedef struct _CpaCyEcGenericPointVerifyOpData {
CpaFlatBuffer xP;
/** <x coordinate of public key */
CpaFlatBuffer yP;
/** <y coordinate of public key */
CpaCyEcCurve *pCurve;
/** <curve type specific parameters */
} CpaCyEcGenericPointVerifyOpData;
/**
*****************************************************************************
* @ingroup cpaCyEc
* EC Point Multiplication Operation Data for Edwards or
* Montgomery curves as specificied in RFC#7748.
*
* @description
* This structure contains the operation data for the
* cpaCyEcMontEdwdsPointMultiply function.
* The client MUST allocate the memory for this structure and the
* items pointed to by this structure. When the structure is passed into
* the function, ownership of the memory passes to the function. Ownership
* of the memory returns to the client when this structure is returned in
* the callback function.
*
* For optimal performance all data buffers SHOULD be 8-byte aligned.
*
* All values in this structure are required to be in Most Significant Byte
* first order, e.g. a.pData[0] = MSB.
*
* @note
* If the client modifies or frees the memory referenced in this
* structure after it has been submitted to the
* cpaCyEcMontEdwdsPointMultiply function, and before it has been returned
* in the callback, undefined behavior will result.
*
* All buffers in this structure need to be:
* - 32 bytes in size for 25519 curves
* - 64 bytes in size for 448 curves
*
* @see
* cpaCyEcMontEdwdsPointMultiply()
*
*****************************************************************************/
typedef struct _CpaCyEcMontEdwdsPointMultiplyOpData {
CpaCyEcMontEdwdsCurveType curveType;
/**< field type for the operation */
CpaBoolean generator;
/**< True if the operation is a generator multiplication (kG)
* False if it is a variable point multiplcation (kP). */
CpaFlatBuffer k;
/**< k scalar multiplier for the operation */
CpaFlatBuffer x;
/**< x value. Used in scalar varable point multiplication operations.
* Not required if the generator is True. Must be NULL if not required.
* The size of the buffer MUST be 32B for 25519 curves and 64B for 448
* curves */
CpaFlatBuffer y;
/**< y value. Used in variable point multiplication of operations.
* Not required if the generator is True.
* Must be NULL if not required.
* The size of the buffer MUST be 32B for 25519 curves and 64B for 448
* curves */
} CpaCyEcMontEdwdsPointMultiplyOpData;
/**
*****************************************************************************
@ -272,67 +541,9 @@ typedef struct _CpaCyEcPointVerifyOpData {
/**< a elliptic curve coefficient */
CpaFlatBuffer b;
/**< b elliptic curve coefficient */
CpaCyEcFieldType fieldType;
/**< field type for the operation */
} CpaCyEcPointVerifyOpData;
/**
*****************************************************************************
* @file cpa_cy_ec.h
* @ingroup cpaCyEc
* EC Point Multiplication Operation Data for Edwards or
8 Montgomery curves as specificied in RFC#7748.
*
* @description
* This structure contains the operation data for the
* cpaCyEcMontEdwdsPointMultiply function.
* The client MUST allocate the memory for this structure and the
* items pointed to by this structure. When the structure is passed into
* the function, ownership of the memory passes to the function. Ownership
* of the memory returns to the client when this structure is returned in
* the callback function.
*
* For optimal performance all data buffers SHOULD be 8-byte aligned.
*
* All values in this structure are required to be in Most Significant Byte
* first order, e.g. a.pData[0] = MSB.
*
* @note
* If the client modifies or frees the memory referenced in this
* structure after it has been submitted to the cpaCyEcPointMultiply
* function, and before it has been returned in the callback, undefined
* behavior will result.
*
* All buffers in this structure need to be:
* - 32 bytes in size for 25519 curves
* - 64 bytes in size for 448 curves
*
* @see
* cpaCyEcMontEdwdsPointMultiply()
*
*****************************************************************************/
typedef struct _CpaCyEcMontEdwdsPointMultiplyOpData {
CpaCyEcMontEdwdsCurveType curveType;
/**< field type for the operation */
CpaBoolean generator;
/**< True if the operation is a generator multiplication (kG)
* False if it is a variable point multiplcation (kP). */
CpaFlatBuffer k;
/**< k or generator for the operation */
CpaFlatBuffer x;
/**< x value. Used in scalar varable point multiplication operations.
* Not required if the generator is True. Must be NULL if not required.
* The size of the buffer MUST be 32B for 25519 curves and 64B for 448
* curves */
CpaFlatBuffer y;
/**< y value. Used in variable point multiplication of operations.
* Not required for curves defined only on scalar operations.
* Not required if the generator is True.
* Must be NULL if not required.
* The size of the buffer MUST be 32B for 25519 curves and 64B for 448
* curves */
} CpaCyEcMontEdwdsPointMultiplyOpData;
} CpaCyEcPointVerifyOpData CPA_DEPRECATED;
/**
*****************************************************************************
@ -414,7 +625,7 @@ typedef struct _CpaCyEcStats64 {
* @note
* None
* @see
* cpaCyEcPointMultiply()
* cpaCyEcGenericPointMultiply()
*
*****************************************************************************/
typedef void (*CpaCyEcPointMultiplyCbFunc)(void *pCallbackTag,
@ -428,7 +639,7 @@ typedef void (*CpaCyEcPointMultiplyCbFunc)(void *pCallbackTag,
/**
*****************************************************************************
* @ingroup cpaCyEc
* Definition of callback function invoked for cpaCyEcPointVerify
* Definition of callback function invoked for cpaCyEcGenericPointVerify
* requests.
* @context
* This callback function can be executed in a context that DOES NOT
@ -460,7 +671,7 @@ typedef void (*CpaCyEcPointMultiplyCbFunc)(void *pCallbackTag,
* @note
* None
* @see
* cpaCyEcPointVerify()
* cpaCyEcGenericPointVerify()
*
*****************************************************************************/
typedef void (*CpaCyEcPointVerifyCbFunc)(void *pCallbackTag,
@ -474,6 +685,9 @@ typedef void (*CpaCyEcPointVerifyCbFunc)(void *pCallbackTag,
* @ingroup cpaCyEc
* Perform EC Point Multiplication.
*
* @deprecated
* This function is replaced with @ref cpaCyEcGenericPointMultiply
*
* @description
* This function performs Elliptic Curve Point Multiplication as per
* ANSI X9.63 Annex D.3.2.
@ -534,7 +748,7 @@ typedef void (*CpaCyEcPointVerifyCbFunc)(void *pCallbackTag,
* CpaCyEcPointMultiplyCbFunc
*
*****************************************************************************/
CpaStatus
CpaStatus CPA_DEPRECATED
cpaCyEcPointMultiply(const CpaInstanceHandle instanceHandle,
const CpaCyEcPointMultiplyCbFunc pCb,
void *pCallbackTag,
@ -549,6 +763,9 @@ cpaCyEcPointMultiply(const CpaInstanceHandle instanceHandle,
* @ingroup cpaCyEc
* Verify that a point is on an elliptic curve.
*
* @deprecated
* This function is replaced with @ref cpaCyEcGenericPointVerify
*
* @description
* This function performs Elliptic Curve Point Verification, as per
* steps a, b and c of ANSI X9.62 Annex A.4.2. (To perform the final
@ -620,7 +837,7 @@ cpaCyEcPointMultiply(const CpaInstanceHandle instanceHandle,
* CpaCyEcPointVerifyCbFunc
*
*****************************************************************************/
CpaStatus
CpaStatus CPA_DEPRECATED
cpaCyEcPointVerify(const CpaInstanceHandle instanceHandle,
const CpaCyEcPointVerifyCbFunc pCb,
void *pCallbackTag,
@ -629,7 +846,148 @@ cpaCyEcPointVerify(const CpaInstanceHandle instanceHandle,
/**
*****************************************************************************
* @file cpa_cy_ec.h
* @ingroup cpaCyEc
* Generic ECC point multiplication operation.
*
* @description
* This is the generic ECC point multiplication operation, which is
* agnostic to the type of the curve used.
*
* @context
*
* @assumptions
* None
* @sideEffects
* None
* @blocking
* Yes when configured to operate in synchronous mode.
* @reentrant
* No
* @threadSafe
* Yes
*
* @param[in] instanceHandle Instance handle.
* @param[in] pCb Callback function pointer. If this is set
* to a NULL value, the function will operate
* synchronously.
* @param[in] pCallbackTag User-supplied value to help identify
* request.
* @param[in] pOpData Structure containing all the data needed to
* perform the operation. The client code
* allocates the memory for this structure.
* This component takes ownership of the
* memory until it is returned in the
* callback.
* @param[out] pMultiplyStatus In synchronous mode, the multiply output is
* valid (CPA_TRUE) or the output is invalid
* (CPA_FALSE).
*
* @param[out] pXk Pointer to xk flat buffer.
* @param[out] pYk Pointer to yk flat buffer.
*
* @retval CPA_STATUS_SUCCESS Function executed successfully.
* @retval CPA_STATUS_FAIL Function failed.
* @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in.
* @retval CPA_STATUS_RESOURCE Error related to system resources.
* @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit
* the request.
* @retval CPA_STATUS_UNSUPPORTED Function is not supported.
*
* @pre
* Component has been initialized.
* @post
* None
* @note
* When pCb is non-NULL an asynchronous callback of type
* CpaCyEcPointMultiplyCbFunc is generated in response to this function
* call.
* For optimal performance, data pointers SHOULD be 8-byte aligned.
* @see
* CpaCyEcPointMultiplyOpData,
* CpaCyEcPointMultiplyCbFunc
* CpaCyEcCurveType
* CpaCyEcCurveParameters
*****************************************************************************/
CpaStatus
cpaCyEcGenericPointMultiply(
const CpaInstanceHandle instanceHandle,
const CpaCyEcPointMultiplyCbFunc pCb,
void *pCallbackTag,
const CpaCyEcGenericPointMultiplyOpData *pOpData,
CpaBoolean *pMultiplyStatus,
CpaFlatBuffer *pXk,
CpaFlatBuffer *pYk);
/**
*****************************************************************************
* @ingroup cpaCyEc
* Generic ECC point verification operation.
*
* @description
* This is the generic ECC point verification operation, which is
* agnostic to the type of the curve used.
*
* @context
*
* @assumptions
* None
* @sideEffects
* None
* @blocking
* Yes when configured to operate in synchronous mode.
* @reentrant
* No
* @threadSafe
* Yes
*
* @param[in] instanceHandle Instance handle.
* @param[in] pCb Callback function pointer. If this is set
* to a NULL value the function will operate
* synchronously.
* @param[in] pCallbackTag User-supplied value to help identify
* request.
* @param[in] pOpData Structure containing all the data needed to
* perform the operation. The client code
* allocates the memory for this structure.
* This component takes ownership of the
* memory until it is returned in the
* callback.
* @param[out] pVerifyStatus In synchronous mode, the verification
* output is valid (CPA_TRUE) or the output is
* invalid (CPA_FALSE).
* @retval CPA_STATUS_SUCCESS Function executed successfully.
* @retval CPA_STATUS_FAIL Function failed.
* @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in.
* @retval CPA_STATUS_RESOURCE Error related to system resources.
* @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit
* the request.
* @retval CPA_STATUS_UNSUPPORTED Function is not supported.
*
* @pre
* Component has been initialized.
* @post
* None
* @note
* When pCb is non-NULL an asynchronous callback of type
* CpaCyEcPointVerifyCbFunc is generated in response to this function call.
* For optimal performance, data pointers SHOULD be 8-byte aligned.
* @see
* CpaCyEcGenericPointVerifyOpData,
* CpaCyEcPointVerifyCbFunc
* CpaCyEcCurveType
* CpaCyEcCurveParameters
*****************************************************************************/
CpaStatus
cpaCyEcGenericPointVerify (
const CpaInstanceHandle instanceHandle,
const CpaCyEcPointVerifyCbFunc pCb,
void *pCallbackTag,
const CpaCyEcGenericPointVerifyOpData *pOpData,
CpaBoolean *pVerifyStatus);
/**
*****************************************************************************
* @ingroup cpaCyEc
* Perform EC Point Multiplication on an Edwards or Montgomery curve as
* defined in RFC#7748.

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -294,7 +294,6 @@ cpaCyEcdhPointMultiply(const CpaInstanceHandle instanceHandle,
CpaFlatBuffer *pXk,
CpaFlatBuffer *pYk);
/**
*****************************************************************************
* @ingroup cpaCyEcdh

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -267,7 +267,6 @@ typedef struct _CpaCyEcdsaVerifyOpData {
/**< field type for the operation */
} CpaCyEcdsaVerifyOpData;
/**
*****************************************************************************
* @ingroup cpaCyEcdsa
@ -339,6 +338,21 @@ typedef struct _CpaCyEcdsaStats64 {
/**< Total number of ECDSA Verification operation requests that resulted
* in an invalid output.
* Note that this does not indicate an error. */
Cpa64U numKptEcdsaSignRSCompletedOutputInvalid;
/**< Total number of KPT ECDSA Sign R & S operation requests could not be
* completed successfully due to an invalid output.
* Note that this does not indicate an error. */
Cpa64U numKptEcdsaSignRSCompleted;
/**< Total number of KPT ECDSA Sign R & S operation requests that completed
* successfully. */
Cpa64U numKptEcdsaSignRSRequests;
/**< Total number of KPT ECDSA Sign R & S operation requests. */
Cpa64U numKptEcdsaSignRSRequestErrors;
/**< Total number of KPT ECDSA Sign R & S operation requests that had an
* error and could not be processed. */
Cpa64U numKptEcdsaSignRSCompletedErrors;
/**< Total number of KPT ECDSA Sign R & S operation requests that could
* not be completed successfully due to errors. */
} CpaCyEcdsaStats64;
@ -775,7 +789,6 @@ cpaCyEcdsaVerify(const CpaInstanceHandle instanceHandle,
const CpaCyEcdsaVerifyOpData *pOpData,
CpaBoolean *pVerifyStatus);
/**
*****************************************************************************
* @ingroup cpaCyEcdsa

File diff suppressed because it is too large Load diff

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -242,6 +242,9 @@ typedef struct _CpaCyCapabilitiesInfo
/**< CPA_TRUE if instance supports the Edwards and Montgomery elliptic
* curves of the EC API.
* See @ref cpaCyEc */
CpaBoolean ecSm2Supported;
/**< CPA_TRUE if instance supports the EcSM2 API.
* See @ref cpaCyEcsm2. */
} CpaCyCapabilitiesInfo;
/**

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -151,8 +151,7 @@ typedef struct _CpaCyKeyGenSslOpData {
* The length field indicates the length of the label in bytes. To use this
* field, the sslOp must be CPA_CY_KEY_SSL_OP_USER_DEFINED,
* or otherwise it is ignored and can be set to NULL.
* Implementation-specific limits
* may apply to this length. */
* Implementation-specific limits may apply to this length. */
} CpaCyKeyGenSslOpData;
/**
@ -298,7 +297,7 @@ typedef enum _CpaCyKeyHKDFCipherSuite
#define CPA_CY_HKDF_SUBLABEL_FINISHED ((Cpa16U)0x0008)
/**< Bit for creation of key material for 'finished' sublabel */
#define CPA_CY_HKDF_KEY_MAX_SECRET_SZ ((Cpa8U)64)
#define CPA_CY_HKDF_KEY_MAX_SECRET_SZ ((Cpa8U)80)
/** space in bytes PSK or (EC)DH */
#define CPA_CY_HKDF_KEY_MAX_HMAC_SZ ((Cpa8U)48)
/** space in bytes of CPA_CY_SYM_HASH_SHA384 result */
@ -346,7 +345,6 @@ typedef struct _CpaCyKeyGenHKDFExpandLabel
/**
*****************************************************************************
* @file cpa_cy_key.h
* @ingroup cpaCyKeyGen
* TLS data for key generation functions
* @description
@ -838,7 +836,6 @@ cpaCyKeyGenTls2(const CpaInstanceHandle instanceHandle,
/**
*****************************************************************************
* @file cpa_cy_key.h
* @ingroup cpaCyKeyGen
* TLS Key Generation Function version 3.
* @description

View file

@ -0,0 +1,853 @@
/***************************************************************************
*
* BSD LICENSE
*
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
***************************************************************************/
/*
*****************************************************************************
* Doxygen group definitions
****************************************************************************/
/**
*****************************************************************************
* @file cpa_cy_kpt.h
*
* @defgroup cpaCyKpt Intel(R) Key Protection Technology (KPT) Cryptographic API
*
* @ingroup cpaCy
*
* @description
* These functions specify the APIs for Key Protection Technology (KPT)
* Cryptographic services.
*
* @note
* These functions implement the KPT Cryptographic API.
* This API is experimental and subject to change.
*
*****************************************************************************/
#ifndef __CPA_CY_KPT_H__
#define __CPA_CY_KPT_H__
#ifdef __cplusplus
extern "C" {
#endif
#include "cpa_cy_common.h"
#include "cpa_cy_rsa.h"
#include "cpa_cy_ecdsa.h"
#include "cpa_cy_ec.h"
/**
*****************************************************************************
* @ingroup cpaCyKpt
* KPT wrapping key handle
*
* @description
* Handle to a unique wrapping key in wrapping key table. Application
* creates it in KPT key transfer phase and maintains it for KPT Crypto
* service. For each KPT Crypto service API invocation, this handle will
* be used to get a SWK(Symmetric Wrapping Key) to unwrap
* WPK(Wrapped Private Key) before performing the requested crypto
* service.
*
*****************************************************************************/
typedef Cpa64U CpaCyKptHandle;
/**
*****************************************************************************
* @ingroup cpaCyKpt
* Return Status
* @description
* This enumeration lists all the possible return status after completing
* KPT APIs.
*
*****************************************************************************/
typedef enum CpaCyKptKeyManagementStatus_t
{
CPA_CY_KPT_SUCCESS = 0,
/**< Generic success status for all KPT wrapping key handling functions*/
CPA_CY_KPT_LOADKEY_FAIL_QUOTA_EXCEEDED_PER_VFID,
/**< SWK count exceeds the configured maxmium value per VFID*/
CPA_CY_KPT_LOADKEY_FAIL_QUOTA_EXCEEDED_PER_PASID,
/**< SWK count exceeds the configured maxmium value per PASID*/
CPA_CY_KPT_LOADKEY_FAIL_QUOTA_EXCEEDED,
/**< SWK count exceeds the configured maxmium value when not scoped to
* VFID or PASID*/
CPA_CY_KPT_SWK_FAIL_NOT_FOUND,
/**< Unable to find SWK entry by handle */
CPA_CY_KPT_FAILED,
} CpaCyKptKeyManagementStatus;
/**
*****************************************************************************
* @ingroup cpaCyKpt
* PKCS#1 v2.2 RSA-3K signature output length in bytes.
* @see CpaCyKptValidationKey
*
*****************************************************************************/
#define CPA_CY_RSA3K_SIG_SIZE_INBYTES 384
/**
*****************************************************************************
* @ingroup cpaCyKpt
* KPT device credentials key certificate
* @description
* This structure defines the key format for use with KPT.
* @see
* cpaCyKptQueryDeviceCredentials
*
*****************************************************************************/
typedef struct CpaCyKptValidationKey_t
{
CpaCyRsaPublicKey publicKey;
/**< Key */
Cpa8U signature[CPA_CY_RSA3K_SIG_SIZE_INBYTES];
/**< Signature of key */
} CpaCyKptValidationKey;
/**
*****************************************************************************
* @ingroup cpaCyKpt
* Cipher algorithms used to generate a wrapped private key (WPK) from
* the clear private key.
*
* @description
* This enumeration lists supported cipher algorithms and modes.
*
*****************************************************************************/
typedef enum CpaCyKptWrappingKeyType_t
{
CPA_CY_KPT_WRAPPING_KEY_TYPE_AES256_GCM = 0
} CpaCyKptWrappingKeyType;
/**
*****************************************************************************
* @ingroup cpaCyKpt
* KPT Loading key format specification.
* @description
* This structure defines the format of the symmetric wrapping key to be
* loaded into KPT. Application sets these parameters through the
* cpaCyKptLoadKey calls.
*
*****************************************************************************/
typedef struct CpaCyKptLoadKey_t
{
CpaFlatBuffer eSWK;
/**< Encrypted SWK */
CpaCyKptWrappingKeyType wrappingAlgorithm;
/**< Symmetric wrapping algorithm */
} CpaCyKptLoadKey;
/**
 *****************************************************************************
 * @ingroup cpaCyKpt
 *      Max length of initialization vector
 * @description
 *      Defines the permitted max iv length in bytes that may be used in
 *      private key wrapping/unwrapping. For AES-GCM, iv length is 12 bytes.
 *
 * @see cpaCyKptUnwrapContext
 *
 *****************************************************************************/
#define CPA_CY_KPT_MAX_IV_LENGTH (12)
/**
 *****************************************************************************
 * @ingroup cpaCyKpt
 *      Max length of Additional Authenticated Data
 * @description
 *      Defines the permitted max aad length in bytes that may be used in
 *      private key wrapping/unwrapping.
 *
 * @see cpaCyKptUnwrapContext
 *
 *****************************************************************************/
#define CPA_CY_KPT_MAX_AAD_LENGTH (16)
/**
 *****************************************************************************
 * @file cpa_cy_kpt.h
 * @ingroup cpaCyKpt
 *      Structure of KPT unwrapping context.
 * @description
 *      This structure is a parameter of KPT crypto APIs, it contains data
 *      relating to KPT WPK unwrapping, the application needs to fill in this
 *      information.
 *
 *****************************************************************************/
typedef struct CpaCyKptUnwrapContext_t
{
    CpaCyKptHandle kptHandle;
    /**< This is application's unique handle that identifies its
     * (symmetric) wrapping key, as returned by cpaCyKptLoadKey. */
    Cpa8U iv[CPA_CY_KPT_MAX_IV_LENGTH];
    /**< Initialization Vector used when the WPK was created. */
    Cpa8U additionalAuthData[CPA_CY_KPT_MAX_AAD_LENGTH];
    /**< A buffer holding the Additional Authenticated Data. */
    Cpa32U aadLenInBytes;
    /**< Number of bytes representing the size of AAD within additionalAuthData
     * buffer. */
} CpaCyKptUnwrapContext;
/**
 *****************************************************************************
 * @file cpa_cy_kpt.h
 * @ingroup cpaCyKpt
 *      RSA Private Key Structure For Representation 1.
 * @description
 *      This structure contains the first representation that can be used for
 *      describing the RSA private key, represented by the tuple of the
 *      modulus (N) and the private exponent (D).
 *      The representation is encrypted as follows:
 *      Encrypt - AES-256-GCM (Key, AAD, Input)
 *      "||" - denotes concatenation
 *      Key = SWK
 *      AAD = DER(OID)
 *      Input = (D || N)
 *      Encrypt (SWK, AAD, (D || N))
 *      Output (AuthTag, (D || N)')
 *      EncryptedRSAKey = (D || N)'
 *
 *      privateKey = (EncryptedRSAKey || AuthTag)
 *
 *      OID's that shall be supported by KPT implementation:
 *          OID                  DER(OID)
 *          1.2.840.113549.1.1   06 08 2A 86 48 86 F7 0D 01 01
 *
 *      Permitted lengths for N and D are:
 *      - 512 bits (64 bytes),
 *      - 1024 bits (128 bytes),
 *      - 1536 bits (192 bytes),
 *      - 2048 bits (256 bytes),
 *      - 3072 bits (384 bytes),
 *      - 4096 bits (512 bytes), or
 *      - 8192 bits (1024 bytes).
 *
 *      AuthTag is 128 bits (16 bytes)
 *
 * @note It is important that the value D is big enough. It is STRONGLY
 *      recommended that this value is at least half the length of the modulus
 *      N to protect against the Wiener attack.
 *
 *****************************************************************************/
typedef struct CpaCyKptRsaPrivateKeyRep1_t
{
    CpaFlatBuffer privateKey;
    /**< The EncryptedRSAKey concatenated with AuthTag. */
} CpaCyKptRsaPrivateKeyRep1;
/**
 *****************************************************************************
 * @file cpa_cy_kpt.h
 * @ingroup cpaCyKpt
 *      KPT RSA Private Key Structure For Representation 2.
 * @description
 *      This structure contains the second representation that can be used for
 *      describing the RSA private key. The quintuple of p, q, dP, dQ, and qInv
 *      (explained below and in the spec) are required for the second
 *      representation. For KPT the parameters are Encrypted
 *      with the associated SWK as follows:
 *      Encrypt - AES-256-GCM (Key, AAD, Input)
 *      "||" - denotes concatenation
 *      Key = SWK
 *      AAD = DER(OID)
 *      Input = (P || Q || dP || dQ || Qinv || publicExponentE)
 *      Expanded Description:
 *          Encrypt (SWK, AAD,
 *                   (P || Q || dP || dQ || Qinv || publicExponentE))
 *          EncryptedRSAKey = (P || Q || dP || dQ || Qinv || publicExponentE)'
 *      Output (AuthTag, EncryptedRSAKey)
 *
 *      privateKey = EncryptedRSAKey || AuthTag
 *
 *      OID's that shall be supported by KPT implementation:
 *          OID                  DER(OID)
 *          1.2.840.113549.1.1   06 08 2A 86 48 86 F7 0D 01 01
 *
 *      All of the encrypted parameters will be of equal size. The length of
 *      each will be equal to keySize in bytes/2.
 *      For example for a key size of 256 Bytes (2048 bits), the length of
 *      P, Q, dP, dQ, and Qinv are all 128 Bytes, plus the
 *      publicExponentE of 256 Bytes, giving a total size for
 *      EncryptedRSAKey of 896 Bytes.
 *
 *      AuthTag is 128 bits (16 bytes)
 *
 *      Permitted Key Sizes are:
 *      - 512 bits (64 bytes),
 *      - 1024 bits (128 bytes),
 *      - 1536 bits (192 bytes),
 *      - 2048 bits (256 bytes),
 *      - 3072 bits (384 bytes),
 *      - 4096 bits (512 bytes), or
 *      - 8192 bits (1024 bytes).
 *
 *****************************************************************************/
typedef struct CpaCyKptRsaPrivateKeyRep2_t
{
    CpaFlatBuffer privateKey;
    /**< RSA private key representation 2 is built up from the
     * tuple of p, q, dP, dQ, qInv, publicExponentE and AuthTag.
     */
} CpaCyKptRsaPrivateKeyRep2;
/**
 *****************************************************************************
 * @file cpa_cy_kpt.h
 * @ingroup cpaCyKpt
 *      RSA Private Key Structure.
 * @description
 *      This structure contains the two representations that can be used for
 *      describing the RSA private key. The privateKeyRepType will be used to
 *      identify which representation is to be used. Typically, using the
 *      second representation results in faster decryption operations.
 *
 *****************************************************************************/
typedef struct CpaCyKptRsaPrivateKey_t
{
    CpaCyRsaVersion version;
    /**< Indicates the version of the PKCS #1 specification that is
     * supported.
     * Note that this applies to both representations. */
    CpaCyRsaPrivateKeyRepType privateKeyRepType;
    /**< This value is used to identify which of the private key
     * representation types in this structure is relevant.
     * When performing key generation operations for Type 2 representations,
     * memory must also be allocated for the type 1 representations, and values
     * for both will be returned. */
    CpaCyKptRsaPrivateKeyRep1 privateKeyRep1;
    /**< This is the first representation of the RSA private key as
     * defined in the PKCS #1 V2.2 specification. */
    CpaCyKptRsaPrivateKeyRep2 privateKeyRep2;
    /**< This is the second representation of the RSA private key as
     * defined in the PKCS #1 V2.2 specification. */
} CpaCyKptRsaPrivateKey;
/**
 *****************************************************************************
 * @file cpa_cy_kpt.h
 * @ingroup cpaCyKpt
 *      KPT RSA Decryption Primitive Operation Data
 * @description
 *      This structure lists the different items that are required in the
 *      cpaCyKptRsaDecrypt function. As the RSA decryption primitive and
 *      signature primitive operations are mathematically identical this
 *      structure may also be used to perform an RSA signature primitive
 *      operation.
 *      When performing an RSA decryption primitive operation, the input data
 *      is the cipher text and the output data is the message text.
 *      When performing an RSA signature primitive operation, the input data
 *      is the message and the output data is the signature.
 *      The client MUST allocate the memory for this structure. When the
 *      structure is passed into the function, ownership of the memory passes
 *      to the function. Ownership of the memory returns to the client when
 *      this structure is returned in the CpaCyGenFlatBufCbFunc
 *      callback function.
 *
 * @note
 *      If the client modifies or frees the memory referenced in this structure
 *      after it has been submitted to the cpaCyKptRsaDecrypt function, and
 *      before it has been returned in the callback, undefined behavior will
 *      result.
 *      All values in this structure are required to be in Most Significant Byte
 *      first order, e.g. inputData.pData[0] = MSB.
 *
 *****************************************************************************/
typedef struct CpaCyKptRsaDecryptOpData_t
{
    CpaCyKptRsaPrivateKey *pRecipientPrivateKey;
    /**< Pointer to the recipient's RSA private key. */
    CpaFlatBuffer inputData;
    /**< The input data that the RSA decryption primitive operation is
     * performed on. The data pointed to is an integer that MUST be in big-
     * endian order. The value MUST be between 0 and the modulus n - 1. */
} CpaCyKptRsaDecryptOpData;
/**
 *****************************************************************************
 * @file cpa_cy_kpt.h
 * @ingroup cpaCyKpt
 *      KPT ECDSA Sign R & S Operation Data.
 *
 * @description
 *      This structure contains the operation data for the cpaCyKptEcdsaSignRS
 *      function. The client MUST allocate the memory for this structure and the
 *      items pointed to by this structure. When the structure is passed into
 *      the function, ownership of the memory passes to the function. Ownership
 *      of the memory returns to the client when this structure is returned in
 *      the callback function.
 *      This key structure is encrypted when passed into cpaCyKptEcdsaSignRS
 *      Encrypt - AES-256-GCM (Key, AAD, Input)
 *      "||" - denotes concatenation
 *
 *      Key = SWK
 *      AAD = DER(OID)
 *      Input = (d)
 *      Encrypt (SWK, AAD, (d))
 *      Output (AuthTag, EncryptedECKey)
 *
 *      privatekey == EncryptedECKey || AuthTag
 *
 *      OID's that shall be supported by KPT implementation:
 *      Curve      OID                  DER(OID)
 *      secp256r1  1.2.840.10045.3.1.7  06 08 2A 86 48 CE 3D 03 01 07
 *      secp384r1  1.3.132.0.34         06 05 2B 81 04 00 22
 *      secp521r1  1.3.132.0.35         06 05 2B 81 04 00 23
 *
 *      Expected private key (d) sizes:
 *      secp256r1  256 bits
 *      secp384r1  384 bits
 *      secp521r1  576 bits (rounded up to a multiple of 64-bit quadword)
 *
 *      AuthTag is 128 bits (16 bytes)
 *
 *      For optimal performance all data buffers SHOULD be 8-byte aligned.
 *
 * @note
 *      If the client modifies or frees the memory referenced in this
 *      structure after it has been submitted to the cpaCyKptEcdsaSignRS
 *      function, and before it has been returned in the callback, undefined
 *      behavior will result.
 *
 * @see
 *      cpaCyEcdsaSignRS()
 *
 *****************************************************************************/
typedef struct CpaCyKptEcdsaSignRSOpData_t
{
    CpaFlatBuffer privateKey;
    /**< Encrypted private key data of the form
     * EncryptECKey || AuthTag */
    CpaFlatBuffer m;
    /**< Digest of the message to be signed. */
} CpaCyKptEcdsaSignRSOpData;
/**
 *****************************************************************************
 * Discovery and Provisioning APIs for KPT
 *
 *****************************************************************************/
/**
 *****************************************************************************
 * @file cpa_cy_kpt.h
 * @ingroup cpaCyKpt
 *      Query KPT's issuing public key(R_Pu) and signature from QAT driver.
 * @description
 *      This function is to query the RSA3K issuing key and its
 *      PKCS#1 v2.2 SHA-384 signature from the QAT driver.
 * @context
 *      This function may sleep, and MUST NOT be called in interrupt context.
 * @assumptions
 *      None
 * @sideEffects
 *      None
 * @blocking
 *      This function is synchronous and blocking.
 * @param[in]  instanceHandle       Instance handle.
 * @param[out] pPublicX509IssueCert KPT-2.0 Issuing certificate in PEM format
 *                                  as defined in RFC#7468
 * @param[out] pKptStatus           One of the status codes denoted in the
 *                                  enumerate type CpaCyKptKeyManagementStatus
 *     CPA_CY_KPT_SUCCESS Issuing key retrieved successfully
 *     CPA_CY_KPT_FAILED  Operation failed
 *
 * @retval CPA_STATUS_SUCCESS       Function executed successfully.
 * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in.
 * @retval CPA_STATUS_FAIL          Function failed. Suggested course of action
 *                                  is to shutdown and restart.
 * @retval CPA_STATUS_UNSUPPORTED   Function is not supported.
 * @retval CPA_STATUS_RESTARTING    API implementation is restarting.
 *                                  Resubmit the request.
 *
 * @pre
 *      The component has been initialized via cpaCyStartInstance function.
 * @post
 *      None
 * @note
 *      Note that this is a synchronous function and has no completion callback
 *      associated with it.
 * @see
 *
 *****************************************************************************/
CpaStatus
cpaCyKptQueryIssuingKeys(const CpaInstanceHandle instanceHandle,
                         CpaFlatBuffer *pPublicX509IssueCert,
                         CpaCyKptKeyManagementStatus *pKptStatus);
/**
 *****************************************************************************
 * @file cpa_cy_kpt.h
 * @ingroup cpaCyKpt
 *      Query KPT's Per-Part public key(I_pu) and signature from QAT
 *      device
 * @description
 *      This function is to query RSA3K Per-Part public key and its
 *      PKCS#1 v2.2 SHA-384 signature from the QAT device.
 * @context
 *      This function may sleep, and MUST NOT be called in interrupt context.
 * @assumptions
 *      None
 * @sideEffects
 *      None
 * @blocking
 *      This function is synchronous and blocking.
 * @param[in]  instanceHandle  Instance handle.
 * @param[out] pDevCredential  Device Per-Part public key
 * @param[out] pKptStatus      One of the status codes denoted in the
 *                             enumerate type CpaCyKptKeyManagementStatus
 *     CPA_CY_KPT_SUCCESS Device credentials retrieved successfully
 *     CPA_CY_KPT_FAILED  Operation failed
 *
 * @retval CPA_STATUS_SUCCESS       Function executed successfully.
 * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in.
 * @retval CPA_STATUS_FAIL          Function failed. Suggested course of action
 *                                  is to shutdown and restart.
 * @retval CPA_STATUS_UNSUPPORTED   Function is not supported.
 * @retval CPA_STATUS_RESTARTING    API implementation is restarting.
 *                                  Resubmit the request.
 *
 * @pre
 *      The component has been initialized via cpaCyStartInstance function.
 * @post
 *      None
 * @note
 *      Note that this is a synchronous function and has no completion callback
 *      associated with it.
 * @see
 *
 *****************************************************************************/
CpaStatus
cpaCyKptQueryDeviceCredentials(const CpaInstanceHandle instanceHandle,
                               CpaCyKptValidationKey *pDevCredential,
                               CpaCyKptKeyManagementStatus *pKptStatus);
/**
 *****************************************************************************
 * @file cpa_cy_kpt.h
 * @ingroup cpaCyKpt
 *      Perform KPT key loading function.
 *
 * @description
 *      This function is invoked by a QAT application to load an encrypted
 *      symmetric wrapping key.
 * @context
 *      This is a synchronous function and it can sleep. It MUST NOT be
 *      executed in a context that DOES NOT permit sleeping.
 * @assumptions
 *      None
 * @sideEffects
 *      None
 * @blocking
 *      This function is synchronous and blocking.
 * @reentrant
 *      No
 * @threadSafe
 *      Yes
 *
 * @param[in]  instanceHandle  QAT service instance handle.
 * @param[in]  pSWK            Encrypted SWK
 * @param[out] keyHandle       A 64-bit handle value created by KPT
 * @param[out] pKptStatus      One of the status codes denoted in the
 *                             enumerate type CpaCyKptKeyManagementStatus
 *     CPA_CY_KPT_SUCCESS Key Loaded successfully
 *     CPA_CY_KPT_LOADKEY_FAIL_QUOTA_EXCEEDED_PER_VFID
 *         SWK count exceeds the configured maximum value per VFID
 *     CPA_CY_KPT_LOADKEY_FAIL_QUOTA_EXCEEDED_PER_PASID
 *         SWK count exceeds the configured maximum value per PASID
 *     CPA_CY_KPT_LOADKEY_FAIL_QUOTA_EXCEEDED
 *         SWK count exceeds the configured maximum value when not scoped to
 *         VFID or PASID
 *     CPA_CY_KPT_FAILED Operation failed due to unspecified reason
 *
 * @retval CPA_STATUS_SUCCESS       Function executed successfully.
 * @retval CPA_STATUS_FAIL          Function failed.
 * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in.
 * @retval CPA_STATUS_RESOURCE      Error related to system resources.
 * @retval CPA_STATUS_RESTARTING    API implementation is restarting.
 *                                  Resubmit the request.
 * @retval CPA_STATUS_UNSUPPORTED   KPT-2.0 is not supported.
 *
 * @pre
 *      Component has been initialized.
 * @post
 *      None
 * @note
 *      None
 * @see
 *      None
 *****************************************************************************/
CpaStatus
cpaCyKptLoadKey(CpaInstanceHandle instanceHandle,
                CpaCyKptLoadKey *pSWK,
                CpaCyKptHandle *keyHandle,
                CpaCyKptKeyManagementStatus *pKptStatus);
/**
 *****************************************************************************
 * @file cpa_cy_kpt.h
 * @ingroup cpaCyKpt
 *      Perform KPT delete keys function according to key handle
 *
 * @description
 *      Before closing a QAT session(instance), an application that has
 *      previously stored its wrapping key in a QAT device using the KPT
 *      framework executes this call to delete its wrapping key in the QAT
 *      device.
 * @context
 *      This is a synchronous function and it can sleep. It MUST NOT be
 *      executed in a context that DOES NOT permit sleeping.
 * @assumptions
 *      None
 * @sideEffects
 *      None
 * @blocking
 *      This function is synchronous and blocking.
 * @reentrant
 *      No
 * @threadSafe
 *      Yes
 *
 * @param[in]  instanceHandle  QAT service instance handle.
 * @param[in]  keyHandle       A 64-bit handle value
 * @param[out] pKptStatus      One of the status codes denoted in the
 *                             enumerate type CpaCyKptKeyManagementStatus
 *     CPA_CY_KPT_SUCCESS Key Deleted successfully
 *     CPA_CY_KPT_SWK_FAIL_NOT_FOUND For any reason the input handle cannot be
 *         found.
 *     CPA_CY_KPT_FAILED Operation failed due to unspecified reason
 *
 * @retval CPA_STATUS_SUCCESS       Function executed successfully.
 * @retval CPA_STATUS_FAIL          Function failed.
 * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in.
 * @retval CPA_STATUS_RESOURCE      Error related to system resources.
 * @retval CPA_STATUS_RESTARTING    API implementation is restarting.
 *                                  Resubmit the request.
 * @pre
 *      Component has been initialized.
 * @post
 *      None
 * @note
 *      None
 * @see
 *      None
 *****************************************************************************/
CpaStatus
cpaCyKptDeleteKey(CpaInstanceHandle instanceHandle,
                  CpaCyKptHandle keyHandle,
                  CpaCyKptKeyManagementStatus *pKptStatus);
/**
 *****************************************************************************
 * Usage APIs for KPT
 *
 *****************************************************************************/
/**
 *****************************************************************************
 * @file cpa_cy_kpt.h
 * @ingroup cpaCyKpt
 *      Perform KPT-2.0 mode RSA decrypt primitive operation on the input data.
 *
 * @description
 *      This function is a variant of cpaCyRsaDecrypt, which will perform
 *      an RSA decryption primitive operation on the input data using the
 *      specified RSA private key which are encrypted. As the RSA decryption
 *      primitive and signing primitive operations are mathematically
 *      identical this function may also be used to perform an RSA signing
 *      primitive operation.
 *
 * @context
 *      When called as an asynchronous function it cannot sleep. It can be
 *      executed in a context that does not permit sleeping.
 *      When called as a synchronous function it may sleep. It MUST NOT be
 *      executed in a context that DOES NOT permit sleeping.
 * @assumptions
 *      None
 * @sideEffects
 *      None
 * @blocking
 *      Yes when configured to operate in synchronous mode.
 * @reentrant
 *      No
 * @threadSafe
 *      Yes
 *
 * @param[in]  instanceHandle     Instance handle.
 * @param[in]  pRsaDecryptCb      Pointer to callback function to be invoked
 *                                when the operation is complete. If this is
 *                                set to a NULL value the function will operate
 *                                synchronously.
 * @param[in]  pCallbackTag       Opaque User Data for this specific call.
 *                                Will be returned unchanged in the callback.
 * @param[in]  pDecryptOpData     Structure containing all the data needed to
 *                                perform the RSA decrypt operation. The
 *                                client code allocates the memory for this
 *                                structure. This component takes ownership
 *                                of the memory until it is returned in the
 *                                callback.
 * @param[out] pOutputData        Pointer to structure into which the result of
 *                                the RSA decryption primitive is written. The
 *                                client MUST allocate this memory. The data
 *                                pointed to is an integer in big-endian order.
 *                                The value will be between 0 and the modulus
 *                                n - 1.
 *                                On invocation the callback function will
 *                                contain this parameter in the pOut parameter.
 * @param[in] pKptUnwrapContext   Pointer of structure into which the content
 *                                of KptUnwrapContext is kept. The client MUST
 *                                allocate this memory and copy structure
 *                                KptUnwrapContext into this flat buffer.
 *
 * @retval CPA_STATUS_SUCCESS       Function executed successfully.
 * @retval CPA_STATUS_FAIL          Function failed.
 * @retval CPA_STATUS_RETRY         Resubmit the request.
 * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in.
 * @retval CPA_STATUS_RESOURCE      Error related to system resources.
 * @retval CPA_STATUS_RESTARTING    API implementation is restarting. Resubmit
 *                                  the request.
 * @pre
 *      The component has been initialized via cpaCyStartInstance function.
 * @post
 *      None
 * @note
 *      By virtue of invoking cpaCyKptRsaDecrypt, the implementation understands
 *      that pDecryptOpData contains an encrypted private key that requires
 *      unwrapping. KptUnwrapContext contains a 'KptHandle' field that points
 *      to the unwrapping key in the WKT.
 *      When pRsaDecryptCb is non-NULL an asynchronous callback is generated in
 *      response to this function call.
 *      Any errors generated during processing are reported as part of the
 *      callback status code.
 *      In KPT release, private key field in CpaCyKptRsaDecryptOpData is a
 *      concatenation of cipher text and hash tag.
 *      For optimal performance, data pointers SHOULD be 8-byte aligned.
 * @see
 *      CpaCyKptRsaDecryptOpData,
 *      CpaCyGenFlatBufCbFunc,
 *
 *****************************************************************************/
CpaStatus
cpaCyKptRsaDecrypt(const CpaInstanceHandle instanceHandle,
                   const CpaCyGenFlatBufCbFunc pRsaDecryptCb,
                   void *pCallbackTag,
                   const CpaCyKptRsaDecryptOpData *pDecryptOpData,
                   CpaFlatBuffer *pOutputData,
                   CpaCyKptUnwrapContext *pKptUnwrapContext);
/**
 *****************************************************************************
 * @ingroup cpaCyKpt
 *      Generate ECDSA Signature R & S.
 * @description
 *      This function is a variant of cpaCyEcdsaSignRS, it generates ECDSA
 *      signature R & S as per ANSI X9.62 2005 section 7.3.
 * @context
 *      When called as an asynchronous function it cannot sleep. It can be
 *      executed in a context that does not permit sleeping.
 *      When called as a synchronous function it may sleep. It MUST NOT be
 *      executed in a context that DOES NOT permit sleeping.
 * @assumptions
 *      None
 * @sideEffects
 *      None
 * @blocking
 *      Yes when configured to operate in synchronous mode.
 * @reentrant
 *      No
 * @threadSafe
 *      Yes
 *
 * @param[in]  instanceHandle     Instance handle.
 * @param[in]  pCb                Callback function pointer. If this is set to
 *                                a NULL value the function will operate
 *                                synchronously.
 * @param[in]  pCallbackTag       User-supplied value to help identify request.
 * @param[in]  pOpData            Structure containing all the data needed to
 *                                perform the operation. The client code
 *                                allocates the memory for this structure. This
 *                                component takes ownership of the memory until
 *                                it is returned in the callback.
 * @param[out] pSignStatus        In synchronous mode, the multiply output is
 *                                valid (CPA_TRUE) or the output is invalid
 *                                (CPA_FALSE).
 * @param[out] pR                 ECDSA message signature r.
 * @param[out] pS                 ECDSA message signature s.
 * @param[in] pKptUnwrapContext   Pointer of structure into which the content
 *                                of KptUnwrapContext is kept. The client MUST
 *                                allocate this memory and copy structure
 *                                KptUnwrapContext into this flat buffer.
 *
 * @retval CPA_STATUS_SUCCESS       Function executed successfully.
 * @retval CPA_STATUS_FAIL          Function failed.
 * @retval CPA_STATUS_RETRY         Resubmit the request.
 * @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in.
 * @retval CPA_STATUS_RESOURCE      Error related to system resources.
 * @retval CPA_STATUS_RESTARTING    API implementation is restarting. Resubmit
 *                                  the request.
 * @retval CPA_STATUS_UNSUPPORTED   Function is not supported.
 *
 * @pre
 *      The component has been initialized via cpaCyStartInstance function.
 * @post
 *      None
 * @note
 *      By virtue of invoking the cpaCyKptEcdsaSignRS, the implementation
 *      understands CpaCyEcdsaSignRSOpData contains an encrypted private key that
 *      requires unwrapping. KptUnwrapContext contains a 'KptHandle' field
 *      that points to the unwrapping key in the WKT.
 *      When pCb is non-NULL an asynchronous callback of type
 *      CpaCyEcdsaSignRSCbFunc generated in response to this function
 *      call.
 *      In KPT release, private key field in CpaCyEcdsaSignRSOpData is a
 *      concatenation of cipher text and hash tag.
 * @see
 *      None
 *****************************************************************************/
CpaStatus
cpaCyKptEcdsaSignRS(const CpaInstanceHandle instanceHandle,
                    const CpaCyEcdsaSignRSCbFunc pCb,
                    void *pCallbackTag,
                    const CpaCyKptEcdsaSignRSOpData *pOpData,
                    CpaBoolean *pSignStatus,
                    CpaFlatBuffer *pR,
                    CpaFlatBuffer *pS,
                    CpaCyKptUnwrapContext *pKptUnwrapContext);
#ifdef __cplusplus
} /* close the extern "C" { */
#endif
#endif

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -76,8 +76,8 @@
* MSB is b[0]. Otherwise, all bytes from b[0] up to the MSB MUST be
* set to 0x00.
*
* The largest bit-length we support today is 4096 bits. In other
* words, we can deal with numbers up to a value of (2^4096)-1.
* The largest bit-length we support today is 8192 bits. In other
* words, we can deal with numbers up to a value of (2^8192)-1.
*
*****************************************************************************/
@ -110,21 +110,21 @@ extern "C" {
* result.
* The values of the base, the exponent and the modulus MUST all be less
* than 2^4096, and the modulus must not be equal to zero.
* than 2^8192, and the modulus must not be equal to zero.
*****************************************************************************/
typedef struct _CpaCyLnModExpOpData {
CpaFlatBuffer modulus;
/**< Flat buffer containing a pointer to the modulus.
* This number may be up to 4096 bits in length, and MUST be greater
* This number may be up to 8192 bits in length, and MUST be greater
* than zero.
*/
CpaFlatBuffer base;
/**< Flat buffer containing a pointer to the base.
* This number may be up to 4096 bits in length.
* This number may be up to 8192 bits in length.
*/
CpaFlatBuffer exponent;
/**< Flat buffer containing a pointer to the exponent.
* This number may be up to 4096 bits in length.
* This number may be up to 8192 bits in length.
*/
} CpaCyLnModExpOpData;
@ -146,19 +146,19 @@ typedef struct _CpaCyLnModExpOpData {
* result.
*
* Note that the values of A and B MUST NOT both be even numbers, and
* both MUST be less than 2^4096.
* both MUST be less than 2^8192.
*****************************************************************************/
typedef struct _CpaCyLnModInvOpData {
CpaFlatBuffer A;
/**< Flat buffer containing a pointer to the value that will be
* inverted.
* This number may be up to 4096 bits in length, it MUST NOT be zero,
* This number may be up to 8192 bits in length, it MUST NOT be zero,
* and it MUST be co-prime with B.
*/
CpaFlatBuffer B;
/**< Flat buffer containing a pointer to the value that will be used as
* the modulus.
* This number may be up to 4096 bits in length, it MUST NOT be zero,
* This number may be up to 8192 bits in length, it MUST NOT be zero,
* and it MUST be co-prime with A.
*/
} CpaCyLnModInvOpData;

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -138,8 +138,9 @@ typedef struct _CpaCyRsaPrivateKeyRep1 {
* - 1024 bits (128 bytes),
* - 1536 bits (192 bytes),
* - 2048 bits (256 bytes),
* - 3072 bits (384 bytes), or
* - 4096 bits (512 bytes).
* - 3072 bits (384 bytes),
* - 4096 bits (512 bytes), or
* - 8192 bits (1024 bytes).
*/
CpaFlatBuffer privateExponentD;
/**< The private exponent (d). For key generation operations the
@ -467,6 +468,17 @@ typedef struct _CpaCyRsaStats64 {
Cpa64U numRsaDecryptCompletedErrors;
/**< Total number of RSA decrypt operations that could not be
* completed successfully due to errors. */
Cpa64U numKptRsaDecryptRequests;
/**< Total number of successful KPT RSA decrypt operation requests. */
Cpa64U numKptRsaDecryptRequestErrors;
/**< Total number of KPT RSA decrypt requests that had an error and could
* not be processed. */
Cpa64U numKptRsaDecryptCompleted;
/**< Total number of KPT RSA decrypt operations that completed
* successfully. */
Cpa64U numKptRsaDecryptCompletedErrors;
/**< Total number of KPT RSA decrypt operations that could not be
* completed successfully due to errors. */
} CpaCyRsaStats64;
/**

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -372,17 +372,17 @@ typedef enum _CpaCySymHashAlgorithm
* CPA_CY_SYM_HASH_MODE_AUTH. Only 128-bit keys are supported. */
CPA_CY_SYM_HASH_ZUC_EIA3,
/**< ZUC algorithm in EIA3 mode */
CPA_CY_SYM_HASH_SHA3_224,
/**< 224 bit SHA-3 algorithm. Only CPA_CY_SYM_HASH_MODE_PLAIN and
* CPA_CY_SYM_HASH_MODE_AUTH are supported, that is, the hash
* mode CPA_CY_SYM_HASH_MODE_NESTED is not supported for this algorithm.
*/
CPA_CY_SYM_HASH_SHA3_256,
/**< 256 bit SHA-3 algorithm. Only CPA_CY_SYM_HASH_MODE_PLAIN and
* CPA_CY_SYM_HASH_MODE_AUTH are supported, that is, the hash
* mode CPA_CY_SYM_HASH_MODE_NESTED is not supported for this algorithm.
* Partial requests are not supported, that is, only requests
* of CPA_CY_SYM_PACKET_TYPE_FULL are supported. */
CPA_CY_SYM_HASH_SHA3_224,
/**< 224 bit SHA-3 algorithm. Only CPA_CY_SYM_HASH_MODE_PLAIN and
* CPA_CY_SYM_HASH_MODE_AUTH are supported, that is, the hash
* mode CPA_CY_SYM_HASH_MODE_NESTED is not supported for this algorithm.
*/
CPA_CY_SYM_HASH_SHA3_384,
/**< 384 bit SHA-3 algorithm. Only CPA_CY_SYM_HASH_MODE_PLAIN and
* CPA_CY_SYM_HASH_MODE_AUTH are supported, that is, the hash
@ -1453,7 +1453,7 @@ cpaCySymUpdateSession(CpaCySymSessionCtx sessionCtx,
*****************************************************************************/
CpaStatus
cpaCySymSessionInUse(CpaCySymSessionCtx sessionCtx,
CpaBoolean* pSessionInUse);
CpaBoolean* pSessionInUse);
/**
*****************************************************************************
@ -1519,6 +1519,9 @@ cpaCySymSessionInUse(CpaCySymSessionCtx sessionCtx,
* - The cipher algorithm is not CPA_CY_SYM_CIPHER_CHACHA and the hash
* algorithm is not CPA_CY_SYM_HASH_POLY.
*
* - The cipher algorithm is not CPA_CY_SYM_CIPHER_AES_GCM and the hash
* algorithm is not CPA_CY_SYM_HASH_AES_GCM.
*
* - The instance/implementation supports partial packets as one of
* its capabilities (see @ref CpaCySymCapabilitiesInfo).
*

View file

@ -2,7 +2,7 @@
*
* BSD LICENSE
*
* Copyright(c) 2007-2022 Intel Corporation. All rights reserved.
* Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

View file

@ -98,7 +98,10 @@ typedef enum device_type_e {
DEVICE_200XXVF,
DEVICE_C4XXX,
DEVICE_C4XXXVF,
DEVICE_GEN4
DEVICE_D15XX,
DEVICE_D15XXVF,
DEVICE_4XXX,
DEVICE_4XXXVF
} device_type_t;
/*

View file

@ -284,8 +284,6 @@ adf_notify_fatal_error_work(struct work_struct *work)
adf_error_notifier((uintptr_t)accel_dev);
if (!accel_dev->is_vf) {
if (accel_dev->u1.pf.vf_info)
adf_pf2vf_notify_fatal_error(accel_dev);
adf_dev_autoreset(accel_dev);
}

View file

@ -5,6 +5,8 @@
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_cfg_dev_dbg.h"
#include "adf_cfg_device.h"
#include "adf_cfg_sysctl.h"
#include "adf_heartbeat_dbg.h"
#include "adf_ver_dbg.h"
#include "adf_fw_counters.h"
@ -30,8 +32,54 @@ adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
sx_init(&dev_cfg_data->lock, "qat cfg data");
accel_dev->cfg = dev_cfg_data;
/* Default device configuration initialization */
if (!accel_dev->is_vf) {
if (IS_QAT_GEN4(pci_get_device(GET_DEV(accel_dev)))) {
dev_cfg_data->num_user_processes =
ADF_CFG_STATIC_CONF_USER_PROCESSES_NUM;
strncpy(dev_cfg_data->cfg_mode,
ADF_CFG_KERNEL_USER,
ADF_CFG_MAX_VAL);
if (accel_dev->accel_id % 2 == 0) {
strncpy(dev_cfg_data->cfg_services,
ADF_CFG_SYM_ASYM,
ADF_CFG_MAX_VAL);
} else {
strncpy(dev_cfg_data->cfg_services,
ADF_CFG_DC,
ADF_CFG_MAX_VAL);
}
} else {
strncpy(dev_cfg_data->cfg_mode,
ADF_CFG_KERNEL,
ADF_CFG_MAX_VAL);
dev_cfg_data->num_user_processes = 0;
strncpy(dev_cfg_data->cfg_services,
ADF_CFG_SYM_DC,
ADF_CFG_MAX_VAL);
}
} else {
dev_cfg_data->num_user_processes =
ADF_CFG_STATIC_CONF_USER_PROCESSES_NUM;
strncpy(dev_cfg_data->cfg_mode,
ADF_CFG_KERNEL,
ADF_CFG_MAX_VAL);
strncpy(dev_cfg_data->cfg_services,
"sym;asym",
ADF_CFG_MAX_VAL);
}
if (adf_cfg_sysctl_add(accel_dev))
goto err;
if (adf_cfg_dev_dbg_add(accel_dev))
goto err;
if (!accel_dev->is_vf) {
if (adf_heartbeat_dbg_add(accel_dev))
goto err;
@ -94,6 +142,7 @@ adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
adf_cfg_section_del_all(&dev_cfg_data->sec_list);
sx_xunlock(&dev_cfg_data->lock);
adf_cfg_sysctl_remove(accel_dev);
adf_cfg_dev_dbg_remove(accel_dev);
if (!accel_dev->is_vf) {
adf_ver_dbg_del(accel_dev);

View file

@ -7,7 +7,7 @@
#include "icp_qat_hw.h"
#include "adf_common_drv.h"
#define ADF_CFG_SVCS_MAX (25)
#define ADF_CFG_SVCS_MAX (12)
#define ADF_CFG_DEPRE_PARAMS_NUM (4)
#define ADF_CFG_CAP_DC ADF_ACCEL_CAPABILITIES_COMPRESSION
@ -83,6 +83,14 @@ static struct adf_cfg_profile adf_profiles[] =
ADF_CFG_CY_RINGS,
ADF_CFG_CAP_CY,
ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "asym;sym",
ADF_CFG_CY_RINGS,
ADF_CFG_CAP_CY,
ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "sym;asym",
ADF_CFG_CY_RINGS,
ADF_CFG_CAP_CY,
ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "dc", ADF_CFG_DC_RINGS, ADF_CFG_CAP_DC, 0 },
{ "sym",
ADF_CFG_SYM_RINGS,
@ -116,57 +124,6 @@ static struct adf_cfg_profile adf_profiles[] =
ADF_CFG_SYM_DC_RINGS,
ADF_CFG_CAP_SYM | ADF_CFG_CAP_DC,
ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "inline;sym",
ADF_CFG_SYM_RINGS,
ADF_CFG_CAP_SYM,
ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "sym;inline",
ADF_CFG_SYM_RINGS,
ADF_CFG_CAP_SYM,
ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "inline;asym",
ADF_CFG_SYM_RINGS,
ADF_CFG_CAP_SYM,
ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "asym;inline",
ADF_CFG_ASYM_RINGS,
ADF_CFG_CAP_ASYM,
ADF_CFG_FW_CAP_ECEDMONT },
{ "inline", 0, 0, 0 },
{ "inline;cy",
ADF_CFG_CY_RINGS,
ADF_CFG_CAP_CY,
ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "cy;inline",
ADF_CFG_CY_RINGS,
ADF_CFG_CAP_CY,
ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "dc;inline", ADF_CFG_DC_RINGS, ADF_CFG_CAP_DC, 0 },
{ "inline;dc", ADF_CFG_DC_RINGS, ADF_CFG_CAP_DC, 0 },
{ "cy;dc;inline",
ADF_CFG_CY_DC_RINGS,
ADF_CFG_CAP_CY | ADF_CFG_CAP_DC,
ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "cy;inline;dc",
ADF_CFG_CY_DC_RINGS,
ADF_CFG_CAP_CY | ADF_CFG_CAP_DC,
ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "dc;inline;cy",
ADF_CFG_CY_DC_RINGS,
ADF_CFG_CAP_CY | ADF_CFG_CAP_DC,
ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "dc;cy;inline",
ADF_CFG_CY_DC_RINGS,
ADF_CFG_CAP_CY | ADF_CFG_CAP_DC,
ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "inline;cy;dc",
ADF_CFG_CY_DC_RINGS,
ADF_CFG_CAP_CY | ADF_CFG_CAP_DC,
ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "inline;dc;cy",
ADF_CFG_CY_DC_RINGS,
ADF_CFG_CAP_CY | ADF_CFG_CAP_DC,
ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN },
} },
{ ADF_FW_IMAGE_CRYPTO,
{
@ -174,8 +131,7 @@ static struct adf_cfg_profile adf_profiles[] =
ADF_CFG_CY_RINGS,
ADF_CFG_CAP_CY,
ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF |
ADF_CFG_FW_CAP_ECEDMONT |
ADF_CFG_FW_CAP_EXT_ALGCHAIN },
ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "sym",
ADF_CFG_SYM_RINGS,
ADF_CFG_CAP_SYM,
@ -196,8 +152,7 @@ static struct adf_cfg_profile adf_profiles[] =
ADF_CFG_CY_RINGS,
ADF_CFG_CAP_CY,
ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF |
ADF_CFG_FW_CAP_ECEDMONT |
ADF_CFG_FW_CAP_EXT_ALGCHAIN },
ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "dc", ADF_CFG_DC_RINGS, ADF_CFG_CAP_DC, 0 },
{ "sym",
ADF_CFG_SYM_RINGS,
@ -212,14 +167,12 @@ static struct adf_cfg_profile adf_profiles[] =
ADF_CFG_CY_DC_RINGS,
ADF_CFG_CAP_CY | ADF_CFG_CAP_DC,
ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF |
ADF_CFG_FW_CAP_ECEDMONT |
ADF_CFG_FW_CAP_EXT_ALGCHAIN },
ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "dc;cy",
ADF_CFG_CY_DC_RINGS,
ADF_CFG_CAP_CY | ADF_CFG_CAP_DC,
ADF_CFG_FW_CAP_RL | ADF_CFG_FW_CAP_HKDF |
ADF_CFG_FW_CAP_ECEDMONT |
ADF_CFG_FW_CAP_EXT_ALGCHAIN },
ADF_CFG_FW_CAP_ECEDMONT | ADF_CFG_FW_CAP_EXT_ALGCHAIN },
{ "asym;dc",
ADF_CFG_ASYM_DC_RINGS,
ADF_CFG_CAP_ASYM | ADF_CFG_CAP_DC,
@ -380,9 +333,34 @@ adf_cfg_get_services_enabled(struct adf_accel_dev *accel_dev,
void
adf_cfg_set_asym_rings_mask(struct adf_accel_dev *accel_dev)
{
int service;
u16 ena_srv_mask;
u16 service_type;
u16 asym_mask = 0;
struct adf_cfg_device *cfg_dev = accel_dev->cfg->dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
hw_data->asym_rings_mask = 0;
if (!cfg_dev) {
hw_data->asym_rings_mask = ADF_CFG_DEF_ASYM_MASK;
return;
}
ena_srv_mask = accel_dev->hw_device->ring_to_svc_map;
/* parse each service */
for (service = 0; service < ADF_CFG_MAX_SERVICES; service++) {
service_type = GET_SRV_TYPE(ena_srv_mask, service);
switch (service_type) {
case CRYPTO:
case ASYM:
SET_ASYM_MASK(asym_mask, service);
if (service_type == CRYPTO)
service++;
break;
}
}
hw_data->asym_rings_mask = asym_mask;
}
void
@ -579,7 +557,6 @@ adf_cfg_update_vf_accel_cap_mask(struct adf_accel_dev *accel_dev)
{
u32 enabled_svc_caps = 0;
u32 enabled_fw_caps = 0;
if (adf_cfg_get_caps_enabled(accel_dev,
&enabled_svc_caps,
&enabled_fw_caps))
@ -701,6 +678,272 @@ adf_cfg_device_clear(struct adf_cfg_device *device,
device->instances = NULL;
}
/*
* Static configuration for userspace
*/
/*
 * Build the static user-space ("SAL") configuration section for the device.
 *
 * Adds the ADF_SAL_SEC section and populates it with the number of crypto
 * (cy) and compression (dc) user instances, the allowed number of user
 * processes, and, per instance, its core affinity, poll mode and name.
 * VF devices get the *_VF instance counts.
 *
 * Returns 0 on success, ENXIO on non-GEN4 devices, or a non-zero OR of the
 * individual adf_cfg_* return codes on failure (callers only test for
 * non-zero).
 */
static int
adf_cfg_static_conf_user(struct adf_accel_dev *accel_dev,
			 int cy_enabled,
			 int dc_enabled)
{
	int ret = 0;
	unsigned long val = 0;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char value[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	int cy_user_instances = 0;
	int dc_user_instances = 0;
	int i = 0;
	int cpus = num_online_cpus();

	/* User-space configuration is only implemented for GEN4 (4xxx). */
	if (!(IS_QAT_GEN4(pci_get_device(GET_DEV(accel_dev))))) {
		device_printf(
		    GET_DEV(accel_dev),
		    "User space configuration supported only on QAT 4xxx devices\n");
		return ENXIO;
	}
	ret |= adf_cfg_section_add(accel_dev, ADF_SAL_SEC);

	/* VFs expose fewer user instances than PFs. */
	if (accel_dev->is_vf) {
		if (cy_enabled)
			cy_user_instances =
			    ADF_CFG_STATIC_CONF_USER_INST_NUM_CY_VF;

		if (dc_enabled)
			dc_user_instances =
			    ADF_CFG_STATIC_CONF_USER_INST_NUM_DC_VF;
	} else {
		if (cy_enabled)
			cy_user_instances =
			    ADF_CFG_STATIC_CONF_USER_INST_NUM_CY;

		if (dc_enabled)
			dc_user_instances =
			    ADF_CFG_STATIC_CONF_USER_INST_NUM_DC;
	}

	val = cy_user_instances;
	snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_CY);
	ret |= adf_cfg_add_key_value_param(
	    accel_dev, ADF_SAL_SEC, key, (void *)&val, ADF_DEC);

	val = dc_user_instances;
	snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_DC);
	ret |= adf_cfg_add_key_value_param(
	    accel_dev, ADF_SAL_SEC, key, (void *)&val, ADF_DEC);

	val = accel_dev->cfg->num_user_processes;
	snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_PROCESSES);
	ret |= adf_cfg_add_key_value_param(
	    accel_dev, ADF_SAL_SEC, key, (void *)&val, ADF_DEC);

	/* Per-cy-instance keys: affinity (spread across CPUs), poll mode,
	 * and instance name "CyN". */
	for (i = 0; i < cy_user_instances; i++) {
		val = (accel_dev->accel_id * cy_user_instances + i) % cpus;
		snprintf(key,
			 ADF_CFG_MAX_KEY_LEN_IN_BYTES,
			 ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
			 i);
		ret |= adf_cfg_add_key_value_param(
		    accel_dev, ADF_SAL_SEC, key, (void *)&val, ADF_DEC);

		val = ADF_CFG_STATIC_CONF_POLL;
		snprintf(key,
			 ADF_CFG_MAX_KEY_LEN_IN_BYTES,
			 ADF_CY "%d" ADF_POLL_MODE,
			 i);
		ret |= adf_cfg_add_key_value_param(
		    accel_dev, ADF_SAL_SEC, key, (void *)&val, ADF_DEC);

		snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_CY "%d", i);
		snprintf(key,
			 ADF_CFG_MAX_KEY_LEN_IN_BYTES,
			 ADF_CY_NAME_FORMAT,
			 i);
		ret |= adf_cfg_add_key_value_param(
		    accel_dev, ADF_SAL_SEC, key, (void *)value, ADF_STR);
	}

	/* Per-dc-instance keys, mirroring the cy loop above. */
	for (i = 0; i < dc_user_instances; i++) {
		val = (accel_dev->accel_id * dc_user_instances + i) % cpus;
		snprintf(key,
			 ADF_CFG_MAX_KEY_LEN_IN_BYTES,
			 ADF_DC "%d" ADF_ETRMGR_CORE_AFFINITY,
			 i);
		ret |= adf_cfg_add_key_value_param(
		    accel_dev, ADF_SAL_SEC, key, (void *)&val, ADF_DEC);

		val = ADF_CFG_STATIC_CONF_POLL;
		snprintf(key,
			 ADF_CFG_MAX_KEY_LEN_IN_BYTES,
			 ADF_DC "%d" ADF_POLL_MODE,
			 i);
		ret |= adf_cfg_add_key_value_param(
		    accel_dev, ADF_SAL_SEC, key, (void *)&val, ADF_DEC);

		snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_DC "%d", i);
		snprintf(key,
			 ADF_CFG_MAX_KEY_LEN_IN_BYTES,
			 ADF_DC_NAME_FORMAT,
			 i);
		ret |= adf_cfg_add_key_value_param(
		    accel_dev, ADF_SAL_SEC, key, (void *)value, ADF_STR);
	}

	return ret;
}
/*
 * Build the static kernel-space ("KERNEL_QAT") configuration section.
 *
 * Budgets the device's ring banks (GET_MAX_BANKS) between compression
 * instances, polled crypto instances and (when sym is enabled) IRQ crypto
 * instances, in that order; returns EFAULT as soon as the remaining bank
 * budget cannot satisfy the next requested instance count.  VF devices use
 * the smaller *_VF defaults.  Then emits the per-instance affinity,
 * poll-mode and name keys into ADF_KERNEL_SAL_SEC.
 *
 * Returns 0 on success, EFAULT on bank exhaustion, or a non-zero OR of the
 * individual adf_cfg_* return codes (callers only test for non-zero).
 */
static int
adf_cfg_static_conf_kernel(struct adf_accel_dev *accel_dev,
			   int asym_enabled,
			   int sym_enabled,
			   int dc_enabled)
{
	int ret = 0;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char value[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	unsigned long val = 0;
	int i = 0;
	int instances = 0;
	int cy_poll_instances = 0;
	int cy_irq_instances = 0;
	int dc_instances = 0;
	int def_cy_poll_inst = ADF_CFG_STATIC_CONF_INST_NUM_CY_POLL;
	int def_cy_irq_inst = ADF_CFG_STATIC_CONF_INST_NUM_CY_IRQ;
	int def_dc_inst = ADF_CFG_STATIC_CONF_INST_NUM_DC;
	int cpus = num_online_cpus();

	/* Total bank budget available for kernel instances. */
	instances = GET_MAX_BANKS(accel_dev);
	if (!instances)
		return EFAULT;

	if (accel_dev->is_vf) {
		def_cy_poll_inst = ADF_CFG_STATIC_CONF_INST_NUM_CY_POLL_VF;
		def_cy_irq_inst = ADF_CFG_STATIC_CONF_INST_NUM_CY_IRQ_VF;
		def_dc_inst = ADF_CFG_STATIC_CONF_INST_NUM_DC_VF;
	}

	/* Create the kernel SAL section that receives all keys below. */
	ret |= adf_cfg_section_add(accel_dev, ADF_KERNEL_SAL_SEC);

	if (dc_enabled) {
		if (instances >= def_dc_inst) {
			dc_instances = def_dc_inst;
			instances -= dc_instances;
		} else {
			return EFAULT;
		}
	}

	if (asym_enabled || sym_enabled) {
		if (instances >= def_cy_poll_inst) {
			cy_poll_instances = def_cy_poll_inst;
			instances -= cy_poll_instances;
		} else {
			return EFAULT;
		}

		/* IRQ-driven cy instances are only added for sym. */
		if (sym_enabled) {
			if (instances >= def_cy_irq_inst) {
				cy_irq_instances = def_cy_irq_inst;
				instances -= cy_irq_instances;
			} else {
				return EFAULT;
			}
		}
	}

	val = (cy_poll_instances + cy_irq_instances);
	snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_CY);
	ret |= adf_cfg_add_key_value_param(
	    accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC);

	val = dc_instances;
	snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_DC);
	ret |= adf_cfg_add_key_value_param(
	    accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC);

	/* IRQ cy instances occupy indices [0, cy_irq_instances). */
	for (i = 0; i < (cy_irq_instances); i++) {
		val = (accel_dev->accel_id * cy_irq_instances + i) % cpus;
		snprintf(key,
			 ADF_CFG_MAX_KEY_LEN_IN_BYTES,
			 ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
			 i);
		ret |= adf_cfg_add_key_value_param(
		    accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC);

		val = ADF_CFG_STATIC_CONF_IRQ;
		snprintf(key,
			 ADF_CFG_MAX_KEY_LEN_IN_BYTES,
			 ADF_CY "%d" ADF_POLL_MODE,
			 i);
		ret |= adf_cfg_add_key_value_param(
		    accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC);

		snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_CY "%d", i);
		snprintf(key,
			 ADF_CFG_MAX_KEY_LEN_IN_BYTES,
			 ADF_CY_NAME_FORMAT,
			 i);
		ret |= adf_cfg_add_key_value_param(
		    accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)value, ADF_STR);
	}

	/* Polled cy instances continue the index range after the IRQ ones. */
	for (i = cy_irq_instances; i < (cy_poll_instances + cy_irq_instances);
	     i++) {
		val = (accel_dev->accel_id * cy_poll_instances + i) % cpus;
		snprintf(key,
			 ADF_CFG_MAX_KEY_LEN_IN_BYTES,
			 ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
			 i);
		ret |= adf_cfg_add_key_value_param(
		    accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC);

		val = ADF_CFG_STATIC_CONF_POLL;
		snprintf(key,
			 ADF_CFG_MAX_KEY_LEN_IN_BYTES,
			 ADF_CY "%d" ADF_POLL_MODE,
			 i);
		ret |= adf_cfg_add_key_value_param(
		    accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC);

		snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_CY "%d", i);
		snprintf(key,
			 ADF_CFG_MAX_KEY_LEN_IN_BYTES,
			 ADF_CY_NAME_FORMAT,
			 i);
		ret |= adf_cfg_add_key_value_param(
		    accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)value, ADF_STR);
	}

	/* Compression instances use their own zero-based index range. */
	for (i = 0; i < dc_instances; i++) {
		val = (accel_dev->accel_id * dc_instances + i) % cpus;
		snprintf(key,
			 ADF_CFG_MAX_KEY_LEN_IN_BYTES,
			 ADF_DC "%d" ADF_ETRMGR_CORE_AFFINITY,
			 i);
		ret |= adf_cfg_add_key_value_param(
		    accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC);

		val = ADF_CFG_STATIC_CONF_POLL;
		snprintf(key,
			 ADF_CFG_MAX_KEY_LEN_IN_BYTES,
			 ADF_DC "%d" ADF_POLL_MODE,
			 i);
		ret |= adf_cfg_add_key_value_param(
		    accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC);

		snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_DC "%d", i);
		snprintf(key,
			 ADF_CFG_MAX_KEY_LEN_IN_BYTES,
			 ADF_DC_NAME_FORMAT,
			 i);
		ret |= adf_cfg_add_key_value_param(
		    accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)value, ADF_STR);
	}

	return ret;
}
static int
adf_cfg_static_conf(struct adf_accel_dev *accel_dev)
{
@ -708,40 +951,69 @@ adf_cfg_static_conf(struct adf_accel_dev *accel_dev)
unsigned long val = 0;
char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
char value[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
int cpus;
int instances = 0;
int cy_poll_instances;
int cy_irq_instances;
int dc_instances;
int i = 0;
char *token, *cur_str;
int ks_enabled = 0;
int us_enabled = 0;
int asym_enabled = 0;
int sym_enabled = 0;
int cy_enabled = 0;
int dc_enabled = 0;
cpus = num_online_cpus();
instances =
GET_MAX_BANKS(accel_dev) > cpus ? GET_MAX_BANKS(accel_dev) : cpus;
if (!instances)
return EFAULT;
strncpy(value, accel_dev->cfg->cfg_mode, ADF_CFG_MAX_VAL);
cur_str = value;
if (instances >= ADF_CFG_STATIC_CONF_INST_NUM_DC)
dc_instances = ADF_CFG_STATIC_CONF_INST_NUM_DC;
else
return EFAULT;
instances -= dc_instances;
token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
while (token) {
if (!strncmp(token, ADF_CFG_KERNEL, strlen(ADF_CFG_KERNEL)))
ks_enabled = 1;
if (!strncmp(token, ADF_CFG_USER, strlen(ADF_CFG_USER)))
us_enabled = 1;
token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
}
if (instances >= ADF_CFG_STATIC_CONF_INST_NUM_CY_POLL)
cy_poll_instances = ADF_CFG_STATIC_CONF_INST_NUM_CY_POLL;
else
return EFAULT;
instances -= cy_poll_instances;
/* Get the services enabled by user */
strncpy(value, accel_dev->cfg->cfg_services, ADF_CFG_MAX_VAL);
cur_str = value;
if (instances >= ADF_CFG_STATIC_CONF_INST_NUM_CY_IRQ)
cy_irq_instances = ADF_CFG_STATIC_CONF_INST_NUM_CY_IRQ;
else
return EFAULT;
instances -= cy_irq_instances;
token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
while (token) {
if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM))) {
sym_enabled = 1;
}
if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM))) {
asym_enabled = 1;
}
		/* "cy" means both asym and sym should be enabled;
		 * hardware resource allocation is checked later.
		 */
if (!strncmp(token, ADF_CFG_CY, strlen(ADF_CFG_CY))) {
asym_enabled = 1;
sym_enabled = 1;
}
if (!strncmp(token, ADF_SERVICE_DC, strlen(ADF_SERVICE_DC))) {
dc_enabled = 1;
}
token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
}
if (asym_enabled || sym_enabled) {
cy_enabled = 1;
}
ret |= adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_SERVICES_ENABLED);
ret |= adf_cfg_section_add(accel_dev, ADF_KERNEL_SAL_SEC);
if (strcmp(ADF_CFG_SYM_ASYM, accel_dev->cfg->cfg_services) == 0) {
strncpy(value, ADF_CFG_CY, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
} else {
strncpy(value,
accel_dev->cfg->cfg_services,
ADF_CFG_MAX_VAL_LEN_IN_BYTES);
}
ret |= adf_cfg_add_key_value_param(
accel_dev, ADF_GENERAL_SEC, key, (void *)value, ADF_STR);
val = ADF_CFG_STATIC_CONF_VER;
snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_CONFIG_VERSION);
@ -769,6 +1041,15 @@ adf_cfg_static_conf(struct adf_accel_dev *accel_dev)
return EFAULT;
}
	/* User-defined adjustment based on the services enabled */
if (cy_enabled && !dc_enabled) {
cy_au += dc_au;
dc_au = 0;
} else if (!cy_enabled && dc_enabled) {
dc_au += cy_au;
cy_au = 0;
}
val = cy_au;
snprintf(key,
ADF_CFG_MAX_KEY_LEN_IN_BYTES,
@ -806,22 +1087,6 @@ adf_cfg_static_conf(struct adf_accel_dev *accel_dev)
ret |= adf_cfg_add_key_value_param(
accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC);
snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_SERVICES_ENABLED);
if ((cy_poll_instances + cy_irq_instances) == 0 && dc_instances > 0) {
snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_CFG_DC);
} else if (((cy_poll_instances + cy_irq_instances)) > 0 &&
dc_instances == 0) {
snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_CFG_SYM);
} else {
snprintf(value,
ADF_CFG_MAX_VAL_LEN_IN_BYTES,
"%s;%s",
ADF_CFG_SYM,
ADF_CFG_DC);
}
ret |= adf_cfg_add_key_value_param(
accel_dev, ADF_GENERAL_SEC, key, (void *)value, ADF_STR);
val = ADF_CFG_STATIC_CONF_SAL_STATS_CFG_DC;
snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, SAL_STATS_CFG_DC);
ret |= adf_cfg_add_key_value_param(
@ -877,97 +1142,20 @@ adf_cfg_static_conf(struct adf_accel_dev *accel_dev)
ret |= adf_cfg_add_key_value_param(
accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC);
val = (cy_poll_instances + cy_irq_instances);
snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_CY);
ret |= adf_cfg_add_key_value_param(
accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC);
val = dc_instances;
snprintf(key, ADF_CFG_MAX_KEY_LEN_IN_BYTES, ADF_NUM_DC);
ret |= adf_cfg_add_key_value_param(
accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC);
for (i = 0; i < (cy_irq_instances); i++) {
val = i;
snprintf(key,
ADF_CFG_MAX_KEY_LEN_IN_BYTES,
ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
i);
ret |= adf_cfg_add_key_value_param(
accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC);
val = ADF_CFG_STATIC_CONF_IRQ;
snprintf(key,
ADF_CFG_MAX_KEY_LEN_IN_BYTES,
ADF_CY "%d" ADF_POLL_MODE,
i);
ret |= adf_cfg_add_key_value_param(
accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC);
snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_CY "%d", i);
snprintf(key,
ADF_CFG_MAX_KEY_LEN_IN_BYTES,
ADF_CY_NAME_FORMAT,
i);
ret |= adf_cfg_add_key_value_param(
accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)value, ADF_STR);
if (ks_enabled) {
ret |= adf_cfg_static_conf_kernel(accel_dev,
asym_enabled,
sym_enabled,
dc_enabled);
}
for (i = cy_irq_instances; i < (cy_poll_instances + cy_irq_instances);
i++) {
val = i;
snprintf(key,
ADF_CFG_MAX_KEY_LEN_IN_BYTES,
ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
i);
ret |= adf_cfg_add_key_value_param(
accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC);
val = ADF_CFG_STATIC_CONF_POLL;
snprintf(key,
ADF_CFG_MAX_KEY_LEN_IN_BYTES,
ADF_CY "%d" ADF_POLL_MODE,
i);
ret |= adf_cfg_add_key_value_param(
accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC);
snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_CY "%d", i);
snprintf(key,
ADF_CFG_MAX_KEY_LEN_IN_BYTES,
ADF_CY_NAME_FORMAT,
i);
ret |= adf_cfg_add_key_value_param(
accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)value, ADF_STR);
}
for (i = 0; i < dc_instances; i++) {
val = i;
snprintf(key,
ADF_CFG_MAX_KEY_LEN_IN_BYTES,
ADF_DC "%d" ADF_ETRMGR_CORE_AFFINITY,
i);
ret |= adf_cfg_add_key_value_param(
accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC);
val = ADF_CFG_STATIC_CONF_POLL;
snprintf(key,
ADF_CFG_MAX_KEY_LEN_IN_BYTES,
ADF_DC "%d" ADF_POLL_MODE,
i);
ret |= adf_cfg_add_key_value_param(
accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)&val, ADF_DEC);
snprintf(value, ADF_CFG_MAX_VAL_LEN_IN_BYTES, ADF_DC "%d", i);
snprintf(key,
ADF_CFG_MAX_KEY_LEN_IN_BYTES,
ADF_DC_NAME_FORMAT,
i);
ret |= adf_cfg_add_key_value_param(
accel_dev, ADF_KERNEL_SAL_SEC, key, (void *)value, ADF_STR);
if (us_enabled) {
ret |=
adf_cfg_static_conf_user(accel_dev, cy_enabled, dc_enabled);
}
if (ret)
ret = EFAULT;
ret = ENXIO;
return ret;
}

View file

@ -0,0 +1,343 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_cfg_sysctl.h"
#include "adf_cfg_device.h"
#include "adf_common_drv.h"
#include <sys/mutex.h>
#include <sys/sbuf.h>
#define ADF_CFG_SYSCTL_BUF_SZ ADF_CFG_MAX_VAL
#define ADF_CFG_UP_STR "up"
#define ADF_CFG_DOWN_STR "down"
#define ADF_CFG_MAX_USER_PROCESSES 64
static int
adf_cfg_down(struct adf_accel_dev *accel_dev)
{
int ret = 0;
if (!adf_dev_started(accel_dev)) {
device_printf(GET_DEV(accel_dev),
"Device qat_dev%d already down\n",
accel_dev->accel_id);
return 0;
}
if (adf_dev_in_use(accel_dev)) {
pr_err("QAT: Device %d in use\n", accel_dev->accel_id);
goto out;
}
if (adf_dev_stop(accel_dev)) {
device_printf(GET_DEV(accel_dev),
"Failed to stop qat_dev%d\n",
accel_dev->accel_id);
ret = EFAULT;
goto out;
}
adf_dev_shutdown(accel_dev);
out:
return ret;
}
/*
 * Bring the device up: run the device-specific configuration callback,
 * then init and start the device.  On success the staged configuration
 * (accel_dev->cfg->dev) has been consumed and is freed.
 *
 * Returns 0 on success (or if already up), ENXIO when no config_device
 * callback is installed, or the error from configuration/init/start.
 *
 * Fix: the function previously ended with an unconditional "return 0;",
 * so a failed init/start (after which the device was stopped and shut
 * down again) was still reported as success to the sysctl writer.
 */
static int
adf_cfg_up(struct adf_accel_dev *accel_dev)
{
	int ret;

	if (adf_dev_started(accel_dev))
		return 0;

	if (NULL == accel_dev->hw_device->config_device)
		return ENXIO;

	ret = accel_dev->hw_device->config_device(accel_dev);
	if (ret) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to start qat_dev%d\n",
			      accel_dev->accel_id);
		return ret;
	}

	ret = adf_dev_init(accel_dev);
	if (!ret)
		ret = adf_dev_start(accel_dev);

	if (ret) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to start qat_dev%d\n",
			      accel_dev->accel_id);
		/* Roll back whatever init/start managed to set up. */
		adf_dev_stop(accel_dev);
		adf_dev_shutdown(accel_dev);
	} else {
		/* Startup consumed the staged configuration; release it. */
		struct adf_cfg_device *cfg_dev = NULL;

		cfg_dev = accel_dev->cfg->dev;
		adf_cfg_device_clear(cfg_dev, accel_dev);
		free(cfg_dev, M_QAT);
		accel_dev->cfg->dev = NULL;
	}

	return ret;
}
static const char *const cfg_serv[] =
{ "sym;asym", "sym", "asym", "dc", "sym;dc", "asym;dc", "cy", "cy;dc" };
static const char *const cfg_mode[] = { "ks;us", "us", "ks" };
/*
 * Sysctl handler for dev.qat.X.cfg_services.
 *
 * Read: returns the current service configuration string.
 * Write: accepted only while the device is down; the new value must be an
 * exact (case-insensitive) match for one of the entries in cfg_serv[].
 *
 * Fix: the lookup used strncasecmp() bounded by the input length, which is
 * a prefix match — truncated input such as "s" or "a" matched a table entry
 * and was stored verbatim in cfg_services, where the configuration parser
 * could not interpret it later.  Require an exact match instead.
 */
static int adf_cfg_sysctl_services_handle(SYSCTL_HANDLER_ARGS)
{
	struct adf_cfg_device_data *dev_cfg_data;
	struct adf_accel_dev *accel_dev;
	char buf[ADF_CFG_SYSCTL_BUF_SZ];
	int ret = 0;
	int i = 0;

	accel_dev = arg1;
	if (!accel_dev)
		return ENXIO;

	dev_cfg_data = accel_dev->cfg;
	if (!dev_cfg_data)
		return ENXIO;

	strlcpy(buf, dev_cfg_data->cfg_services, sizeof(buf));
	ret = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (ret != 0 || req->newptr == NULL)
		return ret;

	/* Handle config change */
	if (adf_dev_started(accel_dev)) {
		device_printf(
		    GET_DEV(accel_dev),
		    "QAT: configuration could be changed in down state only\n");
		return EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(cfg_serv); i++) {
		if (strcasecmp(cfg_serv[i], buf) == 0) {
			strlcpy(dev_cfg_data->cfg_services,
				buf,
				ADF_CFG_MAX_VAL);
			break;
		}
	}

	if (i == ARRAY_SIZE(cfg_serv)) {
		device_printf(GET_DEV(accel_dev),
			      "Unknown service configuration\n");
		ret = EINVAL;
	}

	return ret;
}
/*
 * Sysctl handler for dev.qat.X.cfg_mode.
 *
 * Read: returns the current mode string ("ks;us", "us" or "ks").
 * Write: accepted only while the device is down; the new value must be an
 * exact (case-insensitive) match for one of the entries in cfg_mode[].
 *
 * Fix: the lookup used strncasecmp() bounded by the input length, which is
 * a prefix match — input such as "k" or "u" matched a table entry and was
 * stored verbatim in cfg_mode, where the static configuration parser could
 * not interpret it later.  Require an exact match instead.
 */
static int adf_cfg_sysctl_mode_handle(SYSCTL_HANDLER_ARGS)
{
	struct adf_cfg_device_data *dev_cfg_data;
	struct adf_accel_dev *accel_dev;
	char buf[ADF_CFG_SYSCTL_BUF_SZ];
	int ret = 0;
	int i = 0;

	accel_dev = arg1;
	if (!accel_dev)
		return ENXIO;

	dev_cfg_data = accel_dev->cfg;
	if (!dev_cfg_data)
		return ENXIO;

	strlcpy(buf, dev_cfg_data->cfg_mode, sizeof(buf));
	ret = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (ret != 0 || req->newptr == NULL)
		return ret;

	/* Handle config change */
	if (adf_dev_started(accel_dev)) {
		device_printf(
		    GET_DEV(accel_dev),
		    "QAT: configuration could be changed in down state only\n");
		return EBUSY;
	}

	for (i = 0; i < ARRAY_SIZE(cfg_mode); i++) {
		if (strcasecmp(cfg_mode[i], buf) == 0) {
			strlcpy(dev_cfg_data->cfg_mode, buf, ADF_CFG_MAX_VAL);
			break;
		}
	}

	if (i == ARRAY_SIZE(cfg_mode)) {
		device_printf(GET_DEV(accel_dev),
			      "Unknown configuration mode\n");
		ret = EINVAL;
	}

	return ret;
}
/*
 * Sysctl handler for dev.qat.X.state.
 *
 * Read: reports "up" or "down" depending on adf_dev_started().
 * Write: "up" brings the device up, "down" brings it down; anything else
 * is rejected with EINVAL.
 *
 * Fix: the comparison used strncasecmp() bounded by the input length, so a
 * bare "u" or "d" (any prefix) triggered a state transition.  Require the
 * full keyword, case-insensitively.
 */
static int adf_cfg_sysctl_handle(SYSCTL_HANDLER_ARGS)
{
	struct adf_cfg_device_data *dev_cfg_data;
	struct adf_accel_dev *accel_dev;
	char buf[ADF_CFG_SYSCTL_BUF_SZ] = { 0 };
	int ret = 0;

	accel_dev = arg1;
	if (!accel_dev)
		return ENXIO;

	dev_cfg_data = accel_dev->cfg;
	if (!dev_cfg_data)
		return ENXIO;

	if (adf_dev_started(accel_dev)) {
		strlcpy(buf, ADF_CFG_UP_STR, sizeof(buf));
	} else {
		strlcpy(buf, ADF_CFG_DOWN_STR, sizeof(buf));
	}

	ret = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (ret != 0 || req->newptr == NULL)
		return ret;

	if (strcasecmp(ADF_CFG_UP_STR, buf) == 0) {
		ret = adf_cfg_up(accel_dev);
	} else if (strcasecmp(ADF_CFG_DOWN_STR, buf) == 0) {
		ret = adf_cfg_down(accel_dev);
	} else {
		device_printf(GET_DEV(accel_dev), "QAT: Invalid operation\n");
		ret = EINVAL;
	}

	return ret;
}
static int adf_cfg_sysctl_num_processes_handle(SYSCTL_HANDLER_ARGS)
{
struct adf_cfg_device_data *dev_cfg_data;
struct adf_accel_dev *accel_dev;
uint32_t num_user_processes = 0;
int ret = 0;
accel_dev = arg1;
if (!accel_dev)
return ENXIO;
dev_cfg_data = accel_dev->cfg;
if (!dev_cfg_data)
return ENXIO;
num_user_processes = dev_cfg_data->num_user_processes;
ret = sysctl_handle_int(oidp, &num_user_processes, 0, req);
if (ret != 0 || req->newptr == NULL)
return ret;
if (adf_dev_started(accel_dev)) {
device_printf(
GET_DEV(accel_dev),
"QAT: configuration could be changed in down state only\n");
return EBUSY;
}
if (num_user_processes > ADF_CFG_MAX_USER_PROCESSES) {
return EINVAL;
}
dev_cfg_data->num_user_processes = num_user_processes;
return ret;
}
/*
 * Create the per-device configuration sysctl nodes under dev.qat.X:
 *   state              - "up"/"down" device state control
 *   cfg_services       - service configuration string
 *   cfg_mode           - kernel/user mode configuration
 *   num_user_processes - allowed number of user processes
 *
 * The nodes hang off the device's own sysctl context/tree, so they are
 * torn down with the device (see adf_cfg_sysctl_remove()).
 *
 * Returns 0 on success or EINVAL if accel_dev is NULL.
 *
 * Fix: corrected the user-visible description strings ("confguration"
 * typo and a stray trailing space).
 */
int
adf_cfg_sysctl_add(struct adf_accel_dev *accel_dev)
{
	struct sysctl_ctx_list *qat_sysctl_ctx;
	struct sysctl_oid *qat_sysctl_tree;

	if (!accel_dev)
		return EINVAL;

	qat_sysctl_ctx =
	    device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev);
	qat_sysctl_tree =
	    device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev);

	SYSCTL_ADD_PROC(qat_sysctl_ctx,
			SYSCTL_CHILDREN(qat_sysctl_tree),
			OID_AUTO,
			"state",
			CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
			accel_dev,
			0,
			adf_cfg_sysctl_handle,
			"A",
			"QAT State");

	SYSCTL_ADD_PROC(qat_sysctl_ctx,
			SYSCTL_CHILDREN(qat_sysctl_tree),
			OID_AUTO,
			"cfg_services",
			CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT,
			accel_dev,
			0,
			adf_cfg_sysctl_services_handle,
			"A",
			"QAT services configuration");

	SYSCTL_ADD_PROC(qat_sysctl_ctx,
			SYSCTL_CHILDREN(qat_sysctl_tree),
			OID_AUTO,
			"cfg_mode",
			CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT,
			accel_dev,
			0,
			adf_cfg_sysctl_mode_handle,
			"A",
			"QAT mode configuration");

	SYSCTL_ADD_PROC(qat_sysctl_ctx,
			SYSCTL_CHILDREN(qat_sysctl_tree),
			OID_AUTO,
			"num_user_processes",
			CTLTYPE_U32 | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT,
			accel_dev,
			0,
			adf_cfg_sysctl_num_processes_handle,
			"I",
			"QAT user processes number");

	return 0;
}
/*
 * Counterpart of adf_cfg_sysctl_add().  Intentionally empty: the sysctl
 * nodes were created on the device's own sysctl context
 * (device_get_sysctl_ctx()), so they are presumably released by the bus
 * framework when the device detaches — nothing to do here.
 */
void
adf_cfg_sysctl_remove(struct adf_accel_dev *accel_dev)
{
}

View file

@ -0,0 +1,491 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_uio_control.h"
#include "adf_uio_cleanup.h"
#include "adf_uio.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <machine/atomic.h>
#include <dev/pci/pcivar.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/priv.h>
#include <linux/list.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_common.h"
#include "adf_cfg_user.h"
#include "adf_heartbeat.h"
#include "adf_cfg_device.h"
#define DEVICE_NAME "qat_adf_ctl"
static struct sx adf_ctl_lock;
static d_ioctl_t adf_ctl_ioctl;
void *misc_counter;
static struct cdevsw adf_ctl_cdevsw = {
.d_version = D_VERSION,
.d_ioctl = adf_ctl_ioctl,
.d_name = DEVICE_NAME,
};
static struct cdev *adf_ctl_dev;
/* Destroy the qat_adf_ctl character device created by adf_chr_drv_create(). */
static void adf_chr_drv_destroy(void)
{
	destroy_dev(adf_ctl_dev);
}
/*
 * List node pairing a user-space address with its list linkage.
 * NOTE(review): no user of this struct is visible in this chunk of the
 * file — confirm against the rest of the file which list it joins.
 */
struct adf_user_addr_info {
	struct list_head list;
	void *user_addr;
};
static int adf_chr_drv_create(void)
{
adf_ctl_dev = make_dev(&adf_ctl_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
DEVICE_NAME);
if (!adf_ctl_dev) {
printf("QAT: failed to create device\n");
goto err_cdev_del;
}
return 0;
err_cdev_del:
return EFAULT;
}
/*
 * "Allocate" the ioctl control data by casting the ioctl argument buffer.
 * No memory is allocated and nothing needs freeing; the FreeBSD ioctl
 * path presumably hands us an in-kernel copy of the argument already.
 * Always returns 0.
 */
static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
				   caddr_t arg)
{
	*ctl_data = (struct adf_user_cfg_ctl_data *)arg;
	return 0;
}
/*
 * Look up one configuration key for a device and copy its value back to
 * user space.
 *
 * Copies in the section descriptor and the first key/value entry it
 * points at, resolves the key via adf_cfg_get_param_value(), then copies
 * the value out to the user buffer located immediately after the key
 * (offset ADF_CFG_MAX_KEY_LEN_IN_BYTES into section.params).
 *
 * Returns 0 on success, EFAULT on any copyin/copyout failure or if the
 * key is not found.
 */
static int adf_copy_keyval_to_user(struct adf_accel_dev *accel_dev,
				   struct adf_user_cfg_ctl_data *ctl_data)
{
	struct adf_user_cfg_key_val key_val;
	struct adf_user_cfg_section section;
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	char *user_ptr;

	if (copyin(ctl_data->config_section, &section,
		   sizeof(struct adf_user_cfg_section))) {
		device_printf(GET_DEV(accel_dev),
			      "failed to copy section info\n");
		return EFAULT;
	}

	if (copyin(section.params, &key_val,
		   sizeof(struct adf_user_cfg_key_val))) {
		device_printf(GET_DEV(accel_dev), "failed to copy key val\n");
		return EFAULT;
	}

	/* The user's value buffer sits right behind the key field. */
	user_ptr = ((char *)section.params) + ADF_CFG_MAX_KEY_LEN_IN_BYTES;

	if (adf_cfg_get_param_value(
		accel_dev, section.name, key_val.key, val)) {
		return EFAULT;
	}

	if (copyout(val, user_ptr,
		    ADF_CFG_MAX_VAL_LEN_IN_BYTES)) {
		device_printf(GET_DEV(accel_dev),
			      "failed to copy keyvalue to user!\n");
		return EFAULT;
	}

	return 0;
}
/* Report the number of registered accel devices through the ioctl buffer. */
static int adf_ctl_ioctl_get_num_devices(unsigned int cmd,
					 caddr_t arg)
{
	uint32_t *num_devices = (uint32_t *)arg;

	adf_devmgr_get_num_dev(num_devices);

	return 0;
}
/*
 * Fill in the adf_dev_status_info the caller passed by accel_id: run
 * state, engine/accelerator counts, PCI location, SKU and available
 * device memory.
 *
 * Returns 0 on success, ENODEV if no device matches dev_info->accel_id.
 */
static int adf_ctl_ioctl_get_status(unsigned int cmd,
				    caddr_t arg)
{
	struct adf_hw_device_data *hw_data;
	struct adf_dev_status_info *dev_info;
	struct adf_accel_dev *accel_dev;

	dev_info = (struct adf_dev_status_info *)arg;
	accel_dev = adf_devmgr_get_dev_by_id(dev_info->accel_id);

	if (!accel_dev)
		return ENODEV;

	hw_data = accel_dev->hw_device;
	dev_info->state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
	dev_info->num_ae = hw_data->get_num_aes(hw_data);
	dev_info->num_accel = hw_data->get_num_accels(hw_data);
	dev_info->num_logical_accel = hw_data->num_logical_accel;
	/* Banks are divided evenly between the logical accelerators. */
	dev_info->banks_per_accel = hw_data->num_banks
	    / hw_data->num_logical_accel;
	strlcpy(dev_info->name, hw_data->dev_class->name,
		sizeof(dev_info->name));
	dev_info->instance_id = hw_data->instance_id;
	dev_info->type = hw_data->dev_class->type;
	dev_info->bus = pci_get_bus(accel_to_pci_dev(accel_dev));
	dev_info->dev = pci_get_slot(accel_to_pci_dev(accel_dev));
	dev_info->fun = pci_get_function(accel_to_pci_dev(accel_dev));
	dev_info->domain = pci_get_domain(accel_to_pci_dev(accel_dev));

	dev_info->pci_device_id = pci_get_device(accel_to_pci_dev(accel_dev));

	dev_info->node_id = accel_dev->accel_pci_dev.node;
	dev_info->sku = accel_dev->accel_pci_dev.sku;

	/* 0 when no ARAM info is present (e.g. devices without ARAM). */
	dev_info->device_mem_available = accel_dev->aram_info ?
	    accel_dev->aram_info->inter_buff_aram_region_size : 0;

	return 0;
}
/*
 * Query the heartbeat status of the device identified by
 * hb_status->device_id and write the result into the caller's structure.
 * Returns 0 on success, ENODEV for an unknown device, EAGAIN when the
 * status could not be read.
 */
static int adf_ctl_ioctl_heartbeat(unsigned int cmd,
				   caddr_t arg)
{
	struct adf_dev_heartbeat_status_ctl *hb_status =
	    (struct adf_dev_heartbeat_status_ctl *)arg;
	struct adf_accel_dev *accel_dev =
	    adf_devmgr_get_dev_by_id(hb_status->device_id);

	if (accel_dev == NULL)
		return ENODEV;

	if (adf_heartbeat_status(accel_dev, &hb_status->status)) {
		device_printf(GET_DEV(accel_dev),
			      "failed to get heartbeat status\n");
		return EAGAIN;
	}

	return 0;
}
/*
 * ioctl: fetch one configuration key/value pair for the device named in
 * the control data and copy it back to user space.
 *
 * Returns 0 on success, ENODEV if the device does not exist or the
 * key lookup/copy failed.
 * NOTE(review): adf_copy_keyval_to_user() reports EFAULT, which is
 * remapped to ENODEV here — confirm user space does not need to tell the
 * two cases apart.
 */
static int adf_ctl_ioctl_dev_get_value(unsigned int cmd,
				       caddr_t arg)
{
	int ret = 0;
	struct adf_user_cfg_ctl_data *ctl_data;
	struct adf_accel_dev *accel_dev;

	ret = adf_ctl_alloc_resources(&ctl_data, arg);
	if (ret)
		return ret;

	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
	if (!accel_dev) {
		printf("QAT: Device %d not found\n", ctl_data->device_id);
		ret = ENODEV;
		goto out;
	}

	ret = adf_copy_keyval_to_user(accel_dev, ctl_data);
	if (ret) {
		ret = ENODEV;
		goto out;
	}
out:
	return ret;
}
/*
 * Validate a user ring-reservation request and resolve it to the UIO
 * bundle for the requested bank.
 *
 * Rejects (returning NULL): unknown accel_id, missing UIO accel state,
 * bank number out of range, ring mask with bits beyond the bank's ring
 * count, and banks at or below num_ker_bundles (those are reserved for
 * kernel use — user processes may only claim banks above them).
 */
static struct adf_uio_control_bundle
    *adf_ctl_ioctl_bundle(struct adf_user_reserve_ring reserve)
{
	struct adf_accel_dev *accel_dev;
	struct adf_uio_control_accel *accel;
	struct adf_uio_control_bundle *bundle = NULL;
	u8 num_rings_per_bank = 0;

	accel_dev = adf_devmgr_get_dev_by_id(reserve.accel_id);
	if (!accel_dev) {
		pr_err("QAT: Failed to get accel_dev\n");
		return NULL;
	}
	num_rings_per_bank = accel_dev->hw_device->num_rings_per_bank;

	accel = accel_dev->accel;
	if (!accel) {
		pr_err("QAT: Failed to get accel\n");
		return NULL;
	}

	if (reserve.bank_nr >= GET_MAX_BANKS(accel_dev)) {
		pr_err("QAT: Invalid bank number %d\n", reserve.bank_nr);
		return NULL;
	}
	/* Mask must not address rings beyond this bank's ring count. */
	if (reserve.ring_mask & ~((1 << num_rings_per_bank) - 1)) {
		pr_err("QAT: Invalid ring mask %0X\n", reserve.ring_mask);
		return NULL;
	}
	if (accel->num_ker_bundles > reserve.bank_nr) {
		pr_err("QAT: Invalid user reserved bank\n");
		return NULL;
	}
	bundle = &accel->bundle[reserve.bank_nr];

	return bundle;
}
/*
 * IOCTL_RESERVE_RING handler: reserve the rings in reserve.ring_mask of
 * the requested bundle on behalf of the calling process.  The caller
 * must already appear on the bundle's instance list (entries are added
 * by the UIO mmap path).
 *
 * NOTE(review): rings_used is tested and later set under two separate
 * bundle->lock acquisitions, and instance_rings is written after
 * bundle->list_lock has been dropped; this appears to rely on
 * adf_ctl_lock serializing all ioctl callers -- confirm no other path
 * mutates these concurrently.
 */
static int adf_ctl_ioctl_reserve_ring(caddr_t arg)
{
struct adf_user_reserve_ring reserve = {0};
struct adf_uio_control_bundle *bundle;
struct adf_uio_instance_rings *instance_rings;
int pid_entry_found = 0;
reserve = *((struct adf_user_reserve_ring *)arg);
/* Validate the request and resolve the target bundle. */
bundle = adf_ctl_ioctl_bundle(reserve);
if (!bundle) {
pr_err("QAT: Failed to get bundle\n");
return -EINVAL;
}
/* Refuse to double-reserve rings that are already taken. */
mutex_lock(&bundle->lock);
if (bundle->rings_used & reserve.ring_mask) {
pr_err("QAT: Bundle %d, rings 0x%04X already reserved\n",
reserve.bank_nr,
reserve.ring_mask);
mutex_unlock(&bundle->lock);
return -EINVAL;
}
mutex_unlock(&bundle->lock);
/* Find the list entry for this process */
mutex_lock(&bundle->list_lock);
list_for_each_entry(instance_rings, &bundle->list, list) {
if (instance_rings->user_pid == curproc->p_pid) {
pid_entry_found = 1;
break;
}
}
mutex_unlock(&bundle->list_lock);
if (!pid_entry_found) {
pr_err("QAT: process %d not found\n", curproc->p_pid);
return -EINVAL;
}
/* Record the reservation both per-process and per-bundle. */
instance_rings->ring_mask |= reserve.ring_mask;
mutex_lock(&bundle->lock);
bundle->rings_used |= reserve.ring_mask;
mutex_unlock(&bundle->lock);
return 0;
}
/*
 * IOCTL_RELEASE_RING handler: release the rings in reserve.ring_mask
 * that the calling process previously reserved on the bundle.  Fails
 * with -EINVAL if the process has no reservation entry or tries to
 * release rings it does not own.
 *
 * NOTE(review): instance_rings is dereferenced and written after
 * bundle->list_lock has been dropped -- presumably safe because
 * adf_ctl_lock serializes ioctl callers; confirm.
 */
static int adf_ctl_ioctl_release_ring(caddr_t arg)
{
struct adf_user_reserve_ring reserve;
struct adf_uio_control_bundle *bundle;
struct adf_uio_instance_rings *instance_rings;
int pid_entry_found;
reserve = *((struct adf_user_reserve_ring *)arg);
/* Validate the request and resolve the target bundle. */
bundle = adf_ctl_ioctl_bundle(reserve);
if (!bundle) {
pr_err("QAT: Failed to get bundle\n");
return -EINVAL;
}
/* Find the list entry for this process */
pid_entry_found = 0;
mutex_lock(&bundle->list_lock);
list_for_each_entry(instance_rings, &bundle->list, list) {
if (instance_rings->user_pid == curproc->p_pid) {
pid_entry_found = 1;
break;
}
}
mutex_unlock(&bundle->list_lock);
if (!pid_entry_found) {
pr_err("QAT: No ring reservation found for PID %d\n",
curproc->p_pid);
return -EINVAL;
}
/* Only rings this process itself reserved may be released. */
if ((instance_rings->ring_mask & reserve.ring_mask) !=
reserve.ring_mask) {
pr_err("QAT: Attempt to release rings not reserved by this process\n");
return -EINVAL;
}
/* Clear the reservation both per-process and per-bundle. */
instance_rings->ring_mask &= ~reserve.ring_mask;
mutex_lock(&bundle->lock);
bundle->rings_used &= ~reserve.ring_mask;
mutex_unlock(&bundle->lock);
return 0;
}
/*
 * IOCTL_ENABLE_RING handler: mark the requested rings enabled on the
 * bundle and push the new arbitration state to the hardware.
 */
static int adf_ctl_ioctl_enable_ring(caddr_t arg)
{
    struct adf_user_reserve_ring req;
    struct adf_uio_control_bundle *bundle;

    req = *((struct adf_user_reserve_ring *)arg);
    bundle = adf_ctl_ioctl_bundle(req);
    if (bundle == NULL) {
        pr_err("QAT: Failed to get bundle\n");
        return -EINVAL;
    }

    mutex_lock(&bundle->lock);
    bundle->rings_enabled |= req.ring_mask;
    adf_update_uio_ring_arb(bundle);
    mutex_unlock(&bundle->lock);
    return 0;
}
/*
 * IOCTL_DISABLE_RING handler: mark the requested rings disabled on the
 * bundle and push the new arbitration state to the hardware.
 */
static int adf_ctl_ioctl_disable_ring(caddr_t arg)
{
    struct adf_user_reserve_ring req;
    struct adf_uio_control_bundle *bundle;

    req = *((struct adf_user_reserve_ring *)arg);
    bundle = adf_ctl_ioctl_bundle(req);
    if (bundle == NULL) {
        pr_err("QAT: Failed to get bundle\n");
        return -EINVAL;
    }

    mutex_lock(&bundle->lock);
    bundle->rings_enabled &= ~req.ring_mask;
    adf_update_uio_ring_arb(bundle);
    mutex_unlock(&bundle->lock);
    return 0;
}
/*
 * Top-level ioctl dispatcher for the QAT control device.  Unprivileged
 * callers (priv_check(PRIV_DRIVER) failure) are restricted to the
 * query/ring-management commands below; every command runs under the
 * global adf_ctl_lock and requires a non-NULL argument.
 */
static int adf_ctl_ioctl(struct cdev *dev,
    u_long cmd,
    caddr_t arg,
    int fflag,
    struct thread *td)
{
    static const unsigned int unrestricted_cmds[] = {
        IOCTL_GET_NUM_DEVICES, IOCTL_STATUS_ACCEL_DEV,
        IOCTL_HEARTBEAT_ACCEL_DEV, IOCTL_GET_CFG_VAL,
        IOCTL_RESERVE_RING, IOCTL_RELEASE_RING,
        IOCTL_ENABLE_RING, IOCTL_DISABLE_RING,
    };
    bool permitted;
    int idx;
    int ret;

    if (priv_check(curthread, PRIV_DRIVER) != 0) {
        /* Unprivileged caller: only whitelisted commands proceed. */
        permitted = false;
        for (idx = 0; idx < ARRAY_SIZE(unrestricted_cmds); idx++) {
            if (cmd == unrestricted_cmds[idx]) {
                permitted = true;
                break;
            }
        }
        if (!permitted)
            return EACCES;
    }

    /* All commands have an argument */
    if (arg == NULL)
        return EFAULT;

    if (sx_xlock_sig(&adf_ctl_lock) != 0)
        return EINTR;

    switch (cmd) {
    case IOCTL_GET_NUM_DEVICES:
        ret = adf_ctl_ioctl_get_num_devices(cmd, arg);
        break;
    case IOCTL_STATUS_ACCEL_DEV:
        ret = adf_ctl_ioctl_get_status(cmd, arg);
        break;
    case IOCTL_GET_CFG_VAL:
        ret = adf_ctl_ioctl_dev_get_value(cmd, arg);
        break;
    case IOCTL_RESERVE_RING:
        ret = adf_ctl_ioctl_reserve_ring(arg);
        break;
    case IOCTL_RELEASE_RING:
        ret = adf_ctl_ioctl_release_ring(arg);
        break;
    case IOCTL_ENABLE_RING:
        ret = adf_ctl_ioctl_enable_ring(arg);
        break;
    case IOCTL_DISABLE_RING:
        ret = adf_ctl_ioctl_disable_ring(arg);
        break;
    case IOCTL_HEARTBEAT_ACCEL_DEV:
        ret = adf_ctl_ioctl_heartbeat(cmd, arg);
        break;
    default:
        printf("QAT: Invalid ioctl\n");
        ret = ENOTTY;
        break;
    }
    sx_xunlock(&adf_ctl_lock);
    return ret;
}
/*
 * Create the QAT control character devices and register the state
 * notification machinery.  Returns 0 on success or EFAULT on failure,
 * undoing what was set up so far.
 */
int
adf_register_ctl_device_driver(void)
{
    sx_init(&adf_ctl_lock, "adf ctl");

    if (adf_chr_drv_create() == 0) {
        adf_state_init();
        if (adf_processes_dev_register() == 0)
            return 0;
        /* Unwind: drop the control device before the lock. */
        adf_chr_drv_destroy();
    }
    sx_destroy(&adf_ctl_lock);
    return EFAULT;
}
/*
 * Undo adf_register_ctl_device_driver(): remove the process and state
 * device nodes, the control device and the global ioctl lock.
 * NOTE(review): adf_clean_vf_map(false) presumably clears the PF/VF
 * mapping table -- confirm the semantics of the flag.
 */
void
adf_unregister_ctl_device_driver(void)
{
adf_processes_dev_unregister();
adf_state_destroy();
adf_chr_drv_destroy();
adf_clean_vf_map(false);
sx_destroy(&adf_ctl_lock);
}

View file

@ -0,0 +1,677 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_uio_control.h"
#include "adf_uio_cleanup.h"
#include "adf_uio.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
/* Device node names created under /dev. */
#define ADF_DEV_PROCESSES_NAME "qat_dev_processes"
#define ADF_DEV_STATE_NAME "qat_dev_state"
/* Callout period (ticks) used to (re)notify state-device readers. */
#define ADF_STATE_CALLOUT_TIME 10
static const char *mtx_name = "state_callout_mtx";
/* qat_dev_processes cdev methods. */
static d_open_t adf_processes_open;
static void adf_processes_release(void *data);
static d_read_t adf_processes_read;
static d_write_t adf_processes_write;
/* qat_dev_state cdev methods and kqueue hooks. */
static d_open_t adf_state_open;
static void adf_state_release(void *data);
static d_read_t adf_state_read;
static int adf_state_kqfilter(struct cdev *dev, struct knote *kn);
static int adf_state_kqread_event(struct knote *kn, long hint);
static void adf_state_kqread_detach(struct knote *kn);
/*
 * Shared state-notification machinery: mtx guards the reader list, the
 * per-reader event queues and the callout below.
 */
static struct callout callout;
static struct mtx mtx;
static struct service_hndl adf_state_hndl;
/* One list entry per open instance of the state device. */
struct entry_proc_events {
struct adf_state_priv_data *proc_events;
SLIST_ENTRY(entry_proc_events) entries_proc_events;
};
/* One queued device-state event awaiting delivery to a reader. */
struct entry_state {
struct adf_state state;
STAILQ_ENTRY(entry_state) entries_state;
};
SLIST_HEAD(proc_events_head, entry_proc_events);
STAILQ_HEAD(state_head, entry_state);
/* Global list of state-device readers. */
static struct proc_events_head proc_events_head;
/* Per-open private data for qat_dev_processes. */
struct adf_processes_priv_data {
char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES]; /* assigned section name */
int read_flag; /* nonzero once a name has been handed out */
struct list_head list;
};
/* Per-open private data for qat_dev_state. */
struct adf_state_priv_data {
struct cdev *cdev;
struct selinfo rsel; /* select/poll/kqueue wait channel */
struct state_head state_head; /* events pending for this reader */
};
static struct cdevsw adf_processes_cdevsw = {
.d_version = D_VERSION,
.d_open = adf_processes_open,
.d_read = adf_processes_read,
.d_write = adf_processes_write,
.d_name = ADF_DEV_PROCESSES_NAME,
};
static struct cdevsw adf_state_cdevsw = {
.d_version = D_VERSION,
.d_open = adf_state_open,
.d_read = adf_state_read,
.d_kqfilter = adf_state_kqfilter,
.d_name = ADF_DEV_STATE_NAME,
};
/* EVFILT_READ filter for the state device (attach done in kqfilter). */
static struct filterops adf_state_read_filterops = {
.f_isfd = 1,
.f_attach = NULL,
.f_detach = adf_state_kqread_detach,
.f_event = adf_state_kqread_event,
};
static struct cdev *adf_processes_dev;
static struct cdev *adf_state_dev;
/* List of processes that claimed a configuration section name. */
static LINUX_LIST_HEAD(processes_list);
struct sx processes_list_sema;
SX_SYSINIT(processes_list_sema, &processes_list_sema, "adf proc list");
/* Remove the qat_dev_processes device node. */
static void
adf_chr_drv_destroy(void)
{
destroy_dev(adf_processes_dev);
}
/*
 * Create the qat_dev_processes device node.  Returns 0 on success,
 * EFAULT when the node could not be created.
 */
static int
adf_chr_drv_create(void)
{
    adf_processes_dev = make_dev(&adf_processes_cdevsw,
        0,
        UID_ROOT,
        GID_WHEEL,
        0600,
        ADF_DEV_PROCESSES_NAME);
    if (adf_processes_dev == NULL) {
        printf("QAT: failed to create device\n");
        return EFAULT;
    }
    return 0;
}
/*
 * Open handler for qat_dev_processes: refuse the open unless at least
 * one started QAT device exists, then attach zeroed per-open private
 * data to the descriptor.
 */
static int
adf_processes_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
    struct adf_processes_priv_data *prv_data;
    struct adf_accel_dev *accel_dev;
    int started = 0;
    int id, error;

    for (id = 0; id < ADF_MAX_DEVICES; id++) {
        accel_dev = adf_devmgr_get_dev_by_id(id);
        if (accel_dev != NULL && adf_dev_started(accel_dev))
            started++;
    }
    if (started == 0) {
        printf("QAT: No active devices found.\n");
        return ENXIO;
    }

    prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO);
    if (prv_data == NULL)
        return ENOMEM;
    INIT_LIST_HEAD(&prv_data->list);

    error = devfs_set_cdevpriv(prv_data, adf_processes_release);
    if (error != 0)
        free(prv_data, M_QAT);
    return error;
}
/*
 * Return the id of the first accel device that is started, or -1 when
 * no started device exists.
 */
static int
adf_get_first_started_dev(void)
{
    struct adf_accel_dev *dev;
    int id;

    for (id = 0; id < ADF_MAX_DEVICES; id++) {
        dev = adf_devmgr_get_dev_by_id(id);
        if (dev != NULL && adf_dev_started(dev))
            return id;
    }
    return -1;
}
/*
 * Write handler for qat_dev_processes.  Userspace writes its desired
 * section-name prefix; the driver appends the internal userspace
 * suffix plus a slot number, choosing the first number that exists in
 * the device configuration and is not already claimed by another open
 * process.  On success the name is stored in the per-open data (to be
 * fetched via read()) and the process is added to processes_list.  A
 * return of 1 signals that all configured slots are taken.
 *
 * NOTE(review): error returns mix positive errnos (EBADF, EIO) with
 * negative ones (-EIO) and the bare "return 1"; cdevsw write methods
 * are expected to return positive errno values -- confirm intended.
 */
static int
adf_processes_write(struct cdev *dev, struct uio *uio, int ioflag)
{
struct adf_processes_priv_data *prv_data = NULL;
struct adf_processes_priv_data *pdata = NULL;
int dev_num = 0, pr_num = 0;
struct list_head *lpos = NULL;
char usr_name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES] = { 0 };
struct adf_accel_dev *accel_dev = NULL;
struct adf_cfg_section *section_ptr = NULL;
bool pr_name_available = 1;
uint32_t num_accel_devs = 0;
int error = 0;
ssize_t count;
int dev_id;
error = devfs_get_cdevpriv((void **)&prv_data);
if (error) {
printf("QAT: invalid file descriptor\n");
return error;
}
/* A name may only be negotiated once per open. */
if (prv_data->read_flag == 1) {
printf("QAT: can only write once\n");
return EBADF;
}
count = uio->uio_resid;
if ((count <= 0) || (count > ADF_CFG_MAX_SECTION_LEN_IN_BYTES)) {
printf("QAT: wrong size %d\n", (int)count);
return EIO;
}
error = uiomove(usr_name, count, uio);
if (error) {
printf("QAT: can't copy data\n");
return error;
}
/* Lock other processes and try to find out the process name */
if (sx_xlock_sig(&processes_list_sema)) {
printf("QAT: can't aquire process info lock\n");
return EBADF;
}
dev_id = adf_get_first_started_dev();
if (-1 == dev_id) {
pr_err("QAT: could not find started device\n");
sx_xunlock(&processes_list_sema);
return -EIO;
}
accel_dev = adf_devmgr_get_dev_by_id(dev_id);
if (!accel_dev) {
pr_err("QAT: could not find started device\n");
sx_xunlock(&processes_list_sema);
return -EIO;
}
/* If there is nothing there then take the first name and return */
if (list_empty(&processes_list)) {
snprintf(prv_data->name,
ADF_CFG_MAX_SECTION_LEN_IN_BYTES,
"%s" ADF_INTERNAL_USERSPACE_SEC_SUFF "%d",
usr_name,
0);
list_add(&prv_data->list, &processes_list);
sx_xunlock(&processes_list_sema);
prv_data->read_flag = 1;
return 0;
}
/* If there are processes running then search for a first free name */
adf_devmgr_get_num_dev(&num_accel_devs);
for (dev_num = 0; dev_num < num_accel_devs; dev_num++) {
accel_dev = adf_devmgr_get_dev_by_id(dev_num);
if (!accel_dev)
continue;
if (!adf_dev_started(accel_dev))
continue; /* to next device */
for (pr_num = 0; pr_num < GET_MAX_PROCESSES(accel_dev);
pr_num++) {
/* Candidate name: <prefix><suffix><slot>. */
snprintf(prv_data->name,
ADF_CFG_MAX_SECTION_LEN_IN_BYTES,
"%s" ADF_INTERNAL_USERSPACE_SEC_SUFF "%d",
usr_name,
pr_num);
pr_name_available = 1;
/* Figure out if section exists in the config table */
section_ptr =
adf_cfg_sec_find(accel_dev, prv_data->name);
if (NULL == section_ptr) {
/* This section name doesn't exist */
pr_name_available = 0;
/* As process_num enumerates from 0, once we get
* to one which doesn't exist no further ones
* will exist. On to next device
*/
break;
}
/* Figure out if it's been taken already */
list_for_each(lpos, &processes_list)
{
pdata =
list_entry(lpos,
struct adf_processes_priv_data,
list);
if (!strncmp(
pdata->name,
prv_data->name,
ADF_CFG_MAX_SECTION_LEN_IN_BYTES)) {
pr_name_available = 0;
break;
}
}
if (pr_name_available)
break;
}
if (pr_name_available)
break;
}
/*
 * If we have a valid name that is not on
 * the list take it and add to the list
 */
if (pr_name_available) {
list_add(&prv_data->list, &processes_list);
sx_xunlock(&processes_list_sema);
prv_data->read_flag = 1;
return 0;
}
/* If not then the process needs to wait */
sx_xunlock(&processes_list_sema);
explicit_bzero(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES);
prv_data->read_flag = 0;
return 1;
}
/*
 * Read handler for qat_dev_processes: hand back the section name that
 * was assigned during write(), or EIO when no name has been assigned
 * to this open yet.
 */
static int
adf_processes_read(struct cdev *dev, struct uio *uio, int ioflag)
{
    struct adf_processes_priv_data *prv_data;
    int error;

    error = devfs_get_cdevpriv((void **)&prv_data);
    if (error != 0) {
        printf("QAT: invalid file descriptor\n");
        return error;
    }

    /* No name negotiated yet (see adf_processes_write()). */
    if (!prv_data->read_flag)
        return EIO;

    error = uiomove(prv_data->name,
        strnlen(prv_data->name, ADF_CFG_MAX_SECTION_LEN_IN_BYTES),
        uio);
    if (error != 0) {
        printf("QAT: failed to copy data to user\n");
        return error;
    }
    return 0;
}
/*
 * cdevpriv destructor: unlink this open's entry from the global
 * process list (freeing its claimed section name) and release the
 * private data.
 */
static void
adf_processes_release(void *data)
{
    struct adf_processes_priv_data *priv = data;

    sx_xlock(&processes_list_sema);
    list_del(&priv->list);
    sx_xunlock(&processes_list_sema);
    free(priv, M_QAT);
}
/* Create /dev/qat_dev_processes; 0 on success, EFAULT on failure. */
int
adf_processes_dev_register(void)
{
return adf_chr_drv_create();
}
/* Remove /dev/qat_dev_processes. */
void
adf_processes_dev_unregister(void)
{
adf_chr_drv_destroy();
}
static void
adf_state_callout_notify_ev(void *arg)
{
int notified = 0;
struct adf_state_priv_data *priv = NULL;
struct entry_proc_events *proc_events = NULL;
SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) {
if (!STAILQ_EMPTY(&proc_events->proc_events->state_head)) {
notified = 1;
priv = proc_events->proc_events;
wakeup(priv);
selwakeup(&priv->rsel);
KNOTE_UNLOCKED(&priv->rsel.si_note, 0);
}
}
if (notified)
callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
}
static void
adf_state_set(int dev, enum adf_event event)
{
struct adf_accel_dev *accel_dev = NULL;
struct state_head *head = NULL;
struct entry_proc_events *proc_events = NULL;
struct entry_state *state = NULL;
accel_dev = adf_devmgr_get_dev_by_id(dev);
if (!accel_dev)
return;
mtx_lock(&mtx);
SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) {
state = NULL;
head = &proc_events->proc_events->state_head;
state = malloc(sizeof(struct entry_state),
M_QAT,
M_NOWAIT | M_ZERO);
if (!state)
continue;
state->state.dev_state = event;
state->state.dev_id = dev;
STAILQ_INSERT_TAIL(head, state, entries_state);
if (event == ADF_EVENT_STOP) {
state = NULL;
state = malloc(sizeof(struct entry_state),
M_QAT,
M_NOWAIT | M_ZERO);
if (!state)
continue;
state->state.dev_state = ADF_EVENT_SHUTDOWN;
state->state.dev_id = dev;
STAILQ_INSERT_TAIL(head, state, entries_state);
}
}
callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
mtx_unlock(&mtx);
}
/*
 * adf service hook: translate device lifecycle events into queued
 * state notifications.  INIT/SHUTDOWN/START are acknowledged without
 * queueing anything; events beyond the supported range yield -EINVAL
 * and unknown values -1.
 */
static int
adf_state_event_handler(struct adf_accel_dev *accel_dev, enum adf_event event)
{
#if defined(QAT_UIO) && defined(QAT_DBG)
    if (event > ADF_EVENT_DBG_SHUTDOWN)
        return -EINVAL;
#else
    if (event > ADF_EVENT_ERROR)
        return -EINVAL;
#endif /* defined(QAT_UIO) && defined(QAT_DBG) */

    switch (event) {
    case ADF_EVENT_INIT:
    case ADF_EVENT_SHUTDOWN:
    case ADF_EVENT_START:
        /* Nothing to broadcast for these transitions. */
        return 0;
    case ADF_EVENT_RESTARTING:
    case ADF_EVENT_RESTARTED:
    case ADF_EVENT_STOP:
    case ADF_EVENT_ERROR:
#if defined(QAT_UIO) && defined(QAT_DBG)
    case ADF_EVENT_PROC_CRASH:
    case ADF_EVENT_MANUAL_DUMP:
    case ADF_EVENT_SLICE_HANG:
    case ADF_EVENT_DBG_SHUTDOWN:
#endif /* defined(QAT_UIO) && defined(QAT_DBG) */
        break;
    default:
        return -1;
    }

    adf_state_set(accel_dev->accel_id, event);
    return 0;
}
/*
 * kqueue attach for the state device: only EVFILT_READ is supported;
 * the note is hooked onto the per-open reader's knlist (locked by the
 * global state mutex).
 */
static int
adf_state_kqfilter(struct cdev *dev, struct knote *kn)
{
    struct adf_state_priv_data *priv;

    mtx_lock(&mtx);
    priv = dev->si_drv1;
    switch (kn->kn_filter) {
    case EVFILT_READ:
        kn->kn_fop = &adf_state_read_filterops;
        kn->kn_hook = priv;
        knlist_add(&priv->rsel.si_note, kn, 0);
        mtx_unlock(&mtx);
        return 0;
    default:
        mtx_unlock(&mtx);
        /*
         * d_kqfilter methods report failure with a positive errno;
         * the previous -EINVAL would be misinterpreted by kevent(2).
         */
        return (EINVAL);
    }
}
/*
 * kqueue event test: unconditionally report the state device as
 * readable; adf_state_read() simply returns no data when this
 * reader's event queue is empty.
 */
static int
adf_state_kqread_event(struct knote *kn, long hint)
{
return 1;
}
/*
 * kqueue detach: unhook the note from its reader's knlist.  The global
 * state mutex is the knlist lock, hence the "locked" flag on removal.
 */
static void
adf_state_kqread_detach(struct knote *kn)
{
    struct adf_state_priv_data *priv;

    mtx_lock(&mtx);
    priv = (kn != NULL) ? kn->kn_hook : NULL;
    if (priv != NULL)
        knlist_remove(&priv->rsel.si_note, kn, 1);
    mtx_unlock(&mtx);
}
/*
 * Create /dev/qat_dev_state and start the state-notification service:
 * initialize the reader list, the mutex/callout pair, and register the
 * lifecycle event handler with the adf service framework.
 *
 * NOTE(review): the make_dev() result is not checked; a failure would
 * leave adf_state_dev NULL for later destroy_dev() -- verify.
 */
void
adf_state_init(void)
{
adf_state_dev = make_dev(&adf_state_cdevsw,
0,
UID_ROOT,
GID_WHEEL,
0600,
"%s",
ADF_DEV_STATE_NAME);
SLIST_INIT(&proc_events_head);
mtx_init(&mtx, mtx_name, NULL, MTX_DEF);
/* The callout runs with mtx held (callout_init_mtx). */
callout_init_mtx(&callout, &mtx, 0);
explicit_bzero(&adf_state_hndl, sizeof(adf_state_hndl));
adf_state_hndl.event_hld = adf_state_event_handler;
adf_state_hndl.name = "adf_state_event_handler";
mtx_lock(&mtx);
adf_service_register(&adf_state_hndl);
/* Arm the periodic reader-notification callout. */
callout_reset(&callout,
ADF_STATE_CALLOUT_TIME,
adf_state_callout_notify_ev,
NULL);
mtx_unlock(&mtx);
}
/*
 * Tear down the state-notification machinery: unregister the event
 * service, stop the callout, free all reader-list bookkeeping, and
 * finally remove the device node.
 */
void
adf_state_destroy(void)
{
    struct entry_proc_events *proc_events;

    mtx_lock(&mtx);
    adf_service_unregister(&adf_state_hndl);
    callout_stop(&callout);
    while (!SLIST_EMPTY(&proc_events_head)) {
        proc_events = SLIST_FIRST(&proc_events_head);
        SLIST_REMOVE_HEAD(&proc_events_head, entries_proc_events);
        free(proc_events, M_QAT);
    }
    mtx_unlock(&mtx);
    /*
     * destroy_dev() may sleep waiting for outstanding cdev references
     * and triggers the cdevpriv destructor (adf_state_release), which
     * itself takes mtx -- so it must run with the mutex dropped.
     */
    destroy_dev(adf_state_dev);
    mtx_destroy(&mtx);
}
/*
 * Open handler for qat_dev_state: allocate per-open reader state with
 * an empty event queue, publish it on the global reader list and arm
 * the notification callout.
 *
 * NOTE(review): on devfs_set_cdevpriv() failure the knlist initialized
 * below is neither detached nor destroyed before prv_data is freed,
 * and dev->si_drv1 still points at the freed allocation -- verify.
 */
static int
adf_state_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
struct adf_state_priv_data *prv_data = NULL;
struct entry_proc_events *entry_proc_events = NULL;
int ret = 0;
prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO);
if (!prv_data)
return -ENOMEM;
entry_proc_events =
malloc(sizeof(struct entry_proc_events), M_QAT, M_WAITOK | M_ZERO);
if (!entry_proc_events) {
free(prv_data, M_QAT);
return -ENOMEM;
}
mtx_lock(&mtx);
prv_data->cdev = dev;
prv_data->cdev->si_drv1 = prv_data;
/* mtx doubles as the knlist lock for this reader's kqueue notes. */
knlist_init_mtx(&prv_data->rsel.si_note, &mtx);
STAILQ_INIT(&prv_data->state_head);
entry_proc_events->proc_events = prv_data;
SLIST_INSERT_HEAD(&proc_events_head,
entry_proc_events,
entries_proc_events);
ret = devfs_set_cdevpriv(prv_data, adf_state_release);
if (ret) {
/* Roll back the list insertion on failure. */
SLIST_REMOVE(&proc_events_head,
entry_proc_events,
entry_proc_events,
entries_proc_events);
free(entry_proc_events, M_QAT);
free(prv_data, M_QAT);
}
callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
mtx_unlock(&mtx);
return ret;
}
/*
 * Read handler for qat_dev_state: copy out the oldest queued event for
 * this reader (one struct adf_state per read), then re-notify every
 * reader that still has pending events and re-arm the callout.
 * Returns 0 with no data transferred when the queue is empty.
 *
 * NOTE(review): uiomove() runs while mtx is held; it may fault on the
 * user buffer and sleep -- confirm this is safe here or move the copy
 * outside the lock.
 */
static int
adf_state_read(struct cdev *dev, struct uio *uio, int ioflag)
{
int ret = 0;
struct adf_state_priv_data *prv_data = NULL;
struct state_head *state_head = NULL;
struct entry_state *entry_state = NULL;
struct adf_state *state = NULL;
struct entry_proc_events *proc_events = NULL;
mtx_lock(&mtx);
ret = devfs_get_cdevpriv((void **)&prv_data);
if (ret) {
mtx_unlock(&mtx);
return 0;
}
state_head = &prv_data->state_head;
if (STAILQ_EMPTY(state_head)) {
mtx_unlock(&mtx);
return 0;
}
entry_state = STAILQ_FIRST(state_head);
state = &entry_state->state;
ret = uiomove(state, sizeof(struct adf_state), uio);
/* Only dequeue the event once it was copied out successfully. */
if (!ret && !STAILQ_EMPTY(state_head)) {
STAILQ_REMOVE_HEAD(state_head, entries_state);
free(entry_state, M_QAT);
}
/* Wake any readers that still have events pending. */
SLIST_FOREACH (proc_events, &proc_events_head, entries_proc_events) {
if (!STAILQ_EMPTY(&proc_events->proc_events->state_head)) {
prv_data = proc_events->proc_events;
wakeup(prv_data);
selwakeup(&prv_data->rsel);
KNOTE_UNLOCKED(&prv_data->rsel.si_note, 0);
}
}
callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
mtx_unlock(&mtx);
return ret;
}
/*
 * cdevpriv destructor for qat_dev_state: tear down this reader's
 * kqueue plumbing, drain its pending event queue, unlink it from the
 * global reader list and free it.  Runs on last close.
 */
static void
adf_state_release(void *data)
{
struct adf_state_priv_data *prv_data = NULL;
struct entry_state *entry_state = NULL;
struct entry_proc_events *entry_proc_events = NULL;
struct entry_proc_events *tmp = NULL;
mtx_lock(&mtx);
prv_data = (struct adf_state_priv_data *)data;
/* mtx (the knlist lock) is held, hence the "islocked" flag of 1. */
knlist_delete(&prv_data->rsel.si_note, curthread, 1);
knlist_destroy(&prv_data->rsel.si_note);
seldrain(&prv_data->rsel);
/* Drop any events that were never read. */
while (!STAILQ_EMPTY(&prv_data->state_head)) {
entry_state = STAILQ_FIRST(&prv_data->state_head);
STAILQ_REMOVE_HEAD(&prv_data->state_head, entries_state);
free(entry_state, M_QAT);
}
/* Unlink this reader from the global notification list. */
SLIST_FOREACH_SAFE (entry_proc_events,
&proc_events_head,
entries_proc_events,
tmp) {
if (entry_proc_events->proc_events == prv_data) {
SLIST_REMOVE(&proc_events_head,
entry_proc_events,
entry_proc_events,
entries_proc_events);
free(entry_proc_events, M_QAT);
}
}
free(prv_data, M_QAT);
mtx_unlock(&mtx);
}

View file

@ -0,0 +1,450 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_uio_control.h"
#include "adf_uio_cleanup.h"
#include "adf_uio.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/conf.h>
#include <sys/capsicum.h>
#include <sys/kdb.h>
#include <sys/condvar.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
/* Shorthand accessors into the device's hw_data class descriptor. */
#define ADF_UIO_GET_NAME(accel_dev) (GET_HW_DATA(accel_dev)->dev_class->name)
#define ADF_UIO_GET_TYPE(accel_dev) (GET_HW_DATA(accel_dev)->dev_class->type)
#define ADF_UIO_GET_BAR(accel_dev) \
(GET_HW_DATA(accel_dev)->get_etr_bar_id(GET_HW_DATA(accel_dev)))
static d_ioctl_t adf_uio_ioctl;
static d_mmap_single_t adf_uio_mmap_single;
/* Per-device UIO character-device switch (ioctl + mmap only). */
static struct cdevsw adf_uio_cdevsw = { .d_ioctl = adf_uio_ioctl,
.d_mmap_single = adf_uio_mmap_single,
.d_version = D_VERSION,
.d_name = "qat" };
/*
 * Per-open state: the bundle this descriptor is bound to and the file
 * descriptors whose backing memory is pinned on its behalf.
 */
struct adf_uio_open_bundle {
struct adf_uio_control_accel *accel;
int bundle; /* index into accel->bundle[] */
struct file **mem_files; /* held via fget() until release */
int num_mem_files;
};
/*
 * cdevpriv destructor for a UIO bundle handle: clean up rings orphaned
 * by the exiting process, drop the references on registered memory
 * file descriptors, remove this process from the bundle's instance
 * list and release the device reference taken at allocation time.
 */
static void
adf_release_bundle(void *arg)
{
struct adf_uio_control_accel *accel = NULL;
struct adf_uio_open_bundle *handle = NULL;
struct adf_uio_control_bundle *bundle = NULL;
struct adf_uio_instance_rings *instance_rings, *tmp;
int i = 0;
handle = arg;
accel = handle->accel;
bundle = &accel->bundle[handle->bundle];
/* Reclaim rings the process left behind. */
mutex_lock(&bundle->lock);
adf_uio_do_cleanup_orphan(bundle->hardware_bundle_number, accel);
mutex_unlock(&bundle->lock);
for (i = 0; i < handle->num_mem_files; i++) {
/*
* Similar to the garbage collection of orphaned file
* descriptor references in UNIX domain socket control
* messages, the current thread isn't relevant to the
* the file descriptor reference being released. In
* particular, the current thread does not hold any
* advisory file locks on these file descriptors.
*/
fdrop(handle->mem_files[i], NULL);
}
free(handle->mem_files, M_QAT);
mtx_lock(&accel->lock);
/* Drop this process's entry from the bundle's instance list. */
mutex_lock(&bundle->list_lock);
list_for_each_entry_safe(instance_rings, tmp, &bundle->list, list)
{
if (instance_rings->user_pid == curproc->p_pid) {
list_del(&instance_rings->list);
free(instance_rings, M_QAT);
break;
}
}
mutex_unlock(&bundle->list_lock);
adf_dev_put(accel->accel_dev);
accel->num_handles--;
free(handle, M_QAT);
/* Let adf_uio_remove() proceed once the last handle is gone. */
if (!accel->num_handles) {
cv_broadcast(&accel->cleanup_ok);
/* the broadcasting effect happens after releasing accel->lock
*/
}
mtx_unlock(&accel->lock);
}
/*
 * IOCTL_ADD_MEM_FD handler: take a reference on the given file
 * descriptor and append it to this open's mem_files array so its
 * backing memory stays alive until the handle is released.
 *
 * The array is grown with an unlock/allocate/relock retry loop:
 * malloc(M_WAITOK) may sleep and therefore cannot run under
 * accel->lock, so after reacquiring the lock we recheck that no
 * concurrent caller grew the array in the meantime.
 */
static int
adf_add_mem_fd(struct adf_accel_dev *accel_dev, int mem_fd)
{
struct adf_uio_control_accel *accel = NULL;
struct adf_uio_open_bundle *handle = NULL;
struct file *fp, **new_files;
cap_rights_t rights;
int error = -1, old_count = 0;
error = devfs_get_cdevpriv((void **)&handle);
if (error)
return (error);
/* Hold a reference on the descriptor's file for the handle. */
error = fget(curthread, mem_fd, cap_rights_init(&rights), &fp);
if (error) {
printf(
"Failed to fetch file pointer from current process %d \n",
__LINE__);
return (error);
}
accel = accel_dev->accel;
mtx_lock(&accel->lock);
for (;;) {
old_count = handle->num_mem_files;
mtx_unlock(&accel->lock);
new_files = malloc((old_count + 1) * sizeof(*new_files),
M_QAT,
M_WAITOK);
mtx_lock(&accel->lock);
/* Nobody resized the array while we slept: install ours. */
if (old_count == handle->num_mem_files) {
if (old_count != 0) {
memcpy(new_files,
handle->mem_files,
old_count * sizeof(*new_files));
free(handle->mem_files, M_QAT);
}
handle->mem_files = new_files;
new_files[old_count] = fp;
handle->num_mem_files++;
break;
} else
free(new_files, M_QAT);
}
mtx_unlock(&accel->lock);
return (0);
}
/*
 * Build an SG-backed VM object covering one ring bundle's CSR window
 * so it can be mmap()ed by userspace; the object's memory attribute is
 * set to uncacheable.  Returns NULL if the pager allocation fails.
 */
static vm_object_t
adf_uio_map_bar(struct adf_accel_dev *accel_dev, uint8_t bank_offset)
{
    struct adf_uio_control_accel *accel = accel_dev->accel;
    struct adf_hw_csr_info *csr_info = &accel_dev->hw_device->csr_info;
    unsigned int bundle_size = csr_info->ring_bundle_size;
    unsigned int bank_offs = bank_offset * bundle_size;
    struct sglist *sg;
    vm_object_t obj;

    sg = sglist_alloc(1, M_WAITOK);
    /* Starting from new HW there is an additional offset
     * for bundle CSRs
     */
    sglist_append_phys(sg,
        accel->bar->base_addr + bank_offs + csr_info->csr_addr_offset,
        bundle_size);

    obj = vm_pager_allocate(
        OBJT_SG, sg, bundle_size, VM_PROT_RW, 0, NULL);
    if (obj != NULL) {
        VM_OBJECT_WLOCK(obj);
        vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE);
        VM_OBJECT_WUNLOCK(obj);
    }
    sglist_free(sg);
    return obj;
}
/*
 * IOCTL_ALLOC_BUNDLE handler: bind this open descriptor to bundle_nr
 * and take a reference on the accel device, releasing everything via
 * the cdevpriv destructor on close.
 */
static int
adf_alloc_bundle(struct adf_accel_dev *accel_dev, int bundle_nr)
{
    struct adf_uio_control_accel *accel;
    struct adf_uio_open_bundle *handle;
    int error;

    if (bundle_nr < 0 || bundle_nr >= GET_MAX_BANKS(accel_dev)) {
        printf("ERROR in %s (%d) %d\n", __func__, bundle_nr, __LINE__);
        return EINVAL;
    }
    accel = accel_dev->accel;

    handle = malloc(sizeof(*handle), M_QAT, M_WAITOK | M_ZERO);
    if (handle == NULL) {
        printf("ERROR in adf_alloc_bundle %d\n", __LINE__);
        return ENOMEM;
    }
    handle->accel = accel;
    handle->bundle = bundle_nr;

    mtx_lock(&accel->lock);
    adf_dev_get(accel_dev);
    accel->num_handles++;
    mtx_unlock(&accel->lock);

    error = devfs_set_cdevpriv(handle, adf_release_bundle);
    if (error != 0) {
        /* Destructor will not run; drop the reference ourselves. */
        adf_release_bundle(handle);
        device_printf(GET_DEV(accel_dev),
            "ERROR in adf_alloc_bundle %d\n",
            __LINE__);
    }
    return (error);
}
/*
 * UIO device ioctl entry point: bundle-size and accel-type queries
 * plus bundle allocation and memory-fd registration.
 */
static int
adf_uio_ioctl(struct cdev *dev,
    u_long cmd,
    caddr_t data,
    int fflag,
    struct thread *td)
{
    struct adf_accel_dev *accel_dev = dev->si_drv1;
    struct adf_hw_csr_info *csr_info;

    if (accel_dev == NULL) {
        printf("%s - accel_dev is NULL\n", __func__);
        return EFAULT;
    }
    csr_info = &accel_dev->hw_device->csr_info;

    switch (cmd) {
    case IOCTL_GET_BUNDLE_SIZE:
        *(uint32_t *)data = csr_info->ring_bundle_size;
        return (0);
    case IOCTL_ALLOC_BUNDLE:
        return (adf_alloc_bundle(accel_dev, *(int *)data));
    case IOCTL_GET_ACCEL_TYPE:
        *(uint32_t *)data = ADF_UIO_GET_TYPE(accel_dev);
        return (0);
    case IOCTL_ADD_MEM_FD:
        return (adf_add_mem_fd(accel_dev, *(int *)data));
    default:
        return (ENOTTY);
    }
}
/*
 * mmap() entry point for the UIO device: record the calling process on
 * the bundle's instance list and hand back a referenced VM object
 * covering the bundle's CSR window.
 */
static int
adf_uio_mmap_single(struct cdev *dev,
    vm_ooffset_t *offset,
    vm_size_t size,
    struct vm_object **object,
    int nprot)
{
    struct adf_uio_open_bundle *handle = NULL;
    struct adf_uio_control_accel *accel = NULL;
    struct adf_uio_control_bundle *bundle = NULL;
    struct adf_uio_instance_rings *instance_rings;
    int error;

    error = devfs_get_cdevpriv((void **)&handle);
    if (error)
        return (error);
    if (!handle->accel) {
        printf("QAT: Error - no accel in handle\n");
        return EINVAL;
    }
    accel = handle->accel;
    if (!accel->accel_dev) {
        printf("QAT: Error - no accel_dev in accel\n");
        return EINVAL;
    }
    bundle = &accel->bundle[handle->bundle];
    if (!bundle->obj) {
        printf("QAT: Error no vm_object in bundle\n");
        return EINVAL;
    }

    /* Adding pid to bundle list */
    instance_rings =
        malloc(sizeof(*instance_rings), M_QAT, M_WAITOK | M_ZERO);
    if (!instance_rings) {
        printf("QAT: Memory allocation error - line: %d\n", __LINE__);
        /*
         * cdevsw methods report errors as positive errnos; the
         * previous -ENOMEM would be misinterpreted by the VM layer.
         * (Unreachable in practice: M_WAITOK never returns NULL.)
         */
        return ENOMEM;
    }
    instance_rings->user_pid = curproc->p_pid;
    instance_rings->ring_mask = 0;
    mutex_lock(&bundle->list_lock);
    list_add_tail(&instance_rings->list, &bundle->list);
    mutex_unlock(&bundle->list_lock);

    vm_object_reference(bundle->obj);
    *object = bundle->obj;
    return (0);
}
static inline void
adf_uio_init_accel_ctrl(struct adf_uio_control_accel *accel,
struct adf_accel_dev *accel_dev,
unsigned int nb_bundles)
{
struct adf_uio_control_bundle *bundle;
struct qat_uio_bundle_dev *priv;
unsigned int i;
accel->nb_bundles = nb_bundles;
accel->total_used_bundles = 0;
for (i = 0; i < nb_bundles; i++) {
/*initialize the bundle */
bundle = &accel->bundle[i];
priv = &bundle->uio_priv;
bundle->hardware_bundle_number =
GET_MAX_BANKS(accel_dev) - nb_bundles + i;
INIT_LIST_HEAD(&bundle->list);
priv->bundle = bundle;
priv->accel = accel;
mutex_init(&bundle->lock);
mutex_init(&bundle->list_lock);
if (!accel->bar)
printf("ERROR: bar not defined in accel\n");
else
bundle->csr_addr = (void *)accel->bar->virt_addr;
}
}
/**
* Initialization bars on dev start.
*/
static inline void
adf_uio_init_bundle_dev(struct adf_uio_control_accel *accel,
struct adf_accel_dev *accel_dev,
unsigned int nb_bundles)
{
struct adf_uio_control_bundle *bundle;
unsigned int i;
for (i = 0; i < nb_bundles; i++) {
bundle = &accel->bundle[i];
bundle->obj =
adf_uio_map_bar(accel_dev, bundle->hardware_bundle_number);
if (!bundle->obj) {
device_printf(GET_DEV(accel_dev),
"ERROR in adf_alloc_bundle %d\n",
__LINE__);
}
}
}
/*
 * Create the per-device UIO character device and its bundle control
 * structures.  When the configuration exposes no user bundles
 * (ADF_FIRST_USER_BUNDLE absent) this is a no-op and succeeds.
 *
 * NOTE(review): on the fail_clean path the per-bundle mutexes
 * initialized by adf_uio_init_accel_ctrl() are not destroyed -- verify.
 */
int
adf_uio_register(struct adf_accel_dev *accel_dev)
{
struct adf_uio_control_accel *accel = NULL;
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };
int nb_bundles;
if (!accel_dev) {
printf("%s - accel_dev is NULL\n", __func__);
return EFAULT;
}
/* Without ADF_FIRST_USER_BUNDLE in the config, expose no bundles. */
if (adf_cfg_get_param_value(
accel_dev, ADF_GENERAL_SEC, ADF_FIRST_USER_BUNDLE, val)) {
nb_bundles = 0;
} else {
nb_bundles = GET_MAX_BANKS(accel_dev);
}
if (nb_bundles) {
/* One trailing bundle struct per bank. */
accel = malloc(sizeof(*accel) +
nb_bundles *
sizeof(struct adf_uio_control_bundle),
M_QAT,
M_WAITOK | M_ZERO);
mtx_init(&accel->lock, "qat uio", NULL, MTX_DEF);
accel->accel_dev = accel_dev;
accel->bar = accel_dev->accel_pci_dev.pci_bars +
ADF_UIO_GET_BAR(accel_dev);
adf_uio_init_accel_ctrl(accel, accel_dev, nb_bundles);
accel->cdev = make_dev(&adf_uio_cdevsw,
0,
UID_ROOT,
GID_WHEEL,
0600,
"%s",
device_get_nameunit(GET_DEV(accel_dev)));
if (accel->cdev == NULL) {
mtx_destroy(&accel->lock);
goto fail_clean;
}
accel->cdev->si_drv1 = accel_dev;
accel_dev->accel = accel;
cv_init(&accel->cleanup_ok, "uio_accel_cv");
adf_uio_init_bundle_dev(accel, accel_dev, nb_bundles);
}
return 0;
fail_clean:
free(accel, M_QAT);
device_printf(GET_DEV(accel_dev), "Failed to register UIO devices\n");
return ENODEV;
}
/*
 * Tear down the UIO device created by adf_uio_register(): release the
 * bundle VM objects, destroy the cdev, then wait (re-checking every
 * 3*hz ticks) for all outstanding open handles to drain before
 * destroying the locks and freeing the accel structure.
 */
void
adf_uio_remove(struct adf_accel_dev *accel_dev)
{
struct adf_uio_control_accel *accel = accel_dev->accel;
struct adf_uio_control_bundle *bundle;
unsigned int i;
if (accel) {
/* Un-mapping all bars */
for (i = 0; i < accel->nb_bundles; i++) {
bundle = &accel->bundle[i];
vm_object_deallocate(bundle->obj);
}
destroy_dev(accel->cdev);
mtx_lock(&accel->lock);
/* adf_release_bundle() broadcasts cleanup_ok at zero handles. */
while (accel->num_handles) {
cv_timedwait_sig(&accel->cleanup_ok,
&accel->lock,
3 * hz);
}
mtx_unlock(&accel->lock);
mtx_destroy(&accel->lock);
cv_destroy(&accel->cleanup_ok);
free(accel, M_QAT);
accel_dev->accel = NULL;
}
}

View file

@ -0,0 +1,404 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_uio_control.h"
#include "adf_uio_cleanup.h"
#include "adf_uio.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/sglist.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>
#define TX_RINGS_DISABLE 0
#define TX_RINGS_ENABLE 1
#define PKE_REQ_SIZE 64
#define BASE_ADDR_SHIFT 6
#define PKE_RX_RING_0 0
#define PKE_RX_RING_1 1
#define ADF_RING_EMPTY_RETRY_DELAY 2
#define ADF_RING_EMPTY_MAX_RETRY 15
/*
 * Snapshot of the rings a dead/exiting process left behind in one bank.
 * tx_mask/rx_mask/asym_mask are per-ring bitmaps within the bank.
 */
struct bundle_orphan_ring {
unsigned long tx_mask;   /* tx rings owned by the exiting process */
unsigned long rx_mask;   /* rx rings owned by the exiting process */
unsigned long asym_mask; /* rings carrying asym (PKE) traffic */
int bank;                /* bank number within the device */
struct resource *csr_base; /* mapped CSR base for this bank's BAR */
struct adf_uio_control_bundle *bundle; /* owning UIO bundle */
};
/*
 * Clear any ring whose tx/rx pairing is broken: if orphan->tx_mask does
 * not match orphan->rx_mask, reset the unpaired ring's CSRs.
 */
/*
 * Drop unpaired rings from the orphan masks and zero their hardware state.
 * A tx ring at index i pairs with the rx ring at i + tx_rx_gap; a ring
 * whose partner is not in the corresponding mask is cleared from its own
 * mask and its config/base CSRs are reset to 0.
 */
static void
check_orphan_ring(struct adf_accel_dev *accel_dev,
struct bundle_orphan_ring *orphan,
struct adf_hw_device_data *hw_data)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
int i;
int tx_rx_gap = hw_data->tx_rx_gap;
u8 num_rings_per_bank = hw_data->num_rings_per_bank;
struct resource *csr_base = orphan->csr_base;
int bank = orphan->bank;
for (i = 0; i < num_rings_per_bank; i++) {
if (test_bit(i, &orphan->tx_mask)) {
int rx_ring = i + tx_rx_gap;
/* tx without matching rx: forget it and reset its CSRs */
if (!test_bit(rx_ring, &orphan->rx_mask)) {
__clear_bit(i, &orphan->tx_mask);
/* clean up this tx ring */
csr_ops->write_csr_ring_config(csr_base,
bank,
i,
0);
csr_ops->write_csr_ring_base(csr_base,
bank,
i,
0);
}
} else if (test_bit(i, &orphan->rx_mask)) {
int tx_ring = i - tx_rx_gap;
/* rx without matching tx: forget it and reset its CSRs */
if (!test_bit(tx_ring, &orphan->tx_mask)) {
__clear_bit(i, &orphan->rx_mask);
/* clean up this rx ring */
csr_ops->write_csr_ring_config(csr_base,
bank,
i,
0);
csr_ops->write_csr_ring_base(csr_base,
bank,
i,
0);
}
}
}
}
/*
 * Build a bundle_orphan_ring snapshot for the given bank, restricted to
 * rings reserved by the current process (matched by PID on the bundle's
 * instance list). Rings are classified tx/rx via the device tx_rings_mask
 * and asym via asym_rings_mask. On success *orphan_bundle_out is set and
 * must be released with put_orphan_bundle(). Returns 0 on success.
 */
static int
get_orphan_bundle(int bank,
struct adf_uio_control_accel *accel,
struct bundle_orphan_ring **orphan_bundle_out)
{
int i;
int ret = 0;
struct resource *csr_base;
unsigned long tx_mask;
unsigned long asym_mask;
struct adf_accel_dev *accel_dev = accel->accel_dev;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u8 num_rings_per_bank = hw_data->num_rings_per_bank;
struct bundle_orphan_ring *orphan_bundle = NULL;
uint64_t base;
struct list_head *entry;
struct adf_uio_instance_rings *instance_rings;
struct adf_uio_control_bundle *bundle;
u16 ring_mask = 0;
/* M_WAITOK allocation; the NULL check below is defensive. */
orphan_bundle =
malloc(sizeof(*orphan_bundle), M_QAT, M_WAITOK | M_ZERO);
if (!orphan_bundle)
return ENOMEM;
csr_base = accel->bar->virt_addr;
orphan_bundle->csr_base = csr_base;
orphan_bundle->bank = bank;
orphan_bundle->tx_mask = 0;
orphan_bundle->rx_mask = 0;
tx_mask = accel_dev->hw_device->tx_rings_mask;
asym_mask = accel_dev->hw_device->asym_rings_mask;
/* Get ring mask for this process. */
bundle = &accel->bundle[bank];
orphan_bundle->bundle = bundle;
mutex_lock(&bundle->list_lock);
list_for_each(entry, &bundle->list)
{
instance_rings =
list_entry(entry, struct adf_uio_instance_rings, list);
if (instance_rings->user_pid == curproc->p_pid) {
ring_mask = instance_rings->ring_mask;
break;
}
}
mutex_unlock(&bundle->list_lock);
/* Classify every still-programmed ring this process reserved. */
for (i = 0; i < num_rings_per_bank; i++) {
base = csr_ops->read_csr_ring_base(csr_base, bank, i);
if (!base)
continue;
if (!(ring_mask & 1 << i))
continue; /* Not reserved for this process. */
if (test_bit(i, &tx_mask))
__set_bit(i, &orphan_bundle->tx_mask);
else
__set_bit(i, &orphan_bundle->rx_mask);
if (test_bit(i, &asym_mask))
__set_bit(i, &orphan_bundle->asym_mask);
}
if (orphan_bundle->tx_mask || orphan_bundle->rx_mask)
check_orphan_ring(accel_dev, orphan_bundle, hw_data);
*orphan_bundle_out = orphan_bundle;
return ret;
}
static void
put_orphan_bundle(struct bundle_orphan_ring *bundle)
{
if (!bundle)
return;
free(bundle, M_QAT);
}
/*
 * Reset every orphan ring (tx and rx) in the bank: drop them from the
 * bundle's enabled mask, update arbitration accordingly, then zero each
 * ring's config and base CSRs.
 */
static void
cleanup_all_ring(struct adf_uio_control_accel *accel,
struct bundle_orphan_ring *orphan)
{
int i;
struct resource *csr_base = orphan->csr_base;
unsigned long mask = orphan->rx_mask | orphan->tx_mask;
struct adf_accel_dev *accel_dev = accel->accel_dev;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u8 num_rings_per_bank = hw_data->num_rings_per_bank;
int bank = orphan->bank;
/* Disable arbitration for the rings first, under the bundle lock. */
mutex_lock(&orphan->bundle->lock);
orphan->bundle->rings_enabled &= ~mask;
adf_update_uio_ring_arb(orphan->bundle);
mutex_unlock(&orphan->bundle->lock);
for (i = 0; i < num_rings_per_bank; i++) {
if (!test_bit(i, &mask))
continue;
csr_ops->write_csr_ring_config(csr_base, bank, i, 0);
csr_ops->write_csr_ring_base(csr_base, bank, i, 0);
}
}
/*
 * Return true, if number of messages in tx ring is equal to number
 * of messages in corresponding rx ring, else false.
 *
 * Polls with exponential backoff (starting at ADF_RING_EMPTY_RETRY_DELAY
 * microseconds, doubling each round) up to ADF_RING_EMPTY_MAX_RETRY times.
 * A tx ring at index idx pairs with the rx ring at idx + tx_rx_offset,
 * where tx_rx_offset is half the rings in the bank.
 */
static bool
is_all_resp_recvd(struct adf_hw_csr_ops *csr_ops,
struct bundle_orphan_ring *bundle,
const u8 num_rings_per_bank)
{
u32 rx_tail = 0, tx_head = 0, rx_ring_msg_offset = 0,
tx_ring_msg_offset = 0, tx_rx_offset = num_rings_per_bank / 2,
idx = 0, retry = 0, delay = ADF_RING_EMPTY_RETRY_DELAY;
do {
for_each_set_bit(idx, &bundle->tx_mask, tx_rx_offset)
{
rx_tail =
csr_ops->read_csr_ring_tail(bundle->csr_base,
0,
(idx + tx_rx_offset));
tx_head = csr_ops->read_csr_ring_head(bundle->csr_base,
0,
idx);
/*
 * Normalize messages in tx rings to match rx ring
 * message size, i.e., size of response message(32).
 * Asym messages are 64 bytes each, so right shift
 * by 1 to normalize to 32. Sym and compression
 * messages are 128 bytes each, so right shift by 2
 * to normalize to 32.
 */
if (bundle->asym_mask & (1 << idx))
tx_ring_msg_offset = (tx_head >> 1);
else
tx_ring_msg_offset = (tx_head >> 2);
rx_ring_msg_offset = rx_tail;
/* Mismatch: responses still outstanding on this pair. */
if (tx_ring_msg_offset != rx_ring_msg_offset)
break;
}
if (idx == tx_rx_offset)
/* All Tx and Rx ring message counts match */
return true;
DELAY(delay);
delay *= 2;
} while (++retry < ADF_RING_EMPTY_MAX_RETRY);
return false;
}
static int
bundle_need_cleanup(int bank, struct adf_uio_control_accel *accel)
{
struct resource *csr_base = accel->bar->virt_addr;
struct adf_accel_dev *accel_dev = accel->accel_dev;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u8 num_rings_per_bank = hw_data->num_rings_per_bank;
int i;
if (!csr_base)
return 0;
for (i = 0; i < num_rings_per_bank; i++) {
if (csr_ops->read_csr_ring_base(csr_base, bank, i))
return 1;
}
return 0;
}
/*
 * Quiesce and reset the orphan rings of one bank: mask the bank's
 * interrupt, disable tx arbitration, wait until every request has been
 * answered, then reset all affected rings. If responses never drain,
 * log and bail out without touching the rings.
 */
static void
cleanup_orphan_ring(struct bundle_orphan_ring *orphan,
struct adf_uio_control_accel *accel)
{
struct adf_accel_dev *accel_dev = accel->accel_dev;
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u8 number_rings_per_bank = hw_data->num_rings_per_bank;
/* disable the interrupt */
csr_ops->write_csr_int_col_en(orphan->csr_base, orphan->bank, 0);
/*
 * wait firmware finish the in-process ring
 * 1. disable all tx rings
 * 2. check if all responses are received
 * 3. reset all rings
 */
adf_disable_ring_arb(accel_dev, orphan->csr_base, 0, orphan->tx_mask);
if (!is_all_resp_recvd(csr_ops, orphan, number_rings_per_bank)) {
device_printf(GET_DEV(accel_dev),
"Failed to clean up orphan rings");
return;
}
/*
 * When the execution reaches here, it is assumed that
 * there is no inflight request in the rings and that
 * there is no in-process ring.
 */
cleanup_all_ring(accel, orphan);
}
/*
 * Per-process UIO teardown for one bank, run when a process that owned
 * rings exits. Prefers the hardware ring-pair reset when available;
 * otherwise drains and resets the orphan rings manually (unless the
 * device is restarting). Finally releases the process's ring reservation
 * from the bundle.
 */
void
adf_uio_do_cleanup_orphan(int bank, struct adf_uio_control_accel *accel)
{
int ret, pid_found;
struct adf_uio_instance_rings *instance_rings, *tmp;
struct adf_uio_control_bundle *bundle;
/* orphan is local pointer allocated and deallocated in this function */
struct bundle_orphan_ring *orphan = NULL;
struct adf_accel_dev *accel_dev = accel->accel_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
if (!bundle_need_cleanup(bank, accel))
goto release;
/* NOTE(review): on failure here the "release" step is skipped — the
 * process's rings_used bits are not cleared; confirm this is intended. */
ret = get_orphan_bundle(bank, accel, &orphan);
if (ret != 0)
return;
/*
 * If driver supports ring pair reset, no matter process
 * exits normally or abnormally, just do ring pair reset.
 * ring pair reset will reset all ring pair registers to
 * default value. Driver only needs to reset ring mask
 */
if (hw_data->ring_pair_reset) {
hw_data->ring_pair_reset(
accel_dev, orphan->bundle->hardware_bundle_number);
mutex_lock(&orphan->bundle->lock);
/*
 * If processes exit normally, rx_mask, tx_mask
 * and rings_enabled are all 0, below expression
 * have no impact on rings_enabled.
 * If processes exit abnormally, rings_enabled
 * will be set as 0 by below expression.
 */
orphan->bundle->rings_enabled &=
~(orphan->rx_mask | orphan->tx_mask);
mutex_unlock(&orphan->bundle->lock);
goto out;
}
if (!orphan->tx_mask && !orphan->rx_mask)
goto out;
device_printf(GET_DEV(accel_dev),
"Process %d %s exit with orphan rings %lx:%lx\n",
curproc->p_pid,
curproc->p_comm,
orphan->tx_mask,
orphan->rx_mask);
if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
cleanup_orphan_ring(orphan, accel);
}
out:
put_orphan_bundle(orphan);
release:
bundle = &accel->bundle[bank];
/*
 * If the user process died without releasing the rings
 * then force a release here.
 */
mutex_lock(&bundle->list_lock);
pid_found = 0;
list_for_each_entry_safe(instance_rings, tmp, &bundle->list, list)
{
if (instance_rings->user_pid == curproc->p_pid) {
pid_found = 1;
break;
}
}
mutex_unlock(&bundle->list_lock);
/* NOTE(review): instance_rings is dereferenced after list_lock is
 * dropped — assumes no concurrent removal of this entry; verify. */
if (pid_found) {
mutex_lock(&bundle->lock);
bundle->rings_used &= ~instance_rings->ring_mask;
mutex_unlock(&bundle->lock);
}
}

View file

@ -55,6 +55,12 @@ write_csr_ring_config(struct resource *csr_base_addr,
WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
}
/* Read the programmed DMA base address of a ring (gen2 CSR layout). */
static dma_addr_t
read_csr_ring_base(struct resource *csr_base_addr, u32 bank, u32 ring)
{
return READ_CSR_RING_BASE(csr_base_addr, bank, ring);
}
static void
write_csr_ring_base(struct resource *csr_base_addr,
u32 bank,
@ -106,6 +112,12 @@ write_csr_ring_srv_arb_en(struct resource *csr_base_addr, u32 bank, u32 value)
WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
}
/* Bit that enables interrupt coalescing in the INT_COL_CTL CSR (gen2). */
static u32
get_int_col_ctl_enable_mask(void)
{
return ADF_RING_CSR_INT_COL_CTL_ENABLE;
}
void
adf_gen2_init_hw_csr_info(struct adf_hw_csr_info *csr_info)
{
@ -113,6 +125,9 @@ adf_gen2_init_hw_csr_info(struct adf_hw_csr_info *csr_info)
csr_info->arb_enable_mask = 0xFF;
csr_info->csr_addr_offset = ADF_RING_CSR_ADDR_OFFSET;
csr_info->ring_bundle_size = ADF_RING_BUNDLE_SIZE;
csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
csr_ops->read_csr_ring_head = read_csr_ring_head;
csr_ops->write_csr_ring_head = write_csr_ring_head;
@ -120,6 +135,7 @@ adf_gen2_init_hw_csr_info(struct adf_hw_csr_info *csr_info)
csr_ops->write_csr_ring_tail = write_csr_ring_tail;
csr_ops->read_csr_e_stat = read_csr_e_stat;
csr_ops->write_csr_ring_config = write_csr_ring_config;
csr_ops->read_csr_ring_base = read_csr_ring_base;
csr_ops->write_csr_ring_base = write_csr_ring_base;
csr_ops->write_csr_int_flag = write_csr_int_flag;
csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
@ -128,5 +144,5 @@ adf_gen2_init_hw_csr_info(struct adf_hw_csr_info *csr_info)
csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
csr_ops->read_csr_ring_srv_arb_en = read_csr_ring_srv_arb_en;
csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
csr_ops->get_int_col_ctl_enable_mask = get_int_col_ctl_enable_mask;
}
EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_info);

View file

@ -2,8 +2,12 @@
/* Copyright(c) 2021 Intel Corporation */
/* $FreeBSD$ */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_gen4_hw_data.h"
#define ADF_RPRESET_TIMEOUT_MS 5000
#define ADF_RPRESET_POLLING_INTERVAL 20
static u64
build_csr_ring_base_addr(bus_addr_t addr, u32 size)
{
@ -55,6 +59,12 @@ write_csr_ring_config(struct resource *csr_base_addr,
WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
}
/* Read the programmed bus base address of a ring (gen4 CSR layout). */
static bus_addr_t
read_csr_ring_base(struct resource *csr_base_addr, u32 bank, u32 ring)
{
return READ_CSR_RING_BASE(csr_base_addr, bank, ring);
}
static void
write_csr_ring_base(struct resource *csr_base_addr,
u32 bank,
@ -106,6 +116,12 @@ write_csr_ring_srv_arb_en(struct resource *csr_base_addr, u32 bank, u32 value)
WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value);
}
/* Bit that enables interrupt coalescing in the INT_COL_CTL CSR (gen4). */
static u32
get_int_col_ctl_enable_mask(void)
{
return ADF_RING_CSR_INT_COL_CTL_ENABLE;
}
void
adf_gen4_init_hw_csr_info(struct adf_hw_csr_info *csr_info)
{
@ -113,6 +129,9 @@ adf_gen4_init_hw_csr_info(struct adf_hw_csr_info *csr_info)
csr_info->arb_enable_mask = 0x1;
csr_info->csr_addr_offset = ADF_RING_CSR_ADDR_OFFSET;
csr_info->ring_bundle_size = ADF_RING_BUNDLE_SIZE;
csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
csr_ops->read_csr_ring_head = read_csr_ring_head;
csr_ops->write_csr_ring_head = write_csr_ring_head;
@ -120,6 +139,7 @@ adf_gen4_init_hw_csr_info(struct adf_hw_csr_info *csr_info)
csr_ops->write_csr_ring_tail = write_csr_ring_tail;
csr_ops->read_csr_e_stat = read_csr_e_stat;
csr_ops->write_csr_ring_config = write_csr_ring_config;
csr_ops->read_csr_ring_base = read_csr_ring_base;
csr_ops->write_csr_ring_base = write_csr_ring_base;
csr_ops->write_csr_int_flag = write_csr_int_flag;
csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
@ -128,8 +148,64 @@ adf_gen4_init_hw_csr_info(struct adf_hw_csr_info *csr_info)
csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
csr_ops->read_csr_ring_srv_arb_en = read_csr_ring_srv_arb_en;
csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
csr_ops->get_int_col_ctl_enable_mask = get_int_col_ctl_enable_mask;
}
/*
 * Trigger a hardware ring-pair reset for one bank and poll RPRESETSTS
 * until completion, sleeping ADF_RPRESET_POLLING_INTERVAL ms per poll up
 * to ADF_RPRESET_TIMEOUT_MS total. Returns 0 on success, EFAULT on
 * timeout.
 */
static int
reset_ring_pair(struct resource *csr, u32 bank_number)
{
int reset_timeout = ADF_RPRESET_TIMEOUT_MS;
const int timeout_step = ADF_RPRESET_POLLING_INTERVAL;
u32 val;
/* Write rpresetctl register bit#0 as 1
 * As rpresetctl registers have no RW bits, no need to preserve
 * values for other bits, just write bit#0
 * NOTE: bit#12-bit#31 are WO, the write operation only takes
 * effect when bit#1 is written 1 for pasid level reset
 */
ADF_CSR_WR(csr,
ADF_WQM_CSR_RPRESETCTL(bank_number),
BIT(ADF_WQM_CSR_RPRESETCTL_SHIFT));
/* Read rpresetsts register to wait for rp reset complete */
while (reset_timeout > 0) {
val = ADF_CSR_RD(csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
if (val & ADF_WQM_CSR_RPRESETSTS_MASK)
break;
pause_ms("adfstop", timeout_step);
reset_timeout -= timeout_step;
}
if (reset_timeout <= 0)
return EFAULT;
/* When rp reset is done, clear rpresetsts bit0 */
ADF_CSR_WR(csr,
ADF_WQM_CSR_RPRESETSTS(bank_number),
BIT(ADF_WQM_CSR_RPRESETSTS_SHIFT));
return 0;
}
/*
 * Public entry point for a gen4 ring-pair reset. Validates the bank
 * number, resolves the ETR BAR and delegates to reset_ring_pair().
 * NOTE(review): returns -EINVAL (negative) for a bad bank but propagates
 * reset_ring_pair()'s positive EFAULT on timeout — mixed sign convention;
 * confirm callers only test for non-zero.
 */
int
adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data);
struct resource *csr;
int ret;
if (bank_number >= hw_data->num_banks)
return -EINVAL;
csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr;
ret = reset_ring_pair(csr, bank_number);
if (ret)
device_printf(GET_DEV(accel_dev),
"ring pair reset failure (timeout)\n");
return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_info);
static inline void
adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper, u32 *lower)
@ -173,4 +249,9 @@ adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
return 0;
}
EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
/* Stub used where PF/VF communication is not applicable; always reports
 * "not disabled" (0). */
int
adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
{
return 0;
}

View file

@ -0,0 +1,131 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include <linux/iopoll.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_gen4_pfvf.h"
#include "adf_pfvf_utils.h"
#include "adf_pfvf_vf_proto.h"
#define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i)*0x20))
#define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i)*0x20))
/* VF2PF interrupt source registers */
#define ADF_4XXX_VM2PF_SOU 0x41A180
#define ADF_4XXX_VM2PF_MSK 0x41A1C0
#define ADF_GEN4_VF_MSK 0xFFFF
#define ADF_PFVF_GEN4_MSGTYPE_SHIFT 2
#define ADF_PFVF_GEN4_MSGTYPE_MASK 0x3F
#define ADF_PFVF_GEN4_MSGDATA_SHIFT 8
#define ADF_PFVF_GEN4_MSGDATA_MASK 0xFFFFFF
#define ADF_4XXXIOV_PF2VM_OFFSET 0x100C
#define ADF_4XXXIOV_VM2PF_OFFSET 0x1008
/* Gen4 PF/VF CSR message layout: {type shift/mask}, {data shift/mask}. */
static const struct pfvf_csr_format csr_gen4_fmt = {
{ ADF_PFVF_GEN4_MSGTYPE_SHIFT, ADF_PFVF_GEN4_MSGTYPE_MASK },
{ ADF_PFVF_GEN4_MSGDATA_SHIFT, ADF_PFVF_GEN4_MSGDATA_MASK },
};
/* PF->VM mailbox CSR offset as seen from the VF; the index is unused
 * because each VF has a single fixed window. */
static u32
adf_gen4_vf_get_pfvf_offset(u32 i)
{
return ADF_4XXXIOV_PF2VM_OFFSET;
}
/* VM->PF mailbox CSR offset as seen from the VF; index likewise unused. */
static u32
adf_gen4_vf_get_vfpf_offset(u32 i)
{
return ADF_4XXXIOV_VM2PF_OFFSET;
}
/*
 * Serialize and send one PF/VF message through the mailbox CSR, then
 * poll (with timeout) for the remote side to clear the interrupt bit as
 * acknowledgement. Serialized by csr_lock. Returns 0 on success,
 * -EINVAL for an unencodable message, or the poll-timeout error.
 */
static int
adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev,
struct pfvf_message msg,
u32 pfvf_offset,
struct mutex *csr_lock)
{
struct resource *pmisc_addr = adf_get_pmisc_base(accel_dev);
u32 csr_val;
int ret;
csr_val = adf_pfvf_csr_msg_of(accel_dev, msg, &csr_gen4_fmt);
if (unlikely(!csr_val))
return -EINVAL;
mutex_lock(csr_lock);
/* Setting ADF_PFVF_INT raises the interrupt on the remote side. */
ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val | ADF_PFVF_INT);
/* Wait for confirmation from remote that it received the message */
ret = read_poll_timeout(ADF_CSR_RD,
csr_val,
!(csr_val & ADF_PFVF_INT),
ADF_PFVF_MSG_ACK_DELAY_US,
ADF_PFVF_MSG_ACK_MAX_DELAY_US,
true,
pmisc_addr,
pfvf_offset);
if (ret < 0)
device_printf(GET_DEV(accel_dev),
"ACK not received from remote\n");
mutex_unlock(csr_lock);
return ret;
}
/* VF-to-PF send: thin wrapper matching the pfvf_ops send_msg signature. */
static int
adf_gen4_vf2pf_send(struct adf_accel_dev *accel_dev,
struct pfvf_message msg,
u32 pfvf_offset,
struct mutex *csr_lock)
{
return adf_gen4_pfvf_send(accel_dev, msg, pfvf_offset, csr_lock);
}
/*
 * Read a pending PF/VF message from the mailbox CSR. If the interrupt
 * bit is not set the interrupt was spurious and an empty message is
 * returned; otherwise the bit is cleared to acknowledge reception and
 * the decoded message is returned.
 */
static struct pfvf_message
adf_gen4_pfvf_recv(struct adf_accel_dev *accel_dev,
u32 pfvf_offset,
u8 compat_ver)
{
struct resource *pmisc_addr = adf_get_pmisc_base(accel_dev);
struct pfvf_message msg = { 0 };
u32 csr_val;
/* Read message from the CSR */
csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset);
if (!(csr_val & ADF_PFVF_INT)) {
device_printf(GET_DEV(accel_dev),
"Spurious PFVF interrupt, msg 0x%.8x. Ignored\n",
csr_val);
return msg;
}
/* We can now acknowledge the message reception by clearing the
 * interrupt bit
 */
ADF_CSR_WR(pmisc_addr, pfvf_offset, csr_val & ~ADF_PFVF_INT);
/* Return the pfvf_message format */
return adf_pfvf_message_of(accel_dev, csr_val, &csr_gen4_fmt);
}
/* PF-to-VF receive: thin wrapper matching the pfvf_ops recv_msg signature. */
static struct pfvf_message
adf_gen4_pf2vf_recv(struct adf_accel_dev *accel_dev,
u32 pfvf_offset,
u8 compat_ver)
{
return adf_gen4_pfvf_recv(accel_dev, pfvf_offset, compat_ver);
}
/* Populate the PF/VF ops table for a gen4 VF device. */
void
adf_gen4_init_vf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops)
{
pfvf_ops->enable_comms = adf_enable_vf2pf_comms;
pfvf_ops->get_pf2vf_offset = adf_gen4_vf_get_pfvf_offset;
pfvf_ops->get_vf2pf_offset = adf_gen4_vf_get_vfpf_offset;
pfvf_ops->send_msg = adf_gen4_vf2pf_send;
pfvf_ops->recv_msg = adf_gen4_pf2vf_recv;
}

View file

@ -0,0 +1,134 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "adf_accel_devices.h"
#include "adf_heartbeat.h"
#include "adf_common_drv.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_gen4_timer.h"
#include "adf_dev_err.h"
#define ADF_GEN4_INT_TIMER_VALUE_IN_MS 200
/* Interval between timer interrupts. Value in milliseconds. */
#define ADF_GEN4_MAX_INT_TIMER_VALUE_IN_MS 0xFFFFFFFF
/* Maximum interval between timer interrupts. Value in milliseconds. */
/*
 * Compute the next timer expiry, in jiffies, aligned down to a whole
 * multiple of the interval so expirations stay on a fixed grid.
 */
static u64
adf_get_next_timeout(u32 timeout_val)
{
	u64 interval = msecs_to_jiffies(timeout_val);
	u64 expiry = jiffies + interval;

	return rounddown(expiry, interval);
}
/*
 * Workqueue bottom half for the heartbeat timer: pushes the current
 * interrupt count to firmware via an init/admin HEARTBEAT_SYNC command.
 * Bails out silently if the timer was torn down or disabled in the
 * meantime. Always frees the per-invocation hb_timer_data.
 */
static void
adf_hb_irq_bh_handler(struct work_struct *work)
{
struct icp_qat_fw_init_admin_req req = { 0 };
struct icp_qat_fw_init_admin_resp resp = { 0 };
struct adf_hb_timer_data *hb_timer_data =
container_of(work, struct adf_hb_timer_data, hb_int_timer_work);
struct adf_accel_dev *accel_dev = hb_timer_data->accel_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u32 ae_mask = hw_data->ae_mask;
if (!accel_dev->int_timer || !accel_dev->int_timer->enabled)
goto end;
/* Update heartbeat count via init/admin cmd */
if (!accel_dev->admin) {
device_printf(GET_DEV(accel_dev),
"adf_admin is not available\n");
goto end;
}
req.cmd_id = ICP_QAT_FW_HEARTBEAT_SYNC;
req.heartbeat_ticks = accel_dev->int_timer->int_cnt;
if (adf_send_admin(accel_dev, &req, &resp, ae_mask))
device_printf(GET_DEV(accel_dev),
"Failed to update qat's HB count\n");
end:
kfree(hb_timer_data);
}
/*
 * Periodic timer callback: queues the heartbeat bottom half (allocation
 * is GFP_ATOMIC since this runs in timer context), bumps the interrupt
 * counter and re-arms the timer for the next aligned expiry. A failed
 * allocation only skips this period's heartbeat update.
 */
static void
timer_handler(struct timer_list *tl)
{
struct adf_int_timer *int_timer = from_timer(int_timer, tl, timer);
struct adf_accel_dev *accel_dev = int_timer->accel_dev;
struct adf_hb_timer_data *hb_timer_data = NULL;
u64 timeout_val = adf_get_next_timeout(int_timer->timeout_val);
/* Update TL TBD */
/* Schedule a heartbeat work queue to update HB */
hb_timer_data = kzalloc(sizeof(*hb_timer_data), GFP_ATOMIC);
if (hb_timer_data) {
hb_timer_data->accel_dev = accel_dev;
INIT_WORK(&hb_timer_data->hb_int_timer_work,
adf_hb_irq_bh_handler);
queue_work(int_timer->timer_irq_wq,
&hb_timer_data->hb_int_timer_work);
} else {
device_printf(GET_DEV(accel_dev),
"Failed to alloc heartbeat timer data\n");
}
int_timer->int_cnt++;
mod_timer(tl, timeout_val);
}
/*
 * Allocate and start the per-device heartbeat interval timer.
 * Creates a single-threaded, memory-reclaim-safe workqueue for the
 * bottom half and arms the timer for the first aligned expiry.
 * Returns 0 on success (including a NULL accel_dev, treated as no-op)
 * or -ENOMEM on allocation failure.
 */
int
adf_int_timer_init(struct adf_accel_dev *accel_dev)
{
	u64 timeout_val = adf_get_next_timeout(ADF_GEN4_INT_TIMER_VALUE_IN_MS);
	struct adf_int_timer *int_timer = NULL;
	char wqname[32] = { 0 };

	if (!accel_dev)
		return 0;

	int_timer = kzalloc(sizeof(*int_timer), GFP_KERNEL);
	if (!int_timer)
		return -ENOMEM;

	/* Bounded formatting: sprintf into a fixed buffer is unsafe. */
	snprintf(wqname, sizeof(wqname), "qat_timer_wq_%d",
	    accel_dev->accel_id);

	int_timer->timer_irq_wq = alloc_workqueue(wqname, WQ_MEM_RECLAIM, 1);
	if (!int_timer->timer_irq_wq) {
		kfree(int_timer);
		return -ENOMEM;
	}

	int_timer->accel_dev = accel_dev;
	int_timer->timeout_val = ADF_GEN4_INT_TIMER_VALUE_IN_MS;
	int_timer->int_cnt = 0;
	int_timer->enabled = true;
	accel_dev->int_timer = int_timer;

	/* Publish the timer only after it is fully initialized, then arm. */
	timer_setup(&int_timer->timer, timer_handler, 0);
	mod_timer(&int_timer->timer, timeout_val);

	return 0;
}
/*
 * Stop and free the heartbeat interval timer: cancel the timer, mark it
 * disabled so any queued bottom half becomes a no-op, then flush and
 * destroy the workqueue before freeing.
 * NOTE(review): enabled is cleared after del_timer_sync(); work queued
 * just before cancellation checks enabled in the bottom half, so the
 * flush below drains it — confirm no re-arm path exists in between.
 */
void
adf_int_timer_exit(struct adf_accel_dev *accel_dev)
{
if (accel_dev && accel_dev->int_timer) {
del_timer_sync(&accel_dev->int_timer->timer);
accel_dev->int_timer->enabled = false;
if (accel_dev->int_timer->timer_irq_wq) {
flush_workqueue(accel_dev->int_timer->timer_irq_wq);
destroy_workqueue(accel_dev->int_timer->timer_irq_wq);
}
kfree(accel_dev->int_timer);
accel_dev->int_timer = NULL;
}
}

View file

@ -0,0 +1,162 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "adf_accel_devices.h"
#include "adf_gen4vf_hw_csr_data.h"
/* Encode a ring base address + size into the gen4 ring-base CSR format. */
static u64
build_csr_ring_base_addr(dma_addr_t addr, u32 size)
{
return BUILD_RING_BASE_ADDR_GEN4(addr, size);
}
/* Read a ring's head pointer (gen4 VF CSR layout). */
static u32
read_csr_ring_head(struct resource *csr_base_addr, u32 bank, u32 ring)
{
return READ_CSR_RING_HEAD_GEN4VF(csr_base_addr, bank, ring);
}
/* Write a ring's head pointer. */
static void
write_csr_ring_head(struct resource *csr_base_addr,
u32 bank,
u32 ring,
u32 value)
{
WRITE_CSR_RING_HEAD_GEN4VF(csr_base_addr, bank, ring, value);
}
/* Read a ring's tail pointer. */
static u32
read_csr_ring_tail(struct resource *csr_base_addr, u32 bank, u32 ring)
{
return READ_CSR_RING_TAIL_GEN4VF(csr_base_addr, bank, ring);
}
/* Write a ring's tail pointer. */
static void
write_csr_ring_tail(struct resource *csr_base_addr,
u32 bank,
u32 ring,
u32 value)
{
WRITE_CSR_RING_TAIL_GEN4VF(csr_base_addr, bank, ring, value);
}
/* Read the bank's empty-status register. */
static u32
read_csr_e_stat(struct resource *csr_base_addr, u32 bank)
{
return READ_CSR_E_STAT_GEN4VF(csr_base_addr, bank);
}
/* Write a ring's configuration register. */
static void
write_csr_ring_config(struct resource *csr_base_addr,
u32 bank,
u32 ring,
u32 value)
{
WRITE_CSR_RING_CONFIG_GEN4VF(csr_base_addr, bank, ring, value);
}
/* Read a ring's programmed DMA base address. */
static dma_addr_t
read_csr_ring_base(struct resource *csr_base_addr, u32 bank, u32 ring)
{
return READ_CSR_RING_BASE_GEN4VF(csr_base_addr, bank, ring);
}
/* Program a ring's DMA base address. */
static void
write_csr_ring_base(struct resource *csr_base_addr,
u32 bank,
u32 ring,
dma_addr_t addr)
{
WRITE_CSR_RING_BASE_GEN4VF(csr_base_addr, bank, ring, addr);
}
/* Write the bank's interrupt-flag register. */
static void
write_csr_int_flag(struct resource *csr_base_addr, u32 bank, u32 value)
{
WRITE_CSR_INT_FLAG_GEN4VF(csr_base_addr, bank, value);
}
/* Program the bank's interrupt source-select register. */
static void
write_csr_int_srcsel(struct resource *csr_base_addr, u32 bank)
{
WRITE_CSR_INT_SRCSEL_GEN4VF(csr_base_addr, bank);
}
/* Write the bank's interrupt-coalescing enable register. */
static void
write_csr_int_col_en(struct resource *csr_base_addr, u32 bank, u32 value)
{
WRITE_CSR_INT_COL_EN_GEN4VF(csr_base_addr, bank, value);
}
/* Write the bank's interrupt-coalescing control register. */
static void
write_csr_int_col_ctl(struct resource *csr_base_addr, u32 bank, u32 value)
{
WRITE_CSR_INT_COL_CTL_GEN4VF(csr_base_addr, bank, value);
}
/* Write the combined interrupt flag-and-coalescing register. */
static void
write_csr_int_flag_and_col(struct resource *csr_base_addr, u32 bank, u32 value)
{
WRITE_CSR_INT_FLAG_AND_COL_GEN4VF(csr_base_addr, bank, value);
}
/* Read the bank's ring-service arbiter enable register. */
static u32
read_csr_ring_srv_arb_en(struct resource *csr_base_addr, u32 bank)
{
return READ_CSR_RING_SRV_ARB_EN_GEN4VF(csr_base_addr, bank);
}
/* Write the bank's ring-service arbiter enable register. */
static void
write_csr_ring_srv_arb_en(struct resource *csr_base_addr, u32 bank, u32 value)
{
WRITE_CSR_RING_SRV_ARB_EN_GEN4VF(csr_base_addr, bank, value);
}
/* Default interrupt source-select mask for gen4 banks. */
static u32
get_src_sel_mask(void)
{
return ADF_BANK_INT_SRC_SEL_MASK_GEN4;
}
/* Bit that enables interrupt coalescing in INT_COL_CTL. */
static u32
get_int_col_ctl_enable_mask(void)
{
return ADF_RING_CSR_INT_COL_CTL_ENABLE;
}
/* Per-bank IRQ mask; gen4 VF banks use a single bit. The input mask is
 * ignored by design for this generation. */
static u32
get_bank_irq_mask(u32 irq_mask)
{
return 0x1;
}
/*
 * Populate the CSR info/ops table for a gen4 VF device with the layout
 * constants and accessor functions defined above.
 */
void
gen4vf_init_hw_csr_info(struct adf_hw_csr_info *csr_info)
{
struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops;
csr_info->csr_addr_offset = ADF_RING_CSR_ADDR_OFFSET_GEN4VF;
csr_info->ring_bundle_size = ADF_RING_BUNDLE_SIZE_GEN4;
csr_info->bank_int_flag_clear_mask = ADF_BANK_INT_FLAG_CLEAR_MASK_GEN4;
csr_info->num_rings_per_int_srcsel = ADF_RINGS_PER_INT_SRCSEL_GEN4;
csr_info->arb_enable_mask = 0x1;
csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr;
csr_ops->read_csr_ring_head = read_csr_ring_head;
csr_ops->write_csr_ring_head = write_csr_ring_head;
csr_ops->read_csr_ring_tail = read_csr_ring_tail;
csr_ops->write_csr_ring_tail = write_csr_ring_tail;
csr_ops->read_csr_e_stat = read_csr_e_stat;
csr_ops->write_csr_ring_config = write_csr_ring_config;
csr_ops->read_csr_ring_base = read_csr_ring_base;
csr_ops->write_csr_ring_base = write_csr_ring_base;
csr_ops->write_csr_int_flag = write_csr_int_flag;
csr_ops->write_csr_int_srcsel = write_csr_int_srcsel;
csr_ops->write_csr_int_col_en = write_csr_int_col_en;
csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl;
csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col;
csr_ops->read_csr_ring_srv_arb_en = read_csr_ring_srv_arb_en;
csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en;
csr_ops->get_src_sel_mask = get_src_sel_mask;
csr_ops->get_int_col_ctl_enable_mask = get_int_col_ctl_enable_mask;
csr_ops->get_bank_irq_mask = get_bank_irq_mask;
}

View file

@ -114,6 +114,25 @@ adf_update_ring_arb(struct adf_etr_ring_data *ring)
arben);
}
/*
 * Recompute the arbiter enable mask for a UIO bundle from its
 * rings_enabled bitmap: a service is arbitrated only when both its tx
 * half (low bits) and rx half (shifted by the arb mask width) are
 * enabled. Caller is expected to hold the bundle lock.
 */
void
adf_update_uio_ring_arb(struct adf_uio_control_bundle *bundle)
{
int shift;
u32 arben, arben_tx, arben_rx, arb_mask;
struct adf_accel_dev *accel_dev = bundle->uio_priv.accel->accel_dev;
struct adf_hw_csr_info *csr_info = &accel_dev->hw_device->csr_info;
struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops;
arb_mask = csr_info->arb_enable_mask;
/* Width of the tx half == population count of the arb mask. */
shift = hweight32(arb_mask);
arben_tx = bundle->rings_enabled & arb_mask;
arben_rx = (bundle->rings_enabled >> shift) & arb_mask;
arben = arben_tx & arben_rx;
csr_ops->write_csr_ring_srv_arb_en(bundle->csr_addr,
bundle->hardware_bundle_number,
arben);
}
void
adf_enable_ring_arb(struct adf_accel_dev *accel_dev,
void *csr_addr,
@ -121,16 +140,15 @@ adf_enable_ring_arb(struct adf_accel_dev *accel_dev,
unsigned int mask)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
struct resource *csr = csr_addr;
u32 arbenable;
if (!csr)
if (!csr_addr)
return;
mutex_lock(&csr_arb_lock);
arbenable = csr_ops->read_csr_ring_srv_arb_en(csr, bank_nr);
arbenable = csr_ops->read_csr_ring_srv_arb_en(csr_addr, bank_nr);
arbenable |= mask & 0xFF;
csr_ops->write_csr_ring_srv_arb_en(csr, bank_nr, arbenable);
csr_ops->write_csr_ring_srv_arb_en(csr_addr, bank_nr, arbenable);
mutex_unlock(&csr_arb_lock);
}

View file

@ -10,6 +10,7 @@
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_dev_err.h"
#include "adf_uio.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/mutex.h>
@ -77,7 +78,6 @@ adf_cfg_add_device_params(struct adf_accel_dev *accel_dev)
char mmp_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
struct adf_hw_device_data *hw_data = NULL;
unsigned long val;
if (!accel_dev)
return -EINVAL;
@ -349,18 +349,9 @@ adf_dev_init(struct adf_accel_dev *accel_dev)
hw_data->enable_error_correction(accel_dev);
if (hw_data->enable_vf2pf_comms &&
hw_data->enable_vf2pf_comms(accel_dev)) {
device_printf(GET_DEV(accel_dev),
"QAT: Failed to enable vf2pf comms\n");
return EFAULT;
}
if (adf_pf_vf_capabilities_init(accel_dev))
return EFAULT;
if (adf_pf_vf_ring_to_svc_init(accel_dev))
return EFAULT;
ret = hw_data->csr_info.pfvf_ops.enable_comms(accel_dev);
if (ret)
return ret;
if (adf_cfg_add_device_params(accel_dev))
return EFAULT;
@ -462,6 +453,12 @@ adf_dev_start(struct adf_accel_dev *accel_dev)
return EFAULT;
}
if (hw_data->int_timer_init && hw_data->int_timer_init(accel_dev)) {
device_printf(GET_DEV(accel_dev),
"Failed to init heartbeat interrupt timer\n");
return -EFAULT;
}
list_for_each(list_itr, &service_table)
{
service = list_entry(list_itr, struct service_hndl, list);
@ -474,6 +471,18 @@ adf_dev_start(struct adf_accel_dev *accel_dev)
set_bit(accel_dev->accel_id, service->start_status);
}
if (accel_dev->is_vf || !accel_dev->u1.pf.vf_info) {
/*Register UIO devices */
if (adf_uio_register(accel_dev)) {
adf_uio_remove(accel_dev);
device_printf(GET_DEV(accel_dev),
"Failed to register UIO devices\n");
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
return ENODEV;
}
}
if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status) &&
adf_cfg_add_ext_params(accel_dev))
return EFAULT;
@ -521,6 +530,9 @@ adf_dev_stop(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
if (accel_dev->hw_device->int_timer_exit)
accel_dev->hw_device->int_timer_exit(accel_dev);
list_for_each(list_itr, &service_table)
{
service = list_entry(list_itr, struct service_hndl, list);
@ -529,6 +541,11 @@ adf_dev_stop(struct adf_accel_dev *accel_dev)
clear_bit(accel_dev->accel_id, service->start_status);
}
if (accel_dev->is_vf || !accel_dev->u1.pf.vf_info) {
/* Remove UIO Devices */
adf_uio_remove(accel_dev);
}
if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
if (adf_ae_stop(accel_dev))
device_printf(GET_DEV(accel_dev),
@ -596,9 +613,6 @@ adf_dev_shutdown(struct adf_accel_dev *accel_dev)
hw_data->disable_iov(accel_dev);
if (hw_data->disable_vf2pf_comms)
hw_data->disable_vf2pf_comms(accel_dev);
if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
hw_data->free_irq(accel_dev);
clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);

View file

@ -1,147 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include <linux/device.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pf2vf_msg.h"
#include "adf_cfg.h"
#define ADF_VF2PF_CAPABILITIES_V1_VERSION 1
#define ADF_VF2PF_CAPABILITIES_V1_LENGTH 4
#define ADF_VF2PF_CAPABILITIES_V2_VERSION 2
#define ADF_VF2PF_CAPABILITIES_CAP_OFFSET 4
#define ADF_VF2PF_CAPABILITIES_V2_LENGTH 8
#define ADF_VF2PF_CAPABILITIES_V3_VERSION 3
#define ADF_VF2PF_CAPABILITIES_FREQ_OFFSET 8
#define ADF_VF2PF_CAPABILITIES_V3_LENGTH 12
/*
 * PF-side block-message provider for the VF2PF capabilities summary.
 * Serializes, little-endian byte by byte: extended DC capabilities
 * (offset 0), the accel capabilities mask (offset 4) and, when known,
 * the clock frequency (offset 8, bumping the block to V3).
 * NOTE(review): `data` is function-static, so the returned buffer is
 * shared across calls — assumes callers serialize access; verify.
 */
static int
adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
u8 **buffer,
u8 *length,
u8 *block_version,
u8 compatibility,
u8 byte_num)
{
static u8 data[ADF_VF2PF_CAPABILITIES_V3_LENGTH] = { 0 };
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u32 ext_dc_caps = hw_data->extended_dc_capabilities;
u32 capabilities = hw_data->accel_capabilities_mask;
u32 frequency = hw_data->clock_frequency;
u16 byte = 0;
u16 index = 0;
for (byte = 0; byte < sizeof(ext_dc_caps); byte++) {
data[byte] = (ext_dc_caps >> (byte * ADF_PFVF_DATA_SHIFT)) &
ADF_PFVF_DATA_MASK;
}
for (byte = 0, index = ADF_VF2PF_CAPABILITIES_CAP_OFFSET;
byte < sizeof(capabilities);
byte++, index++) {
data[index] = (capabilities >> (byte * ADF_PFVF_DATA_SHIFT)) &
ADF_PFVF_DATA_MASK;
}
/* Frequency is optional: omit it (V2) when unknown/zero. */
if (frequency) {
for (byte = 0, index = ADF_VF2PF_CAPABILITIES_FREQ_OFFSET;
byte < sizeof(frequency);
byte++, index++) {
data[index] =
(frequency >> (byte * ADF_PFVF_DATA_SHIFT)) &
ADF_PFVF_DATA_MASK;
}
*length = ADF_VF2PF_CAPABILITIES_V3_LENGTH;
*block_version = ADF_VF2PF_CAPABILITIES_V3_VERSION;
} else {
*length = ADF_VF2PF_CAPABILITIES_V2_LENGTH;
*block_version = ADF_VF2PF_CAPABILITIES_V2_VERSION;
}
*buffer = data;
return 0;
}
/*
 * Exchange device capabilities over the PFVF channel.
 *
 * On the PF: register the capability-summary block provider (once).
 * On the VF (when the PF supports ADF_PFVF_COMPATIBILITY_CAPABILITIES):
 * fetch the capability block and unpack, in order, the extended DC
 * capabilities (v1, bytes 0..3), the acceleration capability mask
 * (v2, bytes 4..7) and the clock frequency (v3, bytes 8..11) into
 * hw_device.  A PF too old for the protocol leaves the extended DC
 * capabilities cleared.
 *
 * Returns 0 on success, EFAULT when the block transfer fails or the
 * reply is shorter than the v1 payload.
 */
int
adf_pf_vf_capabilities_init(struct adf_accel_dev *accel_dev)
{
	u8 data[ADF_VF2PF_CAPABILITIES_V3_LENGTH] = { 0 };
	u8 len = ADF_VF2PF_CAPABILITIES_V3_LENGTH;
	u8 version = ADF_VF2PF_CAPABILITIES_V2_VERSION;
	u32 ex_dc_cap = 0;
	u32 capabilities = 0;
	u32 frequency = 0;
	u16 byte = 0;
	u16 index = 0;
	if (!accel_dev->is_vf) {
		/* on the pf */
		if (!adf_iov_is_block_provider_registered(
			ADF_VF2PF_BLOCK_MSG_CAP_SUMMARY))
			adf_iov_block_provider_register(
			    ADF_VF2PF_BLOCK_MSG_CAP_SUMMARY,
			    adf_pf_capabilities_msg_provider);
	} else if (accel_dev->u1.vf.pf_version >=
		   ADF_PFVF_COMPATIBILITY_CAPABILITIES) {
		/* on the vf */
		if (adf_iov_block_get(accel_dev,
				      ADF_VF2PF_BLOCK_MSG_CAP_SUMMARY,
				      &version,
				      data,
				      &len)) {
			device_printf(GET_DEV(accel_dev),
				      "QAT: Failed adf_iov_block_get\n");
			return EFAULT;
		}
		if (len < ADF_VF2PF_CAPABILITIES_V1_LENGTH) {
			device_printf(
			    GET_DEV(accel_dev),
			    "Capabilities message truncated to %d bytes\n",
			    len);
			return EFAULT;
		}
		/* v1 payload: extended DC capabilities, LSB first. */
		for (byte = 0; byte < sizeof(ex_dc_cap); byte++) {
			ex_dc_cap |= data[byte] << (byte * ADF_PFVF_DATA_SHIFT);
		}
		accel_dev->hw_device->extended_dc_capabilities = ex_dc_cap;
		/* Get capabilities if provided by PF */
		if (len >= ADF_VF2PF_CAPABILITIES_V2_LENGTH) {
			for (byte = 0,
			    index = ADF_VF2PF_CAPABILITIES_CAP_OFFSET;
			     byte < sizeof(capabilities);
			     byte++, index++) {
				capabilities |= data[index]
				    << (byte * ADF_PFVF_DATA_SHIFT);
			}
			accel_dev->hw_device->accel_capabilities_mask =
			    capabilities;
		} else {
			device_printf(GET_DEV(accel_dev),
				      "PF did not communicate capabilities\n");
		}
		/* Get frequency if provided by the PF */
		if (len >= ADF_VF2PF_CAPABILITIES_V3_LENGTH) {
			for (byte = 0,
			    index = ADF_VF2PF_CAPABILITIES_FREQ_OFFSET;
			     byte < sizeof(frequency);
			     byte++, index++) {
				frequency |= data[index]
				    << (byte * ADF_PFVF_DATA_SHIFT);
			}
			accel_dev->hw_device->clock_frequency = frequency;
		} else {
			device_printf(GET_DEV(accel_dev),
				      "PF did not communicate frequency\n");
		}
	} else {
		/* The PF is too old to support the extended capabilities */
		accel_dev->hw_device->extended_dc_capabilities = 0;
	}
	return 0;
}

View file

@ -1,896 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pf2vf_msg.h"
/* Registered block-message providers, indexed by block message type. */
adf_iov_block_provider
pf2vf_message_providers[ADF_VF2PF_MAX_LARGE_MESSAGE_TYPE + 1];
/*
 * Precomputed 256-entry lookup table for the 8-bit CRC protecting PFVF
 * block message transfers (one table lookup per input byte; see
 * adf_pfvf_crc()).
 */
unsigned char pfvf_crc8_table[] =
    { 0x00, 0x97, 0xB9, 0x2E, 0xE5, 0x72, 0x5C, 0xCB, 0x5D, 0xCA, 0xE4, 0x73,
      0xB8, 0x2F, 0x01, 0x96, 0xBA, 0x2D, 0x03, 0x94, 0x5F, 0xC8, 0xE6, 0x71,
      0xE7, 0x70, 0x5E, 0xC9, 0x02, 0x95, 0xBB, 0x2C, 0xE3, 0x74, 0x5A, 0xCD,
      0x06, 0x91, 0xBF, 0x28, 0xBE, 0x29, 0x07, 0x90, 0x5B, 0xCC, 0xE2, 0x75,
      0x59, 0xCE, 0xE0, 0x77, 0xBC, 0x2B, 0x05, 0x92, 0x04, 0x93, 0xBD, 0x2A,
      0xE1, 0x76, 0x58, 0xCF, 0x51, 0xC6, 0xE8, 0x7F, 0xB4, 0x23, 0x0D, 0x9A,
      0x0C, 0x9B, 0xB5, 0x22, 0xE9, 0x7E, 0x50, 0xC7, 0xEB, 0x7C, 0x52, 0xC5,
      0x0E, 0x99, 0xB7, 0x20, 0xB6, 0x21, 0x0F, 0x98, 0x53, 0xC4, 0xEA, 0x7D,
      0xB2, 0x25, 0x0B, 0x9C, 0x57, 0xC0, 0xEE, 0x79, 0xEF, 0x78, 0x56, 0xC1,
      0x0A, 0x9D, 0xB3, 0x24, 0x08, 0x9F, 0xB1, 0x26, 0xED, 0x7A, 0x54, 0xC3,
      0x55, 0xC2, 0xEC, 0x7B, 0xB0, 0x27, 0x09, 0x9E, 0xA2, 0x35, 0x1B, 0x8C,
      0x47, 0xD0, 0xFE, 0x69, 0xFF, 0x68, 0x46, 0xD1, 0x1A, 0x8D, 0xA3, 0x34,
      0x18, 0x8F, 0xA1, 0x36, 0xFD, 0x6A, 0x44, 0xD3, 0x45, 0xD2, 0xFC, 0x6B,
      0xA0, 0x37, 0x19, 0x8E, 0x41, 0xD6, 0xF8, 0x6F, 0xA4, 0x33, 0x1D, 0x8A,
      0x1C, 0x8B, 0xA5, 0x32, 0xF9, 0x6E, 0x40, 0xD7, 0xFB, 0x6C, 0x42, 0xD5,
      0x1E, 0x89, 0xA7, 0x30, 0xA6, 0x31, 0x1F, 0x88, 0x43, 0xD4, 0xFA, 0x6D,
      0xF3, 0x64, 0x4A, 0xDD, 0x16, 0x81, 0xAF, 0x38, 0xAE, 0x39, 0x17, 0x80,
      0x4B, 0xDC, 0xF2, 0x65, 0x49, 0xDE, 0xF0, 0x67, 0xAC, 0x3B, 0x15, 0x82,
      0x14, 0x83, 0xAD, 0x3A, 0xF1, 0x66, 0x48, 0xDF, 0x10, 0x87, 0xA9, 0x3E,
      0xF5, 0x62, 0x4C, 0xDB, 0x4D, 0xDA, 0xF4, 0x63, 0xA8, 0x3F, 0x11, 0x86,
      0xAA, 0x3D, 0x13, 0x84, 0x4F, 0xD8, 0xF6, 0x61, 0xF7, 0x60, 0x4E, 0xD9,
      0x12, 0x85, 0xAB, 0x3C };
/*
 * Unmask the PF-to-VF doorbell interrupt by writing 0 to the VF
 * interrupt mask register in the misc BAR.
 */
void
adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct resource *misc_bar =
	    pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;

	ADF_CSR_WR(misc_bar, hw_data->get_vintmsk_offset(0), 0x0);
}
/*
 * Mask the PF-to-VF doorbell interrupt by setting bit 1 of the VF
 * interrupt mask register in the misc BAR.
 */
void
adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct resource *misc_bar =
	    pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;

	ADF_CSR_WR(misc_bar, hw_data->get_vintmsk_offset(0), 0x2);
}
/*
 * Low-level PF<->VF mailbox send: writes one 32-bit message into the
 * shared PF2VF CSR and polls (with exponential, then linear, backoff)
 * until the remote side acknowledges by clearing the interrupt bit.
 *
 * Direction is derived from accel_dev->is_vf: the same CSR carries both
 * directions, with per-direction "in use" bit patterns providing
 * ownership and collision detection.  Access is serialized per channel
 * by a mutex.
 *
 * Returns 0 on success, EAGAIN when the CSR is busy or a notification
 * collided, EIO when no ACK arrived within the timeout.
 */
static int
__adf_iov_putmsg(struct adf_accel_dev *accel_dev,
		 u32 msg,
		 u8 vf_nr,
		 bool is_notification)
{
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct resource *pmisc_bar_addr =
	    pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
	u32 val, pf2vf_offset;
	u32 total_delay = 0, mdelay = ADF_IOV_MSG_ACK_DELAY_MS,
	    udelay = ADF_IOV_MSG_ACK_DELAY_US;
	u32 local_in_use_mask, local_in_use_pattern;
	u32 remote_in_use_mask, remote_in_use_pattern;
	struct mutex *lock; /* lock preventing concurrent access of CSR */
	u32 int_bit;
	int ret = 0;
	struct pfvf_stats *pfvf_counters = NULL;
	/* Select direction-specific offset, lock, patterns and counters. */
	if (accel_dev->is_vf) {
		pf2vf_offset = hw_data->get_pf2vf_offset(0);
		lock = &accel_dev->u1.vf.vf2pf_lock;
		local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		int_bit = ADF_VF2PF_INT;
		pfvf_counters = &accel_dev->u1.vf.pfvf_counters;
	} else {
		pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
		lock = &accel_dev->u1.pf.vf_info[vf_nr].pf2vf_lock;
		local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
		local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
		remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
		remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
		int_bit = ADF_PF2VF_INT;
		pfvf_counters = &accel_dev->u1.pf.vf_info[vf_nr].pfvf_counters;
	}
	mutex_lock(lock);
	/* Check if PF2VF CSR is in use by remote function */
	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	if ((val & remote_in_use_mask) == remote_in_use_pattern) {
		device_printf(GET_DEV(accel_dev),
			      "PF2VF CSR in use by remote function\n");
		ret = EAGAIN;
		pfvf_counters->busy++;
		goto out;
	}
	/* Attempt to get ownership of PF2VF CSR */
	msg &= ~local_in_use_mask;
	msg |= local_in_use_pattern;
	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
	pfvf_counters->tx++;
	/* Wait for confirmation from remote func it received the message */
	do {
		if (udelay < ADF_IOV_MSG_ACK_EXP_MAX_DELAY_US) {
			usleep_range(udelay, udelay * 2);
			udelay = udelay * 2;
			total_delay = total_delay + udelay;
		} else {
			pause_ms("adfstop", mdelay);
			total_delay = total_delay + (mdelay * 1000);
		}
		val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
	} while ((val & int_bit) &&
		 (total_delay < ADF_IOV_MSG_ACK_LIN_MAX_DELAY_US));
	if (val & int_bit) {
		device_printf(GET_DEV(accel_dev),
			      "ACK not received from remote\n");
		pfvf_counters->no_ack++;
		val &= ~int_bit;
		ret = EIO;
	}
	/* For fire-and-forget notifications, the receiver does not clear
	 * the in-use pattern. This is used to detect collisions.
	 */
	if (is_notification && (val & ~int_bit) != msg) {
		/* Collision must have overwritten the message */
		device_printf(GET_DEV(accel_dev),
			      "Collision on notification\n");
		pfvf_counters->collision++;
		ret = EAGAIN;
		goto out;
	}
	/*
	 * If the far side did not clear the in-use pattern it is either
	 * 1) Notification - message left intact to detect collision
	 * 2) Older protocol (compatibility version < 3) on the far side
	 * where the sender is responsible for clearing the in-use
	 * pattern after the receiver has acknowledged receipt.
	 * In either case, clear the in-use pattern now.
	 */
	if ((val & local_in_use_mask) == local_in_use_pattern)
		ADF_CSR_WR(pmisc_bar_addr,
			   pf2vf_offset,
			   val & ~local_in_use_mask);
out:
	mutex_unlock(lock);
	return ret;
}
/*
 * Send a PFVF message, retrying with exponential backoff while the
 * channel reports EAGAIN (CSR busy or notification collision).  On
 * final failure the matching timeout counter is bumped.
 *
 * Returns the last status from __adf_iov_putmsg().
 */
static int
adf_iov_put(struct adf_accel_dev *accel_dev,
	    u32 msg,
	    u8 vf_nr,
	    bool is_notification)
{
	u32 attempts = 0;
	u32 backoff = ADF_IOV_MSG_RETRY_DELAY;
	struct pfvf_stats *stats;
	int status;

	stats = accel_dev->is_vf ?
	    &accel_dev->u1.vf.pfvf_counters :
	    &accel_dev->u1.pf.vf_info[vf_nr].pfvf_counters;

	do {
		status =
		    __adf_iov_putmsg(accel_dev, msg, vf_nr, is_notification);
		if (status == EAGAIN)
			pause_ms("adfstop", backoff);
		backoff *= 2;
	} while (status == EAGAIN && ++attempts < ADF_IOV_MSG_MAX_RETRIES);

	if (status == EAGAIN) {
		if (is_notification)
			stats->event_timeout++;
		else
			stats->tx_timeout++;
	}

	return status;
}
/**
 * adf_iov_putmsg() - send PF2VF message
 * @accel_dev: Pointer to acceleration device.
 * @msg: Message to send
 * @vf_nr: VF number to which the message will be sent
 *
 * Function sends a message from the PF to a VF (or VF to PF when
 * called on a VF device), retrying on contention.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
	return adf_iov_put(accel_dev, msg, vf_nr, false);
}
/**
 * adf_iov_notify() - send PF2VF notification message
 * @accel_dev: Pointer to acceleration device.
 * @msg: Message to send
 * @vf_nr: VF number to which the message will be sent
 *
 * Function sends a notification message from the PF to a VF.  Unlike
 * adf_iov_putmsg(), the in-use pattern is left in the CSR by the
 * receiver so that collisions can be detected.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_iov_notify(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
	return adf_iov_put(accel_dev, msg, vf_nr, true);
}
/*
 * Table-driven 8-bit CRC over @len bytes of @buf, seeded with
 * @start_crc so that messages can be checksummed in pieces.
 */
u8
adf_pfvf_crc(u8 start_crc, u8 *buf, u8 len)
{
	u8 remainder = start_crc;
	u8 i;

	for (i = 0; i < len; i++)
		remainder = pfvf_crc8_table[(remainder ^ buf[i]) & 0xff];

	return remainder;
}
/*
 * Install @provider as the handler for block message @msg_type.
 * Fails when the type is out of range or a handler is already present.
 *
 * Returns 0 on success, -EINVAL on error.
 */
int
adf_iov_block_provider_register(u8 msg_type,
				const adf_iov_block_provider provider)
{
	if (msg_type >= ARRAY_SIZE(pf2vf_message_providers)) {
		pr_err("QAT: invalid message type %d for PF2VF provider\n",
		       msg_type);
		return -EINVAL;
	}

	if (pf2vf_message_providers[msg_type] != NULL) {
		pr_err("QAT: Provider %ps already registered for message %d\n",
		       pf2vf_message_providers[msg_type],
		       msg_type);
		return -EINVAL;
	}

	pf2vf_message_providers[msg_type] = provider;

	return 0;
}
/*
 * Report whether a block-message provider is registered for @msg_type.
 *
 * Returns 1 when a provider is present, 0 otherwise.  Unlike the
 * register/unregister paths, this function previously indexed the
 * provider table without validating @msg_type, reading out of bounds
 * for types >= ARRAY_SIZE(pf2vf_message_providers); such types now
 * safely report "not registered".
 */
u8
adf_iov_is_block_provider_registered(u8 msg_type)
{
	if (msg_type >= ARRAY_SIZE(pf2vf_message_providers))
		return 0;

	return pf2vf_message_providers[msg_type] != NULL;
}
/*
 * Remove @provider as the handler for block message @msg_type.
 * Fails when the type is out of range or a different (or no) provider
 * is currently registered for it.
 *
 * Returns 0 on success, -EINVAL on error.
 */
int
adf_iov_block_provider_unregister(u8 msg_type,
				  const adf_iov_block_provider provider)
{
	if (msg_type >= ARRAY_SIZE(pf2vf_message_providers)) {
		pr_err("QAT: invalid message type %d for PF2VF provider\n",
		       msg_type);
		return -EINVAL;
	}

	if (pf2vf_message_providers[msg_type] != provider) {
		pr_err("QAT: Provider %ps not registered for message %d\n",
		       provider,
		       msg_type);
		return -EINVAL;
	}

	pf2vf_message_providers[msg_type] = NULL;

	return 0;
}
/*
 * Serve one byte of a VF block request from the registered provider.
 *
 * Byte layout seen by the VF: byte 0 is the block version, byte 1 the
 * payload length, bytes >= 2 the payload itself.  In CRC mode the
 * returned byte is instead the CRC over the version, length and the
 * first (byte_num - 1) payload bytes.
 *
 * On error *data is set to a PF2VF error code for the response and
 * -EINVAL is returned; 0 on success.
 */
static int
adf_iov_block_get_data(struct adf_accel_dev *accel_dev,
		       u8 msg_type,
		       u8 byte_num,
		       u8 *data,
		       u8 compatibility,
		       bool crc)
{
	u8 *buffer;
	u8 size;
	u8 msg_ver;
	u8 crc8;
	if (msg_type >= ARRAY_SIZE(pf2vf_message_providers)) {
		pr_err("QAT: invalid message type %d for PF2VF provider\n",
		       msg_type);
		*data = ADF_PF2VF_INVALID_BLOCK_TYPE;
		return -EINVAL;
	}
	if (!pf2vf_message_providers[msg_type]) {
		pr_err("QAT: No registered provider for message %d\n",
		       msg_type);
		*data = ADF_PF2VF_INVALID_BLOCK_TYPE;
		return -EINVAL;
	}
	/* Ask the provider for the whole block (buffer/size/version). */
	if ((*pf2vf_message_providers[msg_type])(
		accel_dev, &buffer, &size, &msg_ver, compatibility, byte_num)) {
		pr_err("QAT: unknown error from provider for message %d\n",
		       msg_type);
		*data = ADF_PF2VF_UNSPECIFIED_ERROR;
		return -EINVAL;
	}
	/* Payload must fit the size class implied by the message type. */
	if ((msg_type <= ADF_VF2PF_MAX_SMALL_MESSAGE_TYPE &&
	     size > ADF_VF2PF_SMALL_PAYLOAD_SIZE) ||
	    (msg_type <= ADF_VF2PF_MAX_MEDIUM_MESSAGE_TYPE &&
	     size > ADF_VF2PF_MEDIUM_PAYLOAD_SIZE) ||
	    size > ADF_VF2PF_LARGE_PAYLOAD_SIZE) {
		pr_err("QAT: Invalid size %d provided for message type %d\n",
		       size,
		       msg_type);
		*data = ADF_PF2VF_PAYLOAD_TRUNCATED;
		return -EINVAL;
	}
	/* CRC of zero bytes is meaningless; index must be in range. */
	if ((!byte_num && crc) || byte_num >= size + ADF_VF2PF_BLOCK_DATA) {
		pr_err("QAT: Invalid byte number %d for message %d\n",
		       byte_num,
		       msg_type);
		*data = ADF_PF2VF_INVALID_BYTE_NUM_REQ;
		return -EINVAL;
	}
	if (crc) {
		crc8 = adf_pfvf_crc(ADF_CRC8_INIT_VALUE, &msg_ver, 1);
		crc8 = adf_pfvf_crc(crc8, &size, 1);
		*data = adf_pfvf_crc(crc8, buffer, byte_num - 1);
	} else {
		if (byte_num == 0)
			*data = msg_ver;
		else if (byte_num == 1)
			*data = size;
		else
			*data = buffer[byte_num - 2];
	}
	return 0;
}
/* Fetch one plain data byte of a block message (no CRC). */
static int
adf_iov_block_get_byte(struct adf_accel_dev *accel_dev,
		       u8 msg_type,
		       u8 byte_num,
		       u8 *data,
		       u8 compatibility)
{
	return adf_iov_block_get_data(accel_dev,
				      msg_type,
				      byte_num,
				      data,
				      compatibility,
				      false);
}
/* Fetch the running CRC byte of a block message. */
static int
adf_iov_block_get_crc(struct adf_accel_dev *accel_dev,
		      u8 msg_type,
		      u8 byte_num,
		      u8 *data,
		      u8 compatibility)
{
	return adf_iov_block_get_data(accel_dev,
				      msg_type,
				      byte_num,
				      data,
				      compatibility,
				      true);
}
int adf_iov_compatibility_check(struct adf_accel_dev *accel_dev, u8 compat_ver);
/*
 * PF-side handler for a VF2PF doorbell interrupt.
 *
 * Decodes the 32-bit message in the shared CSR and services it:
 * compatibility/version negotiation, init/shutdown notifications, or
 * block data requests (small/medium/large, optionally CRC).  The
 * interrupt bit is then cleared to acknowledge, and a response is sent
 * back via adf_iov_putmsg() when one was built.
 */
void
adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
{
	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int bar_id = hw_data->get_misc_bar_id(hw_data);
	struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
	struct resource *pmisc_addr = pmisc->virt_addr;
	u32 msg, resp = 0, vf_nr = vf_info->vf_nr;
	u8 byte_num = 0;
	u8 msg_type = 0;
	u8 resp_type;
	int res;
	u8 data;
	u8 compat = 0x0;
	int vf_compat_ver = 0;
	bool is_notification = false;
	/* Read message from the VF */
	msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
	if (!(msg & ADF_VF2PF_INT)) {
		device_printf(GET_DEV(accel_dev),
			      "Spurious VF2PF interrupt. msg %X. Ignored\n",
			      msg);
		vf_info->pfvf_counters.spurious++;
		goto out;
	}
	vf_info->pfvf_counters.rx++;
	if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM)) {
		/* Ignore legacy non-system (non-kernel) VF2PF messages */
		device_printf(GET_DEV(accel_dev),
			      "Ignored non-system message from VF%d (0x%x);\n",
			      vf_nr + 1,
			      msg);
		/*
		 * To ack, clear the VF2PFINT bit.
		 * Because this must be a legacy message, the far side
		 * must clear the in-use pattern.
		 */
		msg &= ~(ADF_VF2PF_INT);
		ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);
		goto out;
	}
	switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
	case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
	{
		is_notification = false;
		vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
		vf_info->compat_ver = vf_compat_ver;
		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			(ADF_PF2VF_MSGTYPE_VERSION_RESP
			 << ADF_PF2VF_MSGTYPE_SHIFT) |
			(ADF_PFVF_COMPATIBILITY_VERSION
			 << ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
		device_printf(
		    GET_DEV(accel_dev),
		    "Compatibility Version Request from VF%d vers=%u\n",
		    vf_nr + 1,
		    vf_info->compat_ver);
		/* Older VF: run the registered compatibility checkers. */
		if (vf_compat_ver < ADF_PFVF_COMPATIBILITY_VERSION)
			compat = adf_iov_compatibility_check(accel_dev,
							     vf_compat_ver);
		else if (vf_compat_ver == ADF_PFVF_COMPATIBILITY_VERSION)
			compat = ADF_PF2VF_VF_COMPATIBLE;
		else
			compat = ADF_PF2VF_VF_COMPAT_UNKNOWN;
		resp |= compat << ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		if (compat == ADF_PF2VF_VF_INCOMPATIBLE)
			device_printf(GET_DEV(accel_dev),
				      "VF%d and PF are incompatible.\n",
				      vf_nr + 1);
	} break;
	case ADF_VF2PF_MSGTYPE_VERSION_REQ:
		device_printf(GET_DEV(accel_dev),
			      "Legacy VersionRequest received from VF%d 0x%x\n",
			      vf_nr + 1,
			      msg);
		is_notification = false;
		/* legacy driver, VF compat_ver is 0 */
		vf_info->compat_ver = 0;
		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
			(ADF_PF2VF_MSGTYPE_VERSION_RESP
			 << ADF_PF2VF_MSGTYPE_SHIFT));
		/* PF always newer than legacy VF */
		compat =
		    adf_iov_compatibility_check(accel_dev, vf_info->compat_ver);
		resp |= compat << ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
		/* Set legacy major and minor version num */
		resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
		    1 << ADF_PF2VF_MINORVERSION_SHIFT;
		if (compat == ADF_PF2VF_VF_INCOMPATIBLE)
			device_printf(GET_DEV(accel_dev),
				      "VF%d and PF are incompatible.\n",
				      vf_nr + 1);
		break;
	case ADF_VF2PF_MSGTYPE_INIT: {
		device_printf(GET_DEV(accel_dev),
			      "Init message received from VF%d 0x%x\n",
			      vf_nr + 1,
			      msg);
		is_notification = true;
		vf_info->init = true;
	} break;
	case ADF_VF2PF_MSGTYPE_SHUTDOWN: {
		device_printf(GET_DEV(accel_dev),
			      "Shutdown message received from VF%d 0x%x\n",
			      vf_nr + 1,
			      msg);
		is_notification = true;
		vf_info->init = false;
	} break;
	case ADF_VF2PF_MSGTYPE_GET_LARGE_BLOCK_REQ:
	case ADF_VF2PF_MSGTYPE_GET_MEDIUM_BLOCK_REQ:
	case ADF_VF2PF_MSGTYPE_GET_SMALL_BLOCK_REQ: {
		is_notification = false;
		/* Extract the byte index and provider type for this class. */
		switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >>
			ADF_VF2PF_MSGTYPE_SHIFT) {
		case ADF_VF2PF_MSGTYPE_GET_LARGE_BLOCK_REQ:
			byte_num =
			    ((msg & ADF_VF2PF_LARGE_BLOCK_BYTE_NUM_MASK) >>
			     ADF_VF2PF_LARGE_BLOCK_BYTE_NUM_SHIFT);
			msg_type =
			    ((msg & ADF_VF2PF_LARGE_BLOCK_REQ_TYPE_MASK) >>
			     ADF_VF2PF_BLOCK_REQ_TYPE_SHIFT);
			msg_type += ADF_VF2PF_MIN_LARGE_MESSAGE_TYPE;
			break;
		case ADF_VF2PF_MSGTYPE_GET_MEDIUM_BLOCK_REQ:
			byte_num =
			    ((msg & ADF_VF2PF_MEDIUM_BLOCK_BYTE_NUM_MASK) >>
			     ADF_VF2PF_MEDIUM_BLOCK_BYTE_NUM_SHIFT);
			msg_type =
			    ((msg & ADF_VF2PF_MEDIUM_BLOCK_REQ_TYPE_MASK) >>
			     ADF_VF2PF_BLOCK_REQ_TYPE_SHIFT);
			msg_type += ADF_VF2PF_MIN_MEDIUM_MESSAGE_TYPE;
			break;
		case ADF_VF2PF_MSGTYPE_GET_SMALL_BLOCK_REQ:
			byte_num =
			    ((msg & ADF_VF2PF_SMALL_BLOCK_BYTE_NUM_MASK) >>
			     ADF_VF2PF_SMALL_BLOCK_BYTE_NUM_SHIFT);
			msg_type =
			    ((msg & ADF_VF2PF_SMALL_BLOCK_REQ_TYPE_MASK) >>
			     ADF_VF2PF_BLOCK_REQ_TYPE_SHIFT);
			msg_type += ADF_VF2PF_MIN_SMALL_MESSAGE_TYPE;
			break;
		}
		if (msg >> ADF_VF2PF_BLOCK_REQ_CRC_SHIFT) {
			res = adf_iov_block_get_crc(accel_dev,
						    msg_type,
						    byte_num,
						    &data,
						    vf_info->compat_ver);
			if (res)
				resp_type = ADF_PF2VF_BLOCK_RESP_TYPE_ERROR;
			else
				resp_type = ADF_PF2VF_BLOCK_RESP_TYPE_CRC;
		} else {
			if (!byte_num)
				vf_info->pfvf_counters.blk_tx++;
			res = adf_iov_block_get_byte(accel_dev,
						     msg_type,
						     byte_num,
						     &data,
						     vf_info->compat_ver);
			if (res)
				resp_type = ADF_PF2VF_BLOCK_RESP_TYPE_ERROR;
			else
				resp_type = ADF_PF2VF_BLOCK_RESP_TYPE_DATA;
		}
		resp =
		    (ADF_PF2VF_MSGORIGIN_SYSTEM |
		     (ADF_PF2VF_MSGTYPE_BLOCK_RESP << ADF_PF2VF_MSGTYPE_SHIFT) |
		     (resp_type << ADF_PF2VF_BLOCK_RESP_TYPE_SHIFT) |
		     (data << ADF_PF2VF_BLOCK_RESP_DATA_SHIFT));
	} break;
	default:
		device_printf(GET_DEV(accel_dev),
			      "Unknown message from VF%d (0x%x);\n",
			      vf_nr + 1,
			      msg);
	}
	/* To ack, clear the VF2PFINT bit and the in-use-by */
	msg &= ~ADF_VF2PF_INT;
	/*
	 * Clear the in-use pattern if the sender won't do it.
	 * Because the compatibility version must be the first message
	 * exchanged between the VF and PF, the vf_info->compat_ver must be
	 * set at this time.
	 * The in-use pattern is not cleared for notifications so that
	 * it can be used for collision detection.
	 */
	if (vf_info->compat_ver >= ADF_PFVF_COMPATIBILITY_FAST_ACK &&
	    !is_notification)
		msg &= ~ADF_VF2PF_IN_USE_BY_VF_MASK;
	ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);
	if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
		device_printf(GET_DEV(accel_dev),
			      "Failed to send response to VF\n");
out:
	return;
}
/*
 * Broadcast a RESTARTING notification to every VF that has completed
 * init, warning them that the PF is about to go down.
 */
void
adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_vf_info *vf_info = accel_dev->u1.pf.vf_info;
	int num_vfs = accel_dev->u1.pf.num_vfs;
	u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
		   (ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
	int vf_nr;

	for (vf_nr = 0; vf_nr < num_vfs; vf_nr++) {
		if (!vf_info[vf_nr].init)
			continue;
		if (adf_iov_notify(accel_dev, msg, vf_nr))
			device_printf(
			    GET_DEV(accel_dev),
			    "Failed to send restarting msg to VF%d\n",
			    vf_nr);
	}
}
/*
 * Broadcast a FATAL_ERROR notification to every VF that has completed
 * init, so their drivers can react to the PF-side failure.
 */
void
adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_vf_info *vf_info = accel_dev->u1.pf.vf_info;
	int num_vfs = accel_dev->u1.pf.num_vfs;
	u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
		   (ADF_PF2VF_MSGTYPE_FATAL_ERROR << ADF_PF2VF_MSGTYPE_SHIFT));
	int vf_nr;

	for (vf_nr = 0; vf_nr < num_vfs; vf_nr++) {
		if (!vf_info[vf_nr].init)
			continue;
		if (adf_iov_notify(accel_dev, msg, vf_nr))
			device_printf(
			    GET_DEV(accel_dev),
			    "Failed to send fatal error msg 0x%x to VF%d\n",
			    msg,
			    vf_nr);
	}
}
/*
 * Register a PF/VF compatibility checker with the device's
 * compatibility manager.  Checkers fill the first free slot and are
 * consulted in registration order by adf_iov_compatibility_check().
 *
 * Returns 0 on success, ENOMEM when the manager is missing, EFAULT on
 * duplicate registration or when the checker table is full.
 */
int
adf_iov_register_compat_checker(struct adf_accel_dev *accel_dev,
				const adf_iov_compat_checker_t cc)
{
	struct adf_accel_compat_manager *cm = accel_dev->cm;
	int num = 0;

	if (!cm) {
		device_printf(GET_DEV(accel_dev),
			      "QAT: compatibility manager not initialized\n");
		return ENOMEM;
	}
	for (num = 0; num < ADF_COMPAT_CHECKER_MAX; num++) {
		if (cm->iov_compat_checkers[num]) {
			if (cc == cm->iov_compat_checkers[num]) {
				device_printf(GET_DEV(accel_dev),
					      "QAT: already registered\n");
				return EFAULT;
			}
		} else {
			/* registering the new checker */
			cm->iov_compat_checkers[num] = cc;
			break;
		}
	}
	if (num >= ADF_COMPAT_CHECKER_MAX) {
		device_printf(GET_DEV(accel_dev),
			      "QAT: compatibility checkers are overflow.\n");
		return EFAULT;
	}
	/*
	 * num_chker holds the number of registered checkers.  The previous
	 * code stored the index of the new checker (num) instead of the
	 * count, which left the most recently registered checker invisible
	 * to adf_iov_compatibility_check() (it iterates i < num_chker) and
	 * made adf_iov_unregister_compat_checker()'s num_chker - 1 indexing
	 * fail after the first registration.
	 */
	cm->num_chker = num + 1;
	return 0;
}
/*
 * Unregister the most recently registered compatibility checker.
 *
 * Only LIFO unregistration is supported: @cc must be the checker at
 * slot num_chker - 1, otherwise the call fails.  NOTE(review): this
 * assumes num_chker counts registered checkers — confirm against
 * adf_iov_register_compat_checker(), which stores the slot index.
 *
 * Returns 0 on success, ENOMEM when the manager is missing, EFAULT
 * when no checker is registered or @cc is not the newest one.
 */
int
adf_iov_unregister_compat_checker(struct adf_accel_dev *accel_dev,
				  const adf_iov_compat_checker_t cc)
{
	struct adf_accel_compat_manager *cm = accel_dev->cm;
	int num = 0;
	if (!cm) {
		device_printf(GET_DEV(accel_dev),
			      "QAT: compatibility manager not initialized\n");
		return ENOMEM;
	}
	num = cm->num_chker - 1;
	if (num < 0) {
		device_printf(
		    GET_DEV(accel_dev),
		    "QAT: Array 'iov_compat_checkers' may use index value(s) -1\n");
		return EFAULT;
	}
	if (cc == cm->iov_compat_checkers[num]) {
		/* unregistering the given checker */
		cm->iov_compat_checkers[num] = NULL;
	} else {
		device_printf(
		    GET_DEV(accel_dev),
		    "QAT: unregistering not in the registered order\n");
		return EFAULT;
	}
	cm->num_chker--;
	return 0;
}
int
adf_iov_init_compat_manager(struct adf_accel_dev *accel_dev,
struct adf_accel_compat_manager **cm)
{
if (!(*cm)) {
*cm = malloc(sizeof(**cm), M_QAT, M_WAITOK | M_ZERO);
} else {
/* zero the struct */
explicit_bzero(*cm, sizeof(**cm));
}
return 0;
}
/*
 * Release the compatibility manager, if one was allocated, and clear
 * the caller's pointer.  Always returns 0.
 */
int
adf_iov_shutdown_compat_manager(struct adf_accel_dev *accel_dev,
				struct adf_accel_compat_manager **cm)
{
	if (*cm == NULL)
		return 0;

	free(*cm, M_QAT);
	*cm = NULL;

	return 0;
}
/*
 * Run every registered compatibility checker against @compat_ver.
 *
 * Stops at the first checker reporting ADF_PF2VF_VF_INCOMPATIBLE and
 * returns that verdict; otherwise returns the last checker's result
 * (ADF_PF2VF_VF_COMPATIBLE when no checkers are registered).  A
 * missing compatibility manager is treated as incompatible.
 */
int
adf_iov_compatibility_check(struct adf_accel_dev *accel_dev, u8 compat_ver)
{
	int compatible = ADF_PF2VF_VF_COMPATIBLE;
	int i = 0;
	struct adf_accel_compat_manager *cm = accel_dev->cm;
	if (!cm) {
		device_printf(GET_DEV(accel_dev),
			      "QAT: compatibility manager not initialized\n");
		return ADF_PF2VF_VF_INCOMPATIBLE;
	}
	for (i = 0; i < cm->num_chker; i++) {
		compatible = cm->iov_compat_checkers[i](accel_dev, compat_ver);
		if (compatible == ADF_PF2VF_VF_INCOMPATIBLE) {
			device_printf(
			    GET_DEV(accel_dev),
			    "QAT: PF and VF are incompatible [checker%d]\n",
			    i);
			break;
		}
	}
	return compatible;
}
/*
 * VF side of compatibility negotiation: send a COMPAT_VER_REQ carrying
 * our protocol version and sleep (with retries) until the interrupt
 * handler signals iov_msg_completion, then evaluate the PF's verdict.
 *
 * Returns 0 when compatible, EIO on send/response timeout, EFAULT when
 * the PF and VF are incompatible.
 */
static int
adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
{
	unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
	u32 msg = 0;
	int ret = 0;
	int comp = 0;
	int response_received = 0;
	int retry_count = 0;
	struct pfvf_stats *pfvf_counters = NULL;
	pfvf_counters = &accel_dev->u1.vf.pfvf_counters;
	msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
	msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
	msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
	BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
	/* Clear communication flag - without that VF will not be waiting for
	 * the response from host driver, and start sending init.
	 */
	accel_dev->u1.vf.iov_msg_completion = 0;
	do {
		/* Send request from VF to PF */
		if (retry_count)
			pfvf_counters->retry++;
		if (adf_iov_putmsg(accel_dev, msg, 0)) {
			device_printf(
			    GET_DEV(accel_dev),
			    "Failed to send Compat Version Request.\n");
			return EIO;
		}
		mutex_lock(&accel_dev->u1.vf.vf2pf_lock);
		/* Sleep until the response handler wakes us or we time out. */
		if (accel_dev->u1.vf.iov_msg_completion == 0 &&
		    sx_sleep(&accel_dev->u1.vf.iov_msg_completion,
			     &accel_dev->u1.vf.vf2pf_lock.sx,
			     0,
			     "pfver",
			     timeout) == EWOULDBLOCK) {
			/* It's possible that wakeup could be missed */
			if (accel_dev->u1.vf.iov_msg_completion) {
				response_received = 1;
			} else {
				device_printf(
				    GET_DEV(accel_dev),
				    "IOV request/response message timeout expired\n");
			}
		} else {
			response_received = 1;
		}
		mutex_unlock(&accel_dev->u1.vf.vf2pf_lock);
	} while (!response_received &&
		 ++retry_count < ADF_IOV_MSG_RESP_RETRIES);
	if (!response_received)
		pfvf_counters->rx_timeout++;
	else
		pfvf_counters->rx_rsp++;
	if (!response_received)
		return EIO;
	if (accel_dev->u1.vf.compatible == ADF_PF2VF_VF_COMPAT_UNKNOWN)
		/* Response from PF received, check compatibility */
		comp = adf_iov_compatibility_check(accel_dev,
						   accel_dev->u1.vf.pf_version);
	else
		comp = accel_dev->u1.vf.compatible;
	ret = (comp == ADF_PF2VF_VF_COMPATIBLE) ? 0 : EFAULT;
	if (ret)
		device_printf(
		    GET_DEV(accel_dev),
		    "VF is not compatible with PF, due to the reason %d\n",
		    comp);
	return ret;
}
/**
 * adf_enable_vf2pf_comms() - Function enables communication from vf to pf
 *
 * @accel_dev: Pointer to acceleration device virtual function.
 *
 * Sets up the VF workqueue, unmasks the PF2VF interrupt, initializes
 * the compatibility manager and negotiates the protocol version with
 * the PF.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	int ret = 0;
	/* init workqueue for VF */
	ret = adf_init_vf_wq();
	if (ret)
		return ret;
	adf_enable_pf2vf_interrupts(accel_dev);
	/* Return value ignored: adf_iov_init_compat_manager() always
	 * returns 0 (M_WAITOK allocation cannot fail). */
	adf_iov_init_compat_manager(accel_dev, &accel_dev->cm);
	return adf_vf2pf_request_version(accel_dev);
}
/**
 * adf_disable_vf2pf_comms() - Function disables communication from vf to pf
 *
 * @accel_dev: Pointer to acceleration device virtual function.
 *
 * Tears down the compatibility manager allocated by
 * adf_enable_vf2pf_comms().
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_disable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	return adf_iov_shutdown_compat_manager(accel_dev, &accel_dev->cm);
}
/**
 * adf_pf_enable_vf2pf_comms() - Function enables communication from pf
 *
 * @accel_dev: Pointer to acceleration device physical function.
 *
 * Initializes the PF-side compatibility manager; interrupt plumbing is
 * handled elsewhere.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	adf_iov_init_compat_manager(accel_dev, &accel_dev->cm);
	return 0;
}
/**
 * adf_pf_disable_vf2pf_comms() - Function disables communication from pf
 *
 * @accel_dev: Pointer to acceleration device physical function.
 *
 * Tears down the PF-side compatibility manager.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_pf_disable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	return adf_iov_shutdown_compat_manager(accel_dev, &accel_dev->cm);
}

View file

@ -1,74 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include <linux/device.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pf2vf_msg.h"
#include "adf_cfg.h"
#define ADF_VF2PF_RING_TO_SVC_VERSION 1
#define ADF_VF2PF_RING_TO_SVC_LENGTH 2
/*
 * Block-message provider returning the PF's ring-to-service map as a
 * v1 block (two bytes, least significant byte first).  Always returns
 * 0.
 */
int
adf_pf_ring_to_svc_msg_provider(struct adf_accel_dev *accel_dev,
				u8 **buffer,
				u8 *length,
				u8 *block_version,
				u8 compatibility,
				u8 byte_num)
{
	/* Static storage: the caller consumes the buffer immediately. */
	static u8 data[ADF_VF2PF_RING_TO_SVC_LENGTH] = { 0 };
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	u16 ring_to_svc_map = hw_data->ring_to_svc_map;
	u16 i;

	for (i = 0; i < ADF_VF2PF_RING_TO_SVC_LENGTH; i++)
		data[i] = (ring_to_svc_map >> (i * ADF_PFVF_DATA_SHIFT)) &
		    ADF_PFVF_DATA_MASK;

	*length = ADF_VF2PF_RING_TO_SVC_LENGTH;
	*block_version = ADF_VF2PF_RING_TO_SVC_VERSION;
	*buffer = data;

	return 0;
}
/*
 * Exchange the ring-to-service map over the PFVF channel.
 *
 * On the PF: register the ring-to-service block provider (once).  On
 * the VF (when the PF supports ADF_PFVF_COMPATIBILITY_RING_TO_SVC_MAP):
 * fetch the two-byte block and unpack it into hw_data->ring_to_svc_map.
 * Older PFs leave the map untouched.
 *
 * Returns 0 on success, EFAULT when the block transfer fails.
 */
int
adf_pf_vf_ring_to_svc_init(struct adf_accel_dev *accel_dev)
{
	u8 data[ADF_VF2PF_RING_TO_SVC_LENGTH] = { 0 };
	u8 len = ADF_VF2PF_RING_TO_SVC_LENGTH;
	u8 version = ADF_VF2PF_RING_TO_SVC_VERSION;
	u16 ring_to_svc_map = 0;
	u16 byte = 0;
	if (!accel_dev->is_vf) {
		/* on the pf */
		if (!adf_iov_is_block_provider_registered(
			ADF_VF2PF_BLOCK_MSG_GET_RING_TO_SVC_REQ))
			adf_iov_block_provider_register(
			    ADF_VF2PF_BLOCK_MSG_GET_RING_TO_SVC_REQ,
			    adf_pf_ring_to_svc_msg_provider);
	} else if (accel_dev->u1.vf.pf_version >=
		   ADF_PFVF_COMPATIBILITY_RING_TO_SVC_MAP) {
		/* on the vf */
		if (adf_iov_block_get(accel_dev,
				      ADF_VF2PF_BLOCK_MSG_GET_RING_TO_SVC_REQ,
				      &version,
				      data,
				      &len)) {
			device_printf(GET_DEV(accel_dev),
				      "QAT: Failed adf_iov_block_get\n");
			return EFAULT;
		}
		/* Unpack the map, least significant byte first. */
		for (byte = 0; byte < ADF_VF2PF_RING_TO_SVC_LENGTH; byte++) {
			ring_to_svc_map |= data[byte]
			    << (byte * ADF_PFVF_DATA_SHIFT);
		}
		GET_HW_DATA(accel_dev)->ring_to_svc_map = ring_to_svc_map;
	}
	return 0;
}

View file

@ -0,0 +1,102 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "adf_pfvf_msg.h"
#include "adf_pfvf_utils.h"
/* CRC Calculation */
#define ADF_CRC8_INIT_VALUE 0xFF
/*
 * Precomputed 256-entry lookup table for the 8-bit CRC protecting PFVF
 * block messages (one table lookup per input byte; see adf_pfvf_crc()).
 */
static const unsigned char pfvf_crc8_table[] =
    { 0x00, 0x97, 0xB9, 0x2E, 0xE5, 0x72, 0x5C, 0xCB, 0x5D, 0xCA, 0xE4, 0x73,
      0xB8, 0x2F, 0x01, 0x96, 0xBA, 0x2D, 0x03, 0x94, 0x5F, 0xC8, 0xE6, 0x71,
      0xE7, 0x70, 0x5E, 0xC9, 0x02, 0x95, 0xBB, 0x2C, 0xE3, 0x74, 0x5A, 0xCD,
      0x06, 0x91, 0xBF, 0x28, 0xBE, 0x29, 0x07, 0x90, 0x5B, 0xCC, 0xE2, 0x75,
      0x59, 0xCE, 0xE0, 0x77, 0xBC, 0x2B, 0x05, 0x92, 0x04, 0x93, 0xBD, 0x2A,
      0xE1, 0x76, 0x58, 0xCF, 0x51, 0xC6, 0xE8, 0x7F, 0xB4, 0x23, 0x0D, 0x9A,
      0x0C, 0x9B, 0xB5, 0x22, 0xE9, 0x7E, 0x50, 0xC7, 0xEB, 0x7C, 0x52, 0xC5,
      0x0E, 0x99, 0xB7, 0x20, 0xB6, 0x21, 0x0F, 0x98, 0x53, 0xC4, 0xEA, 0x7D,
      0xB2, 0x25, 0x0B, 0x9C, 0x57, 0xC0, 0xEE, 0x79, 0xEF, 0x78, 0x56, 0xC1,
      0x0A, 0x9D, 0xB3, 0x24, 0x08, 0x9F, 0xB1, 0x26, 0xED, 0x7A, 0x54, 0xC3,
      0x55, 0xC2, 0xEC, 0x7B, 0xB0, 0x27, 0x09, 0x9E, 0xA2, 0x35, 0x1B, 0x8C,
      0x47, 0xD0, 0xFE, 0x69, 0xFF, 0x68, 0x46, 0xD1, 0x1A, 0x8D, 0xA3, 0x34,
      0x18, 0x8F, 0xA1, 0x36, 0xFD, 0x6A, 0x44, 0xD3, 0x45, 0xD2, 0xFC, 0x6B,
      0xA0, 0x37, 0x19, 0x8E, 0x41, 0xD6, 0xF8, 0x6F, 0xA4, 0x33, 0x1D, 0x8A,
      0x1C, 0x8B, 0xA5, 0x32, 0xF9, 0x6E, 0x40, 0xD7, 0xFB, 0x6C, 0x42, 0xD5,
      0x1E, 0x89, 0xA7, 0x30, 0xA6, 0x31, 0x1F, 0x88, 0x43, 0xD4, 0xFA, 0x6D,
      0xF3, 0x64, 0x4A, 0xDD, 0x16, 0x81, 0xAF, 0x38, 0xAE, 0x39, 0x17, 0x80,
      0x4B, 0xDC, 0xF2, 0x65, 0x49, 0xDE, 0xF0, 0x67, 0xAC, 0x3B, 0x15, 0x82,
      0x14, 0x83, 0xAD, 0x3A, 0xF1, 0x66, 0x48, 0xDF, 0x10, 0x87, 0xA9, 0x3E,
      0xF5, 0x62, 0x4C, 0xDB, 0x4D, 0xDA, 0xF4, 0x63, 0xA8, 0x3F, 0x11, 0x86,
      0xAA, 0x3D, 0x13, 0x84, 0x4F, 0xD8, 0xF6, 0x61, 0xF7, 0x60, 0x4E, 0xD9,
      0x12, 0x85, 0xAB, 0x3C };
/*
 * Table-driven 8-bit CRC over @len bytes of @buf, seeded with
 * @start_crc so that a message can be checksummed in pieces.
 */
static u8
adf_pfvf_crc(u8 start_crc, u8 const *buf, u8 len)
{
	u8 remainder = start_crc;
	u8 i;

	for (i = 0; i < len; i++)
		remainder = pfvf_crc8_table[(remainder ^ buf[i]) & 0xff];

	return remainder;
}
/*
 * Compute the CRC of a complete PFVF block-message buffer, starting
 * from the standard seed value.
 */
u8
adf_pfvf_calc_blkmsg_crc(u8 const *buf, u8 buf_len)
{
	return adf_pfvf_crc(ADF_CRC8_INIT_VALUE, buf, buf_len);
}
/*
 * OR @value into @csr_msg at the field position described by @fmt.
 * Values that do not fit in the field's mask are rejected and logged.
 *
 * Returns true on success, false when @value is out of range.
 */
static bool
set_value_on_csr_msg(struct adf_accel_dev *accel_dev,
		     u32 *csr_msg,
		     u32 value,
		     const struct pfvf_field_format *fmt)
{
	if (unlikely((value & fmt->mask) != value)) {
		device_printf(
		    GET_DEV(accel_dev),
		    "PFVF message value 0x%X out of range, %u max allowed\n",
		    value,
		    fmt->mask);
		return false;
	}
	*csr_msg |= value << fmt->offset;
	return true;
}
/*
 * Encode a pfvf_message into its raw 32-bit CSR representation using
 * @fmt, tagging the result as a system-originated message.
 *
 * Returns 0 when either the type or data field overflows its mask.
 */
u32
adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev,
		    struct pfvf_message msg,
		    const struct pfvf_csr_format *fmt)
{
	u32 raw = 0;

	if (!set_value_on_csr_msg(accel_dev, &raw, msg.type, &fmt->type))
		return 0;
	if (!set_value_on_csr_msg(accel_dev, &raw, msg.data, &fmt->data))
		return 0;

	return raw | ADF_PFVF_MSGORIGIN_SYSTEM;
}
/*
 * Decode a raw 32-bit CSR value into a pfvf_message using @fmt.
 * A zero type field is diagnosed as an invalid message but still
 * returned to the caller.
 */
struct pfvf_message
adf_pfvf_message_of(struct adf_accel_dev *accel_dev,
		    u32 csr_msg,
		    const struct pfvf_csr_format *fmt)
{
	struct pfvf_message msg = {
		.type = (csr_msg >> fmt->type.offset) & fmt->type.mask,
		.data = (csr_msg >> fmt->data.offset) & fmt->data.mask,
	};

	if (unlikely(!msg.type))
		device_printf(GET_DEV(accel_dev),
			      "Invalid PFVF msg with no type received\n");

	return msg;
}

View file

@ -0,0 +1,185 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include <linux/bitfield.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pfvf_msg.h"
#include "adf_pfvf_vf_msg.h"
#include "adf_pfvf_vf_proto.h"
/**
 * adf_vf2pf_notify_init() - send init msg to PF
 * @accel_dev: Pointer to acceleration VF device.
 *
 * Sends the INIT notification from the VF to the PF and, on success,
 * records that the PF is known to be running.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
{
	struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_INIT };
	int ret = adf_send_vf2pf_msg(accel_dev, msg);

	if (ret) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to send Init event to PF\n");
		return -EFAULT;
	}

	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);

	return 0;
}
/**
 * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
 * @accel_dev: Pointer to acceleration VF device.
 *
 * Sends a shutdown notification from the VF to the PF, but only when the
 * PF was previously seen running.
 *
 * Return: void
 */
void
adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
{
	struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_SHUTDOWN };

	/* Nothing to tell the PF if it was never marked as running. */
	if (!test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
		return;

	if (adf_send_vf2pf_msg(accel_dev, msg))
		device_printf(GET_DEV(accel_dev),
			      "Failed to send Shutdown event to PF\n");
}
/*
 * adf_vf2pf_request_version() - negotiate the PFVF protocol version
 * @accel_dev: Pointer to acceleration VF device.
 *
 * Sends this VF's compatibility version to the PF and interprets the PF's
 * verdict.  On success the PF's version is cached in
 * accel_dev->u1.vf.pf_compat_ver for later feature gating.
 *
 * Return: 0 if the PF and VF are compatible, negative error code otherwise.
 */
int
adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
{
	u8 pf_version;
	int compat;
	int ret;
	struct pfvf_message resp;
	struct pfvf_message msg = {
		.type = ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ,
		.data = ADF_PFVF_COMPAT_THIS_VERSION,
	};
	/* The version must fit in the message's 8-bit data field. */
	BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
	ret = adf_send_vf2pf_req(accel_dev, msg, &resp);
	if (ret) {
		device_printf(
		    GET_DEV(accel_dev),
		    "Failed to send Compatibility Version Request.\n");
		return ret;
	}
	pf_version = FIELD_GET(ADF_PF2VF_VERSION_RESP_VERS_MASK, resp.data);
	compat = FIELD_GET(ADF_PF2VF_VERSION_RESP_RESULT_MASK, resp.data);
	/* Response from PF received, check compatibility */
	switch (compat) {
	case ADF_PF2VF_VF_COMPATIBLE:
		break;
	case ADF_PF2VF_VF_COMPAT_UNKNOWN:
		/* VF is newer than PF - compatible for now */
		break;
	case ADF_PF2VF_VF_INCOMPATIBLE:
		device_printf(
		    GET_DEV(accel_dev),
		    "PF (vers %d) and VF (vers %d) are not compatible\n",
		    pf_version,
		    ADF_PFVF_COMPAT_THIS_VERSION);
		return -EINVAL;
	default:
		device_printf(
		    GET_DEV(accel_dev),
		    "Invalid response from PF; assume not compatible\n");
		return -EINVAL;
	}
	accel_dev->u1.vf.pf_compat_ver = pf_version;
	return 0;
}
/*
 * adf_vf2pf_get_capabilities() - fetch device capabilities from the PF
 * @accel_dev: Pointer to acceleration VF device.
 *
 * Retrieves the capability-summary block message and fills in the
 * hw_device fields covered by the capability version the PF replied with.
 * PFs that predate ADF_PFVF_COMPAT_CAPABILITIES are silently tolerated.
 *
 * Return: 0 on success, -EFAULT on transport or truncation failure.
 */
int
adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct capabilities_v3 cap_msg = { 0 };
	unsigned int len = sizeof(cap_msg);
	if (accel_dev->u1.vf.pf_compat_ver < ADF_PFVF_COMPAT_CAPABILITIES)
		/* The PF is too old to support the extended capabilities */
		return 0;
	if (adf_send_vf2pf_blkmsg_req(accel_dev,
				      ADF_VF2PF_BLKMSG_REQ_CAP_SUMMARY,
				      (u8 *)&cap_msg,
				      &len)) {
		device_printf(GET_DEV(accel_dev),
			      "QAT: Failed to get block message response\n");
		return -EFAULT;
	}
	/*
	 * Process from the newest version down; each case deliberately falls
	 * through so the fields of all older versions are picked up too.
	 */
	switch (cap_msg.hdr.version) {
	default:
		/* Newer version received, handle only the known parts */
		fallthrough;
	case ADF_PFVF_CAPABILITIES_V3_VERSION:
		if (likely(len >= sizeof(struct capabilities_v3)))
			hw_data->clock_frequency = cap_msg.frequency;
		else
			device_printf(GET_DEV(accel_dev),
				      "Could not get frequency");
		fallthrough;
	case ADF_PFVF_CAPABILITIES_V2_VERSION:
		if (likely(len >= sizeof(struct capabilities_v2))) {
			hw_data->accel_capabilities_mask = cap_msg.capabilities;
		} else {
			device_printf(GET_DEV(accel_dev),
				      "Could not get capabilities");
		}
		fallthrough;
	case ADF_PFVF_CAPABILITIES_V1_VERSION:
		/* v1 content is mandatory; a response shorter than v1 fails. */
		if (likely(len >= sizeof(struct capabilities_v1))) {
			hw_data->extended_dc_capabilities = cap_msg.ext_dc_caps;
		} else {
			device_printf(
			    GET_DEV(accel_dev),
			    "Capabilities message truncated to %d bytes\n",
			    len);
			return -EFAULT;
		}
	}
	return 0;
}
/*
 * adf_vf2pf_get_ring_to_svc() - fetch the ring-to-service map from the PF
 * @accel_dev: Pointer to acceleration VF device.
 *
 * Requests the RING_SVC_MAP block message and installs the returned
 * mapping into hw_device; PFs that predate the feature keep the default
 * mapping already configured.
 *
 * Return: 0 on success, -EFAULT on transport or truncation failure.
 */
int
adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev)
{
	struct ring_to_svc_map_v1 rts_map_msg = { 0 };
	unsigned int len = sizeof(rts_map_msg);
	if (accel_dev->u1.vf.pf_compat_ver < ADF_PFVF_COMPAT_RING_TO_SVC_MAP)
		/* Use already set default mappings */
		return 0;
	if (adf_send_vf2pf_blkmsg_req(accel_dev,
				      ADF_VF2PF_BLKMSG_REQ_RING_SVC_MAP,
				      (u8 *)&rts_map_msg,
				      &len)) {
		device_printf(GET_DEV(accel_dev),
			      "QAT: Failed to get block message response\n");
		return -EFAULT;
	}
	if (unlikely(len < sizeof(struct ring_to_svc_map_v1))) {
		device_printf(GET_DEV(accel_dev),
			      "RING_TO_SVC message truncated to %d bytes\n",
			      len);
		return -EFAULT;
	}
	/* Only v1 at present */
	accel_dev->hw_device->ring_to_svc_map = rts_map_msg.map;
	return 0;
}

View file

@ -0,0 +1,405 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include <linux/kernel.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pfvf_msg.h"
#include "adf_pfvf_utils.h"
#include "adf_pfvf_vf_msg.h"
#include "adf_pfvf_vf_proto.h"
/* Bit position of the least significant set bit of x (mask shift helper). */
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
/* Largest value that fits in the field described by _mask. */
#define FIELD_MAX(_mask) ({ (typeof(_mask))((_mask) >> __bf_shf(_mask)); })
/* Shift _val into position within the field described by _mask. */
#define FIELD_PREP(_mask, _val) \
	({ ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); })
/* Extract the field described by _mask from register value _reg. */
#define FIELD_GET(_mask, _reg) \
	({ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); })
/**
 * adf_send_vf2pf_msg() - send VF to PF message
 * @accel_dev: Pointer to acceleration device
 * @msg: Message to send
 *
 * This function allows the VF to send a message to the PF.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg)
{
	struct adf_pfvf_ops *ops = GET_PFVF_OPS(accel_dev);

	return ops->send_msg(accel_dev,
			     msg,
			     ops->get_pf2vf_offset(0),
			     &accel_dev->u1.vf.vf2pf_lock);
}
/**
 * adf_recv_pf2vf_msg() - receive a PF to VF message
 * @accel_dev: Pointer to acceleration device
 *
 * This function allows the VF to receive a message from the PF.
 *
 * Return: a valid message on success, zero otherwise.
 */
static struct pfvf_message
adf_recv_pf2vf_msg(struct adf_accel_dev *accel_dev)
{
	struct adf_pfvf_ops *ops = GET_PFVF_OPS(accel_dev);
	u32 offset = ops->get_vf2pf_offset(0);

	return ops->recv_msg(accel_dev, offset, accel_dev->u1.vf.pf_compat_ver);
}
/**
 * adf_send_vf2pf_req() - send VF2PF request message
 * @accel_dev: Pointer to acceleration device.
 * @msg: Request message to send
 * @resp: Returned PF response
 *
 * This function sends a message that requires a response from the VF to the PF
 * and waits for a reply.  The send is retried up to
 * ADF_PFVF_MSG_RESP_RETRIES times when the PF does not answer within
 * ADF_PFVF_MSG_RESP_TIMEOUT milliseconds.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_send_vf2pf_req(struct adf_accel_dev *accel_dev,
		   struct pfvf_message msg,
		   struct pfvf_message *resp)
{
	unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
	unsigned int retries = ADF_PFVF_MSG_RESP_RETRIES;
	int ret;
	/* Re-arm the completion before sending so no wakeup can be lost. */
	reinit_completion(&accel_dev->u1.vf.msg_received);
	/* Send request from VF to PF */
	do {
		ret = adf_send_vf2pf_msg(accel_dev, msg);
		if (ret) {
			device_printf(GET_DEV(accel_dev),
				      "Failed to send request msg to PF\n");
			return ret;
		}
		/* Wait for response, if it times out retry */
		ret =
		    wait_for_completion_timeout(&accel_dev->u1.vf.msg_received,
						timeout);
		if (ret) {
			/* The ISR stored the PF's reply in vf.response. */
			if (likely(resp))
				*resp = accel_dev->u1.vf.response;
			/* Once copied, set to an invalid value */
			accel_dev->u1.vf.response.type = 0;
			return 0;
		}
		device_printf(GET_DEV(accel_dev),
			      "PFVF response message timeout\n");
	} while (--retries);
	return -EIO;
}
/*
 * adf_vf2pf_blkmsg_data_req() - exchange one block-message request/response
 * @accel_dev: Pointer to acceleration device.
 * @crc: true to request a CRC, false to request a data byte
 * @type: in: logical block message type; out: response type field
 * @data: in: byte index (or byte count when @crc); out: response payload
 *
 * Maps the logical block type onto the small/medium/large wire message,
 * validates the byte index against the size category's limit, sends the
 * request and unpacks the response back into @type/@data.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int
adf_vf2pf_blkmsg_data_req(struct adf_accel_dev *accel_dev,
			  bool crc,
			  u8 *type,
			  u8 *data)
{
	struct pfvf_message req = { 0 };
	struct pfvf_message resp = { 0 };
	u8 blk_type;
	u8 blk_byte;
	u8 msg_type;
	u8 max_data;
	int err;
	/* Convert the block type to {small, medium, large} size category */
	if (*type <= ADF_VF2PF_SMALL_BLOCK_TYPE_MAX) {
		msg_type = ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ;
		blk_type = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_TYPE_MASK, *type);
		blk_byte = FIELD_PREP(ADF_VF2PF_SMALL_BLOCK_BYTE_MASK, *data);
		max_data = ADF_VF2PF_SMALL_BLOCK_BYTE_MAX;
	} else if (*type <= ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX) {
		/* Medium/large types are re-based past the previous range. */
		msg_type = ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ;
		blk_type = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_TYPE_MASK,
				      *type - ADF_VF2PF_SMALL_BLOCK_TYPE_MAX);
		blk_byte = FIELD_PREP(ADF_VF2PF_MEDIUM_BLOCK_BYTE_MASK, *data);
		max_data = ADF_VF2PF_MEDIUM_BLOCK_BYTE_MAX;
	} else if (*type <= ADF_VF2PF_LARGE_BLOCK_TYPE_MAX) {
		msg_type = ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ;
		blk_type = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_TYPE_MASK,
				      *type - ADF_VF2PF_MEDIUM_BLOCK_TYPE_MAX);
		blk_byte = FIELD_PREP(ADF_VF2PF_LARGE_BLOCK_BYTE_MASK, *data);
		max_data = ADF_VF2PF_LARGE_BLOCK_BYTE_MAX;
	} else {
		device_printf(GET_DEV(accel_dev),
			      "Invalid message type %u\n",
			      *type);
		return -EINVAL;
	}
	/* Sanity check */
	if (*data > max_data) {
		device_printf(GET_DEV(accel_dev),
			      "Invalid byte %s %u for message type %u\n",
			      crc ? "count" : "index",
			      *data,
			      *type);
		return -EINVAL;
	}
	/* Build the block message */
	req.type = msg_type;
	req.data =
	    blk_type | blk_byte | FIELD_PREP(ADF_VF2PF_BLOCK_CRC_REQ_MASK, crc);
	err = adf_send_vf2pf_req(accel_dev, req, &resp);
	if (err)
		return err;
	*type = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_TYPE_MASK, resp.data);
	*data = FIELD_GET(ADF_PF2VF_BLKMSG_RESP_DATA_MASK, resp.data);
	return 0;
}
/*
 * Fetch one payload byte of block message @type from the PF; on success
 * *data receives the byte at @index.
 */
static int
adf_vf2pf_blkmsg_get_byte(struct adf_accel_dev *accel_dev,
			  u8 type,
			  u8 index,
			  u8 *data)
{
	int err = adf_vf2pf_blkmsg_data_req(accel_dev, false, &type, &index);

	if (err < 0)
		return err;

	if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_DATA)) {
		device_printf(GET_DEV(accel_dev),
			      "Unexpected BLKMSG response type %u, byte 0x%x\n",
			      type,
			      index);
		return -EFAULT;
	}

	*data = index;

	return 0;
}
/*
 * adf_vf2pf_blkmsg_get_crc() - request the CRC8 of a block message
 * @accel_dev: Pointer to acceleration device.
 * @type: block message type
 * @bytes: number of bytes covered by the CRC
 * @crc: out: CRC8 value reported by the PF
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int
adf_vf2pf_blkmsg_get_crc(struct adf_accel_dev *accel_dev,
			 u8 type,
			 u8 bytes,
			 u8 *crc)
{
	int ret;
	/* The count of bytes refers to a length, however shift it to a 0-based
	 * count to avoid overflows. Thus, a request for 0 bytes is technically
	 * valid.
	 */
	--bytes;
	ret = adf_vf2pf_blkmsg_data_req(accel_dev, true, &type, &bytes);
	if (ret < 0)
		return ret;
	if (unlikely(type != ADF_PF2VF_BLKMSG_RESP_TYPE_CRC)) {
		device_printf(
		    GET_DEV(accel_dev),
		    "Unexpected CRC BLKMSG response type %u, crc 0x%x\n",
		    type,
		    bytes);
		return -EFAULT;
	}
	/* On success the response payload byte is the CRC itself. */
	*crc = bytes;
	return 0;
}
/**
 * adf_send_vf2pf_blkmsg_req() - retrieve block message
 * @accel_dev: Pointer to acceleration VF device.
 * @type: The block message type, see adf_pfvf_msg.h for allowed values
 * @buffer: input buffer where to place the received data
 * @buffer_len: buffer length as input, the amount of written bytes on output
 *
 * Request a message of type 'type' over the block message transport.
 * This function will send the required amount block message requests and
 * return the overall content back to the caller through the provided buffer.
 * The buffer should be large enough to contain the requested message type,
 * otherwise the response will be truncated.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_send_vf2pf_blkmsg_req(struct adf_accel_dev *accel_dev,
			  u8 type,
			  u8 *buffer,
			  unsigned int *buffer_len)
{
	unsigned int index;
	unsigned int msg_len;
	int ret;
	u8 remote_crc;
	u8 local_crc;
	if (unlikely(type > ADF_VF2PF_LARGE_BLOCK_TYPE_MAX)) {
		device_printf(GET_DEV(accel_dev),
			      "Invalid block message type %d\n",
			      type);
		return -EINVAL;
	}
	if (unlikely(*buffer_len < ADF_PFVF_BLKMSG_HEADER_SIZE)) {
		device_printf(GET_DEV(accel_dev),
			      "Buffer size too small for a block message\n");
		return -EINVAL;
	}
	/* Fetch the header: version byte first... */
	ret = adf_vf2pf_blkmsg_get_byte(accel_dev,
					type,
					ADF_PFVF_BLKMSG_VER_BYTE,
					&buffer[ADF_PFVF_BLKMSG_VER_BYTE]);
	if (unlikely(ret))
		return ret;
	if (unlikely(!buffer[ADF_PFVF_BLKMSG_VER_BYTE])) {
		device_printf(GET_DEV(accel_dev),
			      "Invalid version 0 received for block request %u",
			      type);
		return -EFAULT;
	}
	/* ...then the payload length byte. */
	ret = adf_vf2pf_blkmsg_get_byte(accel_dev,
					type,
					ADF_PFVF_BLKMSG_LEN_BYTE,
					&buffer[ADF_PFVF_BLKMSG_LEN_BYTE]);
	if (unlikely(ret))
		return ret;
	if (unlikely(!buffer[ADF_PFVF_BLKMSG_LEN_BYTE])) {
		device_printf(GET_DEV(accel_dev),
			      "Invalid size 0 received for block request %u",
			      type);
		return -EFAULT;
	}
	/* We need to pick the minimum since there is no way to request a
	 * specific version. As a consequence any scenario is possible:
	 * - PF has a newer (longer) version which doesn't fit in the buffer
	 * - VF expects a newer (longer) version, so we must not ask for
	 *   bytes in excess
	 * - PF and VF share the same version, no problem
	 */
	msg_len =
	    ADF_PFVF_BLKMSG_HEADER_SIZE + buffer[ADF_PFVF_BLKMSG_LEN_BYTE];
	msg_len = min(*buffer_len, msg_len);
	/* Get the payload */
	for (index = ADF_PFVF_BLKMSG_HEADER_SIZE; index < msg_len; index++) {
		ret = adf_vf2pf_blkmsg_get_byte(accel_dev,
						type,
						index,
						&buffer[index]);
		if (unlikely(ret))
			return ret;
	}
	/* Verify end-to-end integrity against the PF-computed CRC8. */
	ret = adf_vf2pf_blkmsg_get_crc(accel_dev, type, msg_len, &remote_crc);
	if (unlikely(ret))
		return ret;
	local_crc = adf_pfvf_calc_blkmsg_crc(buffer, msg_len);
	if (unlikely(local_crc != remote_crc)) {
		device_printf(
		    GET_DEV(accel_dev),
		    "CRC error on msg type %d. Local %02X, remote %02X\n",
		    type,
		    local_crc,
		    remote_crc);
		return -EIO;
	}
	*buffer_len = msg_len;
	return 0;
}
/*
 * Dispatch a received PF->VF message.  Returns true when the PF2VF
 * interrupt should be re-enabled afterwards, false otherwise.
 */
static bool
adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg)
{
	if (msg.type == ADF_PF2VF_MSGTYPE_RESTARTING) {
		adf_pf2vf_handle_pf_restarting(accel_dev);
		return false;
	}

	if (msg.type == ADF_PF2VF_MSGTYPE_RP_RESET_RESP) {
		adf_pf2vf_handle_pf_rp_reset(accel_dev, msg);
		return true;
	}

	if (msg.type == ADF_PF2VF_MSGTYPE_VERSION_RESP ||
	    msg.type == ADF_PF2VF_MSGTYPE_BLKMSG_RESP) {
		/* Hand the reply to the waiter in adf_send_vf2pf_req(). */
		accel_dev->u1.vf.response = msg;
		complete(&accel_dev->u1.vf.msg_received);
		return true;
	}

	device_printf(
	    GET_DEV(accel_dev),
	    "Unknown message from PF (type 0x%.4x, data: 0x%.4x)\n",
	    msg.type,
	    msg.data);

	return false;
}
/*
 * Read a PF->VF message from the CSR and dispatch it to the handler.
 * Returns true when the PF2VF interrupt should be re-enabled.
 */
bool
adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev)
{
	struct pfvf_message msg;
	msg = adf_recv_pf2vf_msg(accel_dev);
	if (msg.type) /* A zero type means invalid or no message */
		return adf_handle_pf2vf_msg(accel_dev, msg);
	/* No replies for PF->VF messages at present */
	return true;
}
/**
 * adf_enable_vf2pf_comms() - Function enables communication from vf to pf
 *
 * @accel_dev: Pointer to acceleration device virtual function.
 *
 * Enables the PF2VF interrupt, then performs the initial handshake:
 * version negotiation, capability query and ring-to-service map query.
 * Stops at the first failing step.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int ret;

	hw_data->enable_pf2vf_interrupt(accel_dev);

	ret = adf_vf2pf_request_version(accel_dev);
	if (!ret)
		ret = adf_vf2pf_get_capabilities(accel_dev);
	if (!ret)
		ret = adf_vf2pf_get_ring_to_svc(accel_dev);

	return ret;
}

View file

@ -74,6 +74,10 @@ static void
adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
u32 enable_int_col_mask = 0;
if (csr_ops->get_int_col_ctl_enable_mask)
enable_int_col_mask = csr_ops->get_int_col_ctl_enable_mask();
mtx_lock(&bank->lock);
bank->irq_mask |= (1 << ring);
@ -83,7 +87,8 @@ adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
bank->irq_mask);
csr_ops->write_csr_int_col_ctl(bank->csr_addr,
bank->bank_number,
bank->irq_coalesc_timer);
bank->irq_coalesc_timer |
enable_int_col_mask);
}
static void
@ -142,9 +147,10 @@ adf_handle_response(struct adf_etr_ring_data *ring, u32 quota)
ring->callback((u32 *)msg);
atomic_dec(ring->inflights);
*msg = ADF_RING_EMPTY_SIG;
ring->head = adf_modulo(ring->head + ADF_MSG_SIZE_TO_BYTES(
ring->msg_size),
ADF_RING_SIZE_MODULO(ring->ring_size));
ring->head =
adf_modulo(ring->head +
ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
ADF_RING_SIZE_MODULO(ring->ring_size));
msg_counter++;
msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
}

View file

@ -1,275 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_pf2vf_msg.h"
/**
 * adf_vf2pf_init() - send init msg to PF
 * @accel_dev: Pointer to acceleration VF device.
 *
 * Function sends an init message from the VF to a PF
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_vf2pf_init(struct adf_accel_dev *accel_dev)
{
	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
		   (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
	if (adf_iov_notify(accel_dev, msg, 0)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to send Init event to PF\n");
		return -EFAULT;
	}
	/* Record that the PF acknowledged us and is running. */
	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
	return 0;
}
/**
 * adf_vf2pf_shutdown() - send shutdown msg to PF
 * @accel_dev: Pointer to acceleration VF device.
 *
 * Function sends a shutdown message from the VF to a PF
 *
 * Return: void
 */
void
adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
{
	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
		   (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
	/*
	 * NOTE(review): initializing vf2pf_lock on the shutdown path (rather
	 * than at device init) looks suspicious -- confirm no other thread
	 * can be using the lock while it is re-initialized and destroyed
	 * here.
	 */
	mutex_init(&accel_dev->u1.vf.vf2pf_lock);
	if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
		if (adf_iov_notify(accel_dev, msg, 0))
			device_printf(GET_DEV(accel_dev),
				      "Failed to send Shutdown event to PF\n");
	mutex_destroy(&accel_dev->u1.vf.vf2pf_lock);
}
/*
 * adf_iov_block_get_bc() - request one byte (or the CRC) of a block message
 * @accel_dev: Pointer to acceleration VF device.
 * @msg_type: logical block message type
 * @msg_index: byte index within the block message
 * @data: out: the byte (or CRC) returned by the PF
 * @get_crc: non-zero to request the CRC instead of a data byte
 *
 * Builds the small/medium/large wire request for the given block type,
 * sends it and sleeps (with retries) until the PF's response has been
 * stored by the interrupt handler.
 *
 * Return: 0 on success, error code otherwise.
 */
static int
adf_iov_block_get_bc(struct adf_accel_dev *accel_dev,
		     u8 msg_type,
		     u8 msg_index,
		     u8 *data,
		     int get_crc)
{
	u8 blk_type;
	u32 msg;
	unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
	int response_received = 0;
	int retry_count = 0;
	msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
	if (get_crc)
		msg |= 1 << ADF_VF2PF_BLOCK_REQ_CRC_SHIFT;
	if (msg_type <= ADF_VF2PF_MAX_SMALL_MESSAGE_TYPE) {
		if (msg_index >=
		    ADF_VF2PF_SMALL_PAYLOAD_SIZE + ADF_VF2PF_BLOCK_DATA) {
			device_printf(
			    GET_DEV(accel_dev),
			    "Invalid byte index %d for message type %d\n",
			    msg_index,
			    msg_type);
			return -EINVAL;
		}
		msg |= ADF_VF2PF_MSGTYPE_GET_SMALL_BLOCK_REQ
		    << ADF_VF2PF_MSGTYPE_SHIFT;
		blk_type = msg_type;
		msg |= blk_type << ADF_VF2PF_BLOCK_REQ_TYPE_SHIFT;
		msg |= msg_index << ADF_VF2PF_SMALL_BLOCK_BYTE_NUM_SHIFT;
	} else if (msg_type <= ADF_VF2PF_MAX_MEDIUM_MESSAGE_TYPE) {
		if (msg_index >=
		    ADF_VF2PF_MEDIUM_PAYLOAD_SIZE + ADF_VF2PF_BLOCK_DATA) {
			device_printf(
			    GET_DEV(accel_dev),
			    "Invalid byte index %d for message type %d\n",
			    msg_index,
			    msg_type);
			return -EINVAL;
		}
		msg |= ADF_VF2PF_MSGTYPE_GET_MEDIUM_BLOCK_REQ
		    << ADF_VF2PF_MSGTYPE_SHIFT;
		blk_type = msg_type - ADF_VF2PF_MIN_MEDIUM_MESSAGE_TYPE;
		msg |= blk_type << ADF_VF2PF_BLOCK_REQ_TYPE_SHIFT;
		msg |= msg_index << ADF_VF2PF_MEDIUM_BLOCK_BYTE_NUM_SHIFT;
	} else if (msg_type <= ADF_VF2PF_MAX_LARGE_MESSAGE_TYPE) {
		if (msg_index >=
		    ADF_VF2PF_LARGE_PAYLOAD_SIZE + ADF_VF2PF_BLOCK_DATA) {
			device_printf(
			    GET_DEV(accel_dev),
			    "Invalid byte index %d for message type %d\n",
			    msg_index,
			    msg_type);
			return -EINVAL;
		}
		msg |= ADF_VF2PF_MSGTYPE_GET_LARGE_BLOCK_REQ
		    << ADF_VF2PF_MSGTYPE_SHIFT;
		blk_type = msg_type - ADF_VF2PF_MIN_LARGE_MESSAGE_TYPE;
		msg |= blk_type << ADF_VF2PF_BLOCK_REQ_TYPE_SHIFT;
		msg |= msg_index << ADF_VF2PF_LARGE_BLOCK_BYTE_NUM_SHIFT;
	} else {
		device_printf(GET_DEV(accel_dev),
			      "Invalid message type %d\n",
			      msg_type);
		/*
		 * Bail out instead of sending a malformed request; all other
		 * validation branches above already do so.
		 */
		return -EINVAL;
	}
	accel_dev->u1.vf.iov_msg_completion = 0;
	do {
		/* Send request from VF to PF */
		if (retry_count)
			accel_dev->u1.vf.pfvf_counters.retry++;
		if (adf_iov_putmsg(accel_dev, msg, 0)) {
			device_printf(GET_DEV(accel_dev),
				      "Failed to send block request to PF\n");
			return EIO;
		}
		/* Wait for response */
		mutex_lock(&accel_dev->u1.vf.vf2pf_lock);
		if (accel_dev->u1.vf.iov_msg_completion == 0 &&
		    sx_sleep(&accel_dev->u1.vf.iov_msg_completion,
			     &accel_dev->u1.vf.vf2pf_lock.sx,
			     0,
			     "pfver",
			     timeout) == EWOULDBLOCK) {
			/* It's possible that wakeup could be missed */
			if (accel_dev->u1.vf.iov_msg_completion) {
				response_received = 1;
			} else {
				device_printf(
				    GET_DEV(accel_dev),
				    "IOV request/response message timeout expired\n");
			}
		} else {
			response_received = 1;
		}
		mutex_unlock(&accel_dev->u1.vf.vf2pf_lock);
	} while (!response_received &&
		 ++retry_count < ADF_IOV_MSG_RESP_RETRIES);
	if (!response_received)
		accel_dev->u1.vf.pfvf_counters.rx_timeout++;
	else
		accel_dev->u1.vf.pfvf_counters.rx_rsp++;
	if (!response_received)
		return EIO;
	if (accel_dev->u1.vf.pf2vf_block_resp_type !=
	    (get_crc ? ADF_PF2VF_BLOCK_RESP_TYPE_CRC :
		       ADF_PF2VF_BLOCK_RESP_TYPE_DATA)) {
		device_printf(
		    GET_DEV(accel_dev),
		    "%sBlock response type %d, data %d, msg %d, index %d\n",
		    get_crc ? "CRC " : "",
		    accel_dev->u1.vf.pf2vf_block_resp_type,
		    accel_dev->u1.vf.pf2vf_block_byte,
		    msg_type,
		    msg_index);
		return -EIO;
	}
	*data = accel_dev->u1.vf.pf2vf_block_byte;
	return 0;
}
/* Request payload byte @msg_index of block message @msg_type (no CRC). */
static int
adf_iov_block_get_byte(struct adf_accel_dev *accel_dev,
		       u8 msg_type,
		       u8 msg_index,
		       u8 *data)
{
	const int want_crc = 0;

	return adf_iov_block_get_bc(accel_dev, msg_type, msg_index, data,
				    want_crc);
}
/* Request the CRC of block message @msg_type covering msg_index bytes. */
static int
adf_iov_block_get_crc(struct adf_accel_dev *accel_dev,
		      u8 msg_type,
		      u8 msg_index,
		      u8 *crc)
{
	const int want_crc = 1;

	/* The wire index is 0-based, hence the - 1. */
	return adf_iov_block_get_bc(accel_dev, msg_type, msg_index - 1, crc,
				    want_crc);
}
/*
 * adf_iov_block_get() - retrieve a complete block message from the PF
 * @accel_dev: Pointer to acceleration VF device.
 * @msg_type: block message type to request
 * @block_version: out: version byte reported by the PF
 * @buffer: destination for the payload bytes
 * @length: in: buffer capacity; out: number of payload bytes written
 *
 * Fetches the version, length, payload and CRC byte by byte, truncating
 * the payload to the caller's buffer when necessary, and verifies the
 * CRC8 computed over version + length + payload.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_iov_block_get(struct adf_accel_dev *accel_dev,
		  u8 msg_type,
		  u8 *block_version,
		  u8 *buffer,
		  u8 *length)
{
	u8 buf_size = *length;
	u8 payload_len;
	u8 remote_crc;
	u8 local_crc;
	u8 buf_index;
	int ret;
	if (msg_type > ADF_VF2PF_MAX_LARGE_MESSAGE_TYPE) {
		device_printf(GET_DEV(accel_dev),
			      "Invalid message type %d\n",
			      msg_type);
		return -EINVAL;
	}
	ret = adf_iov_block_get_byte(accel_dev,
				     msg_type,
				     ADF_VF2PF_BLOCK_VERSION_BYTE,
				     block_version);
	if (ret)
		return ret;
	ret = adf_iov_block_get_byte(accel_dev,
				     msg_type,
				     ADF_VF2PF_BLOCK_LEN_BYTE,
				     length);
	if (ret)
		return ret;
	payload_len = *length;
	if (buf_size < payload_len) {
		device_printf(
		    GET_DEV(accel_dev),
		    "Truncating block type %d response from %d to %d bytes\n",
		    msg_type,
		    payload_len,
		    buf_size);
		payload_len = buf_size;
	}
	/* Get the data */
	for (buf_index = 0; buf_index < payload_len; buf_index++) {
		ret = adf_iov_block_get_byte(accel_dev,
					     msg_type,
					     buf_index + ADF_VF2PF_BLOCK_DATA,
					     buffer + buf_index);
		if (ret)
			return ret;
	}
	ret = adf_iov_block_get_crc(accel_dev,
				    msg_type,
				    payload_len + ADF_VF2PF_BLOCK_DATA,
				    &remote_crc);
	if (ret)
		return ret;
	/* The CRC covers version, length and payload, in that order. */
	local_crc = adf_pfvf_crc(ADF_CRC8_INIT_VALUE, block_version, 1);
	local_crc = adf_pfvf_crc(local_crc, length, 1);
	local_crc = adf_pfvf_crc(local_crc, buffer, payload_len);
	if (local_crc != remote_crc) {
		device_printf(
		    GET_DEV(accel_dev),
		    "CRC error on msg type %d. Local %02X, remote %02X\n",
		    msg_type,
		    local_crc,
		    remote_crc);
		accel_dev->u1.vf.pfvf_counters.crc_err++;
		return EIO;
	}
	accel_dev->u1.vf.pfvf_counters.blk_rx++;
	*length = payload_len;
	return 0;
}

View file

@ -6,6 +6,7 @@
#include <sys/systm.h>
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/interrupt.h>
#include <dev/pci/pcivar.h>
#include <sys/param.h>
#include <linux/workqueue.h>
@ -16,19 +17,17 @@
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include "adf_pf2vf_msg.h"
#define ADF_VINTSOU_BUN BIT(0)
#define ADF_VINTSOU_PF2VF BIT(1)
#include "adf_pfvf_utils.h"
static TASKQUEUE_DEFINE_THREAD(qat_vf);
static TASKQUEUE_DEFINE_THREAD(qat_bank_handler);
static struct workqueue_struct *adf_vf_stop_wq;
static DEFINE_MUTEX(vf_stop_wq_lock);
struct adf_vf_stop_data {
struct adf_accel_dev *accel_dev;
struct work_struct vf_stop_work;
struct work_struct work;
};
static int
@ -57,135 +56,84 @@ static void
adf_dev_stop_async(struct work_struct *work)
{
struct adf_vf_stop_data *stop_data =
container_of(work, struct adf_vf_stop_data, vf_stop_work);
container_of(work, struct adf_vf_stop_data, work);
struct adf_accel_dev *accel_dev = stop_data->accel_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
adf_dev_restarting_notify(accel_dev);
adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
/* Re-enable PF2VF interrupts */
adf_enable_pf2vf_interrupts(accel_dev);
hw_data->enable_pf2vf_interrupt(accel_dev);
kfree(stop_data);
}
/*
 * adf_pf2vf_handle_pf_restarting() - react to a PF "restarting" event
 * @accel_dev: Pointer to acceleration VF device.
 *
 * Marks the PF as no longer running and defers the actual device stop to
 * the adf_vf_stop_wq workqueue (it cannot be done from this context).
 *
 * Return: 0 on success, -ENOMEM if the work item cannot be allocated.
 */
int
adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev)
{
	struct adf_vf_stop_data *stop_data;
	clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
	stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
	if (!stop_data) {
		device_printf(GET_DEV(accel_dev),
			      "Couldn't schedule stop for vf_%d\n",
			      accel_dev->accel_id);
		return -ENOMEM;
	}
	stop_data->accel_dev = accel_dev;
	INIT_WORK(&stop_data->work, adf_dev_stop_async);
	queue_work(adf_vf_stop_wq, &stop_data->work);
	return 0;
}
/*
 * adf_pf2vf_handle_pf_rp_reset() - handle a ring-pair reset response
 * @accel_dev: Pointer to acceleration VF device.
 * @msg: the RP_RESET_RESP message received from the PF
 *
 * Stores the reset status from the message, logs the outcome and wakes
 * the thread waiting in adf_send_vf2pf_req().
 *
 * Return: 0 always.
 */
int
adf_pf2vf_handle_pf_rp_reset(struct adf_accel_dev *accel_dev,
			     struct pfvf_message msg)
{
	accel_dev->u1.vf.rpreset_sts = msg.data;
	if (accel_dev->u1.vf.rpreset_sts == RPRESET_SUCCESS)
		device_printf(
		    GET_DEV(accel_dev),
		    "rpreset resp(success) from PF type:0x%x data:0x%x\n",
		    msg.type,
		    msg.data);
	else if (accel_dev->u1.vf.rpreset_sts == RPRESET_NOT_SUPPORTED)
		device_printf(
		    GET_DEV(accel_dev),
		    "rpreset resp(not supported) from PF type:0x%x data:0x%x\n",
		    msg.type,
		    msg.data);
	else if (accel_dev->u1.vf.rpreset_sts == RPRESET_INVAL_BANK)
		device_printf(
		    GET_DEV(accel_dev),
		    "rpreset resp(invalid bank) from PF type:0x%x data:0x%x\n",
		    msg.type,
		    msg.data);
	else
		/* Fixed malformed "\nn" line terminator in the log message. */
		device_printf(
		    GET_DEV(accel_dev),
		    "rpreset resp(timeout) from PF type:0x%x data:0x%x\n",
		    msg.type,
		    msg.data);
	complete(&accel_dev->u1.vf.msg_received);
	return 0;
}
static void
adf_pf2vf_bh_handler(void *data, int pending)
{
struct adf_accel_dev *accel_dev = data;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_bar *pmisc =
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
struct resource *pmisc_bar_addr = pmisc->virt_addr;
u32 msg;
bool is_notification = false;
/* Read the message from PF */
msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0));
if (!(msg & ADF_PF2VF_INT)) {
device_printf(GET_DEV(accel_dev),
"Spurious PF2VF interrupt. msg %X. Ignored\n",
msg);
accel_dev->u1.vf.pfvf_counters.spurious++;
goto out;
}
accel_dev->u1.vf.pfvf_counters.rx++;
if (adf_recv_and_handle_pf2vf_msg(accel_dev))
/* Re-enable PF2VF interrupts */
hw_data->enable_pf2vf_interrupt(accel_dev);
if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM)) {
device_printf(GET_DEV(accel_dev),
"Ignore non-system PF2VF message(0x%x)\n",
msg);
/*
* To ack, clear the VF2PFINT bit.
* Because this must be a legacy message, the far side
* must clear the in-use pattern.
*/
msg &= ~ADF_PF2VF_INT;
ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
goto out;
}
switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
case ADF_PF2VF_MSGTYPE_RESTARTING: {
struct adf_vf_stop_data *stop_data;
is_notification = true;
device_printf(GET_DEV(accel_dev),
"Restarting msg received from PF 0x%x\n",
msg);
clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
if (!stop_data) {
device_printf(GET_DEV(accel_dev),
"Couldn't schedule stop for vf_%d\n",
accel_dev->accel_id);
goto out;
}
stop_data->accel_dev = accel_dev;
INIT_WORK(&stop_data->vf_stop_work, adf_dev_stop_async);
queue_work(adf_vf_stop_wq, &stop_data->vf_stop_work);
break;
}
case ADF_PF2VF_MSGTYPE_VERSION_RESP:
device_printf(GET_DEV(accel_dev),
"Version resp received from PF 0x%x\n",
msg);
is_notification = false;
accel_dev->u1.vf.pf_version =
(msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
ADF_PF2VF_VERSION_RESP_VERS_SHIFT;
accel_dev->u1.vf.compatible =
(msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >>
ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
accel_dev->u1.vf.iov_msg_completion = 1;
wakeup(&accel_dev->u1.vf.iov_msg_completion);
break;
case ADF_PF2VF_MSGTYPE_BLOCK_RESP:
is_notification = false;
accel_dev->u1.vf.pf2vf_block_byte =
(msg & ADF_PF2VF_BLOCK_RESP_DATA_MASK) >>
ADF_PF2VF_BLOCK_RESP_DATA_SHIFT;
accel_dev->u1.vf.pf2vf_block_resp_type =
(msg & ADF_PF2VF_BLOCK_RESP_TYPE_MASK) >>
ADF_PF2VF_BLOCK_RESP_TYPE_SHIFT;
accel_dev->u1.vf.iov_msg_completion = 1;
wakeup(&accel_dev->u1.vf.iov_msg_completion);
break;
case ADF_PF2VF_MSGTYPE_FATAL_ERROR:
device_printf(GET_DEV(accel_dev),
"Fatal error received from PF 0x%x\n",
msg);
is_notification = true;
if (adf_notify_fatal_error(accel_dev))
device_printf(GET_DEV(accel_dev),
"Couldn't notify fatal error\n");
break;
default:
device_printf(GET_DEV(accel_dev),
"Unknown PF2VF message(0x%x)\n",
msg);
}
/* To ack, clear the PF2VFINT bit */
msg &= ~ADF_PF2VF_INT;
/*
* Clear the in-use pattern if the sender won't do it.
* Because the compatibility version must be the first message
* exchanged between the VF and PF, the pf.version must be
* set at this time.
* The in-use pattern is not cleared for notifications so that
* it can be used for collision detection.
*/
if (accel_dev->u1.vf.pf_version >= ADF_PFVF_COMPATIBILITY_FAST_ACK &&
!is_notification)
msg &= ~ADF_PF2VF_IN_USE_BY_PF_MASK;
ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
out:
/* Re-enable PF2VF interrupts */
adf_enable_pf2vf_interrupts(accel_dev);
return;
}
@ -211,46 +159,86 @@ adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
mutex_destroy(&accel_dev->u1.vf.vf2pf_lock);
}
/* Taskqueue trampoline: forward the bank pointer to the response handler. */
static void
adf_bh_handler(void *data, int pending)
{
	struct adf_etr_bank_data *bank = data;

	adf_response_handler((uintptr_t)bank);
}
/* Initialize the per-bank response-handler tasks.  Always succeeds. */
static int
adf_setup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *transport = accel_dev->transport;
	int bank;

	for (bank = 0; bank < GET_MAX_BANKS(accel_dev); bank++)
		TASK_INIT(&transport->banks[bank].resp_handler,
			  0,
			  adf_bh_handler,
			  &transport->banks[bank]);

	return 0;
}
/* Cancel and drain every bank's response-handler task, if any were set up. */
static void
adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr;
	int bank;

	if (!accel_dev || !accel_dev->transport)
		return;

	etr = accel_dev->transport;
	for (bank = 0; bank < GET_MAX_BANKS(accel_dev); bank++) {
		taskqueue_cancel(taskqueue_qat_bank_handler,
				 &etr->banks[bank].resp_handler,
				 NULL);
		taskqueue_drain(taskqueue_qat_bank_handler,
				&etr->banks[bank].resp_handler);
	}
}
static void
adf_isr(void *privdata)
{
struct adf_accel_dev *accel_dev = privdata;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct adf_bar *pmisc =
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
struct resource *pmisc_bar_addr = pmisc->virt_addr;
u32 v_int, v_mask;
int handled = 0;
/* Read VF INT source CSR to determine the source of VF interrupt */
v_int = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_vintsou_offset());
v_mask = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_vintmsk_offset(0));
struct adf_hw_csr_ops *csr_ops = &hw_data->csr_info.csr_ops;
int int_active_bundles = 0;
int i = 0;
/* Check for PF2VF interrupt */
if ((v_int & ~v_mask) & ADF_VINTSOU_PF2VF) {
if (hw_data->interrupt_active_pf2vf(accel_dev)) {
/* Disable PF to VF interrupt */
adf_disable_pf2vf_interrupts(accel_dev);
hw_data->disable_pf2vf_interrupt(accel_dev);
/* Schedule tasklet to handle interrupt BH */
taskqueue_enqueue(taskqueue_qat_vf,
&accel_dev->u1.vf.pf2vf_bh_tasklet);
handled = 1;
}
if ((v_int & ~v_mask) & ADF_VINTSOU_BUN) {
struct adf_etr_data *etr_data = accel_dev->transport;
struct adf_etr_bank_data *bank = &etr_data->banks[0];
if (hw_data->get_int_active_bundles)
int_active_bundles = hw_data->get_int_active_bundles(accel_dev);
/* Disable Flag and Coalesce Ring Interrupts */
WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr,
bank->bank_number,
0);
adf_response_handler((uintptr_t)&etr_data->banks[0]);
handled = 1;
for (i = 0; i < GET_MAX_BANKS(accel_dev); i++) {
if (int_active_bundles & BIT(i)) {
struct adf_etr_data *etr_data = accel_dev->transport;
struct adf_etr_bank_data *bank = &etr_data->banks[i];
/* Disable Flag and Coalesce Ring Interrupts */
csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
bank->bank_number,
0);
/* Schedule tasklet to handle interrupt BH */
taskqueue_enqueue(taskqueue_qat_bank_handler,
&bank->resp_handler);
}
}
if (handled)
return;
}
static int
@ -259,6 +247,8 @@ adf_request_msi_irq(struct adf_accel_dev *accel_dev)
device_t pdev = accel_to_pci_dev(accel_dev);
int ret;
int rid = 1;
int cpu;
accel_dev->u1.vf.irq =
bus_alloc_resource_any(pdev, SYS_RES_IRQ, &rid, RF_ACTIVE);
if (accel_dev->u1.vf.irq == NULL) {
@ -273,23 +263,24 @@ adf_request_msi_irq(struct adf_accel_dev *accel_dev)
accel_dev,
&accel_dev->u1.vf.cookie);
if (ret) {
device_printf(GET_DEV(accel_dev),
"failed to enable irq for %s\n",
accel_dev->u1.vf.irq_name);
return ret;
device_printf(GET_DEV(accel_dev), "failed to enable irq\n");
goto errout;
}
cpu = accel_dev->accel_id % num_online_cpus();
ret = bus_bind_intr(pdev, accel_dev->u1.vf.irq, cpu);
if (ret) {
device_printf(GET_DEV(accel_dev),
"failed to bind IRQ handler to cpu core\n");
goto errout;
}
accel_dev->u1.vf.irq_enabled = true;
return ret;
}
errout:
bus_free_resource(pdev, SYS_RES_IRQ, accel_dev->u1.vf.irq);
static int
adf_setup_bh(struct adf_accel_dev *accel_dev)
{
return 0;
}
static void
adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
return ret;
}
/**
@ -302,8 +293,13 @@ void
adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
device_t pdev = accel_to_pci_dev(accel_dev);
bus_teardown_intr(pdev, accel_dev->u1.vf.irq, accel_dev->u1.vf.cookie);
bus_free_resource(pdev, SYS_RES_IRQ, accel_dev->u1.vf.irq);
if (accel_dev->u1.vf.irq_enabled) {
bus_teardown_intr(pdev,
accel_dev->u1.vf.irq,
accel_dev->u1.vf.cookie);
bus_free_resource(pdev, SYS_RES_IRQ, accel_dev->u1.vf.irq);
}
adf_cleanup_bh(accel_dev);
adf_cleanup_pf2vf_bh(accel_dev);
adf_disable_msi(accel_dev);
@ -324,30 +320,39 @@ adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
goto err_out;
if (adf_setup_pf2vf_bh(accel_dev))
goto err_out;
goto err_disable_msi;
if (adf_setup_bh(accel_dev))
goto err_out;
if (adf_request_msi_irq(accel_dev))
goto err_out;
goto err_disable_msi;
return 0;
err_disable_msi:
adf_disable_msi(accel_dev);
err_out:
adf_vf_isr_resource_free(accel_dev);
return EFAULT;
return -EFAULT;
}
/**
* adf_flush_vf_wq() - Flush workqueue for VF
* @accel_dev: Pointer to acceleration device.
*
* Function flushes workqueue 'adf_vf_stop_wq' for VF.
* Function disables the PF/VF interrupts on the VF so that no new messages
* are received and flushes the workqueue 'adf_vf_stop_wq'.
*
* Return: void.
*/
void
adf_flush_vf_wq(void)
adf_flush_vf_wq(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
hw_data->disable_pf2vf_interrupt(accel_dev);
if (adf_vf_stop_wq)
flush_workqueue(adf_vf_stop_wq);
}
@ -376,18 +381,11 @@ adf_init_vf_wq(void)
return ret;
}
/**
* adf_exit_vf_wq() - Destroy workqueue for VF
*
* Function destroy workqueue 'adf_vf_stop_wq' for VF.
*
* Return: void.
*/
void
adf_exit_vf_wq(void)
{
if (adf_vf_stop_wq) {
if (adf_vf_stop_wq)
destroy_workqueue(adf_vf_stop_wq);
adf_vf_stop_wq = NULL;
}
adf_vf_stop_wq = NULL;
}

View file

@ -13,6 +13,9 @@ qat_common_register(void)
if (adf_init_fatal_error_wq())
return EFAULT;
if (adf_register_ctl_device_driver())
return EFAULT;
return 0;
}
@ -22,7 +25,7 @@ qat_common_unregister(void)
adf_exit_vf_wq();
adf_exit_aer();
adf_exit_fatal_error_wq();
adf_clean_vf_map(false);
adf_unregister_ctl_device_driver();
}
static int

View file

@ -4,9 +4,10 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include <adf_pf2vf_msg.h>
#include <adf_pfvf_msg.h>
#include <adf_dev_err.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_200xx_hw_data.h"
#include "icp_qat_hw.h"
#include "adf_heartbeat.h"
@ -143,18 +144,6 @@ adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
*arb_map_config = thrd_to_arb_map_gen;
}
static u32
get_pf2vf_offset(u32 i)
{
return ADF_200XX_PF2VF_OFFSET(i);
}
static u32
get_vintmsk_offset(u32 i)
{
return ADF_200XX_VINTMSK_OFFSET(i);
}
static void
get_arb_info(struct arb_info *arb_csrs_info)
{
@ -489,8 +478,6 @@ adf_init_hw_data_200xx(struct adf_hw_device_data *hw_data)
hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_pf2vf_offset = get_pf2vf_offset;
hw_data->get_vintmsk_offset = get_vintmsk_offset;
hw_data->get_arb_info = get_arb_info;
hw_data->get_admin_info = get_admin_info;
hw_data->get_errsou_offset = get_errsou_offset;
@ -509,11 +496,8 @@ adf_init_hw_data_200xx(struct adf_hw_device_data *hw_data)
hw_data->enable_ints = adf_enable_ints;
hw_data->set_ssm_wdtimer = adf_set_ssm_wdtimer;
hw_data->check_slice_hang = adf_check_slice_hang;
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
hw_data->disable_vf2pf_comms = adf_pf_disable_vf2pf_comms;
hw_data->restore_device = adf_dev_restore;
hw_data->reset_device = adf_reset_flr;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
hw_data->measure_clock = measure_clock;
hw_data->get_ae_clock = get_ae_clock;
hw_data->reset_device = adf_reset_flr;
@ -536,6 +520,7 @@ adf_init_hw_data_200xx(struct adf_hw_device_data *hw_data)
hw_data->post_reset = adf_dev_post_reset;
adf_gen2_init_hw_csr_info(&hw_data->csr_info);
adf_gen2_init_pf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
}
void

View file

@ -6,8 +6,10 @@
#include <adf_cfg.h>
#include <adf_common_drv.h>
#include <adf_dev_err.h>
#include <adf_pf2vf_msg.h>
#include <adf_pfvf_msg.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_timer.h>
#include "adf_4xxx_hw_data.h"
#include "adf_heartbeat.h"
#include "icp_qat_fw_init_admin.h"
@ -85,17 +87,17 @@ struct adf_enabled_services {
u16 rng_to_svc_msk;
};
static struct adf_enabled_services adf_4xxx_svcs[] = {
{ "dc", ADF_4XXX_DC },
{ "sym", ADF_4XXX_SYM },
{ "asym", ADF_4XXX_ASYM },
{ "dc;asym", ADF_4XXX_ASYM_DC },
{ "asym;dc", ADF_4XXX_ASYM_DC },
{ "sym;dc", ADF_4XXX_SYM_DC },
{ "dc;sym", ADF_4XXX_SYM_DC },
{ "asym;sym", ADF_4XXX_ASYM_SYM },
{ "sym;asym", ADF_4XXX_ASYM_SYM },
};
static struct adf_enabled_services adf_4xxx_svcs[] =
{ { "dc", ADF_4XXX_DC },
{ "sym", ADF_4XXX_SYM },
{ "asym", ADF_4XXX_ASYM },
{ "dc;asym", ADF_4XXX_ASYM_DC },
{ "asym;dc", ADF_4XXX_ASYM_DC },
{ "sym;dc", ADF_4XXX_SYM_DC },
{ "dc;sym", ADF_4XXX_SYM_DC },
{ "asym;sym", ADF_4XXX_ASYM_SYM },
{ "sym;asym", ADF_4XXX_ASYM_SYM },
{ "cy", ADF_4XXX_ASYM_SYM } };
static struct adf_hw_device_class adf_4xxx_class = {
.name = ADF_4XXX_DEVICE_NAME,
@ -117,6 +119,12 @@ get_ae_mask(struct adf_accel_dev *accel_dev)
return ~fusectl4 & ADF_4XXX_ACCELENGINES_MASK;
}
static void
adf_set_asym_rings_mask(struct adf_accel_dev *accel_dev)
{
accel_dev->hw_device->asym_rings_mask = ADF_4XXX_DEF_ASYM_MASK;
}
static int
get_ring_to_svc_map(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map)
{
@ -216,28 +224,45 @@ adf_4xxx_get_hw_cap(struct adf_accel_dev *accel_dev)
ICP_ACCEL_CAPABILITIES_COMPRESSION |
ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_SHA3_EXT |
ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 |
ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
ICP_ACCEL_CAPABILITIES_AES_V2 | ICP_ACCEL_CAPABILITIES_RL;
ICP_ACCEL_CAPABILITIES_SHA3 | ICP_ACCEL_CAPABILITIES_HKDF |
ICP_ACCEL_CAPABILITIES_SHA3_EXT | ICP_ACCEL_CAPABILITIES_SM3 |
ICP_ACCEL_CAPABILITIES_SM4 | ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
ICP_ACCEL_CAPABILITIES_AESGCM_SPC | ICP_ACCEL_CAPABILITIES_AES_V2 |
ICP_ACCEL_CAPABILITIES_RL | ICP_ACCEL_CAPABILITIES_ECEDMONT |
ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
capabilities &= ~ICP_ACCEL_CAPABILITIES_HKDF;
capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE)
if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE)
capabilities &= ~ICP_ACCEL_CAPABILITIES_SHA3;
capabilities &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
if (fusectl1 & ICP_ACCEL_MASK_PKE_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
capabilities &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
}
if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
capabilities &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
capabilities &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
capabilities &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
}
if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_SM3;
capabilities &= ~ICP_ACCEL_CAPABILITIES_SM4;
}
if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
capabilities &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
capabilities &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
capabilities &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
return capabilities;
}
@ -388,15 +413,18 @@ get_accel_unit_config(struct adf_accel_dev *accel_dev,
if (!*num_sym_au || !(service_mask & ADF_ACCEL_CRYPTO)) {
disabled_caps = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
ICP_ACCEL_CAPABILITIES_CIPHER |
ICP_ACCEL_CAPABILITIES_SHA3 |
ICP_ACCEL_CAPABILITIES_SHA3_EXT |
ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 |
ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_SM3 |
ICP_ACCEL_CAPABILITIES_SM4 |
ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
ICP_ACCEL_CAPABILITIES_AES_V2;
ICP_ACCEL_CAPABILITIES_AES_V2 |
ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
}
if (!*num_asym_au || !(service_mask & ADF_ACCEL_ASYM)) {
disabled_caps |= ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
ICP_ACCEL_CAPABILITIES_ECEDMONT;
}
if (!*num_dc_au || !(service_mask & ADF_ACCEL_COMPRESSION)) {
disabled_caps |= ICP_ACCEL_CAPABILITIES_COMPRESSION |
@ -771,6 +799,7 @@ adf_4xxx_cfg_gen_dispatch_arbiter(struct adf_accel_dev *accel_dev,
u16 service_type;
u32 service_mask;
unsigned long thd_srv_mask = default_active_thd_mask;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
ena_srv_mask = accel_dev->hw_device->ring_to_svc_map;
/* If ring_to_svc_map is not changed, return default arbiter value */
@ -798,6 +827,8 @@ adf_4xxx_cfg_gen_dispatch_arbiter(struct adf_accel_dev *accel_dev,
if (au->services == ADF_ACCEL_COMPRESSION)
thd_srv_mask = dc_me_active_thd_mask;
else if (au->services == ADF_ACCEL_ASYM)
thd_srv_mask = hw_data->asym_ae_active_thd_mask;
else
thd_srv_mask = default_active_thd_mask;
@ -901,7 +932,7 @@ adf_init_device(struct adf_accel_dev *accel_dev)
}
void
adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 id)
{
hw_data->dev_class = &adf_4xxx_class;
hw_data->instance_id = adf_4xxx_class.instances++;
@ -954,16 +985,28 @@ adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->set_msix_rttable = set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
hw_data->disable_iov = adf_disable_sriov;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
hw_data->config_device = adf_config_device;
hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask;
hw_data->set_asym_rings_mask = adf_set_asym_rings_mask;
hw_data->get_hb_clock = get_hb_clock;
hw_data->int_timer_init = adf_int_timer_init;
hw_data->int_timer_exit = adf_int_timer_exit;
hw_data->get_heartbeat_status = adf_get_heartbeat_status;
hw_data->get_ae_clock = get_ae_clock;
hw_data->measure_clock = measure_clock;
hw_data->query_storage_cap = 1;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
switch (id) {
case ADF_401XX_PCI_DEVICE_ID:
hw_data->asym_ae_active_thd_mask = DEFAULT_401XX_ASYM_AE_MASK;
break;
case ADF_4XXX_PCI_DEVICE_ID:
default:
hw_data->asym_ae_active_thd_mask = DEFAULT_4XXX_ASYM_AE_MASK;
}
adf_gen4_init_hw_csr_info(&hw_data->csr_info);
adf_gen4_init_pf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
}
void

View file

@ -6,6 +6,9 @@
#include <adf_accel_devices.h>
#define DEFAULT_4XXX_ASYM_AE_MASK 0x03
#define DEFAULT_401XX_ASYM_AE_MASK 0x3F
/* PCIe configuration space */
#define ADF_4XXX_SRAM_BAR 0
#define ADF_4XXX_PMISC_BAR 1
@ -56,6 +59,7 @@
#define ADF_4XXX_ERRMSK3 (0x41A21C)
#define ADF_4XXX_VFLNOTIFY BIT(7)
#define ADF_4XXX_DEF_ASYM_MASK 0x1
/* Arbiter configuration */
#define ADF_4XXX_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
@ -105,7 +109,7 @@ enum icp_qat_4xxx_slice_mask {
ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(6),
};
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data);
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 id);
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data);
#endif

View file

@ -91,12 +91,12 @@ adf_attach(device_t dev)
int ret, rid;
struct adf_cfg_device *cfg_dev = NULL;
/* Set pci MaxPayLoad to 256. Implemented to avoid the issue of
/* Set pci MaxPayLoad to 512. Implemented to avoid the issue of
* Pci-passthrough causing Maxpayload to be reset to 128 bytes
* when the device is reset.
*/
if (pci_get_max_payload(dev) != 256)
pci_set_max_payload(dev, 256);
if (pci_get_max_payload(dev) != 512)
pci_set_max_payload(dev, 512);
accel_dev = device_get_softc(dev);
@ -119,7 +119,7 @@ adf_attach(device_t dev)
hw_data = malloc(sizeof(*hw_data), M_QAT_4XXX, M_WAITOK | M_ZERO);
accel_dev->hw_device = hw_data;
adf_init_hw_data_4xxx(accel_dev->hw_device);
adf_init_hw_data_4xxx(accel_dev->hw_device, pci_get_device(dev));
accel_pci_dev->revid = pci_get_revid(dev);
hw_data->fuses = pci_read_config(dev, ADF_4XXX_FUSECTL4_OFFSET, 4);
if (accel_pci_dev->revid == 0x00) {
@ -154,7 +154,7 @@ adf_attach(device_t dev)
if (ret)
goto out_err;
pci_set_max_read_req(dev, 1024);
pci_set_max_read_req(dev, 4096);
ret = bus_dma_tag_create(bus_get_dma_tag(dev),
1,

View file

@ -0,0 +1,390 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include <adf_accel_devices.h>
#include <adf_cfg.h>
#include <adf_common_drv.h>
#include <adf_gen4vf_hw_csr_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_pfvf_vf_msg.h>
#include "adf_4xxxvf_hw_data.h"
#include "icp_qat_hw.h"
#include "adf_transport_internal.h"
#include "adf_pfvf_vf_proto.h"
/* Device class shared by every 4xxx VF instance; instance count is
 * maintained in adf_init/clean_hw_data_4xxxiov(). */
static struct adf_hw_device_class adf_4xxxiov_class =
    { .name = ADF_4XXXVF_DEVICE_NAME, .type = DEV_4XXXVF, .instances = 0 };

/*
 * Ring-pair to service assignments.  Each VF has four ring pairs; every
 * macro below packs one service id per pair (pair 0 in the low bits,
 * pairs 1-3 shifted by ADF_CFG_SERV_RING_PAIR_n_SHIFT).
 */
#define ADF_4XXXIOV_DEFAULT_RING_TO_SRV_MAP                                    \
	(ASYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                        \
	 ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                              \
	 SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

/* "cy" (crypto) is the same alternating asym/sym layout as the default. */
#define ADF_4XXXIOV_ASYM_SYM ADF_4XXXIOV_DEFAULT_RING_TO_SRV_MAP

/* All four pairs dedicated to compression. */
#define ADF_4XXXIOV_DC                                                         \
	(COMP | COMP << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                       \
	 COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                              \
	 COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

/* All four pairs dedicated to symmetric crypto. */
#define ADF_4XXXIOV_SYM                                                        \
	(SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                         \
	 SYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                               \
	 SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

/* All four pairs dedicated to asymmetric crypto. */
#define ADF_4XXXIOV_ASYM                                                       \
	(ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                       \
	 ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                              \
	 ASYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

/* Pairs 0-1 asym, pairs 2-3 compression. */
#define ADF_4XXXIOV_ASYM_DC                                                    \
	(ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                       \
	 COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                              \
	 COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

/* Pairs 0-1 sym, pairs 2-3 compression. */
#define ADF_4XXXIOV_SYM_DC                                                     \
	(SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                         \
	 COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                              \
	 COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

/* No service assigned to any pair. */
#define ADF_4XXXIOV_NA                                                         \
	(NA | NA << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                           \
	 NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                                \
	 NA << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

/* Maps a user-visible "ServicesEnabled" string to its ring/service mask. */
struct adf_enabled_services {
	const char svcs_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u16 rng_to_svc_msk;
};

/* Lookup table consumed by get_ring_to_svc_map(); order-insensitive
 * combinations ("dc;asym" vs "asym;dc") map to the same mask. */
static struct adf_enabled_services adf_4xxxiov_svcs[] =
    { { "dc", ADF_4XXXIOV_DC },
      { "sym", ADF_4XXXIOV_SYM },
      { "asym", ADF_4XXXIOV_ASYM },
      { "dc;asym", ADF_4XXXIOV_ASYM_DC },
      { "asym;dc", ADF_4XXXIOV_ASYM_DC },
      { "sym;dc", ADF_4XXXIOV_SYM_DC },
      { "dc;sym", ADF_4XXXIOV_SYM_DC },
      { "asym;sym", ADF_4XXXIOV_ASYM_SYM },
      { "sym;asym", ADF_4XXXIOV_ASYM_SYM },
      { "cy", ADF_4XXXIOV_ASYM_SYM } };
/* Accelerator bitmap for a 4xxx VF: a single accelerator (bit 0). */
static u32
get_accel_mask(struct adf_accel_dev *accel_dev)
{
	return ADF_4XXXIOV_ACCELERATORS_MASK;
}
/* Acceleration-engine bitmap for a 4xxx VF: a single engine (bit 0). */
static u32
get_ae_mask(struct adf_accel_dev *accel_dev)
{
	return ADF_4XXXIOV_ACCELENGINES_MASK;
}
/* Number of accelerators exposed by the VF (always 1). */
static u32
get_num_accels(struct adf_hw_device_data *self)
{
	return ADF_4XXXIOV_MAX_ACCELERATORS;
}
/* Number of acceleration engines exposed by the VF (always 1). */
static u32
get_num_aes(struct adf_hw_device_data *self)
{
	return ADF_4XXXIOV_MAX_ACCELENGINES;
}
/* PCI BAR index holding the miscellaneous (PMISC) CSRs. */
static u32
get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXXIOV_PMISC_BAR;
}
/* PCI BAR index holding the transport (ETR) ring CSRs. */
static u32
get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXXIOV_ETR_BAR;
}
/* Derive the CPP clock from the stored high-speed clock frequency. */
static u32
get_clock_speed(struct adf_hw_device_data *self)
{
	/* CPP clock is half high-speed clock */
	return self->clock_frequency / 2;
}
/* All 4xxx virtual functions report the generic VF SKU. */
static enum dev_sku_info
get_sku(struct adf_hw_device_data *self)
{
	return DEV_SKU_VF;
}
/* Int-returning no-op used for hw ops the VF does not need
 * (admin comms, arbiter init); always reports success. */
static int
adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
	return 0;
}
/* Void no-op used for hw ops the VF does not need
 * (error correction, interrupt enable, arbiter exit). */
static void
adf_vf_void_noop(struct adf_accel_dev *accel_dev)
{
}
/**
 * adf_4xxxvf_get_hw_cap() - Compute the capability mask of a 4xxx VF.
 * @accel_dev: acceleration device whose PCI function is queried.
 *
 * Starts from the full 4xxx VF capability set and clears the
 * capabilities whose hardware slices are fused off, as reported by the
 * VFFUSECTL1 configuration-space register.
 *
 * Return: bitmask of ICP_ACCEL_CAPABILITIES_* flags.
 */
u32
adf_4xxxvf_get_hw_cap(struct adf_accel_dev *accel_dev)
{
	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
	u32 vffusectl1;
	u32 capabilities;

	/*
	 * Capability flags are single bits, so combine them with bitwise
	 * OR (matching the PF-side code) rather than '+', which only
	 * worked by relying on the bits being distinct.
	 */
	capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
	    ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
	    ICP_ACCEL_CAPABILITIES_CIPHER |
	    ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
	    ICP_ACCEL_CAPABILITIES_COMPRESSION |
	    ICP_ACCEL_CAPABILITIES_SHA3_EXT | ICP_ACCEL_CAPABILITIES_SM2 |
	    ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 |
	    ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
	    ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
	    ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 |
	    ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
	    ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;

	/* Get fused capabilities */
	vffusectl1 = pci_read_config(pdev, ADF_4XXXIOV_VFFUSECTL1_OFFSET, 4);

	if (vffusectl1 & BIT(7)) {
		capabilities &=
		    ~(ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4);
	}
	if (vffusectl1 & BIT(6)) {
		capabilities &= ~ICP_ACCEL_CAPABILITIES_SM3;
	}
	if (vffusectl1 & BIT(3)) {
		/* Compression slice fused off also loses CnV integrity. */
		capabilities &= ~(ICP_ACCEL_CAPABILITIES_COMPRESSION |
				  ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64);
	}
	if (vffusectl1 & BIT(2)) {
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
	}
	if (vffusectl1 & BIT(1)) {
		capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
	}
	if (vffusectl1 & BIT(0)) {
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}
	return capabilities;
}
/* Install the default asymmetric-crypto ring mask (ring 0 only). */
static void
adf_set_asym_rings_mask(struct adf_accel_dev *accel_dev)
{
	accel_dev->hw_device->asym_rings_mask = ADF_4XXX_DEF_ASYM_MASK;
}
/* Unmask the PF-to-VM doorbell interrupt in the PMISC BAR. */
static void
enable_pf2vm_interrupt(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *misc_bar =
	    &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	struct resource *csr = misc_bar->virt_addr;

	/* Writing 0 to the mask register leaves no PF2VM source masked. */
	ADF_CSR_WR(csr, ADF_4XXXIOV_VINTMSKPF2VM_OFFSET, 0x0);
}
/* Mask the PF-to-VM doorbell interrupt in the PMISC BAR. */
static void
disable_pf2vm_interrupt(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *misc_bar =
	    &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	struct resource *csr = misc_bar->virt_addr;

	/* Setting bit 0 in the mask register suppresses PF2VM delivery. */
	ADF_CSR_WR(csr, ADF_4XXXIOV_VINTMSKPF2VM_OFFSET, BIT(0));
}
/* Report whether an unmasked PF-to-VM interrupt is currently pending. */
static int
interrupt_active_pf2vm(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *misc_bar =
	    &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	struct resource *csr = misc_bar->virt_addr;
	u32 sources, mask;

	sources = ADF_CSR_RD(csr, ADF_4XXXIOV_VINTSOUPF2VM_OFFSET);
	mask = ADF_CSR_RD(csr, ADF_4XXXIOV_VINTMSKPF2VM_OFFSET);

	/* Bit 0 raised in the source register and not masked -> pending. */
	return ((sources & ~mask) & BIT(0)) != 0;
}
/* Return the bitmap of ring bundles with an unmasked interrupt pending. */
static int
get_int_active_bundles(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *misc_bar =
	    &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	struct resource *csr = misc_bar->virt_addr;
	u32 sources, mask;

	sources = ADF_CSR_RD(csr, ADF_4XXXIOV_VINTSOU_OFFSET);
	mask = ADF_CSR_RD(csr, ADF_4XXXIOV_VINTMSK_OFFSET);

	/* The low four bits correspond to the VF's four ring bundles. */
	return sources & ~mask & 0xF;
}
/*
 * Resolve which service a bank's rings carry and how the rings are split.
 * The service is chosen by the bank's position in the packed
 * ring-to-service map; each 2-ring bank devotes half its rings
 * (i.e. one ring) to the service starting at ring index 0.
 */
static void
get_ring_svc_map_data(int ring_pair_index,
		      u16 ring_to_svc_map,
		      u8 *serv_type,
		      int *ring_index,
		      int *num_rings_per_srv,
		      int bank_num)
{
	*serv_type =
	    GET_SRV_TYPE(ring_to_svc_map, bank_num % ADF_CFG_NUM_SERVICES);
	*ring_index = 0;
	*num_rings_per_srv = ADF_4XXXIOV_NUM_RINGS_PER_BANK / 2;
}
/*
 * Translate the user-configured "ServicesEnabled" string into the
 * packed ring-to-service mask.  Returns 0 on success (or when no
 * configuration entry exists yet, keeping the default map), EFAULT
 * when the configured string matches no supported combination.
 */
static int
get_ring_to_svc_map(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	u32 idx;

	/*
	 * During driver probe no ServicesEnabled entry has been written
	 * yet; treat a failed lookup as "use the default map" so the
	 * device still starts.
	 */
	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
		return 0;

	for (idx = 0; idx < ARRAY_SIZE(adf_4xxxiov_svcs); idx++) {
		if (strncmp(val,
			    adf_4xxxiov_svcs[idx].svcs_enabled,
			    ADF_CFG_MAX_KEY_LEN_IN_BYTES) != 0)
			continue;
		*ring_to_svc_map = adf_4xxxiov_svcs[idx].rng_to_svc_msk;
		return 0;
	}

	device_printf(GET_DEV(accel_dev),
		      "Invalid services enabled: %s\n",
		      val);
	return EFAULT;
}
/*
 * Ask the PF to reset one of the VF's ring pairs and wait for the reply.
 *
 * Sends an ADF_VF2PF_MSGTYPE_RP_RESET message for @bank_number and
 * blocks (up to ADF_PFVF_MSG_RESP_TIMEOUT) on the msg_received
 * completion, which the PF2VF response path signals.  The whole
 * exchange is serialized by rpreset_lock so only one reset request is
 * in flight at a time.
 *
 * Returns 0 on success, EINVAL for an out-of-range bank, EFAULT when
 * the message cannot be sent, times out, or the PF reports failure.
 */
static int
adf_4xxxvf_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
{
	struct pfvf_message req = { 0 };
	unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
	int ret = 0;

	if (bank_number >= accel_dev->hw_device->num_banks)
		return EINVAL;

	req.type = ADF_VF2PF_MSGTYPE_RP_RESET;
	req.data = bank_number;
	mutex_lock(&accel_dev->u1.vf.rpreset_lock);
	/* Re-arm the completion and assume success before sending; the
	 * response handler overwrites rpreset_sts on failure. */
	init_completion(&accel_dev->u1.vf.msg_received);
	accel_dev->u1.vf.rpreset_sts = RPRESET_SUCCESS;
	if (adf_send_vf2pf_msg(accel_dev, req)) {
		device_printf(GET_DEV(accel_dev),
			      "vf ring pair reset failure (vf2pf msg error)\n");
		ret = EFAULT;
		goto out;
	}
	if (!wait_for_completion_timeout(&accel_dev->u1.vf.msg_received,
					 timeout)) {
		device_printf(
		    GET_DEV(accel_dev),
		    "vf ring pair reset failure (pf2vf msg timeout)\n");
		ret = EFAULT;
		goto out;
	}
	if (accel_dev->u1.vf.rpreset_sts != RPRESET_SUCCESS) {
		device_printf(
		    GET_DEV(accel_dev),
		    "vf ring pair reset failure (pf reports error)\n");
		ret = EFAULT;
		goto out;
	}
out:
	mutex_unlock(&accel_dev->u1.vf.rpreset_lock);
	return ret;
}
/*
 * Populate the hw-device op table for a 4xxx virtual function.
 * Operations that only the PF performs (admin comms, arbiter, error
 * correction, interrupt enable) are wired to no-ops; PF/VF messaging
 * and CSR accessors come from the gen4 VF helpers.
 */
void
adf_init_hw_data_4xxxiov(struct adf_hw_device_data *hw_data)
{
	/* Static device geometry. */
	hw_data->dev_class = &adf_4xxxiov_class;
	hw_data->num_banks = ADF_4XXXIOV_ETR_MAX_BANKS;
	hw_data->num_rings_per_bank = ADF_4XXXIOV_NUM_RINGS_PER_BANK;
	hw_data->num_accel = ADF_4XXXIOV_MAX_ACCELERATORS;
	hw_data->num_logical_accel = 1;
	hw_data->num_engines = ADF_4XXXIOV_MAX_ACCELENGINES;
	hw_data->tx_rx_gap = ADF_4XXXIOV_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_4XXXIOV_TX_RINGS_MASK;
	hw_data->ring_to_svc_map = ADF_4XXXIOV_DEFAULT_RING_TO_SRV_MAP;
	/* IRQ setup/teardown is the VF ISR path. */
	hw_data->alloc_irq = adf_vf_isr_resource_alloc;
	hw_data->free_irq = adf_vf_isr_resource_free;
	/* PF-only responsibilities become no-ops on the VF. */
	hw_data->enable_error_correction = adf_vf_void_noop;
	hw_data->init_admin_comms = adf_vf_int_noop;
	hw_data->exit_admin_comms = adf_vf_void_noop;
	hw_data->send_admin_init = adf_vf2pf_notify_init;
	hw_data->init_arb = adf_vf_int_noop;
	hw_data->exit_arb = adf_vf_void_noop;
	hw_data->disable_iov = adf_vf2pf_notify_shutdown;
	/* Device introspection helpers defined above in this file. */
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_clock_speed = get_clock_speed;
	hw_data->get_sku = get_sku;
	hw_data->enable_ints = adf_vf_void_noop;
	hw_data->reset_device = adf_reset_flr;
	hw_data->restore_device = adf_dev_restore;
	hw_data->get_ring_svc_map_data = get_ring_svc_map_data;
	hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
	hw_data->get_accel_cap = adf_4xxxvf_get_hw_cap;
	hw_data->config_device = adf_config_device;
	hw_data->set_asym_rings_mask = adf_set_asym_rings_mask;
	hw_data->ring_pair_reset = adf_4xxxvf_ring_pair_reset;
	/* PF2VF interrupt management implemented above. */
	hw_data->enable_pf2vf_interrupt = enable_pf2vm_interrupt;
	hw_data->disable_pf2vf_interrupt = disable_pf2vm_interrupt;
	hw_data->interrupt_active_pf2vf = interrupt_active_pf2vm;
	hw_data->get_int_active_bundles = get_int_active_bundles;
	/* Register this instance with the class and the device manager. */
	hw_data->dev_class->instances++;
	adf_devmgr_update_class_index(hw_data);
	gen4vf_init_hw_csr_info(&hw_data->csr_info);
	adf_gen4_init_vf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
}
/* Undo adf_init_hw_data_4xxxiov(): drop this instance from the class
 * count and refresh the device-manager class index. */
void
adf_clean_hw_data_4xxxiov(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
	adf_devmgr_update_class_index(hw_data);
}

View file

@ -0,0 +1,34 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#ifndef ADF_4XXXVF_HW_DATA_H_
#define ADF_4XXXVF_HW_DATA_H_
#define ADF_4XXXIOV_PMISC_BAR 1
#define ADF_4XXXIOV_ACCELERATORS_MASK 0x1
#define ADF_4XXXIOV_ACCELENGINES_MASK 0x1
#define ADF_4XXXIOV_MAX_ACCELERATORS 1
#define ADF_4XXXIOV_MAX_ACCELENGINES 1
#define ADF_4XXXIOV_NUM_RINGS_PER_BANK 2
#define ADF_4XXXIOV_RX_RINGS_OFFSET 1
#define ADF_4XXXIOV_TX_RINGS_MASK 0x1
#define ADF_4XXXIOV_ETR_BAR 0
#define ADF_4XXXIOV_ETR_MAX_BANKS 4
#define ADF_4XXXIOV_VINTSOU_OFFSET 0x0
#define ADF_4XXXIOV_VINTMSK_OFFSET 0x4
#define ADF_4XXXIOV_VINTSOUPF2VM_OFFSET 0x1000
#define ADF_4XXXIOV_VINTMSKPF2VM_OFFSET 0x1004
#define ADF_4XXX_DEF_ASYM_MASK 0x1
/* Virtual function fuses */
#define ADF_4XXXIOV_VFFUSECTL0_OFFSET (0x40)
#define ADF_4XXXIOV_VFFUSECTL1_OFFSET (0x44)
#define ADF_4XXXIOV_VFFUSECTL2_OFFSET (0x4C)
#define ADF_4XXXIOV_VFFUSECTL4_OFFSET (0x1C4)
#define ADF_4XXXIOV_VFFUSECTL5_OFFSET (0x1C8)
void adf_init_hw_data_4xxxiov(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_4xxxiov(struct adf_hw_device_data *hw_data);
u32 adf_4xxxvf_get_hw_cap(struct adf_accel_dev *accel_dev);
#endif

View file

@ -0,0 +1,282 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include "adf_4xxxvf_hw_data.h"
#include "adf_gen4_hw_data.h"
#include "adf_fw_counters.h"
#include "adf_cfg_device.h"
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <machine/bus_dma.h>
#include <dev/pci/pcireg.h>
/* Malloc type tag for allocations owned by the 4xxx VF driver. */
static MALLOC_DEFINE(M_QAT_4XXXVF, "qat_4xxxvf", "qat_4xxxvf");

/* Shorthand for an Intel PCI id-table entry. */
#define ADF_SYSTEM_DEVICE(device_id)                                           \
	{                                                                      \
		PCI_VENDOR_ID_INTEL, device_id                                 \
	}

/* PCI IDs handled by this driver; zero-vendor entry terminates the table. */
static const struct pci_device_id adf_pci_tbl[] =
    { ADF_SYSTEM_DEVICE(ADF_4XXXIOV_PCI_DEVICE_ID),
      ADF_SYSTEM_DEVICE(ADF_401XXIOV_PCI_DEVICE_ID),
      {
	  0,
      } };
/* Probe: claim the device when its vendor/device pair is in our table. */
static int
adf_probe(device_t dev)
{
	const struct pci_device_id *entry;

	for (entry = adf_pci_tbl; entry->vendor != 0; entry++) {
		if (pci_get_vendor(dev) != entry->vendor ||
		    pci_get_device(dev) != entry->device)
			continue;
		device_set_desc(dev,
				"Intel " ADF_4XXXVF_DEVICE_NAME
				" QuickAssist");
		return BUS_PROBE_GENERIC;
	}
	return ENXIO;
}
/*
 * Release everything adf_attach() acquired: DMA tag, mapped BARs,
 * device-manager registration, hw-data and the configuration table.
 * Safe to call on a partially initialized device (every step checks
 * its resource first), which is why the attach error path uses it.
 */
static void
adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
	struct adf_accel_dev *pf;
	int i;

	if (accel_dev->dma_tag)
		bus_dma_tag_destroy(accel_dev->dma_tag);
	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];

		if (bar->virt_addr)
			bus_free_resource(accel_pci_dev->pci_dev,
					  SYS_RES_MEMORY,
					  bar->virt_addr);
	}
	/*
	 * As adf_clean_hw_data_4xxxiov() will update class index, before
	 * index is updated, vf must be remove from accel_table.
	 */
	pf = adf_devmgr_pci_to_accel_dev(pci_find_pf(accel_pci_dev->pci_dev));
	adf_devmgr_rm_dev(accel_dev, pf);
	if (accel_dev->hw_device) {
		switch (pci_get_device(accel_pci_dev->pci_dev)) {
		case ADF_4XXXIOV_PCI_DEVICE_ID:
		case ADF_401XXIOV_PCI_DEVICE_ID:
			adf_clean_hw_data_4xxxiov(accel_dev->hw_device);
			break;
		default:
			break;
		}
		free(accel_dev->hw_device, M_QAT_4XXXVF);
		accel_dev->hw_device = NULL;
	}
	adf_cfg_dev_remove(accel_dev);
}
/*
 * Attach a 4xxx VF: register with the device manager, build the
 * hw-data op table, map BARs, create the DMA tag, apply the device
 * configuration and bring the device up.  On early failure all
 * acquired resources are released via adf_cleanup_accel().
 */
static int
adf_attach(device_t dev)
{
	struct adf_accel_dev *accel_dev;
	struct adf_accel_dev *pf;
	struct adf_accel_pci *accel_pci_dev;
	struct adf_hw_device_data *hw_data;
	unsigned int i, bar_nr;
	int ret = 0;
	int rid;
	struct adf_cfg_device *cfg_dev = NULL;

	accel_dev = device_get_softc(dev);
	accel_dev->is_vf = true;
	/* Locate the owning PF (if visible) for the device-manager table. */
	pf = adf_devmgr_pci_to_accel_dev(pci_find_pf(dev));

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	accel_pci_dev = &accel_dev->accel_pci_dev;
	accel_pci_dev->pci_dev = dev;

	/* Fall back to NUMA domain 0 when the bus cannot report one. */
	if (bus_get_domain(dev, &accel_pci_dev->node) != 0)
		accel_pci_dev->node = 0;

	/* Add accel device to accel table */
	if (adf_devmgr_add_dev(accel_dev, pf)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to add new accelerator device.\n");
		/* NOTE(review): negative errno here vs positive elsewhere
		 * in this function — confirm callers treat both as failure. */
		return -EFAULT;
	}

	/* Allocate and configure device configuration structure */
	hw_data = malloc(sizeof(*hw_data), M_QAT_4XXXVF, M_WAITOK | M_ZERO);
	/* NOTE(review): M_WAITOK malloc(9) cannot return NULL, so this
	 * check is effectively dead code. */
	if (!hw_data) {
		ret = -ENOMEM;
		goto out_err;
	}
	accel_dev->hw_device = hw_data;
	adf_init_hw_data_4xxxiov(accel_dev->hw_device);
	accel_pci_dev->revid = pci_get_revid(dev);

	hw_data->fuses = pci_read_config(dev, ADF_4XXXIOV_VFFUSECTL4_OFFSET, 4);

	/* Get Accelerators and Accelerators Engines masks */
	hw_data->accel_mask = hw_data->get_accel_mask(accel_dev);
	hw_data->ae_mask = hw_data->get_ae_mask(accel_dev);
	hw_data->admin_ae_mask = hw_data->ae_mask;
	accel_pci_dev->sku = hw_data->get_sku(hw_data);

	/* Create device configuration table */
	ret = adf_cfg_dev_add(accel_dev);
	if (ret)
		goto out_err;

	pci_set_max_read_req(dev, 1024);

	ret = bus_dma_tag_create(bus_get_dma_tag(dev),
				 1,
				 0,
				 BUS_SPACE_MAXADDR,
				 BUS_SPACE_MAXADDR,
				 NULL,
				 NULL,
				 BUS_SPACE_MAXSIZE,
				 /* BUS_SPACE_UNRESTRICTED */ 1,
				 BUS_SPACE_MAXSIZE,
				 0,
				 NULL,
				 NULL,
				 &accel_dev->dma_tag);

	hw_data->accel_capabilities_mask = adf_4xxxvf_get_hw_cap(accel_dev);

	/* Find and map all the device's BARS */
	i = 0;
	for (bar_nr = 0; i < ADF_PCI_MAX_BARS && bar_nr < PCIR_MAX_BAR_0;
	     bar_nr++) {
		struct adf_bar *bar;

		rid = PCIR_BAR(bar_nr);
		if (bus_get_resource(dev, SYS_RES_MEMORY, rid, NULL, NULL) !=
		    0) {
			continue;
		}
		bar = &accel_pci_dev->pci_bars[i++];
		bar->virt_addr = bus_alloc_resource_any(dev,
							SYS_RES_MEMORY,
							&rid,
							RF_ACTIVE);
		if (!bar->virt_addr) {
			device_printf(GET_DEV(accel_dev),
				      "Failed to map BAR %d\n",
				      bar_nr);
			ret = ENXIO;
			goto out_err;
		}
		bar->base_addr = rman_get_start(bar->virt_addr);
		bar->size = rman_get_size(bar->virt_addr);
	}
	if (i == 0) {
		device_printf(
		    GET_DEV(accel_dev),
		    "No BARs mapped. Please check if PCI BARs are mapped correctly for device\n");
		ret = ENXIO;
		goto out_err;
	}

	pci_enable_busmaster(dev);

	/* Completion for VF2PF request/response message exchange */
	init_completion(&accel_dev->u1.vf.msg_received);
	mutex_init(&accel_dev->u1.vf.rpreset_lock);

	ret = hw_data->config_device(accel_dev);
	if (ret)
		goto out_err;

	ret = adf_dev_init(accel_dev);
	if (!ret)
		ret = adf_dev_start(accel_dev);

	if (ret) {
		device_printf(
		    GET_DEV(accel_dev),
		    "Failed to start - make sure PF enabled services match VF configuration.\n");
		adf_dev_stop(accel_dev);
		adf_dev_shutdown(accel_dev);
		/* NOTE(review): returns 0 after a failed start so the
		 * driver stays attached — presumably intentional to keep
		 * the softc around; confirm. */
		return 0;
	}

	/* Device is up; drop the staging configuration structure. */
	cfg_dev = accel_dev->cfg->dev;
	adf_cfg_device_clear(cfg_dev, accel_dev);
	free(cfg_dev, M_QAT);
	accel_dev->cfg->dev = NULL;
	return ret;

out_err:
	adf_cleanup_accel(accel_dev);
	return ret;
}
/*
 * Detach the VF: quiesce PF/VF messaging, stop and shut down the
 * device, then release all resources.
 */
static int
adf_detach(device_t dev)
{
	struct adf_accel_dev *accel_dev = device_get_softc(dev);

	if (!accel_dev) {
		printf("QAT: Driver removal failed\n");
		return EFAULT;
	}

	/* Disable PF2VF interrupts and drain the VF stop workqueue
	 * before tearing the device down. */
	adf_flush_vf_wq(accel_dev);

	clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);

	adf_dev_stop(accel_dev);

	adf_dev_shutdown(accel_dev);

	adf_cleanup_accel(accel_dev);

	return 0;
}
/* Module event handler: only MOD_UNLOAD needs work (drop VF mappings). */
static int
adf_modevent(module_t mod, int type, void *data)
{
	if (type != MOD_UNLOAD)
		return EOPNOTSUPP;

	adf_clean_vf_map(true);
	return 0;
}
/* newbus device method table for the 4xxx VF driver. */
static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe),
					 DEVMETHOD(device_attach, adf_attach),
					 DEVMETHOD(device_detach, adf_detach),
					 DEVMETHOD_END };

static driver_t adf_driver = { "qat",
			       adf_methods,
			       sizeof(struct adf_accel_dev) };

/* Register on the pci bus; SI_ORDER_THIRD so qat_common loads first. */
DRIVER_MODULE_ORDERED(qat_4xxxvf,
		      pci,
		      adf_driver,
		      adf_modevent,
		      NULL,
		      SI_ORDER_THIRD);
MODULE_VERSION(qat_4xxxvf, 1);
MODULE_DEPEND(qat_4xxxvf, qat_common, 1, 1, 1);
MODULE_DEPEND(qat_4xxxvf, qat_api, 1, 1, 1);
MODULE_DEPEND(qat_4xxxvf, linuxkpi, 1, 1, 1);

View file

@ -4,9 +4,10 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include <adf_pf2vf_msg.h>
#include <adf_pfvf_msg.h>
#include <adf_dev_err.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_c3xxx_hw_data.h"
#include "icp_qat_hw.h"
#include "adf_heartbeat.h"
@ -142,18 +143,6 @@ adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
*arb_map_config = thrd_to_arb_map_gen;
}
static u32
get_pf2vf_offset(u32 i)
{
return ADF_C3XXX_PF2VF_OFFSET(i);
}
static u32
get_vintmsk_offset(u32 i)
{
return ADF_C3XXX_VINTMSK_OFFSET(i);
}
static void
get_arb_info(struct arb_info *arb_csrs_info)
{
@ -362,8 +351,6 @@ adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_pf2vf_offset = get_pf2vf_offset;
hw_data->get_vintmsk_offset = get_vintmsk_offset;
hw_data->get_arb_info = get_arb_info;
hw_data->get_admin_info = get_admin_info;
hw_data->get_errsou_offset = get_errsou_offset;
@ -382,11 +369,8 @@ adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->enable_ints = adf_enable_ints;
hw_data->set_ssm_wdtimer = adf_set_ssm_wdtimer;
hw_data->check_slice_hang = adf_check_slice_hang;
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
hw_data->disable_vf2pf_comms = adf_pf_disable_vf2pf_comms;
hw_data->restore_device = adf_dev_restore;
hw_data->reset_device = adf_reset_flr;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
hw_data->measure_clock = measure_clock;
hw_data->get_ae_clock = get_ae_clock;
hw_data->reset_device = adf_reset_flr;
@ -410,6 +394,7 @@ adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->post_reset = adf_dev_post_reset;
adf_gen2_init_hw_csr_info(&hw_data->csr_info);
adf_gen2_init_pf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
}
void

View file

@ -5,11 +5,12 @@
#include <linux/compiler.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_pf2vf_msg.h>
#include <adf_pfvf_msg.h>
#include <adf_dev_err.h>
#include <adf_cfg.h>
#include <adf_fw_counters.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_c4xxx_hw_data.h"
#include "adf_c4xxx_reset.h"
#include "adf_c4xxx_inline.h"
@ -608,18 +609,6 @@ adf_enable_mmp_error_correction(struct resource *csr,
}
}
static u32
get_pf2vf_offset(u32 i)
{
return ADF_C4XXX_PF2VF_OFFSET(i);
}
static u32
get_vintmsk_offset(u32 i)
{
return ADF_C4XXX_VINTMSK_OFFSET(i);
}
static void
get_arb_info(struct arb_info *arb_csrs_info)
{
@ -2154,8 +2143,6 @@ adf_init_hw_data_c4xxx(struct adf_hw_device_data *hw_data)
hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_pf2vf_offset = get_pf2vf_offset;
hw_data->get_vintmsk_offset = get_vintmsk_offset;
hw_data->get_arb_info = get_arb_info;
hw_data->get_admin_info = get_admin_info;
hw_data->get_errsou_offset = get_errsou_offset;
@ -2180,11 +2167,8 @@ adf_init_hw_data_c4xxx(struct adf_hw_device_data *hw_data)
hw_data->enable_ints = adf_enable_ints;
hw_data->set_ssm_wdtimer = c4xxx_set_ssm_wdtimer;
hw_data->check_slice_hang = c4xxx_check_slice_hang;
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
hw_data->disable_vf2pf_comms = adf_pf_disable_vf2pf_comms;
hw_data->reset_device = adf_reset_flr;
hw_data->restore_device = adf_c4xxx_dev_restore;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
hw_data->init_accel_units = adf_init_accel_units;
hw_data->reset_hw_units = adf_c4xxx_reset_hw_units;
hw_data->exit_accel_units = adf_exit_accel_units;
@ -2210,6 +2194,7 @@ adf_init_hw_data_c4xxx(struct adf_hw_device_data *hw_data)
hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask;
adf_gen2_init_hw_csr_info(&hw_data->csr_info);
adf_gen2_init_pf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
hw_data->csr_info.arb_enable_mask = 0xF;
}

View file

@ -4,9 +4,10 @@
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include <adf_pf2vf_msg.h>
#include <adf_pfvf_msg.h>
#include <adf_dev_err.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_c62x_hw_data.h"
#include "icp_qat_hw.h"
#include "adf_cfg.h"
@ -146,18 +147,6 @@ adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
*arb_map_config = thrd_to_arb_map_gen;
}
static u32
get_pf2vf_offset(u32 i)
{
return ADF_C62X_PF2VF_OFFSET(i);
}
static u32
get_vintmsk_offset(u32 i)
{
return ADF_C62X_VINTMSK_OFFSET(i);
}
static void
get_arb_info(struct arb_info *arb_csrs_info)
{
@ -367,8 +356,6 @@ adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_pf2vf_offset = get_pf2vf_offset;
hw_data->get_vintmsk_offset = get_vintmsk_offset;
hw_data->get_arb_info = get_arb_info;
hw_data->get_admin_info = get_admin_info;
hw_data->get_errsou_offset = get_errsou_offset;
@ -387,11 +374,8 @@ adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->enable_ints = adf_enable_ints;
hw_data->set_ssm_wdtimer = adf_set_ssm_wdtimer;
hw_data->check_slice_hang = adf_check_slice_hang;
hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
hw_data->disable_vf2pf_comms = adf_pf_disable_vf2pf_comms;
hw_data->restore_device = adf_dev_restore;
hw_data->reset_device = adf_reset_flr;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
hw_data->get_objs_num = get_objs_num;
hw_data->get_obj_name = get_obj_name;
hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask;
@ -415,6 +399,7 @@ adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->post_reset = adf_dev_post_reset;
adf_gen2_init_hw_csr_info(&hw_data->csr_info);
adf_gen2_init_pf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
}
void

Some files were not shown because too many files have changed in this diff Show more