crypto: mediatek - remove obsolete driver

The MediaTek crypto driver has now been replaced by the inside-secure
driver. Remove it to avoid having duplicate drivers for the same hardware.

Signed-off-by: Vic Wu <vic.wu@mediatek.com>
Acked-by: Ryder Lee <ryder.lee@mediatek.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Author:    Vic Wu <vic.wu@mediatek.com>, 2020-12-07 15:58:42 +08:00
Committer: Herbert Xu
Commit:    6a702fa533 (parent 0aa171e9b2)
8 changed files with 0 additions and 3650 deletions

drivers/crypto/Kconfig

@@ -772,21 +772,6 @@ config CRYPTO_DEV_ZYNQMP_AES
accelerator. Select this if you want to use the ZynqMP module
for AES algorithms.
config CRYPTO_DEV_MEDIATEK
	tristate "MediaTek's EIP97 Cryptographic Engine driver"
	depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST
	select CRYPTO_LIB_AES
	select CRYPTO_AEAD
	select CRYPTO_SKCIPHER
	select CRYPTO_SHA1
	select CRYPTO_SHA256
	select CRYPTO_SHA512
	select CRYPTO_HMAC
	help
	  This driver allows you to utilize the EIP97 hardware crypto
	  accelerator, which can be found on the MT7623, MT2701, MT8521p, etc.
	  Select this if you want to use it for AES/SHA1/SHA2 algorithms.
source "drivers/crypto/chelsio/Kconfig"
source "drivers/crypto/virtio/Kconfig"

drivers/crypto/Makefile

@@ -19,7 +19,6 @@ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
n2_crypto-y := n2_core.o n2_asm.o

drivers/crypto/mediatek/Makefile (deleted)

@@ -1,3 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mtk-crypto.o
mtk-crypto-objs := mtk-platform.o mtk-aes.o mtk-sha.o

File diff suppressed because it is too large (drivers/crypto/mediatek/mtk-aes.c)

drivers/crypto/mediatek/mtk-platform.c (deleted)

@@ -1,586 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for EIP97 cryptographic accelerator.
*
* Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
*/
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "mtk-platform.h"
#define MTK_BURST_SIZE_MSK GENMASK(7, 4)
#define MTK_BURST_SIZE(x) ((x) << 4)
#define MTK_DESC_SIZE(x) ((x) << 0)
#define MTK_DESC_OFFSET(x) ((x) << 16)
#define MTK_DESC_FETCH_SIZE(x) ((x) << 0)
#define MTK_DESC_FETCH_THRESH(x) ((x) << 16)
#define MTK_DESC_OVL_IRQ_EN BIT(25)
#define MTK_DESC_ATP_PRESENT BIT(30)
#define MTK_DFSE_IDLE GENMASK(3, 0)
#define MTK_DFSE_THR_CTRL_EN BIT(30)
#define MTK_DFSE_THR_CTRL_RESET BIT(31)
#define MTK_DFSE_RING_ID(x) (((x) >> 12) & GENMASK(3, 0))
#define MTK_DFSE_MIN_DATA(x) ((x) << 0)
#define MTK_DFSE_MAX_DATA(x) ((x) << 8)
#define MTK_DFE_MIN_CTRL(x) ((x) << 16)
#define MTK_DFE_MAX_CTRL(x) ((x) << 24)
#define MTK_IN_BUF_MIN_THRESH(x) ((x) << 8)
#define MTK_IN_BUF_MAX_THRESH(x) ((x) << 12)
#define MTK_OUT_BUF_MIN_THRESH(x) ((x) << 0)
#define MTK_OUT_BUF_MAX_THRESH(x) ((x) << 4)
#define MTK_IN_TBUF_SIZE(x) (((x) >> 4) & GENMASK(3, 0))
#define MTK_IN_DBUF_SIZE(x) (((x) >> 8) & GENMASK(3, 0))
#define MTK_OUT_DBUF_SIZE(x) (((x) >> 16) & GENMASK(3, 0))
#define MTK_CMD_FIFO_SIZE(x) (((x) >> 8) & GENMASK(3, 0))
#define MTK_RES_FIFO_SIZE(x) (((x) >> 12) & GENMASK(3, 0))
#define MTK_PE_TK_LOC_AVL BIT(2)
#define MTK_PE_PROC_HELD BIT(14)
#define MTK_PE_TK_TIMEOUT_EN BIT(22)
#define MTK_PE_INPUT_DMA_ERR BIT(0)
#define MTK_PE_OUTPUT_DMA_ERR BIT(1)
#define MTK_PE_PKT_PORC_ERR BIT(2)
#define MTK_PE_PKT_TIMEOUT BIT(3)
#define MTK_PE_FATAL_ERR BIT(14)
#define MTK_PE_INPUT_DMA_ERR_EN BIT(16)
#define MTK_PE_OUTPUT_DMA_ERR_EN BIT(17)
#define MTK_PE_PKT_PORC_ERR_EN BIT(18)
#define MTK_PE_PKT_TIMEOUT_EN BIT(19)
#define MTK_PE_FATAL_ERR_EN BIT(30)
#define MTK_PE_INT_OUT_EN BIT(31)
#define MTK_HIA_SIGNATURE ((u16)0x35ca)
#define MTK_HIA_DATA_WIDTH(x) (((x) >> 25) & GENMASK(1, 0))
#define MTK_HIA_DMA_LENGTH(x) (((x) >> 20) & GENMASK(4, 0))
#define MTK_CDR_STAT_CLR GENMASK(4, 0)
#define MTK_RDR_STAT_CLR GENMASK(7, 0)
#define MTK_AIC_INT_MSK GENMASK(5, 0)
#define MTK_AIC_VER_MSK (GENMASK(15, 0) | GENMASK(27, 20))
#define MTK_AIC_VER11 0x011036c9
#define MTK_AIC_VER12 0x012036c9
#define MTK_AIC_G_CLR GENMASK(30, 20)
/**
 * EIP97 is an integrated security subsystem that accelerates cryptographic
 * functions and protocols to offload the host processor.
 * Some important hardware modules are briefly introduced below:
 *
 * Host Interface Adapter (HIA) - the main interface between the host
 * system and the hardware subsystem. It is responsible for attaching
 * the processing engine to the specific host bus interface and provides
 * a standardized software view for offloading tasks to the engine.
 *
 * Command Descriptor Ring Manager (CDR Manager) - keeps track of how many
 * CDs the host has prepared in the CDR. It monitors the fill level of its
 * CD-FIFO and, if there is sufficient space for the next block of
 * descriptors, fires off a DMA request to fetch a block of CDs.
 *
 * Data Fetch Engine (DFE) - responsible for parsing the CD and setting up
 * the required control and packet data DMA transfers from system memory
 * to the processing engine.
 *
 * Result Descriptor Ring Manager (RDR Manager) - same as the CDR Manager,
 * but targets result descriptors. It also handles the RD updates under
 * control of the DSE. For each packet data segment processed, the DSE
 * triggers the RDR Manager to write the updated RD. If triggered to
 * update, the RDR Manager sets up a DMA operation to copy the RD from
 * the DSE to the correct location in the RDR.
 *
 * Data Store Engine (DSE) - responsible for parsing the prepared RD and
 * setting up the required control and packet data DMA transfers from the
 * processing engine to system memory.
 *
 * Advanced Interrupt Controllers (AICs) - receive interrupt request
 * signals from various sources and combine them into one interrupt
 * output. One AIC serves the HIA global and processing engine
 * interrupts; the others serve the per-ring descriptor interrupts.
 */
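/*
 * Putting the pieces together, a minimal sketch of the per-request flow
 * through the blocks described above (an illustration, not original code):
 *
 *   host driver                   EIP97
 *   -----------                   -----
 *   write CDs into the CDR --->   CDR Manager DMA-fetches a block of CDs
 *                                 DFE parses CDs, pulls in source data
 *                                 packet engine transforms the data
 *                                 DSE pushes result data, updates the RD
 *   read RDs from the RDR  <---   RDR Manager raises a ring interrupt
 */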
/* Cryptographic engine capabilities */
struct mtk_sys_cap {
/* host interface adapter */
u32 hia_ver;
u32 hia_opt;
/* packet engine */
u32 pkt_eng_opt;
/* global hardware */
u32 hw_opt;
};
static void mtk_desc_ring_link(struct mtk_cryp *cryp, u32 mask)
{
/* Assign rings to DFE/DSE thread and enable it */
writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DFE_THR_CTRL);
writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DSE_THR_CTRL);
}
static void mtk_dfe_dse_buf_setup(struct mtk_cryp *cryp,
struct mtk_sys_cap *cap)
{
u32 width = MTK_HIA_DATA_WIDTH(cap->hia_opt) + 2;
u32 len = MTK_HIA_DMA_LENGTH(cap->hia_opt) - 1;
u32 ipbuf = min((u32)MTK_IN_DBUF_SIZE(cap->hw_opt) + width, len);
u32 opbuf = min((u32)MTK_OUT_DBUF_SIZE(cap->hw_opt) + width, len);
u32 itbuf = min((u32)MTK_IN_TBUF_SIZE(cap->hw_opt) + width, len);
writel(MTK_DFSE_MIN_DATA(ipbuf - 1) |
MTK_DFSE_MAX_DATA(ipbuf) |
MTK_DFE_MIN_CTRL(itbuf - 1) |
MTK_DFE_MAX_CTRL(itbuf),
cryp->base + DFE_CFG);
writel(MTK_DFSE_MIN_DATA(opbuf - 1) |
MTK_DFSE_MAX_DATA(opbuf),
cryp->base + DSE_CFG);
writel(MTK_IN_BUF_MIN_THRESH(ipbuf - 1) |
MTK_IN_BUF_MAX_THRESH(ipbuf),
cryp->base + PE_IN_DBUF_THRESH);
writel(MTK_IN_BUF_MIN_THRESH(itbuf - 1) |
MTK_IN_BUF_MAX_THRESH(itbuf),
cryp->base + PE_IN_TBUF_THRESH);
writel(MTK_OUT_BUF_MIN_THRESH(opbuf - 1) |
MTK_OUT_BUF_MAX_THRESH(opbuf),
cryp->base + PE_OUT_DBUF_THRESH);
writel(0, cryp->base + PE_OUT_TBUF_THRESH);
writel(0, cryp->base + PE_OUT_BUF_CTRL);
}
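/*
 * Illustration with hypothetical field values: if HIA_OPTIONS reports a
 * data-width field of 1 (64-bit) and a DMA-length field of 8, then
 * width = 3 and len = 7 above, so a hardware input data buffer size
 * field of 6 yields ipbuf = min(6 + 3, 7) = 7; the thresholds are
 * clamped by the supported DMA length.
 */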
static int mtk_dfe_dse_state_check(struct mtk_cryp *cryp)
{
int ret = -EINVAL;
u32 val;
/* Check for completion of all DMA transfers */
val = readl(cryp->base + DFE_THR_STAT);
if (MTK_DFSE_RING_ID(val) == MTK_DFSE_IDLE) {
val = readl(cryp->base + DSE_THR_STAT);
if (MTK_DFSE_RING_ID(val) == MTK_DFSE_IDLE)
ret = 0;
}
if (!ret) {
/* Take DFE/DSE thread out of reset */
writel(0, cryp->base + DFE_THR_CTRL);
writel(0, cryp->base + DSE_THR_CTRL);
} else {
return -EBUSY;
}
return 0;
}
static int mtk_dfe_dse_reset(struct mtk_cryp *cryp)
{
/* Reset DSE/DFE and correct system priorities for all rings. */
writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DFE_THR_CTRL);
writel(0, cryp->base + DFE_PRIO_0);
writel(0, cryp->base + DFE_PRIO_1);
writel(0, cryp->base + DFE_PRIO_2);
writel(0, cryp->base + DFE_PRIO_3);
writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DSE_THR_CTRL);
writel(0, cryp->base + DSE_PRIO_0);
writel(0, cryp->base + DSE_PRIO_1);
writel(0, cryp->base + DSE_PRIO_2);
writel(0, cryp->base + DSE_PRIO_3);
return mtk_dfe_dse_state_check(cryp);
}
static void mtk_cmd_desc_ring_setup(struct mtk_cryp *cryp,
int i, struct mtk_sys_cap *cap)
{
/* Number of full descriptors that fit in the CD-FIFO, minus one */
u32 count =
((1 << MTK_CMD_FIFO_SIZE(cap->hia_opt)) / MTK_DESC_SZ) - 1;
/* Temporarily disable external triggering */
writel(0, cryp->base + CDR_CFG(i));
/* Clear CDR count */
writel(MTK_CNT_RST, cryp->base + CDR_PREP_COUNT(i));
writel(MTK_CNT_RST, cryp->base + CDR_PROC_COUNT(i));
writel(0, cryp->base + CDR_PREP_PNTR(i));
writel(0, cryp->base + CDR_PROC_PNTR(i));
writel(0, cryp->base + CDR_DMA_CFG(i));
/* Configure CDR host address space */
writel(0, cryp->base + CDR_BASE_ADDR_HI(i));
writel(cryp->ring[i]->cmd_dma, cryp->base + CDR_BASE_ADDR_LO(i));
writel(MTK_DESC_RING_SZ, cryp->base + CDR_RING_SIZE(i));
/* Clear and disable all CDR interrupts */
writel(MTK_CDR_STAT_CLR, cryp->base + CDR_STAT(i));
/*
 * Set the command descriptor offset and enable the
 * "additional token present" indication in the descriptor.
 */
writel(MTK_DESC_SIZE(MTK_DESC_SZ) |
MTK_DESC_OFFSET(MTK_DESC_OFF) |
MTK_DESC_ATP_PRESENT,
cryp->base + CDR_DESC_SIZE(i));
writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) |
MTK_DESC_FETCH_THRESH(count * MTK_DESC_SZ),
cryp->base + CDR_CFG(i));
}
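/*
 * Worked example with a hypothetical FIFO-size field of 5: MTK_DESC_SZ
 * is 6 words and MTK_DESC_OFF is 8 words (see mtk-platform.h), so
 * count = ((1 << 5) / 6) - 1 = 4, giving a fetch size of 4 * 8 = 32
 * words and a fetch threshold of 4 * 6 = 24 words.
 */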
static void mtk_res_desc_ring_setup(struct mtk_cryp *cryp,
int i, struct mtk_sys_cap *cap)
{
u32 rndup = 2;
u32 count = ((1 << MTK_RES_FIFO_SIZE(cap->hia_opt)) / rndup) - 1;
/* Temporarily disable external triggering */
writel(0, cryp->base + RDR_CFG(i));
/* Clear RDR count */
writel(MTK_CNT_RST, cryp->base + RDR_PREP_COUNT(i));
writel(MTK_CNT_RST, cryp->base + RDR_PROC_COUNT(i));
writel(0, cryp->base + RDR_PREP_PNTR(i));
writel(0, cryp->base + RDR_PROC_PNTR(i));
writel(0, cryp->base + RDR_DMA_CFG(i));
/* Configure RDR host address space */
writel(0, cryp->base + RDR_BASE_ADDR_HI(i));
writel(cryp->ring[i]->res_dma, cryp->base + RDR_BASE_ADDR_LO(i));
writel(MTK_DESC_RING_SZ, cryp->base + RDR_RING_SIZE(i));
writel(MTK_RDR_STAT_CLR, cryp->base + RDR_STAT(i));
/*
 * The RDR manager generates update interrupts on a per-completed-packet
 * basis; the rd_proc_thresh_irq interrupt fires when proc_pkt_count
 * for the RDR exceeds the programmed packet threshold.
 */
writel(MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE,
cryp->base + RDR_THRESH(i));
/*
* Configure a threshold and time-out value for the processed
* result descriptors (or complete packets) that are written to
* the RDR.
*/
writel(MTK_DESC_SIZE(MTK_DESC_SZ) | MTK_DESC_OFFSET(MTK_DESC_OFF),
cryp->base + RDR_DESC_SIZE(i));
/*
* Configure HIA fetch size and fetch threshold that are used to
* fetch blocks of multiple descriptors.
*/
writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) |
MTK_DESC_FETCH_THRESH(count * rndup) |
MTK_DESC_OVL_IRQ_EN,
cryp->base + RDR_CFG(i));
}
static int mtk_packet_engine_setup(struct mtk_cryp *cryp)
{
struct mtk_sys_cap cap;
int i, err;
u32 val;
cap.hia_ver = readl(cryp->base + HIA_VERSION);
cap.hia_opt = readl(cryp->base + HIA_OPTIONS);
cap.hw_opt = readl(cryp->base + EIP97_OPTIONS);
if (!(((u16)cap.hia_ver) == MTK_HIA_SIGNATURE))
return -EINVAL;
/* Configure endianness conversion method for master (DMA) interface */
writel(0, cryp->base + EIP97_MST_CTRL);
/* Set HIA burst size */
val = readl(cryp->base + HIA_MST_CTRL);
val &= ~MTK_BURST_SIZE_MSK;
val |= MTK_BURST_SIZE(5);
writel(val, cryp->base + HIA_MST_CTRL);
err = mtk_dfe_dse_reset(cryp);
if (err) {
dev_err(cryp->dev, "Failed to reset DFE and DSE.\n");
return err;
}
mtk_dfe_dse_buf_setup(cryp, &cap);
/* Enable the 4 rings for the packet engines. */
mtk_desc_ring_link(cryp, 0xf);
for (i = 0; i < MTK_RING_MAX; i++) {
mtk_cmd_desc_ring_setup(cryp, i, &cap);
mtk_res_desc_ring_setup(cryp, i, &cap);
}
writel(MTK_PE_TK_LOC_AVL | MTK_PE_PROC_HELD | MTK_PE_TK_TIMEOUT_EN,
cryp->base + PE_TOKEN_CTRL_STAT);
/* Clear all pending interrupts */
writel(MTK_AIC_G_CLR, cryp->base + AIC_G_ACK);
writel(MTK_PE_INPUT_DMA_ERR | MTK_PE_OUTPUT_DMA_ERR |
MTK_PE_PKT_PORC_ERR | MTK_PE_PKT_TIMEOUT |
MTK_PE_FATAL_ERR | MTK_PE_INPUT_DMA_ERR_EN |
MTK_PE_OUTPUT_DMA_ERR_EN | MTK_PE_PKT_PORC_ERR_EN |
MTK_PE_PKT_TIMEOUT_EN | MTK_PE_FATAL_ERR_EN |
MTK_PE_INT_OUT_EN,
cryp->base + PE_INTERRUPT_CTRL_STAT);
return 0;
}
static int mtk_aic_cap_check(struct mtk_cryp *cryp, int hw)
{
u32 val;
if (hw == MTK_RING_MAX)
val = readl(cryp->base + AIC_G_VERSION);
else
val = readl(cryp->base + AIC_VERSION(hw));
val &= MTK_AIC_VER_MSK;
if (val != MTK_AIC_VER11 && val != MTK_AIC_VER12)
return -ENXIO;
if (hw == MTK_RING_MAX)
val = readl(cryp->base + AIC_G_OPTIONS);
else
val = readl(cryp->base + AIC_OPTIONS(hw));
val &= MTK_AIC_INT_MSK;
if (!val || val > 32)
return -ENXIO;
return 0;
}
static int mtk_aic_init(struct mtk_cryp *cryp, int hw)
{
int err;
err = mtk_aic_cap_check(cryp, hw);
if (err)
return err;
/* Disable all interrupts and set initial configuration */
if (hw == MTK_RING_MAX) {
writel(0, cryp->base + AIC_G_ENABLE_CTRL);
writel(0, cryp->base + AIC_G_POL_CTRL);
writel(0, cryp->base + AIC_G_TYPE_CTRL);
writel(0, cryp->base + AIC_G_ENABLE_SET);
} else {
writel(0, cryp->base + AIC_ENABLE_CTRL(hw));
writel(0, cryp->base + AIC_POL_CTRL(hw));
writel(0, cryp->base + AIC_TYPE_CTRL(hw));
writel(0, cryp->base + AIC_ENABLE_SET(hw));
}
return 0;
}
static int mtk_accelerator_init(struct mtk_cryp *cryp)
{
int i, err;
/* Initialize advanced interrupt controller(AIC) */
for (i = 0; i < MTK_IRQ_NUM; i++) {
err = mtk_aic_init(cryp, i);
if (err) {
dev_err(cryp->dev, "Failed to initialize AIC.\n");
return err;
}
}
/* Initialize packet engine */
err = mtk_packet_engine_setup(cryp);
if (err) {
dev_err(cryp->dev, "Failed to configure packet engine.\n");
return err;
}
return 0;
}
static void mtk_desc_dma_free(struct mtk_cryp *cryp)
{
int i;
for (i = 0; i < MTK_RING_MAX; i++) {
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
cryp->ring[i]->res_base,
cryp->ring[i]->res_dma);
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
cryp->ring[i]->cmd_base,
cryp->ring[i]->cmd_dma);
kfree(cryp->ring[i]);
}
}
static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
{
struct mtk_ring **ring = cryp->ring;
int i;
for (i = 0; i < MTK_RING_MAX; i++) {
ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
if (!ring[i])
goto err_cleanup;
ring[i]->cmd_base = dma_alloc_coherent(cryp->dev,
MTK_DESC_RING_SZ,
&ring[i]->cmd_dma,
GFP_KERNEL);
if (!ring[i]->cmd_base)
goto err_cleanup;
ring[i]->res_base = dma_alloc_coherent(cryp->dev,
MTK_DESC_RING_SZ,
&ring[i]->res_dma,
GFP_KERNEL);
if (!ring[i]->res_base)
goto err_cleanup;
ring[i]->cmd_next = ring[i]->cmd_base;
ring[i]->res_next = ring[i]->res_base;
}
return 0;
err_cleanup:
do {
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
ring[i]->res_base, ring[i]->res_dma);
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
ring[i]->cmd_base, ring[i]->cmd_dma);
kfree(ring[i]);
} while (i--);
return -ENOMEM;
}
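/*
 * Sizing note: with MTK_DESC_RING_SZ = 16 KiB (512 descriptors of 32
 * bytes, see mtk-platform.h), each ring pins 32 KiB of coherent DMA
 * memory (command + result ring), or 128 KiB for all four rings.
 */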
static int mtk_crypto_probe(struct platform_device *pdev)
{
struct mtk_cryp *cryp;
int i, err;
cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL);
if (!cryp)
return -ENOMEM;
cryp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cryp->base))
return PTR_ERR(cryp->base);
for (i = 0; i < MTK_IRQ_NUM; i++) {
cryp->irq[i] = platform_get_irq(pdev, i);
if (cryp->irq[i] < 0)
return cryp->irq[i];
}
cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp");
if (IS_ERR(cryp->clk_cryp))
return -EPROBE_DEFER;
cryp->dev = &pdev->dev;
pm_runtime_enable(cryp->dev);
pm_runtime_get_sync(cryp->dev);
err = clk_prepare_enable(cryp->clk_cryp);
if (err)
goto err_clk_cryp;
/* Allocate four command/result descriptor rings */
err = mtk_desc_ring_alloc(cryp);
if (err) {
dev_err(cryp->dev, "Unable to allocate descriptor rings.\n");
goto err_resource;
}
/* Initialize hardware modules */
err = mtk_accelerator_init(cryp);
if (err) {
dev_err(cryp->dev, "Failed to initialize cryptographic engine.\n");
goto err_engine;
}
err = mtk_cipher_alg_register(cryp);
if (err) {
dev_err(cryp->dev, "Unable to register cipher algorithm.\n");
goto err_cipher;
}
err = mtk_hash_alg_register(cryp);
if (err) {
dev_err(cryp->dev, "Unable to register hash algorithm.\n");
goto err_hash;
}
platform_set_drvdata(pdev, cryp);
return 0;
err_hash:
mtk_cipher_alg_release(cryp);
err_cipher:
mtk_dfe_dse_reset(cryp);
err_engine:
mtk_desc_dma_free(cryp);
err_resource:
clk_disable_unprepare(cryp->clk_cryp);
err_clk_cryp:
pm_runtime_put_sync(cryp->dev);
pm_runtime_disable(cryp->dev);
return err;
}
static int mtk_crypto_remove(struct platform_device *pdev)
{
struct mtk_cryp *cryp = platform_get_drvdata(pdev);
mtk_hash_alg_release(cryp);
mtk_cipher_alg_release(cryp);
mtk_desc_dma_free(cryp);
clk_disable_unprepare(cryp->clk_cryp);
pm_runtime_put_sync(cryp->dev);
pm_runtime_disable(cryp->dev);
platform_set_drvdata(pdev, NULL);
return 0;
}
static const struct of_device_id of_crypto_id[] = {
{ .compatible = "mediatek,eip97-crypto" },
{},
};
MODULE_DEVICE_TABLE(of, of_crypto_id);
static struct platform_driver mtk_crypto_driver = {
.probe = mtk_crypto_probe,
.remove = mtk_crypto_remove,
.driver = {
.name = "mtk-crypto",
.of_match_table = of_crypto_id,
},
};
module_platform_driver(mtk_crypto_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ryder Lee <ryder.lee@mediatek.com>");
MODULE_DESCRIPTION("Cryptographic accelerator driver for EIP97");
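Because this driver only ever exposed standard crypto API algorithms
(e.g. "cbc(aes)" plus the SHA-1/SHA-2 hashes selected in the Kconfig entry
above), its removal is transparent to in-kernel users: the same algorithm
names are now served by the inside-secure driver on this hardware. As a
minimal sketch of such a user (demo_cbc_aes is a hypothetical helper, not
part of this commit):

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int demo_cbc_aes(const u8 *key, unsigned int keylen, u8 *iv,
			struct scatterlist *src, struct scatterlist *dst,
			unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* Ask for the algorithm by name; any registered driver may serve it. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* Run the async request synchronously via the crypto wait helpers. */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}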

drivers/crypto/mediatek/mtk-platform.h (deleted)

@@ -1,231 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Driver for EIP97 cryptographic accelerator.
*
* Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
*/
#ifndef __MTK_PLATFORM_H_
#define __MTK_PLATFORM_H_
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/scatterlist.h>
#include "mtk-regs.h"
#define MTK_RDR_PROC_THRESH BIT(0)
#define MTK_RDR_PROC_MODE BIT(23)
#define MTK_CNT_RST BIT(31)
#define MTK_IRQ_RDR0 BIT(1)
#define MTK_IRQ_RDR1 BIT(3)
#define MTK_IRQ_RDR2 BIT(5)
#define MTK_IRQ_RDR3 BIT(7)
#define SIZE_IN_WORDS(x) ((x) >> 2)
/**
* Ring 0/1 are used by AES encrypt and decrypt.
* Ring 2/3 are used by SHA.
*/
enum {
MTK_RING0,
MTK_RING1,
MTK_RING2,
MTK_RING3,
MTK_RING_MAX
};
#define MTK_REC_NUM (MTK_RING_MAX / 2)
#define MTK_IRQ_NUM 5
/**
 * struct mtk_desc - DMA descriptor
 * @hdr: the descriptor control header
 * @buf: DMA address of input buffer segment
 * @ct: DMA address of the command token that controls the operation flow
 * @ct_hdr: the command token control header
 * @tag: the user-defined field
 * @tfm: DMA address of transform state
 * @bound: padding that aligns the descriptor to its offset boundary
 *
 * Structure passed to the crypto engine to describe where source
 * data needs to be fetched and how it needs to be processed.
 */
struct mtk_desc {
__le32 hdr;
__le32 buf;
__le32 ct;
__le32 ct_hdr;
__le32 tag;
__le32 tfm;
__le32 bound[2];
};
#define MTK_DESC_NUM 512
#define MTK_DESC_OFF SIZE_IN_WORDS(sizeof(struct mtk_desc))
#define MTK_DESC_SZ (MTK_DESC_OFF - 2)
#define MTK_DESC_RING_SZ ((sizeof(struct mtk_desc) * MTK_DESC_NUM))
#define MTK_DESC_CNT(x) ((MTK_DESC_OFF * (x)) << 2)
#define MTK_DESC_LAST cpu_to_le32(BIT(22))
#define MTK_DESC_FIRST cpu_to_le32(BIT(23))
#define MTK_DESC_BUF_LEN(x) cpu_to_le32(x)
#define MTK_DESC_CT_LEN(x) cpu_to_le32((x) << 24)
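/*
 * Worked numbers: sizeof(struct mtk_desc) is 32 bytes, so MTK_DESC_OFF
 * is 8 words and MTK_DESC_SZ is 6 words (the two bound[] padding words
 * are not fetched), and MTK_DESC_RING_SZ is 512 * 32 bytes = 16 KiB
 * per ring.
 */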
/**
* struct mtk_ring - Descriptor ring
* @cmd_base: pointer to command descriptor ring base
* @cmd_next: pointer to the next command descriptor
* @cmd_dma: DMA address of command descriptor ring
* @res_base: pointer to result descriptor ring base
* @res_next: pointer to the next result descriptor
* @res_prev: pointer to the previous result descriptor
* @res_dma: DMA address of result descriptor ring
*
* A descriptor ring is a circular buffer that is used to manage
* one or more descriptors. There are two types of descriptor rings:
* the command descriptor ring and the result descriptor ring.
*/
struct mtk_ring {
struct mtk_desc *cmd_base;
struct mtk_desc *cmd_next;
dma_addr_t cmd_dma;
struct mtk_desc *res_base;
struct mtk_desc *res_next;
struct mtk_desc *res_prev;
dma_addr_t res_dma;
};
/**
* struct mtk_aes_dma - Structure that holds sg list info
* @sg: pointer to scatter-gather list
* @nents: number of entries in the sg list
* @remainder: remainder of sg list
* @sg_len: number of entries in the sg mapped list
*/
struct mtk_aes_dma {
struct scatterlist *sg;
int nents;
u32 remainder;
u32 sg_len;
};
struct mtk_aes_base_ctx;
struct mtk_aes_rec;
struct mtk_cryp;
typedef int (*mtk_aes_fn)(struct mtk_cryp *cryp, struct mtk_aes_rec *aes);
/**
 * struct mtk_aes_rec - AES operation record
 * @cryp: pointer to Cryptographic device
 * @queue: crypto request queue
 * @areq: pointer to async request
 * @done_task: tasklet used in the AES interrupt handler
 * @queue_task: tasklet used to dequeue requests
 * @ctx: pointer to current context
 * @src: the structure that holds source sg list info
 * @dst: the structure that holds destination sg list info
 * @aligned_sg: scatterlist used for alignment
 * @real_dst: pointer to the destination sg list
 * @resume: pointer to resume function
 * @total: request buffer length
 * @buf: pointer to page buffer
 * @id: the ring currently in use
 * @flags: AES operation state flags
 * @lock: the async queue lock
 *
 * Structure used to record AES execution state.
 */
struct mtk_aes_rec {
struct mtk_cryp *cryp;
struct crypto_queue queue;
struct crypto_async_request *areq;
struct tasklet_struct done_task;
struct tasklet_struct queue_task;
struct mtk_aes_base_ctx *ctx;
struct mtk_aes_dma src;
struct mtk_aes_dma dst;
struct scatterlist aligned_sg;
struct scatterlist *real_dst;
mtk_aes_fn resume;
size_t total;
void *buf;
u8 id;
unsigned long flags;
/* queue lock */
spinlock_t lock;
};
/**
 * struct mtk_sha_rec - SHA operation record
 * @cryp: pointer to Cryptographic device
 * @queue: crypto request queue
 * @req: pointer to ahash request
 * @done_task: tasklet used in the SHA interrupt handler
 * @queue_task: tasklet used to dequeue requests
 * @id: the ring currently in use
 * @flags: SHA operation state flags
 * @lock: the async queue lock
 *
 * Structure used to record SHA execution state.
 */
struct mtk_sha_rec {
struct mtk_cryp *cryp;
struct crypto_queue queue;
struct ahash_request *req;
struct tasklet_struct done_task;
struct tasklet_struct queue_task;
u8 id;
unsigned long flags;
/* queue lock */
spinlock_t lock;
};
/**
* struct mtk_cryp - Cryptographic device
* @base: pointer to mapped register I/O base
* @dev: pointer to device
* @clk_cryp: pointer to crypto clock
* @irq: global system and rings IRQ
* @ring: pointer to descriptor rings
* @aes: pointer to operation record of AES
* @sha: pointer to operation record of SHA
* @aes_list: device list of AES
* @sha_list: device list of SHA
* @rec: used to select the SHA record for a tfm
*
* Structure storing cryptographic device information.
*/
struct mtk_cryp {
void __iomem *base;
struct device *dev;
struct clk *clk_cryp;
int irq[MTK_IRQ_NUM];
struct mtk_ring *ring[MTK_RING_MAX];
struct mtk_aes_rec *aes[MTK_REC_NUM];
struct mtk_sha_rec *sha[MTK_REC_NUM];
struct list_head aes_list;
struct list_head sha_list;
bool rec;
};
int mtk_cipher_alg_register(struct mtk_cryp *cryp);
void mtk_cipher_alg_release(struct mtk_cryp *cryp);
int mtk_hash_alg_register(struct mtk_cryp *cryp);
void mtk_hash_alg_release(struct mtk_cryp *cryp);
#endif /* __MTK_PLATFORM_H_ */

drivers/crypto/mediatek/mtk-regs.h (deleted)

@@ -1,190 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Support for MediaTek cryptographic accelerator.
*
* Copyright (c) 2016 MediaTek Inc.
* Author: Ryder Lee <ryder.lee@mediatek.com>
*/
#ifndef __MTK_REGS_H__
#define __MTK_REGS_H__
/* HIA, Command Descriptor Ring Manager */
#define CDR_BASE_ADDR_LO(x) (0x0 + ((x) << 12))
#define CDR_BASE_ADDR_HI(x) (0x4 + ((x) << 12))
#define CDR_DATA_BASE_ADDR_LO(x) (0x8 + ((x) << 12))
#define CDR_DATA_BASE_ADDR_HI(x) (0xC + ((x) << 12))
#define CDR_ACD_BASE_ADDR_LO(x) (0x10 + ((x) << 12))
#define CDR_ACD_BASE_ADDR_HI(x) (0x14 + ((x) << 12))
#define CDR_RING_SIZE(x) (0x18 + ((x) << 12))
#define CDR_DESC_SIZE(x) (0x1C + ((x) << 12))
#define CDR_CFG(x) (0x20 + ((x) << 12))
#define CDR_DMA_CFG(x) (0x24 + ((x) << 12))
#define CDR_THRESH(x) (0x28 + ((x) << 12))
#define CDR_PREP_COUNT(x) (0x2C + ((x) << 12))
#define CDR_PROC_COUNT(x) (0x30 + ((x) << 12))
#define CDR_PREP_PNTR(x) (0x34 + ((x) << 12))
#define CDR_PROC_PNTR(x) (0x38 + ((x) << 12))
#define CDR_STAT(x) (0x3C + ((x) << 12))
/* HIA, Result Descriptor Ring Manager */
#define RDR_BASE_ADDR_LO(x) (0x800 + ((x) << 12))
#define RDR_BASE_ADDR_HI(x) (0x804 + ((x) << 12))
#define RDR_DATA_BASE_ADDR_LO(x) (0x808 + ((x) << 12))
#define RDR_DATA_BASE_ADDR_HI(x) (0x80C + ((x) << 12))
#define RDR_ACD_BASE_ADDR_LO(x) (0x810 + ((x) << 12))
#define RDR_ACD_BASE_ADDR_HI(x) (0x814 + ((x) << 12))
#define RDR_RING_SIZE(x) (0x818 + ((x) << 12))
#define RDR_DESC_SIZE(x) (0x81C + ((x) << 12))
#define RDR_CFG(x) (0x820 + ((x) << 12))
#define RDR_DMA_CFG(x) (0x824 + ((x) << 12))
#define RDR_THRESH(x) (0x828 + ((x) << 12))
#define RDR_PREP_COUNT(x) (0x82C + ((x) << 12))
#define RDR_PROC_COUNT(x) (0x830 + ((x) << 12))
#define RDR_PREP_PNTR(x) (0x834 + ((x) << 12))
#define RDR_PROC_PNTR(x) (0x838 + ((x) << 12))
#define RDR_STAT(x) (0x83C + ((x) << 12))
/* HIA, Ring AIC */
#define AIC_POL_CTRL(x) (0xE000 - ((x) << 12))
#define AIC_TYPE_CTRL(x) (0xE004 - ((x) << 12))
#define AIC_ENABLE_CTRL(x) (0xE008 - ((x) << 12))
#define AIC_RAW_STAL(x) (0xE00C - ((x) << 12))
#define AIC_ENABLE_SET(x) (0xE00C - ((x) << 12))
#define AIC_ENABLED_STAT(x) (0xE010 - ((x) << 12))
#define AIC_ACK(x) (0xE010 - ((x) << 12))
#define AIC_ENABLE_CLR(x) (0xE014 - ((x) << 12))
#define AIC_OPTIONS(x) (0xE018 - ((x) << 12))
#define AIC_VERSION(x) (0xE01C - ((x) << 12))
/* HIA, Global AIC */
#define AIC_G_POL_CTRL 0xF800
#define AIC_G_TYPE_CTRL 0xF804
#define AIC_G_ENABLE_CTRL 0xF808
#define AIC_G_RAW_STAT 0xF80C
#define AIC_G_ENABLE_SET 0xF80C
#define AIC_G_ENABLED_STAT 0xF810
#define AIC_G_ACK 0xF810
#define AIC_G_ENABLE_CLR 0xF814
#define AIC_G_OPTIONS 0xF818
#define AIC_G_VERSION 0xF81C
/* HIA, Data Fetch Engine */
#define DFE_CFG 0xF000
#define DFE_PRIO_0 0xF010
#define DFE_PRIO_1 0xF014
#define DFE_PRIO_2 0xF018
#define DFE_PRIO_3 0xF01C
/* HIA, Data Fetch Engine access monitoring for CDR */
#define DFE_RING_REGION_LO(x) (0xF080 + ((x) << 3))
#define DFE_RING_REGION_HI(x) (0xF084 + ((x) << 3))
/* HIA, Data Fetch Engine thread control and status for thread */
#define DFE_THR_CTRL 0xF200
#define DFE_THR_STAT 0xF204
#define DFE_THR_DESC_CTRL 0xF208
#define DFE_THR_DESC_DPTR_LO 0xF210
#define DFE_THR_DESC_DPTR_HI 0xF214
#define DFE_THR_DESC_ACDPTR_LO 0xF218
#define DFE_THR_DESC_ACDPTR_HI 0xF21C
/* HIA, Data Store Engine */
#define DSE_CFG 0xF400
#define DSE_PRIO_0 0xF410
#define DSE_PRIO_1 0xF414
#define DSE_PRIO_2 0xF418
#define DSE_PRIO_3 0xF41C
/* HIA, Data Store Engine access monitoring for RDR */
#define DSE_RING_REGION_LO(x) (0xF480 + ((x) << 3))
#define DSE_RING_REGION_HI(x) (0xF484 + ((x) << 3))
/* HIA, Data Store Engine thread control and status for thread */
#define DSE_THR_CTRL 0xF600
#define DSE_THR_STAT 0xF604
#define DSE_THR_DESC_CTRL 0xF608
#define DSE_THR_DESC_DPTR_LO 0xF610
#define DSE_THR_DESC_DPTR_HI 0xF614
#define DSE_THR_DESC_S_DPTR_LO 0xF618
#define DSE_THR_DESC_S_DPTR_HI 0xF61C
#define DSE_THR_ERROR_STAT 0xF620
/* HIA Global */
#define HIA_MST_CTRL 0xFFF4
#define HIA_OPTIONS 0xFFF8
#define HIA_VERSION 0xFFFC
/* Processing Engine Input Side, Processing Engine */
#define PE_IN_DBUF_THRESH 0x10000
#define PE_IN_TBUF_THRESH 0x10100
/* Packet Engine Configuration / Status Registers */
#define PE_TOKEN_CTRL_STAT 0x11000
#define PE_FUNCTION_EN 0x11004
#define PE_CONTEXT_CTRL 0x11008
#define PE_INTERRUPT_CTRL_STAT 0x11010
#define PE_CONTEXT_STAT 0x1100C
#define PE_OUT_TRANS_CTRL_STAT 0x11018
#define PE_OUT_BUF_CTRL 0x1101C
/* Packet Engine PRNG Registers */
#define PE_PRNG_STAT 0x11040
#define PE_PRNG_CTRL 0x11044
#define PE_PRNG_SEED_L 0x11048
#define PE_PRNG_SEED_H 0x1104C
#define PE_PRNG_KEY_0_L 0x11050
#define PE_PRNG_KEY_0_H 0x11054
#define PE_PRNG_KEY_1_L 0x11058
#define PE_PRNG_KEY_1_H 0x1105C
#define PE_PRNG_RES_0 0x11060
#define PE_PRNG_RES_1 0x11064
#define PE_PRNG_RES_2 0x11068
#define PE_PRNG_RES_3 0x1106C
#define PE_PRNG_LFSR_L 0x11070
#define PE_PRNG_LFSR_H 0x11074
/* Packet Engine AIC */
#define PE_EIP96_AIC_POL_CTRL 0x113C0
#define PE_EIP96_AIC_TYPE_CTRL 0x113C4
#define PE_EIP96_AIC_ENABLE_CTRL 0x113C8
#define PE_EIP96_AIC_RAW_STAT 0x113CC
#define PE_EIP96_AIC_ENABLE_SET 0x113CC
#define PE_EIP96_AIC_ENABLED_STAT 0x113D0
#define PE_EIP96_AIC_ACK 0x113D0
#define PE_EIP96_AIC_ENABLE_CLR 0x113D4
#define PE_EIP96_AIC_OPTIONS 0x113D8
#define PE_EIP96_AIC_VERSION 0x113DC
/* Packet Engine Options & Version Registers */
#define PE_EIP96_OPTIONS 0x113F8
#define PE_EIP96_VERSION 0x113FC
/* Processing Engine Output Side */
#define PE_OUT_DBUF_THRESH 0x11C00
#define PE_OUT_TBUF_THRESH 0x11D00
/* Processing Engine Local AIC */
#define PE_AIC_POL_CTRL 0x11F00
#define PE_AIC_TYPE_CTRL 0x11F04
#define PE_AIC_ENABLE_CTRL 0x11F08
#define PE_AIC_RAW_STAT 0x11F0C
#define PE_AIC_ENABLE_SET 0x11F0C
#define PE_AIC_ENABLED_STAT 0x11F10
#define PE_AIC_ENABLE_CLR 0x11F14
#define PE_AIC_OPTIONS 0x11F18
#define PE_AIC_VERSION 0x11F1C
/* Processing Engine General Configuration and Version */
#define PE_IN_FLIGHT 0x11FF0
#define PE_OPTIONS 0x11FF8
#define PE_VERSION 0x11FFC
/* EIP-97 - Global */
#define EIP97_CLOCK_STATE 0x1FFE4
#define EIP97_FORCE_CLOCK_ON 0x1FFE8
#define EIP97_FORCE_CLOCK_OFF 0x1FFEC
#define EIP97_MST_CTRL 0x1FFF4
#define EIP97_OPTIONS 0x1FFF8
#define EIP97_VERSION 0x1FFFC
#endif /* __MTK_REGS_H__ */
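Note the addressing pattern above: each ring's CDR and RDR register banks
step up by 4 KiB per ring ((x) << 12), while the per-ring AICs step down
from 0xE000. A few offsets computed from these macros, for illustration:

/* CDR_BASE_ADDR_LO(1) == 0x1000  (4 KiB stride per ring)   */
/* RDR_STAT(3)         == 0x383C                            */
/* AIC_POL_CTRL(1)     == 0xD000  (ring AICs grow downward) */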

File diff suppressed because it is too large (drivers/crypto/mediatek/mtk-sha.c)