linux/drivers/crypto/rockchip/rk3288_crypto.c
Ard Biesheuvel ce0183cb64 crypto: rockchip - switch to skcipher API
Commit 7a7ffe65c8 ("crypto: skcipher - Add top-level skcipher interface")
dated 20 August 2015 introduced the new skcipher API, which is intended to
replace both blkcipher and ablkcipher. While all consumers of the API were
converted long ago, some producers of ablkcipher remain, forcing us to keep
the ablkcipher support routines alive, along with the matching code to
expose [a]blkciphers via the skcipher API.

So switch this driver to the skcipher API, allowing us to finally drop the
ablkcipher code in the near future.

Cc: Heiko Stuebner <heiko@sntech.de>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2019-11-17 09:02:48 +08:00
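
In practice such a conversion is largely mechanical: the ablkcipher entry
points embedded in a struct crypto_alg become a standalone struct
skcipher_alg whose callbacks take crypto_skcipher/skcipher_request
arguments, and registration moves from crypto_register_alg() to
crypto_register_skcipher(). The fragment below is a minimal sketch of that
shape only; the demo_* names and the empty callback bodies are
placeholders, not code from this driver.

/* A minimal, illustrative skcipher_alg; not the rk3288 driver's code. */
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

struct demo_ctx {
	u8 key[AES_MAX_KEY_SIZE];	/* placeholder context */
};

static int demo_setkey(struct crypto_skcipher *tfm, const u8 *key,
		       unsigned int keylen)
{
	/* The old ablkcipher hook took a struct crypto_ablkcipher. */
	return 0;
}

static int demo_encrypt(struct skcipher_request *req)
{
	/* The old hook took a struct ablkcipher_request. */
	return 0;
}

static int demo_decrypt(struct skcipher_request *req)
{
	return 0;
}

static struct skcipher_alg demo_alg = {
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-demo",
	.base.cra_priority	= 300,
	.base.cra_flags		= CRYPTO_ALG_ASYNC,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct demo_ctx),
	.base.cra_module	= THIS_MODULE,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= demo_setkey,
	.encrypt		= demo_encrypt,
	.decrypt		= demo_decrypt,
};

/* Registration moves from crypto_register_alg() to this helper. */
static int demo_register(void)
{
	return crypto_register_skcipher(&demo_alg);
}

In the file below, the effect of the conversion is visible only in
rk_crypto_register()/rk_crypto_unregister(), which now call
crypto_register_skcipher()/crypto_unregister_skcipher() for the
ALG_TYPE_CIPHER entries.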

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
 */
#include "rk3288_crypto.h"
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/reset.h>

static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
{
	int err;

	err = clk_prepare_enable(dev->sclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
			__func__, __LINE__);
		goto err_return;
	}
	err = clk_prepare_enable(dev->aclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
			__func__, __LINE__);
		goto err_aclk;
	}
	err = clk_prepare_enable(dev->hclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
			__func__, __LINE__);
		goto err_hclk;
	}
	err = clk_prepare_enable(dev->dmaclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
			__func__, __LINE__);
		goto err_dmaclk;
	}
	return err;

err_dmaclk:
	clk_disable_unprepare(dev->hclk);
err_hclk:
	clk_disable_unprepare(dev->aclk);
err_aclk:
	clk_disable_unprepare(dev->sclk);
err_return:
	return err;
}

static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
{
	clk_disable_unprepare(dev->dmaclk);
	clk_disable_unprepare(dev->hclk);
	clk_disable_unprepare(dev->aclk);
	clk_disable_unprepare(dev->sclk);
}

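/*
 * A scatterlist entry is usable in place only when its offset is 4-byte
 * aligned, its length is aligned to the engine's align_mask, and (for
 * in/out pairs) source and destination entries have the same length.
 */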
static int check_alignment(struct scatterlist *sg_src,
			   struct scatterlist *sg_dst,
			   int align_mask)
{
	int in, out, align;

	in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
	     IS_ALIGNED((uint32_t)sg_src->length, align_mask);
	if (!sg_dst)
		return in;
	out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
	      IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
	align = in && out;

	return (align && (sg_src->length == sg_dst->length));
}

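/*
 * Map the next chunk of data for DMA.  If the scatterlists are still
 * suitably aligned, map the current entries directly; otherwise bounce
 * up to one page through the pre-allocated buffer at dev->addr_vir and
 * map that instead.
 */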
static int rk_load_data(struct rk_crypto_info *dev,
			struct scatterlist *sg_src,
			struct scatterlist *sg_dst)
{
	unsigned int count;

	dev->aligned = dev->aligned ?
		check_alignment(sg_src, sg_dst, dev->align_size) :
		dev->aligned;
	if (dev->aligned) {
		count = min(dev->left_bytes, sg_src->length);
		dev->left_bytes -= count;

		if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->addr_in = sg_dma_address(sg_src);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(dst) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, sg_src, 1,
					     DMA_TO_DEVICE);
				return -EINVAL;
			}
			dev->addr_out = sg_dma_address(sg_dst);
		}
	} else {
		count = (dev->left_bytes > PAGE_SIZE) ?
			PAGE_SIZE : dev->left_bytes;

		if (!sg_pcopy_to_buffer(dev->first, dev->src_nents,
					dev->addr_vir, count,
					dev->total - dev->left_bytes)) {
			dev_err(dev->dev, "[%s:%d] pcopy err\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->left_bytes -= count;
		sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
		if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n",
				__func__, __LINE__);
			return -ENOMEM;
		}
		dev->addr_in = sg_dma_address(&dev->sg_tmp);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
					DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(sg_tmp) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
					     DMA_TO_DEVICE);
				return -ENOMEM;
			}
			dev->addr_out = sg_dma_address(&dev->sg_tmp);
		}
	}
	dev->count = count;
	return 0;
}

static void rk_unload_data(struct rk_crypto_info *dev)
{
	struct scatterlist *sg_in, *sg_out;

	sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
	dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);

	if (dev->sg_dst) {
		sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
		dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
	}
}

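/*
 * Ack the interrupt by writing the status bits back, record any DMA
 * error flagged in the status register, and defer the rest of the
 * completion work to the done tasklet.
 */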
static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
{
	struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
	u32 interrupt_status;

	spin_lock(&dev->lock);
	interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);

	if (interrupt_status & 0x0a) {
		dev_warn(dev->dev, "DMA Error\n");
		dev->err = -EFAULT;
	}
	tasklet_schedule(&dev->done_task);

	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}

static int rk_crypto_enqueue(struct rk_crypto_info *dev,
			     struct crypto_async_request *async_req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->lock, flags);
	ret = crypto_enqueue_request(&dev->queue, async_req);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return ret;
	}
	dev->busy = true;
	spin_unlock_irqrestore(&dev->lock, flags);
	tasklet_schedule(&dev->queue_task);

	return ret;
}

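/*
 * Pull the next request off the software queue, notify any backlogged
 * request that it is now in progress, and start the transfer via the
 * per-algorithm start() hook.
 */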
static void rk_crypto_queue_task_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int err = 0;

	dev->err = 0;
	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog) {
		backlog->complete(backlog, -EINPROGRESS);
		backlog = NULL;
	}

	dev->async_req = async_req;
	err = dev->start(dev);
	if (err)
		dev->complete(dev->async_req, err);
}

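/*
 * Runs after each interrupt: propagate any error recorded by the IRQ
 * handler, otherwise advance the transfer through the per-algorithm
 * update() hook and complete the request if that fails.
 */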
static void rk_crypto_done_task_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;

	if (dev->err) {
		dev->complete(dev->async_req, dev->err);
		return;
	}

	dev->err = dev->update(dev);
	if (dev->err)
		dev->complete(dev->async_req, dev->err);
}

static struct rk_crypto_tmp *rk_cipher_algs[] = {
	&rk_ecb_aes_alg,
	&rk_cbc_aes_alg,
	&rk_ecb_des_alg,
	&rk_cbc_des_alg,
	&rk_ecb_des3_ede_alg,
	&rk_cbc_des3_ede_alg,
	&rk_ahash_sha1,
	&rk_ahash_sha256,
	&rk_ahash_md5,
};

static int rk_crypto_register(struct rk_crypto_info *crypto_info)
{
	unsigned int i, k;
	int err = 0;

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
		rk_cipher_algs[i]->dev = crypto_info;
		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
			err = crypto_register_skcipher(
					&rk_cipher_algs[i]->alg.skcipher);
		else
			err = crypto_register_ahash(
					&rk_cipher_algs[i]->alg.hash);
		if (err)
			goto err_cipher_algs;
	}
	return 0;

err_cipher_algs:
	/* Unwind only the algorithms registered so far, indexed by k. */
	for (k = 0; k < i; k++) {
		if (rk_cipher_algs[k]->type == ALG_TYPE_CIPHER)
			crypto_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher);
		else
			crypto_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
	}
	return err;
}

static void rk_crypto_unregister(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
			crypto_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher);
		else
			crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
	}
}

static void rk_crypto_action(void *data)
{
	struct rk_crypto_info *crypto_info = data;

	reset_control_assert(crypto_info->rst);
}

static const struct of_device_id crypto_of_id_table[] = {
	{ .compatible = "rockchip,rk3288-crypto" },
	{}
};
MODULE_DEVICE_TABLE(of, crypto_of_id_table);

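/*
 * Probe: pulse the crypto reset, map the registers, look up the four
 * clocks and the interrupt, set up the two tasklets and the request
 * queue, and finally register the algorithms with the crypto API.
 */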
static int rk_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_crypto_info *crypto_info;
	int err = 0;

	crypto_info = devm_kzalloc(&pdev->dev,
				   sizeof(*crypto_info), GFP_KERNEL);
	if (!crypto_info) {
		err = -ENOMEM;
		goto err_crypto;
	}
	/* Set this before the first dev_warn()/dev_err() that uses it. */
	crypto_info->dev = &pdev->dev;

	crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
	if (IS_ERR(crypto_info->rst)) {
		err = PTR_ERR(crypto_info->rst);
		goto err_crypto;
	}

	reset_control_assert(crypto_info->rst);
	usleep_range(10, 20);
	reset_control_deassert(crypto_info->rst);

	err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
	if (err)
		goto err_crypto;

	spin_lock_init(&crypto_info->lock);

	crypto_info->reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(crypto_info->reg)) {
		err = PTR_ERR(crypto_info->reg);
		goto err_crypto;
	}

	crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
	if (IS_ERR(crypto_info->aclk)) {
		err = PTR_ERR(crypto_info->aclk);
		goto err_crypto;
	}

	crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(crypto_info->hclk)) {
		err = PTR_ERR(crypto_info->hclk);
		goto err_crypto;
	}

	crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
	if (IS_ERR(crypto_info->sclk)) {
		err = PTR_ERR(crypto_info->sclk);
		goto err_crypto;
	}

	crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(crypto_info->dmaclk)) {
		err = PTR_ERR(crypto_info->dmaclk);
		goto err_crypto;
	}

	crypto_info->irq = platform_get_irq(pdev, 0);
	if (crypto_info->irq < 0) {
		dev_warn(crypto_info->dev,
			 "control Interrupt is not available.\n");
		err = crypto_info->irq;
		goto err_crypto;
	}

	err = devm_request_irq(&pdev->dev, crypto_info->irq,
			       rk_crypto_irq_handle, IRQF_SHARED,
			       "rk-crypto", pdev);
	if (err) {
		dev_err(crypto_info->dev, "irq request failed.\n");
		goto err_crypto;
	}

	platform_set_drvdata(pdev, crypto_info);

	tasklet_init(&crypto_info->queue_task,
		     rk_crypto_queue_task_cb, (unsigned long)crypto_info);
	tasklet_init(&crypto_info->done_task,
		     rk_crypto_done_task_cb, (unsigned long)crypto_info);
	crypto_init_queue(&crypto_info->queue, 50);

	crypto_info->enable_clk = rk_crypto_enable_clk;
	crypto_info->disable_clk = rk_crypto_disable_clk;
	crypto_info->load_data = rk_load_data;
	crypto_info->unload_data = rk_unload_data;
	crypto_info->enqueue = rk_crypto_enqueue;
	crypto_info->busy = false;

	err = rk_crypto_register(crypto_info);
	if (err) {
		dev_err(dev, "err in register alg\n");
		goto err_register_alg;
	}

	dev_info(dev, "Crypto Accelerator successfully registered\n");
	return 0;

err_register_alg:
	tasklet_kill(&crypto_info->queue_task);
	tasklet_kill(&crypto_info->done_task);
err_crypto:
	return err;
}

static int rk_crypto_remove(struct platform_device *pdev)
{
	struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);

	rk_crypto_unregister();
	tasklet_kill(&crypto_tmp->done_task);
	tasklet_kill(&crypto_tmp->queue_task);
	return 0;
}

static struct platform_driver crypto_driver = {
	.probe		= rk_crypto_probe,
	.remove		= rk_crypto_remove,
	.driver		= {
		.name	= "rk3288-crypto",
		.of_match_table	= crypto_of_id_table,
	},
};

module_platform_driver(crypto_driver);

MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>");
MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine");
MODULE_LICENSE("GPL");