Fixed MTP to work with TWRP

Author: awab228
Date:   2018-06-19 23:16:04 +02:00
Commit: f6dfaef42e

50820 changed files with 20846062 additions and 0 deletions

drivers/crypto/caam/Kconfig (new file, 121 lines)

@@ -0,0 +1,121 @@
config CRYPTO_DEV_FSL_CAAM
    tristate "Freescale CAAM-Multicore driver backend"
    depends on FSL_SOC
    help
      Enables the driver module for Freescale's Cryptographic Accelerator
      and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
      This module creates job ring devices, and configures h/w
      to operate as a DPAA component automatically, depending
      on h/w feature availability.

      To compile this driver as a module, choose M here: the module
      will be called caam.

config CRYPTO_DEV_FSL_CAAM_JR
    tristate "Freescale CAAM Job Ring driver backend"
    depends on CRYPTO_DEV_FSL_CAAM
    default y
    help
      Enables the driver module for Job Rings which are part of
      Freescale's Cryptographic Accelerator
      and Assurance Module (CAAM). This module adds a job ring operation
      interface.

      To compile this driver as a module, choose M here: the module
      will be called caam_jr.

config CRYPTO_DEV_FSL_CAAM_RINGSIZE
    int "Job Ring size"
    depends on CRYPTO_DEV_FSL_CAAM_JR
    range 2 9
    default "9"
    help
      Select size of Job Rings as a power of 2, within the
      range 2-9 (ring size 4-512).
      Examples:
        2 => 4
        3 => 8
        4 => 16
        5 => 32
        6 => 64
        7 => 128
        8 => 256
        9 => 512

config CRYPTO_DEV_FSL_CAAM_INTC
    bool "Job Ring interrupt coalescing"
    depends on CRYPTO_DEV_FSL_CAAM_JR
    default n
    help
      Enable the Job Ring's interrupt coalescing feature.

      Note: the driver already provides adequate
      interrupt coalescing in software.

config CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
    int "Job Ring interrupt coalescing count threshold"
    depends on CRYPTO_DEV_FSL_CAAM_INTC
    range 1 255
    default 255
    help
      Select number of descriptor completions to queue before
      raising an interrupt, in the range 1-255. Note that a selection
      of 1 functionally defeats the coalescing feature, and a selection
      equal or greater than the job ring size will force timeouts.

config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
    int "Job Ring interrupt coalescing timer threshold"
    depends on CRYPTO_DEV_FSL_CAAM_INTC
    range 1 65535
    default 2048
    help
      Select number of bus clocks/64 to timeout in the case that one or
      more descriptor completions are queued without reaching the count
      threshold. Range is 1-65535.

config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
    tristate "Register algorithm implementations with the Crypto API"
    depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
    default y
    select CRYPTO_ALGAPI
    select CRYPTO_AUTHENC
    help
      Selecting this will offload crypto for users of the
      scatterlist crypto API (such as the linux native IPSec
      stack) to the SEC4 via job ring.

      To compile this as a module, choose M here: the module
      will be called caamalg.

config CRYPTO_DEV_FSL_CAAM_AHASH_API
    tristate "Register hash algorithm implementations with Crypto API"
    depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
    default y
    select CRYPTO_HASH
    help
      Selecting this will offload ahash for users of the
      scatterlist crypto API to the SEC4 via job ring.

      To compile this as a module, choose M here: the module
      will be called caamhash.

config CRYPTO_DEV_FSL_CAAM_RNG_API
    tristate "Register caam device for hwrng API"
    depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
    default y
    select CRYPTO_RNG
    select HW_RANDOM
    help
      Selecting this will register the SEC4 hardware rng to
      the hw_random API for supplying the kernel entropy pool.

      To compile this as a module, choose M here: the module
      will be called caamrng.

config CRYPTO_DEV_FSL_CAAM_DEBUG
    bool "Enable debug output in CAAM driver"
    depends on CRYPTO_DEV_FSL_CAAM
    default n
    help
      Selecting this will enable printing of various debug
      information in the CAAM driver.
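The ring-size option above is an exponent, not an entry count: intern.h later in this commit expands it via JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE). A quick standalone check of that mapping, as a hedged sketch in plain C outside the kernel (RINGSIZE is a hypothetical stand-in for the Kconfig value):

    #include <stdio.h>

    /* Mirrors JOBR_DEPTH in intern.h: the Kconfig value is a power-of-2 exponent. */
    #define RINGSIZE 9                     /* stands in for CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE */
    #define JOBR_DEPTH (1 << RINGSIZE)

    int main(void)
    {
        int exp;

        for (exp = 2; exp <= 9; exp++)     /* the allowed Kconfig range */
            printf("exponent %d -> ring size %d\n", exp, 1 << exp);
        printf("default %d -> %d entries\n", RINGSIZE, JOBR_DEPTH);
        return 0;
    }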

drivers/crypto/caam/Makefile (new file, 15 lines)

@@ -0,0 +1,15 @@
#
# Makefile for the CAAM backend and dependent components
#
ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
EXTRA_CFLAGS := -DDEBUG
endif
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
caam-objs := ctrl.o
caam_jr-objs := jr.o key_gen.o error.o

(File diff suppressed because it is too large.)

(File diff suppressed because it is too large.)

drivers/crypto/caam/caamrng.c (new file, 362 lines)

@@ -0,0 +1,362 @@
/*
* caam - Freescale FSL CAAM support for hw_random
*
* Copyright 2011 Freescale Semiconductor, Inc.
*
* Based on caamalg.c crypto API driver.
*
* relationship between job descriptors to shared descriptors:
*
 * ---------------                      --------------
 * | JobDesc #0  |--------------------->| ShareDesc  |
 * | *(buffer 0) |        |------------>| (generate) |
 * ---------------        |             | (move)     |
 *                        |             | (store)    |
 * ---------------        |             --------------
 * | JobDesc #1  |--------|
 * | *(buffer 1) |
 * ---------------
*
* A job desc looks like this:
*
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * ---------------------
*
* The SharedDesc never changes, and each job descriptor points to one of two
* buffers for each device, from which the data will be copied into the
* requested destination
*/
#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
/*
* Maximum buffer size: maximum number of random, cache-aligned bytes that
* will be generated and moved to seq out ptr (extlen not allowed)
*/
#define RN_BUF_SIZE (0xffff / L1_CACHE_BYTES * \
L1_CACHE_BYTES)
/* length of descriptors */
#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
#define DESC_RNG_LEN (10 * CAAM_CMD_SZ)
/* Buffer, its dma address and lock */
struct buf_data {
u8 buf[RN_BUF_SIZE];
dma_addr_t addr;
struct completion filled;
u32 hw_desc[DESC_JOB_O_LEN];
#define BUF_NOT_EMPTY 0
#define BUF_EMPTY 1
#define BUF_PENDING 2 /* Empty, but with job pending --don't submit another */
atomic_t empty;
};
/* rng per-device context */
struct caam_rng_ctx {
struct device *jrdev;
dma_addr_t sh_desc_dma;
u32 sh_desc[DESC_RNG_LEN];
unsigned int cur_buf_idx;
int current_buf;
struct buf_data bufs[2];
};
static struct caam_rng_ctx *rng_ctx;
static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
{
if (bd->addr)
dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
DMA_FROM_DEVICE);
}
static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
{
struct device *jrdev = ctx->jrdev;
if (ctx->sh_desc_dma)
dma_unmap_single(jrdev, ctx->sh_desc_dma, DESC_RNG_LEN,
DMA_TO_DEVICE);
rng_unmap_buf(jrdev, &ctx->bufs[0]);
rng_unmap_buf(jrdev, &ctx->bufs[1]);
}
static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
{
struct buf_data *bd;
bd = (struct buf_data *)((char *)desc -
offsetof(struct buf_data, hw_desc));
if (err)
caam_jr_strstatus(jrdev, err);
atomic_set(&bd->empty, BUF_NOT_EMPTY);
complete(&bd->filled);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
#endif
}
static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
{
struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)];
struct device *jrdev = ctx->jrdev;
u32 *desc = bd->hw_desc;
int err;
dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
init_completion(&bd->filled);
err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
if (err)
complete(&bd->filled); /* don't wait on failed job*/
else
atomic_inc(&bd->empty); /* note if pending */
return err;
}
static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct caam_rng_ctx *ctx = rng_ctx;
struct buf_data *bd = &ctx->bufs[ctx->current_buf];
int next_buf_idx, copied_idx;
int err;
if (atomic_read(&bd->empty)) {
/* try to submit job if there wasn't one */
if (atomic_read(&bd->empty) == BUF_EMPTY) {
err = submit_job(ctx, 1);
/* if can't submit job, can't even wait */
if (err)
return 0;
}
/* no immediate data, so exit if not waiting */
if (!wait)
return 0;
/* waiting for pending job */
if (atomic_read(&bd->empty))
wait_for_completion(&bd->filled);
}
next_buf_idx = ctx->cur_buf_idx + max;
dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n",
__func__, ctx->current_buf, ctx->cur_buf_idx);
/* if enough data in current buffer */
if (next_buf_idx < RN_BUF_SIZE) {
memcpy(data, bd->buf + ctx->cur_buf_idx, max);
ctx->cur_buf_idx = next_buf_idx;
return max;
}
/* else, copy what's left... */
copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx;
memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx);
ctx->cur_buf_idx = 0;
atomic_set(&bd->empty, BUF_EMPTY);
/* ...refill... */
submit_job(ctx, 1);
/* and use next buffer */
ctx->current_buf = !ctx->current_buf;
dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf);
/* since there already is some data read, don't wait */
return copied_idx + caam_read(rng, data + copied_idx,
max - copied_idx, false);
}
static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
{
struct device *jrdev = ctx->jrdev;
u32 *desc = ctx->sh_desc;
init_sh_desc(desc, HDR_SHARE_SERIAL);
/* Propagate errors from shared to job descriptor */
append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
/* Generate random bytes */
append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
/* Store bytes */
append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE);
ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
dev_err(jrdev, "unable to map shared descriptor\n");
return -ENOMEM;
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
desc, desc_bytes(desc), 1);
#endif
return 0;
}
static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
{
struct device *jrdev = ctx->jrdev;
struct buf_data *bd = &ctx->bufs[buf_id];
u32 *desc = bd->hw_desc;
int sh_len = desc_len(ctx->sh_desc);
init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER |
HDR_REVERSE);
bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(jrdev, bd->addr)) {
dev_err(jrdev, "unable to map dst\n");
return -ENOMEM;
}
append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
desc, desc_bytes(desc), 1);
#endif
return 0;
}
static void caam_cleanup(struct hwrng *rng)
{
int i;
struct buf_data *bd;
for (i = 0; i < 2; i++) {
bd = &rng_ctx->bufs[i];
if (atomic_read(&bd->empty) == BUF_PENDING)
wait_for_completion(&bd->filled);
}
rng_unmap_ctx(rng_ctx);
}
static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
{
struct buf_data *bd = &ctx->bufs[buf_id];
int err;
err = rng_create_job_desc(ctx, buf_id);
if (err)
return err;
atomic_set(&bd->empty, BUF_EMPTY);
submit_job(ctx, buf_id == ctx->current_buf);
wait_for_completion(&bd->filled);
return 0;
}
static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
{
int err;
ctx->jrdev = jrdev;
err = rng_create_sh_desc(ctx);
if (err)
return err;
ctx->current_buf = 0;
ctx->cur_buf_idx = 0;
err = caam_init_buf(ctx, 0);
if (err)
return err;
err = caam_init_buf(ctx, 1);
if (err)
return err;
return 0;
}
static struct hwrng caam_rng = {
.name = "rng-caam",
.cleanup = caam_cleanup,
.read = caam_read,
};
static void __exit caam_rng_exit(void)
{
caam_jr_free(rng_ctx->jrdev);
hwrng_unregister(&caam_rng);
kfree(rng_ctx);
}
static int __init caam_rng_init(void)
{
struct device *dev;
struct device_node *dev_node;
struct platform_device *pdev;
struct device *ctrldev;
void *priv;
int err;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
if (!dev_node) {
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
if (!dev_node)
return -ENODEV;
}
pdev = of_find_device_by_node(dev_node);
if (!pdev) {
of_node_put(dev_node);
return -ENODEV;
}
ctrldev = &pdev->dev;
priv = dev_get_drvdata(ctrldev);
of_node_put(dev_node);
/*
* If priv is NULL, it's probably because the caam driver wasn't
* properly initialized (e.g. RNG4 init failed). Thus, bail out here.
*/
if (!priv)
return -ENODEV;
dev = caam_jr_alloc();
if (IS_ERR(dev)) {
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(dev);
}
rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
if (!rng_ctx)
return -ENOMEM;
err = caam_init_rng(rng_ctx, dev);
if (err)
return err;
dev_info(dev, "registering rng-caam\n");
return hwrng_register(&caam_rng);
}
module_init(caam_rng_init);
module_exit(caam_rng_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");

drivers/crypto/caam/compat.h (new file, 40 lines)

@@ -0,0 +1,40 @@
/*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
*/
#ifndef CAAM_COMPAT_H
#define CAAM_COMPAT_H
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hash.h>
#include <linux/hw_random.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
#include <net/xfrm.h>
#include <crypto/algapi.h>
#include <crypto/null.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
#endif /* !defined(CAAM_COMPAT_H) */

drivers/crypto/caam/ctrl.c (new file, 734 lines)

@@ -0,0 +1,734 @@
/*
 * CAAM control-plane driver backend
* Controller-level driver, kernel property detection, initialization
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "desc_constr.h"
#include "error.h"
/*
* Descriptor to instantiate RNG State Handle 0 in normal mode and
* load the JDKEK, TDKEK and TDSK registers
*/
static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
{
u32 *jump_cmd, op_flags;
init_job_desc(desc, 0);
op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
(handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
/* INIT RNG in non-test mode */
append_operation(desc, op_flags);
if (!handle && do_sk) {
/*
* For SH0, Secure Keys must be generated as well
*/
/* wait for done */
jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
set_jump_tgt_here(desc, jump_cmd);
/*
* load 1 to clear written reg:
* resets the done interrupt and returns the RNG to idle.
*/
append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
/* Initialize State Handle */
append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
OP_ALG_AAI_RNG4_SK);
}
append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}
/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
static void build_deinstantiation_desc(u32 *desc, int handle)
{
init_job_desc(desc, 0);
/* Uninstantiate State Handle 0 */
append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
(handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);
append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}
/*
* run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
* the software (no JR/QI used).
* @ctrldev - pointer to device
* @status - descriptor status, after being run
*
* Return: - 0 if no error occurred
* - -ENODEV if the DECO couldn't be acquired
* - -EAGAIN if an error occurred while executing the descriptor
*/
static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
u32 *status)
{
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
struct caam_deco __iomem *deco = ctrlpriv->deco;
unsigned int timeout = 100000;
u32 deco_dbg_reg, flags;
int i;
if (ctrlpriv->virt_en == 1) {
setbits32(&ctrl->deco_rsr, DECORSR_JR0);
while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
--timeout)
cpu_relax();
timeout = 100000;
}
setbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
--timeout)
cpu_relax();
if (!timeout) {
dev_err(ctrldev, "failed to acquire DECO 0\n");
clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
return -ENODEV;
}
for (i = 0; i < desc_len(desc); i++)
wr_reg32(&deco->descbuf[i], *(desc + i));
flags = DECO_JQCR_WHL;
/*
* If the descriptor length is longer than 4 words, then the
* FOUR bit in JRCTRL register must be set.
*/
if (desc_len(desc) >= 4)
flags |= DECO_JQCR_FOUR;
/* Instruct the DECO to execute it */
wr_reg32(&deco->jr_ctl_hi, flags);
timeout = 10000000;
do {
deco_dbg_reg = rd_reg32(&deco->desc_dbg);
/*
* If an error occurred in the descriptor, then
* the DECO status field will be set to 0x0D
*/
if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
DESC_DBG_DECO_STAT_HOST_ERR)
break;
cpu_relax();
} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
*status = rd_reg32(&deco->op_status_hi) &
DECO_OP_STATUS_HI_ERR_MASK;
if (ctrlpriv->virt_en == 1)
clrbits32(&ctrl->deco_rsr, DECORSR_JR0);
/* Mark the DECO as free */
clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
if (!timeout)
return -EAGAIN;
return 0;
}
/*
* instantiate_rng - builds and executes a descriptor on DECO0,
* which initializes the RNG block.
* @ctrldev - pointer to device
* @state_handle_mask - bitmask containing the instantiation status
* for the RNG4 state handles which exist in
* the RNG4 block: 1 if it's been instantiated
by an external entity, 0 otherwise.
* @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
* Caution: this can be done only once; if the keys need to be
* regenerated, a POR is required
*
* Return: - 0 if no error occurred
* - -ENOMEM if there isn't enough memory to allocate the descriptor
* - -ENODEV if DECO0 couldn't be acquired
* - -EAGAIN if an error occurred when executing the descriptor
* e.g. there was an RNG hardware error due to not "good enough"
* entropy being acquired.
*/
static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
int gen_sk)
{
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
struct caam_ctrl __iomem *ctrl;
struct rng4tst __iomem *r4tst;
u32 *desc, status, rdsta_val;
int ret = 0, sh_idx;
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
r4tst = &ctrl->r4tst[0];
desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
if (!desc)
return -ENOMEM;
for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
/*
* If the corresponding bit is set, this state handle
* was initialized by somebody else, so it's left alone.
*/
if ((1 << sh_idx) & state_handle_mask)
continue;
/* Create the descriptor for instantiating RNG State Handle */
build_instantiation_desc(desc, sh_idx, gen_sk);
/* Try to run it through DECO0 */
ret = run_descriptor_deco0(ctrldev, desc, &status);
/*
* If ret is not 0, or descriptor status is not 0, then
* something went wrong. No need to try the next state
* handle (if available), bail out here.
* Also, if for some reason, the State Handle didn't get
* instantiated although the descriptor has finished
* without any error (HW optimizations for later
* CAAM eras), then try again.
*/
rdsta_val =
rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
if (status || !(rdsta_val & (1 << sh_idx)))
ret = -EAGAIN;
if (ret)
break;
dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
/* Clear the contents before recreating the descriptor */
memset(desc, 0x00, CAAM_CMD_SZ * 7);
}
kfree(desc);
return ret;
}
/*
* deinstantiate_rng - builds and executes a descriptor on DECO0,
* which deinitializes the RNG block.
* @ctrldev - pointer to device
* @state_handle_mask - bitmask containing the instantiation status
* for the RNG4 state handles which exist in
* the RNG4 block: 1 if it's been instantiated
*
* Return: - 0 if no error occurred
* - -ENOMEM if there isn't enough memory to allocate the descriptor
* - -ENODEV if DECO0 couldn't be acquired
* - -EAGAIN if an error occurred when executing the descriptor
*/
static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
{
u32 *desc, status;
int sh_idx, ret = 0;
desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
if (!desc)
return -ENOMEM;
for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
/*
* If the corresponding bit is set, then it means the state
* handle was initialized by us, and thus it needs to be
deinitialized as well
*/
if ((1 << sh_idx) & state_handle_mask) {
/*
* Create the descriptor for deinstantating this state
* handle
*/
build_deinstantiation_desc(desc, sh_idx);
/* Try to run it through DECO0 */
ret = run_descriptor_deco0(ctrldev, desc, &status);
if (ret || status) {
dev_err(ctrldev,
"Failed to deinstantiate RNG4 SH%d\n",
sh_idx);
break;
}
dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
}
}
kfree(desc);
return ret;
}
static int caam_remove(struct platform_device *pdev)
{
struct device *ctrldev;
struct caam_drv_private *ctrlpriv;
struct caam_ctrl __iomem *ctrl;
int ring, ret = 0;
ctrldev = &pdev->dev;
ctrlpriv = dev_get_drvdata(ctrldev);
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
/* Remove platform devices for JobRs */
for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
if (ctrlpriv->jrpdev[ring])
of_device_unregister(ctrlpriv->jrpdev[ring]);
}
/* De-initialize RNG state handles initialized by this driver. */
if (ctrlpriv->rng4_sh_init)
deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
/* Shut down debug views */
#ifdef CONFIG_DEBUG_FS
debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif
/* Unmap controller region */
iounmap(&ctrl);
return ret;
}
/*
* kick_trng - sets the various parameters for enabling the initialization
* of the RNG4 block in CAAM
* @pdev - pointer to the platform device
* @ent_delay - Defines the length (in system clocks) of each entropy sample.
*/
static void kick_trng(struct platform_device *pdev, int ent_delay)
{
struct device *ctrldev = &pdev->dev;
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
struct caam_ctrl __iomem *ctrl;
struct rng4tst __iomem *r4tst;
u32 val;
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
r4tst = &ctrl->r4tst[0];
/* put RNG4 into program mode */
setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
/*
* Performance-wise, it does not make sense to
* set the delay to a value that is lower
* than the last one that worked (i.e. the state handles
were instantiated properly). Thus, instead of wasting
* time trying to set the values controlling the sample
* frequency, the function simply returns.
*/
val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
>> RTSDCTL_ENT_DLY_SHIFT;
if (ent_delay <= val) {
/* put RNG4 into run mode */
clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
return;
}
val = rd_reg32(&r4tst->rtsdctl);
val = (val & ~RTSDCTL_ENT_DLY_MASK) |
(ent_delay << RTSDCTL_ENT_DLY_SHIFT);
wr_reg32(&r4tst->rtsdctl, val);
/* min. freq. count, equal to 1/4 of the entropy sample length */
wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
/* disable maximum frequency count */
wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
/* read the control register */
val = rd_reg32(&r4tst->rtmctl);
/*
* select raw sampling in both entropy shifter
* and statistical checker
*/
setbits32(&val, RTMCTL_SAMP_MODE_RAW_ES_SC);
/* put RNG4 into run mode */
clrbits32(&val, RTMCTL_PRGM);
/* write back the control register */
wr_reg32(&r4tst->rtmctl, val);
}
/**
* caam_get_era() - Return the ERA of the SEC on SoC, based
* on "sec-era" propery in the DTS. This property is updated by u-boot.
**/
int caam_get_era(void)
{
struct device_node *caam_node;
for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
const uint32_t *prop = (uint32_t *)of_get_property(caam_node,
"fsl,sec-era",
NULL);
return prop ? *prop : -ENOTSUPP;
}
return -ENOTSUPP;
}
EXPORT_SYMBOL(caam_get_era);
/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
u64 caam_id;
struct device *dev;
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
struct caam_drv_private *ctrlpriv;
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
#endif
u32 scfgr, comp_params;
u32 cha_vid_ls;
int pg_size;
int BLOCK_OFFSET = 0;
ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
GFP_KERNEL);
if (!ctrlpriv)
return -ENOMEM;
dev = &pdev->dev;
dev_set_drvdata(dev, ctrlpriv);
ctrlpriv->pdev = pdev;
nprop = pdev->dev.of_node;
/* Get configuration properties from device tree */
/* First, get register page */
ctrl = of_iomap(nprop, 0);
if (ctrl == NULL) {
dev_err(dev, "caam: of_iomap() failed\n");
return -ENOMEM;
}
/* Finding the page size for using the CTPR_MS register */
comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
/* Allocating the BLOCK_OFFSET based on the supported page size on
* the platform
*/
if (pg_size == 0)
BLOCK_OFFSET = PG_SIZE_4K;
else
BLOCK_OFFSET = PG_SIZE_64K;
ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
ctrlpriv->assure = (struct caam_assurance __force *)
((uint8_t *)ctrl +
BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
);
ctrlpriv->deco = (struct caam_deco __force *)
((uint8_t *)ctrl +
BLOCK_OFFSET * DECO_BLOCK_NUMBER
);
/* Get the IRQ of the controller (for security violations only) */
ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
* long pointers in master configuration register
*/
setbits32(&ctrl->mcr, MCFGR_WDENABLE |
(sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
/*
* Read the Compile Time parameters and SCFGR to determine
* if Virtualization is enabled for this platform
*/
scfgr = rd_reg32(&ctrl->scfgr);
ctrlpriv->virt_en = 0;
if (comp_params & CTPR_MS_VIRT_EN_INCL) {
/* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
* VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
*/
if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
(!(comp_params & CTPR_MS_VIRT_EN_POR) &&
(scfgr & SCFGR_VIRT_EN)))
ctrlpriv->virt_en = 1;
} else {
/* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
if (comp_params & CTPR_MS_VIRT_EN_POR)
ctrlpriv->virt_en = 1;
}
if (ctrlpriv->virt_en == 1)
setbits32(&ctrl->jrstart, JRSTART_JR0_START |
JRSTART_JR1_START | JRSTART_JR2_START |
JRSTART_JR3_START);
if (sizeof(dma_addr_t) == sizeof(u64))
if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
else
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
else
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
/*
* Detect and enable JobRs
* First, find out how many ring spec'ed, allocate references
* for all, then go probe each one.
*/
rspec = 0;
for_each_available_child_of_node(nprop, np)
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
rspec++;
ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
sizeof(struct platform_device *) * rspec,
GFP_KERNEL);
if (ctrlpriv->jrpdev == NULL) {
iounmap(&ctrl);
return -ENOMEM;
}
ring = 0;
ctrlpriv->total_jobrs = 0;
for_each_available_child_of_node(nprop, np)
if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
ctrlpriv->jrpdev[ring] =
of_platform_device_create(np, NULL, dev);
if (!ctrlpriv->jrpdev[ring]) {
pr_warn("JR%d Platform device creation error\n",
ring);
continue;
}
ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
((uint8_t *)ctrl +
(ring + JR_BLOCK_NUMBER) *
BLOCK_OFFSET
);
ctrlpriv->total_jobrs++;
ring++;
}
/* Check to see if QI present. If so, enable */
ctrlpriv->qi_present =
!!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
CTPR_MS_QI_MASK);
if (ctrlpriv->qi_present) {
ctrlpriv->qi = (struct caam_queue_if __force *)
((uint8_t *)ctrl +
BLOCK_OFFSET * QI_BLOCK_NUMBER
);
/* This is all that's required to physically enable QI */
wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
}
/* If no QI and no rings specified, quit and go home */
if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
dev_err(dev, "no queues configured, terminating\n");
caam_remove(pdev);
return -ENOMEM;
}
cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
* already instantiated, do RNG instantiation
*/
if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
ctrlpriv->rng4_sh_init =
rd_reg32(&ctrl->r4tst[0].rdsta);
/*
* If the secure keys (TDKEK, JDKEK, TDSK), were already
* generated, signal this to the function that is instantiating
* the state handles. An error would occur if RNG4 attempts
* to regenerate these keys before the next POR.
*/
gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
do {
int inst_handles =
rd_reg32(&ctrl->r4tst[0].rdsta) &
RDSTA_IFMASK;
/*
* If either SH were instantiated by somebody else
* (e.g. u-boot) then it is assumed that the entropy
* parameters are properly set and thus the function
* setting these (kick_trng(...)) is skipped.
* Also, if a handle was instantiated, do not change
* the TRNG parameters.
*/
if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
dev_info(dev,
"Entropy delay = %u\n",
ent_delay);
kick_trng(pdev, ent_delay);
ent_delay += 400;
}
/*
* if instantiate_rng(...) fails, the loop will rerun
* and the kick_trng(...) function will modify the
* upper and lower limits of the entropy sampling
* interval, leading to a successful initialization of
* the RNG.
*/
ret = instantiate_rng(dev, inst_handles,
gen_sk);
if (ret == -EAGAIN)
/*
* if here, the loop will rerun,
* so don't hog the CPU
*/
cpu_relax();
} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
if (ret) {
dev_err(dev, "failed to instantiate RNG");
caam_remove(pdev);
return ret;
}
/*
* Set handles init'ed by this module as the complement of the
* already initialized ones
*/
ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
/* Enable RDB bit so that RNG works faster */
setbits32(&ctrl->scfgr, SCFGR_RDBENABLE);
}
/* NOTE: RTIC detection ought to go here, around Si time */
caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
(u64)rd_reg32(&ctrl->perfmon.caam_id_ls);
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
caam_get_era());
dev_info(dev, "job rings = %d, qi = %d\n",
ctrlpriv->total_jobrs, ctrlpriv->qi_present);
#ifdef CONFIG_DEBUG_FS
/*
* FIXME: needs better naming distinction, as some amalgamation of
* "caam" and nprop->full_name. The OF name isn't distinctive,
* but does separate instances
*/
perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
/* Controller-level - performance monitor counters */
ctrlpriv->ctl_rq_dequeued =
debugfs_create_u64("rq_dequeued",
S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->req_dequeued);
ctrlpriv->ctl_ob_enc_req =
debugfs_create_u64("ob_rq_encrypted",
S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ob_enc_req);
ctrlpriv->ctl_ib_dec_req =
debugfs_create_u64("ib_rq_decrypted",
S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ib_dec_req);
ctrlpriv->ctl_ob_enc_bytes =
debugfs_create_u64("ob_bytes_encrypted",
S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ob_enc_bytes);
ctrlpriv->ctl_ob_prot_bytes =
debugfs_create_u64("ob_bytes_protected",
S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ob_prot_bytes);
ctrlpriv->ctl_ib_dec_bytes =
debugfs_create_u64("ib_bytes_decrypted",
S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ib_dec_bytes);
ctrlpriv->ctl_ib_valid_bytes =
debugfs_create_u64("ib_bytes_validated",
S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ib_valid_bytes);
/* Controller level - global status values */
ctrlpriv->ctl_faultaddr =
debugfs_create_u64("fault_addr",
S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->faultaddr);
ctrlpriv->ctl_faultdetail =
debugfs_create_u32("fault_detail",
S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->faultdetail);
ctrlpriv->ctl_faultstatus =
debugfs_create_u32("fault_status",
S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->status);
/* Internal covering keys (useful in non-secure mode only) */
ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_kek = debugfs_create_blob("kek",
S_IRUSR |
S_IRGRP | S_IROTH,
ctrlpriv->ctl,
&ctrlpriv->ctl_kek_wrap);
ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
S_IRUSR |
S_IRGRP | S_IROTH,
ctrlpriv->ctl,
&ctrlpriv->ctl_tkek_wrap);
ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
S_IRUSR |
S_IRGRP | S_IROTH,
ctrlpriv->ctl,
&ctrlpriv->ctl_tdsk_wrap);
#endif
return 0;
}
static struct of_device_id caam_match[] = {
{
.compatible = "fsl,sec-v4.0",
},
{
.compatible = "fsl,sec4.0",
},
{},
};
MODULE_DEVICE_TABLE(of, caam_match);
static struct platform_driver caam_driver = {
.driver = {
.name = "caam",
.owner = THIS_MODULE,
.of_match_table = caam_match,
},
.probe = caam_probe,
.remove = caam_remove,
};
module_platform_driver(caam_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
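The RNG bring-up inside caam_probe() is worth calling out: whenever instantiate_rng() returns -EAGAIN, the probe widens the TRNG entropy-sample delay by 400 clocks and tries again, until instantiation succeeds or the delay reaches RTSDCTL_ENT_DLY_MAX. A simplified sketch of that escalation loop, with names taken from caam_probe() but omitting the already-instantiated-handle checks the real code performs:

    int ent_delay = RTSDCTL_ENT_DLY_MIN;
    int ret;

    do {
        kick_trng(pdev, ent_delay);     /* reprogram the sampling window */
        ent_delay += 400;               /* sample longer on the next pass */
        ret = instantiate_rng(dev, inst_handles, gen_sk);
        if (ret == -EAGAIN)
            cpu_relax();                /* loop will rerun; don't hog the CPU */
    } while (ret == -EAGAIN && ent_delay < RTSDCTL_ENT_DLY_MAX);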

drivers/crypto/caam/ctrl.h (new file, 13 lines)

@@ -0,0 +1,13 @@
/*
* CAAM control-plane driver backend public-level include definitions
*
* Copyright 2012 Freescale Semiconductor, Inc.
*/
#ifndef CTRL_H
#define CTRL_H
/* Prototypes for backend-level services exposed to APIs */
int caam_get_era(void);
#endif /* CTRL_H */
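caam_get_era() is the one backend-level service this header exposes; it returns the SEC era or -ENOTSUPP when the device tree carries no era property (see the implementation in ctrl.c). A hedged sketch of a caller (report_era is a hypothetical helper, not part of this commit):

    #include "ctrl.h"

    static void report_era(struct device *dev)
    {
        int era = caam_get_era();

        if (era < 0)
            dev_warn(dev, "SEC era not reported by firmware (%d)\n", era);
        else
            dev_info(dev, "SEC era %d\n", era);
    }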

drivers/crypto/caam/desc.h (new file, 1621 lines)

(File diff suppressed because it is too large.)

drivers/crypto/caam/desc_constr.h (new file, 388 lines)

@@ -0,0 +1,388 @@
/*
* caam descriptor construction helper functions
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
#include "desc.h"
#define IMMEDIATE (1 << 23)
#define CAAM_CMD_SZ sizeof(u32)
#define CAAM_PTR_SZ sizeof(dma_addr_t)
#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
#ifdef DEBUG
#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
&__func__[sizeof("append")]); } while (0)
#else
#define PRINT_POS
#endif
#define SET_OK_NO_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
LDST_SRCDST_WORD_DECOCTRL | \
(LDOFF_CHG_SHARE_OK_NO_PROP << \
LDST_OFFSET_SHIFT))
#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
LDST_SRCDST_WORD_DECOCTRL | \
(LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
#define ENABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
LDST_SRCDST_WORD_DECOCTRL | \
(LDOFF_ENABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
static inline int desc_len(u32 *desc)
{
return *desc & HDR_DESCLEN_MASK;
}
static inline int desc_bytes(void *desc)
{
return desc_len(desc) * CAAM_CMD_SZ;
}
static inline u32 *desc_end(u32 *desc)
{
return desc + desc_len(desc);
}
static inline void *sh_desc_pdb(u32 *desc)
{
return desc + 1;
}
static inline void init_desc(u32 *desc, u32 options)
{
*desc = (options | HDR_ONE) + 1;
}
static inline void init_sh_desc(u32 *desc, u32 options)
{
PRINT_POS;
init_desc(desc, CMD_SHARED_DESC_HDR | options);
}
static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
{
u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
init_sh_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT) + pdb_len) |
options);
}
static inline void init_job_desc(u32 *desc, u32 options)
{
init_desc(desc, CMD_DESC_HDR | options);
}
static inline void append_ptr(u32 *desc, dma_addr_t ptr)
{
dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
*offset = ptr;
(*desc) += CAAM_PTR_SZ / CAAM_CMD_SZ;
}
static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
u32 options)
{
PRINT_POS;
init_job_desc(desc, HDR_SHARED | options |
(len << HDR_START_IDX_SHIFT));
append_ptr(desc, ptr);
}
static inline void append_data(u32 *desc, void *data, int len)
{
u32 *offset = desc_end(desc);
if (len) /* avoid sparse warning: memcpy with byte count of 0 */
memcpy(offset, data, len);
(*desc) += (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
}
static inline void append_cmd(u32 *desc, u32 command)
{
u32 *cmd = desc_end(desc);
*cmd = command;
(*desc)++;
}
#define append_u32 append_cmd
static inline void append_u64(u32 *desc, u64 data)
{
u32 *offset = desc_end(desc);
*offset = upper_32_bits(data);
*(++offset) = lower_32_bits(data);
(*desc) += 2;
}
/* Write command without affecting header, and return pointer to next word */
static inline u32 *write_cmd(u32 *desc, u32 command)
{
*desc = command;
return desc + 1;
}
static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
u32 command)
{
append_cmd(desc, command | len);
append_ptr(desc, ptr);
}
/* Write length after pointer, rather than inside command */
static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
unsigned int len, u32 command)
{
append_cmd(desc, command);
if (!(command & (SQIN_RTO | SQIN_PRE)))
append_ptr(desc, ptr);
append_cmd(desc, len);
}
static inline void append_cmd_data(u32 *desc, void *data, int len,
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
append_data(desc, data, len);
}
#define APPEND_CMD_RET(cmd, op) \
static inline u32 *append_##cmd(u32 *desc, u32 options) \
{ \
u32 *cmd = desc_end(desc); \
PRINT_POS; \
append_cmd(desc, CMD_##op | options); \
return cmd; \
}
APPEND_CMD_RET(jump, JUMP)
APPEND_CMD_RET(move, MOVE)
static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
{
*jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc));
}
static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
{
*move_cmd &= ~MOVE_OFFSET_MASK;
*move_cmd = *move_cmd | ((desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) &
MOVE_OFFSET_MASK);
}
#define APPEND_CMD(cmd, op) \
static inline void append_##cmd(u32 *desc, u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | options); \
}
APPEND_CMD(operation, OPERATION)
#define APPEND_CMD_LEN(cmd, op) \
static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | len | options); \
}
APPEND_CMD_LEN(seq_store, SEQ_STORE)
APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
#define APPEND_CMD_PTR(cmd, op) \
static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
u32 options) \
{ \
PRINT_POS; \
append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
}
APPEND_CMD_PTR(key, KEY)
APPEND_CMD_PTR(load, LOAD)
APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
APPEND_CMD_PTR(fifo_store, FIFO_STORE)
static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
u32 options)
{
u32 cmd_src;
cmd_src = options & LDST_SRCDST_MASK;
append_cmd(desc, CMD_STORE | options | len);
/* The following options do not require pointer */
if (!(cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED ||
cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB ||
cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB_WE ||
cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED_WE))
append_ptr(desc, ptr);
}
#define APPEND_SEQ_PTR_INTLEN(cmd, op) \
static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
unsigned int len, \
u32 options) \
{ \
PRINT_POS; \
if (options & (SQIN_RTO | SQIN_PRE)) \
append_cmd(desc, CMD_SEQ_##op##_PTR | len | options); \
else \
append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \
}
APPEND_SEQ_PTR_INTLEN(in, IN)
APPEND_SEQ_PTR_INTLEN(out, OUT)
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
append_cmd_data(desc, data, len, CMD_##op | options); \
}
APPEND_CMD_PTR_TO_IMM(load, LOAD);
APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
#define APPEND_CMD_PTR_EXTLEN(cmd, op) \
static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
append_cmd_ptr_extlen(desc, ptr, len, CMD_##op | SQIN_EXT | options); \
}
APPEND_CMD_PTR_EXTLEN(seq_in_ptr, SEQ_IN_PTR)
APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_OUT_PTR)
/*
* Determine whether to store length internally or externally depending on
* the size of its type
*/
#define APPEND_CMD_PTR_LEN(cmd, op, type) \
static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
type len, u32 options) \
{ \
PRINT_POS; \
if (sizeof(type) > sizeof(u16)) \
append_##cmd##_extlen(desc, ptr, len, options); \
else \
append_##cmd##_intlen(desc, ptr, len, options); \
}
APPEND_CMD_PTR_LEN(seq_in_ptr, SEQ_IN_PTR, u32)
APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_PTR, u32)
/*
* 2nd variant for commands whose specified immediate length differs
* from length of immediate data provided, e.g., split keys
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | IMMEDIATE | len | options); \
append_data(desc, data, data_len); \
}
APPEND_CMD_PTR_TO_IMM2(key, KEY);
#define APPEND_CMD_RAW_IMM(cmd, op, type) \
static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
u32 options) \
{ \
PRINT_POS; \
append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
append_cmd(desc, immediate); \
}
APPEND_CMD_RAW_IMM(load, LOAD, u32);
/*
* Append math command. Only the last part of destination and source need to
* be specified
*/
#define APPEND_MATH(op, desc, dest, src_0, src_1, len) \
append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32)len);
#define append_math_add(desc, dest, src0, src1, len) \
APPEND_MATH(ADD, desc, dest, src0, src1, len)
#define append_math_sub(desc, dest, src0, src1, len) \
APPEND_MATH(SUB, desc, dest, src0, src1, len)
#define append_math_add_c(desc, dest, src0, src1, len) \
APPEND_MATH(ADDC, desc, dest, src0, src1, len)
#define append_math_sub_b(desc, dest, src0, src1, len) \
APPEND_MATH(SUBB, desc, dest, src0, src1, len)
#define append_math_and(desc, dest, src0, src1, len) \
APPEND_MATH(AND, desc, dest, src0, src1, len)
#define append_math_or(desc, dest, src0, src1, len) \
APPEND_MATH(OR, desc, dest, src0, src1, len)
#define append_math_xor(desc, dest, src0, src1, len) \
APPEND_MATH(XOR, desc, dest, src0, src1, len)
#define append_math_lshift(desc, dest, src0, src1, len) \
APPEND_MATH(LSHIFT, desc, dest, src0, src1, len)
#define append_math_rshift(desc, dest, src0, src1, len) \
APPEND_MATH(RSHIFT, desc, dest, src0, src1, len)
#define append_math_ldshift(desc, dest, src0, src1, len) \
APPEND_MATH(SHLD, desc, dest, src0, src1, len)
/* Exactly one source is IMM. Data is passed in as u32 value */
#define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \
do { \
APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \
append_cmd(desc, data); \
} while (0)
#define append_math_add_imm_u32(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data)
#define append_math_sub_imm_u32(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data)
#define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data)
#define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data)
#define append_math_and_imm_u32(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data)
#define append_math_or_imm_u32(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data)
#define append_math_xor_imm_u32(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data)
#define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data)
#define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data)
/* Exactly one source is IMM. Data is passed in as u64 value */
#define APPEND_MATH_IMM_u64(op, desc, dest, src_0, src_1, data) \
do { \
u32 upper = (data >> 16) >> 16; \
APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ * 2 | \
(upper ? 0 : MATH_IFB)); \
if (upper) \
append_u64(desc, data); \
else \
append_u32(desc, data); \
} while (0)
#define append_math_add_imm_u64(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u64(ADD, desc, dest, src0, src1, data)
#define append_math_sub_imm_u64(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u64(SUB, desc, dest, src0, src1, data)
#define append_math_add_c_imm_u64(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u64(ADDC, desc, dest, src0, src1, data)
#define append_math_sub_b_imm_u64(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u64(SUBB, desc, dest, src0, src1, data)
#define append_math_and_imm_u64(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u64(AND, desc, dest, src0, src1, data)
#define append_math_or_imm_u64(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u64(OR, desc, dest, src0, src1, data)
#define append_math_xor_imm_u64(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u64(XOR, desc, dest, src0, src1, data)
#define append_math_lshift_imm_u64(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
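All of these helpers append words to a plain u32 array whose first word, the header, tracks the running length. As a usage sketch inside the driver, the shared descriptor that caamrng.c builds in rng_create_sh_desc() reduces to four calls (constants from desc.h; the array size is an assumption for illustration):

    u32 desc[16];

    init_sh_desc(desc, HDR_SHARE_SERIAL);   /* header word; desc_len() == 1 */
    append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
    append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
    append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE);
    /* desc_len(desc) now holds the word count, and desc_bytes(desc) the
     * size to hand to dma_map_single() before enqueueing a job. */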

drivers/crypto/caam/error.c (new file, 239 lines)

@@ -0,0 +1,239 @@
/*
* CAAM Error Reporting
*
* Copyright 2009-2011 Freescale Semiconductor, Inc.
*/
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "desc.h"
#include "jr.h"
#include "error.h"
static const struct {
u8 value;
const char *error_text;
} desc_error_list[] = {
{ 0x00, "No error." },
{ 0x01, "SGT Length Error. The descriptor is trying to read more data than is contained in the SGT table." },
{ 0x02, "SGT Null Entry Error." },
{ 0x03, "Job Ring Control Error. There is a bad value in the Job Ring Control register." },
{ 0x04, "Invalid Descriptor Command. The Descriptor Command field is invalid." },
{ 0x05, "Reserved." },
{ 0x06, "Invalid KEY Command" },
{ 0x07, "Invalid LOAD Command" },
{ 0x08, "Invalid STORE Command" },
{ 0x09, "Invalid OPERATION Command" },
{ 0x0A, "Invalid FIFO LOAD Command" },
{ 0x0B, "Invalid FIFO STORE Command" },
{ 0x0C, "Invalid MOVE/MOVE_LEN Command" },
{ 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is invalid because the target is not a Job Header Command, or the jump is from a Trusted Descriptor to a Job Descriptor, or because the target Descriptor contains a Shared Descriptor." },
{ 0x0E, "Invalid MATH Command" },
{ 0x0F, "Invalid SIGNATURE Command" },
{ 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO LOAD, or SEQ FIFO STORE decremented the input or output sequence length below 0. This error may result if a built-in PROTOCOL Command has encountered a malformed PDU." },
{ 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
{ 0x12, "Shared Descriptor Header Error" },
{ 0x13, "Header Error. Invalid length or parity, or certain other problems." },
{ 0x14, "Burster Error. Burster has gotten to an illegal state" },
{ 0x15, "Context Register Length Error. The descriptor is trying to read or write past the end of the Context Register. A SEQ LOAD or SEQ STORE with the VLF bit set was executed with too large a length in the variable length register (VSOL for SEQ STORE or VSIL for SEQ LOAD)." },
{ 0x16, "DMA Error" },
{ 0x17, "Reserved." },
{ 0x1A, "Job failed due to JR reset" },
{ 0x1B, "Job failed due to Fail Mode" },
{ 0x1C, "DECO Watchdog timer timeout error" },
{ 0x1D, "DECO tried to copy a key from another DECO but the other DECO's Key Registers were locked" },
{ 0x1E, "DECO attempted to copy data from a DECO that had an unmasked Descriptor error" },
{ 0x1F, "LIODN error. DECO was trying to share from itself or from another DECO but the two Non-SEQ LIODN values didn't match or the 'shared from' DECO's Descriptor required that the SEQ LIODNs be the same and they aren't." },
{ 0x20, "DECO has completed a reset initiated via the DRR register" },
{ 0x21, "Nonce error. When using EKT (CCM) key encryption option in the FIFO STORE Command, the Nonce counter reached its maximum value and this encryption mode can no longer be used." },
{ 0x22, "Meta data is too large (> 511 bytes) for TLS decap (input frame; block ciphers) and IPsec decap (output frame, when doing the next header byte update) and DCRC (output frame)." },
{ 0x23, "Read Input Frame error" },
{ 0x24, "JDKEK, TDKEK or TDSK not loaded error" },
{ 0x80, "DNR (do not run) error" },
{ 0x81, "undefined protocol command" },
{ 0x82, "invalid setting in PDB" },
{ 0x83, "Anti-replay LATE error" },
{ 0x84, "Anti-replay REPLAY error" },
{ 0x85, "Sequence number overflow" },
{ 0x86, "Sigver invalid signature" },
{ 0x87, "DSA Sign Illegal test descriptor" },
{ 0x88, "Protocol Format Error - A protocol has seen an error in the format of data received. When running RSA, this means that formatting with random padding was used, and did not follow the form: 0x00, 0x02, 8-to-N bytes of non-zero pad, 0x00, F data." },
{ 0x89, "Protocol Size Error - A protocol has seen an error in size. When running RSA, pdb size N < (size of F) when no formatting is used; or pdb size N < (F + 11) when formatting is used." },
{ 0xC1, "Blob Command error: Undefined mode" },
{ 0xC2, "Blob Command error: Secure Memory Blob mode error" },
{ 0xC4, "Blob Command error: Black Blob key or input size error" },
{ 0xC5, "Blob Command error: Invalid key destination" },
{ 0xC8, "Blob Command error: Trusted/Secure mode error" },
{ 0xF0, "IPsec TTL or hop limit field either came in as 0, or was decremented to 0" },
{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
};
static const char * const cha_id_list[] = {
"",
"AES",
"DES",
"ARC4",
"MDHA",
"RNG",
"SNOW f8",
"Kasumi f8/9",
"PKHA",
"CRCA",
"SNOW f9",
"ZUCE",
"ZUCA",
};
static const char * const err_id_list[] = {
"No error.",
"Mode error.",
"Data size error.",
"Key size error.",
"PKHA A memory size error.",
"PKHA B memory size error.",
"Data arrived out of sequence error.",
"PKHA divide-by-zero error.",
"PKHA modulus even error.",
"DES key parity error.",
"ICV check failed.",
"Hardware error.",
"Unsupported CCM AAD size.",
"Class 1 CHA is not reset",
"Invalid CHA combination was selected",
"Invalid CHA selected.",
};
static const char * const rng_err_id_list[] = {
"",
"",
"",
"Instantiate",
"Not instantiated",
"Test instantiate",
"Prediction resistance",
"Prediction resistance and test request",
"Uninstantiate",
"Secure key generation",
};
static void report_ccb_status(struct device *jrdev, const u32 status,
const char *error)
{
u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
JRSTA_CCBERR_CHAID_SHIFT;
u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
JRSTA_DECOERR_INDEX_SHIFT;
char *idx_str;
const char *cha_str = "unidentified cha_id value 0x";
char cha_err_code[3] = { 0 };
const char *err_str = "unidentified err_id value 0x";
char err_err_code[3] = { 0 };
if (status & JRSTA_DECOERR_JUMP)
idx_str = "jump tgt desc idx";
else
idx_str = "desc idx";
if (cha_id < ARRAY_SIZE(cha_id_list))
cha_str = cha_id_list[cha_id];
else
snprintf(cha_err_code, sizeof(cha_err_code), "%02x", cha_id);
if ((cha_id << JRSTA_CCBERR_CHAID_SHIFT) == JRSTA_CCBERR_CHAID_RNG &&
err_id < ARRAY_SIZE(rng_err_id_list) &&
strlen(rng_err_id_list[err_id])) {
/* RNG-only error */
err_str = rng_err_id_list[err_id];
} else if (err_id < ARRAY_SIZE(err_id_list))
err_str = err_id_list[err_id];
else
snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",
status, error, idx_str, idx,
cha_str, cha_err_code,
err_str, err_err_code);
}
static void report_jump_status(struct device *jrdev, const u32 status,
const char *error)
{
dev_err(jrdev, "%08x: %s: %s() not implemented\n",
status, error, __func__);
}
static void report_deco_status(struct device *jrdev, const u32 status,
const char *error)
{
u8 err_id = status & JRSTA_DECOERR_ERROR_MASK;
u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
JRSTA_DECOERR_INDEX_SHIFT;
char *idx_str;
const char *err_str = "unidentified error value 0x";
char err_err_code[3] = { 0 };
int i;
if (status & JRSTA_DECOERR_JUMP)
idx_str = "jump tgt desc idx";
else
idx_str = "desc idx";
for (i = 0; i < ARRAY_SIZE(desc_error_list); i++)
if (desc_error_list[i].value == err_id)
break;
if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text)
err_str = desc_error_list[i].error_text;
else
snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
dev_err(jrdev, "%08x: %s: %s %d: %s%s\n",
status, error, idx_str, idx, err_str, err_err_code);
}
static void report_jr_status(struct device *jrdev, const u32 status,
const char *error)
{
dev_err(jrdev, "%08x: %s: %s() not implemented\n",
status, error, __func__);
}
static void report_cond_code_status(struct device *jrdev, const u32 status,
const char *error)
{
dev_err(jrdev, "%08x: %s: %s() not implemented\n",
status, error, __func__);
}
void caam_jr_strstatus(struct device *jrdev, u32 status)
{
static const struct stat_src {
void (*report_ssed)(struct device *jrdev, const u32 status,
const char *error);
const char *error;
} status_src[] = {
{ NULL, "No error" },
{ NULL, NULL },
{ report_ccb_status, "CCB" },
{ report_jump_status, "Jump" },
{ report_deco_status, "DECO" },
{ NULL, NULL },
{ report_jr_status, "Job Ring" },
{ report_cond_code_status, "Condition Code" },
};
u32 ssrc = status >> JRSTA_SSRC_SHIFT;
const char *error = status_src[ssrc].error;
/*
* If there is no further error handling function, just
* print the error code, error string and exit. Otherwise
* call the handler function.
*/
if (!status_src[ssrc].report_ssed)
dev_err(jrdev, "%08x: %s: \n", status, status_src[ssrc].error);
else
status_src[ssrc].report_ssed(jrdev, status, error);
}
EXPORT_SYMBOL(caam_jr_strstatus);
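caam_jr_strstatus() is meant to be called from job-ring completion callbacks with the raw status word; rng_done() in caamrng.c is the in-tree pattern. Sketched here with a hypothetical callback name (the signature is the one caam_jr_enqueue() expects):

    /* completion callback shape expected by caam_jr_enqueue() */
    static void my_done(struct device *jrdev, u32 *desc, u32 err, void *context)
    {
        if (err)                        /* nonzero: decode the SSRC field and log it */
            caam_jr_strstatus(jrdev, err);
        /* ...unmap buffers and complete the request... */
    }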

drivers/crypto/caam/error.h (new file, 11 lines)

@@ -0,0 +1,11 @@
/*
* CAAM Error Reporting code header
*
* Copyright 2009-2011 Freescale Semiconductor, Inc.
*/
#ifndef CAAM_ERROR_H
#define CAAM_ERROR_H
#define CAAM_ERROR_STR_MAX 302
void caam_jr_strstatus(struct device *jrdev, u32 status);
#endif /* CAAM_ERROR_H */

drivers/crypto/caam/intern.h (new file, 113 lines)

@@ -0,0 +1,113 @@
/*
* CAAM/SEC 4.x driver backend
* Private/internal definitions between modules
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
*
*/
#ifndef INTERN_H
#define INTERN_H
/* Currently comes from Kconfig param as a ^2 (driver-required) */
#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
/* Kconfig params for interrupt coalescing if selected (else zero) */
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_INTC
#define JOBR_INTC JRCFG_ICEN
#define JOBR_INTC_TIME_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
#define JOBR_INTC_COUNT_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
#else
#define JOBR_INTC 0
#define JOBR_INTC_TIME_THLD 0
#define JOBR_INTC_COUNT_THLD 0
#endif
/*
* Storage for tracking each in-process entry moving across a ring
* Each entry on an output ring needs one of these
*/
struct caam_jrentry_info {
void (*callbk)(struct device *dev, u32 *desc, u32 status, void *arg);
void *cbkarg; /* Argument per ring entry */
u32 *desc_addr_virt; /* Stored virt addr for postprocessing */
dma_addr_t desc_addr_dma; /* Stored bus addr for done matching */
u32 desc_size; /* Stored size for postprocessing, header derived */
};
/* Private sub-storage for a single JobR */
struct caam_drv_private_jr {
struct list_head list_node; /* Job Ring device list */
struct device *dev;
int ridx;
struct caam_job_ring __iomem *rregs; /* JobR's register space */
struct tasklet_struct irqtask;
int irq; /* One per queue */
/* Number of scatterlist crypt transforms active on the JobR */
atomic_t tfm_count ____cacheline_aligned;
/* Job ring info */
int ringsize; /* Size of rings (assume input = output) */
struct caam_jrentry_info *entinfo; /* Alloc'ed 1 per ring entry */
spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
int inp_ring_write_index; /* Input index "tail" */
int head; /* entinfo (s/w ring) head index */
dma_addr_t *inpring; /* Base of input ring, alloc DMA-safe */
spinlock_t outlock ____cacheline_aligned; /* Output ring index lock */
int out_ring_read_index; /* Output index "tail" */
int tail; /* entinfo (s/w ring) tail index */
struct jr_outentry *outring; /* Base of output ring, DMA-safe */
};
/*
* Driver-private storage for a single CAAM block instance
*/
struct caam_drv_private {
struct device *dev;
struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
struct platform_device *pdev;
/* Physical-presence section */
struct caam_ctrl __iomem *ctrl; /* controller region */
struct caam_deco __iomem *deco; /* DECO/CCB views */
struct caam_assurance __iomem *assure;
struct caam_queue_if __iomem *qi; /* QI control region */
struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
/*
* Detected geometry block. Filled in from device tree if powerpc,
* or from register-based version detection code
*/
u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present; /* Nonzero if QI present in device */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
#define RNG4_MAX_HANDLES 2
/* RNG4 block */
u32 rng4_sh_init; /* This bitmap shows which of the State
Handles of the RNG4 block are initialized
by this driver */
/*
* debugfs entries for developer view into driver/device
* variables at runtime.
*/
#ifdef CONFIG_DEBUG_FS
struct dentry *dfs_root;
struct dentry *ctl; /* controller dir */
struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
#endif
};
void caam_jr_algapi_init(struct device *dev);
void caam_jr_algapi_remove(struct device *dev);
#endif /* INTERN_H */

541
drivers/crypto/caam/jr.c Normal file
@ -0,0 +1,541 @@
/*
* CAAM/SEC 4.x transport/backend driver
* JobR backend functionality
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
*/
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include "compat.h"
#include "regs.h"
#include "jr.h"
#include "desc.h"
#include "intern.h"
struct jr_driver_data {
/* List of Physical JobR's with the Driver */
struct list_head jr_list;
spinlock_t jr_alloc_lock; /* jr_list lock */
} ____cacheline_aligned;
static struct jr_driver_data driver_data;
static int caam_reset_hw_jr(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
unsigned int timeout = 100000;
/*
* mask interrupts since we are going to poll
* for reset completion status
*/
setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
/* initiate flush (required prior to reset) */
wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
JRINT_ERR_HALT_INPROGRESS) && --timeout)
cpu_relax();
if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
JRINT_ERR_HALT_COMPLETE || timeout == 0) {
dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
return -EIO;
}
/* initiate reset */
timeout = 100000;
wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
cpu_relax();
if (timeout == 0) {
dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
return -EIO;
}
/* unmask interrupts */
clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
return 0;
}
/*
* Shutdown JobR independent of platform property code
*/
int caam_jr_shutdown(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
dma_addr_t inpbusaddr, outbusaddr;
int ret;
ret = caam_reset_hw_jr(dev);
tasklet_kill(&jrp->irqtask);
/* Release interrupt */
free_irq(jrp->irq, dev);
/* Free rings */
inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
outbusaddr = rd_reg64(&jrp->rregs->outring_base);
dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
jrp->inpring, inpbusaddr);
dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
jrp->outring, outbusaddr);
kfree(jrp->entinfo);
return ret;
}
static int caam_jr_remove(struct platform_device *pdev)
{
int ret;
struct device *jrdev;
struct caam_drv_private_jr *jrpriv;
jrdev = &pdev->dev;
jrpriv = dev_get_drvdata(jrdev);
/*
 * Return -EBUSY if the job ring is still allocated to a caller.
 */
if (atomic_read(&jrpriv->tfm_count)) {
dev_err(jrdev, "Device is busy\n");
return -EBUSY;
}
/* Remove the node from Physical JobR list maintained by driver */
spin_lock(&driver_data.jr_alloc_lock);
list_del(&jrpriv->list_node);
spin_unlock(&driver_data.jr_alloc_lock);
/* Release ring */
ret = caam_jr_shutdown(jrdev);
if (ret)
dev_err(jrdev, "Failed to shut down job ring\n");
irq_dispose_mapping(jrpriv->irq);
return ret;
}
/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
struct device *dev = st_dev;
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
u32 irqstate;
/*
* Check the output ring for ready responses, kick
* tasklet if jobs done.
*/
irqstate = rd_reg32(&jrp->rregs->jrintstatus);
if (!irqstate)
return IRQ_NONE;
/*
* If JobR error, we got more development work to do
* Flag a bug now, but we really need to shut down and
* restart the queue (and fix code).
*/
if (irqstate & JRINT_JR_ERROR) {
dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
BUG();
}
/* mask valid interrupts */
setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
/* Have valid interrupt at this point, just ACK and trigger */
wr_reg32(&jrp->rregs->jrintstatus, irqstate);
preempt_disable();
tasklet_schedule(&jrp->irqtask);
preempt_enable();
return IRQ_HANDLED;
}
/* Deferred service handler, run as interrupt-fired tasklet */
static void caam_jr_dequeue(unsigned long devarg)
{
int hw_idx, sw_idx, i, head, tail;
struct device *dev = (struct device *)devarg;
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
u32 *userdesc, userstatus;
void *userarg;
while (rd_reg32(&jrp->rregs->outring_used)) {
head = ACCESS_ONCE(jrp->head);
spin_lock(&jrp->outlock);
sw_idx = tail = jrp->tail;
hw_idx = jrp->out_ring_read_index;
for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
sw_idx = (tail + i) & (JOBR_DEPTH - 1);
smp_read_barrier_depends();
if (jrp->outring[hw_idx].desc ==
jrp->entinfo[sw_idx].desc_addr_dma)
break; /* found */
}
/* we should never fail to find a matching descriptor */
BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
/* Unmap just-run descriptor so we can post-process */
dma_unmap_single(dev, jrp->outring[hw_idx].desc,
jrp->entinfo[sw_idx].desc_size,
DMA_TO_DEVICE);
/* mark completed, avoid matching on a recycled desc addr */
jrp->entinfo[sw_idx].desc_addr_dma = 0;
/* Stash callback params for use outside of lock */
usercall = jrp->entinfo[sw_idx].callbk;
userarg = jrp->entinfo[sw_idx].cbkarg;
userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
userstatus = jrp->outring[hw_idx].jrstatus;
/* set done */
wr_reg32(&jrp->rregs->outring_rmvd, 1);
jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
(JOBR_DEPTH - 1);
/*
* if this job completed out-of-order, do not increment
* the tail. Otherwise, increment tail by 1 plus the
* number of subsequent jobs already completed out-of-order
*/
if (sw_idx == tail) {
do {
tail = (tail + 1) & (JOBR_DEPTH - 1);
smp_read_barrier_depends();
} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
jrp->entinfo[tail].desc_addr_dma == 0);
jrp->tail = tail;
}
spin_unlock(&jrp->outlock);
/* Finally, execute user's callback */
usercall(dev, userdesc, userstatus, userarg);
}
/* reenable / unmask IRQs */
clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
}
/**
* caam_jr_alloc() - Allocate a job ring for someone to use as needed.
*
* Returns: pointer to the job ring device selected for the caller (the
* least-loaded ring), or ERR_PTR(-ENODEV) if no ring is available.
**/
struct device *caam_jr_alloc(void)
{
struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
struct device *dev = NULL;
int min_tfm_cnt = INT_MAX;
int tfm_cnt;
spin_lock(&driver_data.jr_alloc_lock);
if (list_empty(&driver_data.jr_list)) {
spin_unlock(&driver_data.jr_alloc_lock);
return ERR_PTR(-ENODEV);
}
list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
tfm_cnt = atomic_read(&jrpriv->tfm_count);
if (tfm_cnt < min_tfm_cnt) {
min_tfm_cnt = tfm_cnt;
min_jrpriv = jrpriv;
}
if (!min_tfm_cnt)
break;
}
if (min_jrpriv) {
atomic_inc(&min_jrpriv->tfm_count);
dev = min_jrpriv->dev;
}
spin_unlock(&driver_data.jr_alloc_lock);
return dev;
}
EXPORT_SYMBOL(caam_jr_alloc);
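/*
 * Illustrative use (caller context assumed):
 *
 *	struct device *jrdev = caam_jr_alloc();
 *
 *	if (IS_ERR(jrdev))
 *		return PTR_ERR(jrdev);
 *	...submit jobs via caam_jr_enqueue()...
 *	caam_jr_free(jrdev);
 */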
/**
* caam_jr_free() - Free the Job Ring
* @rdev: job ring device to be released, as returned by caam_jr_alloc().
**/
void caam_jr_free(struct device *rdev)
{
struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
atomic_dec(&jrpriv->tfm_count);
}
EXPORT_SYMBOL(caam_jr_free);
/**
* caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
* -EBUSY if the queue is full, -EIO if it cannot map the caller's
* descriptor.
* @dev: device of the job ring to be used. This device should have
* been assigned prior by caam_jr_register().
* @desc: points to a job descriptor that execute our request. All
* descriptors (and all referenced data) must be in a DMAable
* region, and all data references must be physical addresses
* accessible to CAAM (i.e. within a PAMU window granted
* to it).
* @cbk: pointer to a callback function to be invoked upon completion
* of this request. This has the form:
* callback(struct device *dev, u32 *desc, u32 status, void *areq)
* where:
* @dev: contains the job ring device that processed this
* response.
* @desc: descriptor that initiated the request, same as
* "desc" being argued to caam_jr_enqueue().
* @status: untranslated status received from CAAM. See the
* reference manual for a detailed description of
* error meaning, or see the JRSTA definitions in the
* register header file.
* @areq: optional pointer to the argument passed with the
* original request.
* @areq: optional pointer to a user argument for use at callback
* time.
**/
int caam_jr_enqueue(struct device *dev, u32 *desc,
void (*cbk)(struct device *dev, u32 *desc,
u32 status, void *areq),
void *areq)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
struct caam_jrentry_info *head_entry;
int head, tail, desc_size;
dma_addr_t desc_dma;
desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, desc_dma)) {
dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
return -EIO;
}
spin_lock_bh(&jrp->inplock);
head = jrp->head;
tail = ACCESS_ONCE(jrp->tail);
if (!rd_reg32(&jrp->rregs->inpring_avail) ||
CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
spin_unlock_bh(&jrp->inplock);
dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
return -EBUSY;
}
head_entry = &jrp->entinfo[head];
head_entry->desc_addr_virt = desc;
head_entry->desc_size = desc_size;
head_entry->callbk = (void *)cbk;
head_entry->cbkarg = areq;
head_entry->desc_addr_dma = desc_dma;
jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
smp_wmb();
jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
(JOBR_DEPTH - 1);
jrp->head = (head + 1) & (JOBR_DEPTH - 1);
wr_reg32(&jrp->rregs->inpring_jobadd, 1);
spin_unlock_bh(&jrp->inplock);
return 0;
}
EXPORT_SYMBOL(caam_jr_enqueue);
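/*
 * Minimal illustrative caller (not part of the driver): submit one
 * prebuilt descriptor and sleep until the callback fires. This mirrors
 * the completion pattern gen_split_key() uses in key_gen.c; the
 * jr_example_* names are hypothetical.
 */
struct jr_example_result {
struct completion completion;
u32 status;
};
static void jr_example_done(struct device *dev, u32 *desc, u32 status,
void *arg)
{
struct jr_example_result *res = arg;
res->status = status; /* untranslated JRSTA value */
complete(&res->completion);
}
static int __maybe_unused jr_example_submit(struct device *jrdev, u32 *desc)
{
struct jr_example_result res;
int ret;
init_completion(&res.completion);
ret = caam_jr_enqueue(jrdev, desc, jr_example_done, &res);
if (ret)
return ret; /* -EBUSY if ring full, -EIO if map failed */
wait_for_completion(&res.completion);
return res.status ? -EIO : 0;
}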
/*
* Init JobR independent of platform property detection
*/
static int caam_jr_init(struct device *dev)
{
struct caam_drv_private_jr *jrp;
dma_addr_t inpbusaddr, outbusaddr;
int i, error;
jrp = dev_get_drvdata(dev);
tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
/* Connect job ring interrupt handler. */
error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
dev_name(dev), dev);
if (error) {
dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
jrp->ridx, jrp->irq);
irq_dispose_mapping(jrp->irq);
jrp->irq = 0;
return -EINVAL;
}
error = caam_reset_hw_jr(dev);
if (error)
return error;
jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
&inpbusaddr, GFP_KERNEL);
jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
GFP_KERNEL);
if ((jrp->inpring == NULL) || (jrp->outring == NULL) ||
(jrp->entinfo == NULL)) {
dev_err(dev, "can't allocate job rings for %d\n",
jrp->ridx);
return -ENOMEM;
}
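/*
 * Nonzero initial value: a slot is only treated as completed once its
 * desc_addr_dma is cleared to 0 by the dequeue path.
 */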
for (i = 0; i < JOBR_DEPTH; i++)
jrp->entinfo[i].desc_addr_dma = !0;
/* Setup rings */
jrp->inp_ring_write_index = 0;
jrp->out_ring_read_index = 0;
jrp->head = 0;
jrp->tail = 0;
wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
wr_reg64(&jrp->rregs->outring_base, outbusaddr);
wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
jrp->ringsize = JOBR_DEPTH;
spin_lock_init(&jrp->inplock);
spin_lock_init(&jrp->outlock);
/* Select interrupt coalescing parameters */
setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
(JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
(JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
return 0;
}
/*
* Probe routine for each detected JobR subsystem.
*/
static int caam_jr_probe(struct platform_device *pdev)
{
struct device *jrdev;
struct device_node *nprop;
struct caam_job_ring __iomem *ctrl;
struct caam_drv_private_jr *jrpriv;
static int total_jobrs;
int error;
jrdev = &pdev->dev;
jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr),
GFP_KERNEL);
if (!jrpriv)
return -ENOMEM;
dev_set_drvdata(jrdev, jrpriv);
/* save ring identity relative to detection */
jrpriv->ridx = total_jobrs++;
nprop = pdev->dev.of_node;
/* Get configuration properties from device tree */
/* First, get register page */
ctrl = of_iomap(nprop, 0);
if (!ctrl) {
dev_err(jrdev, "of_iomap() failed\n");
return -ENOMEM;
}
jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
if (sizeof(dma_addr_t) == sizeof(u64)) {
if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
else
dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
} else {
dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
}
/* Identify the interrupt */
jrpriv->irq = irq_of_parse_and_map(nprop, 0);
/* Now do the platform independent part */
error = caam_jr_init(jrdev); /* now turn on hardware */
if (error)
return error;
jrpriv->dev = jrdev;
spin_lock(&driver_data.jr_alloc_lock);
list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
spin_unlock(&driver_data.jr_alloc_lock);
atomic_set(&jrpriv->tfm_count, 0);
return 0;
}
static struct of_device_id caam_jr_match[] = {
{
.compatible = "fsl,sec-v4.0-job-ring",
},
{
.compatible = "fsl,sec4.0-job-ring",
},
{},
};
MODULE_DEVICE_TABLE(of, caam_jr_match);
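/*
 * A matching device tree node looks like this (illustrative values;
 * reg and interrupts depend on the SoC):
 *
 *	jr@1000 {
 *		compatible = "fsl,sec-v4.0-job-ring";
 *		reg = <0x1000 0x1000>;
 *		interrupts = <88 2 0 0>;
 *	};
 */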
static struct platform_driver caam_jr_driver = {
.driver = {
.name = "caam_jr",
.owner = THIS_MODULE,
.of_match_table = caam_jr_match,
},
.probe = caam_jr_probe,
.remove = caam_jr_remove,
};
static int __init jr_driver_init(void)
{
spin_lock_init(&driver_data.jr_alloc_lock);
INIT_LIST_HEAD(&driver_data.jr_list);
return platform_driver_register(&caam_jr_driver);
}
static void __exit jr_driver_exit(void)
{
platform_driver_unregister(&caam_jr_driver);
}
module_init(jr_driver_init);
module_exit(jr_driver_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM JR request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");

18
drivers/crypto/caam/jr.h Normal file
@ -0,0 +1,18 @@
/*
* CAAM public-level include definitions for the JobR backend
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
*/
#ifndef JR_H
#define JR_H
/* Prototypes for backend-level services exposed to APIs */
struct device *caam_jr_alloc(void);
void caam_jr_free(struct device *rdev);
int caam_jr_enqueue(struct device *dev, u32 *desc,
void (*cbk)(struct device *dev, u32 *desc, u32 status,
void *areq),
void *areq);
#endif /* JR_H */

123
drivers/crypto/caam/key_gen.c Normal file
@ -0,0 +1,123 @@
/*
* CAAM/SEC 4.x functions for handling key-generation jobs
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
*
*/
#include "compat.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "key_gen.h"
void split_key_done(struct device *dev, u32 *desc, u32 err,
void *context)
{
struct split_key_result *res = context;
#ifdef DEBUG
dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
if (err)
caam_jr_strstatus(dev, err);
res->err = err;
complete(&res->completion);
}
EXPORT_SYMBOL(split_key_done);
/*
get a split ipad/opad key
Split key generation-----------------------------------------------
[00] 0xb0810008 jobdesc: stidx=1 share=never len=8
[01] 0x04000014 key: class2->keyreg len=20
@0xffe01000
[03] 0x84410014 operation: cls2-op sha1 hmac init dec
[04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm
[05] 0xa4000001 jump: class2 local all ->1 [06]
[06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
@0xffe04000
*/
int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
int split_key_pad_len, const u8 *key_in, u32 keylen,
u32 alg_op)
{
u32 *desc;
struct split_key_result result;
dma_addr_t dma_addr_in, dma_addr_out;
int ret = -ENOMEM;
desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
return ret;
}
dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, dma_addr_in)) {
dev_err(jrdev, "unable to map key input memory\n");
goto out_free;
}
dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(jrdev, dma_addr_out)) {
dev_err(jrdev, "unable to map key output memory\n");
goto out_unmap_in;
}
init_job_desc(desc, 0);
append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
/* Sets MDHA up into an HMAC-INIT */
append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
/*
* do a FIFO_LOAD of zero, this will trigger the internal key expansion
* into both pads inside MDHA
*/
append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
/*
* FIFO_STORE with the explicit split-key content store
* (0x26 output type)
*/
append_fifo_store(desc, dma_addr_out, split_key_len,
LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
result.err = 0;
init_completion(&result.completion);
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
wait_for_completion_interruptible(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key_out,
split_key_pad_len, 1);
#endif
}
dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
DMA_FROM_DEVICE);
out_unmap_in:
dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
out_free:
kfree(desc);
return ret;
}
EXPORT_SYMBOL(gen_split_key);
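/*
 * Illustrative call (lengths are assumptions): generate a split HMAC-SHA1
 * key, where 40 matches the mdsplit length in the job dump above and 64
 * is a padded output length chosen for this example; alg_op uses the
 * OP_ALG_* definitions from desc.h.
 *
 *	ret = gen_split_key(jrdev, key_out, 40, 64, key_in, keylen,
 *			    OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC);
 */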

17
drivers/crypto/caam/key_gen.h Normal file
@ -0,0 +1,17 @@
/*
* CAAM/SEC 4.x definitions for handling key-generation jobs
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
*
*/
struct split_key_result {
struct completion completion;
int err;
};
void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
int split_key_pad_len, const u8 *key_in, u32 keylen,
u32 alg_op);

402
drivers/crypto/caam/pdb.h Normal file
@ -0,0 +1,402 @@
/*
* CAAM Protocol Data Block (PDB) definition header file
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
*
*/
#ifndef CAAM_PDB_H
#define CAAM_PDB_H
/*
* PDB- IPSec ESP Header Modification Options
*/
#define PDBHMO_ESP_DECAP_SHIFT 12
#define PDBHMO_ESP_ENCAP_SHIFT 4
/*
* Encap and Decap - Decrement TTL (Hop Limit) - Based on the value of the
* Options Byte IP version (IPvsn) field:
* if IPv4, decrement the inner IP header TTL field (byte 8);
* if IPv6 decrement the inner IP header Hop Limit field (byte 7).
*/
#define PDBHMO_ESP_DECAP_DEC_TTL (0x02 << PDBHMO_ESP_DECAP_SHIFT)
#define PDBHMO_ESP_ENCAP_DEC_TTL (0x02 << PDBHMO_ESP_ENCAP_SHIFT)
/*
* Decap - DiffServ Copy - Copy the IPv4 TOS or IPv6 Traffic Class byte
* from the outer IP header to the inner IP header.
*/
#define PDBHMO_ESP_DIFFSERV (0x01 << PDBHMO_ESP_DECAP_SHIFT)
/*
* Encap- Copy DF bit -if an IPv4 tunnel mode outer IP header is coming from
* the PDB, copy the DF bit from the inner IP header to the outer IP header.
*/
#define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT)
/*
* PDB - IPSec ESP Encap/Decap Options
*/
#define PDBOPTS_ESP_ARSNONE 0x00 /* no antireplay window */
#define PDBOPTS_ESP_ARS32 0x40 /* 32-entry antireplay window */
#define PDBOPTS_ESP_ARS64 0xc0 /* 64-entry antireplay window */
#define PDBOPTS_ESP_IVSRC 0x20 /* IV comes from internal random gen */
#define PDBOPTS_ESP_ESN 0x10 /* extended sequence included */
#define PDBOPTS_ESP_OUTFMT 0x08 /* output only decapsulation (decap) */
#define PDBOPTS_ESP_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
#define PDBOPTS_ESP_INCIPHDR 0x04 /* Prepend IP header to output frame */
#define PDBOPTS_ESP_IPVSN 0x02 /* process IPv6 header */
#define PDBOPTS_ESP_AOFL 0x04 /* adjust out frame len (decap, SEC>=5.3)*/
#define PDBOPTS_ESP_TUNNEL 0x01 /* tunnel mode next-header byte */
#define PDBOPTS_ESP_IPV6 0x02 /* ip header version is V6 */
#define PDBOPTS_ESP_DIFFSERV 0x40 /* copy TOS/TC from inner iphdr */
#define PDBOPTS_ESP_UPDATE_CSUM 0x80 /* encap-update ip header checksum */
#define PDBOPTS_ESP_VERIFY_CSUM 0x20 /* decap-validate ip header checksum */
/*
* General IPSec encap/decap PDB definitions
*/
struct ipsec_encap_cbc {
u32 iv[4];
};
struct ipsec_encap_ctr {
u32 ctr_nonce;
u32 ctr_initial;
u32 iv[2];
};
struct ipsec_encap_ccm {
u32 salt; /* lower 24 bits */
u8 b0_flags;
u8 ctr_flags;
u16 ctr_initial;
u32 iv[2];
};
struct ipsec_encap_gcm {
u32 salt; /* lower 24 bits */
u32 rsvd1;
u32 iv[2];
};
struct ipsec_encap_pdb {
u8 hmo_rsvd;
u8 ip_nh;
u8 ip_nh_offset;
u8 options;
u32 seq_num_ext_hi;
u32 seq_num;
union {
struct ipsec_encap_cbc cbc;
struct ipsec_encap_ctr ctr;
struct ipsec_encap_ccm ccm;
struct ipsec_encap_gcm gcm;
};
u32 spi;
u16 rsvd1;
u16 ip_hdr_len;
u32 ip_hdr[0]; /* optional IP Header content */
};
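/*
 * Illustrative encap PDB setup (assumed tunnel-mode values; the actual
 * option and field choices depend on the negotiated SA):
 *
 *	struct ipsec_encap_pdb pdb = {
 *		.options = PDBOPTS_ESP_TUNNEL | PDBOPTS_ESP_IVSRC |
 *			   PDBOPTS_ESP_INCIPHDR,
 *		.spi = sa_spi,
 *		.seq_num = 1,
 *	};
 */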
struct ipsec_decap_cbc {
u32 rsvd[2];
};
struct ipsec_decap_ctr {
u32 salt;
u32 ctr_initial;
};
struct ipsec_decap_ccm {
u32 salt;
u8 iv_flags;
u8 ctr_flags;
u16 ctr_initial;
};
struct ipsec_decap_gcm {
u32 salt;
u32 resvd;
};
struct ipsec_decap_pdb {
u16 hmo_ip_hdr_len;
u8 ip_nh_offset;
u8 options;
union {
struct ipsec_decap_cbc cbc;
struct ipsec_decap_ctr ctr;
struct ipsec_decap_ccm ccm;
struct ipsec_decap_gcm gcm;
};
u32 seq_num_ext_hi;
u32 seq_num;
u32 anti_replay[2];
u32 end_index[0];
};
/*
* IPSec ESP Datapath Protocol Override Register (DPOVRD)
*/
struct ipsec_deco_dpovrd {
#define IPSEC_ENCAP_DECO_DPOVRD_USE 0x80
u8 ovrd_ecn;
u8 ip_hdr_len;
u8 nh_offset;
u8 next_header; /* reserved if decap */
};
/*
* IEEE 802.11i WiFi Protocol Data Block
*/
#define WIFI_PDBOPTS_FCS 0x01
#define WIFI_PDBOPTS_AR 0x40
struct wifi_encap_pdb {
u16 mac_hdr_len;
u8 rsvd;
u8 options;
u8 iv_flags;
u8 pri;
u16 pn1;
u32 pn2;
u16 frm_ctrl_mask;
u16 seq_ctrl_mask;
u8 rsvd1[2];
u8 cnst;
u8 key_id;
u8 ctr_flags;
u8 rsvd2;
u16 ctr_init;
};
struct wifi_decap_pdb {
u16 mac_hdr_len;
u8 rsvd;
u8 options;
u8 iv_flags;
u8 pri;
u16 pn1;
u32 pn2;
u16 frm_ctrl_mask;
u16 seq_ctrl_mask;
u8 rsvd1[4];
u8 ctr_flags;
u8 rsvd2;
u16 ctr_init;
};
/*
* IEEE 802.16 WiMAX Protocol Data Block
*/
#define WIMAX_PDBOPTS_FCS 0x01
#define WIMAX_PDBOPTS_AR 0x40 /* decap only */
struct wimax_encap_pdb {
u8 rsvd[3];
u8 options;
u32 nonce;
u8 b0_flags;
u8 ctr_flags;
u16 ctr_init;
/* begin DECO writeback region */
u32 pn;
/* end DECO writeback region */
};
struct wimax_decap_pdb {
u8 rsvd[3];
u8 options;
u32 nonce;
u8 iv_flags;
u8 ctr_flags;
u16 ctr_init;
/* begin DECO writeback region */
u32 pn;
u8 rsvd1[2];
u16 antireplay_len;
u64 antireplay_scorecard;
/* end DECO writeback region */
};
/*
* IEEE 801.AE MacSEC Protocol Data Block
*/
#define MACSEC_PDBOPTS_FCS 0x01
#define MACSEC_PDBOPTS_AR 0x40 /* used in decap only */
struct macsec_encap_pdb {
u16 aad_len;
u8 rsvd;
u8 options;
u64 sci;
u16 ethertype;
u8 tci_an;
u8 rsvd1;
/* begin DECO writeback region */
u32 pn;
/* end DECO writeback region */
};
struct macsec_decap_pdb {
u16 aad_len;
u8 rsvd;
u8 options;
u64 sci;
u8 rsvd1[3];
/* begin DECO writeback region */
u8 antireplay_len;
u32 pn;
u64 antireplay_scorecard;
/* end DECO writeback region */
};
/*
* SSL/TLS/DTLS Protocol Data Blocks
*/
#define TLS_PDBOPTS_ARS32 0x40
#define TLS_PDBOPTS_ARS64 0xc0
#define TLS_PDBOPTS_OUTFMT 0x08
#define TLS_PDBOPTS_IV_WRTBK 0x02 /* 1.1/1.2/DTLS only */
#define TLS_PDBOPTS_EXP_RND_IV 0x01 /* 1.1/1.2/DTLS only */
struct tls_block_encap_pdb {
u8 type;
u8 version[2];
u8 options;
u64 seq_num;
u32 iv[4];
};
struct tls_stream_encap_pdb {
u8 type;
u8 version[2];
u8 options;
u64 seq_num;
u8 i;
u8 j;
u8 rsvd1[2];
};
struct dtls_block_encap_pdb {
u8 type;
u8 version[2];
u8 options;
u16 epoch;
u16 seq_num[3];
u32 iv[4];
};
struct tls_block_decap_pdb {
u8 rsvd[3];
u8 options;
u64 seq_num;
u32 iv[4];
};
struct tls_stream_decap_pdb {
u8 rsvd[3];
u8 options;
u64 seq_num;
u8 i;
u8 j;
u8 rsvd1[2];
};
struct dtls_block_decap_pdb {
u8 rsvd[3];
u8 options;
u16 epoch;
u16 seq_num[3];
u32 iv[4];
u64 antireplay_scorecard;
};
/*
* SRTP Protocol Data Blocks
*/
#define SRTP_PDBOPTS_MKI 0x08
#define SRTP_PDBOPTS_AR 0x40
struct srtp_encap_pdb {
u8 x_len;
u8 mki_len;
u8 n_tag;
u8 options;
u32 cnst0;
u8 rsvd[2];
u16 cnst1;
u16 salt[7];
u16 cnst2;
u32 rsvd1;
u32 roc;
u32 opt_mki;
};
struct srtp_decap_pdb {
u8 x_len;
u8 mki_len;
u8 n_tag;
u8 options;
u32 cnst0;
u8 rsvd[2];
u16 cnst1;
u16 salt[7];
u16 cnst2;
u16 rsvd1;
u16 seq_num;
u32 roc;
u64 antireplay_scorecard;
};
/*
* DSA/ECDSA Protocol Data Blocks
* Two of these exist: DSA-SIGN, and DSA-VERIFY. They are similar
* except for the treatment of "w" for verify, "s" for sign,
* and the placement of "a,b".
*/
#define DSA_PDB_SGF_SHIFT 24
#define DSA_PDB_SGF_MASK (0xff << DSA_PDB_SGF_SHIFT)
#define DSA_PDB_SGF_Q (0x80 << DSA_PDB_SGF_SHIFT)
#define DSA_PDB_SGF_R (0x40 << DSA_PDB_SGF_SHIFT)
#define DSA_PDB_SGF_G (0x20 << DSA_PDB_SGF_SHIFT)
#define DSA_PDB_SGF_W (0x10 << DSA_PDB_SGF_SHIFT)
#define DSA_PDB_SGF_S (0x10 << DSA_PDB_SGF_SHIFT)
#define DSA_PDB_SGF_F (0x08 << DSA_PDB_SGF_SHIFT)
#define DSA_PDB_SGF_C (0x04 << DSA_PDB_SGF_SHIFT)
#define DSA_PDB_SGF_D (0x02 << DSA_PDB_SGF_SHIFT)
#define DSA_PDB_SGF_AB_SIGN (0x02 << DSA_PDB_SGF_SHIFT)
#define DSA_PDB_SGF_AB_VERIFY (0x01 << DSA_PDB_SGF_SHIFT)
#define DSA_PDB_L_SHIFT 7
#define DSA_PDB_L_MASK (0x3ff << DSA_PDB_L_SHIFT)
#define DSA_PDB_N_MASK 0x7f
struct dsa_sign_pdb {
u32 sgf_ln; /* Use DSA_PDB_ definitions per above */
u8 *q;
u8 *r;
u8 *g; /* or Gx,y */
u8 *s;
u8 *f;
u8 *c;
u8 *d;
u8 *ab; /* ECC only */
u8 *u;
};
struct dsa_verify_pdb {
u32 sgf_ln;
u8 *q;
u8 *r;
u8 *g; /* or Gx,y */
u8 *w; /* or Wx,y */
u8 *f;
u8 *c;
u8 *d;
u8 *tmp; /* temporary data block */
u8 *ab; /* only used if ECC processing */
};
#endif

780
drivers/crypto/caam/regs.h Normal file
@ -0,0 +1,780 @@
/*
* CAAM hardware register-level view
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
*/
#ifndef REGS_H
#define REGS_H
#include <linux/types.h>
#include <linux/io.h>
/*
* Architecture-specific register access methods
*
* CAAM's bus-addressable registers are 64 bits internally.
* They have been wired to be safely accessible on 32-bit
* architectures, however. Registers were organized such
* that (a) they can be contained in 32 bits, (b) if not, then they
* can be treated as two 32-bit entities, or finally (c) if they
* must be treated as a single 64-bit value, then this can safely
* be done with two 32-bit cycles.
*
* For 32-bit operations on 64-bit values, CAAM follows the same
* 64-bit register access conventions as its predecessors, in that
* writes are "triggered" by a write to the register at the numerically
* higher address; thus, a full 64-bit write cycle requires a write
* to the lower address, followed by a write to the higher address,
* which will latch/execute the write cycle.
*
* For example, let's assume a SW reset of CAAM through the master
* configuration register.
* - SWRST is in bit 31 of MCFG.
* - MCFG begins at base+0x0000.
* - Bits 63-32 are a 32-bit word at base+0x0000 (numerically-lower)
* - Bits 31-0 are a 32-bit word at base+0x0004 (numerically-higher)
*
* (and on Power, the convention is 0-31, 32-63, I know...)
*
* Assuming a 64-bit write to this MCFG to perform a software reset
* would then require a write of 0 to base+0x0000, followed by a
* write of 0x80000000 to base+0x0004, which would "execute" the
* reset.
*
* Of course, since MCFG 63-32 is all zero, we could cheat and simply
* write 0x80000000 to base+0x0004, and the reset would work fine.
* However, since CAAM does contain some write-and-read-intended
* 64-bit registers, this code defines 64-bit access methods for
* the sake of internal consistency and simplicity, and so that a
* clean transition to 64-bit is possible when it becomes necessary.
*
* There are limitations to this that the developer must recognize:
* 32-bit architectures cannot enforce an atomic 64-bit operation.
* Therefore:
*
* - On writes, since the HW is assumed to latch the cycle on the
* write of the higher-numeric-address word, then ordered
* writes work OK.
*
* - For reads, where a register contains a relevant value of more
* than 32 bits, the hardware employs logic to latch the other
* "half" of the data until read, ensuring an accurate value.
* This is of particular relevance when dealing with CAAM's
* performance counters.
*
*/
#ifdef __BIG_ENDIAN
#define wr_reg32(reg, data) out_be32(reg, data)
#define rd_reg32(reg) in_be32(reg)
#ifdef CONFIG_64BIT
#define wr_reg64(reg, data) out_be64(reg, data)
#define rd_reg64(reg) in_be64(reg)
#endif
#else
#ifdef __LITTLE_ENDIAN
#define wr_reg32(reg, data) __raw_writel(data, reg)
#define rd_reg32(reg) __raw_readl(reg)
#ifdef CONFIG_64BIT
#define wr_reg64(reg, data) __raw_writeq(data, reg)
#define rd_reg64(reg) __raw_readq(reg)
#endif
#endif
#endif
#ifndef CONFIG_64BIT
#ifdef __BIG_ENDIAN
static inline void wr_reg64(u64 __iomem *reg, u64 data)
{
wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32);
wr_reg32((u32 __iomem *)reg + 1, data & 0x00000000ffffffffull);
}
static inline u64 rd_reg64(u64 __iomem *reg)
{
return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) |
((u64)rd_reg32((u32 __iomem *)reg + 1));
}
#else
#ifdef __LITTLE_ENDIAN
static inline void wr_reg64(u64 __iomem *reg, u64 data)
{
wr_reg32((u32 __iomem *)reg + 1, (data & 0xffffffff00000000ull) >> 32);
wr_reg32((u32 __iomem *)reg, data & 0x00000000ffffffffull);
}
static inline u64 rd_reg64(u64 __iomem *reg)
{
return (((u64)rd_reg32((u32 __iomem *)reg + 1)) << 32) |
((u64)rd_reg32((u32 __iomem *)reg));
}
#endif
#endif
#endif
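/*
 * Example: on a 32-bit build, the single call
 *
 *	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
 *
 * (as issued in jr.c) expands to two 32-bit writes, and the write to the
 * numerically higher address latches the full 64-bit cycle as described
 * in the convention above.
 */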
/*
* jr_outentry
* Represents each entry in a JobR output ring
*/
struct jr_outentry {
dma_addr_t desc;/* Pointer to completed descriptor */
u32 jrstatus; /* Status for completed descriptor */
} __packed;
/*
* caam_perfmon - Performance Monitor/Secure Memory Status/
* CAAM Global Status/Component Version IDs
*
* Spans f00-fff wherever instantiated
*/
/* Number of DECOs */
#define CHA_NUM_MS_DECONUM_SHIFT 24
#define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
/* CHA Version IDs */
#define CHA_ID_LS_AES_SHIFT 0
#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
#define CHA_ID_LS_DES_SHIFT 4
#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
#define CHA_ID_LS_ARC4_SHIFT 8
#define CHA_ID_LS_ARC4_MASK (0xfull << CHA_ID_LS_ARC4_SHIFT)
#define CHA_ID_LS_MD_SHIFT 12
#define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
#define CHA_ID_LS_RNG_SHIFT 16
#define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
#define CHA_ID_LS_SNW8_SHIFT 20
#define CHA_ID_LS_SNW8_MASK (0xfull << CHA_ID_LS_SNW8_SHIFT)
#define CHA_ID_LS_KAS_SHIFT 24
#define CHA_ID_LS_KAS_MASK (0xfull << CHA_ID_LS_KAS_SHIFT)
#define CHA_ID_LS_PK_SHIFT 28
#define CHA_ID_LS_PK_MASK (0xfull << CHA_ID_LS_PK_SHIFT)
#define CHA_ID_MS_CRC_SHIFT 0
#define CHA_ID_MS_CRC_MASK (0xfull << CHA_ID_MS_CRC_SHIFT)
#define CHA_ID_MS_SNW9_SHIFT 4
#define CHA_ID_MS_SNW9_MASK (0xfull << CHA_ID_MS_SNW9_SHIFT)
#define CHA_ID_MS_DECO_SHIFT 24
#define CHA_ID_MS_DECO_MASK (0xfull << CHA_ID_MS_DECO_SHIFT)
#define CHA_ID_MS_JR_SHIFT 28
#define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT)
struct sec_vid {
u16 ip_id;
u8 maj_rev;
u8 min_rev;
};
struct caam_perfmon {
/* Performance Monitor Registers f00-f9f */
u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */
u64 ob_enc_req; /* PC_OB_ENC_REQ - Outbound Encrypt Requests */
u64 ib_dec_req; /* PC_IB_DEC_REQ - Inbound Decrypt Requests */
u64 ob_enc_bytes; /* PC_OB_ENCRYPT - Outbound Bytes Encrypted */
u64 ob_prot_bytes; /* PC_OB_PROTECT - Outbound Bytes Protected */
u64 ib_dec_bytes; /* PC_IB_DECRYPT - Inbound Bytes Decrypted */
u64 ib_valid_bytes; /* PC_IB_VALIDATED Inbound Bytes Validated */
u64 rsvd[13];
/* CAAM Hardware Instantiation Parameters fa0-fbf */
u32 cha_rev_ms; /* CRNR - CHA Rev No. Most significant half*/
u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
#define CTPR_MS_QI_SHIFT 25
#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
#define CTPR_MS_VIRT_EN_INCL 0x00000001
#define CTPR_MS_VIRT_EN_POR 0x00000002
#define CTPR_MS_PG_SZ_MASK 0x10
#define CTPR_MS_PG_SZ_SHIFT 4
u32 comp_parms_ms; /* CTPR - Compile Parameters Register */
u32 comp_parms_ls; /* CTPR - Compile Parameters Register */
u64 rsvd1[2];
/* CAAM Global Status fc0-fdf */
u64 faultaddr; /* FAR - Fault Address */
u32 faultliodn; /* FALR - Fault Address LIODN */
u32 faultdetail; /* FADR - Fault Addr Detail */
u32 rsvd2;
u32 status; /* CSTA - CAAM Status */
u64 rsvd3;
/* Component Instantiation Parameters fe0-fff */
u32 rtic_id; /* RVID - RTIC Version ID */
u32 ccb_id; /* CCBVID - CCB Version ID */
u32 cha_id_ms; /* CHAVID - CHA Version ID Most Significant*/
u32 cha_id_ls; /* CHAVID - CHA Version ID Least Significant*/
u32 cha_num_ms; /* CHANUM - CHA Number Most Significant */
u32 cha_num_ls; /* CHANUM - CHA Number Least Significant*/
u32 caam_id_ms; /* CAAMVID - CAAM Version ID MS */
u32 caam_id_ls; /* CAAMVID - CAAM Version ID LS */
};
/* LIODN programming for DMA configuration */
#define MSTRID_LOCK_LIODN 0x80000000
#define MSTRID_LOCK_MAKETRUSTED 0x00010000 /* only for JR masterid */
#define MSTRID_LIODN_MASK 0x0fff
struct masterid {
u32 liodn_ms; /* lock and make-trusted control bits */
u32 liodn_ls; /* LIODN for non-sequence and seq access */
};
/* Partition ID for DMA configuration */
struct partid {
u32 rsvd1;
u32 pidr; /* partition ID, DECO */
};
/* RNGB test mode (replicated twice in some configurations) */
/* Padded out to 0x100 */
struct rngtst {
u32 mode; /* RTSTMODEx - Test mode */
u32 rsvd1[3];
u32 reset; /* RTSTRESETx - Test reset control */
u32 rsvd2[3];
u32 status; /* RTSTSSTATUSx - Test status */
u32 rsvd3;
u32 errstat; /* RTSTERRSTATx - Test error status */
u32 rsvd4;
u32 errctl; /* RTSTERRCTLx - Test error control */
u32 rsvd5;
u32 entropy; /* RTSTENTROPYx - Test entropy */
u32 rsvd6[15];
u32 verifctl; /* RTSTVERIFCTLx - Test verification control */
u32 rsvd7;
u32 verifstat; /* RTSTVERIFSTATx - Test verification status */
u32 rsvd8;
u32 verifdata; /* RTSTVERIFDx - Test verification data */
u32 rsvd9;
u32 xkey; /* RTSTXKEYx - Test XKEY */
u32 rsvd10;
u32 oscctctl; /* RTSTOSCCTCTLx - Test osc. counter control */
u32 rsvd11;
u32 oscct; /* RTSTOSCCTx - Test oscillator counter */
u32 rsvd12;
u32 oscctstat; /* RTSTODCCTSTATx - Test osc counter status */
u32 rsvd13[2];
u32 ofifo[4]; /* RTSTOFIFOx - Test output FIFO */
u32 rsvd14[15];
};
/* RNG4 TRNG test registers */
struct rng4tst {
#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_SC 0 /* use von Neumann data in
both entropy shifter and
statistical checker */
#define RTMCTL_SAMP_MODE_RAW_ES_SC 1 /* use raw data in both
entropy shifter and
statistical checker */
#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_RAW_SC 2 /* use von Neumann data in
entropy shifter, raw data
in statistical checker */
#define RTMCTL_SAMP_MODE_INVALID 3 /* invalid combination */
u32 rtmctl; /* misc. control register */
u32 rtscmisc; /* statistical check misc. register */
u32 rtpkrrng; /* poker range register */
union {
u32 rtpkrmax; /* PRGM=1: poker max. limit register */
u32 rtpkrsq; /* PRGM=0: poker square calc. result register */
};
#define RTSDCTL_ENT_DLY_SHIFT 16
#define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
#define RTSDCTL_ENT_DLY_MIN 3200
#define RTSDCTL_ENT_DLY_MAX 12800
u32 rtsdctl; /* seed control register */
union {
u32 rtsblim; /* PRGM=1: sparse bit limit register */
u32 rttotsam; /* PRGM=0: total samples register */
};
u32 rtfrqmin; /* frequency count min. limit register */
#define RTFRQMAX_DISABLE (1 << 20)
union {
u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */
u32 rtfrqcnt; /* PRGM=0: freq. count register */
};
u32 rsvd1[40];
#define RDSTA_SKVT 0x80000000
#define RDSTA_SKVN 0x40000000
#define RDSTA_IF0 0x00000001
#define RDSTA_IF1 0x00000002
#define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0)
u32 rdsta;
u32 rsvd2[15];
};
/*
* caam_ctrl - basic core configuration
* starts base + 0x0000 padded out to 0x1000
*/
#define KEK_KEY_SIZE 8
#define TKEK_KEY_SIZE 8
#define TDSK_KEY_SIZE 8
#define DECO_RESET 1 /* Use with DECO reset/availability regs */
#define DECO_RESET_0 (DECO_RESET << 0)
#define DECO_RESET_1 (DECO_RESET << 1)
#define DECO_RESET_2 (DECO_RESET << 2)
#define DECO_RESET_3 (DECO_RESET << 3)
#define DECO_RESET_4 (DECO_RESET << 4)
struct caam_ctrl {
/* Basic Configuration Section 000-01f */
/* Read/Writable */
u32 rsvd1;
u32 mcr; /* MCFG Master Config Register */
u32 rsvd2;
u32 scfgr; /* SCFGR, Security Config Register */
/* Bus Access Configuration Section 010-11f */
/* Read/Writable */
struct masterid jr_mid[4]; /* JRxLIODNR - JobR LIODN setup */
u32 rsvd3[11];
u32 jrstart; /* JRSTART - Job Ring Start Register */
struct masterid rtic_mid[4]; /* RTICxLIODNR - RTIC LIODN setup */
u32 rsvd4[5];
u32 deco_rsr; /* DECORSR - Deco Request Source */
u32 rsvd11;
u32 deco_rq; /* DECORR - DECO Request */
struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */
u32 rsvd5[22];
/* DECO Availability/Reset Section 120-3ff */
u32 deco_avail; /* DAR - DECO availability */
u32 deco_reset; /* DRR - DECO reset */
u32 rsvd6[182];
/* Key Encryption/Decryption Configuration 400-5ff */
/* Read/Writable only while in Non-secure mode */
u32 kek[KEK_KEY_SIZE]; /* JDKEKR - Key Encryption Key */
u32 tkek[TKEK_KEY_SIZE]; /* TDKEKR - Trusted Desc KEK */
u32 tdsk[TDSK_KEY_SIZE]; /* TDSKR - Trusted Desc Signing Key */
u32 rsvd7[32];
u64 sknonce; /* SKNR - Secure Key Nonce */
u32 rsvd8[70];
/* RNG Test/Verification/Debug Access 600-7ff */
/* (Useful in Test/Debug modes only...) */
union {
struct rngtst rtst[2];
struct rng4tst r4tst[2];
};
u32 rsvd9[448];
/* Performance Monitor f00-fff */
struct caam_perfmon perfmon;
};
/*
* Controller master config register defs
*/
#define MCFGR_SWRESET 0x80000000 /* software reset */
#define MCFGR_WDENABLE 0x40000000 /* DECO watchdog enable */
#define MCFGR_WDFAIL 0x20000000 /* DECO watchdog force-fail */
#define MCFGR_DMA_RESET 0x10000000
#define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */
#define SCFGR_RDBENABLE 0x00000400
#define SCFGR_VIRT_EN 0x00008000
#define DECORR_RQD0ENABLE 0x00000001 /* Enable DECO0 for direct access */
#define DECORSR_JR0 0x00000001 /* JR to supply TZ, SDID, ICID */
#define DECORSR_VALID 0x80000000
#define DECORR_DEN0 0x00010000 /* DECO0 available for access*/
/* AXI read cache control */
#define MCFGR_ARCACHE_SHIFT 12
#define MCFGR_ARCACHE_MASK (0xf << MCFGR_ARCACHE_SHIFT)
/* AXI write cache control */
#define MCFGR_AWCACHE_SHIFT 8
#define MCFGR_AWCACHE_MASK (0xf << MCFGR_AWCACHE_SHIFT)
/* AXI pipeline depth */
#define MCFGR_AXIPIPE_SHIFT 4
#define MCFGR_AXIPIPE_MASK (0xf << MCFGR_AXIPIPE_SHIFT)
#define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */
#define MCFGR_BURST_64 0x00000001 /* Max burst size */
/* JRSTART register offsets */
#define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */
#define JRSTART_JR1_START 0x00000002 /* Start Job ring 1 */
#define JRSTART_JR2_START 0x00000004 /* Start Job ring 2 */
#define JRSTART_JR3_START 0x00000008 /* Start Job ring 3 */
/*
* caam_job_ring - direct job ring setup
* 1-4 possible per instantiation, base + 1000/2000/3000/4000
* Padded out to 0x1000
*/
struct caam_job_ring {
/* Input ring */
u64 inpring_base; /* IRBAx - Input desc ring baseaddr */
u32 rsvd1;
u32 inpring_size; /* IRSx - Input ring size */
u32 rsvd2;
u32 inpring_avail; /* IRSAx - Input ring room remaining */
u32 rsvd3;
u32 inpring_jobadd; /* IRJAx - Input ring jobs added */
/* Output Ring */
u64 outring_base; /* ORBAx - Output status ring base addr */
u32 rsvd4;
u32 outring_size; /* ORSx - Output ring size */
u32 rsvd5;
u32 outring_rmvd; /* ORJRx - Output ring jobs removed */
u32 rsvd6;
u32 outring_used; /* ORSFx - Output ring slots full */
/* Status/Configuration */
u32 rsvd7;
u32 jroutstatus; /* JRSTAx - JobR output status */
u32 rsvd8;
u32 jrintstatus; /* JRINTx - JobR interrupt status */
u32 rconfig_hi; /* JRxCFG - Ring configuration */
u32 rconfig_lo;
/* Indices. CAAM maintains as "heads" of each queue */
u32 rsvd9;
u32 inp_rdidx; /* IRRIx - Input ring read index */
u32 rsvd10;
u32 out_wtidx; /* ORWIx - Output ring write index */
/* Command/control */
u32 rsvd11;
u32 jrcommand; /* JRCRx - JobR command */
u32 rsvd12[932];
/* Performance Monitor f00-fff */
struct caam_perfmon perfmon;
};
#define JR_RINGSIZE_MASK 0x03ff
/*
* jrstatus - Job Ring Output Status
* All values in lo word
* Also note, same values written out as status through QI
* in the command/status field of a frame descriptor
*/
#define JRSTA_SSRC_SHIFT 28
#define JRSTA_SSRC_MASK 0xf0000000
#define JRSTA_SSRC_NONE 0x00000000
#define JRSTA_SSRC_CCB_ERROR 0x20000000
#define JRSTA_SSRC_JUMP_HALT_USER 0x30000000
#define JRSTA_SSRC_DECO 0x40000000
#define JRSTA_SSRC_JRERROR 0x60000000
#define JRSTA_SSRC_JUMP_HALT_CC 0x70000000
#define JRSTA_DECOERR_JUMP 0x08000000
#define JRSTA_DECOERR_INDEX_SHIFT 8
#define JRSTA_DECOERR_INDEX_MASK 0xff00
#define JRSTA_DECOERR_ERROR_MASK 0x00ff
#define JRSTA_DECOERR_NONE 0x00
#define JRSTA_DECOERR_LINKLEN 0x01
#define JRSTA_DECOERR_LINKPTR 0x02
#define JRSTA_DECOERR_JRCTRL 0x03
#define JRSTA_DECOERR_DESCCMD 0x04
#define JRSTA_DECOERR_ORDER 0x05
#define JRSTA_DECOERR_KEYCMD 0x06
#define JRSTA_DECOERR_LOADCMD 0x07
#define JRSTA_DECOERR_STORECMD 0x08
#define JRSTA_DECOERR_OPCMD 0x09
#define JRSTA_DECOERR_FIFOLDCMD 0x0a
#define JRSTA_DECOERR_FIFOSTCMD 0x0b
#define JRSTA_DECOERR_MOVECMD 0x0c
#define JRSTA_DECOERR_JUMPCMD 0x0d
#define JRSTA_DECOERR_MATHCMD 0x0e
#define JRSTA_DECOERR_SHASHCMD 0x0f
#define JRSTA_DECOERR_SEQCMD 0x10
#define JRSTA_DECOERR_DECOINTERNAL 0x11
#define JRSTA_DECOERR_SHDESCHDR 0x12
#define JRSTA_DECOERR_HDRLEN 0x13
#define JRSTA_DECOERR_BURSTER 0x14
#define JRSTA_DECOERR_DESCSIGNATURE 0x15
#define JRSTA_DECOERR_DMA 0x16
#define JRSTA_DECOERR_BURSTFIFO 0x17
#define JRSTA_DECOERR_JRRESET 0x1a
#define JRSTA_DECOERR_JOBFAIL 0x1b
#define JRSTA_DECOERR_DNRERR 0x80
#define JRSTA_DECOERR_UNDEFPCL 0x81
#define JRSTA_DECOERR_PDBERR 0x82
#define JRSTA_DECOERR_ANRPLY_LATE 0x83
#define JRSTA_DECOERR_ANRPLY_REPLAY 0x84
#define JRSTA_DECOERR_SEQOVF 0x85
#define JRSTA_DECOERR_INVSIGN 0x86
#define JRSTA_DECOERR_DSASIGN 0x87
#define JRSTA_CCBERR_JUMP 0x08000000
#define JRSTA_CCBERR_INDEX_MASK 0xff00
#define JRSTA_CCBERR_INDEX_SHIFT 8
#define JRSTA_CCBERR_CHAID_MASK 0x00f0
#define JRSTA_CCBERR_CHAID_SHIFT 4
#define JRSTA_CCBERR_ERRID_MASK 0x000f
#define JRSTA_CCBERR_CHAID_AES (0x01 << JRSTA_CCBERR_CHAID_SHIFT)
#define JRSTA_CCBERR_CHAID_DES (0x02 << JRSTA_CCBERR_CHAID_SHIFT)
#define JRSTA_CCBERR_CHAID_ARC4 (0x03 << JRSTA_CCBERR_CHAID_SHIFT)
#define JRSTA_CCBERR_CHAID_MD (0x04 << JRSTA_CCBERR_CHAID_SHIFT)
#define JRSTA_CCBERR_CHAID_RNG (0x05 << JRSTA_CCBERR_CHAID_SHIFT)
#define JRSTA_CCBERR_CHAID_SNOW (0x06 << JRSTA_CCBERR_CHAID_SHIFT)
#define JRSTA_CCBERR_CHAID_KASUMI (0x07 << JRSTA_CCBERR_CHAID_SHIFT)
#define JRSTA_CCBERR_CHAID_PK (0x08 << JRSTA_CCBERR_CHAID_SHIFT)
#define JRSTA_CCBERR_CHAID_CRC (0x09 << JRSTA_CCBERR_CHAID_SHIFT)
#define JRSTA_CCBERR_ERRID_NONE 0x00
#define JRSTA_CCBERR_ERRID_MODE 0x01
#define JRSTA_CCBERR_ERRID_DATASIZ 0x02
#define JRSTA_CCBERR_ERRID_KEYSIZ 0x03
#define JRSTA_CCBERR_ERRID_PKAMEMSZ 0x04
#define JRSTA_CCBERR_ERRID_PKBMEMSZ 0x05
#define JRSTA_CCBERR_ERRID_SEQUENCE 0x06
#define JRSTA_CCBERR_ERRID_PKDIVZRO 0x07
#define JRSTA_CCBERR_ERRID_PKMODEVN 0x08
#define JRSTA_CCBERR_ERRID_KEYPARIT 0x09
#define JRSTA_CCBERR_ERRID_ICVCHK 0x0a
#define JRSTA_CCBERR_ERRID_HARDWARE 0x0b
#define JRSTA_CCBERR_ERRID_CCMAAD 0x0c
#define JRSTA_CCBERR_ERRID_INVCHA 0x0f
#define JRINT_ERR_INDEX_MASK 0x3fff0000
#define JRINT_ERR_INDEX_SHIFT 16
#define JRINT_ERR_TYPE_MASK 0xf00
#define JRINT_ERR_TYPE_SHIFT 8
#define JRINT_ERR_HALT_MASK 0xc
#define JRINT_ERR_HALT_SHIFT 2
#define JRINT_ERR_HALT_INPROGRESS 0x4
#define JRINT_ERR_HALT_COMPLETE 0x8
#define JRINT_JR_ERROR 0x02
#define JRINT_JR_INT 0x01
#define JRINT_ERR_TYPE_WRITE 1
#define JRINT_ERR_TYPE_BAD_INPADDR 3
#define JRINT_ERR_TYPE_BAD_OUTADDR 4
#define JRINT_ERR_TYPE_INV_INPWRT 5
#define JRINT_ERR_TYPE_INV_OUTWRT 6
#define JRINT_ERR_TYPE_RESET 7
#define JRINT_ERR_TYPE_REMOVE_OFL 8
#define JRINT_ERR_TYPE_ADD_OFL 9
#define JRCFG_SOE 0x04
#define JRCFG_ICEN 0x02
#define JRCFG_IMSK 0x01
#define JRCFG_ICDCT_SHIFT 8
#define JRCFG_ICTT_SHIFT 16
#define JRCR_RESET 0x01
/*
* caam_assurance - Assurance Controller View
* base + 0x6000 padded out to 0x1000
*/
struct rtic_element {
u64 address;
u32 rsvd;
u32 length;
};
struct rtic_block {
struct rtic_element element[2];
};
struct rtic_memhash {
u32 memhash_be[32];
u32 memhash_le[32];
};
struct caam_assurance {
/* Status/Command/Watchdog */
u32 rsvd1;
u32 status; /* RSTA - Status */
u32 rsvd2;
u32 cmd; /* RCMD - Command */
u32 rsvd3;
u32 ctrl; /* RCTL - Control */
u32 rsvd4;
u32 throttle; /* RTHR - Throttle */
u32 rsvd5[2];
u64 watchdog; /* RWDOG - Watchdog Timer */
u32 rsvd6;
u32 rend; /* REND - Endian corrections */
u32 rsvd7[50];
/* Block access/configuration @ 100/110/120/130 */
struct rtic_block memblk[4]; /* Memory Blocks A-D */
u32 rsvd8[32];
/* Block hashes @ 200/300/400/500 */
struct rtic_memhash hash[4]; /* Block hash values A-D */
u32 rsvd_3[640];
};
/*
* caam_queue_if - QI configuration and control
* starts base + 0x7000, padded out to 0x1000 long
*/
struct caam_queue_if {
u32 qi_control_hi; /* QICTL - QI Control */
u32 qi_control_lo;
u32 rsvd1;
u32 qi_status; /* QISTA - QI Status */
u32 qi_deq_cfg_hi; /* QIDQC - QI Dequeue Configuration */
u32 qi_deq_cfg_lo;
u32 qi_enq_cfg_hi; /* QISEQC - QI Enqueue Command */
u32 qi_enq_cfg_lo;
u32 rsvd2[1016];
};
/* QI control bits - low word */
#define QICTL_DQEN 0x01 /* Enable frame pop */
#define QICTL_STOP 0x02 /* Stop dequeue/enqueue */
#define QICTL_SOE 0x04 /* Stop on error */
/* QI control bits - high word */
#define QICTL_MBSI 0x01
#define QICTL_MHWSI 0x02
#define QICTL_MWSI 0x04
#define QICTL_MDWSI 0x08
#define QICTL_CBSI 0x10 /* CtrlDataByteSwapInput */
#define QICTL_CHWSI 0x20 /* CtrlDataHalfSwapInput */
#define QICTL_CWSI 0x40 /* CtrlDataWordSwapInput */
#define QICTL_CDWSI 0x80 /* CtrlDataDWordSwapInput */
#define QICTL_MBSO 0x0100
#define QICTL_MHWSO 0x0200
#define QICTL_MWSO 0x0400
#define QICTL_MDWSO 0x0800
#define QICTL_CBSO 0x1000 /* CtrlDataByteSwapOutput */
#define QICTL_CHWSO 0x2000 /* CtrlDataHalfSwapOutput */
#define QICTL_CWSO 0x4000 /* CtrlDataWordSwapOutput */
#define QICTL_CDWSO 0x8000 /* CtrlDataDWordSwapOutput */
#define QICTL_DMBS 0x010000
#define QICTL_EPO 0x020000
/* QI status bits */
#define QISTA_PHRDERR 0x01 /* PreHeader Read Error */
#define QISTA_CFRDERR 0x02 /* Compound Frame Read Error */
#define QISTA_OFWRERR 0x04 /* Output Frame Read Error */
#define QISTA_BPDERR 0x08 /* Buffer Pool Depleted */
#define QISTA_BTSERR 0x10 /* Buffer Undersize */
#define QISTA_CFWRERR 0x20 /* Compound Frame Write Err */
#define QISTA_STOPD 0x80000000 /* QI Stopped (see QICTL) */
/* deco_sg_table - DECO view of scatter/gather table */
struct deco_sg_table {
u64 addr; /* Segment Address */
u32 elen; /* E, F bits + 30-bit length */
u32 bpid_offset; /* Buffer Pool ID + 16-bit length */
};
/*
* caam_deco - descriptor controller - CHA cluster block
*
* Only accessible when direct DECO access is turned on
* (done in DECORR, via MID programmed in DECOxMID
*
* 5 typical, base + 0x8000/9000/a000/b000
* Padded out to 0x1000 long
*/
struct caam_deco {
u32 rsvd1;
u32 cls1_mode; /* CxC1MR - Class 1 Mode */
u32 rsvd2;
u32 cls1_keysize; /* CxC1KSR - Class 1 Key Size */
u32 cls1_datasize_hi; /* CxC1DSR - Class 1 Data Size */
u32 cls1_datasize_lo;
u32 rsvd3;
u32 cls1_icvsize; /* CxC1ICVSR - Class 1 ICV size */
u32 rsvd4[5];
u32 cha_ctrl; /* CCTLR - CHA control */
u32 rsvd5;
u32 irq_crtl; /* CxCIRQ - CCB interrupt done/error/clear */
u32 rsvd6;
u32 clr_written; /* CxCWR - Clear-Written */
u32 ccb_status_hi; /* CxCSTA - CCB Status/Error */
u32 ccb_status_lo;
u32 rsvd7[3];
u32 aad_size; /* CxAADSZR - Current AAD Size */
u32 rsvd8;
u32 cls1_iv_size; /* CxC1IVSZR - Current Class 1 IV Size */
u32 rsvd9[7];
u32 pkha_a_size; /* PKASZRx - Size of PKHA A */
u32 rsvd10;
u32 pkha_b_size; /* PKBSZRx - Size of PKHA B */
u32 rsvd11;
u32 pkha_n_size; /* PKNSZRx - Size of PKHA N */
u32 rsvd12;
u32 pkha_e_size; /* PKESZRx - Size of PKHA E */
u32 rsvd13[24];
u32 cls1_ctx[16]; /* CxC1CTXR - Class 1 Context @100 */
u32 rsvd14[48];
u32 cls1_key[8]; /* CxC1KEYR - Class 1 Key @200 */
u32 rsvd15[121];
u32 cls2_mode; /* CxC2MR - Class 2 Mode */
u32 rsvd16;
u32 cls2_keysize; /* CxX2KSR - Class 2 Key Size */
u32 cls2_datasize_hi; /* CxC2DSR - Class 2 Data Size */
u32 cls2_datasize_lo;
u32 rsvd17;
u32 cls2_icvsize; /* CxC2ICVSZR - Class 2 ICV Size */
u32 rsvd18[56];
u32 cls2_ctx[18]; /* CxC2CTXR - Class 2 Context @500 */
u32 rsvd19[46];
u32 cls2_key[32]; /* CxC2KEYR - Class2 Key @600 */
u32 rsvd20[84];
u32 inp_infofifo_hi; /* CxIFIFO - Input Info FIFO @7d0 */
u32 inp_infofifo_lo;
u32 rsvd21[2];
u64 inp_datafifo; /* CxDFIFO - Input Data FIFO */
u32 rsvd22[2];
u64 out_datafifo; /* CxOFIFO - Output Data FIFO */
u32 rsvd23[2];
u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */
u32 jr_ctl_lo;
u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */
#define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF
u32 op_status_hi; /* DxOPSTA - DECO Operation Status */
u32 op_status_lo;
u32 rsvd24[2];
u32 liodn; /* DxLSR - DECO LIODN Status - non-seq */
u32 td_liodn; /* DxLSR - DECO LIODN Status - trustdesc */
u32 rsvd26[6];
u64 math[4]; /* DxMTH - Math register */
u32 rsvd27[8];
struct deco_sg_table gthr_tbl[4]; /* DxGTR - Gather Tables */
u32 rsvd28[16];
struct deco_sg_table sctr_tbl[4]; /* DxSTR - Scatter Tables */
u32 rsvd29[48];
u32 descbuf[64]; /* DxDESB - Descriptor buffer */
u32 rscvd30[193];
#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000
#define DESC_DBG_DECO_STAT_VALID 0x80000000
#define DESC_DBG_DECO_STAT_MASK 0x00F00000
u32 desc_dbg; /* DxDDR - DECO Debug Register */
u32 rsvd31[126];
};
#define DECO_JQCR_WHL 0x20000000
#define DECO_JQCR_FOUR 0x10000000
#define JR_BLOCK_NUMBER 1
#define ASSURE_BLOCK_NUMBER 6
#define QI_BLOCK_NUMBER 7
#define DECO_BLOCK_NUMBER 8
#define PG_SIZE_4K 0x1000
#define PG_SIZE_64K 0x10000
#endif /* REGS_H */

118
drivers/crypto/caam/sg_sw_sec4.h Normal file
@ -0,0 +1,118 @@
/*
* CAAM/SEC 4.x functions for using scatterlists in caam driver
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
*
*/
struct sec4_sg_entry;
/*
* convert single dma address to h/w link table format
*/
static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
dma_addr_t dma, u32 len, u32 offset)
{
sec4_sg_ptr->ptr = dma;
sec4_sg_ptr->len = len;
sec4_sg_ptr->reserved = 0;
sec4_sg_ptr->buf_pool_id = 0;
sec4_sg_ptr->offset = offset;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
sizeof(struct sec4_sg_entry), 1);
#endif
}
/*
 * convert scatterlist to h/w link table format without setting the
 * final bit; instead, return a pointer to the last entry written so
 * the caller can mark it
 */
static inline struct sec4_sg_entry *
sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
struct sec4_sg_entry *sec4_sg_ptr, u32 offset)
{
while (sg_count) {
dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
sg_dma_len(sg), offset);
sec4_sg_ptr++;
sg = scatterwalk_sg_next(sg);
sg_count--;
}
return sec4_sg_ptr - 1;
}
/*
* convert scatterlist to h/w link table format
* scatterlist must have been previously dma mapped
*/
static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
struct sec4_sg_entry *sec4_sg_ptr,
u32 offset)
{
sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
sec4_sg_ptr->len |= SEC4_SG_LEN_FIN;
}
/* count number of elements in scatterlist */
static inline int __sg_count(struct scatterlist *sg_list, int nbytes,
bool *chained)
{
struct scatterlist *sg = sg_list;
int sg_nents = 0;
while (nbytes > 0) {
sg_nents++;
nbytes -= sg->length;
if (!sg_is_last(sg) && (sg + 1)->length == 0)
*chained = true;
sg = scatterwalk_sg_next(sg);
}
return sg_nents;
}
/* derive number of elements in scatterlist, but return 0 for a single entry (no link table needed) */
static inline int sg_count(struct scatterlist *sg_list, int nbytes,
bool *chained)
{
int sg_nents = __sg_count(sg_list, nbytes, chained);
if (likely(sg_nents == 1))
return 0;
return sg_nents;
}
static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
unsigned int nents, enum dma_data_direction dir,
bool chained)
{
if (unlikely(chained)) {
int i;
for (i = 0; i < nents; i++) {
dma_map_sg(dev, sg, 1, dir);
sg = scatterwalk_sg_next(sg);
}
} else {
dma_map_sg(dev, sg, nents, dir);
}
return nents;
}
static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
unsigned int nents, enum dma_data_direction dir,
bool chained)
{
if (unlikely(chained)) {
int i;
for (i = 0; i < nents; i++) {
dma_unmap_sg(dev, sg, 1, dir);
sg = scatterwalk_sg_next(sg);
}
} else {
dma_unmap_sg(dev, sg, nents, dir);
}
return nents;
}