author     Horia Geantă <horia.geanta@nxp.com>        2018-11-08 16:36:27 +0300
committer  Herbert Xu <herbert@gondor.apana.org.au>   2018-11-16 09:11:03 +0300
commit     d239b10d4ceb986d998779a4ed81824368aca831 (patch)
tree       f4d78848429e68298f7457320feb6327445ef53a /drivers/crypto
parent     57f002891e08c589abf09409f2d606dfd1164e32 (diff)
download   linux-d239b10d4ceb986d998779a4ed81824368aca831.tar.xz
crypto: caam - add register map changes cf. Era 10
Era 10 changes the register map.
The updates that affect the drivers:
- new version registers are added
- DBG_DBG[deco_state] field is moved to a new register - DBG_EXEC[19:16] @ 8_0E3Ch.
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
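For orientation, the era-gated lookup that the patch repeats in every driver can be summarized by a small helper along these lines (the helper name is illustrative and not part of the patch; it assumes the driver-internal definitions from drivers/crypto/caam/intern.h and regs.h): on Era < 10 the CHA information still comes from the perfmon CHA_ID/CHA_NUM registers, while on Era 10+ it is read from the new per-CHA version registers (struct version_regs).

/* Illustrative sketch only - function name is hypothetical, not from the patch. */
static u32 caam_md_inst(struct caam_drv_private *priv)
{
	if (priv->era < 10)	/* legacy: MD instantiation count in CHA_NUM_LS */
		return (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* Era 10+: per-CHA version register, low byte = number instantiated */
	return rd_reg32(&priv->ctrl->vreg.mdha) & CHA_VER_NUM_MASK;
}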
Diffstat (limited to 'drivers/crypto')
-rw-r--r--   drivers/crypto/caam/caamalg.c    | 47
-rw-r--r--   drivers/crypto/caam/caamalg_qi.c | 37
-rw-r--r--   drivers/crypto/caam/caamhash.c   | 20
-rw-r--r--   drivers/crypto/caam/caampkc.c    | 10
-rw-r--r--   drivers/crypto/caam/caamrng.c    | 10
-rw-r--r--   drivers/crypto/caam/ctrl.c       | 28
-rw-r--r--   drivers/crypto/caam/desc.h       |  7
-rw-r--r--   drivers/crypto/caam/regs.h       | 74
8 files changed, 184 insertions(+), 49 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 869f092432de..9f1414030bc2 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3135,7 +3135,7 @@ static int __init caam_algapi_init(void)
 	struct device *ctrldev;
 	struct caam_drv_private *priv;
 	int i = 0, err = 0;
-	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
 	unsigned int md_limit = SHA512_DIGEST_SIZE;
 	bool registered = false;

@@ -3168,14 +3168,34 @@ static int __init caam_algapi_init(void)
 	 * Register crypto algorithms the device supports.
 	 * First, detect presence and attributes of DES, AES, and MD blocks.
 	 */
-	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
-	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
-	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
-	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
-	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+	if (priv->era < 10) {
+		u32 cha_vid, cha_inst;
+
+		cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
+		md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+		cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+		des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
+			   CHA_ID_LS_DES_SHIFT;
+		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
+		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+	} else {
+		u32 aesa, mdha;
+
+		aesa = rd_reg32(&priv->ctrl->vreg.aesa);
+		mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+
+		aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+
+		des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
+		aes_inst = aesa & CHA_VER_NUM_MASK;
+		md_inst = mdha & CHA_VER_NUM_MASK;
+	}

 	/* If MD is present, limit digest size based on LP256 */
-	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+	if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
 		md_limit = SHA256_DIGEST_SIZE;

 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
@@ -3196,10 +3216,10 @@ static int __init caam_algapi_init(void)
 		 * Check support for AES modes not available
 		 * on LP devices.
 		 */
-		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
-			if ((t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
-			     OP_ALG_AAI_XTS)
-				continue;
+		if (aes_vid == CHA_VER_VID_AES_LP &&
+		    (t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
+		    OP_ALG_AAI_XTS)
+			continue;

 		caam_skcipher_alg_init(t_alg);

@@ -3236,9 +3256,8 @@ static int __init caam_algapi_init(void)
 		 * Check support for AES algorithms not available
 		 * on LP devices.
 		 */
-		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
-			if (alg_aai == OP_ALG_AAI_GCM)
-				continue;
+		if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
+			continue;

 		/*
 		 * Skip algorithms requiring message digests
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 23c9fc4975f8..c0d55310aade 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -2462,7 +2462,7 @@ static int __init caam_qi_algapi_init(void)
 	struct device *ctrldev;
 	struct caam_drv_private *priv;
 	int i = 0, err = 0;
-	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
 	unsigned int md_limit = SHA512_DIGEST_SIZE;
 	bool registered = false;

@@ -2497,14 +2497,34 @@ static int __init caam_qi_algapi_init(void)
 	 * Register crypto algorithms the device supports.
 	 * First, detect presence and attributes of DES, AES, and MD blocks.
 	 */
-	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
-	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
-	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
-	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
-	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+	if (priv->era < 10) {
+		u32 cha_vid, cha_inst;
+
+		cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
+		md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+		cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+		des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
+			   CHA_ID_LS_DES_SHIFT;
+		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
+		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+	} else {
+		u32 aesa, mdha;
+
+		aesa = rd_reg32(&priv->ctrl->vreg.aesa);
+		mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+
+		aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+
+		des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
+		aes_inst = aesa & CHA_VER_NUM_MASK;
+		md_inst = mdha & CHA_VER_NUM_MASK;
+	}

 	/* If MD is present, limit digest size based on LP256 */
-	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+	if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
 		md_limit = SHA256_DIGEST_SIZE;

 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
@@ -2556,8 +2576,7 @@ static int __init caam_qi_algapi_init(void)
 		 * Check support for AES algorithms not available
 		 * on LP devices.
 		 */
-		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
-		    (alg_aai == OP_ALG_AAI_GCM))
+		if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
 			continue;

 		/*
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 46924affa0bd..81712aa5d0f2 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -3,6 +3,7 @@
  * caam - Freescale FSL CAAM support for ahash functions of crypto API
  *
  * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
  *
  * Based on caamalg.c crypto API driver.
  *
@@ -1801,7 +1802,7 @@ static int __init caam_algapi_hash_init(void)
 	int i = 0, err = 0;
 	struct caam_drv_private *priv;
 	unsigned int md_limit = SHA512_DIGEST_SIZE;
-	u32 cha_inst, cha_vid;
+	u32 md_inst, md_vid;

 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
 	if (!dev_node) {
@@ -1831,18 +1832,27 @@ static int __init caam_algapi_hash_init(void)
 	 * Register crypto algorithms the device supports.  First, identify
 	 * presence and attributes of MD block.
 	 */
-	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
-	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+	if (priv->era < 10) {
+		md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
+			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+		md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+	} else {
+		u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+
+		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+		md_inst = mdha & CHA_VER_NUM_MASK;
+	}

 	/*
 	 * Skip registration of any hashing algorithms if MD block
 	 * is not present.
 	 */
-	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
+	if (!md_inst)
 		return -ENODEV;

 	/* Limit digest size based on LP256 */
-	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
+	if (md_vid == CHA_VER_VID_MD_LP256)
 		md_limit = SHA256_DIGEST_SIZE;

 	INIT_LIST_HEAD(&hash_list);
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 4fc209cbbeab..77ab28a2811a 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -3,6 +3,7 @@
  * caam - Freescale FSL CAAM support for Public Key Cryptography
  *
  * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
  *
  * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
  * all the desired key parameters, input and output pointers.
@@ -1017,7 +1018,7 @@ static int __init caam_pkc_init(void)
 	struct platform_device *pdev;
 	struct device *ctrldev;
 	struct caam_drv_private *priv;
-	u32 cha_inst, pk_inst;
+	u32 pk_inst;
 	int err;

 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
@@ -1045,8 +1046,11 @@ static int __init caam_pkc_init(void)
 		return -ENODEV;

 	/* Determine public key hardware accelerator presence. */
-	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
-	pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
+	if (priv->era < 10)
+		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
+	else
+		pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;

 	/* Do not register algorithms if PKHA is not present. */
 	if (!pk_inst)
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 4318b0aa6fb9..a387c8d49a62 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -3,6 +3,7 @@
  * caam - Freescale FSL CAAM support for hw_random
  *
  * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
  *
  * Based on caamalg.c crypto API driver.
 *
@@ -309,6 +310,7 @@ static int __init caam_rng_init(void)
 	struct platform_device *pdev;
 	struct device *ctrldev;
 	struct caam_drv_private *priv;
+	u32 rng_inst;
 	int err;

 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
@@ -336,7 +338,13 @@ static int __init caam_rng_init(void)
 		return -ENODEV;

 	/* Check for an instantiated RNG before registration */
-	if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
+	if (priv->era < 10)
+		rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+			    CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
+	else
+		rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
+
+	if (!rng_inst)
 		return -ENODEV;

 	dev = caam_jr_alloc();
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 3fc793193821..16bbc72f041a 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -3,6 +3,7 @@
 * Controller-level driver, kernel property detection, initialization
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
 */

 #include <linux/device.h>
@@ -106,7 +107,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
 	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
 	struct caam_deco __iomem *deco = ctrlpriv->deco;
 	unsigned int timeout = 100000;
-	u32 deco_dbg_reg, flags;
+	u32 deco_dbg_reg, deco_state, flags;
 	int i;

@@ -149,13 +150,22 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
 		timeout = 10000000;
 	do {
 		deco_dbg_reg = rd_reg32(&deco->desc_dbg);
+
+		if (ctrlpriv->era < 10)
+			deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
+				     DESC_DBG_DECO_STAT_SHIFT;
+		else
+			deco_state = (rd_reg32(&deco->dbg_exec) &
+				      DESC_DER_DECO_STAT_MASK) >>
+				     DESC_DER_DECO_STAT_SHIFT;
+
 		/*
 		 * If an error occured in the descriptor, then
 		 * the DECO status field will be set to 0x0D
 		 */
-		if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
-		    DESC_DBG_DECO_STAT_HOST_ERR)
+		if (deco_state == DECO_STAT_HOST_ERR)
 			break;
+
 		cpu_relax();
 	} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);

@@ -491,7 +501,7 @@ static int caam_probe(struct platform_device *pdev)
 	struct caam_perfmon *perfmon;
 #endif
 	u32 scfgr, comp_params;
-	u32 cha_vid_ls;
+	u8 rng_vid;
 	int pg_size;
 	int BLOCK_OFFSET = 0;

@@ -733,15 +743,19 @@ static int caam_probe(struct platform_device *pdev)
 		goto caam_remove;
 	}

-	cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
+	if (ctrlpriv->era < 10)
+		rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
+			   CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
+	else
+		rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
+			   CHA_VER_VID_SHIFT;

 	/*
 	 * If SEC has RNG version >= 4 and RNG state handle has not been
 	 * already instantiated, do RNG instantiation
 	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
 	 */
-	if (!ctrlpriv->mc_en &&
-	    (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
+	if (!ctrlpriv->mc_en && rng_vid >= 4) {
 		ctrlpriv->rng4_sh_init =
 			rd_reg32(&ctrl->r4tst[0].rdsta);
 		/*
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index f76ff160a02c..ec1ef06049b4 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -4,6 +4,7 @@
 * Definitions to support CAAM descriptor instruction generation
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
 */

 #ifndef DESC_H
@@ -1133,6 +1134,12 @@
 #define OP_ALG_TYPE_CLASS1	(2 << OP_ALG_TYPE_SHIFT)
 #define OP_ALG_TYPE_CLASS2	(4 << OP_ALG_TYPE_SHIFT)

+/* version register fields */
+#define OP_VER_CCHA_NUM		0x000000ff /* Number CCHAs instantiated */
+#define OP_VER_CCHA_MISC	0x0000ff00 /* CCHA Miscellaneous Information */
+#define OP_VER_CCHA_REV		0x00ff0000 /* CCHA Revision Number */
+#define OP_VER_CCHA_VID		0xff000000 /* CCHA Version ID */
+
 #define OP_ALG_ALGSEL_SHIFT	16
 #define OP_ALG_ALGSEL_MASK	(0xff << OP_ALG_ALGSEL_SHIFT)
 #define OP_ALG_ALGSEL_SUBMASK	(0x0f << OP_ALG_ALGSEL_SHIFT)
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 457815f965c0..3cd0822ea819 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -3,6 +3,7 @@
 * CAAM hardware register-level view
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
 */

 #ifndef REGS_H
@@ -211,6 +212,47 @@ struct jr_outentry {
 	u32 jrstatus;	/* Status for completed descriptor */
 } __packed;

+/* Version registers (Era 10+)	e80-eff */
+struct version_regs {
+	u32 crca;	/* CRCA_VERSION */
+	u32 afha;	/* AFHA_VERSION */
+	u32 kfha;	/* KFHA_VERSION */
+	u32 pkha;	/* PKHA_VERSION */
+	u32 aesa;	/* AESA_VERSION */
+	u32 mdha;	/* MDHA_VERSION */
+	u32 desa;	/* DESA_VERSION */
+	u32 snw8a;	/* SNW8A_VERSION */
+	u32 snw9a;	/* SNW9A_VERSION */
+	u32 zuce;	/* ZUCE_VERSION */
+	u32 zuca;	/* ZUCA_VERSION */
+	u32 ccha;	/* CCHA_VERSION */
+	u32 ptha;	/* PTHA_VERSION */
+	u32 rng;	/* RNG_VERSION */
+	u32 trng;	/* TRNG_VERSION */
+	u32 aaha;	/* AAHA_VERSION */
+	u32 rsvd[10];
+	u32 sr;		/* SR_VERSION */
+	u32 dma;	/* DMA_VERSION */
+	u32 ai;		/* AI_VERSION */
+	u32 qi;		/* QI_VERSION */
+	u32 jr;		/* JR_VERSION */
+	u32 deco;	/* DECO_VERSION */
+};
+
+/* Version registers bitfields */
+
+/* Number of CHAs instantiated */
+#define CHA_VER_NUM_MASK	0xffull
+/* CHA Miscellaneous Information */
+#define CHA_VER_MISC_SHIFT	8
+#define CHA_VER_MISC_MASK	(0xffull << CHA_VER_MISC_SHIFT)
+/* CHA Revision Number */
+#define CHA_VER_REV_SHIFT	16
+#define CHA_VER_REV_MASK	(0xffull << CHA_VER_REV_SHIFT)
+/* CHA Version ID */
+#define CHA_VER_VID_SHIFT	24
+#define CHA_VER_VID_MASK	(0xffull << CHA_VER_VID_SHIFT)
+
 /*
  * caam_perfmon - Performance Monitor/Secure Memory Status/
  *                CAAM Global Status/Component Version IDs
@@ -223,15 +265,13 @@ struct jr_outentry {
 #define CHA_NUM_MS_DECONUM_MASK	(0xfull << CHA_NUM_MS_DECONUM_SHIFT)

 /*
- * CHA version IDs / instantiation bitfields
+ * CHA version IDs / instantiation bitfields (< Era 10)
  * Defined for use with the cha_id fields in perfmon, but the same shift/mask
  * selectors can be used to pull out the number of instantiated blocks within
  * cha_num fields in perfmon because the locations are the same.
  */
 #define CHA_ID_LS_AES_SHIFT	0
 #define CHA_ID_LS_AES_MASK	(0xfull << CHA_ID_LS_AES_SHIFT)
-#define CHA_ID_LS_AES_LP	(0x3ull << CHA_ID_LS_AES_SHIFT)
-#define CHA_ID_LS_AES_HP	(0x4ull << CHA_ID_LS_AES_SHIFT)

 #define CHA_ID_LS_DES_SHIFT	4
 #define CHA_ID_LS_DES_MASK	(0xfull << CHA_ID_LS_DES_SHIFT)
@@ -241,9 +281,6 @@ struct jr_outentry {

 #define CHA_ID_LS_MD_SHIFT	12
 #define CHA_ID_LS_MD_MASK	(0xfull << CHA_ID_LS_MD_SHIFT)
-#define CHA_ID_LS_MD_LP256	(0x0ull << CHA_ID_LS_MD_SHIFT)
-#define CHA_ID_LS_MD_LP512	(0x1ull << CHA_ID_LS_MD_SHIFT)
-#define CHA_ID_LS_MD_HP	(0x2ull << CHA_ID_LS_MD_SHIFT)

 #define CHA_ID_LS_RNG_SHIFT	16
 #define CHA_ID_LS_RNG_MASK	(0xfull << CHA_ID_LS_RNG_SHIFT)
@@ -269,6 +306,13 @@ struct jr_outentry {
 #define CHA_ID_MS_JR_SHIFT	28
 #define CHA_ID_MS_JR_MASK	(0xfull << CHA_ID_MS_JR_SHIFT)

+/* Specific CHA version IDs */
+#define CHA_VER_VID_AES_LP	0x3ull
+#define CHA_VER_VID_AES_HP	0x4ull
+#define CHA_VER_VID_MD_LP256	0x0ull
+#define CHA_VER_VID_MD_LP512	0x1ull
+#define CHA_VER_VID_MD_HP	0x2ull
+
 struct sec_vid {
 	u16 ip_id;
 	u8 maj_rev;
@@ -479,8 +523,10 @@ struct caam_ctrl {
 		struct rng4tst r4tst[2];
 	};

-	u32 rsvd9[448];
+	u32 rsvd9[416];

+	/* Version registers - introduced with era 10	e80-eff */
+	struct version_regs vreg;
 	/* Performance Monitor				f00-fff */
 	struct caam_perfmon perfmon;
 };
@@ -570,8 +616,10 @@ struct caam_job_ring {
 	u32 rsvd11;
 	u32 jrcommand;	/* JRCRx - JobR command */

-	u32 rsvd12[932];
+	u32 rsvd12[900];

+	/* Version registers - introduced with era 10	e80-eff */
+	struct version_regs vreg;
 	/* Performance Monitor				f00-fff */
 	struct caam_perfmon perfmon;
 };
@@ -878,13 +926,19 @@ struct caam_deco {
 	u32 rsvd29[48];
 	u32 descbuf[64];	/* DxDESB - Descriptor buffer */
 	u32 rscvd30[193];
-#define DESC_DBG_DECO_STAT_HOST_ERR	0x00D00000
 #define DESC_DBG_DECO_STAT_VALID	0x80000000
 #define DESC_DBG_DECO_STAT_MASK		0x00F00000
+#define DESC_DBG_DECO_STAT_SHIFT	20
 	u32 desc_dbg;		/* DxDDR - DECO Debug Register */
-	u32 rsvd31[126];
+	u32 rsvd31[13];
+#define DESC_DER_DECO_STAT_MASK		0x000F0000
+#define DESC_DER_DECO_STAT_SHIFT	16
+	u32 dbg_exec;		/* DxDER - DECO Debug Exec Register */
+	u32 rsvd32[112];
 };

+#define DECO_STAT_HOST_ERR	0xD
+
 #define DECO_JQCR_WHL		0x20000000
 #define DECO_JQCR_FOUR		0x10000000
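The second register-map change called out in the commit message - the DBG_DBG[deco_state] field moving to DBG_EXEC[19:16] - reduces to a selection like the sketch below (the helper name is illustrative, not part of the patch; the masks and shifts are the ones the patch adds to regs.h). The old location is bits 23:20 of DxDDR; the new one is bits 19:16 of DxDER.

/* Illustrative sketch only - name is hypothetical; mirrors the logic the
 * patch adds to run_descriptor_deco0() above. */
static u32 caam_deco_state(struct caam_drv_private *ctrlpriv,
			   struct caam_deco __iomem *deco)
{
	if (ctrlpriv->era < 10)	/* DxDDR (desc_dbg), bits 23:20 */
		return (rd_reg32(&deco->desc_dbg) & DESC_DBG_DECO_STAT_MASK) >>
		       DESC_DBG_DECO_STAT_SHIFT;

	/* Era 10+: DxDER (dbg_exec), bits 19:16 */
	return (rd_reg32(&deco->dbg_exec) & DESC_DER_DECO_STAT_MASK) >>
	       DESC_DER_DECO_STAT_SHIFT;
}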