// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for hw_random
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * Based on caamalg.c crypto API driver.
 */

#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/kfifo.h>

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"

#define CAAM_RNG_MAX_FIFO_STORE_SIZE	16

/*
 * Length of used descriptors, see caam_init_desc()
 */
#define CAAM_RNG_DESC_LEN (CAAM_CMD_SZ +			\
			   CAAM_CMD_SZ +			\
			   CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)
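
/*
 * Driver context: a single job ring device services both read paths.
 * desc_sync backs the blocking hwrng read, while desc_async backs the
 * worker that refills the kfifo of pre-generated random bytes for
 * non-blocking reads.
 */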
struct caam_rng_ctx {
	struct hwrng rng;
	struct device *jrdev;
	struct device *ctrldev;
	void *desc_async;
	void *desc_sync;
	struct work_struct worker;
	struct kfifo fifo;
};

struct caam_rng_job_ctx {
	struct completion *done;
	int *err;
};

static struct caam_rng_ctx *to_caam_rng_ctx(struct hwrng *r)
{
	return (struct caam_rng_ctx *)r->priv;
}
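
/*
 * Completion callback invoked by the job ring driver once the RNG
 * descriptor has executed: decode any CAAM status word into an errno
 * and wake the submitter.
 */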
static void caam_rng_done(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct caam_rng_job_ctx *jctx = context;

	if (err)
		*jctx->err = caam_jr_strstatus(jrdev, err);

	complete(jctx->done);
}
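
/*
 * Build the three-command job descriptor accounted for by
 * CAAM_RNG_DESC_LEN: a header, an RNG OPERATION with prediction
 * resistance enabled, and a FIFO STORE of
 * CAAM_RNG_MAX_FIFO_STORE_SIZE random bytes to dst_dma.
 */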
static u32 *caam_init_desc(u32 *desc, dma_addr_t dst_dma)
{
	init_job_desc(desc, 0);

	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG |
			 OP_ALG_PR_ON);

	append_fifo_store(desc, dst_dma,
			  CAAM_RNG_MAX_FIFO_STORE_SIZE, FIFOST_TYPE_RNGSTORE);

	print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS,
			     16, 4, desc, desc_bytes(desc), 1);

	return desc;
}
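
/*
 * Run one RNG job synchronously: DMA-map dst, enqueue the descriptor
 * and sleep until caam_rng_done() fires. The job always produces
 * CAAM_RNG_MAX_FIFO_STORE_SIZE bytes (len is overwritten below), so
 * dst must be at least that large. Returns the number of bytes
 * written or a negative errno.
 */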
static int caam_rng_read_one(struct device *jrdev,
			     void *dst, int len,
			     void *desc,
			     struct completion *done)
{
	dma_addr_t dst_dma;
	int err, ret = 0;
	struct caam_rng_job_ctx jctx = {
		.done = done,
		.err  = &ret,
	};

	len = CAAM_RNG_MAX_FIFO_STORE_SIZE;

	dst_dma = dma_map_single(jrdev, dst, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map destination memory\n");
		return -ENOMEM;
	}

	init_completion(done);
	err = caam_jr_enqueue(jrdev,
			      caam_init_desc(desc, dst_dma),
			      caam_rng_done, &jctx);
	if (err == -EINPROGRESS) {
		wait_for_completion(done);
		err = 0;
	}

	dma_unmap_single(jrdev, dst_dma, len, DMA_FROM_DEVICE);

	return err ?: (ret ?: len);
}
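
/*
 * Refill the kfifo: kfifo_dma_in_prepare() exposes the fifo's free
 * space as a scatterlist for the FIFO STORE destination, and
 * kfifo_dma_in_finish() commits however many bytes the job actually
 * produced.
 */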
static void caam_rng_fill_async(struct caam_rng_ctx *ctx)
{
	struct scatterlist sg[1];
	struct completion done;
	int len, nents;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nents = kfifo_dma_in_prepare(&ctx->fifo, sg, ARRAY_SIZE(sg),
				     CAAM_RNG_MAX_FIFO_STORE_SIZE);
	if (!nents)
		return;

	len = caam_rng_read_one(ctx->jrdev, sg_virt(&sg[0]),
				sg[0].length,
				ctx->desc_async,
				&done);
	if (len < 0)
		return;

	kfifo_dma_in_finish(&ctx->fifo, len);
}

static void caam_rng_worker(struct work_struct *work)
{
	struct caam_rng_ctx *ctx = container_of(work, struct caam_rng_ctx,
						worker);
	caam_rng_fill_async(ctx);
}
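
/*
 * hwrng .read callback: callers that may sleep get a blocking read
 * straight from the hardware, while non-blocking callers drain the
 * kfifo and schedule the worker to top it up once it runs empty.
 */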
static int caam_read(struct hwrng *rng, void *dst, size_t max, bool wait)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	int out;

	if (wait) {
		struct completion done;

		return caam_rng_read_one(ctx->jrdev, dst, max,
					 ctx->desc_sync, &done);
	}

	out = kfifo_out(&ctx->fifo, dst, max);
	if (kfifo_is_empty(&ctx->fifo))
		schedule_work(&ctx->worker);

	return out;
}

static void caam_cleanup(struct hwrng *rng)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);

	flush_work(&ctx->worker);
	caam_jr_free(ctx->jrdev);
	kfifo_free(&ctx->fifo);
}
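
/*
 * hwrng .init callback: allocate both job descriptors and the kfifo
 * from DMA-able memory, acquire a job ring, and prime the fifo so
 * early non-blocking reads already have data.
 */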
static int caam_init(struct hwrng *rng)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	int err;

	ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
				      GFP_DMA | GFP_KERNEL);
	if (!ctx->desc_sync)
		return -ENOMEM;

	ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
				       GFP_DMA | GFP_KERNEL);
	if (!ctx->desc_async)
		return -ENOMEM;

	if (kfifo_alloc(&ctx->fifo, CAAM_RNG_MAX_FIFO_STORE_SIZE,
			GFP_DMA | GFP_KERNEL))
		return -ENOMEM;

	INIT_WORK(&ctx->worker, caam_rng_worker);

	ctx->jrdev = caam_jr_alloc();
	err = PTR_ERR_OR_ZERO(ctx->jrdev);
	if (err) {
		kfifo_free(&ctx->fifo);
		pr_err("Job Ring Device allocation for transform failed\n");
		return err;
	}

	/*
	 * Fill async buffer to have early randomness data for
	 * hw_random
	 */
	caam_rng_fill_async(ctx);

	return 0;
}
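
/*
 * caam_rng_init() is forward-declared because its address doubles as
 * the devres group identifier that caam_rng_exit() releases.
 */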
int caam_rng_init(struct device *ctrldev);

void caam_rng_exit(struct device *ctrldev)
{
	devres_release_group(ctrldev, caam_rng_init);
}
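
/*
 * Probe-time entry point: bail out quietly if no RNG block is
 * instantiated in this CAAM, otherwise register with hw_random
 * inside an open devres group so teardown is a single release.
 */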
int caam_rng_init(struct device *ctrldev)
{
	struct caam_rng_ctx *ctx;
	u32 rng_inst;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int ret;

	/* Check for an instantiated RNG before registration */
	if (priv->era < 10)
		rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			    CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
	else
		rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;

	if (!rng_inst)
		return 0;

	if (!devres_open_group(ctrldev, caam_rng_init, GFP_KERNEL))
		return -ENOMEM;

	ctx = devm_kzalloc(ctrldev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ctrldev = ctrldev;

	ctx->rng.name = "rng-caam";
	ctx->rng.init = caam_init;
	ctx->rng.cleanup = caam_cleanup;
	ctx->rng.read = caam_read;
	ctx->rng.priv = (unsigned long)ctx;
	/* bits of entropy per 1024 bits of input; 1024 claims full entropy */
	ctx->rng.quality = 1024;

	dev_info(ctrldev, "registering rng-caam\n");

	ret = devm_hwrng_register(ctrldev, &ctx->rng);
	if (ret) {
		caam_rng_exit(ctrldev);
		return ret;
	}

	devres_close_group(ctrldev, caam_rng_init);
	return 0;
}