0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Copyright (C) 2019-2020 ARM Limited or its affiliates. */
0003 
0004 #include <linux/kernel.h>
0005 #include <linux/module.h>
0006 #include <linux/clk.h>
0007 #include <linux/hw_random.h>
0008 #include <linux/io.h>
0009 #include <linux/platform_device.h>
0010 #include <linux/pm_runtime.h>
0011 #include <linux/interrupt.h>
0012 #include <linux/irqreturn.h>
0013 #include <linux/workqueue.h>
0014 #include <linux/circ_buf.h>
0015 #include <linux/completion.h>
0016 #include <linux/of.h>
0017 #include <linux/bitfield.h>
0018 #include <linux/fips.h>
0019 
0020 #include "cctrng.h"
0021 
/* Build a GENMASK for a register field out of its <name>_BIT_SHIFT /
 * <name>_BIT_SIZE definitions (provided by cctrng.h).
 */
#define CC_REG_LOW(name)  (name ## _BIT_SHIFT)
#define CC_REG_HIGH(name) (CC_REG_LOW(name) + name ## _BIT_SIZE - 1)
#define CC_GENMASK(name)  GENMASK(CC_REG_HIGH(name), CC_REG_LOW(name))

/* Extract field fld_name of register reg_name from the raw value reg_val. */
#define CC_REG_FLD_GET(reg_name, fld_name, reg_val)     \
	(FIELD_GET(CC_GENMASK(CC_ ## reg_name ## _ ## fld_name), reg_val))

/* max polls of NVM_IS_IDLE while waiting for CryptoCell reset completion */
#define CC_HW_RESET_LOOP_COUNT 10
/* runtime-PM autosuspend delay, in milliseconds */
#define CC_TRNG_SUSPEND_TIMEOUT 3000

/* data circular buffer in words must be:
 *  - of a power-of-2 size (limitation of circ_buf.h macros)
 *  - at least 6, the size generated in the EHR according to HW implementation
 */
#define CCTRNG_DATA_BUF_WORDS 32

/* The timeout for the TRNG operation should be calculated with the formula:
 * Timeout = EHR_NUM * VN_COEFF * EHR_LENGTH * SAMPLE_CNT * SCALE_VALUE
 * while:
 *  - SAMPLE_CNT is input value from the characterisation process
 *  - all the rest are constants
 */
#define EHR_NUM 1
#define VN_COEFF 4
#define EHR_LENGTH CC_TRNG_EHR_IN_BITS
#define SCALE_VALUE 2
#define CCTRNG_TIMEOUT(smpl_cnt) \
	(EHR_NUM * VN_COEFF * EHR_LENGTH * smpl_cnt * SCALE_VALUE)
0050 
/* Per-device driver state. */
struct cctrng_drvdata {
	struct platform_device *pdev;	/* owning platform device */
	void __iomem *cc_base;		/* mapped CryptoCell register window */
	struct clk *clk;		/* optional device clock (may be NULL) */
	struct hwrng rng;		/* hwrng framework registration handle */
	u32 active_rosc;		/* index of the ring oscillator in use */
	/* Sampling interval for each ring oscillator:
	 * count of ring oscillator cycles between consecutive bits sampling.
	 * Value of 0 indicates non-valid rosc
	 */
	u32 smpl_ratio[CC_TRNG_NUM_OF_ROSCS];

	u32 data_buf[CCTRNG_DATA_BUF_WORDS];	/* collected entropy words */
	struct circ_buf circ;		/* circular view over data_buf */
	struct work_struct compwork;	/* EHR-completion deferred work */
	struct work_struct startwork;	/* HW (re)trigger deferred work */

	/* pending_hw - 1 when HW is pending, 0 when it is idle */
	atomic_t pending_hw;

	/* protects against multiple concurrent consumers of data_buf */
	spinlock_t read_lock;
};
0074 
0075 
0076 /* functions for write/read CC registers */
0077 static inline void cc_iowrite(struct cctrng_drvdata *drvdata, u32 reg, u32 val)
0078 {
0079     iowrite32(val, (drvdata->cc_base + reg));
0080 }
0081 static inline u32 cc_ioread(struct cctrng_drvdata *drvdata, u32 reg)
0082 {
0083     return ioread32(drvdata->cc_base + reg);
0084 }
0085 
0086 
/* Take a runtime-PM reference and resume the device if suspended.
 * Returns 0 on success (including the already-active case) or a negative
 * errno on failure.
 */
static int cc_trng_pm_get(struct device *dev)
{
	int rc;

	rc = pm_runtime_get_sync(dev);
	if (rc < 0) {
		/* get_sync() bumps the usage count even on failure -
		 * drop it so the counter does not leak.
		 */
		pm_runtime_put_noidle(dev);
		return rc;
	}

	/* pm_runtime_get_sync() can return 1 (already active) - not an error */
	return 0;
}
0096 
/* Drop the runtime-PM usage count and let autosuspend take effect. */
static void cc_trng_pm_put_suspend(struct device *dev)
{
	int err;

	pm_runtime_mark_last_busy(dev);
	err = pm_runtime_put_autosuspend(dev);
	if (err)
		dev_err(dev, "pm_runtime_put_autosuspend returned %x\n", err);
}
0106 
0107 static int cc_trng_pm_init(struct cctrng_drvdata *drvdata)
0108 {
0109     struct device *dev = &(drvdata->pdev->dev);
0110 
0111     /* must be before the enabling to avoid redundant suspending */
0112     pm_runtime_set_autosuspend_delay(dev, CC_TRNG_SUSPEND_TIMEOUT);
0113     pm_runtime_use_autosuspend(dev);
0114     /* set us as active - note we won't do PM ops until cc_trng_pm_go()! */
0115     return pm_runtime_set_active(dev);
0116 }
0117 
0118 static void cc_trng_pm_go(struct cctrng_drvdata *drvdata)
0119 {
0120     struct device *dev = &(drvdata->pdev->dev);
0121 
0122     /* enable the PM module*/
0123     pm_runtime_enable(dev);
0124 }
0125 
0126 static void cc_trng_pm_fini(struct cctrng_drvdata *drvdata)
0127 {
0128     struct device *dev = &(drvdata->pdev->dev);
0129 
0130     pm_runtime_disable(dev);
0131 }
0132 
0133 
0134 static inline int cc_trng_parse_sampling_ratio(struct cctrng_drvdata *drvdata)
0135 {
0136     struct device *dev = &(drvdata->pdev->dev);
0137     struct device_node *np = drvdata->pdev->dev.of_node;
0138     int rc;
0139     int i;
0140     /* ret will be set to 0 if at least one rosc has (sampling ratio > 0) */
0141     int ret = -EINVAL;
0142 
0143     rc = of_property_read_u32_array(np, "arm,rosc-ratio",
0144                     drvdata->smpl_ratio,
0145                     CC_TRNG_NUM_OF_ROSCS);
0146     if (rc) {
0147         /* arm,rosc-ratio was not found in device tree */
0148         return rc;
0149     }
0150 
0151     /* verify that at least one rosc has (sampling ratio > 0) */
0152     for (i = 0; i < CC_TRNG_NUM_OF_ROSCS; ++i) {
0153         dev_dbg(dev, "rosc %d sampling ratio %u",
0154             i, drvdata->smpl_ratio[i]);
0155 
0156         if (drvdata->smpl_ratio[i] > 0)
0157             ret = 0;
0158     }
0159 
0160     return ret;
0161 }
0162 
0163 static int cc_trng_change_rosc(struct cctrng_drvdata *drvdata)
0164 {
0165     struct device *dev = &(drvdata->pdev->dev);
0166 
0167     dev_dbg(dev, "cctrng change rosc (was %d)\n", drvdata->active_rosc);
0168     drvdata->active_rosc += 1;
0169 
0170     while (drvdata->active_rosc < CC_TRNG_NUM_OF_ROSCS) {
0171         if (drvdata->smpl_ratio[drvdata->active_rosc] > 0)
0172             return 0;
0173 
0174         drvdata->active_rosc += 1;
0175     }
0176     return -EINVAL;
0177 }
0178 
0179 
0180 static void cc_trng_enable_rnd_source(struct cctrng_drvdata *drvdata)
0181 {
0182     u32 max_cycles;
0183 
0184     /* Set watchdog threshold to maximal allowed time (in CPU cycles) */
0185     max_cycles = CCTRNG_TIMEOUT(drvdata->smpl_ratio[drvdata->active_rosc]);
0186     cc_iowrite(drvdata, CC_RNG_WATCHDOG_VAL_REG_OFFSET, max_cycles);
0187 
0188     /* enable the RND source */
0189     cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0x1);
0190 
0191     /* unmask RNG interrupts */
0192     cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, (u32)~CC_RNG_INT_MASK);
0193 }
0194 
0195 
0196 /* increase circular data buffer index (head/tail) */
0197 static inline void circ_idx_inc(int *idx, int bytes)
0198 {
0199     *idx += (bytes + 3) >> 2;
0200     *idx &= (CCTRNG_DATA_BUF_WORDS - 1);
0201 }
0202 
0203 static inline size_t circ_buf_space(struct cctrng_drvdata *drvdata)
0204 {
0205     return CIRC_SPACE(drvdata->circ.head,
0206               drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
0207 
0208 }
0209 
0210 static int cctrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
0211 {
0212     /* current implementation ignores "wait" */
0213 
0214     struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)rng->priv;
0215     struct device *dev = &(drvdata->pdev->dev);
0216     u32 *buf = (u32 *)drvdata->circ.buf;
0217     size_t copied = 0;
0218     size_t cnt_w;
0219     size_t size;
0220     size_t left;
0221 
0222     if (!spin_trylock(&drvdata->read_lock)) {
0223         /* concurrent consumers from data_buf cannot be served */
0224         dev_dbg_ratelimited(dev, "unable to hold lock\n");
0225         return 0;
0226     }
0227 
0228     /* copy till end of data buffer (without wrap back) */
0229     cnt_w = CIRC_CNT_TO_END(drvdata->circ.head,
0230                 drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
0231     size = min((cnt_w<<2), max);
0232     memcpy(data, &(buf[drvdata->circ.tail]), size);
0233     copied = size;
0234     circ_idx_inc(&drvdata->circ.tail, size);
0235     /* copy rest of data in data buffer */
0236     left = max - copied;
0237     if (left > 0) {
0238         cnt_w = CIRC_CNT(drvdata->circ.head,
0239                  drvdata->circ.tail, CCTRNG_DATA_BUF_WORDS);
0240         size = min((cnt_w<<2), left);
0241         memcpy(data, &(buf[drvdata->circ.tail]), size);
0242         copied += size;
0243         circ_idx_inc(&drvdata->circ.tail, size);
0244     }
0245 
0246     spin_unlock(&drvdata->read_lock);
0247 
0248     if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
0249         if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
0250             /* re-check space in buffer to avoid potential race */
0251             if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
0252                 /* increment device's usage counter */
0253                 int rc = cc_trng_pm_get(dev);
0254 
0255                 if (rc) {
0256                     dev_err(dev,
0257                         "cc_trng_pm_get returned %x\n",
0258                         rc);
0259                     return rc;
0260                 }
0261 
0262                 /* schedule execution of deferred work handler
0263                  * for filling of data buffer
0264                  */
0265                 schedule_work(&drvdata->startwork);
0266             } else {
0267                 atomic_set(&drvdata->pending_hw, 0);
0268             }
0269         }
0270     }
0271 
0272     return copied;
0273 }
0274 
/* Program the TRNG HW for the currently active ROSC and start it:
 * enable the RND clock, software-reset the RNG block, set the sampling
 * ratio, select the ring oscillator, then enable the entropy source.
 */
static void cc_trng_hw_trigger(struct cctrng_drvdata *drvdata)
{
	u32 tmp_smpl_cnt = 0;
	struct device *dev = &(drvdata->pdev->dev);

	dev_dbg(dev, "cctrng hw trigger.\n");

	/* enable the HW RND clock */
	cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);

	/* do software reset */
	cc_iowrite(drvdata, CC_RNG_SW_RESET_REG_OFFSET, 0x1);
	/* in order to verify that the reset has completed,
	 * the sample count need to be verified
	 * NOTE(review): this poll has no iteration bound - it assumes the HW
	 * always finishes its reset and latches SAMPLE_CNT1 eventually.
	 */
	do {
		/* enable the HW RND clock   */
		cc_iowrite(drvdata, CC_RNG_CLK_ENABLE_REG_OFFSET, 0x1);

		/* set sampling ratio (rng_clocks) between consecutive bits */
		cc_iowrite(drvdata, CC_SAMPLE_CNT1_REG_OFFSET,
			   drvdata->smpl_ratio[drvdata->active_rosc]);

		/* read the sampling ratio  */
		tmp_smpl_cnt = cc_ioread(drvdata, CC_SAMPLE_CNT1_REG_OFFSET);

	} while (tmp_smpl_cnt != drvdata->smpl_ratio[drvdata->active_rosc]);

	/* disable the RND source for setting new parameters in HW */
	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);

	/* clear all pending RNG interrupts */
	cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, 0xFFFFFFFF);

	/* select the active ring oscillator */
	cc_iowrite(drvdata, CC_TRNG_CONFIG_REG_OFFSET, drvdata->active_rosc);

	/* Debug Control register: set to 0 - no bypasses */
	cc_iowrite(drvdata, CC_TRNG_DEBUG_CONTROL_REG_OFFSET, 0);

	/* arm the watchdog, enable the source and unmask RNG interrupts */
	cc_trng_enable_rnd_source(drvdata);
}
0315 
/* Deferred handler for an RNG interrupt: harvest one EHR (entropy hold
 * register) worth of data into the circular buffer, or fall back to the
 * next ring oscillator on AUTOCORR/WATCHDOG errors. Drops the runtime-PM
 * reference taken by the trigger path when collection stops.
 */
static void cc_trng_compwork_handler(struct work_struct *w)
{
	u32 isr = 0;
	u32 ehr_valid = 0;
	struct cctrng_drvdata *drvdata =
			container_of(w, struct cctrng_drvdata, compwork);
	struct device *dev = &(drvdata->pdev->dev);
	int i;

	/* stop DMA and the RNG source */
	cc_iowrite(drvdata, CC_RNG_DMA_ENABLE_REG_OFFSET, 0);
	cc_iowrite(drvdata, CC_RND_SOURCE_ENABLE_REG_OFFSET, 0);

	/* read RNG_ISR and check for errors */
	isr = cc_ioread(drvdata, CC_RNG_ISR_REG_OFFSET);
	ehr_valid = CC_REG_FLD_GET(RNG_ISR, EHR_VALID, isr);
	dev_dbg(dev, "Got RNG_ISR=0x%08X (EHR_VALID=%u)\n", isr, ehr_valid);

	if (fips_enabled && CC_REG_FLD_GET(RNG_ISR, CRNGT_ERR, isr)) {
		fips_fail_notify();
		/* FIPS error is fatal */
		panic("Got HW CRNGT error while fips is enabled!\n");
	}

	/* Clear all pending RNG interrupts */
	cc_iowrite(drvdata, CC_RNG_ICR_REG_OFFSET, isr);


	if (!ehr_valid) {
		/* in case of AUTOCORR/TIMEOUT error, try the next ROSC */
		if (CC_REG_FLD_GET(RNG_ISR, AUTOCORR_ERR, isr) ||
				CC_REG_FLD_GET(RNG_ISR, WATCHDOG, isr)) {
			dev_dbg(dev, "cctrng autocorr/timeout error.\n");
			goto next_rosc;
		}

		/* in case of VN error, ignore it */
	}

	/* read EHR data from registers */
	for (i = 0; i < CC_TRNG_EHR_IN_WORDS; i++) {
		/* calc word ptr in data_buf */
		u32 *buf = (u32 *)drvdata->circ.buf;

		buf[drvdata->circ.head] = cc_ioread(drvdata,
				CC_EHR_DATA_0_REG_OFFSET + (i*sizeof(u32)));

		/* EHR_DATA registers are cleared on read. In case 0 value was
		 * returned, restart the entropy collection.
		 */
		if (buf[drvdata->circ.head] == 0) {
			dev_dbg(dev, "Got 0 value in EHR. active_rosc %u\n",
				drvdata->active_rosc);
			goto next_rosc;
		}

		/* advance head by one word (4 bytes) */
		circ_idx_inc(&drvdata->circ.head, 1<<2);
	}

	atomic_set(&drvdata->pending_hw, 0);

	/* continue to fill data buffer if needed */
	if (circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) {
		if (atomic_cmpxchg(&drvdata->pending_hw, 0, 1) == 0) {
			/* Re-enable rnd source */
			cc_trng_enable_rnd_source(drvdata);
			return;
		}
	}

	/* buffer full (or another path claimed the HW): drop PM reference */
	cc_trng_pm_put_suspend(dev);

	dev_dbg(dev, "compwork handler done\n");
	return;

next_rosc:
	/* retry with the next valid ROSC if there is room and one remains */
	if ((circ_buf_space(drvdata) >= CC_TRNG_EHR_IN_WORDS) &&
			(cc_trng_change_rosc(drvdata) == 0)) {
		/* trigger trng hw with next rosc */
		cc_trng_hw_trigger(drvdata);
	} else {
		atomic_set(&drvdata->pending_hw, 0);
		cc_trng_pm_put_suspend(dev);
	}
}
0401 
/* Interrupt handler (registered IRQF_SHARED): acknowledges host-level
 * interrupt causes and defers RNG processing to cc_trng_compwork_handler().
 * Returns IRQ_NONE when the line fired for someone else.
 */
static irqreturn_t cc_isr(int irq, void *dev_id)
{
	struct cctrng_drvdata *drvdata = (struct cctrng_drvdata *)dev_id;
	struct device *dev = &(drvdata->pdev->dev);
	u32 irr;

	/* if driver suspended return, probably shared interrupt */
	if (pm_runtime_suspended(dev))
		return IRQ_NONE;

	/* read the interrupt status */
	irr = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
	dev_dbg(dev, "Got IRR=0x%08X\n", irr);

	if (irr == 0) /* Probably shared interrupt line */
		return IRQ_NONE;

	/* clear interrupt - must be before processing events */
	cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, irr);

	/* RNG interrupt - most probable */
	if (irr & CC_HOST_RNG_IRQ_MASK) {
		/* Mask RNG interrupts - will be unmasked in deferred work */
		cc_iowrite(drvdata, CC_RNG_IMR_REG_OFFSET, 0xFFFFFFFF);

		/* We clear RNG interrupt here,
		 * to avoid it from firing as we'll unmask RNG interrupts.
		 */
		cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET,
			   CC_HOST_RNG_IRQ_MASK);

		irr &= ~CC_HOST_RNG_IRQ_MASK;

		/* schedule execution of deferred work handler */
		schedule_work(&drvdata->compwork);
	}

	if (irr) {
		dev_dbg_ratelimited(dev,
				"IRR includes unknown cause bits (0x%08X)\n",
				irr);
		/* Just warning */
	}

	return IRQ_HANDLED;
}
0448 
0449 static void cc_trng_startwork_handler(struct work_struct *w)
0450 {
0451     struct cctrng_drvdata *drvdata =
0452             container_of(w, struct cctrng_drvdata, startwork);
0453 
0454     drvdata->active_rosc = 0;
0455     cc_trng_hw_trigger(drvdata);
0456 }
0457 
0458 
0459 static int cc_trng_clk_init(struct cctrng_drvdata *drvdata)
0460 {
0461     struct clk *clk;
0462     struct device *dev = &(drvdata->pdev->dev);
0463     int rc = 0;
0464 
0465     clk = devm_clk_get_optional(dev, NULL);
0466     if (IS_ERR(clk))
0467         return dev_err_probe(dev, PTR_ERR(clk),
0468                      "Error getting clock\n");
0469 
0470     drvdata->clk = clk;
0471 
0472     rc = clk_prepare_enable(drvdata->clk);
0473     if (rc) {
0474         dev_err(dev, "Failed to enable clock\n");
0475         return rc;
0476     }
0477 
0478     return 0;
0479 }
0480 
0481 static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
0482 {
0483     clk_disable_unprepare(drvdata->clk);
0484 }
0485 
0486 
/* Probe: map registers, parse DT sampling ratios, enable the clock, wire up
 * the shared IRQ and runtime PM, register with the hwrng core, then kick off
 * the first entropy collection. Order matters: interrupts are only unmasked
 * after the ISR is registered, and pending_hw is pre-set so a read cannot
 * trigger the HW before the explicit first trigger below.
 */
static int cctrng_probe(struct platform_device *pdev)
{
	struct cctrng_drvdata *drvdata;
	struct device *dev = &pdev->dev;
	int rc = 0;
	u32 val;
	int irq;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->rng.name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!drvdata->rng.name)
		return -ENOMEM;

	drvdata->rng.read = cctrng_read;
	drvdata->rng.priv = (unsigned long)drvdata;
	drvdata->rng.quality = CC_TRNG_QUALITY;

	platform_set_drvdata(pdev, drvdata);
	drvdata->pdev = pdev;

	/* the circ_buf is a view over the statically sized data_buf */
	drvdata->circ.buf = (char *)drvdata->data_buf;

	drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(drvdata->cc_base)) {
		dev_err(dev, "Failed to ioremap registers");
		return PTR_ERR(drvdata->cc_base);
	}

	/* Then IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* parse sampling rate from device tree */
	rc = cc_trng_parse_sampling_ratio(drvdata);
	if (rc) {
		dev_err(dev, "Failed to get legal sampling ratio for rosc\n");
		return rc;
	}

	rc = cc_trng_clk_init(drvdata);
	if (rc) {
		dev_err(dev, "cc_trng_clk_init failed\n");
		return rc;
	}

	INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler);
	INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler);
	spin_lock_init(&drvdata->read_lock);

	/* register the driver isr function */
	rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
	if (rc) {
		dev_err(dev, "Could not register to interrupt %d\n", irq);
		goto post_clk_err;
	}
	dev_dbg(dev, "Registered to IRQ: %d\n", irq);

	/* Clear all pending interrupts */
	val = cc_ioread(drvdata, CC_HOST_RGF_IRR_REG_OFFSET);
	dev_dbg(dev, "IRR=0x%08X\n", val);
	cc_iowrite(drvdata, CC_HOST_RGF_ICR_REG_OFFSET, val);

	/* unmask HOST RNG interrupt */
	cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
		   cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
		   ~CC_HOST_RNG_IRQ_MASK);

	/* init PM */
	rc = cc_trng_pm_init(drvdata);
	if (rc) {
		dev_err(dev, "cc_trng_pm_init failed\n");
		goto post_clk_err;
	}

	/* increment device's usage counter */
	rc = cc_trng_pm_get(dev);
	if (rc) {
		dev_err(dev, "cc_trng_pm_get returned %x\n", rc);
		goto post_pm_err;
	}

	/* set pending_hw to verify that HW won't be triggered from read */
	atomic_set(&drvdata->pending_hw, 1);

	/* registration of the hwrng device */
	rc = devm_hwrng_register(dev, &drvdata->rng);
	if (rc) {
		dev_err(dev, "Could not register hwrng device.\n");
		goto post_pm_err;
	}

	/* trigger HW to start generate data */
	drvdata->active_rosc = 0;
	cc_trng_hw_trigger(drvdata);

	/* All set, we can allow auto-suspend */
	cc_trng_pm_go(drvdata);

	dev_info(dev, "ARM cctrng device initialized\n");

	return 0;

post_pm_err:
	/* NOTE(review): may be reached with the PM usage count still held
	 * (hwrng_register failure path); presumably harmless since runtime PM
	 * is disabled here - confirm against the PM core semantics.
	 */
	cc_trng_pm_fini(drvdata);

post_clk_err:
	cc_trng_clk_fini(drvdata);

	return rc;
}
0601 
0602 static int cctrng_remove(struct platform_device *pdev)
0603 {
0604     struct cctrng_drvdata *drvdata = platform_get_drvdata(pdev);
0605     struct device *dev = &pdev->dev;
0606 
0607     dev_dbg(dev, "Releasing cctrng resources...\n");
0608 
0609     cc_trng_pm_fini(drvdata);
0610 
0611     cc_trng_clk_fini(drvdata);
0612 
0613     dev_info(dev, "ARM cctrng device terminated\n");
0614 
0615     return 0;
0616 }
0617 
0618 static int __maybe_unused cctrng_suspend(struct device *dev)
0619 {
0620     struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
0621 
0622     dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
0623     cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
0624             POWER_DOWN_ENABLE);
0625 
0626     clk_disable_unprepare(drvdata->clk);
0627 
0628     return 0;
0629 }
0630 
0631 static bool cctrng_wait_for_reset_completion(struct cctrng_drvdata *drvdata)
0632 {
0633     unsigned int val;
0634     unsigned int i;
0635 
0636     for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
0637         /* in cc7x3 NVM_IS_IDLE indicates that CC reset is
0638          *  completed and device is fully functional
0639          */
0640         val = cc_ioread(drvdata, CC_NVM_IS_IDLE_REG_OFFSET);
0641         if (val & BIT(CC_NVM_IS_IDLE_VALUE_BIT_SHIFT)) {
0642             /* hw indicate reset completed */
0643             return true;
0644         }
0645         /* allow scheduling other process on the processor */
0646         schedule();
0647     }
0648     /* reset not completed */
0649     return false;
0650 }
0651 
0652 static int __maybe_unused cctrng_resume(struct device *dev)
0653 {
0654     struct cctrng_drvdata *drvdata = dev_get_drvdata(dev);
0655     int rc;
0656 
0657     dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
0658     /* Enables the device source clk */
0659     rc = clk_prepare_enable(drvdata->clk);
0660     if (rc) {
0661         dev_err(dev, "failed getting clock back on. We're toast.\n");
0662         return rc;
0663     }
0664 
0665     /* wait for Cryptocell reset completion */
0666     if (!cctrng_wait_for_reset_completion(drvdata)) {
0667         dev_err(dev, "Cryptocell reset not completed");
0668         return -EBUSY;
0669     }
0670 
0671     /* unmask HOST RNG interrupt */
0672     cc_iowrite(drvdata, CC_HOST_RGF_IMR_REG_OFFSET,
0673            cc_ioread(drvdata, CC_HOST_RGF_IMR_REG_OFFSET) &
0674            ~CC_HOST_RNG_IRQ_MASK);
0675 
0676     cc_iowrite(drvdata, CC_HOST_POWER_DOWN_EN_REG_OFFSET,
0677            POWER_DOWN_DISABLE);
0678 
0679     return 0;
0680 }
0681 
/* Runtime/system PM ops: suspend/resume gate the CC clock and the
 * HOST_POWER_DOWN_EN register.
 */
static UNIVERSAL_DEV_PM_OPS(cctrng_pm, cctrng_suspend, cctrng_resume, NULL);

/* Supported CryptoCell TRNG bindings (cc713 and cc703). */
static const struct of_device_id arm_cctrng_dt_match[] = {
	{ .compatible = "arm,cryptocell-713-trng", },
	{ .compatible = "arm,cryptocell-703-trng", },
	{},
};
MODULE_DEVICE_TABLE(of, arm_cctrng_dt_match);

static struct platform_driver cctrng_driver = {
	.driver = {
		.name = "cctrng",
		.of_match_table = arm_cctrng_dt_match,
		.pm = &cctrng_pm,
	},
	.probe = cctrng_probe,
	.remove = cctrng_remove,
};
0700 
0701 static int __init cctrng_mod_init(void)
0702 {
0703     /* Compile time assertion checks */
0704     BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
0705     BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);
0706 
0707     return platform_driver_register(&cctrng_driver);
0708 }
0709 module_init(cctrng_mod_init);
0710 
0711 static void __exit cctrng_mod_exit(void)
0712 {
0713     platform_driver_unregister(&cctrng_driver);
0714 }
0715 module_exit(cctrng_mod_exit);
0716 
0717 /* Module description */
0718 MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver");
0719 MODULE_AUTHOR("ARM");
0720 MODULE_LICENSE("GPL v2");