Lines matching 'ctx' in drivers/char/hw_random/xgene-rng.c (APM X-Gene SoC hardware RNG driver)
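
For context: every line below touches the driver's per-device context structure. Here is a minimal sketch of that structure, with field names taken from the lines in this listing; the register offsets and the exact set of headers are assumptions for illustration, not part of the listing.

#include <linux/clk.h>
#include <linux/hw_random.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/timer.h>

/* Register offsets are assumed values for illustration only */
#define RNG_INOUT_0             0x00
#define RNG_INTR_STS_ACK        0x10
#define RNG_CONTROL             0x14
#define RNG_CONFIG              0x18
#define RNG_ALARMCNT            0x1c
#define RNG_FROENABLE           0x20
#define RNG_FRODETUNE           0x24
#define RNG_ALARMMASK           0x28
#define RNG_ALARMSTOP           0x2c
#define RNG_OPTIONS             0x78
#define RNG_EIP_REV             0x7c

struct xgene_rng_dev {
        u32 irq;                          /* alarm interrupt */
        void __iomem *csr_base;           /* mapped CSR region */
        u32 revision;                     /* RNG_EIP_REV read at init */
        u32 datum_size;                   /* 32-bit words per read */
        u32 failure_cnt;                  /* shutdown-overflow events */
        unsigned long failure_ts;         /* jiffies of first failure */
        struct timer_list failure_timer;  /* clears failure_cnt after 120 s */
        struct device *dev;
        struct clk *clk;                  /* optional IP clock */
};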
92 struct xgene_rng_dev *ctx = from_timer(ctx, t, failure_timer);
95 disable_irq(ctx->irq);
96 ctx->failure_cnt = 0;
97 del_timer(&ctx->failure_timer);
98 enable_irq(ctx->irq);
101 static void xgene_rng_start_timer(struct xgene_rng_dev *ctx)
103 ctx->failure_timer.expires = jiffies + 120 * HZ;
104 add_timer(&ctx->failure_timer);
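
Lines 92-104 are the failure-timer pair: the expiry callback clears the failure counter with the alarm IRQ masked, and the start helper arms a 120-second window. A reconstruction consistent with the listed lines; only the comments are added.

static void xgene_rng_expired_timer(struct timer_list *t)
{
        struct xgene_rng_dev *ctx = from_timer(ctx, t, failure_timer);

        /* Window expired: reset the failure count with the alarm IRQ off */
        disable_irq(ctx->irq);
        ctx->failure_cnt = 0;
        del_timer(&ctx->failure_timer);
        enable_irq(ctx->irq);
}

static void xgene_rng_start_timer(struct xgene_rng_dev *ctx)
{
        /* Count shutdown-overflow events over a 120-second window */
        ctx->failure_timer.expires = jiffies + 120 * HZ;
        add_timer(&ctx->failure_timer);
}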
110 static void xgene_rng_init_fro(struct xgene_rng_dev *ctx, u32 fro_val)
112 writel(fro_val, ctx->csr_base + RNG_FRODETUNE);
113 writel(0x00000000, ctx->csr_base + RNG_ALARMMASK);
114 writel(0x00000000, ctx->csr_base + RNG_ALARMSTOP);
115 writel(0xFFFFFFFF, ctx->csr_base + RNG_FROENABLE);
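
Lines 110-115 form a small helper that retunes and restarts the free-running oscillators (FROs): write the detune value, clear the alarm mask and alarm-stop latches, then enable every FRO. The same four writes, with comments added:

static void xgene_rng_init_fro(struct xgene_rng_dev *ctx, u32 fro_val)
{
        writel(fro_val, ctx->csr_base + RNG_FRODETUNE);     /* detune value */
        writel(0x00000000, ctx->csr_base + RNG_ALARMMASK);  /* unmask alarms */
        writel(0x00000000, ctx->csr_base + RNG_ALARMSTOP);  /* clear stop latches */
        writel(0xFFFFFFFF, ctx->csr_base + RNG_FROENABLE);  /* enable all FROs */
}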
118 static void xgene_rng_chk_overflow(struct xgene_rng_dev *ctx)
122 val = readl(ctx->csr_base + RNG_INTR_STS_ACK);
129 dev_err(ctx->dev, "test monobit failure error 0x%08X\n", val);
137 dev_err(ctx->dev, "test poker failure error 0x%08X\n", val);
143 dev_err(ctx->dev, "test long run failure error 0x%08X\n", val);
150 dev_err(ctx->dev, "test run failure error 0x%08X\n", val);
153 dev_err(ctx->dev, "noise failure error 0x%08X\n", val);
159 dev_err(ctx->dev, "stuck out failure error 0x%08X\n", val);
165 if (++ctx->failure_cnt == 1) {
167 ctx->failure_ts = jiffies;
168 frostopped = readl(ctx->csr_base + RNG_ALARMSTOP);
169 xgene_rng_init_fro(ctx, frostopped);
175 xgene_rng_start_timer(ctx);
178 if (time_after(ctx->failure_ts + 60 * HZ, jiffies)) {
179 dev_err(ctx->dev,
184 ctx->failure_ts = jiffies;
185 ctx->failure_cnt = 1;
191 xgene_rng_start_timer(ctx);
193 frostopped = readl(ctx->csr_base + RNG_ALARMSTOP);
194 xgene_rng_init_fro(ctx, frostopped);
198 writel(val, ctx->csr_base + RNG_INTR_STS_ACK);
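
Lines 118-198 are the health monitor: read RNG_INTR_STS_ACK once, log each failed statistical test, recover from a "shutdown overflow" (the FROs stopped after repeated alarms), then acknowledge all status bits. The branch structure is not visible in the listing, so the following is a hedged reconstruction of the recovery path around lines 165-198; the SHUTDOWN_OFLO_MASK name and the shutdown error message are assumptions.

static void xgene_rng_chk_overflow(struct xgene_rng_dev *ctx)
{
        u32 frostopped;
        int val;

        val = readl(ctx->csr_base + RNG_INTR_STS_ACK);

        /* ... individual test-failure bits logged via dev_err() as listed at
         * lines 129-159 (monobit, poker, long run, run, noise, stuck out) ... */

        if (val & SHUTDOWN_OFLO_MASK) {                 /* mask name assumed */
                if (++ctx->failure_cnt == 1) {
                        /* First shutdown overflow: restart the stopped FROs
                         * and open a failure window with the timer above. */
                        ctx->failure_ts = jiffies;
                        frostopped = readl(ctx->csr_base + RNG_ALARMSTOP);
                        xgene_rng_init_fro(ctx, frostopped);
                        xgene_rng_start_timer(ctx);
                } else {
                        /* A second overflow within a minute is reported as a
                         * hard error; otherwise the window is restarted. */
                        if (time_after(ctx->failure_ts + 60 * HZ, jiffies)) {
                                dev_err(ctx->dev,
                                        "FRO shutdown failure error 0x%08X\n",
                                        val);   /* message text assumed */
                        } else {
                                ctx->failure_ts = jiffies;
                                ctx->failure_cnt = 1;
                                xgene_rng_start_timer(ctx);
                        }
                        frostopped = readl(ctx->csr_base + RNG_ALARMSTOP);
                        xgene_rng_init_fro(ctx, frostopped);
                }
        }

        /* Acknowledge everything that was set */
        writel(val, ctx->csr_base + RNG_INTR_STS_ACK);
}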
203 struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) id;
206 xgene_rng_chk_overflow(ctx);
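
Lines 203-206 are the alarm interrupt handler, which simply defers to the overflow check above:

static irqreturn_t xgene_rng_irq_handler(int irq, void *id)
{
        struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) id;

        /* RNG alarm fired: run the health-test / overflow check */
        xgene_rng_chk_overflow(ctx);

        return IRQ_HANDLED;
}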
213 struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
217 val = readl(ctx->csr_base + RNG_INTR_STS_ACK);
228 struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
231 for (i = 0; i < ctx->datum_size; i++)
232 data[i] = readl(ctx->csr_base + RNG_INOUT_0 + i * 4);
235 writel(READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK);
237 return ctx->datum_size << 2;
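
Lines 213-237 are the two hwrng callbacks of the read path: data_present polls RNG_INTR_STS_ACK for READY_MASK, and data_read copies datum_size 32-bit words from RNG_INOUT_0, clears READY to start the next refill, and returns the byte count (datum_size << 2). A hedged sketch; the retry count and poll interval are assumptions, since those lines do not reference ctx and are absent from the listing.

/* Requires <linux/delay.h> for udelay() */
static int xgene_rng_data_present(struct hwrng *rng, int wait)
{
        struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
        u32 i, val = 0;

        for (i = 0; i < 20; i++) {      /* retry count assumed */
                val = readl(ctx->csr_base + RNG_INTR_STS_ACK);
                if ((val & READY_MASK) || !wait)
                        break;
                udelay(10);             /* poll interval assumed */
        }

        return (val & READY_MASK);
}

static int xgene_rng_data_read(struct hwrng *rng, u32 *data)
{
        struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
        int i;

        for (i = 0; i < ctx->datum_size; i++)
                data[i] = readl(ctx->csr_base + RNG_INOUT_0 + i * 4);

        /* Clear the ready bit so the engine refills the output registers */
        writel(READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK);

        return ctx->datum_size << 2;    /* words to bytes */
}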
240 static void xgene_rng_init_internal(struct xgene_rng_dev *ctx)
244 writel(0x00000000, ctx->csr_base + RNG_CONTROL);
248 writel(val, ctx->csr_base + RNG_CONFIG);
251 writel(val, ctx->csr_base + RNG_ALARMCNT);
253 xgene_rng_init_fro(ctx, 0);
262 READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK);
272 writel(val, ctx->csr_base + RNG_CONTROL);
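
Lines 240-272 program the engine: disable it through RNG_CONTROL, write refill timing to RNG_CONFIG and alarm/shutdown thresholds to RNG_ALARMCNT, reset the FROs, acknowledge every pending status bit (line 262 includes READY_MASK), and finally re-enable the TRNG and its failure interrupts via RNG_CONTROL. The field encodings are not visible in this listing, so the sketch below keeps them as labelled placeholders.

static void xgene_rng_init_internal(struct xgene_rng_dev *ctx)
{
        u32 val;

        writel(0x00000000, ctx->csr_base + RNG_CONTROL);     /* disable engine */

        val = 0;        /* placeholder: refill-cycle settings (not in listing) */
        writel(val, ctx->csr_base + RNG_CONFIG);

        val = 0;        /* placeholder: alarm/shutdown thresholds (not in listing) */
        writel(val, ctx->csr_base + RNG_ALARMCNT);

        xgene_rng_init_fro(ctx, 0);                           /* default detune */

        /* Acknowledge all pending status bits, READY included (line 262) */
        writel(READY_MASK /* | failure masks, names not in listing */,
               ctx->csr_base + RNG_INTR_STS_ACK);

        val = 0;        /* placeholder: enable TRNG + failure interrupts */
        writel(val, ctx->csr_base + RNG_CONTROL);
}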
277 struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;
279 ctx->failure_cnt = 0;
280 timer_setup(&ctx->failure_timer, xgene_rng_expired_timer, 0);
282 ctx->revision = readl(ctx->csr_base + RNG_EIP_REV);
284 dev_dbg(ctx->dev, "Rev %d.%d.%d\n",
285 MAJOR_HW_REV_RD(ctx->revision),
286 MINOR_HW_REV_RD(ctx->revision),
287 HW_PATCH_LEVEL_RD(ctx->revision));
289 dev_dbg(ctx->dev, "Options 0x%08X",
290 readl(ctx->csr_base + RNG_OPTIONS));
292 xgene_rng_init_internal(ctx);
294 ctx->datum_size = RNG_MAX_DATUM;
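
Lines 277-294 are the hwrng ->init callback: reset the failure bookkeeping, set up the failure timer, read and log the EIP revision and options registers, run the internal init above, and fix the read size at RNG_MAX_DATUM words. Roughly (the success return value is assumed):

static int xgene_rng_init(struct hwrng *rng)
{
        struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv;

        ctx->failure_cnt = 0;
        timer_setup(&ctx->failure_timer, xgene_rng_expired_timer, 0);

        ctx->revision = readl(ctx->csr_base + RNG_EIP_REV);
        dev_dbg(ctx->dev, "Rev %d.%d.%d\n",
                MAJOR_HW_REV_RD(ctx->revision),
                MINOR_HW_REV_RD(ctx->revision),
                HW_PATCH_LEVEL_RD(ctx->revision));
        dev_dbg(ctx->dev, "Options 0x%08X",
                readl(ctx->csr_base + RNG_OPTIONS));

        xgene_rng_init_internal(ctx);

        ctx->datum_size = RNG_MAX_DATUM;        /* words delivered per read */

        return 0;
}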
316 struct xgene_rng_dev *ctx;
319 ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
320 if (!ctx)
323 ctx->dev = &pdev->dev;
324 platform_set_drvdata(pdev, ctx);
326 ctx->csr_base = devm_platform_ioremap_resource(pdev, 0);
327 if (IS_ERR(ctx->csr_base))
328 return PTR_ERR(ctx->csr_base);
333 ctx->irq = rc;
336 ctx->csr_base, ctx->irq);
338 rc = devm_request_irq(&pdev->dev, ctx->irq, xgene_rng_irq_handler, 0,
339 dev_name(&pdev->dev), ctx);
346 ctx->clk = devm_clk_get(&pdev->dev, NULL);
347 if (IS_ERR(ctx->clk)) {
350 rc = clk_prepare_enable(ctx->clk);
358 xgene_rng_func.priv = (unsigned long) ctx;
363 if (!IS_ERR(ctx->clk))
364 clk_disable_unprepare(ctx->clk);
372 if (!IS_ERR(ctx->clk))
373 clk_disable_unprepare(ctx->clk);
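
Lines 316-373 are the probe path: allocate the context with devm_kzalloc, map the CSR region, look up and request the alarm IRQ, enable the (optional) clock, stash the context in the hwrng priv field, and unwind the clock on registration failure (lines 363-364 and 372-373 are two such unwind sites). A condensed, hedged reconstruction; the IRQ lookup call, the hwrng_register() call being unwound, and the collapse of the two failure sites into one are assumptions.

static int xgene_rng_probe(struct platform_device *pdev)
{
        struct xgene_rng_dev *ctx;
        int rc;

        ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->dev = &pdev->dev;
        platform_set_drvdata(pdev, ctx);

        ctx->csr_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ctx->csr_base))
                return PTR_ERR(ctx->csr_base);

        rc = platform_get_irq(pdev, 0);         /* lookup call assumed */
        if (rc < 0)
                return rc;
        ctx->irq = rc;

        rc = devm_request_irq(&pdev->dev, ctx->irq, xgene_rng_irq_handler, 0,
                              dev_name(&pdev->dev), ctx);
        if (rc)
                return rc;

        /* The clock is optional: a missing clock is tolerated, not fatal */
        ctx->clk = devm_clk_get(&pdev->dev, NULL);
        if (!IS_ERR(ctx->clk)) {
                rc = clk_prepare_enable(ctx->clk);
                if (rc)
                        return rc;
        }

        xgene_rng_func.priv = (unsigned long) ctx;

        rc = hwrng_register(&xgene_rng_func);   /* registration call assumed */
        if (rc && !IS_ERR(ctx->clk))
                clk_disable_unprepare(ctx->clk);        /* unwind on failure */

        return rc;
}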
382 struct xgene_rng_dev *ctx = platform_get_drvdata(pdev);
388 if (!IS_ERR(ctx->clk))
389 clk_disable_unprepare(ctx->clk);
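
Lines 382-389 are the remove path, which disables the clock (when one was obtained) as the device goes away; the hwrng unregistration and any wakeup teardown that presumably accompany it are not visible in this listing. A sketch:

static int xgene_rng_remove(struct platform_device *pdev)
{
        struct xgene_rng_dev *ctx = platform_get_drvdata(pdev);

        if (!IS_ERR(ctx->clk))
                clk_disable_unprepare(ctx->clk);
        /* hwrng_unregister(&xgene_rng_func) and wakeup teardown assumed here */

        return 0;       /* real return handling not visible in the listing */
}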