Lines Matching refs:xsdfec
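Reference matches for the xsdfec symbol family, apparently from the Linux kernel's Xilinx SD-FEC driver (drivers/misc/xilinx_sdfec.c); the leading number on each match is the line number in that source file.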

235 static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,
238 dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr);
239 iowrite32(value, xsdfec->regs + addr);
242 static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
246 rval = ioread32(xsdfec->regs + addr);
247 dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr);
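These first two matches are the driver's MMIO accessors. Filling in the lines the search elides gives roughly the following sketch (the u32 value parameter of the write helper and the return path of the read helper are inferred, not shown in the matches):

static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,
                                   u32 value)
{
        dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr);
        iowrite32(value, xsdfec->regs + addr);
}

static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
{
        u32 rval;

        rval = ioread32(xsdfec->regs + addr);
        dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr);
        return rval;
}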
251 static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec,
258 reg_val = xsdfec_regread(xsdfec, reg_offset);
262 static void update_config_from_hw(struct xsdfec_dev *xsdfec)
268 reg_value = xsdfec_regread(xsdfec, XSDFEC_ORDER_ADDR);
269 xsdfec->config.order = reg_value;
271 update_bool_config_from_reg(xsdfec, XSDFEC_BYPASS_ADDR,
273 &xsdfec->config.bypass);
275 update_bool_config_from_reg(xsdfec, XSDFEC_CODE_WR_PROTECT_ADDR,
277 &xsdfec->config.code_wr_protect);
279 reg_value = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
280 xsdfec->config.irq.enable_isr = (reg_value & XSDFEC_ISR_MASK) > 0;
282 reg_value = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
283 xsdfec->config.irq.enable_ecc_isr =
286 reg_value = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
289 xsdfec->state = XSDFEC_STARTED;
291 xsdfec->state = XSDFEC_STOPPED;
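update_bool_config_from_reg() at line 251 turns one register bit into a boolean config field, and update_config_from_hw() calls it for the bypass and write-protect flags. A plausible completion of the helper; the bit_num parameter and the mask computation are assumptions based on the call sites at lines 271 and 275:

static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec,
                                        u32 reg_offset, u32 bit_num,
                                        char *config_value)
{
        u32 reg_val;
        u32 bit_mask = 1 << bit_num;

        reg_val = xsdfec_regread(xsdfec, reg_offset);
        *config_value = (reg_val & bit_mask) > 0;
}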
294 static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
300 spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
301 status.state = xsdfec->state;
302 xsdfec->state_updated = false;
303 spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
304 status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) &
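xsdfec_get_status() snapshots the driver state under error_data_lock, then samples the activity register; the tail presumably copies the result out to user space. A sketch, assuming a struct xsdfec_status from the UAPI header and an XSDFEC_IS_ACTIVITY_SET mask:

static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
{
        struct xsdfec_status status;
        int err;

        /* Zero first so no uninitialized padding reaches user space */
        memset(&status, 0, sizeof(status));
        spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
        status.state = xsdfec->state;
        xsdfec->state_updated = false;
        spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
        status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) &
                           XSDFEC_IS_ACTIVITY_SET);

        err = copy_to_user(arg, &status, sizeof(status));
        if (err)
                err = -EFAULT;

        return err;
}

xsdfec_get_config() at line 314 is simpler still: a straight copy_to_user() of the cached config with the same -EFAULT mapping.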
314 static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
318 err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config));
325 static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
331 xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK);
332 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
334 dev_dbg(xsdfec->dev,
340 xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK);
341 mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
343 dev_dbg(xsdfec->dev,
351 static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
357 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
359 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
361 dev_dbg(xsdfec->dev,
367 xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
369 mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
374 dev_dbg(xsdfec->dev,
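Both enable helpers write the set/clear (IER/IDR) registers and then read the mask register (IMR) back to verify the change stuck; a set IMR bit means the source is masked. A sketch of the plain ISR variant; the ECC variant at line 351 mirrors it with the XSDFEC_ECC_* registers and mask. The verification polarity and the -EIO returns are inferred from the surrounding dev_dbg matches:

static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
{
        u32 mask_read;

        if (enable) {
                /* Unmask the sources via the enable register */
                xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK);
                mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
                if (mask_read & XSDFEC_ISR_MASK) {
                        dev_dbg(xsdfec->dev,
                                "SDFEC enabling irq with IER failed");
                        return -EIO;
                }
        } else {
                /* Mask the sources via the disable register */
                xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK);
                mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
                if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
                        dev_dbg(xsdfec->dev,
                                "SDFEC disabling irq with IDR failed");
                        return -EIO;
                }
        }
        return 0;
}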
382 static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
394 isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr);
396 xsdfec->config.irq.enable_isr = irq.enable_isr;
399 ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr);
401 xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr;
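xsdfec_set_irq() copies a struct xsdfec_irq from user space and commits each flag to the cached config only if the corresponding hardware update succeeded. A sketch, assuming copy_from_user() at the top and a combined -EIO when either helper fails:

static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
{
        struct xsdfec_irq irq;
        int err;
        int isr_err;
        int ecc_err;

        err = copy_from_user(&irq, arg, sizeof(irq));
        if (err)
                return -EFAULT;

        /* Setup tx/rx irq */
        isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr);
        if (!isr_err)
                xsdfec->config.irq.enable_isr = irq.enable_isr;

        /* Setup ECC irq */
        ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr);
        if (!ecc_err)
                xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr;

        if (isr_err < 0 || ecc_err < 0)
                err = -EIO;

        return err;
}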
409 static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
426 if (xsdfec->config.code == XSDFEC_LDPC_CODE)
432 xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
436 static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
442 if (xsdfec->config.code == XSDFEC_LDPC_CODE)
446 reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);
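Both Turbo ioctls bail out when the core was synthesized for LDPC (lines 426 and 442). A sketch of the read side; the struct xsdfec_turbo field extraction and the XSDFEC_TURBO_SCALE_* mask names are assumptions:

static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
{
        u32 reg_value;
        struct xsdfec_turbo turbo_params;
        int err;

        /* A Turbo decode query makes no sense on an LDPC core */
        if (xsdfec->config.code == XSDFEC_LDPC_CODE)
                return -EIO;

        memset(&turbo_params, 0, sizeof(turbo_params));
        reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);

        turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >>
                             XSDFEC_TURBO_SCALE_BIT_POS;
        turbo_params.alg = reg_value & 0x1;

        err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
        if (err)
                err = -EFAULT;

        return err;
}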
459 static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize,
466 dev_dbg(xsdfec->dev, "N value is not in range");
473 dev_dbg(xsdfec->dev, "K value is not in range");
481 dev_dbg(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x",
486 xsdfec_regwrite(xsdfec,
493 static int xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
499 dev_dbg(xsdfec->dev, "Psize is not in range");
504 dev_dbg(xsdfec->dev, "No-packing bit register invalid");
509 dev_dbg(xsdfec->dev, "NM is beyond 10 bits");
515 dev_dbg(xsdfec->dev, "Writing outside of LDPC reg1 space 0x%x",
520 xsdfec_regwrite(xsdfec,
527 static int xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
535 dev_dbg(xsdfec->dev, "Nlayers is not in range");
540 dev_dbg(xsdfec->dev, "NMQC exceeds 11 bits");
544 dev_dbg(xsdfec->dev, "Norm type is invalid");
548 dev_dbg(xsdfec->dev, "Special QC is invalid");
553 dev_dbg(xsdfec->dev, "No final parity check value is invalid");
559 dev_dbg(xsdfec->dev, "Max Schedule exceeds 2 bits");
568 dev_dbg(xsdfec->dev, "Writing outside of LDPC reg2 space 0x%x",
573 xsdfec_regwrite(xsdfec,
580 static int xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off, u8 la_off,
589 dev_dbg(xsdfec->dev, "Writing outside of LDPC reg3 space 0x%x",
594 xsdfec_regwrite(xsdfec,
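The four reg-write helpers above share one shape: validate each field, pack the fields into a 32-bit word, bounds-check the per-code offset against the register bank, then write. A sketch of xsdfec_reg0_write(); the XSDFEC_REG0_* limits and shifts and the exact validation predicates are assumptions (the real checks are stricter than shown):

static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize,
                             u32 offset)
{
        u32 wdata;

        if (n < XSDFEC_REG0_N_MIN || n > XSDFEC_REG0_N_MAX ||
            psize == 0 || (n % psize) != 0) {
                dev_dbg(xsdfec->dev, "N value is not in range");
                return -EINVAL;
        }
        n <<= XSDFEC_REG0_N_LSB;

        if (k < XSDFEC_REG0_K_MIN || k > XSDFEC_REG0_K_MAX ||
            (k % psize) != 0) {
                dev_dbg(xsdfec->dev, "K value is not in range");
                return -EINVAL;
        }
        k <<= XSDFEC_REG0_K_LSB;
        wdata = k | n;

        /* Keep the write inside the reg0 bank for this code slot */
        if (offset > (XSDFEC_LDPC_CODE_REG0_ADDR_HIGH -
                      XSDFEC_LDPC_CODE_REG0_ADDR_BASE) /
                             XSDFEC_LDPC_REG_JUMP) {
                dev_dbg(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x",
                        offset);
                return -EINVAL;
        }
        xsdfec_regwrite(xsdfec,
                        XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
                                (offset * XSDFEC_LDPC_REG_JUMP),
                        wdata);
        return 0;
}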
601 static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
618 dev_dbg(xsdfec->dev, "Write exceeds SC table length");
642 xsdfec_regwrite(xsdfec,
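xsdfec_table_write() bounds-checks the destination range and streams a table into the core one 32-bit register at a time. A simplified sketch that assumes the table already sits in kernel memory (the real function works on the user buffer itself, which this elides):

static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
                              u32 *src_ptr, u32 len, const u32 base_addr,
                              const u32 depth)
{
        u32 i;

        /* Reject writes that would run past the end of the table */
        if (offset + len > depth / XSDFEC_REG_WIDTH_JUMP) {
                dev_dbg(xsdfec->dev, "Write exceeds SC table length");
                return -EINVAL;
        }

        /* One register write per table word */
        for (i = 0; i < len; i++)
                xsdfec_regwrite(xsdfec,
                                base_addr +
                                        ((offset + i) * XSDFEC_REG_WIDTH_JUMP),
                                src_ptr[i]);
        return 0;
}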
655 static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
664 if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
670 if (xsdfec->state == XSDFEC_STARTED) {
675 if (xsdfec->config.code_wr_protect) {
681 ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize,
687 ret = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing, ldpc->nm,
693 ret = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
701 ret = xsdfec_reg3_write(xsdfec, ldpc->sc_off, ldpc->la_off,
711 ret = xsdfec_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table, n,
717 ret = xsdfec_table_write(xsdfec, 4 * ldpc->la_off, ldpc->la_table,
723 ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table,
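xsdfec_add_ldpc() gates on three preconditions (a Turbo core, a running device, write protection), then writes reg0..reg3 followed by the SC, LA and QC tables; note the LA and QC offsets are scaled by 4 at lines 717 and 723. A sketch of the guard-and-sequence shape, assuming memdup_user() for the parameter block and a code_id argument as the register offset:

static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
{
        struct xsdfec_ldpc_params *ldpc;
        int ret;

        ldpc = memdup_user(arg, sizeof(*ldpc));
        if (IS_ERR(ldpc))
                return PTR_ERR(ldpc);

        /* An LDPC code cannot be loaded into a Turbo core ... */
        if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
                ret = -EIO;
                goto err_out;
        }

        /* ... nor while the device is running ... */
        if (xsdfec->state == XSDFEC_STARTED) {
                ret = -EIO;
                goto err_out;
        }

        /* ... nor while the code registers are write protected */
        if (xsdfec->config.code_wr_protect) {
                ret = -EIO;
                goto err_out;
        }

        ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize,
                                ldpc->code_id);
        if (ret)
                goto err_out;

        /* reg1..reg3 and the SC/LA/QC table writes continue in the same
         * check-then-continue pattern, as the matches above show.
         */
err_out:
        kfree(ldpc);
        return ret;
}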
731 static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
747 if (xsdfec->state == XSDFEC_STARTED)
750 xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order);
752 xsdfec->config.order = order;
757 static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg)
767 if (xsdfec->state == XSDFEC_STARTED)
771 xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1);
773 xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0);
775 xsdfec->config.bypass = bypass;
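xsdfec_set_order() and xsdfec_set_bypass() follow the same recipe: get_user() the value, validate it, refuse while the device is started, write the register, and cache the value in the config. A sketch of the bypass variant (the get_user() top and the return codes are inferred):

static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg)
{
        bool bypass;
        int err;

        err = get_user(bypass, arg);
        if (err)
                return -EFAULT;

        /* Bypass cannot be toggled while the core is running */
        if (xsdfec->state == XSDFEC_STARTED)
                return -EIO;

        if (bypass)
                xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1);
        else
                xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0);

        xsdfec->config.bypass = bypass;

        return 0;
}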
780 static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg)
786 reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
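xsdfec_is_active() is the smallest ioctl: read the activity register and hand one bit back. A sketch, assuming put_user() and the same XSDFEC_IS_ACTIVITY_SET mask as in the status path:

static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg)
{
        u32 reg_value;
        int err;

        reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
        /* Double negation collapses the masked value to 0 or 1 */
        err = put_user(!!(reg_value & XSDFEC_IS_ACTIVITY_SET), arg);
        if (err)
                err = -EFAULT;

        return err;
}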
830 static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
837 struct xsdfec_config *config = &xsdfec->config;
854 xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value);
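xsdfec_cfg_axi_streams() packs the four AXI-stream parameters from the config into the width register. A heavily hedged sketch; map_words_cfg()/map_width_cfg() and the XSDFEC_AXIS_*_LSB shifts are hypothetical placeholders for whatever field mapping the driver actually uses:

static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
{
        u32 reg_value;
        struct xsdfec_config *config = &xsdfec->config;

        /* Translate each config field into its register bitfield */
        reg_value = map_words_cfg(config->dout_word_include)
                            << XSDFEC_AXIS_DOUT_WORDS_LSB;
        reg_value |= map_width_cfg(config->dout_width)
                             << XSDFEC_AXIS_DOUT_WIDTH_LSB;
        reg_value |= map_words_cfg(config->din_word_include)
                             << XSDFEC_AXIS_DIN_WORDS_LSB;
        reg_value |= map_width_cfg(config->din_width)
                             << XSDFEC_AXIS_DIN_WIDTH_LSB;

        xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value);

        return 0;
}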
859 static int xsdfec_start(struct xsdfec_dev *xsdfec)
863 regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
865 if (regread != xsdfec->config.code) {
866 dev_dbg(xsdfec->dev,
868 __func__, regread, xsdfec->config.code);
873 xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR,
876 xsdfec->state = XSDFEC_STARTED;
880 static int xsdfec_stop(struct xsdfec_dev *xsdfec)
884 if (xsdfec->state != XSDFEC_STARTED)
885 dev_dbg(xsdfec->dev, "Device not started correctly");
887 regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
889 xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
891 xsdfec->state = XSDFEC_STOPPED;
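xsdfec_start() refuses to run when the code type synthesized into the hardware disagrees with the driver config, then enables the AXIS interfaces; xsdfec_stop() clears only the input-side enable bits so in-flight data can drain. A sketch of both (the mask names and the one-bit narrowing of the code register are assumptions):

static int xsdfec_start(struct xsdfec_dev *xsdfec)
{
        u32 regread;

        /* Verify the HW code type matches the configured one */
        regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
        regread &= 0x1;
        if (regread != xsdfec->config.code) {
                dev_dbg(xsdfec->dev,
                        "%s SDFEC HW code does not match driver code, reg %d, code %d",
                        __func__, regread, xsdfec->config.code);
                return -EINVAL;
        }

        /* Set AXIS enable */
        xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR,
                        XSDFEC_AXIS_ENABLE_MASK);
        xsdfec->state = XSDFEC_STARTED;
        return 0;
}

static int xsdfec_stop(struct xsdfec_dev *xsdfec)
{
        u32 regread;

        if (xsdfec->state != XSDFEC_STARTED)
                dev_dbg(xsdfec->dev, "Device not started correctly");
        /* Disable only the AXIS input interfaces */
        regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
        regread &= (~XSDFEC_AXIS_IN_ENABLE_MASK);
        xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
        xsdfec->state = XSDFEC_STOPPED;
        return 0;
}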
895 static int xsdfec_clear_stats(struct xsdfec_dev *xsdfec)
897 spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
898 xsdfec->isr_err_count = 0;
899 xsdfec->uecc_count = 0;
900 xsdfec->cecc_count = 0;
901 spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
906 static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg)
911 spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
912 user_stats.isr_err_count = xsdfec->isr_err_count;
913 user_stats.cecc_count = xsdfec->cecc_count;
914 user_stats.uecc_count = xsdfec->uecc_count;
915 xsdfec->stats_updated = false;
916 spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
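Statistics are snapshotted under error_data_lock so the counters stay consistent with the ISR that increments them, and stats_updated is cleared in the same critical section. A sketch of the full getter, zeroing the struct first so no kernel stack padding can leak out:

static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg)
{
        int err;
        struct xsdfec_stats user_stats;

        memset(&user_stats, 0, sizeof(user_stats));
        spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
        user_stats.isr_err_count = xsdfec->isr_err_count;
        user_stats.cecc_count = xsdfec->cecc_count;
        user_stats.uecc_count = xsdfec->uecc_count;
        xsdfec->stats_updated = false;
        spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

        err = copy_to_user(arg, &user_stats, sizeof(user_stats));
        if (err)
                err = -EFAULT;

        return err;
}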
925 static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec)
928 xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
929 xsdfec_cfg_axi_streams(xsdfec);
930 update_config_from_hw(xsdfec);
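xsdfec_set_default_config() is nearly fully visible in the matches; completing it:

static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec)
{
        /* Re-align the core registers with the cached configuration */
        xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
        xsdfec_cfg_axi_streams(xsdfec);
        update_config_from_hw(xsdfec);

        return 0;
}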
938 struct xsdfec_dev *xsdfec;
942 xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev);
945 if (xsdfec->state == XSDFEC_NEEDS_RESET &&
953 rval = xsdfec_start(xsdfec);
956 rval = xsdfec_stop(xsdfec);
959 rval = xsdfec_clear_stats(xsdfec);
962 rval = xsdfec_get_stats(xsdfec, arg);
965 rval = xsdfec_get_status(xsdfec, arg);
968 rval = xsdfec_get_config(xsdfec, arg);
971 rval = xsdfec_set_default_config(xsdfec);
974 rval = xsdfec_set_irq(xsdfec, arg);
977 rval = xsdfec_set_turbo(xsdfec, arg);
980 rval = xsdfec_get_turbo(xsdfec, arg);
983 rval = xsdfec_add_ldpc(xsdfec, arg);
986 rval = xsdfec_set_order(xsdfec, arg);
989 rval = xsdfec_set_bypass(xsdfec, arg);
992 rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
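The ioctl dispatcher blocks most commands while the device needs a reset (line 945) and otherwise fans out over the XSDFEC_* command set, one handler per case. A condensed sketch; the allow-list in the reset check is inferred from which handlers are safe without touching the datapath:

static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
                             unsigned long data)
{
        struct xsdfec_dev *xsdfec;
        void __user *arg = (void __user *)data;
        int rval;

        xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev);

        /* In a failed state only reset/status/stats commands are allowed */
        if (xsdfec->state == XSDFEC_NEEDS_RESET &&
            (cmd != XSDFEC_SET_DEFAULT_CONFIG && cmd != XSDFEC_GET_STATUS &&
             cmd != XSDFEC_GET_STATS && cmd != XSDFEC_CLEAR_STATS))
                return -EPERM;

        switch (cmd) {
        case XSDFEC_START_DEV:
                rval = xsdfec_start(xsdfec);
                break;
        case XSDFEC_STOP_DEV:
                rval = xsdfec_stop(xsdfec);
                break;
        case XSDFEC_GET_STATS:
                rval = xsdfec_get_stats(xsdfec, arg);
                break;
        case XSDFEC_IS_ACTIVE:
                rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
                break;
        /* remaining XSDFEC_* cases map 1:1 onto the handlers above */
        default:
                rval = -ENOTTY;
                break;
        }
        return rval;
}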
1004 struct xsdfec_dev *xsdfec;
1006 xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);
1008 poll_wait(file, &xsdfec->waitq, wait);
1011 spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
1012 if (xsdfec->state_updated)
1015 if (xsdfec->stats_updated)
1017 spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
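xsdfec_poll() reports a state change as urgent data and fresh statistics as normally readable data, with both flags sampled under the same lock the ISR uses. A sketch (the specific EPOLL flag pairing is inferred):

static __poll_t xsdfec_poll(struct file *file, poll_table *wait)
{
        __poll_t mask = 0;
        struct xsdfec_dev *xsdfec;

        xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);

        poll_wait(file, &xsdfec->waitq, wait);

        /* XSDFEC ISR detected an error */
        spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
        if (xsdfec->state_updated)
                mask |= EPOLLIN | EPOLLPRI;
        if (xsdfec->stats_updated)
                mask |= EPOLLIN | EPOLLRDNORM;
        spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

        return mask;
}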
1029 static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
1031 struct device *dev = xsdfec->dev;
1045 xsdfec->config.code = XSDFEC_LDPC_CODE;
1047 xsdfec->config.code = XSDFEC_TURBO_CODE;
1057 xsdfec->config.din_word_include = din_word_include;
1070 xsdfec->config.din_width = din_width;
1082 xsdfec->config.dout_word_include = dout_word_include;
1095 xsdfec->config.dout_width = dout_width;
1102 xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
1104 xsdfec_cfg_axi_streams(xsdfec);
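xsdfec_parse_of() pulls the synthesis-time parameters from the device tree and pushes the code selection back to the hardware (lines 1102-1104). A sketch of the code-selection part; the property name and its string values follow the xlnx,sdfec device-tree binding but are assumptions here:

static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
{
        struct device *dev = xsdfec->dev;
        struct device_node *node = dev->of_node;
        const char *fec_code;
        int rval;

        rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
        if (rval < 0)
                return rval;

        if (!strcasecmp(fec_code, "ldpc"))
                xsdfec->config.code = XSDFEC_LDPC_CODE;
        else if (!strcasecmp(fec_code, "turbo"))
                xsdfec->config.code = XSDFEC_TURBO_CODE;
        else
                return -EINVAL;

        /* din/dout word-include and width properties are read the same
         * way and stored into xsdfec->config, as the matches show.
         */

        /* Write the selected code to the CODE register */
        xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
        xsdfec_cfg_axi_streams(xsdfec);
        return 0;
}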
1111 struct xsdfec_dev *xsdfec = dev_id;
1121 WARN_ON(xsdfec->irq != irq);
1124 xsdfec_isr_enable(xsdfec, false);
1125 xsdfec_ecc_isr_enable(xsdfec, false);
1127 ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
1128 isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
1130 xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err);
1131 xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err);
1142 dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp,
1144 dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count,
1145 xsdfec->cecc_count, xsdfec->isr_err_count);
1147 spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
1150 xsdfec->uecc_count += uecc_count;
1153 xsdfec->cecc_count += cecc_count;
1156 xsdfec->isr_err_count += isr_err_count;
1161 xsdfec->state = XSDFEC_NEEDS_RESET;
1163 xsdfec->state = XSDFEC_PL_RECONFIGURE;
1164 xsdfec->stats_updated = true;
1165 xsdfec->state_updated = true;
1169 xsdfec->stats_updated = true;
1172 xsdfec->state = XSDFEC_NEEDS_RESET;
1173 xsdfec->stats_updated = true;
1174 xsdfec->state_updated = true;
1177 spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
1178 dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated,
1179 xsdfec->stats_updated);
1182 if (xsdfec->state_updated || xsdfec->stats_updated)
1183 wake_up_interruptible(&xsdfec->waitq);
1188 xsdfec_isr_enable(xsdfec, true);
1189 xsdfec_ecc_isr_enable(xsdfec, true);
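The threaded IRQ handler brackets its work by masking both interrupt sources, reads and write-1-clears the pending bits, folds per-class counts into the shared counters under error_data_lock, and wakes poll() waiters before unmasking. A condensed sketch; the hweight32() count extraction, the mask names, and the exact state transitions (the matches show both XSDFEC_NEEDS_RESET and XSDFEC_PL_RECONFIGURE outcomes) are simplified assumptions:

static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id)
{
        struct xsdfec_dev *xsdfec = dev_id;
        irqreturn_t ret = IRQ_HANDLED;
        u32 ecc_err, isr_err;
        u32 uecc_count, cecc_count, isr_err_count;

        WARN_ON(xsdfec->irq != irq);

        /* Mask both sources while handling */
        xsdfec_isr_enable(xsdfec, false);
        xsdfec_ecc_isr_enable(xsdfec, false);

        /* Read the pending bits, then write-1-clear them */
        ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
        isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
        xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err);
        xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err);

        /* Derive per-class counts from the pending bits (details elided) */
        uecc_count = hweight32(ecc_err & XSDFEC_ECC_ISR_UECC_MASK);
        cecc_count = hweight32(ecc_err & XSDFEC_ECC_ISR_CECC_MASK);
        isr_err_count = hweight32(isr_err & XSDFEC_ISR_MASK);

        spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
        if (uecc_count) {
                xsdfec->uecc_count += uecc_count;
                /* Uncorrectable ECC errors force a reset */
                xsdfec->state = XSDFEC_NEEDS_RESET;
                xsdfec->state_updated = true;
        }
        if (cecc_count)
                xsdfec->cecc_count += cecc_count;
        if (isr_err_count) {
                xsdfec->isr_err_count += isr_err_count;
                xsdfec->state = XSDFEC_NEEDS_RESET;
                xsdfec->state_updated = true;
        }
        if (uecc_count || cecc_count || isr_err_count)
                xsdfec->stats_updated = true;
        spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);

        if (xsdfec->state_updated || xsdfec->stats_updated)
                wake_up_interruptible(&xsdfec->waitq);

        /* Unmask again before returning */
        xsdfec_isr_enable(xsdfec, true);
        xsdfec_ecc_isr_enable(xsdfec, true);
        return ret;
}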
1349 struct xsdfec_dev *xsdfec;
1354 xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
1355 if (!xsdfec)
1358 xsdfec->dev = &pdev->dev;
1359 spin_lock_init(&xsdfec->error_data_lock);
1361 err = xsdfec_clk_init(pdev, &xsdfec->clks);
1365 dev = xsdfec->dev;
1366 xsdfec->regs = devm_platform_ioremap_resource(pdev, 0);
1367 if (IS_ERR(xsdfec->regs)) {
1368 err = PTR_ERR(xsdfec->regs);
1372 xsdfec->irq = platform_get_irq(pdev, 0);
1373 if (xsdfec->irq < 0) {
1378 err = xsdfec_parse_of(xsdfec);
1382 update_config_from_hw(xsdfec);
1385 platform_set_drvdata(pdev, xsdfec);
1388 init_waitqueue_head(&xsdfec->waitq);
1390 err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
1392 "xilinx-sdfec16", xsdfec);
1394 dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
1402 xsdfec->dev_id = err;
1404 snprintf(xsdfec->dev_name, DEV_NAME_LEN, "xsdfec%d", xsdfec->dev_id);
1405 xsdfec->miscdev.minor = MISC_DYNAMIC_MINOR;
1406 xsdfec->miscdev.name = xsdfec->dev_name;
1407 xsdfec->miscdev.fops = &xsdfec_fops;
1408 xsdfec->miscdev.parent = dev;
1409 err = misc_register(&xsdfec->miscdev);
1417 ida_free(&dev_nrs, xsdfec->dev_id);
1419 xsdfec_disable_all_clks(&xsdfec->clks);
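Putting the probe matches together, the flow is allocate, map, parse, register, with the two error labels at lines 1417-1419 unwinding the ID allocation and the clocks. A sketch; the goto label names, the IRQF_ONESHOT threaded-IRQ request, and treating a missing IRQ as fatal (the real driver may instead fall back to running without interrupts) are assumptions:

static int xsdfec_probe(struct platform_device *pdev)
{
        struct xsdfec_dev *xsdfec;
        struct device *dev;
        int err;

        xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
        if (!xsdfec)
                return -ENOMEM;

        xsdfec->dev = &pdev->dev;
        spin_lock_init(&xsdfec->error_data_lock);

        err = xsdfec_clk_init(pdev, &xsdfec->clks);
        if (err)
                return err;

        dev = xsdfec->dev;
        xsdfec->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(xsdfec->regs)) {
                err = PTR_ERR(xsdfec->regs);
                goto err_xsdfec_dev;
        }

        xsdfec->irq = platform_get_irq(pdev, 0);
        if (xsdfec->irq < 0) {
                err = xsdfec->irq;
                goto err_xsdfec_dev;
        }

        err = xsdfec_parse_of(xsdfec);
        if (err < 0)
                goto err_xsdfec_dev;

        update_config_from_hw(xsdfec);
        platform_set_drvdata(pdev, xsdfec);
        init_waitqueue_head(&xsdfec->waitq);

        err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
                                        xsdfec_irq_thread, IRQF_ONESHOT,
                                        "xilinx-sdfec16", xsdfec);
        if (err < 0) {
                dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
                goto err_xsdfec_dev;
        }

        err = ida_alloc(&dev_nrs, GFP_KERNEL);
        if (err < 0)
                goto err_xsdfec_dev;
        xsdfec->dev_id = err;

        snprintf(xsdfec->dev_name, DEV_NAME_LEN, "xsdfec%d", xsdfec->dev_id);
        xsdfec->miscdev.minor = MISC_DYNAMIC_MINOR;
        xsdfec->miscdev.name = xsdfec->dev_name;
        xsdfec->miscdev.fops = &xsdfec_fops;
        xsdfec->miscdev.parent = dev;
        err = misc_register(&xsdfec->miscdev);
        if (err) {
                dev_err(dev, "error:%d. Unable to register device", err);
                goto err_xsdfec_ida;
        }
        return 0;

err_xsdfec_ida:
        ida_free(&dev_nrs, xsdfec->dev_id);
err_xsdfec_dev:
        xsdfec_disable_all_clks(&xsdfec->clks);
        return err;
}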
1425 struct xsdfec_dev *xsdfec;
1427 xsdfec = platform_get_drvdata(pdev);
1428 misc_deregister(&xsdfec->miscdev);
1429 ida_free(&dev_nrs, xsdfec->dev_id);
1430 xsdfec_disable_all_clks(&xsdfec->clks);
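Teardown mirrors probe in reverse. Completing the remove callback (assuming the modern void-returning form; older kernels return int 0):

static void xsdfec_remove(struct platform_device *pdev)
{
        struct xsdfec_dev *xsdfec;

        xsdfec = platform_get_drvdata(pdev);
        misc_deregister(&xsdfec->miscdev);
        ida_free(&dev_nrs, xsdfec->dev_id);
        xsdfec_disable_all_clks(&xsdfec->clks);
}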