Lines matching defs:cfg in the FMan driver (drivers/net/ethernet/freescale/fman/fman.c)
418 u32 fmqm_dtcfg1; /* 0x90 dbg trap cfg 1 Register 0x00 */
422 u32 fmqm_dtcfg2; /* dbg Trap cfg 2 Register 0x10 */
655 static void fman_defconfig(struct fman_cfg *cfg)
657 memset(cfg, 0, sizeof(struct fman_cfg));
659 cfg->catastrophic_err = DEFAULT_CATASTROPHIC_ERR;
660 cfg->dma_err = DEFAULT_DMA_ERR;
661 cfg->dma_aid_mode = DEFAULT_AID_MODE;
662 cfg->dma_comm_qtsh_clr_emer = DEFAULT_DMA_COMM_Q_LOW;
663 cfg->dma_comm_qtsh_asrt_emer = DEFAULT_DMA_COMM_Q_HIGH;
664 cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE;
665 cfg->dma_cam_num_of_entries = DEFAULT_DMA_CAM_NUM_OF_ENTRIES;
666 cfg->dma_dbg_cnt_mode = DEFAULT_DMA_DBG_CNT_MODE;
667 cfg->dma_sos_emergency = DEFAULT_DMA_SOS_EMERGENCY;
668 cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG;
669 cfg->disp_limit_tsh = DEFAULT_DISP_LIMIT;
670 cfg->prs_disp_tsh = DEFAULT_PRS_DISP_TH;
671 cfg->plcr_disp_tsh = DEFAULT_PLCR_DISP_TH;
672 cfg->kg_disp_tsh = DEFAULT_KG_DISP_TH;
673 cfg->bmi_disp_tsh = DEFAULT_BMI_DISP_TH;
674 cfg->qmi_enq_disp_tsh = DEFAULT_QMI_ENQ_DISP_TH;
675 cfg->qmi_deq_disp_tsh = DEFAULT_QMI_DEQ_DISP_TH;
676 cfg->fm_ctl1_disp_tsh = DEFAULT_FM_CTL1_DISP_TH;
677 cfg->fm_ctl2_disp_tsh = DEFAULT_FM_CTL2_DISP_TH;
683 struct fman_cfg *cfg = fman->cfg;
695 tmp_reg |= cfg->dma_cache_override << DMA_MODE_CACHE_OR_SHIFT;
696 if (cfg->exceptions & EX_DMA_BUS_ERROR)
698 if ((cfg->exceptions & EX_DMA_SYSTEM_WRITE_ECC) |
699 (cfg->exceptions & EX_DMA_READ_ECC) |
700 (cfg->exceptions & EX_DMA_FM_WRITE_ECC))
702 if (cfg->dma_axi_dbg_num_of_beats)
704 ((cfg->dma_axi_dbg_num_of_beats - 1)
707 tmp_reg |= (((cfg->dma_cam_num_of_entries / DMA_CAM_UNITS) - 1) &
710 tmp_reg |= cfg->dma_dbg_cnt_mode << DMA_MODE_DBG_SHIFT;
711 tmp_reg |= cfg->dma_aid_mode << DMA_MODE_AID_MODE_SHIFT;
716 tmp_reg = ((u32)cfg->dma_comm_qtsh_asrt_emer <<
718 tmp_reg |= (cfg->dma_read_buf_tsh_asrt_emer &
720 tmp_reg |= cfg->dma_write_buf_tsh_asrt_emer &
726 tmp_reg = ((u32)cfg->dma_comm_qtsh_clr_emer <<
728 tmp_reg |= (cfg->dma_read_buf_tsh_clr_emer &
730 tmp_reg |= cfg->dma_write_buf_tsh_clr_emer &
736 iowrite32be(cfg->dma_sos_emergency, &dma_rg->fmdmsetr);
739 iowrite32be((cfg->dma_watchdog * cfg->clk_freq), &dma_rg->fmdmwcr);
741 iowrite32be(cfg->cam_base_addr, &dma_rg->fmdmebcr);
745 (u32)(fman->cfg->dma_cam_num_of_entries * DMA_CAM_SIZEOF_ENTRY);
759 fman->cam_size = fman->cfg->dma_cam_num_of_entries * 72 + 128;
768 if (fman->cfg->dma_cam_num_of_entries % 8 ||
769 fman->cfg->dma_cam_num_of_entries > 32) {
779 (32 - fman->cfg->dma_cam_num_of_entries)) - 1),
783 fman->cfg->cam_base_addr = fman->cam_offset;
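The fragments at 768-779 validate dma_cam_num_of_entries (a multiple of 8, at most 32) and then build a per-entry enable value from (32 - n). Assuming the surrounding code forms ~((1 << (32 - n)) - 1), i.e. a mask with the top n bits set, the following standalone snippet is a small runnable check of that arithmetic; it is a sketch of my reading of the fragment, not the driver's code:

#include <stdint.h>
#include <stdio.h>

/* top-n-bits mask, reconstructed from the fragment at 779 */
static uint32_t cam_enable_mask(unsigned int n)
{
	return ~((1u << (32 - n)) - 1u);
}

int main(void)
{
	unsigned int n;

	/* the check at 768-769 only allows multiples of 8 up to 32 entries */
	for (n = 8; n <= 32; n += 8)
		printf("%2u CAM entries -> mask 0x%08x\n", n, (unsigned)cam_enable_mask(n));
	return 0;
}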
788 static void fpm_init(struct fman_fpm_regs __iomem *fpm_rg, struct fman_cfg *cfg)
795 tmp_reg = (u32)(cfg->disp_limit_tsh << FPM_DISP_LIMIT_SHIFT);
798 tmp_reg = (((u32)cfg->prs_disp_tsh << FPM_THR1_PRS_SHIFT) |
799 ((u32)cfg->kg_disp_tsh << FPM_THR1_KG_SHIFT) |
800 ((u32)cfg->plcr_disp_tsh << FPM_THR1_PLCR_SHIFT) |
801 ((u32)cfg->bmi_disp_tsh << FPM_THR1_BMI_SHIFT));
805 (((u32)cfg->qmi_enq_disp_tsh << FPM_THR2_QMI_ENQ_SHIFT) |
806 ((u32)cfg->qmi_deq_disp_tsh << FPM_THR2_QMI_DEQ_SHIFT) |
807 ((u32)cfg->fm_ctl1_disp_tsh << FPM_THR2_FM_CTL1_SHIFT) |
808 ((u32)cfg->fm_ctl2_disp_tsh << FPM_THR2_FM_CTL2_SHIFT));
817 if (cfg->exceptions & EX_FPM_STALL_ON_TASKS)
819 if (cfg->exceptions & EX_FPM_SINGLE_ECC)
821 if (cfg->exceptions & EX_FPM_DOUBLE_ECC)
823 tmp_reg |= (cfg->catastrophic_err << FPM_EV_MASK_CAT_ERR_SHIFT);
824 tmp_reg |= (cfg->dma_err << FPM_EV_MASK_DMA_ERR_SHIFT);
845 if (cfg->exceptions & EX_IRAM_ECC) {
849 if (cfg->exceptions & EX_MURAM_ECC) {
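Lines 795-808 assemble the FPM dispatch-threshold registers by shifting each 8-bit threshold into its byte lane and OR-ing the results before the MMIO write. A standalone sketch of that packing follows; the shift constants are illustrative stand-ins for the FPM_THR1_*_SHIFT macros in fman.c, not confirmed values:

#include <stdint.h>
#include <stdio.h>

/* byte-lane shifts; illustrative stand-ins, not the driver's macros */
#define THR1_PRS_SHIFT   24
#define THR1_KG_SHIFT    16
#define THR1_PLCR_SHIFT   8
#define THR1_BMI_SHIFT    0

/* pack four 8-bit dispatch thresholds into one register image, mirroring
 * the shift-and-OR pattern at lines 798-801 (FPM_THR2 at 805-808 is analogous) */
static uint32_t pack_thr1(uint8_t prs, uint8_t kg, uint8_t plcr, uint8_t bmi)
{
	return ((uint32_t)prs << THR1_PRS_SHIFT) |
	       ((uint32_t)kg << THR1_KG_SHIFT) |
	       ((uint32_t)plcr << THR1_PLCR_SHIFT) |
	       ((uint32_t)bmi << THR1_BMI_SHIFT);
}

int main(void)
{
	/* four thresholds of 16 (0x10) pack to 0x10101010 */
	printf("FPM_THR1 image: 0x%08x\n", (unsigned)pack_thr1(16, 16, 16, 16));
	return 0;
}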
857 struct fman_cfg *cfg)
864 tmp_reg = cfg->fifo_base_addr;
867 tmp_reg |= ((cfg->total_fifo_size / FMAN_BMI_FIFO_UNITS - 1) <<
871 tmp_reg = ((cfg->total_num_of_tasks - 1) & BMI_CFG2_TASKS_MASK) <<
883 if (cfg->exceptions & EX_BMI_LIST_RAM_ECC)
885 if (cfg->exceptions & EX_BMI_STORAGE_PROFILE_ECC)
887 if (cfg->exceptions & EX_BMI_STATISTICS_RAM_ECC)
889 if (cfg->exceptions & EX_BMI_DISPATCH_RAM_ECC)
895 struct fman_cfg *cfg)
906 if (cfg->exceptions & EX_QMI_DEQ_FROM_UNKNOWN_PORTID)
908 if (cfg->exceptions & EX_QMI_DOUBLE_ECC)
916 if (cfg->exceptions & EX_QMI_SINGLE_ECC)
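The BMI and QMI lines at 883-889 and 906-916 share one idiom: each EX_* flag set in cfg->exceptions gates the corresponding bit in that block's error-interrupt setup. The sketch below uses made-up flag and enable-bit values to show only the mapping pattern, not the real register layout:

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-in flags and enable bits; the real EX_* and
 * interrupt-enable macros live in the driver sources */
#define EX_LIST_RAM_ECC         0x1u
#define EX_STORAGE_PROFILE_ECC  0x2u
#define EN_LIST_RAM_ECC         0x80000000u
#define EN_STORAGE_PROFILE_ECC  0x40000000u

/* translate a cfg->exceptions bitmask into an interrupt-enable image,
 * mirroring the "if (cfg->exceptions & EX_...)" pattern above */
static uint32_t err_enable_image(uint32_t exceptions)
{
	uint32_t tmp = 0;

	if (exceptions & EX_LIST_RAM_ECC)
		tmp |= EN_LIST_RAM_ECC;
	if (exceptions & EX_STORAGE_PROFILE_ECC)
		tmp |= EN_STORAGE_PROFILE_ECC;
	return tmp;
}

int main(void)
{
	printf("0x%08x\n",
	       (unsigned)err_enable_image(EX_LIST_RAM_ECC | EX_STORAGE_PROFILE_ECC));
	return 0;
}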
928 static int enable(struct fman *fman, struct fman_cfg *cfg)
940 cfg_reg |= (cfg->qmi_def_tnums_thresh << 8) | cfg->qmi_def_tnums_thresh;
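Line 940 packs the same qmi_def_tnums_thresh value into two adjacent byte fields of the QMI general-configuration image. A tiny worked example, with an illustrative threshold of 0x30, shows the low half of the register becoming 0x3030:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cfg_reg = 0;
	uint8_t qmi_def_tnums_thresh = 0x30;	/* illustrative value */

	/* as at line 940: the same threshold lands in two adjacent byte fields */
	cfg_reg |= ((uint32_t)qmi_def_tnums_thresh << 8) | qmi_def_tnums_thresh;
	printf("0x%08x\n", (unsigned)cfg_reg);	/* prints 0x00003030 */
	return 0;
}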
1182 static bool is_init_done(struct fman_cfg *cfg)
1185 if (!cfg)
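Read together with lines 1899 and 2008-2009, this check encodes a convention rather than a flag: fman->cfg exists only while the FMan is being configured, and it is freed and set to NULL once init completes, so a NULL cfg pointer is what marks init as done. A minimal sketch of that convention:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* the convention documented by lines 1182-1185, 1899 and 2008-2009:
 * cfg is freed and NULLed after init, so NULL means "init done" */
static bool is_init_done_sketch(const void *cfg)
{
	return cfg == NULL;
}

int main(void)
{
	printf("%d\n", is_init_done_sketch(NULL));	/* 1: init has finished */
	return 0;
}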
1697 fman->cfg = kzalloc(sizeof(*fman->cfg), GFP_KERNEL);
1698 if (!fman->cfg)
1726 fman_defconfig(fman->cfg);
1754 fman->cfg->dma_aid_mode = FMAN_DMA_AID_OUT_PORT_ID;
1756 fman->cfg->qmi_def_tnums_thresh = fman->state->qmi_def_tnums_thresh;
1764 fman->cfg->dma_comm_qtsh_clr_emer =
1768 fman->cfg->dma_comm_qtsh_asrt_emer =
1772 fman->cfg->dma_cam_num_of_entries =
1775 fman->cfg->dma_read_buf_tsh_clr_emer =
1778 fman->cfg->dma_read_buf_tsh_asrt_emer =
1781 fman->cfg->dma_write_buf_tsh_clr_emer =
1784 fman->cfg->dma_write_buf_tsh_asrt_emer =
1787 fman->cfg->dma_axi_dbg_num_of_beats =
1794 kfree(fman->cfg);
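Lines 1697-1794 show the config lifecycle that the defaults at 655-677 feed: allocate a zeroed struct fman_cfg, fill it with fman_defconfig(), override individual fields for the SoC and errata at hand, hand it to the block-init helpers, and free it on error (1794) or once init completes (2008-2009). A minimal user-space sketch of that pattern; calloc/free stand in for kzalloc/kfree, and the two-field struct and DEFAULT_* values are illustrative, not the driver's:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define DEFAULT_CACHE_OVERRIDE  0	/* illustrative stand-ins, not the real defaults */
#define DEFAULT_DMA_WATCHDOG    0

struct cfg_sketch {
	uint8_t  dma_cache_override;
	uint32_t dma_watchdog;
};

/* mirrors the fman_defconfig() pattern at 655-677: zero, then assign defaults */
static void sketch_defconfig(struct cfg_sketch *cfg)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->dma_cache_override = DEFAULT_CACHE_OVERRIDE;
	cfg->dma_watchdog = DEFAULT_DMA_WATCHDOG;
}

int main(void)
{
	struct cfg_sketch *cfg = calloc(1, sizeof(*cfg));	/* stands in for kzalloc at 1697 */

	if (!cfg)
		return 1;
	sketch_defconfig(cfg);		/* like fman_defconfig() at 1726 */
	cfg->dma_watchdog = 10;		/* per-SoC override, like lines 1754-1787 */
	/* ... drive the block-init helpers with cfg, then release it ... */
	free(cfg);			/* like kfree at 1794 / 2008 */
	return 0;
}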
1896 struct fman_cfg *cfg = NULL;
1899 if (is_init_done(fman->cfg))
1904 cfg = fman->cfg;
1959 cfg->exceptions = fman->state->exceptions;
1970 fpm_init(fman->fpm_regs, fman->cfg);
1983 cfg->fifo_base_addr = fman->fifo_offset;
1984 cfg->total_fifo_size = fman->state->total_fifo_size;
1985 cfg->total_num_of_tasks = fman->state->total_num_of_tasks;
1986 cfg->clk_freq = fman->state->fm_clk_freq;
1989 bmi_init(fman->bmi_regs, fman->cfg);
1992 qmi_init(fman->qmi_regs, fman->cfg);
2002 err = enable(fman, cfg);
2008 kfree(fman->cfg);
2009 fman->cfg = NULL;
2019 if (!is_init_done(fman->cfg))
2514 if (!is_init_done(fman->cfg))
2612 if (!is_init_done(fman->cfg))