Lines Matching defs:cfg
798 const struct rtw89_hfc_pub_cfg *cfg = &param->pub_cfg;
801 if (info->g0_used + info->g1_used + info->pub_aval != cfg->pub_max) {
827 const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
842 val = u32_encode_bits(cfg[ch].min, B_AX_MIN_PG_MASK) |
843 u32_encode_bits(cfg[ch].max, B_AX_MAX_PG_MASK) |
844 (cfg[ch].grp ? B_AX_GRP : 0);
856 const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
872 info[ch].used = cfg[ch].min - info[ch].aval;
881 const struct rtw89_hfc_pub_cfg *cfg = &rtwdev->mac.hfc_param.pub_cfg;
893 val = u32_encode_bits(cfg->grp0, B_AX_PUBPG_G0_MASK) |
894 u32_encode_bits(cfg->grp1, B_AX_PUBPG_G1_MASK);
897 val = u32_encode_bits(cfg->wp_thrd, B_AX_WP_THRD_MASK);
1091 const struct rtw89_pwr_cfg *cfg)
1095 u32 addr = cfg->base == PWR_INTF_MSK_SDIO ?
1096 cfg->addr | SDIO_LOCAL_BASE_ADDR : cfg->addr;
1098 ret = read_poll_timeout(rtw89_read8, val, !((val ^ cfg->val) & cfg->msk),
1105 rtw89_warn(rtwdev, "[ERR] addr: %X, %X\n", addr, cfg->addr);
1106 rtw89_warn(rtwdev, "[ERR] val: %X, %X\n", val, cfg->val);
1112 u8 intf_msk, const struct rtw89_pwr_cfg *cfg)
1118 for (cur_cfg = cfg; cur_cfg->cmd != PWR_CMD_END; cur_cfg++) {
1532 const struct rtw89_dle_mem *cfg;
1534 cfg = &rtwdev->chip->dle_mem[mode];
1535 if (!cfg)
1538 if (cfg->mode != mode) {
1543 mac->dle_info.ple_pg_size = cfg->ple_size->pge_size;
1545 mac->dle_info.c0_rx_qta = cfg->ple_min_qt->cma0_dma;
1546 mac->dle_info.c1_rx_qta = cfg->ple_min_qt->cma1_dma;
1548 return cfg;
1646 static int dle_mix_cfg(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg)
1653 size_cfg = cfg->wde_size;
1678 size_cfg = cfg->ple_size;
1754 const struct rtw89_dle_mem *cfg;
1766 cfg = get_dle_mem_cfg(rtwdev, RTW89_QTA_WOW);
1768 cfg = get_dle_mem_cfg(rtwdev, RTW89_QTA_SCC);
1769 if (!cfg) {
1774 min_cfg = cfg->ple_min_qt;
1775 max_cfg = cfg->ple_max_qt;
1794 const struct rtw89_dle_mem *cfg,
1797 wde_quota_cfg(rtwdev, cfg->wde_min_qt, cfg->wde_max_qt, ext_wde_min_qt_wcpu);
1798 ple_quota_cfg(rtwdev, cfg->ple_min_qt, cfg->ple_max_qt);
1804 const struct rtw89_dle_mem *cfg, *ext_cfg;
1813 cfg = get_dle_mem_cfg(rtwdev, mode);
1814 if (!cfg) {
1831 if (dle_used_size(cfg->wde_size, cfg->ple_size) !=
1833 rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
1841 ret = dle_mix_cfg(rtwdev, cfg);
1843 rtw89_err(rtwdev, "[ERR] dle mix cfg\n");
1846 dle_quota_cfg(rtwdev, cfg, ext_wde_min_qt_wcpu);
1854 rtw89_err(rtwdev, "[ERR]WDE cfg ready\n");
1862 rtw89_err(rtwdev, "[ERR]PLE cfg ready\n");
2469 const struct rtw89_dle_mem *cfg;
2471 cfg = get_dle_mem_cfg(rtwdev, mode);
2472 if (!cfg) {
2477 return (cfg->ple_min_qt->cma1_dma && cfg->ple_max_qt->cma1_dma);
2953 const struct rtw89_dle_mem *cfg;
2958 cfg = get_dle_mem_cfg(rtwdev, mode);
2959 if (!cfg) {
2960 rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
2964 if (dle_used_size(cfg->wde_size, cfg->ple_size) !=
2966 rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
2970 dle_quota_cfg(rtwdev, cfg, INVALID_QT_WCPU);
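
The hfc_ch_cfg/hfc_pub_cfg matches above all funnel per-channel and public page quotas into registers through u32_encode_bits(). A minimal sketch of that packing pattern follows; the struct, masks, and function name (ex_ch_cfg, EX_MIN_PG_MASK, ex_pack_ch_cfg) are invented for illustration, and only u32_encode_bits(), GENMASK(), and BIT() are the real kernel helpers.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_MIN_PG_MASK	GENMASK(11, 0)	/* hypothetical field layout */
#define EX_MAX_PG_MASK	GENMASK(27, 16)
#define EX_GRP		BIT(31)

struct ex_ch_cfg {
	u16 min;	/* minimum page quota for the channel */
	u16 max;	/* maximum page quota for the channel */
	bool grp;	/* group select bit for the channel */
};

/* Pack one channel's quota settings into a single register word. */
static u32 ex_pack_ch_cfg(const struct ex_ch_cfg *cfg, u8 ch)
{
	return u32_encode_bits(cfg[ch].min, EX_MIN_PG_MASK) |
	       u32_encode_bits(cfg[ch].max, EX_MAX_PG_MASK) |
	       (cfg[ch].grp ? EX_GRP : 0);
}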
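
The rtw89_pwr_cfg matches show the power-sequence polling step: read a register until its masked value equals the expected one, using read_poll_timeout() from linux/iopoll.h. A sketch of that step, assuming an invented register accessor ex_read8() and config struct ex_pwr_cfg (only read_poll_timeout() and pr_warn() are real kernel interfaces):

#include <linux/iopoll.h>
#include <linux/printk.h>
#include <linux/types.h>

struct ex_pwr_cfg {
	u32 addr;	/* register to poll */
	u8 msk;		/* bits that matter */
	u8 val;		/* expected value under the mask */
};

/* Placeholder for the real bus register read. */
static u8 ex_read8(void *bus_priv, u32 addr)
{
	return 0;
}

static int ex_pwr_cmd_poll(void *bus_priv, const struct ex_pwr_cfg *cfg)
{
	u8 val;
	int ret;

	/* Done once the masked register bits equal the expected value. */
	ret = read_poll_timeout(ex_read8, val, !((val ^ cfg->val) & cfg->msk),
				1000, 1000000, false, bus_priv, cfg->addr);
	if (ret)
		pr_warn("power poll timeout: addr 0x%x, want 0x%x under mask 0x%x\n",
			cfg->addr, cfg->val, cfg->msk);

	return ret;
}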
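
The dle_mem matches repeat a lookup-and-validate pattern: index a per-mode configuration table, bail out if the entry is missing or was built for a different mode, then hand it to the size/quota helpers. A sketch of just the lookup, with an invented table and types (not the actual rtw89 definitions):

#include <linux/types.h>

enum ex_qta_mode { EX_QTA_SCC, EX_QTA_WOW, EX_QTA_MODE_MAX };

struct ex_dle_mem {
	enum ex_qta_mode mode;
	u32 wde_size;	/* WD engine buffer size, in pages */
	u32 ple_size;	/* payload engine buffer size, in pages */
};

static const struct ex_dle_mem ex_dle_mem_tbl[EX_QTA_MODE_MAX] = {
	[EX_QTA_SCC] = { .mode = EX_QTA_SCC, .wde_size = 4096, .ple_size = 1536 },
	[EX_QTA_WOW] = { .mode = EX_QTA_WOW, .wde_size = 4096, .ple_size = 1536 },
};

static const struct ex_dle_mem *ex_get_dle_mem_cfg(enum ex_qta_mode mode)
{
	const struct ex_dle_mem *cfg = &ex_dle_mem_tbl[mode];

	/* Guard against a table entry built for a different mode. */
	if (cfg->mode != mode)
		return NULL;

	return cfg;
}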