Lines matching refs:ipa
16 #include "ipa.h"
125 static u32 ipa_status_extract(struct ipa *ipa, const void *data,
128 enum ipa_version version = ipa->version;
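
The extractor at 125-128 keys off ipa->version because status-record fields move between hardware generations. A minimal standalone sketch of that shape, with a made-up two-field layout (the real offsets are not shown in this listing):

    #include <stdint.h>

    enum status_field { STATUS_OPCODE, STATUS_MASK };

    /* Hypothetical record layout for illustration only: the opcode
     * stays put, but the mask field moves in newer generations. */
    static uint32_t status_extract(uint32_t version, const uint8_t *data,
                                   enum status_field field)
    {
            switch (field) {
            case STATUS_OPCODE:
                    return data[0];
            case STATUS_MASK:
                    return version >= 45 ? data[4] : data[2];
            }
            return 0;
    }
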
231 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
236 struct device *dev = &ipa->pdev->dev;
305 reg = ipa_reg(ipa, ENDP_INIT_AGGR);
319 if (ipa->version >= IPA_VERSION_4_5) {
387 static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
391 struct device *dev = &ipa->pdev->dev;
421 if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
433 struct gsi *gsi = &endpoint->ipa->gsi;
449 struct ipa *ipa = endpoint->ipa;
458 WARN_ON(ipa->version >= IPA_VERSION_4_2);
460 WARN_ON(ipa->version >= IPA_VERSION_4_0);
462 reg = ipa_reg(ipa, ENDP_INIT_CTRL);
464 val = ioread32(ipa->reg_virt + offset);
474 iowrite32(val, ipa->reg_virt + offset);
485 WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
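
The ENDP_INIT_CTRL fragments at 462-474 show the driver's usual read-modify-write shape: look up the register, ioread32() it through the mapped base, flip a field, iowrite32() it back. The same shape in a standalone sketch, with the MMIO accessors stubbed by a plain array:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t fake_regs[64];   /* stands in for ipa->reg_virt */

    static uint32_t read_reg(uint32_t offset)
    {
            return fake_regs[offset / 4];       /* ioread32() in the driver */
    }

    static void write_reg(uint32_t offset, uint32_t val)
    {
            fake_regs[offset / 4] = val;        /* iowrite32() in the driver */
    }

    /* Set or clear one field of a control register, as at 462-474. */
    static void ctrl_update(uint32_t offset, uint32_t mask, bool set)
    {
            uint32_t val = read_reg(offset);

            if (set)
                    val |= mask;
            else
                    val &= ~mask;

            write_reg(offset, val);
    }
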
494 struct ipa *ipa = endpoint->ipa;
499 WARN_ON(!test_bit(endpoint_id, ipa->available));
501 reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
502 val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
511 struct ipa *ipa = endpoint->ipa;
515 WARN_ON(!test_bit(endpoint_id, ipa->available));
517 reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
518 iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit));
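
The STATE_AGGR_ACTIVE/AGGR_FORCE_CLOSE pair at 494-518 tests and then force-closes an endpoint's open aggregation frame. The reg_n_offset(reg, unit) indexing suggests one 32-bit word per group of 32 endpoints; a standalone sketch under that assumption:

    #include <stdbool.h>
    #include <stdint.h>

    /* One register word covers 32 endpoints (assumed): pick the word
     * ("unit") and the endpoint's bit within it. */
    static uint32_t unit_of(uint32_t endpoint_id) { return endpoint_id / 32; }
    static uint32_t bit_of(uint32_t endpoint_id)  { return 1u << (endpoint_id % 32); }

    static bool aggr_active(const uint32_t *state, uint32_t endpoint_id)
    {
            return state[unit_of(endpoint_id)] & bit_of(endpoint_id);
    }

    static void aggr_force_close(uint32_t *force_close, uint32_t endpoint_id)
    {
            /* Writing the endpoint's bit closes its open frame. */
            force_close[unit_of(endpoint_id)] = bit_of(endpoint_id);
    }
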
532 struct ipa *ipa = endpoint->ipa;
544 ipa_interrupt_simulate_suspend(ipa->interrupt);
553 if (endpoint->ipa->version >= IPA_VERSION_4_0)
575 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
579 while (endpoint_id < ipa->endpoint_count) {
580 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
587 else if (ipa->version < IPA_VERSION_4_2)
590 gsi_modem_channel_flow_control(&ipa->gsi,
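
The loop at 575-590 picks a pause mechanism per endpoint and hardware generation; the listing shows the version split and the GSI flow-control fallback. A sketch of the dispatch (the RX-suspend branch is an assumption, inferred from the simulate-suspend fragment at 544):

    #include <stdbool.h>
    #include <stdio.h>

    #define V4_2 42

    /* Three-way split: RX endpoints get SUSPEND, pre-v4.2 TX endpoints
     * get DELAY, and v4.2+ TX uses GSI channel flow control instead. */
    static void modem_pause_one(unsigned int version, bool toward_ipa,
                                bool enable)
    {
            if (!toward_ipa)
                    printf("program SUSPEND = %d\n", enable);
            else if (version < V4_2)
                    printf("program DELAY = %d\n", enable);
            else
                    printf("gsi channel flow control = %d\n", enable);
    }
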
597 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
606 count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
607 trans = ipa_cmd_trans_alloc(ipa, count);
609 dev_err(&ipa->pdev->dev,
614 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
620 endpoint = &ipa->endpoint[endpoint_id];
624 reg = ipa_reg(ipa, ENDP_STATUS);
638 ipa_cmd_pipeline_clear_wait(ipa);
646 struct ipa *ipa = endpoint->ipa;
651 reg = ipa_reg(ipa, ENDP_INIT_CFG);
654 enum ipa_version version = ipa->version;
677 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
683 struct ipa *ipa = endpoint->ipa;
690 reg = ipa_reg(ipa, ENDP_INIT_NAT);
693 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
785 struct ipa *ipa = endpoint->ipa;
789 reg = ipa_reg(ipa, ENDP_INIT_HDR);
791 enum ipa_version version = ipa->version;
823 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
830 struct ipa *ipa = endpoint->ipa;
834 reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
861 if (ipa->version >= IPA_VERSION_4_5) {
875 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
881 struct ipa *ipa = endpoint->ipa;
889 reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
896 iowrite32(val, ipa->reg_virt + offset);
901 struct ipa *ipa = endpoint->ipa;
909 reg = ipa_reg(ipa, ENDP_INIT_MODE);
912 u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
922 iowrite32(val, ipa->reg_virt + offset);
937 ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select)
953 if (ipa->version >= IPA_VERSION_5_0) {
966 static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg,
976 if (ipa->version >= IPA_VERSION_4_5) {
979 ticks = ipa_qtime_val(ipa, microseconds, max, &select);
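
ipa_qtime_val() at 937-953 converts a microsecond limit into ticks of the coarsest pulse generator whose tick count still fits the register field; v5.0+ adds a third, coarser generator (953). A standalone sketch, assuming the 100 microsecond and 1 millisecond granularities used by mainline:

    #include <stdint.h>

    #define DIV_ROUND_CLOSEST(n, d)  (((n) + (d) / 2) / (d))

    /* Return ticks for the finest pulse generator that fits in "max",
     * reporting which generator was chosen through *select.  The
     * granularities below are assumptions based on mainline. */
    static uint32_t qtime_val(uint32_t microseconds, uint32_t max,
                              uint32_t *select)
    {
            uint32_t ticks;

            ticks = DIV_ROUND_CLOSEST(microseconds, 100);   /* 100 us */
            if (ticks <= max) {
                    *select = 0;
                    return ticks;
            }

            ticks = DIV_ROUND_CLOSEST(microseconds, 1000);  /* 1 ms */
            *select = 1;
            return ticks;
    }

Both encoders in the listing feed through this helper on newer hardware: aggr_time_limit_encode() at 966-979 and hol_block_timer_encode() at 1046-1064.
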
996 struct ipa *ipa = endpoint->ipa;
1000 reg = ipa_reg(ipa, ENDP_INIT_AGGR);
1017 val |= aggr_time_limit_encode(ipa, reg, limit);
1035 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1046 static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg,
1059 if (ipa->version >= IPA_VERSION_4_5) {
1064 ticks = ipa_qtime_val(ipa, microseconds, max, &select);
1071 rate = ipa_core_clock_rate(ipa);
1078 if (ipa->version < IPA_VERSION_4_2)
1111 struct ipa *ipa = endpoint->ipa;
1116 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
1117 val = hol_block_timer_encode(ipa, reg, microseconds);
1119 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1126 struct ipa *ipa = endpoint->ipa;
1131 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
1135 iowrite32(val, ipa->reg_virt + offset);
1138 if (enable && ipa->version >= IPA_VERSION_4_5)
1139 iowrite32(val, ipa->reg_virt + offset);
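
The pair of identical writes at 1135-1139 is deliberate: when enabling head-of-line blocking on v4.5+ hardware, the enable register is written a second time. A sketch of that quirk:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t fake_reg;   /* stands in for HOL_BLOCK_EN */

    static void write_reg(uint32_t val) { fake_reg = val; }

    /* Enable/disable HOL blocking; the repeated write on enable mirrors
     * the v4.5+ behavior at 1138-1139. */
    static void hol_block_enable(bool enable, bool v4_5_plus)
    {
            uint32_t val = enable ? 1u : 0u;    /* enable field, assumed bit 0 */

            write_reg(val);
            if (enable && v4_5_plus)
                    write_reg(val);             /* second write required */
    }
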
1155 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
1159 while (endpoint_id < ipa->endpoint_count) {
1160 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
1173 struct ipa *ipa = endpoint->ipa;
1180 reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
1186 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1193 struct ipa *ipa = endpoint->ipa;
1197 reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
1200 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1206 struct ipa *ipa = endpoint->ipa;
1213 reg = ipa_reg(ipa, ENDP_INIT_SEQ);
1219 if (ipa->version < IPA_VERSION_4_5)
1223 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
1272 struct ipa *ipa = endpoint->ipa;
1276 reg = ipa_reg(ipa, ENDP_STATUS);
1284 status_endpoint_id = ipa->name_map[name]->endpoint_id;
1294 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
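
The ENDP_STATUS programming at 1272-1294 both enables status generation and names the endpoint (looked up through ipa->name_map at 1284) that will receive the status records. A sketch with hypothetical field positions:

    #include <stdbool.h>
    #include <stdint.h>

    /* Pack the status-enable bit and the destination endpoint; both
     * field positions here are made up for illustration. */
    static uint32_t endp_status_encode(bool enable, uint32_t status_endpoint_id)
    {
            uint32_t val = 0;

            if (enable) {
                    val |= 1u;                        /* STATUS_EN */
                    val |= status_endpoint_id << 1;   /* STATUS_ENDP */
            }
            return val;
    }
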
1372 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1382 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1464 struct ipa *ipa = endpoint->ipa;
1468 opcode = ipa_status_extract(ipa, data, STATUS_OPCODE);
1472 endpoint_id = ipa_status_extract(ipa, data, STATUS_DST_ENDPOINT);
1484 struct ipa *ipa = endpoint->ipa;
1487 status_mask = ipa_status_extract(ipa, data, STATUS_MASK);
1496 endpoint_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT);
1497 command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
1499 complete(&ipa->completion);
1501 dev_err(&ipa->pdev->dev,
1514 struct ipa *ipa = endpoint->ipa;
1522 exception = ipa_status_extract(ipa, data, STATUS_EXCEPTION);
1527 rule = ipa_status_extract(ipa, data, STATUS_ROUTER_RULE_INDEX);
1538 struct ipa *ipa = endpoint->ipa;
1547 dev_err(&endpoint->ipa->pdev->dev,
1554 length = ipa_status_extract(ipa, data, STATUS_LENGTH);
1619 struct ipa *ipa = endpoint->ipa;
1622 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1636 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1641 reg = ipa_reg(ipa, ROUTE);
1649 iowrite32(val, ipa->reg_virt + reg_offset(reg));
1652 void ipa_endpoint_default_route_clear(struct ipa *ipa)
1654 ipa_endpoint_default_route_set(ipa, 0);
1669 struct device *dev = &endpoint->ipa->pdev->dev;
1670 struct ipa *ipa = endpoint->ipa;
1671 struct gsi *gsi = &ipa->gsi;
1756 struct ipa *ipa = endpoint->ipa;
1764 special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
1769 gsi_channel_reset(&ipa->gsi, channel_id, true);
1772 dev_err(&ipa->pdev->dev,
1786 if (endpoint->ipa->version < IPA_VERSION_4_2)
1814 struct ipa *ipa = endpoint->ipa;
1815 struct gsi *gsi = &ipa->gsi;
1820 dev_err(&ipa->pdev->dev,
1828 ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id);
1832 __set_bit(endpoint_id, ipa->enabled);
1840 struct ipa *ipa = endpoint->ipa;
1841 struct gsi *gsi = &ipa->gsi;
1844 if (!test_bit(endpoint_id, ipa->enabled))
1847 __clear_bit(endpoint_id, endpoint->ipa->enabled);
1851 ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
1857 dev_err(&ipa->pdev->dev,
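
Enable and disable at 1814-1857 are mirror images: enable starts the channel, arms the suspend interrupt (1828), and sets the endpoint's bit in ipa->enabled (1832); disable tests the bit first (1844) so a second disable is a no-op. A minimal sketch of that bookkeeping, with the channel and interrupt steps stubbed:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_EP 32
    static bool ep_enabled[MAX_EP];

    static void enable_one(unsigned int id)
    {
            printf("start channel, arm suspend irq for %u\n", id);
            ep_enabled[id] = true;
    }

    static void disable_one(unsigned int id)
    {
            if (!ep_enabled[id])
                    return;     /* never enabled: nothing to undo */
            ep_enabled[id] = false;
            printf("disarm suspend irq, stop channel for %u\n", id);
    }
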
1864 struct device *dev = &endpoint->ipa->pdev->dev;
1865 struct gsi *gsi = &endpoint->ipa->gsi;
1868 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1884 struct device *dev = &endpoint->ipa->pdev->dev;
1885 struct gsi *gsi = &endpoint->ipa->gsi;
1888 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1902 void ipa_endpoint_suspend(struct ipa *ipa)
1904 if (!ipa->setup_complete)
1907 if (ipa->modem_netdev)
1908 ipa_modem_suspend(ipa->modem_netdev);
1910 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1911 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1914 void ipa_endpoint_resume(struct ipa *ipa)
1916 if (!ipa->setup_complete)
1919 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1920 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1922 if (ipa->modem_netdev)
1923 ipa_modem_resume(ipa->modem_netdev);
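
The suspend path at 1902-1911 quiesces the modem netdev first and the command endpoint last; resume at 1914-1923 replays the same steps in reverse, so the command endpoint is back before anything that might need to issue commands. Sketched:

    #include <stdio.h>

    static void suspend_all(void)
    {
            printf("modem netdev suspend\n");    /* stop new traffic first */
            printf("suspend AP_LAN_RX\n");
            printf("suspend AP_COMMAND_TX\n");   /* command endpoint last */
    }

    static void resume_all(void)
    {
            printf("resume AP_COMMAND_TX\n");    /* command endpoint first */
            printf("resume AP_LAN_RX\n");
            printf("modem netdev resume\n");     /* netdev last */
    }
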
1928 struct gsi *gsi = &endpoint->ipa->gsi;
1948 __set_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
1953 __clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
1961 void ipa_endpoint_setup(struct ipa *ipa)
1965 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
1966 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1969 void ipa_endpoint_teardown(struct ipa *ipa)
1973 for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count)
1974 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1977 void ipa_endpoint_deconfig(struct ipa *ipa)
1979 ipa->available_count = 0;
1980 bitmap_free(ipa->available);
1981 ipa->available = NULL;
1984 int ipa_endpoint_config(struct ipa *ipa)
1986 struct device *dev = &ipa->pdev->dev;
2006 if (ipa->version < IPA_VERSION_3_5) {
2007 ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL);
2008 if (!ipa->available)
2010 ipa->available_count = IPA_ENDPOINT_MAX;
2012 bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX);
2020 reg = ipa_reg(ipa, FLAVOR_0);
2021 val = ioread32(ipa->reg_virt + reg_offset(reg));
2036 hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1;
2044 ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
2045 if (!ipa->available)
2047 ipa->available_count = limit;
2050 bitmap_set(ipa->available, 0, tx_count);
2051 bitmap_set(ipa->available, rx_base, rx_count);
2053 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
2062 if (!test_bit(endpoint_id, ipa->available)) {
2069 endpoint = &ipa->endpoint[endpoint_id];
2084 ipa_endpoint_deconfig(ipa);
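
The FLAVOR_0 path at 2020-2051 sizes the "available" bitmap from the hardware's advertised endpoint counts: TX endpoints sit at the bottom, RX endpoints start at rx_base, and the gap between them stays clear. A sketch of that construction (a byte-per-endpoint map stands in for the driver's bitmap):

    #include <stdint.h>
    #include <string.h>

    #define MAX_ENDPOINTS 256    /* the v5.0+ hw_limit per 2036; 32 before */

    /* Mark TX endpoints [0, tx_count) and RX endpoints
     * [rx_base, rx_base + rx_count) available; the hole between the two
     * ranges must never be configured. */
    static void build_available(uint8_t available[MAX_ENDPOINTS],
                                uint32_t tx_count, uint32_t rx_base,
                                uint32_t rx_count)
    {
            memset(available, 0, MAX_ENDPOINTS);
            memset(available, 1, tx_count);
            memset(available + rx_base, 1, rx_count);
    }
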
2089 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
2094 endpoint = &ipa->endpoint[data->endpoint_id];
2097 ipa->channel_map[data->channel_id] = endpoint;
2098 ipa->name_map[name] = endpoint;
2100 endpoint->ipa = ipa;
2107 __set_bit(endpoint->endpoint_id, ipa->defined);
2112 __clear_bit(endpoint->endpoint_id, endpoint->ipa->defined);
2117 void ipa_endpoint_exit(struct ipa *ipa)
2121 ipa->filtered = 0;
2123 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
2124 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
2126 bitmap_free(ipa->enabled);
2127 ipa->enabled = NULL;
2128 bitmap_free(ipa->set_up);
2129 ipa->set_up = NULL;
2130 bitmap_free(ipa->defined);
2131 ipa->defined = NULL;
2133 memset(ipa->name_map, 0, sizeof(ipa->name_map));
2134 memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
2138 int ipa_endpoint_init(struct ipa *ipa, u32 count,
2147 ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
2148 if (!ipa->endpoint_count)
2152 ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2153 if (!ipa->defined)
2156 ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2157 if (!ipa->set_up)
2160 ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2161 if (!ipa->enabled)
2169 ipa_endpoint_init_one(ipa, name, data);
2174 ipa->modem_tx_count++;
2178 if (!ipa_filtered_valid(ipa, filtered)) {
2179 ipa_endpoint_exit(ipa);
2184 ipa->filtered = filtered;
2189 bitmap_free(ipa->set_up);
2190 ipa->set_up = NULL;
2192 bitmap_free(ipa->defined);
2193 ipa->defined = NULL;
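
ipa_endpoint_init() allocates three bitmaps in sequence (2152-2161), and the error labels at 2189-2193 free them in reverse order, each label undoing exactly the allocations that succeeded before the failure point. The same goto-unwind shape, standalone:

    #include <stdlib.h>

    struct maps { unsigned long *defined, *set_up, *enabled; };

    static int init_bitmaps(struct maps *m, size_t words)
    {
            m->defined = calloc(words, sizeof(*m->defined));
            if (!m->defined)
                    return -1;

            m->set_up = calloc(words, sizeof(*m->set_up));
            if (!m->set_up)
                    goto err_defined;

            m->enabled = calloc(words, sizeof(*m->enabled));
            if (!m->enabled)
                    goto err_set_up;

            return 0;

    err_set_up:
            free(m->set_up);    /* falls through: also free defined */
    err_defined:
            free(m->defined);
            return -1;
    }
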