Lines matching refs:endpoint (drivers/net/ipa/ipa_endpoint.c, Linux IPA driver)

249 		if (data->endpoint.filter_support) {
251 "RX endpoint %u\n",
260 rx_config = &data->endpoint.config.rx;
266 dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
271 if (!data->endpoint.config.aggregation) {
277 "time limit with no aggregation for RX endpoint %u\n",
283 dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
289 dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
297 /* For an endpoint supporting receive aggregation, the byte
309 dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
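
The checks above (lines 249-309) validate an RX endpoint's configuration: aggregation parameters are only meaningful when aggregation is enabled, and the aggregation byte limit, expressed in 1 KB units per the message at line 309, must fit the receive buffer. A minimal sketch of that last check, with hypothetical names (the real driver derives the limit from version-specific register field widths):

	/* Illustrative only: aggr_kb and aggr_kb_max are assumed inputs */
	static bool rx_aggr_limit_fits(struct device *dev, u32 endpoint_id,
				       u32 buffer_size, u32 aggr_kb,
				       u32 aggr_kb_max)
	{
		if (aggr_kb <= aggr_kb_max && aggr_kb * SZ_1K <= buffer_size)
			return true;

		dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
			endpoint_id, aggr_kb, aggr_kb_max);
		return false;
	}
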
320 if (data->endpoint.config.tx.seq_rep_type) {
321 		dev_err(dev, "non-zero seq_rep_type TX endpoint %u\n",
327 if (data->endpoint.config.status_enable) {
328 other_name = data->endpoint.config.tx.status_endpoint;
330 dev_err(dev, "status endpoint name %u out of range "
331 "for endpoint %u\n",
336 /* Status endpoint must be defined... */
339 		dev_err(dev, "status endpoint name %u undefined "
340 "for endpoint %u\n",
345 /* ...and has to be an RX endpoint... */
348 "status endpoint for endpoint %u not RX\n",
353 /* ...and if it's to be an AP endpoint... */
356 if (!other_data->endpoint.config.status_enable) {
358 "status not enabled for endpoint %u\n",
365 if (data->endpoint.config.dma_mode) {
366 other_name = data->endpoint.config.dma_endpoint;
368 dev_err(dev, "DMA endpoint name %u out of range "
369 "for endpoint %u\n",
376 dev_err(dev, "DMA endpoint name %u undefined "
377 "for endpoint %u\n",
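
Lines 320-377 apply the same cross-reference pattern twice: a TX endpoint that names a companion endpoint (a status endpoint, or a DMA destination) must name one that is in range, defined, and an RX endpoint. A hedged sketch of that shared shape; ipa_gsi_endpoint_data_empty() appears in the driver, but this helper itself is hypothetical:

	static bool other_endpoint_valid(struct device *dev,
					 const struct ipa_gsi_endpoint_data *data,
					 const struct ipa_gsi_endpoint_data *all,
					 u32 other_name, u32 count)
	{
		const struct ipa_gsi_endpoint_data *other;

		if (other_name >= count) {	/* ...must be in range... */
			dev_err(dev, "endpoint name %u out of range for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other = &all[other_name];	/* ...must be defined... */
		if (ipa_gsi_endpoint_data_empty(other)) {
			dev_err(dev, "endpoint name %u undefined for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		return !other->toward_ipa;	/* ...and must be RX */
	}
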
386 /* Validate endpoint configuration data. Return max defined endpoint ID */
403 dev_err(dev, "command TX endpoint not defined\n");
407 dev_err(dev, "LAN RX endpoint not defined\n");
411 dev_err(dev, "AP->modem TX endpoint not defined\n");
415 dev_err(dev, "AP<-modem RX endpoint not defined\n");
429 /* Allocate a transaction to use on a non-command endpoint */
430 static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
433 struct gsi *gsi = &endpoint->ipa->gsi;
434 u32 channel_id = endpoint->channel_id;
437 direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
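
The allocator above maps the endpoint's orientation onto a DMA direction before handing off to the GSI layer. A sketch, assuming (from its use in these fragments) that gsi_channel_trans_alloc() takes the GSI pointer, channel ID, TRE count, and direction:

	static struct gsi_trans *trans_alloc_sketch(struct ipa_endpoint *endpoint,
						    u32 tre_count)
	{
		enum dma_data_direction direction;

		direction = endpoint->toward_ipa ? DMA_TO_DEVICE
						 : DMA_FROM_DEVICE;

		return gsi_channel_trans_alloc(&endpoint->ipa->gsi,
					       endpoint->channel_id,
					       tre_count, direction);
	}
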
447 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
449 struct ipa *ipa = endpoint->ipa;
457 if (endpoint->toward_ipa)
463 offset = reg_n_offset(reg, endpoint->endpoint_id);
466 field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
482 ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
485 WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
486 WARN_ON(!endpoint->toward_ipa);
488 (void)ipa_endpoint_init_ctrl(endpoint, enable);
491 static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
493 u32 endpoint_id = endpoint->endpoint_id;
494 struct ipa *ipa = endpoint->ipa;
507 static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
509 u32 endpoint_id = endpoint->endpoint_id;
511 struct ipa *ipa = endpoint->ipa;
523 * @endpoint: Endpoint on which to emulate a suspend
525  * Emulate a suspend IPA interrupt to unsuspend an endpoint suspended
530 static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
532 struct ipa *ipa = endpoint->ipa;
534 if (!endpoint->config.aggregation)
537 /* Nothing to do if the endpoint doesn't have aggregation open */
538 if (!ipa_endpoint_aggr_active(endpoint))
542 ipa_endpoint_force_close(endpoint);
549 ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
553 if (endpoint->ipa->version >= IPA_VERSION_4_0)
556 WARN_ON(endpoint->toward_ipa);
558 suspended = ipa_endpoint_init_ctrl(endpoint, enable);
565 ipa_endpoint_suspend_aggr(endpoint);
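
Lines 549-565 gate SUSPEND handling on the hardware version: IPA v4.0+ handles suspend at the GSI channel level, so the endpoint SUSPEND bit is only touched on older hardware, and entering suspend also flushes any open aggregation frame. A minimal sketch of that flow, built from the helpers shown above:

	static bool program_suspend_sketch(struct ipa_endpoint *endpoint,
					   bool enable)
	{
		bool suspended;

		if (endpoint->ipa->version >= IPA_VERSION_4_0)
			return enable;	/* v4.0+: no endpoint SUSPEND bit */

		WARN_ON(endpoint->toward_ipa);	/* SUSPEND is RX-only */

		suspended = ipa_endpoint_init_ctrl(endpoint, enable);

		/* A client suspended with an open aggregation frame won't
		 * raise a SUSPEND interrupt, so force the frame closed.
		 */
		if (enable)
			ipa_endpoint_suspend_aggr(endpoint);

		return suspended;
	}
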
571 * on all modem TX endpoints. Prior to IPA v4.2, endpoint DELAY mode is
580 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
582 if (endpoint->ee_id != GSI_EE_MODEM)
585 if (!endpoint->toward_ipa)
586 (void)ipa_endpoint_program_suspend(endpoint, enable);
588 ipa_endpoint_program_delay(endpoint, enable);
591 endpoint->channel_id,
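
The loop at lines 571-591 pauses or unpauses every modem endpoint: RX endpoints via SUSPEND, TX endpoints via DELAY mode before IPA v4.2, and via GSI flow control from v4.2 on (per the comment at line 571). A sketch of that dispatch; the v4.2+ branch assumes gsi_modem_channel_flow_control() based on the fragment at line 591:

	static void modem_pause_all_sketch(struct ipa *ipa, bool enable)
	{
		u32 endpoint_id = 0;

		while (endpoint_id < ipa->endpoint_count) {
			struct ipa_endpoint *endpoint =
					&ipa->endpoint[endpoint_id++];

			if (endpoint->ee_id != GSI_EE_MODEM)
				continue;

			if (!endpoint->toward_ipa)
				(void)ipa_endpoint_program_suspend(endpoint,
								   enable);
			else if (ipa->version < IPA_VERSION_4_2)
				ipa_endpoint_program_delay(endpoint, enable);
			else
				gsi_modem_channel_flow_control(&ipa->gsi,
						endpoint->channel_id, enable);
		}
	}
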
596 /* Reset all modem endpoints to use the default exception endpoint */
603 /* We need one command per modem TX endpoint, plus the commands
615 struct ipa_endpoint *endpoint;
620 endpoint = &ipa->endpoint[endpoint_id];
621 if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
628 * means status is disabled on the endpoint, and as a
643 static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
645 u32 endpoint_id = endpoint->endpoint_id;
646 struct ipa *ipa = endpoint->ipa;
653 if (endpoint->config.checksum) {
656 if (endpoint->toward_ipa) {
680 static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
682 u32 endpoint_id = endpoint->endpoint_id;
683 struct ipa *ipa = endpoint->ipa;
687 if (!endpoint->toward_ipa)
697 ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
702 if (!endpoint->config.checksum)
707 if (endpoint->toward_ipa)
762 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
763 * @endpoint: Endpoint pointer
777 * endpoint's METADATA_MASK register defines which byte within the modem
782 static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
784 u32 endpoint_id = endpoint->endpoint_id;
785 struct ipa *ipa = endpoint->ipa;
790 if (endpoint->config.qmap) {
794 header_size = ipa_qmap_header_size(version, endpoint);
798 if (!endpoint->toward_ipa) {
826 static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
828 u32 pad_align = endpoint->config.rx.pad_align;
829 u32 endpoint_id = endpoint->endpoint_id;
830 struct ipa *ipa = endpoint->ipa;
835 if (endpoint->config.qmap) {
846 if (!endpoint->toward_ipa) {
855 if (!endpoint->toward_ipa)
863 if (endpoint->config.qmap && !endpoint->toward_ipa) {
878 static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
880 u32 endpoint_id = endpoint->endpoint_id;
881 struct ipa *ipa = endpoint->ipa;
886 if (endpoint->toward_ipa)
893 if (endpoint->config.qmap)
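
For QMAP traffic only the mux_id byte of the 32-bit metadata is wanted, so RX endpoints program a mask (lines 878-893) and everything else is zeroed. A sketch, assuming this driver's reg helpers and its IPA_ENDPOINT_QMAP_METADATA_MASK constant; the big-endian conversion mirrors the header's byte order:

	static void hdr_metadata_mask_sketch(struct ipa_endpoint *endpoint)
	{
		u32 endpoint_id = endpoint->endpoint_id;
		struct ipa *ipa = endpoint->ipa;
		const struct reg *reg;
		u32 val = 0;

		if (endpoint->toward_ipa)
			return;		/* Register is valid only for RX */

		reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);

		/* Header fields are big endian, so the mask is too */
		if (endpoint->config.qmap)
			val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

		iowrite32(val, ipa->reg_virt +
				reg_n_offset(reg, endpoint_id));
	}
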
899 static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
901 struct ipa *ipa = endpoint->ipa;
906 if (!endpoint->toward_ipa)
910 if (endpoint->config.dma_mode) {
911 enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
921 offset = reg_n_offset(reg, endpoint->endpoint_id);
993 static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
995 u32 endpoint_id = endpoint->endpoint_id;
996 struct ipa *ipa = endpoint->ipa;
1001 if (endpoint->config.aggregation) {
1002 if (!endpoint->toward_ipa) {
1007 rx_config = &endpoint->config.rx;
1107 static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
1110 u32 endpoint_id = endpoint->endpoint_id;
1111 struct ipa *ipa = endpoint->ipa;
1123 ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
1125 u32 endpoint_id = endpoint->endpoint_id;
1126 struct ipa *ipa = endpoint->ipa;
1143 static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
1146 ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
1147 ipa_endpoint_init_hol_block_en(endpoint, true);
1150 static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
1152 ipa_endpoint_init_hol_block_en(endpoint, false);
1160 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
1162 if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
1165 ipa_endpoint_init_hol_block_disable(endpoint);
1166 ipa_endpoint_init_hol_block_enable(endpoint, 0);
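
The modem loop at lines 1160-1166 handles head-of-line blocking by dropping: the HOL-block timer is first disabled, then re-enabled with a zero timeout, which (assumed from the driver's usage) discards blocked packets immediately rather than stalling the endpoint. Sketch of that loop:

	for (endpoint_id = 0; endpoint_id < ipa->endpoint_count; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
			continue;	/* only modem RX endpoints */

		ipa_endpoint_init_hol_block_disable(endpoint);
		ipa_endpoint_init_hol_block_enable(endpoint, 0);
	}
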
1170 static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
1172 u32 endpoint_id = endpoint->endpoint_id;
1173 struct ipa *ipa = endpoint->ipa;
1177 if (!endpoint->toward_ipa)
1189 static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
1191 u32 resource_group = endpoint->config.resource_group;
1192 u32 endpoint_id = endpoint->endpoint_id;
1193 struct ipa *ipa = endpoint->ipa;
1203 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
1205 u32 endpoint_id = endpoint->endpoint_id;
1206 struct ipa *ipa = endpoint->ipa;
1210 if (!endpoint->toward_ipa)
1216 val = reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
1221 endpoint->config.tx.seq_rep_type);
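
Lines 1203-1221 program the TX sequencer: SEQ_TYPE selects the processing pipeline, and the replication type is encoded separately. A sketch using the driver's reg abstraction; the v4.5 cutoff on SEQ_REP_TYPE is an assumption, inferred from the field being conditional in the fragments:

	static void init_seq_sketch(struct ipa_endpoint *endpoint)
	{
		struct ipa *ipa = endpoint->ipa;
		const struct reg *reg;
		u32 val;

		if (!endpoint->toward_ipa)
			return;		/* Sequencers apply to TX only */

		reg = ipa_reg(ipa, ENDP_INIT_SEQ);
		val = reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);

		/* Assumed: the replication field exists only before v4.5 */
		if (ipa->version < IPA_VERSION_4_5)
			val |= reg_encode(reg, SEQ_REP_TYPE,
					  endpoint->config.tx.seq_rep_type);

		iowrite32(val, ipa->reg_virt +
				reg_n_offset(reg, endpoint->endpoint_id));
	}
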
1228 * @endpoint: Endpoint pointer
1233 int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
1239 /* Make sure source endpoint's TLV FIFO has enough entries to
1244 if (nr_frags > endpoint->skb_frag_max) {
1250 trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
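
ipa_endpoint_skb_tx() needs one TRE for the linear data plus one per page fragment; if the skb carries more fragments than the channel's TLV FIFO can describe (skb_frag_max), it falls back to linearizing the skb. A hedged sketch of that path; gsi_trans_skb_add() and gsi_trans_commit() are assumed from their use elsewhere in this driver:

	int skb_tx_sketch(struct ipa_endpoint *endpoint, struct sk_buff *skb)
	{
		u32 nr_frags = skb_shinfo(skb)->nr_frags;
		struct gsi_trans *trans;

		if (nr_frags > endpoint->skb_frag_max) {
			if (skb_linearize(skb))
				return -E2BIG;
			nr_frags = 0;	/* all data is linear now */
		}

		trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
		if (!trans)
			return -EBUSY;

		if (gsi_trans_skb_add(trans, skb)) {
			gsi_trans_free(trans);
			return -ENOMEM;
		}

		trans->data = skb;	/* transaction owns the skb now */
		gsi_trans_commit(trans, !netdev_xmit_more());

		return 0;
	}
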
1269 static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
1271 u32 endpoint_id = endpoint->endpoint_id;
1272 struct ipa *ipa = endpoint->ipa;
1277 if (endpoint->config.status_enable) {
1279 if (endpoint->toward_ipa) {
1283 name = endpoint->config.tx.status_endpoint;
1297 static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
1306 buffer_size = endpoint->config.rx.buffer_size;
1325 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1326 * @endpoint: Endpoint to be replenished
1329 * endpoint, based on the number of entries in the underlying channel ring
1330 * buffer. If an endpoint's "backlog" is non-zero, it indicates how many
1332 * an endpoint can be disabled, in which case buffers are not queued to
1335 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
1339 if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
1343 if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
1346 while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
1349 if (ipa_endpoint_replenish_one(endpoint, trans))
1354 doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
1358 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1364 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1372 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1373 schedule_delayed_work(&endpoint->replenish_work,
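
The replenish machinery (lines 1325-1373) relies on two atomic flag bits: IPA_REPLENISH_ENABLED gates whether refilling may happen at all, and IPA_REPLENISH_ACTIVE is claimed with test_and_set_bit() so only one context refills the ring at a time; on buffer-allocation failure a retry is scheduled via delayed work. A sketch of that pattern, with the commit and retry-delay details assumed from the fragments:

	static void replenish_sketch(struct ipa_endpoint *endpoint)
	{
		struct gsi_trans *trans;

		if (!test_bit(IPA_REPLENISH_ENABLED,
			      endpoint->replenish_flags))
			return;

		/* Claim ACTIVE; if someone else holds it, they do the work */
		if (test_and_set_bit(IPA_REPLENISH_ACTIVE,
				     endpoint->replenish_flags))
			return;

		while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
			bool doorbell;

			if (ipa_endpoint_replenish_one(endpoint, trans))
				goto try_again_later;

			/* Ring the doorbell once per batch, not per buffer */
			doorbell = !(++endpoint->replenish_count %
				     IPA_REPLENISH_BATCH);
			gsi_trans_commit(trans, doorbell);
		}

		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

		return;

	try_again_later:
		gsi_trans_free(trans);
		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

		/* Nothing queued means no completion will retrigger us */
		if (gsi_channel_trans_idle(&endpoint->ipa->gsi,
					   endpoint->channel_id))
			schedule_delayed_work(&endpoint->replenish_work,
					      msecs_to_jiffies(1));
	}
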
1377 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
1379 set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1382 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1383 ipa_endpoint_replenish(endpoint);
1386 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
1388 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1394 struct ipa_endpoint *endpoint;
1396 endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
1398 ipa_endpoint_replenish(endpoint);
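
The retry work scheduled above recovers its endpoint with container_of(), the standard kernel idiom for a delayed_work embedded in a larger structure:

	static void replenish_work_sketch(struct work_struct *work)
	{
		struct delayed_work *dwork = to_delayed_work(work);
		struct ipa_endpoint *endpoint;

		endpoint = container_of(dwork, struct ipa_endpoint,
					replenish_work);

		ipa_endpoint_replenish(endpoint);
	}
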
1401 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
1406 if (!endpoint->netdev)
1417 ipa_modem_skb_rx(endpoint->netdev, skb);
1420 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
1423 u32 buffer_size = endpoint->config.rx.buffer_size;
1427 if (!endpoint->netdev)
1440 ipa_modem_skb_rx(endpoint->netdev, skb);
1462 ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, const void *data)
1464 struct ipa *ipa = endpoint->ipa;
1473 if (endpoint_id != endpoint->endpoint_id)
1480 ipa_endpoint_status_tag_valid(struct ipa_endpoint *endpoint, const void *data)
1484 struct ipa *ipa = endpoint->ipa;
1492 * this endpoint (already verified by ipa_endpoint_status_skip()).
1493 * If the packet came from the AP->command TX endpoint we know
1502 "unexpected tagged packet from endpoint %u\n",
1511 ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, const void *data)
1514 struct ipa *ipa = endpoint->ipa;
1518 if (ipa_endpoint_status_tag_valid(endpoint, data))
1532 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
1535 u32 buffer_size = endpoint->config.rx.buffer_size;
1538 struct ipa *ipa = endpoint->ipa;
1547 dev_err(&endpoint->ipa->pdev->dev,
1555 if (!length || ipa_endpoint_status_skip(endpoint, data)) {
1567 align = endpoint->config.rx.pad_align ? : 1;
1569 if (endpoint->config.checksum)
1572 if (!ipa_endpoint_status_drop(endpoint, data)) {
1585 ipa_endpoint_skb_copy(endpoint, data2, length, extra);
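
ipa_endpoint_status_parse() (lines 1532-1585) walks the receive buffer one status record at a time: records for other endpoints or with zero length are skipped, dropped packets are consumed without building an skb, and the rest are copied out, with the stride padded to pad_align and extended by a checksum trailer when offload is on. A sketch of the loop; status_pkt_length() and status_size are hypothetical stand-ins for the real record accessors, and the unused-space accounting passed to ipa_endpoint_skb_copy() is elided:

	while (resid > 0) {
		u32 length = status_pkt_length(data);	/* hypothetical */
		u32 align, len;

		if (!length || ipa_endpoint_status_skip(endpoint, data)) {
			data += status_size;	/* consume status only */
			resid -= status_size;
			continue;
		}

		align = endpoint->config.rx.pad_align ? : 1;
		len = status_size + ALIGN(length, align);
		if (endpoint->config.checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		if (!ipa_endpoint_status_drop(endpoint, data))
			/* last arg (unused-space accounting) elided */
			ipa_endpoint_skb_copy(endpoint,
					      data + status_size, length, 0);

		data += len;
		resid -= len;
	}
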
1594 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1599 if (endpoint->toward_ipa)
1607 if (endpoint->config.status_enable)
1608 ipa_endpoint_status_parse(endpoint, page, trans->len);
1609 else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1612 ipa_endpoint_replenish(endpoint);
1615 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1618 if (endpoint->toward_ipa) {
1619 struct ipa *ipa = endpoint->ipa;
1622 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1658 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1659 * @endpoint: Endpoint to be reset
1661 * If aggregation is active on an RX endpoint when a reset is performed
1667 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1669 struct device *dev = &endpoint->ipa->pdev->dev;
1670 struct ipa *ipa = endpoint->ipa;
1690 ipa_endpoint_force_close(endpoint);
1697 gsi_channel_reset(gsi, endpoint->channel_id, false);
1700 suspended = ipa_endpoint_program_suspend(endpoint, false);
1703 ret = gsi_channel_start(gsi, endpoint->channel_id);
1707 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1714 if (!ipa_endpoint_aggr_active(endpoint))
1720 if (ipa_endpoint_aggr_active(endpoint))
1721 dev_err(dev, "endpoint %u still active during reset\n",
1722 endpoint->endpoint_id);
1724 gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1726 ret = gsi_channel_stop(gsi, endpoint->channel_id);
1735 gsi_channel_reset(gsi, endpoint->channel_id, true);
1742 (void)gsi_channel_stop(gsi, endpoint->channel_id);
1745 (void)ipa_endpoint_program_suspend(endpoint, true);
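
The reset-with-aggregation workaround (lines 1658-1745, needed before IPA v4.0) has a fixed choreography, reconstructed here from the fragments with the error paths elided:

	ipa_endpoint_force_close(endpoint);	    /* close the open frame */
	gsi_channel_reset(gsi, channel_id, false);  /* reset, no doorbell */
	suspended = ipa_endpoint_program_suspend(endpoint, false);
	gsi_channel_start(gsi, channel_id);
	gsi_trans_read_byte(gsi, channel_id, addr); /* 1-byte DMA to drain */
	/* ...poll ipa_endpoint_aggr_active() until aggregation closes... */
	gsi_trans_read_byte_done(gsi, channel_id);
	gsi_channel_stop(gsi, channel_id);
	gsi_channel_reset(gsi, channel_id, true);   /* final reset, doorbell */
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
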
1753 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1755 u32 channel_id = endpoint->channel_id;
1756 struct ipa *ipa = endpoint->ipa;
1760 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation
1764 special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
1765 endpoint->config.aggregation;
1766 if (special && ipa_endpoint_aggr_active(endpoint))
1767 ret = ipa_endpoint_reset_rx_aggr(endpoint);
1773 "error %d resetting channel %u for endpoint %u\n",
1774 ret, endpoint->channel_id, endpoint->endpoint_id);
1777 static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1779 if (endpoint->toward_ipa) {
1781 * instead of endpoint DELAY mode to prevent sending data.
1786 if (endpoint->ipa->version < IPA_VERSION_4_2)
1787 ipa_endpoint_program_delay(endpoint, false);
1790 (void)ipa_endpoint_program_suspend(endpoint, false);
1792 ipa_endpoint_init_cfg(endpoint);
1793 ipa_endpoint_init_nat(endpoint);
1794 ipa_endpoint_init_hdr(endpoint);
1795 ipa_endpoint_init_hdr_ext(endpoint);
1796 ipa_endpoint_init_hdr_metadata_mask(endpoint);
1797 ipa_endpoint_init_mode(endpoint);
1798 ipa_endpoint_init_aggr(endpoint);
1799 if (!endpoint->toward_ipa) {
1800 if (endpoint->config.rx.holb_drop)
1801 ipa_endpoint_init_hol_block_enable(endpoint, 0);
1803 ipa_endpoint_init_hol_block_disable(endpoint);
1805 ipa_endpoint_init_deaggr(endpoint);
1806 ipa_endpoint_init_rsrc_grp(endpoint);
1807 ipa_endpoint_init_seq(endpoint);
1808 ipa_endpoint_status(endpoint);
1811 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1813 u32 endpoint_id = endpoint->endpoint_id;
1814 struct ipa *ipa = endpoint->ipa;
1818 ret = gsi_channel_start(gsi, endpoint->channel_id);
1821 "error %d starting %cX channel %u for endpoint %u\n",
1822 ret, endpoint->toward_ipa ? 'T' : 'R',
1823 endpoint->channel_id, endpoint_id);
1827 if (!endpoint->toward_ipa) {
1829 ipa_endpoint_replenish_enable(endpoint);
1837 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1839 u32 endpoint_id = endpoint->endpoint_id;
1840 struct ipa *ipa = endpoint->ipa;
1847 __clear_bit(endpoint_id, endpoint->ipa->enabled);
1849 if (!endpoint->toward_ipa) {
1850 ipa_endpoint_replenish_disable(endpoint);
1855 ret = gsi_channel_stop(gsi, endpoint->channel_id);
1858 "error %d attempting to stop endpoint %u\n", ret,
1862 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1864 struct device *dev = &endpoint->ipa->pdev->dev;
1865 struct gsi *gsi = &endpoint->ipa->gsi;
1868 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1871 if (!endpoint->toward_ipa) {
1872 ipa_endpoint_replenish_disable(endpoint);
1873 (void)ipa_endpoint_program_suspend(endpoint, true);
1876 ret = gsi_channel_suspend(gsi, endpoint->channel_id);
1879 endpoint->channel_id);
1882 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1884 struct device *dev = &endpoint->ipa->pdev->dev;
1885 struct gsi *gsi = &endpoint->ipa->gsi;
1888 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1891 if (!endpoint->toward_ipa)
1892 (void)ipa_endpoint_program_suspend(endpoint, false);
1894 ret = gsi_channel_resume(gsi, endpoint->channel_id);
1897 endpoint->channel_id);
1898 else if (!endpoint->toward_ipa)
1899 ipa_endpoint_replenish_enable(endpoint);
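
Suspend and resume (lines 1862-1899) are mirror images: an RX endpoint stops replenishing and sets SUSPEND before its channel is quiesced, then clears SUSPEND and resumes replenishing only after the channel restarts. A sketch of the suspend side, with the failure path reduced to a dev_err():

	void suspend_one_sketch(struct ipa_endpoint *endpoint)
	{
		struct ipa *ipa = endpoint->ipa;
		int ret;

		if (!test_bit(endpoint->endpoint_id, ipa->enabled))
			return;		/* nothing to do if not enabled */

		if (!endpoint->toward_ipa) {
			ipa_endpoint_replenish_disable(endpoint);
			(void)ipa_endpoint_program_suspend(endpoint, true);
		}

		ret = gsi_channel_suspend(&ipa->gsi, endpoint->channel_id);
		if (ret)
			dev_err(&ipa->pdev->dev,
				"error %d suspending channel %u\n", ret,
				endpoint->channel_id);
	}
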
1926 static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1928 struct gsi *gsi = &endpoint->ipa->gsi;
1929 u32 channel_id = endpoint->channel_id;
1932 if (endpoint->ee_id != GSI_EE_AP)
1935 endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
1936 if (!endpoint->toward_ipa) {
1940 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1941 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1942 INIT_DELAYED_WORK(&endpoint->replenish_work,
1946 ipa_endpoint_program(endpoint);
1948 __set_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
1951 static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
1953 __clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
1955 if (!endpoint->toward_ipa)
1956 cancel_delayed_work_sync(&endpoint->replenish_work);
1958 ipa_endpoint_reset(endpoint);
1966 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1974 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1998 * endpoint numbers started with 0 and RX endpoints had numbers
2003 * just set the available mask to support any endpoint, and
2035 /* Until IPA v5.0, the max endpoint ID was 32 */
2038 dev_err(dev, "unexpected endpoint count, %u > %u\n",
2043 /* Allocate and initialize the available endpoint bitmap */
2054 struct ipa_endpoint *endpoint;
2057 dev_err(dev, "invalid endpoint id, %u > %u\n",
2063 dev_err(dev, "unavailable endpoint id %u\n",
2069 endpoint = &ipa->endpoint[endpoint_id];
2070 if (endpoint->toward_ipa) {
2077 dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id);
2092 struct ipa_endpoint *endpoint;
2094 endpoint = &ipa->endpoint[data->endpoint_id];
2097 ipa->channel_map[data->channel_id] = endpoint;
2098 ipa->name_map[name] = endpoint;
2100 endpoint->ipa = ipa;
2101 endpoint->ee_id = data->ee_id;
2102 endpoint->channel_id = data->channel_id;
2103 endpoint->endpoint_id = data->endpoint_id;
2104 endpoint->toward_ipa = data->toward_ipa;
2105 endpoint->config = data->endpoint.config;
2107 __set_bit(endpoint->endpoint_id, ipa->defined);
2110 static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
2112 __clear_bit(endpoint->endpoint_id, endpoint->ipa->defined);
2114 memset(endpoint, 0, sizeof(*endpoint));
2124 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
2151 /* Initialize endpoint state bitmaps */
2171 if (data->endpoint.filter_support)