Lines Matching refs:endpoint
114 if (data->endpoint.filter_support) {
116 "RX endpoint %u\n",
124 if (data->endpoint.config.status_enable) {
125 other_name = data->endpoint.config.tx.status_endpoint;
127 dev_err(dev, "status endpoint name %u out of range "
128 "for endpoint %u\n",
133 /* Status endpoint must be defined... */
136 dev_err(dev, "DMA endpoint name %u undefined "
137 "for endpoint %u\n",
142 /* ...and has to be an RX endpoint... */
145 "status endpoint for endpoint %u not RX\n",
150 /* ...and if it's to be an AP endpoint... */
153 if (!other_data->endpoint.config.status_enable) {
155 "status not enabled for endpoint %u\n",
162 if (data->endpoint.config.dma_mode) {
163 other_name = data->endpoint.config.dma_endpoint;
165 dev_err(dev, "DMA endpoint name %u out of range "
166 "for endpoint %u\n",
173 dev_err(dev, "DMA endpoint name %u undefined "
174 "for endpoint %u\n",
200 dev_err(dev, "command TX endpoint not defined\n");
204 dev_err(dev, "LAN RX endpoint not defined\n");
208 dev_err(dev, "AP->modem TX endpoint not defined\n");
212 dev_err(dev, "AP<-modem RX endpoint not defined\n");
233 /* Allocate a transaction to use on a non-command endpoint */
234 static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
237 struct gsi *gsi = &endpoint->ipa->gsi;
238 u32 channel_id = endpoint->channel_id;
241 direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
250 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
252 u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
253 struct ipa *ipa = endpoint->ipa;
261 * if (endpoint->toward_ipa)
266 mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
281 ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
283 /* assert(endpoint->toward_ipa); */
286 if (endpoint->ipa->version != IPA_VERSION_4_2)
287 (void)ipa_endpoint_init_ctrl(endpoint, enable);
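
ipa_endpoint_init_ctrl() and ipa_endpoint_program_delay() above toggle a single bit in the per-endpoint control register: the DELAY bit for TX endpoints, the SUSPEND bit for RX endpoints, with the delay workaround skipped on IPA v4.2. A hedged model of that read-modify-write, with a plain variable standing in for the memory-mapped register and made-up bit positions:

    #include <stdbool.h>
    #include <stdint.h>

    /* Bit positions are illustrative only. */
    #define ENDP_DELAY_BIT      (1u << 0)
    #define ENDP_SUSPEND_BIT    (1u << 1)

    /* Set or clear the DELAY (TX) or SUSPEND (RX) bit and report whether
     * it was already set before the write.
     */
    static bool ep_init_ctrl(uint32_t *ctrl_reg, bool toward_ipa, bool enable)
    {
        uint32_t mask = toward_ipa ? ENDP_DELAY_BIT : ENDP_SUSPEND_BIT;
        uint32_t val = *ctrl_reg;
        bool was_set = !!(val & mask);

        if (enable)
            val |= mask;
        else
            val &= ~mask;
        *ctrl_reg = val;

        return was_set;
    }
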
290 static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
292 u32 mask = BIT(endpoint->endpoint_id);
293 struct ipa *ipa = endpoint->ipa;
304 static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
306 u32 mask = BIT(endpoint->endpoint_id);
307 struct ipa *ipa = endpoint->ipa;
315 * @endpoint: Endpoint on which to emulate a suspend
317 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
322 static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
324 struct ipa *ipa = endpoint->ipa;
326 if (!endpoint->data->aggregation)
329 /* Nothing to do if the endpoint doesn't have aggregation open */
330 if (!ipa_endpoint_aggr_active(endpoint))
334 ipa_endpoint_force_close(endpoint);
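
ipa_endpoint_aggr_active() and ipa_endpoint_force_close() both derive a single-bit mask from the endpoint ID, and ipa_endpoint_suspend_aggr() only forces a close when aggregation is configured and a frame is currently open. The pattern reduced to plain variables rather than real registers (the register layout here is assumed):

    #include <stdbool.h>
    #include <stdint.h>

    /* Test this endpoint's bit in an "aggregation active" status word. */
    static bool aggr_active(uint32_t aggr_state, unsigned int endpoint_id)
    {
        return aggr_state & (1u << endpoint_id);
    }

    /* Request a forced close of this endpoint's open aggregation frame. */
    static void aggr_force_close(uint32_t *force_close, unsigned int endpoint_id)
    {
        *force_close |= 1u << endpoint_id;
    }

    /* Guard logic mirroring the suspend_aggr fragment: do nothing unless
     * aggregation is configured and currently open.
     */
    static void suspend_aggr(uint32_t aggr_state, uint32_t *force_close,
                             unsigned int endpoint_id, bool aggregation)
    {
        if (!aggregation)
            return;
        if (!aggr_active(aggr_state, endpoint_id))
            return;    /* nothing to do if no aggregation frame is open */

        aggr_force_close(force_close, endpoint_id);
    }
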
341 ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
345 if (endpoint->ipa->version != IPA_VERSION_3_5_1)
348 /* assert(!endpoint->toward_ipa); */
350 suspended = ipa_endpoint_init_ctrl(endpoint, enable);
357 ipa_endpoint_suspend_aggr(endpoint);
372 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
374 if (endpoint->ee_id != GSI_EE_MODEM)
378 if (endpoint->toward_ipa)
379 ipa_endpoint_program_delay(endpoint, enable);
381 (void)ipa_endpoint_program_suspend(endpoint, enable);
385 /* Reset all modem endpoints to use the default exception endpoint */
392 /* We need one command per modem TX endpoint. We can get an upper
407 struct ipa_endpoint *endpoint;
413 endpoint = &ipa->endpoint[endpoint_id];
414 if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
420 * means status is disabled on the endpoint, and as a
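
The loop above acts only on modem TX endpoints, skipping everything else, and the earlier comment notes that one command is needed per such endpoint. A standalone version of that filter over a bitmap of initialized endpoint IDs (field names are assumptions for the sketch):

    #include <stdbool.h>
    #include <stdint.h>

    struct ep_info {
        bool is_modem;        /* owned by the modem execution environment */
        bool toward_ipa;      /* TX endpoint */
    };

    /* Count the modem TX endpoints among those marked in 'initialized';
     * this is the number of per-endpoint commands the reset needs.
     */
    static unsigned int modem_tx_count(const struct ep_info *eps,
                                       uint32_t initialized)
    {
        unsigned int count = 0;

        while (initialized) {
            unsigned int id = __builtin_ctz(initialized);  /* lowest set bit */

            initialized &= initialized - 1;                /* clear that bit */
            if (eps[id].is_modem && eps[id].toward_ipa)
                count++;
        }

        return count;
    }
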
434 static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
436 u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
440 if (endpoint->data->checksum) {
441 if (endpoint->toward_ipa) {
461 iowrite32(val, endpoint->ipa->reg_virt + offset);
465 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
466 * @endpoint: Endpoint pointer
480 * endpoint's METADATA_MASK register defines which byte within the modem
485 static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
487 u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
490 if (endpoint->data->qmap) {
494 if (endpoint->toward_ipa && endpoint->data->checksum)
499 if (!endpoint->toward_ipa) {
520 iowrite32(val, endpoint->ipa->reg_virt + offset);
523 static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
525 u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
526 u32 pad_align = endpoint->data->rx.pad_align;
537 if (endpoint->data->qmap && !endpoint->toward_ipa) {
545 if (!endpoint->toward_ipa)
548 iowrite32(val, endpoint->ipa->reg_virt + offset);
552 static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
554 u32 endpoint_id = endpoint->endpoint_id;
558 if (endpoint->toward_ipa)
564 if (endpoint->data->qmap)
567 iowrite32(val, endpoint->ipa->reg_virt + offset);
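
Per the ipa_endpoint_init_hdr() comment above, a QMAP RX endpoint's METADATA_MASK register selects which byte of the metadata word carries the QMAP mux_id. A worked example of applying such a mask (the mask value below is invented for illustration):

    #include <stdint.h>

    /* Extract the byte selected by 'metadata_mask' from a 32-bit metadata
     * word, e.g. the mux_id described in the header comment above.
     * 'metadata_mask' must be nonzero.
     */
    static uint8_t metadata_selected_byte(uint32_t metadata, uint32_t metadata_mask)
    {
        uint32_t masked = metadata & metadata_mask;

        /* shift the selected byte down to bit 0 */
        return (uint8_t)(masked >> __builtin_ctz(metadata_mask));
    }

    /* Example: with an (illustrative) mask of 0x0000ff00, metadata
     * 0x12345678 yields 0x56.
     */
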
570 static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
572 u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
575 if (!endpoint->toward_ipa)
578 if (endpoint->data->dma_mode) {
579 enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
582 dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
591 iowrite32(val, endpoint->ipa->reg_virt + offset);
606 static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
608 u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
611 if (endpoint->data->aggregation) {
612 if (!endpoint->toward_ipa) {
629 if (endpoint->data->rx.aggr_close_eof)
644 iowrite32(val, endpoint->ipa->reg_virt + offset);
700 static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
703 u32 endpoint_id = endpoint->endpoint_id;
704 struct ipa *ipa = endpoint->ipa;
715 ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
717 u32 endpoint_id = endpoint->endpoint_id;
723 iowrite32(val, endpoint->ipa->reg_virt + offset);
731 struct ipa_endpoint *endpoint = &ipa->endpoint[i];
733 if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
736 ipa_endpoint_init_hol_block_enable(endpoint, false);
737 ipa_endpoint_init_hol_block_timer(endpoint, 0);
738 ipa_endpoint_init_hol_block_enable(endpoint, true);
742 static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
744 u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
747 if (!endpoint->toward_ipa)
755 iowrite32(val, endpoint->ipa->reg_virt + offset);
758 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
760 u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
761 u32 seq_type = endpoint->seq_type;
764 if (!endpoint->toward_ipa)
774 iowrite32(val, endpoint->ipa->reg_virt + offset);
779 * @endpoint: Endpoint pointer
784 int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
790 /* Make sure source endpoint's TLV FIFO has enough entries to
795 if (1 + nr_frags > endpoint->trans_tre_max) {
801 trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
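
The TLV FIFO check above budgets one TRE for the skb's linear data plus one per page fragment before allocating the transaction. A simplified version of that decision (the flatten-and-retry fallback shown here is a typical choice, not necessarily what the driver does):

    /* Decide how many TREs an outgoing buffer needs, flattening it to a
     * single contiguous buffer when the per-transaction limit would be
     * exceeded.
     */
    static unsigned int tx_tre_budget(unsigned int nr_frags,
                                      unsigned int trans_tre_max,
                                      int *need_linearize)
    {
        unsigned int num_tres = 1 + nr_frags;   /* linear data + each fragment */

        *need_linearize = 0;
        if (num_tres > trans_tre_max) {
            *need_linearize = 1;    /* copy into one buffer; one TRE then suffices */
            num_tres = 1;
        }

        return num_tres;
    }
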
820 static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
822 u32 endpoint_id = endpoint->endpoint_id;
823 struct ipa *ipa = endpoint->ipa;
829 if (endpoint->data->status_enable) {
831 if (endpoint->toward_ipa) {
835 name = endpoint->data->tx.status_endpoint;
849 static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
862 trans = ipa_endpoint_trans_alloc(endpoint, 1);
875 if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
877 endpoint->replenish_ready = 0;
894 * @endpoint: Endpoint to be replenished
898 * for an endpoint. These are supplied to the hardware, which fills
901 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
906 if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) {
908 atomic_add(count, &endpoint->replenish_saved);
913 if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) {
915 atomic_add(count, &endpoint->replenish_backlog);
919 while (atomic_dec_not_zero(&endpoint->replenish_backlog))
920 if (ipa_endpoint_replenish_one(endpoint))
923 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
926 atomic_add(count, &endpoint->replenish_backlog);
931 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
934 backlog = atomic_add_return(count + 1, &endpoint->replenish_backlog);
942 gsi = &endpoint->ipa->gsi;
943 if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
944 schedule_delayed_work(&endpoint->replenish_work,
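
The replenish fragments above implement bookkeeping around RX buffer allocation: counts accumulate in replenish_saved while replenishing is disabled, move to replenish_backlog when another caller already holds the ACTIVE flag, and otherwise the backlog is drained one buffer at a time, with failed allocations retried later. A simplified single-file model of that accounting (C11 atomics stand in for the kernel's bit flags and atomics):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct replenish_state {
        atomic_bool enabled;
        atomic_bool active;
        atomic_uint saved;      /* counts deferred while disabled */
        atomic_uint backlog;    /* buffers still owed to the hardware */
    };

    /* Stand-in for allocating one RX buffer and queueing it to hardware. */
    static bool replenish_one(struct replenish_state *st)
    {
        (void)st;
        return true;
    }

    static void replenish(struct replenish_state *st, unsigned int count)
    {
        if (!atomic_load(&st->enabled)) {
            atomic_fetch_add(&st->saved, count);
            return;
        }

        /* If someone else is already replenishing, just record the work. */
        if (atomic_exchange(&st->active, true)) {
            atomic_fetch_add(&st->backlog, count);
            return;
        }

        while (atomic_load(&st->backlog)) {
            if (!replenish_one(st))
                break;                      /* allocation failed; retry later */
            atomic_fetch_sub(&st->backlog, 1);
        }

        atomic_store(&st->active, false);

        /* This call's count is picked up on a later pass. */
        atomic_fetch_add(&st->backlog, count);
    }
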
948 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
950 struct gsi *gsi = &endpoint->ipa->gsi;
954 set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
955 while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
956 atomic_add(saved, &endpoint->replenish_backlog);
959 max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
960 if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
961 ipa_endpoint_replenish(endpoint, 0);
964 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
968 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
969 while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
970 atomic_add(backlog, &endpoint->replenish_saved);
976 struct ipa_endpoint *endpoint;
978 endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
980 ipa_endpoint_replenish(endpoint, 0);
983 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
996 if (endpoint->netdev)
997 ipa_modem_skb_rx(endpoint->netdev, skb);
1002 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
1008 if (!endpoint->netdev)
1020 ipa_modem_skb_rx(endpoint->netdev, skb);
1041 static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
1052 if (endpoint_id != endpoint->endpoint_id)
1073 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
1086 dev_err(&endpoint->ipa->pdev->dev,
1093 if (ipa_endpoint_status_skip(endpoint, status)) {
1106 align = endpoint->data->rx.pad_align ? : 1;
1109 if (endpoint->data->checksum)
1122 ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
1132 static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
1138 static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
1143 ipa_endpoint_replenish(endpoint, 1);
1150 if (endpoint->data->status_enable)
1151 ipa_endpoint_status_parse(endpoint, page, trans->len);
1152 else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1156 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1159 if (endpoint->toward_ipa)
1160 ipa_endpoint_tx_complete(endpoint, trans);
1162 ipa_endpoint_rx_complete(endpoint, trans);
1165 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1168 if (endpoint->toward_ipa) {
1169 struct ipa *ipa = endpoint->ipa;
1172 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1206 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1207 * @endpoint: Endpoint to be reset
1209 * If aggregation is active on an RX endpoint when a reset is performed
1215 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1217 struct device *dev = &endpoint->ipa->pdev->dev;
1218 struct ipa *ipa = endpoint->ipa;
1239 ipa_endpoint_force_close(endpoint);
1246 gsi_channel_reset(gsi, endpoint->channel_id, false);
1249 suspended = ipa_endpoint_program_suspend(endpoint, false);
1252 ret = gsi_channel_start(gsi, endpoint->channel_id);
1256 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1263 if (!ipa_endpoint_aggr_active(endpoint))
1269 if (ipa_endpoint_aggr_active(endpoint))
1270 dev_err(dev, "endpoint %u still active during reset\n",
1271 endpoint->endpoint_id);
1273 gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1275 ret = gsi_channel_stop(gsi, endpoint->channel_id);
1285 gsi_channel_reset(gsi, endpoint->channel_id, legacy);
1292 (void)gsi_channel_stop(gsi, endpoint->channel_id);
1295 (void)ipa_endpoint_program_suspend(endpoint, true);
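
The fragments from ipa_endpoint_reset_rx_aggr() above outline a multi-step workaround: force the open aggregation frame closed, reset and restart the channel with suspend cleared, queue a single-byte read so the hardware has somewhere to flush, wait for aggregation to go inactive, then stop and reset the channel again. A stubbed, self-contained outline of that sequence (every helper name below is invented for the sketch):

    #include <stdbool.h>

    struct ep { unsigned int channel_id; };

    /* Hardware interactions are stubbed so the outline stands alone. */
    static void ep_force_close(struct ep *ep) { (void)ep; }
    static void ep_program_suspend(struct ep *ep, bool enable) { (void)ep; (void)enable; }
    static void channel_reset(unsigned int ch) { (void)ch; }
    static int channel_start(unsigned int ch) { (void)ch; return 0; }
    static int channel_stop(unsigned int ch) { (void)ch; return 0; }
    static int queue_single_byte_read(unsigned int ch) { (void)ch; return 0; }
    static void ack_single_byte_read(unsigned int ch) { (void)ch; }
    static bool aggr_still_active(struct ep *ep) { (void)ep; return false; }
    static void wait_a_little(void) { }

    static int reset_rx_aggr_outline(struct ep *ep)
    {
        int retries = 3;    /* retry budget is a guess for the sketch */
        int ret;

        ep_force_close(ep);                 /* close the open aggregation frame */
        channel_reset(ep->channel_id);
        ep_program_suspend(ep, false);      /* make sure the endpoint isn't suspended */

        ret = channel_start(ep->channel_id);
        if (ret)
            return ret;

        ret = queue_single_byte_read(ep->channel_id);
        if (ret)
            goto out_stop;

        while (aggr_still_active(ep) && retries--)
            wait_a_little();                /* poll until the frame is flushed */

        ack_single_byte_read(ep->channel_id);

    out_stop:
        (void)channel_stop(ep->channel_id);
        channel_reset(ep->channel_id);
        return ret;
    }
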
1303 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1305 u32 channel_id = endpoint->channel_id;
1306 struct ipa *ipa = endpoint->ipa;
1311 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation
1318 special = !endpoint->toward_ipa && endpoint->data->aggregation;
1319 if (special && ipa_endpoint_aggr_active(endpoint))
1320 ret = ipa_endpoint_reset_rx_aggr(endpoint);
1326 "error %d resetting channel %u for endpoint %u\n",
1327 ret, endpoint->channel_id, endpoint->endpoint_id);
1330 static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1332 if (endpoint->toward_ipa)
1333 ipa_endpoint_program_delay(endpoint, false);
1335 (void)ipa_endpoint_program_suspend(endpoint, false);
1336 ipa_endpoint_init_cfg(endpoint);
1337 ipa_endpoint_init_hdr(endpoint);
1338 ipa_endpoint_init_hdr_ext(endpoint);
1339 ipa_endpoint_init_hdr_metadata_mask(endpoint);
1340 ipa_endpoint_init_mode(endpoint);
1341 ipa_endpoint_init_aggr(endpoint);
1342 ipa_endpoint_init_deaggr(endpoint);
1343 ipa_endpoint_init_seq(endpoint);
1344 ipa_endpoint_status(endpoint);
1347 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1349 struct ipa *ipa = endpoint->ipa;
1353 ret = gsi_channel_start(gsi, endpoint->channel_id);
1356 "error %d starting %cX channel %u for endpoint %u\n",
1357 ret, endpoint->toward_ipa ? 'T' : 'R',
1358 endpoint->channel_id, endpoint->endpoint_id);
1362 if (!endpoint->toward_ipa) {
1364 endpoint->endpoint_id);
1365 ipa_endpoint_replenish_enable(endpoint);
1368 ipa->enabled |= BIT(endpoint->endpoint_id);
1373 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1375 u32 mask = BIT(endpoint->endpoint_id);
1376 struct ipa *ipa = endpoint->ipa;
1385 if (!endpoint->toward_ipa) {
1386 ipa_endpoint_replenish_disable(endpoint);
1388 endpoint->endpoint_id);
1392 ret = gsi_channel_stop(gsi, endpoint->channel_id);
1395 "error %d attempting to stop endpoint %u\n", ret,
1396 endpoint->endpoint_id);
1399 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1401 struct device *dev = &endpoint->ipa->pdev->dev;
1402 struct gsi *gsi = &endpoint->ipa->gsi;
1406 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1409 if (!endpoint->toward_ipa) {
1410 ipa_endpoint_replenish_disable(endpoint);
1411 (void)ipa_endpoint_program_suspend(endpoint, true);
1415 stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
1416 ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
1419 endpoint->channel_id);
1422 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1424 struct device *dev = &endpoint->ipa->pdev->dev;
1425 struct gsi *gsi = &endpoint->ipa->gsi;
1429 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1432 if (!endpoint->toward_ipa)
1433 (void)ipa_endpoint_program_suspend(endpoint, false);
1436 start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
1437 ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
1440 endpoint->channel_id);
1441 else if (!endpoint->toward_ipa)
1442 ipa_endpoint_replenish_enable(endpoint);
1471 static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1473 struct gsi *gsi = &endpoint->ipa->gsi;
1474 u32 channel_id = endpoint->channel_id;
1477 if (endpoint->ee_id != GSI_EE_AP)
1480 endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
1481 if (!endpoint->toward_ipa) {
1485 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1486 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1487 atomic_set(&endpoint->replenish_saved,
1488 gsi_channel_tre_max(gsi, endpoint->channel_id));
1489 atomic_set(&endpoint->replenish_backlog, 0);
1490 INIT_DELAYED_WORK(&endpoint->replenish_work,
1494 ipa_endpoint_program(endpoint);
1496 endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
1499 static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
1501 endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
1503 if (!endpoint->toward_ipa)
1504 cancel_delayed_work_sync(&endpoint->replenish_work);
1506 ipa_endpoint_reset(endpoint);
1519 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1532 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1571 dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
1579 struct ipa_endpoint *endpoint;
1584 endpoint = &ipa->endpoint[endpoint_id];
1585 if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
1586 dev_err(dev, "endpoint id %u wrong direction\n",
1603 struct ipa_endpoint *endpoint;
1605 endpoint = &ipa->endpoint[data->endpoint_id];
1608 ipa->channel_map[data->channel_id] = endpoint;
1609 ipa->name_map[name] = endpoint;
1611 endpoint->ipa = ipa;
1612 endpoint->ee_id = data->ee_id;
1613 endpoint->seq_type = data->endpoint.seq_type;
1614 endpoint->channel_id = data->channel_id;
1615 endpoint->endpoint_id = data->endpoint_id;
1616 endpoint->toward_ipa = data->toward_ipa;
1617 endpoint->data = &data->endpoint.config;
1619 ipa->initialized |= BIT(endpoint->endpoint_id);
1622 void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
1624 endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
1626 memset(endpoint, 0, sizeof(*endpoint));
1638 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
1663 if (data->endpoint.filter_support)