Lines matching defs:tpd: definitions and uses of the tpd (transmit packet descriptor) ring in the Qualcomm EMAC driver (emac-mac.c). The number leading each match is its line number in that file.
236 #define EMAC_TPD(TXQ, SIZE, IDX) ((TXQ)->tpd.v_addr + (SIZE * (IDX)))
239 #define GET_TPD_BUFFER(RTQ, IDX) (&((RTQ)->tpd.tpbuff[(IDX)]))
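
These two macros (file lines 236/239) are the ring accessors: EMAC_TPD returns the address of descriptor IDX, and GET_TPD_BUFFER returns its software bookkeeping entry. v_addr points at 32-bit words and SIZE is the descriptor size in words (file line 686 sizes the ring as count * tpd_size * 4 bytes), so the pointer arithmetic steps in whole descriptors. A minimal stand-alone model of that addressing, with assumed types:

    #include <stdint.h>
    #include <stdio.h>

    struct tpd_ring {
        uint32_t *v_addr;    /* descriptor memory viewed as 32-bit words */
    };

    /* Same arithmetic as EMAC_TPD: descriptor idx begins size_words * idx
       words into the ring. */
    static uint32_t *tpd_at(const struct tpd_ring *r, unsigned size_words,
                            unsigned idx)
    {
        return r->v_addr + size_words * idx;
    }

    int main(void)
    {
        uint32_t ring[8 * 4];                     /* 8 descriptors, 4 words each */
        struct tpd_ring r = { .v_addr = ring };

        printf("%td\n", tpd_at(&r, 4, 3) - ring); /* 12: three descriptors in */
        return 0;
    }
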
305 writel(upper_32_bits(adpt->tx_q.tpd.dma_addr),
308 writel(lower_32_bits(adpt->tx_q.tpd.dma_addr),
311 writel(adpt->tx_q.tpd.count & TPD_RING_SIZE_BMSK,
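
File lines 305-311 tell the MAC where the ring lives: the 64-bit DMA address is split across a high and a low 32-bit register using the kernel's stock upper_32_bits()/lower_32_bits() helpers, and the descriptor count is masked to the width of its register field (TPD_RING_SIZE_BMSK). A sketch of the split, with an invented example address:

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as the kernel's upper_32_bits()/lower_32_bits(). */
    static uint32_t upper32(uint64_t v) { return (uint32_t)(v >> 32); }
    static uint32_t lower32(uint64_t v) { return (uint32_t)v; }

    int main(void)
    {
        uint64_t dma_addr = 0x00000001deadb000ULL;  /* example bus address */

        printf("hi=0x%08x lo=0x%08x\n", upper32(dma_addr), lower32(dma_addr));
        return 0;
    }
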
592 if (!tx_q->tpd.tpbuff)
595 for (i = 0; i < tx_q->tpd.count; i++) {
610 size = sizeof(struct emac_buffer) * tx_q->tpd.count;
611 memset(tx_q->tpd.tpbuff, 0, size);
614 memset(tx_q->tpd.v_addr, 0, tx_q->tpd.size);
616 tx_q->tpd.consume_idx = 0;
617 tx_q->tpd.produce_idx = 0;
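
File lines 592-617 are the per-queue cleanup run when the interface goes down: every buffer the ring still owns is released, the bookkeeping array and the descriptor memory are zeroed, and both indices return to zero so the ring restarts pristine. A compact model, with the unmap/free step reduced to a callback and all names assumed:

    #include <string.h>

    struct buffer { unsigned long dma_addr; unsigned length; void *skb; };

    struct tx_ring {
        struct buffer *tpbuff;    /* one bookkeeping entry per descriptor */
        void *v_addr;             /* descriptor memory */
        unsigned size;            /* descriptor memory size in bytes */
        unsigned count;
        unsigned produce_idx, consume_idx;
    };

    static void tx_ring_clean(struct tx_ring *r,
                              void (*release)(struct buffer *))
    {
        unsigned i;

        for (i = 0; i < r->count; i++)
            release(&r->tpbuff[i]);               /* unmap DMA, free the skb */

        memset(r->tpbuff, 0, sizeof(*r->tpbuff) * r->count);
        memset(r->v_addr, 0, r->size);            /* no stale descriptors */
        r->consume_idx = 0;
        r->produce_idx = 0;
    }

    static void release_noop(struct buffer *b) { (void)b; }

    int main(void)
    {
        struct buffer bufs[4] = { { .dma_addr = 1 } };
        unsigned char descs[4 * 16] = { 0 };
        struct tx_ring r = {
            .tpbuff = bufs, .v_addr = descs, .size = sizeof(descs),
            .count = 4, .produce_idx = 3, .consume_idx = 1,
        };

        tx_ring_clean(&r, release_noop);
        return (int)r.tpbuff[0].dma_addr;   /* 0 after the reset */
    }
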
666 kfree(tx_q->tpd.tpbuff);
667 tx_q->tpd.tpbuff = NULL;
668 tx_q->tpd.v_addr = NULL;
669 tx_q->tpd.dma_addr = 0;
670 tx_q->tpd.size = 0;
681 size = sizeof(struct emac_buffer) * tx_q->tpd.count;
682 tx_q->tpd.tpbuff = kzalloc_node(size, GFP_KERNEL, node);
683 if (!tx_q->tpd.tpbuff)
686 tx_q->tpd.size = tx_q->tpd.count * (adpt->tpd_size * 4);
687 tx_q->tpd.dma_addr = ring_header->dma_addr + ring_header->used;
688 tx_q->tpd.v_addr = ring_header->v_addr + ring_header->used;
689 ring_header->used += ALIGN(tx_q->tpd.size, 8);
690 tx_q->tpd.produce_idx = 0;
691 tx_q->tpd.consume_idx = 0;
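
The allocation path (file lines 681-691) mirrors the free path above (file lines 666-670): the tpbuff bookkeeping array gets its own kzalloc_node allocation, while the descriptor memory is carved out of one shared DMA-coherent block (ring_header), advancing its used offset by the 8-byte-aligned ring size. That is why the free path only kfree()s tpbuff and merely forgets v_addr/dma_addr: that memory belongs to the shared block. A sketch of the carve, with assumed types:

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    struct ring_header {        /* one coherent block shared by all rings */
        uint64_t dma_addr;      /* bus address of the block */
        uint64_t used;          /* bytes already handed out */
    };

    /* Carve `size` bytes for one ring and return its bus address. */
    static uint64_t carve(struct ring_header *hdr, uint64_t size)
    {
        uint64_t addr = hdr->dma_addr + hdr->used;

        hdr->used += ALIGN_UP(size, 8);   /* keep the next carve 8-byte aligned */
        return addr;
    }

    int main(void)
    {
        struct ring_header hdr = { .dma_addr = 0x10000, .used = 0 };
        uint64_t tpd = carve(&hdr, 512 * 16);   /* e.g. 512 TPDs of 16 bytes */

        printf("tpd ring at 0x%llx, next carve at 0x%llx\n",
               (unsigned long long)tpd,
               (unsigned long long)(hdr.dma_addr + hdr.used));
        return 0;
    }
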
757 adpt->tx_q.tpd.count = adpt->tx_desc_cnt;
831 adpt->tx_q.tpd.produce_idx = 0;
832 adpt->tx_q.tpd.consume_idx = 0;
833 for (i = 0; i < adpt->tx_q.tpd.count; i++)
834 adpt->tx_q.tpd.tpbuff[i].dma_addr = 0;
1026 struct emac_tx_queue *tx_q, struct emac_tpd *tpd)
1030 tx_q->tpd.last_produce_idx = tx_q->tpd.produce_idx;
1031 hw_tpd = EMAC_TPD(tx_q, adpt->tpd_size, tx_q->tpd.produce_idx);
1033 if (++tx_q->tpd.produce_idx == tx_q->tpd.count)
1034 tx_q->tpd.produce_idx = 0;
1036 *(hw_tpd++) = tpd->word[0];
1037 *(hw_tpd++) = tpd->word[1];
1038 *(hw_tpd++) = tpd->word[2];
1039 *hw_tpd = tpd->word[3];
1047 EMAC_TPD(tx_q, adpt->tpd_size, tx_q->tpd.last_produce_idx);
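
emac_tx_tpd_create (file lines 1026-1039) is the produce step: it records the slot it is about to fill in last_produce_idx, locates the hardware descriptor, advances the producer index with wraparound, and copies in the four 32-bit words of the software-built TPD. The use at file line 1047 then sets the "last descriptor of this packet" flag on that remembered slot. A runnable model (the names and 4-word size come from the listing; the rest is assumed):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TPD_WORDS 4

    struct ring {
        uint32_t *v_addr;
        unsigned count;
        unsigned produce_idx;
        unsigned last_produce_idx;
    };

    static void tpd_create(struct ring *r, const uint32_t word[TPD_WORDS])
    {
        uint32_t *hw = r->v_addr + TPD_WORDS * r->produce_idx;

        /* Remember the slot so the LAST flag can be set on it afterwards. */
        r->last_produce_idx = r->produce_idx;
        if (++r->produce_idx == r->count)       /* wrap the producer */
            r->produce_idx = 0;

        memcpy(hw, word, sizeof(uint32_t) * TPD_WORDS);
    }

    int main(void)
    {
        uint32_t mem[2 * TPD_WORDS] = { 0 };
        struct ring r = { .v_addr = mem, .count = 2 };
        uint32_t w[TPD_WORDS] = { 1, 2, 3, 4 };

        tpd_create(&r, w);
        tpd_create(&r, w);
        printf("produce=%u last=%u\n", r.produce_idx, r.last_produce_idx);
        /* prints produce=0 last=1: the index wrapped after the second TPD */
        return 0;
    }
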
1170 u32 produce_idx = tx_q->tpd.produce_idx;
1171 u32 consume_idx = tx_q->tpd.consume_idx;
1175 (tx_q->tpd.count + consume_idx - produce_idx - 1);
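
File lines 1170-1175 count the free descriptors; the matched line is the wrapped branch of a ternary. One slot is deliberately never used, so produce_idx == consume_idx can only mean "empty" rather than being ambiguous with "full". The whole expression is equivalent to the following (branch order reconstructed; only the wrapped branch appears in the listing):

    #include <stdio.h>

    /* Free slots, keeping one slot unused so that produce == consume is
       unambiguously "empty". */
    static unsigned num_free(unsigned count, unsigned produce, unsigned consume)
    {
        return (consume > produce) ?
               (consume - produce - 1) :
               (count + consume - produce - 1);
    }

    int main(void)
    {
        printf("%u\n", num_free(8, 0, 0));   /* 7: empty ring of 8 */
        printf("%u\n", num_free(8, 7, 0));   /* 0: full ring */
        return 0;
    }
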
1187 while (tx_q->tpd.consume_idx != hw_consume_idx) {
1188 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx);
1203 if (++tx_q->tpd.consume_idx == tx_q->tpd.count)
1204 tx_q->tpd.consume_idx = 0;
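
The TX completion loop (file lines 1187-1204) drains the ring from the software consume index up to the index the hardware reports, releasing each buffer and wrapping at count. A schematic model, with the DMA-unmap/skb-free work reduced to a callback:

    #include <stdio.h>

    struct ring { unsigned count, consume_idx; };

    static void tx_complete(struct ring *r, unsigned hw_consume_idx,
                            void (*release)(unsigned idx))
    {
        while (r->consume_idx != hw_consume_idx) {
            release(r->consume_idx);              /* unmap DMA, free the skb */
            if (++r->consume_idx == r->count)     /* wrap */
                r->consume_idx = 0;
        }
    }

    static void show(unsigned idx) { printf("reclaim %u\n", idx); }

    int main(void)
    {
        struct ring r = { .count = 4, .consume_idx = 2 };

        tx_complete(&r, 1, show);   /* reclaims 2, 3, 0 and stops at 1 */
        return 0;
    }
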
1248 struct emac_tpd *tpd)
1281 TPD_IPV4_SET(tpd, 1);
1285 /* IPv6 TSO needs an extra TPD */
1288 memset(tpd, 0, sizeof(*tpd));
1297 TPD_LSOV_SET(tpd, 1);
1300 TPD_LSO_SET(tpd, 1);
1301 TPD_TCPHDR_OFFSET_SET(tpd, skb_transport_offset(skb));
1302 TPD_MSS_SET(tpd, skb_shinfo(skb)->gso_size);
1318 TPD_PAYLOAD_OFFSET_SET(tpd, cso >> 1);
1319 TPD_CXSUM_OFFSET_SET(tpd, css >> 1);
1320 TPD_CSX_SET(tpd, 1);
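
emac_tso_csum (file lines 1248-1320) stamps the offload fields. For TSO it records the TCP header offset and the MSS, and an IPv6 TSO frame takes a second descriptor (the LSOV bit at file line 1297 marks LSO version 2). For plain checksum offload the hardware takes the start (cso) and write-back (css) offsets in units of 16-bit words, which is why both are shifted right once at file lines 1318-1319. A small model of that encoding; the even-offset requirement follows from the shift discarding bit 0:

    #include <assert.h>
    #include <stdio.h>

    /* Byte offset -> 16-bit-word offset, as in "cso >> 1". An odd byte
       offset cannot be represented, so it has to be rejected up front. */
    static unsigned csum_offset_words(unsigned byte_offset)
    {
        assert((byte_offset & 1) == 0);
        return byte_offset >> 1;
    }

    int main(void)
    {
        /* e.g. checksum start at byte 34 (Ethernet 14 + IPv4 20) */
        printf("%u\n", csum_offset_words(34));   /* 17 */
        return 0;
    }
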
1329 struct emac_tpd *tpd)
1332 unsigned int first = tx_q->tpd.produce_idx;
1341 if (TPD_LSO(tpd)) {
1344 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
1356 TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
1357 TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
1358 TPD_BUF_LEN_SET(tpd, tpbuf->length);
1359 emac_tx_tpd_create(adpt, tx_q, tpd);
1364 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
1377 TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
1378 TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
1379 TPD_BUF_LEN_SET(tpd, tpbuf->length);
1380 emac_tx_tpd_create(adpt, tx_q, tpd);
1387 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
1397 TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
1398 TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
1399 TPD_BUF_LEN_SET(tpd, tpbuf->length);
1400 emac_tx_tpd_create(adpt, tx_q, tpd);
1404 /* The last tpd */
1417 tx_q->tpd.produce_idx = first;
1426 if (++first == tx_q->tpd.count)
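
emac_tx_fill_tpd (file lines 1329-1400) maps the header portion and then each page fragment, emitting one TPD per mapping via emac_tx_tpd_create, and finally flags the last one (the comment at file line 1404). If any DMA mapping fails, the unwind at file lines 1417-1426 rolls the producer index back to first (saved at file line 1332) and walks forward from there, unmapping every slot it had stamped. A model of that unwind, with assumed names:

    #include <stdio.h>

    struct ring { unsigned count, produce_idx; };

    /* Undo a partially filled packet: the slots [first, old producer) were
       stamped; hand them back and unmap each one. */
    static void fill_unwind(struct ring *r, unsigned first,
                            void (*unmap)(unsigned idx))
    {
        unsigned end = r->produce_idx;

        r->produce_idx = first;          /* hardware never sees these TPDs */
        while (first != end) {
            unmap(first);
            if (++first == r->count)
                first = 0;
        }
    }

    static void drop(unsigned idx) { printf("unmap %u\n", idx); }

    int main(void)
    {
        struct ring r = { .count = 4, .produce_idx = 1 };

        fill_unwind(&r, 3, drop);   /* unmaps 3, 0; producer back to 3 */
        return 0;
    }
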
1438 struct emac_tpd tpd;
1442 memset(&tpd, 0, sizeof(tpd));
1444 if (emac_tso_csum(adpt, tx_q, skb, &tpd) != 0) {
1453 TPD_CVLAN_TAG_SET(&tpd, tag);
1454 TPD_INSTC_SET(&tpd, 1);
1458 TPD_TYP_SET(&tpd, 1);
1461 emac_tx_fill_tpd(adpt, tx_q, skb, &tpd);
1474 prod_idx = (tx_q->tpd.produce_idx << tx_q->produce_shift) &
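
The transmit path (file lines 1438-1474) ties it together: a struct emac_tpd is built on the stack, emac_tso_csum and the VLAN fields fill in the offloads, emac_tx_fill_tpd maps the payload and stamps the ring, and the new producer index is published to the MAC, shifted into its register field and masked. A sketch of that encoding (the shift and mask are per-queue values in the driver; the ones below are invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Place the producer index into its field of the doorbell register. */
    static uint32_t prod_idx_field(uint32_t produce_idx, unsigned shift,
                                   uint32_t mask)
    {
        return (produce_idx << shift) & mask;
    }

    int main(void)
    {
        /* e.g. a 16-bit field in the upper half of the register */
        printf("0x%08x\n", prod_idx_field(0x2a, 16, 0xffff0000));  /* 0x002a0000 */
        return 0;
    }
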