Lines Matching defs:skb

6564 struct sk_buff *skb = ri->skb;
6567 if (unlikely(skb == NULL)) {
6579 skb_tstamp_tx(skb, &timestamp);
6584 skb_headlen(skb),
6587 ri->skb = NULL;
6597 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6599 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6604 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6617 bytes_compl += skb->len;
6619 dev_consume_skb_any(skb);
6671 /* Returns size of skb allocated or < 0 on error.
6789 /* Ensure that the update to the skb happens after the physical
6845 struct sk_buff *skb;
6910 skb = build_skb(data, frag_size);
6911 if (!skb) {
6915 skb_reserve(skb, TG3_RX_OFFSET(tp));
6920 skb = netdev_alloc_skb(tp->dev,
6922 if (skb == NULL)
6925 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6927 memcpy(skb->data,
6933 skb_put(skb, len);
6936 skb_hwtstamps(skb));
6942 skb->ip_summed = CHECKSUM_UNNECESSARY;
6944 skb_checksum_none_assert(skb);
6946 skb->protocol = eth_type_trans(skb, tp->dev);
6949 skb->protocol != htons(ETH_P_8021Q) &&
6950 skb->protocol != htons(ETH_P_8021AD)) {
6951 dev_kfree_skb_any(skb);
6957 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6960 napi_gro_receive(&tnapi->napi, skb);
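The matches from 6845 through 6960 show the receive path's copy-break decision: larger frames are wrapped with build_skb() around the existing ring buffer, while small frames are copied into a freshly allocated skb so the original buffer stays posted to the ring. A minimal userspace analogue of that decision follows; struct pkt, rx_copy_break() and the 256-byte threshold are illustrative stand-ins, not driver identifiers.

/* Userspace analogue of the copy-break pattern seen around 6910-6933:
 * large frames take over the ring buffer (build_skb()-style), small
 * frames are copied out so the ring buffer can be reposted at once. */
#include <stdlib.h>
#include <string.h>

#define COPY_THRESHOLD 256              /* illustrative, not the driver's value */

struct pkt {
    unsigned char *data;
    size_t len;
    int owns_buffer;                    /* 1 if we adopted the ring buffer */
};

static struct pkt *rx_copy_break(unsigned char *ring_buf, size_t len)
{
    struct pkt *p = malloc(sizeof(*p));
    if (!p)
        return NULL;

    if (len > COPY_THRESHOLD) {
        /* build_skb()-style path: reuse the existing buffer as packet data */
        p->data = ring_buf;
        p->owns_buffer = 1;
    } else {
        /* netdev_alloc_skb() + memcpy path: copy the small packet out */
        p->data = malloc(len);
        if (!p->data) {
            free(p);
            return NULL;
        }
        memcpy(p->data, ring_buf, len);
        p->owns_buffer = 0;             /* ring buffer stays posted for reuse */
    }
    p->len = len;
    return p;
}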
7093 * ordered correctly WRT the skb check above.
7151 * ordered correctly WRT the skb check above.
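The comments matched at 6789, 7093 and 7151 are about ordering: state tied to an skb pointer must become visible before the pointer itself is published, and the reader must pair that with its own ordering around the skb check. A minimal userspace analogue of that publish/observe pairing, using C11 release/acquire atomics in place of the kernel's barrier primitives; struct slot, publish() and observe() are illustrative names.

/* Userspace analogue of the ordering the matched comments describe:
 * make the slot's payload visible before publishing the pointer that
 * the other side tests, and acquire on the read side. */
#include <stdatomic.h>
#include <stddef.h>

struct slot {
    unsigned long dma_addr;             /* stand-in for the mapped address  */
    _Atomic(void *) skb;                /* pointer the other side checks    */
};

static void publish(struct slot *s, unsigned long addr, void *skb)
{
    s->dma_addr = addr;                 /* update the "physical" state first */
    atomic_store_explicit(&s->skb, skb, memory_order_release);
}

static void *observe(struct slot *s, unsigned long *addr_out)
{
    void *skb = atomic_load_explicit(&s->skb, memory_order_acquire);
    if (!skb)
        return NULL;                    /* nothing published yet */
    *addr_out = s->dma_addr;            /* ordered after the skb load */
    return skb;
}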
7774 struct sk_buff *skb;
7777 skb = txb->skb;
7778 txb->skb = NULL;
7782 skb_headlen(skb),
7792 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7816 struct sk_buff *new_skb, *skb = *pskb;
7821 new_skb = skb_copy(skb, GFP_ATOMIC);
7823 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7825 new_skb = skb_copy_expand(skb,
7826 skb_headroom(skb) + more_headroom,
7827 skb_tailroom(skb), GFP_ATOMIC);
7845 tnapi->tx_buffers[*entry].skb = new_skb;
7859 dev_consume_skb_any(skb);
7864 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7869 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7878 struct netdev_queue *txq, struct sk_buff *skb)
7880 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7899 segs = skb_gso_segment(skb, tp->dev->features &
7912 dev_consume_skb_any(skb);
7918 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7933 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7934 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7945 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7959 mss = skb_shinfo(skb)->gso_size;
7963 if (skb_cow_head(skb, 0))
7966 iph = ip_hdr(skb);
7967 tcp_opt_len = tcp_optlen(skb);
7969 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7974 if (skb->protocol == htons(ETH_P_8021Q) ||
7975 skb->protocol == htons(ETH_P_8021AD)) {
7976 if (tg3_tso_bug_gso_check(tnapi, skb))
7977 return tg3_tso_bug(tp, tnapi, txq, skb);
7981 if (!skb_is_gso_v6(skb)) {
7984 if (tg3_tso_bug_gso_check(tnapi, skb))
7985 return tg3_tso_bug(tp, tnapi, txq, skb);
7997 tcph = tcp_hdr(skb);
8033 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8037 if (skb->protocol == htons(ETH_P_8021Q) ||
8038 skb->protocol == htons(ETH_P_8021AD)) {
8039 if (skb_checksum_help(skb))
8047 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8050 if (skb_vlan_tag_present(skb)) {
8052 vlan = skb_vlan_tag_get(skb);
8055 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8057 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8061 len = skb_headlen(skb);
8063 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8068 tnapi->tx_buffers[entry].skb = skb;
8077 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8080 } else if (skb_shinfo(skb)->nr_frags > 0) {
8091 last = skb_shinfo(skb)->nr_frags - 1;
8093 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8099 tnapi->tx_buffers[entry].skb = NULL;
8119 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8128 return tg3_tso_bug(tp, tnapi, txq, skb);
8136 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8141 skb_tx_timestamp(skb);
8142 netdev_tx_sent_queue(txq, skb->len);
8170 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8172 dev_kfree_skb_any(skb);
8564 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8566 if (!skb)
8570 skb_shinfo(skb)->nr_frags - 1);
8572 dev_consume_skb_any(skb);
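The matches from 7774 through 8572 revolve around the tx_buffers[] bookkeeping: a ring slot records the skb while the hardware owns the descriptor, and completion or error unwinding clears the slot so later cleanup passes can skip empty entries (the 8564-8572 pattern). A small userspace sketch of that slot discipline; struct tx_slot, RING_SIZE and plain free() stand in for the driver's ring-info structure, ring sizing and dev_consume_skb_any().

/* Userspace sketch of the tx slot bookkeeping visible above: the slot
 * holds the buffer while "hardware" owns it, completion frees it and
 * NULLs the slot so a cleanup pass can skip free entries. */
#include <stdlib.h>

#define RING_SIZE 16                    /* illustrative ring size */

struct tx_slot {
    void *skb;                          /* NULL means the slot is free */
};

static struct tx_slot tx_ring[RING_SIZE];

static void tx_queue(unsigned int entry, void *skb)
{
    tx_ring[entry % RING_SIZE].skb = skb;   /* hand the buffer to "hardware" */
}

static void tx_complete(unsigned int entry)
{
    struct tx_slot *slot = &tx_ring[entry % RING_SIZE];

    if (!slot->skb)                     /* mirrors the "if (!skb) continue" check */
        return;
    free(slot->skb);                    /* dev_consume_skb_any() analogue */
    slot->skb = NULL;                   /* mark the slot reusable */
}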
13469 struct sk_buff *skb;
13490 skb = netdev_alloc_skb(tp->dev, tx_len);
13491 if (!skb)
13494 tx_data = skb_put(skb, tx_len);
13556 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13558 dev_kfree_skb(skb);
13563 tnapi->tx_buffers[val].skb = skb;
13576 tnapi->tx_buffers[val].skb = NULL;
13577 dev_kfree_skb(skb);
13606 dev_kfree_skb(skb);