Lines Matching defs:skb

6549 struct sk_buff *skb = ri->skb;
6552 if (unlikely(skb == NULL)) {
6564 skb_tstamp_tx(skb, &timestamp);
6568 skb_headlen(skb), DMA_TO_DEVICE);
6570 ri->skb = NULL;
6580 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6582 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6587 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6600 bytes_compl += skb->len;
6602 dev_consume_skb_any(skb);
6654 /* Returns size of skb allocated or < 0 on error.
6770 /* Ensure that the update to the skb happens after the physical
6826 struct sk_buff *skb;
6892 skb = build_skb(data, frag_size);
6894 skb = slab_build_skb(data);
6895 if (!skb) {
6899 skb_reserve(skb, TG3_RX_OFFSET(tp));
6904 skb = netdev_alloc_skb(tp->dev,
6906 if (skb == NULL)
6909 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6912 memcpy(skb->data,
6919 skb_put(skb, len);
6922 skb_hwtstamps(skb));
6928 skb->ip_summed = CHECKSUM_UNNECESSARY;
6930 skb_checksum_none_assert(skb);
6932 skb->protocol = eth_type_trans(skb, tp->dev);
6935 skb->protocol != htons(ETH_P_8021Q) &&
6936 skb->protocol != htons(ETH_P_8021AD)) {
6937 dev_kfree_skb_any(skb);
6943 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6946 napi_gro_receive(&tnapi->napi, skb);
7079 * ordered correctly WRT the skb check above.
7137 * ordered correctly WRT the skb check above.
7760 struct sk_buff *skb;
7763 skb = txb->skb;
7764 txb->skb = NULL;
7767 skb_headlen(skb), DMA_TO_DEVICE);
7776 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7800 struct sk_buff *new_skb, *skb = *pskb;
7805 new_skb = skb_copy(skb, GFP_ATOMIC);
7807 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7809 new_skb = skb_copy_expand(skb,
7810 skb_headroom(skb) + more_headroom,
7811 skb_tailroom(skb), GFP_ATOMIC);
7829 tnapi->tx_buffers[*entry].skb = new_skb;
7843 dev_consume_skb_any(skb);
7848 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7853 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7862 struct netdev_queue *txq, struct sk_buff *skb)
7864 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7883 segs = skb_gso_segment(skb, tp->dev->features &
7896 dev_consume_skb_any(skb);
7902 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7917 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7918 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7929 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7943 mss = skb_shinfo(skb)->gso_size;
7947 if (skb_cow_head(skb, 0))
7950 iph = ip_hdr(skb);
7951 tcp_opt_len = tcp_optlen(skb);
7953 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
7958 if (skb->protocol == htons(ETH_P_8021Q) ||
7959 skb->protocol == htons(ETH_P_8021AD)) {
7960 if (tg3_tso_bug_gso_check(tnapi, skb))
7961 return tg3_tso_bug(tp, tnapi, txq, skb);
7965 if (!skb_is_gso_v6(skb)) {
7968 if (tg3_tso_bug_gso_check(tnapi, skb))
7969 return tg3_tso_bug(tp, tnapi, txq, skb);
7981 tcph = tcp_hdr(skb);
8017 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8021 if (skb->protocol == htons(ETH_P_8021Q) ||
8022 skb->protocol == htons(ETH_P_8021AD)) {
8023 if (skb_checksum_help(skb))
8031 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8034 if (skb_vlan_tag_present(skb)) {
8036 vlan = skb_vlan_tag_get(skb);
8039 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8041 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8045 len = skb_headlen(skb);
8047 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8053 tnapi->tx_buffers[entry].skb = skb;
8062 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8065 } else if (skb_shinfo(skb)->nr_frags > 0) {
8076 last = skb_shinfo(skb)->nr_frags - 1;
8078 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8084 tnapi->tx_buffers[entry].skb = NULL;
8104 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8113 return tg3_tso_bug(tp, tnapi, txq, skb);
8121 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8126 skb_tx_timestamp(skb);
8127 netdev_tx_sent_queue(txq, skb->len);
8155 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8157 dev_kfree_skb_any(skb);
8549 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8551 if (!skb)
8555 skb_shinfo(skb)->nr_frags - 1);
8557 dev_consume_skb_any(skb);
13418 struct sk_buff *skb;
13439 skb = netdev_alloc_skb(tp->dev, tx_len);
13440 if (!skb)
13443 tx_data = skb_put(skb, tx_len);
13505 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13507 dev_kfree_skb(skb);
13512 tnapi->tx_buffers[val].skb = skb;
13525 tnapi->tx_buffers[val].skb = NULL;
13526 dev_kfree_skb(skb);
13555 dev_kfree_skb(skb);