Lines matching defs:skb in drivers/net/usb/r8152.c (Linux r8152 USB Ethernet driver)

2212 static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
2215 if (skb_shinfo(skb)->gso_size) {
2221 segs = skb_gso_segment(skb, features);
2233 dev_kfree_skb(skb);
2234 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2235 if (skb_checksum_help(skb) < 0)
2238 __skb_queue_head(list, skb);
2245 dev_kfree_skb(skb);
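The lines above (2212-2245) outline r8152_csum_workaround(): when the TX path cannot offload an skb, GSO frames are segmented in software with skb_gso_segment() and the segments are pushed back onto the TX list, while plain CHECKSUM_PARTIAL frames are resolved with skb_checksum_help() and requeued. A minimal sketch of that fallback pattern follows; the feature mask, the drop-statistics handling, and the exact labels in r8152.c are simplified, so treat this as an illustration rather than the driver's code.

#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Sketch: segment GSO frames in software, or fix up the checksum, then
 * requeue; anything that cannot be salvaged is dropped. */
static void csum_workaround_sketch(struct sk_buff *skb,
				   struct sk_buff_head *list)
{
	if (skb_shinfo(skb)->gso_size) {
		netdev_features_t features = netif_skb_features(skb);
		struct sk_buff *segs, *seg, *next;

		/* mask out the offloads the hardware could not handle */
		features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs) || !segs)
			goto drop;

		skb_list_walk_safe(segs, seg, next) {
			skb_mark_not_on_list(seg);
			__skb_queue_head(list, seg);
		}
		dev_kfree_skb(skb);		/* original skb is now redundant */
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb_checksum_help(skb) < 0)
			goto drop;
		__skb_queue_head(list, skb);	/* retry with checksum resolved */
	} else {
drop:
		dev_kfree_skb(skb);
	}
}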
2249 static inline void rtl_tx_vlan_tag(struct tx_desc *desc, struct sk_buff *skb)
2251 if (skb_vlan_tag_present(skb)) {
2254 opts2 = TX_VLAN_TAG | swab16(skb_vlan_tag_get(skb));
2259 static inline void rtl_rx_vlan_tag(struct rx_desc *desc, struct sk_buff *skb)
2264 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
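rtl_tx_vlan_tag() (2249-2254) copies an accelerated VLAN tag from the skb into the TX descriptor, byte-swapped for the hardware layout, and rtl_rx_vlan_tag() (2259-2264) performs the reverse on receive via __vlan_hwaccel_put_tag(). A short sketch of that translation; the demo_desc layout and the DEMO_*_VLAN_TAG bit positions are stand-ins, not the real tx_desc/rx_desc definitions in r8152.c.

#include <linux/skbuff.h>
#include <linux/if_vlan.h>

/* Stand-in descriptor; the real descriptors in r8152.c differ. */
struct demo_desc {
	__le32 opts1;
	__le32 opts2;
};

#define DEMO_TX_VLAN_TAG	BIT(16)	/* illustrative bit positions */
#define DEMO_RX_VLAN_TAG	BIT(16)

/* TX: place the skb's VLAN tag into the descriptor word; the tag is
 * byte-swapped because the NIC expects it big-endian inside a
 * little-endian descriptor field. */
static inline void demo_tx_vlan_tag(struct demo_desc *desc, struct sk_buff *skb)
{
	if (skb_vlan_tag_present(skb))
		desc->opts2 |= cpu_to_le32(DEMO_TX_VLAN_TAG |
					   swab16(skb_vlan_tag_get(skb)));
}

/* RX: if the descriptor carries a stripped tag, hand it back to the stack. */
static inline void demo_rx_vlan_tag(struct demo_desc *desc, struct sk_buff *skb)
{
	u32 opts2 = le32_to_cpu(desc->opts2);

	if (opts2 & DEMO_RX_VLAN_TAG)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       swab16(opts2 & 0xffff));
}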
2269 struct sk_buff *skb, u32 len)
2271 u32 mss = skb_shinfo(skb)->gso_size;
2280 u32 transport_offset = (u32)skb_transport_offset(skb);
2290 switch (vlan_get_protocol(skb)) {
2296 if (skb_cow_head(skb, 0)) {
2300 tcp_v6_gso_csum_prep(skb);
2311 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2312 u32 transport_offset = (u32)skb_transport_offset(skb);
2323 switch (vlan_get_protocol(skb)) {
2326 ip_protocol = ip_hdr(skb)->protocol;
2331 ip_protocol = ipv6_hdr(skb)->nexthdr;
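Lines 2269-2331 are from r8152_tx_csum(), which fills the TX descriptor's offload fields: for GSO frames it selects a TSO mode per vlan_get_protocol() (calling skb_cow_head() and tcp_v6_gso_csum_prep() for IPv6); for CHECKSUM_PARTIAL frames it picks checksum flags from the network protocol and the L4 protocol found in the IP header. The sketch below shows only that decision flow; the DEMO_* flags, the single offset limit, and the return convention are placeholders, and the real function also encodes the MSS and header offsets into opts1/opts2.

#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>

#define DEMO_TSO_V4	BIT(0)	/* illustrative offload flags */
#define DEMO_TSO_V6	BIT(1)
#define DEMO_CSUM_IP	BIT(2)
#define DEMO_CSUM_TCP	BIT(3)
#define DEMO_CSUM_UDP	BIT(4)
#define DEMO_MAX_OFFSET	0x7f	/* illustrative transport-offset limit */

/* Sketch of the offload decision; a non-zero return means the caller must
 * fall back to the software workaround. */
static int tx_offload_flags_sketch(struct sk_buff *skb, u32 *opts)
{
	if ((u32)skb_transport_offset(skb) > DEMO_MAX_OFFSET)
		return -EOPNOTSUPP;

	if (skb_shinfo(skb)->gso_size) {		/* TSO */
		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			*opts = DEMO_TSO_V4;
			break;
		case htons(ETH_P_IPV6):
			if (skb_cow_head(skb, 0))
				return -ENOMEM;
			/* prepare the TCP pseudo-header checksum for TSO */
			tcp_v6_gso_csum_prep(skb);
			*opts = DEMO_TSO_V6;
			break;
		default:
			return -EOPNOTSUPP;
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 ip_protocol;

		switch (vlan_get_protocol(skb)) {
		case htons(ETH_P_IP):
			*opts = DEMO_CSUM_IP;
			ip_protocol = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			*opts = 0;
			ip_protocol = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (ip_protocol == IPPROTO_TCP)
			*opts |= DEMO_CSUM_TCP;
		else if (ip_protocol == IPPROTO_UDP)
			*opts |= DEMO_CSUM_UDP;
		else
			return -EOPNOTSUPP;
	} else {
		*opts = 0;			/* no offload requested */
	}

	return 0;
}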
2374 struct sk_buff *skb;
2377 skb = __skb_dequeue(&skb_head);
2378 if (!skb)
2381 len = skb->len + sizeof(*tx_desc);
2384 __skb_queue_head(&skb_head, skb);
2391 if (r8152_tx_csum(tp, tx_desc, skb, skb->len)) {
2392 r8152_csum_workaround(tp, skb, &skb_head);
2396 rtl_tx_vlan_tag(tx_desc, skb);
2400 len = skb->len;
2401 if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
2405 dev_kfree_skb_any(skb);
2412 agg->skb_num += skb_shinfo(skb)->gso_segs ?: 1;
2414 dev_kfree_skb_any(skb);
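Lines 2374-2414 come from the TX aggregation fill path: skbs are dequeued from the TX queue, checked against the space left in the USB bulk-out buffer, prefixed with a tx_desc, flattened into the buffer with skb_copy_bits(), and freed once copied; skbs the hardware cannot offload are diverted through r8152_csum_workaround() (2391-2393). A compressed sketch of that loop follows, with the descriptor contents, alignment, VLAN/checksum calls, and statistics reduced to placeholders (DEMO_AGG_BUF_SZ and struct demo_tx_desc are stand-ins).

#include <linux/skbuff.h>

#define DEMO_AGG_BUF_SZ	16384	/* illustrative urb buffer size */

struct demo_tx_desc {
	__le32 opts1;
	__le32 opts2;
};

/* Pack as many queued skbs as fit into one buffer; returns bytes filled. */
static int tx_agg_fill_sketch(struct sk_buff_head *queue, u8 *buf)
{
	int remain = DEMO_AGG_BUF_SZ;
	u8 *tx_data = buf;

	while (remain >= (int)sizeof(struct demo_tx_desc)) {
		struct demo_tx_desc *tx_desc;
		struct sk_buff *skb;
		u32 len;

		skb = __skb_dequeue(queue);
		if (!skb)
			break;

		len = skb->len + sizeof(*tx_desc);
		if (len > remain) {
			/* does not fit: put it back for the next transfer */
			__skb_queue_head(queue, skb);
			break;
		}

		tx_desc = (struct demo_tx_desc *)tx_data;
		tx_desc->opts1 = cpu_to_le32(skb->len);	/* simplified */
		tx_desc->opts2 = 0;
		tx_data += sizeof(*tx_desc);

		if (skb_copy_bits(skb, 0, tx_data, skb->len) < 0) {
			/* copy failed: drop the packet, reuse the descriptor slot */
			dev_kfree_skb_any(skb);
			tx_data -= sizeof(*tx_desc);
			continue;
		}

		tx_data += skb->len;
		remain = DEMO_AGG_BUF_SZ - (int)(tx_data - buf);
		dev_kfree_skb_any(skb);		/* payload now lives in the buffer */
	}

	return DEMO_AGG_BUF_SZ - remain;
}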
2530 struct sk_buff *skb = __skb_dequeue(&tp->rx_queue);
2535 if (!skb)
2538 pkt_len = skb->len;
2539 napi_gro_receive(napi, skb);
2588 struct sk_buff *skb;
2608 skb = napi_alloc_skb(napi, rx_frag_head_sz);
2609 if (!skb) {
2614 skb->ip_summed = r8152_rx_csum(tp, rx_desc);
2615 memcpy(skb->data, rx_data, rx_frag_head_sz);
2616 skb_put(skb, rx_frag_head_sz);
2620 skb_add_rx_frag(skb, 0, agg->page,
2627 skb->protocol = eth_type_trans(skb, netdev);
2628 rtl_rx_vlan_tag(rx_desc, skb);
2632 stats->rx_bytes += skb->len;
2633 napi_gro_receive(napi, skb);
2635 __skb_queue_tail(&tp->rx_queue, skb);
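Lines 2530-2635 are from the RX bottom half: it first drains skbs deferred on tp->rx_queue into GRO (2530-2539), then for each received packet allocates a small head skb with napi_alloc_skb(), copies the first bytes, attaches the rest of the DMA page as a fragment with skb_add_rx_frag(), restores checksum state and the VLAN tag, and hands the skb to napi_gro_receive(); when the NAPI budget runs out the skb is queued on rx_queue instead. A condensed sketch of the per-packet part, with the aggregation-buffer walking, page refcounting, checksum decoding, and budget accounting left out (DEMO_RX_FRAG_HEAD is an illustrative head-copy size).

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#define DEMO_RX_FRAG_HEAD	256	/* illustrative head copy size */

/* Build one RX skb: linear head plus a page fragment, then GRO or defer. */
static int rx_one_packet_sketch(struct napi_struct *napi,
				struct net_device *netdev,
				struct sk_buff_head *rx_queue,
				struct page *page, u8 *rx_data,
				unsigned int pkt_len, bool budget_left)
{
	unsigned int head = min_t(unsigned int, pkt_len, DEMO_RX_FRAG_HEAD);
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, head);
	if (!skb)
		return -ENOMEM;			/* the real driver counts rx_dropped */

	skb->ip_summed = CHECKSUM_NONE;		/* stands in for r8152_rx_csum() */
	memcpy(skb->data, rx_data, head);
	skb_put(skb, head);

	if (pkt_len > head)			/* remainder stays in the page */
		skb_add_rx_frag(skb, 0, page,
				rx_data + head - (u8 *)page_address(page),
				pkt_len - head, PAGE_SIZE);

	skb->protocol = eth_type_trans(skb, netdev);

	if (budget_left)
		napi_gro_receive(napi, skb);
	else
		__skb_queue_tail(rx_queue, skb);	/* drained on the next poll */

	return 0;
}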
2797 struct sk_buff *skb;
2807 while ((skb = __skb_dequeue(&skb_head))) {
2808 dev_kfree_skb(skb);
2882 rtl8152_features_check(struct sk_buff *skb, struct net_device *dev,
2885 u32 mss = skb_shinfo(skb)->gso_size;
2888 if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) &&
2889 skb_transport_offset(skb) > max_offset)
2891 else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz)
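Lines 2882-2891 show rtl8152_features_check(), the ndo_features_check hook: if checksum or TSO offload is requested but the transport header starts beyond the offset the descriptor can encode, the offload feature bits are cleared so the stack resolves things in software; frames that would not fit in the aggregation buffer alongside a tx_desc also lose GSO. A sketch of that hook, with the offset and buffer limits replaced by illustrative constants (the real driver uses GTTCPHO/TCPHO limits and agg_buf_sz).

#include <linux/skbuff.h>
#include <linux/netdevice.h>

#define DEMO_MAX_TRANSPORT_OFFSET	0x7f	/* illustrative hardware limit */
#define DEMO_AGG_BUF_SZ			16384	/* illustrative urb buffer size */

struct demo_tx_desc {
	__le32 opts1;
	__le32 opts2;
};

/* Sketch of the ndo_features_check logic these lines show. */
static netdev_features_t features_check_sketch(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	u32 mss = skb_shinfo(skb)->gso_size;

	if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) &&
	    skb_transport_offset(skb) > DEMO_MAX_TRANSPORT_OFFSET)
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	else if ((skb->len + sizeof(struct demo_tx_desc)) > DEMO_AGG_BUF_SZ)
		features &= ~NETIF_F_GSO_MASK;

	return features;
}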
2897 static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
2902 skb_tx_timestamp(skb);
2904 skb_queue_tail(&tp->tx_queue, skb);
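Finally, lines 2897-2904 are from rtl8152_start_xmit(), which does little itself: it takes the TX timestamp, appends the skb to tp->tx_queue, kicks the deferred TX path (which runs the aggregation loop sketched above), and throttles the netdev queue when the backlog grows. A minimal sketch of that queue-and-schedule pattern, assuming a hypothetical driver-private struct; napi_schedule() and DEMO_TX_QLEN stand in for the driver's own tasklet/NAPI scheduling and queue-length limit.

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Hypothetical stand-in for the driver-private state the xmit hook touches. */
struct demo_priv {
	struct net_device *netdev;
	struct napi_struct napi;
	struct sk_buff_head tx_queue;
};

#define DEMO_TX_QLEN	60	/* illustrative backlog limit */

static netdev_tx_t start_xmit_sketch(struct sk_buff *skb,
				     struct net_device *netdev)
{
	struct demo_priv *priv = netdev_priv(netdev);

	skb_tx_timestamp(skb);			/* software TX timestamp hook */
	skb_queue_tail(&priv->tx_queue, skb);	/* defer to the aggregation path */

	/* Ask the poller to pack and submit queued skbs; throttle the stack
	 * if too many packets are already waiting. */
	napi_schedule(&priv->napi);
	if (skb_queue_len(&priv->tx_queue) > DEMO_TX_QLEN)
		netif_stop_queue(netdev);

	return NETDEV_TX_OK;
}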