Lines matching refs:rx_buf
Identifier cross-references for rx_buf in the sfc driver's RX path; each match below is prefixed with its source line number.
44 struct efx_rx_buffer *rx_buf,
48 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
56 rx_buf->flags |= EFX_RX_PKT_DISCARD;
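The first three matches (file lines 44-56) sit in the length-validation helper called at file line 139, efx_rx_packet__check_len(): the byte count reported by the RX event is checked against what the buffer can actually hold once the device's trailing padding (efx->type->rx_buffer_padding) is subtracted, and an oversized packet is flagged EFX_RX_PKT_DISCARD rather than delivered truncated. A minimal userspace sketch of that check; the struct layout and the flag value are illustrative, not the driver's:

    #include <stdio.h>

    #define EFX_RX_PKT_DISCARD 0x0004   /* assumed value, sketch only */

    struct rx_buf {
        unsigned int len;     /* bytes available in this receive buffer */
        unsigned int flags;
    };

    /* Model of the oversize check: hw_len comes from the RX completion
     * event; rx_buffer_padding is tail room the NIC may write into. */
    static void check_len(struct rx_buf *buf, unsigned int hw_len,
                          unsigned int rx_buffer_padding)
    {
        unsigned int max_len = buf->len - rx_buffer_padding;

        if (hw_len > max_len)
            buf->flags |= EFX_RX_PKT_DISCARD;
    }

    int main(void)
    {
        struct rx_buf buf = { .len = 2048, .flags = 0 };

        check_len(&buf, 4000, 16);      /* 4000 > 2048 - 16: discard */
        printf("flags = %#x\n", buf.flags);
        return 0;
    }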
68 struct efx_rx_buffer *rx_buf,
84 EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);
92 if (rx_buf->len > hdr_len) {
93 rx_buf->page_offset += hdr_len;
94 rx_buf->len -= hdr_len;
98 rx_buf->page, rx_buf->page_offset,
99 rx_buf->len);
100 rx_buf->page = NULL;
101 skb->len += rx_buf->len;
102 skb->data_len += rx_buf->len;
106 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
109 __free_pages(rx_buf->page, efx->rx_buffer_order);
110 rx_buf->page = NULL;
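File lines 68-110 show the skb-construction path (efx_rx_mk_skb(), to judge by the call at file line 218): the buffer must hold at least hdr_len bytes (the paranoid warning at line 84), that many bytes go into the skb's linear area, and whatever is left is attached in place as a page fragment (lines 92-102): page_offset/len step past the copied header, the page is handed to the skb, and skb->len/skb->data_len grow to match while the rx_buf gives up its page pointer. Multi-fragment packets advance through efx_rx_buf_next() (line 106); on the failure path the page goes back via __free_pages() (lines 109-110). A simplified standalone model of the linear/fragment split, with stand-in types:

    #include <stdio.h>
    #include <string.h>

    struct frag { const char *data; unsigned int len; };

    struct skb_model {
        char linear[128];           /* copied header bytes */
        unsigned int linear_len;
        struct frag frags[4];       /* references into the original pages */
        unsigned int nr_frags;
        unsigned int len;           /* total: linear + all fragments */
    };

    /* Copy hdr_len bytes out; reference the remainder in place, as the
     * kernel does with the real page via skb_fill_page_desc(). */
    static void mk_skb(struct skb_model *skb, const char *buf,
                       unsigned int buf_len, unsigned int hdr_len)
    {
        memcpy(skb->linear, buf, hdr_len);
        skb->linear_len = hdr_len;
        skb->len = hdr_len;

        if (buf_len > hdr_len) {
            struct frag *f = &skb->frags[skb->nr_frags++];

            f->data = buf + hdr_len;        /* page_offset += hdr_len */
            f->len = buf_len - hdr_len;     /* len -= hdr_len */
            skb->len += f->len;
        }
    }

    int main(void)
    {
        static const char pkt[512] = "headers...payload";
        struct skb_model skb = { .nr_frags = 0 };

        mk_skb(&skb, pkt, sizeof(pkt), 64);
        printf("linear=%u frag=%u total=%u\n",
               skb.linear_len, skb.frags[0].len, skb.len);
        return 0;
    }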
129 struct efx_rx_buffer *rx_buf;
133 rx_buf = efx_rx_buffer(rx_queue, index);
134 rx_buf->flags |= flags;
139 efx_rx_packet__check_len(rx_queue, rx_buf, len);
147 WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
148 rx_buf->flags |= EFX_RX_PKT_DISCARD;
155 (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
156 (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
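File lines 147-156 are the errored-completion branch of efx_rx_packet() plus its debug logging: hardware is expected to report an errored packet as len == 0 with the discard flag already set, the WARN_ON at line 147 checks exactly that invariant, and EFX_RX_PKT_DISCARD is forced on regardless; the log line then tags packets with [SUMMED]/[DISCARD] markers. A small model of the invariant check, with an assumed flag value and the kernel warning reduced to a message:

    #include <stdio.h>

    #define EFX_RX_PKT_DISCARD 0x0004   /* assumed value, sketch only */

    /* An errored RX completion should arrive as len == 0 with DISCARD
     * already set; anything else is worth a warning, but the packet is
     * discarded either way. */
    static void check_error_event(unsigned int len, unsigned int *flags)
    {
        if (!(len == 0 && (*flags & EFX_RX_PKT_DISCARD)))
            fprintf(stderr, "warning: unexpected errored-RX state\n");

        *flags |= EFX_RX_PKT_DISCARD;
    }

    int main(void)
    {
        unsigned int flags = 0;

        check_error_event(60, &flags);  /* len != 0: triggers warning */
        printf("flags = %#x\n", flags);
        return 0;
    }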
161 if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
163 efx_discard_rx_packet(channel, rx_buf, n_frags);
168 rx_buf->len = len;
173 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
178 prefetch(efx_rx_buf_va(rx_buf));
180 rx_buf->page_offset += efx->rx_prefix_size;
181 rx_buf->len -= efx->rx_prefix_size;
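File lines 161-181 cover the main body of efx_rx_packet(): a packet flagged for discard goes straight to efx_discard_rx_packet() and the function returns early (lines 161-163); otherwise the buffer length is recorded, the buffer is synced for CPU access, the packet's virtual address is prefetched, and the hardware RX prefix (efx->rx_prefix_size bytes of metadata the NIC writes ahead of the frame) is hidden by advancing page_offset and shrinking len (lines 180-181), so later stages only ever see the frame itself. A tiny model of the prefix strip; the sizes are made up:

    #include <stdio.h>

    struct rx_buf {
        unsigned int page_offset;   /* where the DMA'd data starts */
        unsigned int len;           /* bytes of DMA'd data */
    };

    /* The NIC prepends rx_prefix_size bytes of metadata to each packet;
     * hide them by moving the buffer window forward over the page. */
    static void strip_rx_prefix(struct rx_buf *buf, unsigned int rx_prefix_size)
    {
        buf->page_offset += rx_prefix_size;
        buf->len -= rx_prefix_size;
    }

    int main(void)
    {
        struct rx_buf buf = { .page_offset = 64, .len = 14 + 1514 };

        strip_rx_prefix(&buf, 14);  /* 14-byte prefix, illustrative */
        printf("offset=%u len=%u\n", buf.page_offset, buf.len);
        return 0;
    }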
190 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
193 efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
195 rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
196 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
200 rx_buf = efx_rx_buffer(rx_queue, index);
201 efx_recycle_rx_pages(channel, rx_buf, n_frags);
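File lines 190-201 handle scattered packets (n_frags > 1): every fragment except the last spans a full efx->rx_dma_len and is synced over that whole length (line 193), so only the final fragment's length needs computing (line 195) before it is synced over just the bytes actually used; the pages then go back to the recycle ring through efx_recycle_rx_pages() (line 201). The length arithmetic from line 195, checked standalone:

    #include <stdio.h>

    /* Every fragment but the last carries exactly rx_dma_len bytes, so
     * the last one holds whatever remains of the total packet length. */
    static unsigned int last_frag_len(unsigned int total_len,
                                      unsigned int n_frags,
                                      unsigned int rx_dma_len)
    {
        return total_len - (n_frags - 1) * rx_dma_len;
    }

    int main(void)
    {
        /* e.g. a 4000-byte packet scattered over 1536-byte buffers:
         * 4000 - 2 * 1536 = 928 bytes in the third fragment */
        printf("%u\n", last_frag_len(4000, 3, 1536));
        return 0;
    }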
212 struct efx_rx_buffer *rx_buf,
216 u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
218 skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
223 efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
230 if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
232 skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
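File lines 212-232 belong to the skb delivery routine (efx_rx_deliver(), to judge by the call at file line 408): the linear header is capped at EFX_SKB_HEADERS (line 216), the skb is built, the buffers are freed if that fails (line 223), and the hardware checksum result is translated into skb state: EFX_RX_PKT_CSUMMED means the stack can skip verification, and EFX_RX_PKT_CSUM_LEVEL says the NIC also validated the inner checksum of an encapsulated packet. A small model of that flag translation; the flag values are assumed:

    #include <stdio.h>

    #define EFX_RX_PKT_CSUMMED    0x0002    /* assumed values, sketch only */
    #define EFX_RX_PKT_CSUM_LEVEL 0x0200

    enum ip_summed { CHECKSUM_NONE, CHECKSUM_UNNECESSARY };

    struct skb_csum {
        enum ip_summed ip_summed;
        unsigned int csum_level;    /* 1: inner (tunnel) csum also valid */
    };

    static void set_csum(struct skb_csum *skb, unsigned int rx_flags)
    {
        if (rx_flags & EFX_RX_PKT_CSUMMED) {
            skb->ip_summed = CHECKSUM_UNNECESSARY;
            skb->csum_level = !!(rx_flags & EFX_RX_PKT_CSUM_LEVEL);
        } else {
            skb->ip_summed = CHECKSUM_NONE;
            skb->csum_level = 0;
        }
    }

    int main(void)
    {
        struct skb_csum skb;

        set_csum(&skb, EFX_RX_PKT_CSUMMED | EFX_RX_PKT_CSUM_LEVEL);
        printf("summed=%d level=%u\n", skb.ip_summed, skb.csum_level);
        return 0;
    }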
255 struct efx_rx_buffer *rx_buf, u8 **ehp)
278 efx_free_rx_buffers(rx_queue, rx_buf,
288 dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
289 rx_buf->len, DMA_FROM_DEVICE);
301 xdp.data_end = xdp.data + rx_buf->len;
315 rx_buf->page_offset += offset;
316 rx_buf->len -= offset;
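File lines 255-316 are from the XDP hook (efx_do_xdp(), named at the call on file line 399): the buffer is synced so the CPU can read what the device DMA'd (lines 288-289), an xdp_buff is pointed at the packet with data_end = data + len (line 301), the BPF program runs, and any head adjustment it made (with bpf_xdp_adjust_head(), for instance) is mirrored back into the rx_buf by shifting page_offset and len by however far data moved (lines 315-316). A pointer-arithmetic model of that mirroring step:

    #include <stdio.h>

    struct rx_buf {
        unsigned int page_offset;
        unsigned int len;
    };

    /* After the XDP program runs, its data pointer may have moved
     * forward (header stripped) or backward (header pushed); mirror the
     * move so the rx_buf still describes the packet exactly. */
    static void mirror_xdp_head(struct rx_buf *buf,
                                const unsigned char *data_before,
                                const unsigned char *data_after)
    {
        int offset = (int)(data_after - data_before);

        buf->page_offset += offset;     /* offset may be negative */
        buf->len -= offset;
    }

    int main(void)
    {
        unsigned char pkt[256];
        struct rx_buf buf = { .page_offset = 64, .len = 128 };

        /* say the program consumed a 14-byte Ethernet header */
        mirror_xdp_head(&buf, pkt, pkt + 14);
        printf("offset=%u len=%u\n", buf.page_offset, buf.len);
        return 0;
    }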
327 efx_free_rx_buffers(rx_queue, rx_buf, 1);
341 efx_free_rx_buffers(rx_queue, rx_buf, 1);
354 efx_free_rx_buffers(rx_queue, rx_buf, 1);
363 efx_free_rx_buffers(rx_queue, rx_buf, 1);
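The four efx_free_rx_buffers() matches at file lines 327, 341, 354 and 363 are the XDP outcomes on which the driver still owns the buffer and must release it itself; they line up with a rejected XDP_TX, a failed XDP_REDIRECT, aborted or unknown actions, and XDP_DROP (that mapping is inferred from the surrounding code, not shown in these matches). A schematic of the verdict handling with stub types, not the driver's actual control flow:

    #include <stdbool.h>
    #include <stdio.h>

    enum verdict { XDP_PASS, XDP_TX, XDP_REDIRECT, XDP_ABORTED, XDP_DROP };

    static void free_rx_buffers(void) { puts("buffer freed"); }
    static bool xmit(void)     { return false; }  /* pretend TX ring full */
    static bool redirect(void) { return false; }  /* pretend redirect failed */

    /* Returns true if the packet continues up the normal RX path. */
    static bool handle_verdict(enum verdict act)
    {
        switch (act) {
        case XDP_PASS:
            return true;            /* stack takes ownership */
        case XDP_TX:
            if (!xmit())
                free_rx_buffers();  /* TX refused: still ours to free */
            return false;
        case XDP_REDIRECT:
            if (!redirect())
                free_rx_buffers();
            return false;
        case XDP_DROP:
            free_rx_buffers();
            return false;
        case XDP_ABORTED:
        default:
            free_rx_buffers();      /* aborted/unknown: drop it */
            return false;
        }
    }

    int main(void)
    {
        printf("pass up: %d\n", handle_verdict(XDP_TX));
        return 0;
    }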
375 struct efx_rx_buffer *rx_buf =
377 u8 *eh = efx_rx_buf_va(rx_buf);
382 if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
383 rx_buf->len = le16_to_cpup((__le16 *)
387 * loopback layer, and free the rx_buf here
392 efx_loopback_rx_packet(efx, eh, rx_buf->len);
394 efx_free_rx_buffers(rx_queue, rx_buf,
399 if (!efx_do_xdp(efx, channel, rx_buf, &eh))
403 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
405 if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
406 efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, 0);
408 efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
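File lines 375-408 are the final dispatch, __efx_rx_packet() by the look of the logic: the packet's virtual address is taken (line 377), the true length is read out of the RX prefix when EFX_RX_PKT_PREFIX_LEN is set (the le16_to_cpup() at line 383), self-test traffic is diverted to the loopback layer and its buffers freed (lines 387-394), the XDP hook gets a veto (line 399), EFX_RX_PKT_CSUMMED is cleared when checksumming is switched off (line 403), and then TCP packets take the GRO path via efx_rx_packet_gro() unless the channel type installs its own receive_skb handler, everything else going through efx_rx_deliver() (lines 405-408). A compact model of that last GRO-versus-skb decision; the flag value is assumed:

    #include <stdbool.h>
    #include <stdio.h>

    #define EFX_RX_PKT_TCP 0x0040   /* assumed value, sketch only */

    enum rx_path { RX_PATH_GRO, RX_PATH_SKB };

    /* GRO only pays off for TCP, and only when the channel has no
     * special receive_skb handler that must see a complete sk_buff. */
    static enum rx_path pick_rx_path(unsigned int flags, bool has_receive_skb)
    {
        if ((flags & EFX_RX_PKT_TCP) && !has_receive_skb)
            return RX_PATH_GRO;
        return RX_PATH_SKB;
    }

    int main(void)
    {
        printf("%s\n", pick_rx_path(EFX_RX_PKT_TCP, false) == RX_PATH_GRO
                       ? "GRO" : "skb");
        return 0;
    }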