Lines matching refs:buffer

38 	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
40 if (!tx_queue->buffer)
62 kfree(tx_queue->buffer);
63 tx_queue->buffer = NULL;
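
Lines 38-63 above show the life cycle of the descriptor ring itself: a zeroed array is allocated with kcalloc() at setup time, and the pointer is reset to NULL after kfree() so later paths can detect a torn-down queue. The kernel-style sketch below illustrates that pattern; the struct layout, the sketch_* names and the flag values are simplified stand-ins invented for illustration, not the driver's real definitions, and the later sketches in this listing reuse them.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/xdp.h>

/* Simplified stand-ins for the driver's real structures: field names
 * follow the listing, but the flag values and the sketch_* names are
 * invented for illustration. */
#define EFX_TX_BUF_CONT         0x1     /* not the final descriptor of a packet */
#define EFX_TX_BUF_SKB          0x2     /* descriptor owns an skb reference */
#define EFX_TX_BUF_MAP_SINGLE   0x4     /* mapped with dma_map_single() */
#define EFX_TX_BUF_XDP          0x8     /* descriptor owns an XDP frame */

struct efx_tx_buffer {
        union {
                const struct sk_buff *skb;
                struct xdp_frame *xdpf;
        };
        dma_addr_t dma_addr;
        unsigned short flags;
        unsigned short len;
        unsigned short unmap_len;
        unsigned short dma_offset;
};

struct sketch_tx_queue {
        struct efx_tx_buffer *buffer;   /* ring of 'entries' descriptors */
        unsigned int ptr_mask;          /* entries - 1; entries is a power of two */
        unsigned int insert_count;      /* next slot to fill */
        unsigned int write_count;      /* last slot pushed to hardware */
        unsigned int read_count;        /* next slot to complete */
};

static int sketch_probe_tx_queue(struct sketch_tx_queue *txq,
                                 unsigned int entries)
{
        /* kcalloc() zeroes the ring, so every descriptor starts with
         * len == 0 and flags == 0, i.e. not in use (line 38 above). */
        txq->buffer = kcalloc(entries, sizeof(*txq->buffer), GFP_KERNEL);
        if (!txq->buffer)
                return -ENOMEM;
        txq->ptr_mask = entries - 1;
        return 0;
}

static void sketch_remove_tx_queue(struct sketch_tx_queue *txq)
{
        kfree(txq->buffer);
        /* Reset the pointer so the paths that test !txq->buffer
         * (lines 104 and 124 above) become no-ops after teardown. */
        txq->buffer = NULL;
}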
99 struct efx_tx_buffer *buffer;
104 if (!tx_queue->buffer)
111 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
112 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
124 if (!tx_queue->buffer)
139 kfree(tx_queue->buffer);
140 tx_queue->buffer = NULL;
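
Lines 99-140 drain whatever is still in flight before the ring is freed: read_count is a free-running counter, and masking it with ptr_mask maps it into the power-of-two ring, so it can wrap without ever being reset. A hedged sketch of that drain loop, reusing the types above (sketch_dequeue_buffer() is filled in by a later sketch):

static void sketch_dequeue_buffer(struct sketch_tx_queue *txq,
                                  struct device *dma_dev,
                                  struct efx_tx_buffer *buffer,
                                  unsigned int *pkts_compl,
                                  unsigned int *bytes_compl);

static void sketch_fini_tx_queue(struct sketch_tx_queue *txq,
                                 struct device *dma_dev)
{
        unsigned int pkts_compl = 0, bytes_compl = 0;

        if (!txq->buffer)       /* the queue was never initialised */
                return;

        /* Free any buffers left in the ring: masking read_count with
         * ptr_mask indexes the ring (line 111 above). */
        while (txq->read_count != txq->write_count) {
                struct efx_tx_buffer *buffer =
                        &txq->buffer[txq->read_count & txq->ptr_mask];

                sketch_dequeue_buffer(txq, dma_dev, buffer,
                                      &pkts_compl, &bytes_compl);
                ++txq->read_count;
        }
}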
145 struct efx_tx_buffer *buffer,
149 if (buffer->unmap_len) {
151 dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
153 if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
154 dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
157 dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
159 buffer->unmap_len = 0;
162 if (buffer->flags & EFX_TX_BUF_SKB) {
163 struct sk_buff *skb = (struct sk_buff *)buffer->skb;
180 dev_consume_skb_any((struct sk_buff *)buffer->skb);
184 } else if (buffer->flags & EFX_TX_BUF_XDP) {
185 xdp_return_frame_rx_napi(buffer->xdpf);
188 buffer->len = 0;
189 buffer->flags = 0;
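
Lines 145-189 release a single descriptor: recover the originally mapped DMA address by backing up dma_offset, unmap with the dma_unmap_*() variant matching how the buffer was mapped (EFX_TX_BUF_MAP_SINGLE), complete the skb or return the XDP frame, and zero len and flags so the slot reads as free. A sketch of that routine under the simplified layout above:

static void sketch_dequeue_buffer(struct sketch_tx_queue *txq,
                                  struct device *dma_dev,
                                  struct efx_tx_buffer *buffer,
                                  unsigned int *pkts_compl,
                                  unsigned int *bytes_compl)
{
        if (buffer->unmap_len) {
                /* Only the descriptor that owns the DMA mapping has
                 * unmap_len set; dma_addr may point into the middle of
                 * the mapping, so back up by the stored offset. */
                dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

                if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr,
                                         buffer->unmap_len, DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, unmap_addr,
                                       buffer->unmap_len, DMA_TO_DEVICE);
                buffer->unmap_len = 0;
        }

        if (buffer->flags & EFX_TX_BUF_SKB) {
                (*pkts_compl)++;
                (*bytes_compl) += buffer->skb->len;
                /* Cast away the const the descriptor stores the skb with. */
                dev_consume_skb_any((struct sk_buff *)buffer->skb);
        } else if (buffer->flags & EFX_TX_BUF_XDP) {
                xdp_return_frame_rx_napi(buffer->xdpf);
        }

        /* len == 0 && flags == 0 marks the slot as free again. */
        buffer->len = 0;
        buffer->flags = 0;
}

Keeping the unmap bookkeeping separate from len is what lets one dma_map_*() call back several descriptors: only the owner of the mapping unmaps it.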
209 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
211 if (!efx_tx_buffer_in_use(buffer)) {
219 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
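
Lines 209-219 are the completion path: walk forward from read_ptr and stop at the first descriptor that is not in use. A predicate consistent with the zeroing above is sketched below; the driver's real efx_tx_buffer_in_use() test may be narrower, so treat this as an assumption rather than the actual helper.

/* A slot freed by sketch_dequeue_buffer() has len == 0 and flags == 0,
 * so this predicate is consistent with the sketches above. */
static bool sketch_tx_buffer_in_use(const struct efx_tx_buffer *buffer)
{
        return buffer->len || buffer->flags;
}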
275 struct efx_tx_buffer *buffer;
282 buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
283 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
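
Lines 275-283 are the error-unwind path: if a DMA mapping fails partway through a packet, the descriptors already filled are walked back and released through the same dequeue routine. In the sketch below the unwind boundary is assumed to be write_count; the real driver passes the saved insert pointer explicitly.

static void sketch_enqueue_unwind(struct sketch_tx_queue *txq,
                                  struct device *dma_dev)
{
        unsigned int pkts_compl = 0, bytes_compl = 0;

        /* Walk insert_count back down, releasing each descriptor that
         * was filled before the mapping failure. */
        while (txq->insert_count != txq->write_count) {
                struct efx_tx_buffer *buffer;

                --txq->insert_count;
                buffer = &txq->buffer[txq->insert_count & txq->ptr_mask];
                sketch_dequeue_buffer(txq, dma_dev, buffer,
                                      &pkts_compl, &bytes_compl);
        }
}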
291 struct efx_tx_buffer *buffer;
296 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
303 buffer->len = dma_len;
304 buffer->dma_addr = dma_addr;
305 buffer->flags = EFX_TX_BUF_CONT;
311 return buffer;
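
Lines 291-311 fill one descriptor per DMA-contiguous chunk: claim the next insert slot, record the address and length, and mark the descriptor EFX_TX_BUF_CONT, i.e. "packet continues". The sketch below omits the real function's splitting of a chunk to the NIC's per-descriptor length limit:

static struct efx_tx_buffer *sketch_tx_map_chunk(struct sketch_tx_queue *txq,
                                                 dma_addr_t dma_addr,
                                                 size_t len)
{
        struct efx_tx_buffer *buffer =
                &txq->buffer[txq->insert_count & txq->ptr_mask];

        ++txq->insert_count;
        buffer->len = len;
        buffer->dma_addr = dma_addr;
        /* Assume "packet continues"; the caller rewrites flags on the
         * final descriptor of the packet (line 385 above). */
        buffer->flags = EFX_TX_BUF_CONT;
        return buffer;
}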
368 struct efx_tx_buffer *buffer;
371 buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
376 buffer->flags = EFX_TX_BUF_CONT | dma_flags;
377 buffer->unmap_len = unmap_len;
378 buffer->dma_offset = buffer->dma_addr - unmap_addr;
381 /* Store SKB details with the final buffer for
384 buffer->skb = skb;
385 buffer->flags = EFX_TX_BUF_SKB | dma_flags;
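
Lines 368-385 show the ownership rules that make completion safe: the descriptor that owns a DMA mapping records unmap_len and dma_offset (line 378 computes this fragment's offset into the mapping), and only the packet's final descriptor takes the skb reference with EFX_TX_BUF_SKB, so the skb is consumed exactly once. Note that flags are reassigned rather than OR-ed on the final descriptor (line 385 versus 376), which also clears EFX_TX_BUF_CONT. A linear-skb-only sketch, with the usual caveat that names and boundaries are simplified:

static int sketch_tx_map_data(struct sketch_tx_queue *txq,
                              struct device *dma_dev, struct sk_buff *skb)
{
        size_t len = skb_headlen(skb);
        struct efx_tx_buffer *buffer;
        dma_addr_t dma_addr;

        dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dma_dev, dma_addr))
                return -EIO;

        buffer = sketch_tx_map_chunk(txq, dma_addr, len);

        /* This descriptor owns the mapping, so it records how to undo
         * it; dma_offset is 0 here because the fragment starts at the
         * mapped address. */
        buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
        buffer->unmap_len = len;
        buffer->dma_offset = buffer->dma_addr - dma_addr;

        /* A linear skb has a single fragment, so this is also the
         * final descriptor: it takes the skb reference, and the
         * reassignment drops EFX_TX_BUF_CONT. */
        buffer->skb = skb;
        buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_MAP_SINGLE;
        return 0;
}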