Lines Matching defs:buffer
90 void *buffer;
106 * A buffer that contains a block of DMA-able coherent memory used for
114 struct descriptor buffer[];
133 * Pointer to a buffer inside buffer_list that contains the tail
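The matches at 90-133 all come from the driver's context bookkeeping. A minimal sketch of the structures these comment fragments belong to, reconstructed around the fields the listing shows (the remaining field names are assumptions and may differ from the source):

#include <linux/list.h>
#include <linux/types.h>

struct descriptor;			/* 16-byte OHCI DMA descriptor */

/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program (line 106).
 */
struct descriptor_buffer {
	struct list_head list;		/* link in the context's buffer_list */
	dma_addr_t buffer_bus;		/* bus address of buffer[] */
	size_t buffer_size;		/* usable bytes after this header */
	size_t used;			/* bytes consumed by descriptors */
	struct descriptor buffer[];	/* line 114: the descriptors proper */
};

struct context {
	/* ... */
	struct list_head buffer_list;
	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * descriptor (line 133).
	 */
	struct descriptor_buffer *buffer_tail;
	/* ... */
};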
725 vunmap(ctx->buffer);
758 * We search for the buffer that contains the last AR packet DMA data written
770 /* A buffer that is not yet completely filled must be the last one. */
785 * buffer's descriptor might never be updated by the
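Lines 758-785 come from the search for the last buffer the controller wrote into. A condensed sketch of that loop, assuming a ring of one descriptor per page with res_count counting the unfilled bytes (the ar_*_buffer_index() helpers and field names are assumptions; the three-buffer split case mentioned at 785 is only summarized in a comment):

static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
						 unsigned int *buffer_offset)
{
	unsigned int i = ar_first_buffer_index(ctx);	/* assumed helpers */
	unsigned int last = ctx->last_buffer_index;
	__le16 res_count = READ_ONCE(ctx->descriptors[i].res_count);

	/* A buffer that is not yet completely filled must be the last one. */
	while (i != last && res_count == 0) {
		unsigned int next_i = ar_next_buffer_index(i);
		__le16 next_res_count;

		rmb();	/* read descriptors in order */
		next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
		/* An untouched next descriptor means we stop here.  (The
		 * real driver also peeks one descriptor further, because a
		 * packet split over three buffers can leave the middle
		 * buffer's descriptor never updated by the controller.) */
		if (next_res_count == cpu_to_le16(PAGE_SIZE))
			break;

		i = next_i;
		res_count = next_res_count;
	}
	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
	return i;
}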
842 static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
849 p.header[0] = cond_le32_to_cpu(buffer[0]);
850 p.header[1] = cond_le32_to_cpu(buffer[1]);
851 p.header[2] = cond_le32_to_cpu(buffer[2]);
857 p.header[3] = (__force __u32) buffer[3];
863 p.header[3] = cond_le32_to_cpu(buffer[3]);
872 p.header[3] = cond_le32_to_cpu(buffer[3]);
893 p.payload = (void *) buffer + p.header_length;
897 status = cond_le32_to_cpu(buffer[length]);
937 return buffer + length + 1;
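The handle_ar_packet() matches at 842-937 show the parsing pattern: header quadlets are converted with cond_le32_to_cpu() (which byte-swaps only when the controller's quirky big-endian header mode applies), the raw (__force) copy at 857 is the quadlet-data case where the quadlet is payload and must not be swapped, the payload follows the header in the same mapping, and the return value points just past the trailing status quadlet so the caller can loop. A condensed sketch with the per-tcode header handling folded into assumed helpers:

static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_packet p;
	u32 status;
	size_t length;

	p.header[0] = cond_le32_to_cpu(buffer[0]);
	p.header[1] = cond_le32_to_cpu(buffer[1]);
	p.header[2] = cond_le32_to_cpu(buffer[2]);
	p.header[3] = cond_le32_to_cpu(buffer[3]);	/* tcode-dependent */
	p.header_length  = 16;				/* tcode-dependent too */
	p.payload_length = ar_payload_length(&p);	/* assumed helper */

	/* The payload immediately follows the header in the ring mapping. */
	p.payload = (void *)buffer + p.header_length;

	/* Both lengths are multiples of 4, so this indexes the trailing
	 * status quadlet the controller appends to every packet. */
	length = (p.header_length + p.payload_length) / 4;
	status = cond_le32_to_cpu(buffer[length]);

	ar_dispatch_packet(ctx, &p, status);		/* assumed dispatch */

	/* First quadlet of the next packet, just past the status word. */
	return buffer + length + 1;
}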
981 end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
985 * The filled part of the overall buffer wraps around; handle
986 * all packets up to the buffer end here. If the last packet
987 * wraps around, its tail will be visible after the buffer end
988 * because the buffer start pages are mapped there again.
990 void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
994 /* adjust p to point back into the actual buffer */
1041 ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
1042 if (!ctx->buffer)
1074 ctx->pointer = ctx->buffer;
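Lines 981-1074 are the heart of the AR ring: the DMA pages are vmap'ed with the first pages mapped a second time past the end, so a packet whose data wraps around the ring is still readable as one contiguous range (lines 985-988). A sketch of that mapping, with AR_BUFFERS and AR_WRAPAROUND_PAGES as assumed names and sizes:

#include <linux/vmalloc.h>
#include <linux/mm.h>

#define AR_BUFFERS		8	/* assumed ring size */
#define AR_WRAPAROUND_PAGES	4	/* assumed name for the mirrored tail */

static int ar_map_buffer(struct ar_context *ctx)	/* assumed helper */
{
	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
	unsigned int i;

	for (i = 0; i < AR_BUFFERS; i++)
		pages[i] = ctx->pages[i];	/* DMA pages, allocated earlier */
	/* Map the first pages a second time after the end of the ring, so
	 * a wrapping packet's tail is visible past the buffer end. */
	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
		pages[AR_BUFFERS + i] = ctx->pages[i];

	ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
	if (!ctx->buffer)
		return -ENOMEM;

	ctx->pointer = ctx->buffer;
	return 0;
}

The fold-back at 994 then reduces to subtracting AR_BUFFERS * PAGE_SIZE once the parser has run past buffer_end, and teardown (line 725) is a single vunmap(ctx->buffer).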
1111 /* If the branch address points to a buffer outside of the
1112 * current buffer, advance to the next buffer. */
1117 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
1124 /* If we've advanced to the next buffer, move the
1125 * previous buffer to the free list. */
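The matches at 1111-1125 belong to the completion walk over the split descriptor list: each processed block's branch_address leads to the next block, possibly in the next buffer, and a fully consumed buffer is recycled to the free tail. A sketch of that walk, using the descriptor_buffer layout sketched above (find_branch_descriptor() is the driver's helper for locating the branching descriptor of a z-block; locking is omitted):

static void context_walk_completions(struct context *ctx)	/* assumed name */
{
	struct descriptor_buffer *desc =
		list_entry(ctx->buffer_list.next, struct descriptor_buffer, list);
	struct descriptor *d, *last = ctx->last;

	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		u32 address = le32_to_cpu(last->branch_address);
		int z = address & 0xf;	/* low bits carry the block size */

		address &= ~0xf;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
		    address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					  struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		/* If we've advanced to the next buffer, move the previous
		 * buffer to the free list. */
		if (old_desc != desc) {
			old_desc->used = 0;
			list_move_tail(&old_desc->list, &ctx->buffer_list);
		}
		ctx->last = last;
	}
}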
1137 * Allocate a new buffer and add it to the list of free buffers for this
1158 offset = (void *)&desc->buffer - (void *)desc;
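The context_add_buffer() matches (1137, 1158) show how each new coherent page is split between the descriptor_buffer header and its descriptor payload; the pointer subtraction at 1158 is simply offsetof(struct descriptor_buffer, buffer). A sketch, assuming one page per block:

#include <linux/dma-mapping.h>

static int context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t bus_addr;
	size_t offset;

	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
				  &bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	offset = (void *)&desc->buffer - (void *)desc;	/* header size */
	desc->buffer_size = PAGE_SIZE - offset;		/* usable bytes */
	desc->buffer_bus  = bus_addr + offset;		/* bus address of buffer[] */
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	return 0;
}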
1193 * We put a dummy descriptor in the buffer that has a NULL
1197 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
1198 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
1199 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
1200 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
1201 ctx->last = ctx->buffer_tail->buffer;
1202 ctx->prev = ctx->buffer_tail->buffer;
1216 ((void *)&desc->buffer - (void *)desc));
1230 /* No room for the descriptor in this buffer, so advance to the
1234 /* If there is no free buffer next in the list,
1244 d = desc->buffer + desc->used / sizeof(*d);
1270 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
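Lines 1230-1270 are the allocate-then-link pattern that the dummy descriptor above (1193-1202) enables: descriptors are carved out of the tail buffer, and the append step computes the new block's bus address from its offset within the buffer and patches it into the previous block's branch_address. A sketch of the append, with the memory barrier that orders descriptor initialization before the link becomes visible to the controller (prev_z and the quirk handling of the real driver are simplified away):

static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	struct descriptor_buffer *desc = ctx->buffer_tail;
	struct descriptor *d_branch;
	dma_addr_t d_bus;

	/* Bus address of d, derived from its offset within the buffer. */
	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);

	wmb();	/* finish init of new descriptors before branch_address update */

	/* Link the new block; the low four bits carry the block size z. */
	d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z);
	d_branch->branch_address = cpu_to_le32(d_bus | z);

	ctx->prev = d;
	ctx->prev_z = z;
}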
1957 * bytes in the self ID receive buffer. Since we also receive
2014 * read out the self IDs from the DMA buffer. If this happens,
2015 * the DMA buffer will be overwritten with new self IDs and we
2019 * self IDs in the buffer before reading them out and compare
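The self-ID matches (1957, 2014-2019) describe a seqlock-like technique: remember the generation stamped into the buffer before copying the self IDs out, then compare against the controller's current generation afterwards. A sketch under the assumption that ohci->self_id is the CPU mapping of the DMA buffer and that the generation sits in byte 2 of both the header quadlet and the SelfIDCount register:

static int read_self_ids(struct fw_ohci *ohci, int self_id_count, u32 *self_ids)
{
	u32 generation, new_generation;
	int i, j;

	generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff;
	rmb();

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		u32 id  = cond_le32_to_cpu(ohci->self_id[i]);
		u32 inv = cond_le32_to_cpu(ohci->self_id[i + 1]);

		if (id != ~inv)		/* each self ID is sent twice, inverted */
			return -EIO;	/* corrupt buffer, wait for next reset */
		self_ids[j] = id;
	}
	rmb();

	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation)
		return -EBUSY;		/* a new bus reset overwrote the buffer */

	return 0;			/* consistent snapshot */
}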
2535 * won't deallocate the new buffer.
2538 * use that buffer with the new config_rom data, and
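The fragments at 2535-2538 belong to the config ROM update path: a freshly allocated coherent buffer is handed to the bus-reset worker, which switches the device over at the next reset, so buffer ownership must be transferred carefully. A sketch of the hand-off these comments describe (field and helper names beyond those in the matches are assumptions):

	spin_lock_irq(&ohci->lock);

	/*
	 * If no update is already pending, push our new allocation into
	 * next_config_rom and null the local pointer so the cleanup below
	 * won't deallocate the new buffer.  Otherwise, just use that
	 * buffer with the new config_rom data and let the cleanup free
	 * our now-unneeded allocation.
	 */
	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;
		next_config_rom = NULL;
	}
	copy_rom_quadlets(ohci->next_config_rom, config_rom, length); /* assumed */

	spin_unlock_irq(&ohci->lock);

	/* Frees either our unused buffer or nothing at all. */
	if (next_config_rom != NULL)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  next_config_rom, next_config_rom_bus);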
2906 * data buffer is in the context program's coherent page and must not
3238 struct fw_iso_buffer *buffer,
3314 page_bus = page_private(buffer->pages[page]);
3342 struct fw_iso_buffer *buffer,
3354 * buffer, so we need at least 8 bytes.
3397 page_bus = page_private(buffer->pages[page]);
3423 struct fw_iso_buffer *buffer,
3434 /* We need one descriptor for each page in the buffer. */
3437 if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
3460 page_bus = page_private(buffer->pages[page]);
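The page_bus = page_private(...) pattern recurring at 3314, 3397, and 3460 works because the ISO buffer setup stashes each page's DMA address in the page's private field. A sketch of that setup (this mirrors the fw_iso_buffer handling in firewire-core; the function name here is an assumption):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static int map_iso_pages(struct fw_iso_buffer *buffer, struct device *dev,
			 enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < buffer->page_count; i++) {
		dma_addr_t address = dma_map_page(dev, buffer->pages[i],
						  0, PAGE_SIZE, direction);

		if (dma_mapping_error(dev, address))
			return -ENOMEM;
		/* Stash the bus address; queue_iso_* reads it back with
		 * page_private(buffer->pages[page]). */
		set_page_private(buffer->pages[i], address);
	}
	return 0;
}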
3479 struct fw_iso_buffer *buffer,
3489 ret = queue_iso_transmit(ctx, packet, buffer, payload);
3492 ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
3495 ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
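The three calls matched at 3489-3495 are the arms of the ISO queueing dispatch, selected by the context type. A sketch of the surrounding function, reconstructed from those matches (the lock and the container_of step are assumptions based on the driver's usual pattern):

static int ohci_queue_iso(struct fw_iso_context *base,
			  struct fw_iso_packet *packet,
			  struct fw_iso_buffer *buffer,
			  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int ret = -ENOSYS;

	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
	switch (base->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		ret = queue_iso_transmit(ctx, packet, buffer, payload);
		break;
	case FW_ISO_CONTEXT_RECEIVE:
		ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
		break;
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
		break;
	}
	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

	return ret;
}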
3671 * we save space by using a common buffer for the AR request/
3672 * response descriptors and the self IDs buffer.
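The closing comment (3671-3672) refers to the probe-time allocation. A sketch of the shared page it describes; the listing only establishes that the AR request/response descriptors and the self IDs share one buffer, so the function name and split offsets below are assumptions:

static int ohci_alloc_misc_buffer(struct fw_ohci *ohci)	/* assumed name */
{
	/*
	 * One coherent page shared three ways (offsets assumed):
	 *   [0,            PAGE_SIZE/4)  AR request descriptors
	 *   [PAGE_SIZE/4,  PAGE_SIZE/2)  AR response descriptors
	 *   [PAGE_SIZE/2,  PAGE_SIZE)    self ID receive buffer
	 */
	ohci->misc_buffer = dma_alloc_coherent(ohci->card.device, PAGE_SIZE,
					       &ohci->misc_buffer_bus,
					       GFP_KERNEL);
	if (!ohci->misc_buffer)
		return -ENOMEM;

	ohci->self_id     = ohci->misc_buffer     + PAGE_SIZE / 2;
	ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE / 2;

	return 0;
}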