Lines Matching refs:buffer

90 	void *buffer;
106 * A buffer that contains a block of DMA-able coherent memory used for
114 struct descriptor buffer[];
133 * Pointer to a buffer inside buffer_list that contains the tail
725 if (!ctx->buffer)
728 vunmap(ctx->buffer);
761 * We search for the buffer that contains the last AR packet DMA data written
773 /* A buffer that is not yet completely filled must be the last one. */
788 * buffer's descriptor might be never updated by the
845 static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
852 p.header[0] = cond_le32_to_cpu(buffer[0]);
853 p.header[1] = cond_le32_to_cpu(buffer[1]);
854 p.header[2] = cond_le32_to_cpu(buffer[2]);
860 p.header[3] = (__force __u32) buffer[3];
866 p.header[3] = cond_le32_to_cpu(buffer[3]);
875 p.header[3] = cond_le32_to_cpu(buffer[3]);
896 p.payload = (void *) buffer + p.header_length;
900 status = cond_le32_to_cpu(buffer[length]);
940 return buffer + length + 1;
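
Note on the handle_ar_packet() lines above (845-940): the controller writes each AR packet into the ring as header quadlets, then payload, then one trailing status quadlet, and the next packet starts right after that trailer. The following is a hedged sketch of that walk, not the driver's code; demo_ar_packet and demo_next_packet() are made-up names, and the header-length logic is reduced to caller-supplied quadlet counts.

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_ar_packet {
	const __le32 *header;	/* first quadlet of this packet */
	const void *payload;	/* payload follows the header quadlets */
	u32 status;		/* trailer quadlet written by the controller */
};

/* Parse one packet starting at 'buffer'; return where the next one starts. */
static const __le32 *demo_next_packet(const __le32 *buffer,
				      unsigned int header_quadlets,
				      unsigned int payload_quadlets,
				      struct demo_ar_packet *p)
{
	unsigned int length = header_quadlets + payload_quadlets;

	p->header = buffer;
	p->payload = (const void *)buffer + header_quadlets * 4;
	p->status = le32_to_cpu(buffer[length]);

	return buffer + length + 1;	/* skip header, payload and trailer */
}
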
984 end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
988 * The filled part of the overall buffer wraps around; handle
989 * all packets up to the buffer end here. If the last packet
990 * wraps around, its tail will be visible after the buffer end
991 * because the buffer start pages are mapped there again.
993 void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
997 /* adjust p to point back into the actual buffer */
1044 ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
1045 if (!ctx->buffer)
1077 ctx->pointer = ctx->buffer;
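
Note on the wrap-around handling and the vmap() call above (984-997, 1044): a packet that straddles the end of the AR ring can be read as if it were contiguous because the ring's first pages are mapped a second time right after its end. Below is a minimal sketch of that double-mapping trick, not the driver's actual code; demo_ring, DEMO_PAGES, DEMO_WRAP and demo_ring_map() are made-up names, and DMA mapping of the pages is omitted.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define DEMO_PAGES	4	/* pages in the ring */
#define DEMO_WRAP	1	/* leading pages mapped again after the end */

struct demo_ring {
	struct page *pages[DEMO_PAGES];
	void *buffer;	/* DEMO_PAGES + DEMO_WRAP pages, virtually contiguous */
};

static int demo_ring_map(struct demo_ring *r)
{
	struct page *map[DEMO_PAGES + DEMO_WRAP];
	unsigned int i, allocated = 0;

	for (i = 0; i < DEMO_PAGES; i++) {
		r->pages[i] = alloc_page(GFP_KERNEL);
		if (!r->pages[i])
			goto free_pages;
		allocated++;
	}

	/* First all ring pages, then the first DEMO_WRAP pages once more. */
	for (i = 0; i < DEMO_PAGES + DEMO_WRAP; i++)
		map[i] = r->pages[i % DEMO_PAGES];

	r->buffer = vmap(map, DEMO_PAGES + DEMO_WRAP, VM_MAP, PAGE_KERNEL);
	if (!r->buffer)
		goto free_pages;
	return 0;

free_pages:
	while (allocated > 0)
		__free_page(r->pages[--allocated]);
	return -ENOMEM;
}

Teardown mirrors lines 725-728 above: vunmap() the alias mapping first, then release the pages.
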
1114 /* If the branch address points to a buffer outside of the
1115 * current buffer, advance to the next buffer. */
1120 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
1127 /* If we've advanced to the next buffer, move the
1128 * previous buffer to the free list. */
1140 * Allocate a new buffer and add it to the list of free buffers for this
1160 offset = (void *)&desc->buffer - (void *)desc;
1195 * We put a dummy descriptor in the buffer that has a NULL
1199 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
1200 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
1201 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
1202 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
1203 ctx->last = ctx->buffer_tail->buffer;
1204 ctx->prev = ctx->buffer_tail->buffer;
1217 desc->buffer_bus - ((void *)&desc->buffer - (void *)desc));
1232 /* No room for the descriptor in this buffer, so advance to the
1236 /* If there is no free buffer next in the list,
1246 d = desc->buffer + desc->used / sizeof(*d);
1272 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
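
Note on the descriptor-allocation lines above (1232-1272): a descriptor's CPU pointer and its bus address are derived from the same byte offset into one DMA-coherent block. A simplified sketch of that bookkeeping; demo_descriptor, demo_desc_buffer and demo_get_descriptor() are made-up names, with the field layout modeled on the 16-byte OHCI descriptor.

#include <linux/types.h>

struct demo_descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));

struct demo_desc_buffer {
	dma_addr_t buffer_bus;	/* bus address of buffer[0] */
	size_t buffer_size;	/* bytes available in buffer[] */
	size_t used;		/* bytes already handed out */
	struct demo_descriptor buffer[];
};

/* Hand out the next free descriptor and report its bus address. */
static struct demo_descriptor *demo_get_descriptor(struct demo_desc_buffer *db,
						   dma_addr_t *d_bus)
{
	struct demo_descriptor *d;

	if (db->used + sizeof(*d) > db->buffer_size)
		return NULL;	/* no room; caller advances to the next buffer */

	d = db->buffer + db->used / sizeof(*d);
	*d_bus = db->buffer_bus + (d - db->buffer) * sizeof(*d);
	db->used += sizeof(*d);
	return d;
}

Because the descriptors live in one coherent allocation, no translation table is needed: CPU and bus addresses stay a fixed offset apart.
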
1968 * bytes in the self ID receive buffer. Since we also receive
2025 * read out the self IDs from the DMA buffer. If this happens,
2026 * the DMA buffer will be overwritten with new self IDs and we
2030 * self IDs in the buffer before reading them out and compare
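
Note on lines 2025-2030 above: a new bus reset can overwrite the self-ID DMA buffer while it is being read out, so the driver checks the buffer's consistency around the copy. A hedged sketch of that snapshot-and-recheck pattern, with a made-up helper name (demo_copy_self_ids) and a simplified header layout; real code would also need appropriate read ordering (e.g. READ_ONCE()) on the DMA buffer accesses.

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/byteorder.h>

static int demo_copy_self_ids(const __le32 *dma_buffer, u32 *dst,
			      unsigned int quadlets)
{
	unsigned int i;
	u32 header = le32_to_cpu(dma_buffer[0]);	/* carries the generation */

	for (i = 0; i < quadlets; i++)
		dst[i] = le32_to_cpu(dma_buffer[1 + i]);

	/* Re-read the header: if it changed, the buffer was rewritten mid-copy. */
	if (le32_to_cpu(dma_buffer[0]) != header)
		return -EBUSY;

	return 0;
}
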
2542 * won't deallocate the new buffer.
2545 * use that buffer with the new config_rom data, and
2918 * data buffer is in the context program's coherent page and must not
3250 struct fw_iso_buffer *buffer,
3326 page_bus = page_private(buffer->pages[page]);
3354 struct fw_iso_buffer *buffer,
3366 * buffer, so we need at least 8 bytes.
3409 page_bus = page_private(buffer->pages[page]);
3435 struct fw_iso_buffer *buffer,
3446 /* We need one descriptor for each page in the buffer. */
3449 if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
3472 page_bus = page_private(buffer->pages[page]);
3491 struct fw_iso_buffer *buffer,
3501 ret = queue_iso_transmit(ctx, packet, buffer, payload);
3504 ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
3507 ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
3686 * we save space by using a common buffer for the AR request/
3687 * response descriptors and the self IDs buffer.
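
Note on lines 3686-3687 above: the AR request/response descriptors and the self-IDs buffer share a single coherent allocation. A sketch of carving one dma_alloc_coherent() block into sub-buffers by fixed offsets; the names and sizes (demo_ohci, DEMO_*_SIZE) are illustrative, not the driver's actual layout.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

#define DEMO_AR_REQ_DESC_SIZE	1024
#define DEMO_AR_RSP_DESC_SIZE	1024
#define DEMO_SELF_ID_SIZE	2048
#define DEMO_MISC_SIZE		(DEMO_AR_REQ_DESC_SIZE + \
				 DEMO_AR_RSP_DESC_SIZE + \
				 DEMO_SELF_ID_SIZE)

struct demo_ohci {
	void *misc_buffer;		/* one coherent block for everything */
	dma_addr_t misc_buffer_bus;
	__le32 *self_ids;		/* points into misc_buffer */
	dma_addr_t self_ids_bus;
};

static int demo_alloc_misc_buffer(struct device *dev, struct demo_ohci *ohci)
{
	size_t self_id_offset = DEMO_AR_REQ_DESC_SIZE + DEMO_AR_RSP_DESC_SIZE;

	ohci->misc_buffer = dma_alloc_coherent(dev, DEMO_MISC_SIZE,
					       &ohci->misc_buffer_bus,
					       GFP_KERNEL);
	if (!ohci->misc_buffer)
		return -ENOMEM;

	/* Each sub-buffer gets matching CPU and bus addresses by offset. */
	ohci->self_ids = ohci->misc_buffer + self_id_offset;
	ohci->self_ids_bus = ohci->misc_buffer_bus + self_id_offset;

	return 0;
}

One allocation keeps the bookkeeping simple and avoids spending a separate coherent page on each small buffer, which is the space saving the comment at 3686-3687 refers to.
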