Lines matching refs: buffer (cross-reference hits for 'buffer' in the Linux kernel's etnaviv DRM driver, drivers/gpu/drm/etnaviv/etnaviv_buffer.c)

25 static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
27 u32 *vaddr = (u32 *)buffer->vaddr;
29 BUG_ON(buffer->user_size >= buffer->size);
31 vaddr[buffer->user_size / 4] = data;
32 buffer->user_size += 4;
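
Taken together, lines 25-32 form the lowest-level emit helper: assert there is room, store one 32-bit word at the current write offset, advance user_size. A minimal reconstruction of the full body, assuming the struct etnaviv_cmdbuf layout (vaddr, size, user_size) from the driver's etnaviv_cmdbuf.h:

static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
	u32 *vaddr = (u32 *)buffer->vaddr;

	/* refuse to write past the end of the ring buffer */
	BUG_ON(buffer->user_size >= buffer->size);

	/* user_size counts bytes; the stream is written one u32 at a time */
	vaddr[buffer->user_size / 4] = data;
	buffer->user_size += 4;
}
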
35 static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
40 buffer->user_size = ALIGN(buffer->user_size, 8);
43 OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
46 OUT(buffer, value);
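
Lines 35-46 are the LOAD_STATE emitter: pad to a 64-bit boundary, then write a header word and the register value. The COUNT and OFFSET header fields are not visible in the hits above; the sketch below fills them in as found in the upstream driver (generated cmdstream.xml.h macros), so treat those lines as reconstructed:

static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
	u32 reg, u32 value)
{
	u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;

	/* FE commands must start on 64-bit boundaries */
	buffer->user_size = ALIGN(buffer->user_size, 8);

	/* write one register through the command stream */
	OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
		    VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
		    VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
	OUT(buffer, value);
}
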
49 static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
51 buffer->user_size = ALIGN(buffer->user_size, 8);
53 OUT(buffer, VIV_FE_END_HEADER_OP_END);
56 static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
58 buffer->user_size = ALIGN(buffer->user_size, 8);
60 OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
63 static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
66 buffer->user_size = ALIGN(buffer->user_size, 8);
68 OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
70 OUT(buffer, address);
73 static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
76 buffer->user_size = ALIGN(buffer->user_size, 8);
78 OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
79 OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
82 static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
84 CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
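
The remaining emitters (lines 49-86) all follow the same align-then-OUT pattern. The sketch below shows how the truncated bodies fit together; the CMD_LINK prefetch encoding and the CMD_SEM token macros are taken from the driver's generated headers and should be checked against the source:

static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_END_HEADER_OP_END);
}

static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	/* spin for 200 cycles before the FE re-fetches this location */
	OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
}

static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
	u16 prefetch, u32 address)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
		    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
	OUT(buffer, address);
}

static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
	u32 from, u32 to)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}

static inline void CMD_SEM(struct etnaviv_cmdbuf *buffer, u32 from, u32 to)
{
	/* a semaphore is just a LOAD_STATE of the semaphore token register */
	CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN,
		       VIVS_GL_SEMAPHORE_TOKEN_FROM(from) |
		       VIVS_GL_SEMAPHORE_TOKEN_TO(to));
}
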
90 struct etnaviv_cmdbuf *buffer, u8 pipe)
107 CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
108 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
109 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
111 CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
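
Lines 90-111 belong to the pipe-switch helper: flush the caches of the pipe being left, fence the front end against the pixel engine with a semaphore/stall pair, then select the new pipe. The flush-selection logic is not among the hits above and is reconstructed from the upstream function:

static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, u8 pipe)
{
	u32 flush = 0;

	/* flush the caches of the pipe we are switching away from */
	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;

	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
		       VIVS_GL_PIPE_SELECT_PIPE(pipe));
}
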
136 static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
139 u32 *lw = buffer->vaddr + wl_offset;
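
Line 139 is the core of the lockless kick-off: the FE may be executing this very WAIT while the CPU rewrites it, so the argument word must become visible before the command word, with a barrier between the two stores. A sketch of the full helper, matching the fragment:

static void etnaviv_buffer_replace_wait(struct etnaviv_cmdbuf *buffer,
	unsigned int wl_offset, u32 cmd, u32 arg)
{
	u32 *lw = buffer->vaddr + wl_offset;

	lw[1] = arg;
	/* make sure the GPU sees the argument before the command word */
	mb();
	lw[0] = cmd;
	/* make sure the new command is visible before anything that follows */
	mb();
}
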
148 * Ensure that there is space in the command buffer to contiguously write
149 * 'cmd_dwords' 64-bit words into the buffer, wrapping if necessary.
152 struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
154 if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
155 buffer->user_size = 0;
157 return etnaviv_cmdbuf_get_va(buffer,
159 buffer->user_size;
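
Lines 152-159 assemble into a simple wrap-around reservation: if cmd_dwords 64-bit slots no longer fit contiguously, rewind the write offset to zero, then hand back the GPU virtual address of the write position:

static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buffer, unsigned int cmd_dwords)
{
	/* wrap to the start if the request does not fit contiguously */
	if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size)
		buffer->user_size = 0;

	/* GPU virtual address of the current write position */
	return etnaviv_cmdbuf_get_va(buffer,
				     &gpu->mmu_context->cmdbuf_mapping) +
	       buffer->user_size;
}
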
164 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
168 /* initialize buffer */
169 buffer->user_size = 0;
171 CMD_WAIT(buffer);
172 CMD_LINK(buffer, 2,
173 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
174 + buffer->user_size - 4);
176 return buffer->user_size / 8;
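
Lines 164-176 initialize the ring as a two-command idle loop: one WAIT followed by a LINK that points back at that WAIT (user_size - 4 lands on the WAIT's offset), so the FE spins until a submit patches the WAIT. The return value is the FE prefetch size in 64-bit units:

u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;

	/* initialize buffer */
	buffer->user_size = 0;

	CMD_WAIT(buffer);
	/* LINK back to the WAIT just emitted: user_size - 4 points at it */
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer,
				       &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);

	/* FE prefetch size, in 64-bit words */
	return buffer->user_size / 8;
}
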
181 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
185 buffer->user_size = 0;
188 CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
190 CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
192 CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
193 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
194 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
198 CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
200 CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
202 CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
203 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
204 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
207 CMD_END(buffer);
209 buffer->user_size = ALIGN(buffer->user_size, 8);
211 return buffer->user_size / 8;
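
Lines 181-211 build a standalone MMUv2 setup block: the same pipe-select, MMU configuration, safe-address and semaphore/stall sequence is emitted once for the 3D pipe and once for the 2D pipe, then closed with END so it can be executed by itself at init time. A condensed sketch showing one of the two per-pipe sequences; the feature-bit guards and the MODE4_K flag are reconstructed from the upstream function:

u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr,
				u32 safe_addr)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;

	buffer->user_size = 0;

	/* emitted once per present pipe (3D shown, 2D is identical) */
	CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
		       VIVS_GL_PIPE_SELECT_PIPE(ETNA_PIPE_3D));
	CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
		       mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K);
	CMD_LOAD_STATE(buffer, VIVS_MMUv2_SAFE_ADDRESS, safe_addr);
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);

	CMD_END(buffer);

	buffer->user_size = ALIGN(buffer->user_size, 8);

	return buffer->user_size / 8;
}
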
216 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
220 buffer->user_size = 0;
222 CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
225 CMD_END(buffer);
227 buffer->user_size = ALIGN(buffer->user_size, 8);
229 return buffer->user_size / 8;
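
Lines 216-229 are the analogous one-shot block for the secure (PTA) MMU mode: a single LOAD_STATE of the page-table-array index, closed with END:

u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;

	buffer->user_size = 0;

	CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
		       VIVS_MMUv2_PTA_CONFIG_INDEX(id));

	CMD_END(buffer);

	buffer->user_size = ALIGN(buffer->user_size, 8);

	return buffer->user_size / 8;
}
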
234 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
235 unsigned int waitlink_offset = buffer->user_size - 16;
257 link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);
259 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
260 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
262 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
263 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
264 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
265 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
267 CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
270 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
271 CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
272 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
274 CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
278 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
279 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
281 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
282 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
283 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
284 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
286 CMD_END(buffer);
288 etnaviv_buffer_replace_wait(buffer, waitlink_offset,
294 etnaviv_buffer_replace_wait(buffer, waitlink_offset,
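
Lines 234-294 drain the ring: if the active pipe needs a cache flush, reserve space, emit the flush and fence sequence terminated by END, and patch the previous WAIT into a LINK to it; otherwise patch the WAIT directly into an END. The hits above also show an extra enable/fence/disable dance around the BLT engine on hardware that has one; the condensed sketch below omits that path and the full 3D flush mask:

void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	/* the idle WAIT/LINK pair occupies the last 16 bytes of the ring */
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 link_target, flush = 0;

	if (gpu->exec_state == ETNA_PIPE_2D)
		flush = VIVS_GL_FLUSH_CACHE_PE2D;
	else if (gpu->exec_state == ETNA_PIPE_3D)
		flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;

	if (flush) {
		unsigned int dwords = 7;

		link_target = etnaviv_buffer_reserve(gpu, buffer, dwords);

		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
		if (gpu->exec_state == ETNA_PIPE_3D)
			CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
				       VIVS_TS_FLUSH_CACHE_FLUSH);
		CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
		CMD_END(buffer);

		/* point the idle WAIT at the flush sequence */
		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_LINK_HEADER_OP_LINK |
					    VIV_FE_LINK_HEADER_PREFETCH(dwords),
					    link_target);
	} else {
		/* nothing to flush: turn the WAIT directly into an END */
		etnaviv_buffer_replace_wait(buffer, waitlink_offset,
					    VIV_FE_END_HEADER_OP_END, 0);
	}
}
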
299 /* Append a 'sync point' to the ring buffer. */
302 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
303 unsigned int waitlink_offset = buffer->user_size - 16;
313 target = etnaviv_buffer_reserve(gpu, buffer, dwords);
316 CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
320 CMD_END(buffer);
323 CMD_WAIT(buffer);
324 CMD_LINK(buffer, 2,
325 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
326 + buffer->user_size - 4);
330 * WAIT with a link to the address in the ring buffer.
332 etnaviv_buffer_replace_wait(buffer, waitlink_offset,
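
Lines 302-332 append a 'sync point': an EVENT so the PE raises an interrupt, an END so the FE pauses, then a fresh WAIT/LINK idle loop, and finally the previous WAIT is rewritten into a LINK to start it all. A sketch closely following the fragments:

void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 dwords, target;

	/* event + end + wait + link */
	dwords = 4;
	target = etnaviv_buffer_reserve(gpu, buffer, dwords);

	/* signal the sync point event from the pixel engine */
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);

	/* stop the FE to 'pause' the GPU */
	CMD_END(buffer);

	/* append a new idle waitlink loop */
	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer,
				       &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);

	/* kick off by replacing the previous WAIT with a LINK to here */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(dwords),
				    target);
}
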
338 /* Append a command buffer to the ring buffer. */
343 struct etnaviv_cmdbuf *buffer = &gpu->buffer;
344 unsigned int waitlink_offset = buffer->user_size - 16;
357 etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
364 * If we need maintenance prior to submitting this buffer, we will
366 * link to this buffer - a total of four additional words.
390 target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
407 CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
421 CMD_LOAD_STATE(buffer,
429 CMD_LOAD_STATE(buffer, VIVS_MMUv2_CONFIGURATION,
431 CMD_SEM(buffer, SYNC_RECIPIENT_FE,
433 CMD_STALL(buffer, SYNC_RECIPIENT_FE,
441 etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
445 /* And the link to the submitted buffer */
448 CMD_LINK(buffer, link_dwords, link_target);
456 * Append a LINK to the submitted command buffer to return to
457 * the ring buffer. return_target is the ring target address.
471 return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
476 * the wait command to the ring buffer.
479 CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
482 CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
486 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
487 CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);
488 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
490 CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
494 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
495 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
498 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
499 CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
500 CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_BLT);
501 CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x0);
504 CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
506 CMD_WAIT(buffer);
507 CMD_LINK(buffer, 2,
508 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
509 + buffer->user_size - 4);
521 pr_info("link op: %p\n", buffer->vaddr + waitlink_offset);
529 * WAIT with a link to the address in the ring buffer.
531 etnaviv_buffer_replace_wait(buffer, waitlink_offset,
537 etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
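
Lines 343-537 are the submit path. In outline: emit optional maintenance (MMU flush, pipe switch) followed by a LINK into the user command buffer; append a return LINK from that buffer back into the ring; emit the post-submit cache flush, fences, completion EVENT and a new WAIT/LINK idle loop; and only then kick everything off by patching the old WAIT. A heavily condensed sketch of that skeleton, with the MMU-flush, BLT and debug-dump branches omitted:

void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
	struct etnaviv_iommu_context *mmu_context, unsigned int event,
	struct etnaviv_cmdbuf *cmdbuf)
{
	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
	unsigned int waitlink_offset = buffer->user_size - 16;
	u32 return_target, return_dwords = 7;
	u32 link_target, link_dwords;
	bool switch_context = gpu->exec_state != exec_state;

	(void)mmu_context; /* MMU context switching omitted in this sketch */

	link_target = etnaviv_cmdbuf_get_va(cmdbuf,
					    &gpu->mmu_context->cmdbuf_mapping);
	link_dwords = cmdbuf->size / 8;

	if (switch_context) {
		/* 4 dwords of pipe switch plus 1 dword of LINK */
		u32 target, extra_dwords = 4 + 1;

		target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);

		etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
		gpu->exec_state = exec_state;

		/* and the link to the submitted buffer */
		CMD_LINK(buffer, link_dwords, link_target);

		/* the kick-off LINK now targets the maintenance block */
		link_target = target;
		link_dwords = extra_dwords;
	}

	/* return LINK from the user buffer back into the ring */
	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
	CMD_LINK(cmdbuf, return_dwords, return_target);

	/* flush, fence, signal completion, then idle again */
	CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
		       VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR);
	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
		       VIVS_GL_EVENT_FROM_PE);
	CMD_WAIT(buffer);
	CMD_LINK(buffer, 2,
		 etnaviv_cmdbuf_get_va(buffer,
				       &gpu->mmu_context->cmdbuf_mapping)
		 + buffer->user_size - 4);

	/* finally, make the running WAIT jump into the new commands */
	etnaviv_buffer_replace_wait(buffer, waitlink_offset,
				    VIV_FE_LINK_HEADER_OP_LINK |
				    VIV_FE_LINK_HEADER_PREFETCH(link_dwords),
				    link_target);
}
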