Lines Matching defs:buffer

47  *    - GPR 14 for secondary command buffer returns
176 /* Broadwell requires that we specify a buffer size for a bunch of
510 * portion of a HiZ buffer. Testing has revealed that Gfx8 only supports
623 /* Transitions a HiZ-enabled depth buffer from one layout to another. Unless
624 * the initial layout is undefined, the HiZ buffer and depth buffer will
703 /* Transitions a HiZ-enabled depth buffer from one layout to another. Unless
704 * the initial layout is undefined, the HiZ buffer and depth buffer will
720 /* On gfx7, we have to store a texturable version of the stencil buffer in
1103 * image's fast clear state buffer.
1174 * @brief Transitions a color buffer from one layout to another.
1354 * Having an aux buffer with invalid data is a problem for two reasons:
1356 * 1) Having an invalid value in the buffer can confuse the hardware.
1363 * image as a storage image, then we must have the aux buffer in the
1420 "define an MCS buffer.");
1491 /* Perform a resolve to synchronize data between the main and aux buffer.
1625 * command buffer's state. Otherwise, we must *reset* its state. In both
1630 * If a command buffer is in the executable state and the command buffer
1633 * vkBeginCommandBuffer implicitly resets the command buffer, behaving
1636 * the command buffer in the recording state.
1649 * secondary command buffer is considered to be entirely inside a render
1650 * pass. If this is a primary command buffer, then this bit is ignored.
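
The matches above quote the rules the driver follows for vkBeginCommandBuffer: beginning a command buffer that is already in the executable state implicitly resets it (this is only valid when its pool was created with VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT) and leaves it in the recording state, and the render-pass-continue flag is only meaningful for secondary command buffers. A minimal application-side usage sketch; the function and parameter names are placeholders, not driver code:

#include <vulkan/vulkan.h>

/* Re-record a secondary command buffer.  If `cmd` is in the executable
 * state and its pool was created with
 * VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, vkBeginCommandBuffer
 * implicitly resets it and puts it back in the recording state.
 * VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT marks the whole
 * buffer as executing inside a render pass; a primary ignores the bit.
 */
static VkResult
rerecord_secondary(VkCommandBuffer cmd,
                   const VkCommandBufferInheritanceInfo *inheritance)
{
   const VkCommandBufferBeginInfo begin_info = {
      .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
      .flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT,
      .pInheritanceInfo = inheritance,
   };

   VkResult result = vkBeginCommandBuffer(cmd, &begin_info);
   if (result != VK_SUCCESS)
      return result;

   /* ... record commands ... */

   return vkEndCommandBuffer(cmd);
}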
1659 /* We sometimes store vertex data in the dynamic state buffer for blorp
1665 * blorp at least once per primary command buffer so it shouldn't be
1674 "new cmd buffer");
1676 /* Re-emit the aux table register in every command buffer. This way we
1677 * ensure that we have the table even if this command buffer doesn't
1683 "new cmd buffer with aux-tt");
1738 * changed by a previous command buffer.
1752 /* If secondary buffer supports conditional rendering
1783 * the end of the previous batch buffer. This has been fine so far since
1825 /* We want every command buffer to start with the PMA fix in a known state,
1826 * so we disable it at the end of the command buffer.
1859 /* The secondary command buffer doesn't know which textures etc. have been
1873 /* Secondary buffer is constructed as if it will be executed
1928 "Secondary cmd buffer not tracked in VF cache");
2039 * surface) a previous render target and/or depth/stencil buffer
2668 /* This is a descriptor set buffer so the set index is actually
2786 if (desc->buffer) {
2787 /* Compute the offset within the buffer */
2791 /* Clamp to the buffer size */
2792 offset = MIN2(offset, desc->buffer->vk.size);
2793 /* Clamp the range to the buffer size */
2794 uint32_t range = MIN2(desc->range, desc->buffer->vk.size - offset);
2801 anv_address_add(desc->buffer->address, offset);
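
The matches at 2786-2801 show the clamping applied before binding a buffer descriptor: the offset is clamped to the buffer size, the range is clamped so it cannot run past the end of the buffer, and the surface address is then the buffer address plus the clamped offset. A self-contained sketch of that arithmetic, using simplified stand-in types rather than the real anv_buffer/descriptor structs:

#include <stdint.h>

#define MIN2(a, b) ((a) < (b) ? (a) : (b))

/* Simplified stand-ins for the buffer and descriptor in the listing. */
struct sketch_buffer {
   uint64_t size;
};

struct sketch_desc {
   struct sketch_buffer *buffer;
   uint64_t offset;
   uint32_t range;
};

static uint32_t
clamp_descriptor_range(const struct sketch_desc *desc, uint64_t *out_offset)
{
   /* Clamp the offset to the buffer size. */
   uint64_t offset = MIN2(desc->offset, desc->buffer->size);

   /* Clamp the range so offset + range stays within the buffer. */
   uint32_t range = MIN2(desc->range, desc->buffer->size - offset);

   *out_offset = offset;
   return range;
}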
3020 /* This is a descriptor set buffer so the set index is
3059 if (desc->buffer) {
3064 return anv_address_add(desc->buffer->address,
3083 /** Returns the size in bytes of the bound buffer
3085 * The range is relative to the start of the buffer, not the start of the
3131 if (!desc->buffer)
3135 /* Compute the offset within the buffer */
3141 /* Clamp to the buffer size */
3142 offset = MIN2(offset, desc->buffer->vk.size);
3143 /* Clamp the range to the buffer size */
3144 uint32_t bound_range = MIN2(desc->range, desc->buffer->vk.size - offset);
3187 * buffer. We could go out of our way here to walk over all of
3209 * buffer 3 read length equal to zero committed followed by a
3210 * 3DSTATE_CONSTANT_* with buffer 0 read length not equal to
3376 /* We have to gather buffer addresses as a second step because the
3379 * them into the actual GPU buffer. If we did the two loops at the
3381 * constant buffer when we did the copy.
3446 struct anv_address buffer =
3449 uint64_t addr = anv_address_physical(buffer);
3469 struct anv_address buffer =
3472 uint64_t addr = anv_address_physical(buffer);
3827 struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
3831 if (buffer) {
3844 .MOCS = anv_mocs(cmd_buffer->device, buffer->address.bo,
3852 .BufferStartingAddress = anv_address_add(buffer->address, offset),
3853 .NullVertexBuffer = offset >= buffer->vk.size,
3866 .EndAddress = anv_address_add(buffer->address, buffer->vk.size - 1),
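
The vertex-buffer matches above (3827-3866) show three pieces of per-binding arithmetic: the start address is the buffer address plus the binding offset, the binding is programmed as a null vertex buffer when the offset lies at or past the end of the buffer, and the end address is inclusive, hence the "- 1". A small sketch of just that arithmetic, using flat 64-bit addresses instead of the driver's anv_address pairs:

#include <stdbool.h>
#include <stdint.h>

/* Per-binding vertex buffer state, using flat addresses for clarity. */
struct vb_binding {
   uint64_t start;       /* first byte the HW may fetch from          */
   uint64_t end;         /* last valid byte (inclusive end address)   */
   bool     null_buffer; /* binding starts past the end of the buffer */
};

static struct vb_binding
compute_vb_binding(uint64_t buffer_address, uint64_t buffer_size,
                   uint64_t offset)
{
   return (struct vb_binding) {
      .start       = buffer_address + offset,
      .end         = buffer_address + buffer_size - 1,
      .null_buffer = offset >= buffer_size,
   };
}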
3914 /* We don't need any per-buffer dirty tracking because you're not
3927 if (cmd_buffer->state.xfb_enabled && xfb->buffer && xfb->size != 0) {
3928 sob.MOCS = anv_mocs(cmd_buffer->device, xfb->buffer->address.bo, 0);
3929 sob.SurfaceBaseAddress = anv_address_add(xfb->buffer->address,
3942 sob.SurfaceEndAddress = anv_address_add(xfb->buffer->address,
4578 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
4597 struct anv_address draw = anv_address_add(buffer->address, offset);
4635 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
4654 struct anv_address draw = anv_address_add(buffer->address, offset);
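
The indirect-draw matches (4578-4654 and the later repeats) all follow the same pattern: unwrap the VkBuffer handle to the driver's buffer object, then take the buffer address plus the caller-supplied offset as the location of the draw parameters. A simplified sketch of that pattern; the types below are stand-ins, not anv_buffer or anv_address:

#include <stdint.h>

/* Simplified stand-ins for anv_address / anv_buffer. */
struct sketch_address {
   uint64_t base;
   uint64_t offset;
};

struct sketch_gpu_buffer {
   struct sketch_address address;
   uint64_t size;
};

/* Equivalent of anv_address_add(buffer->address, offset): the indirect
 * draw parameters live at the buffer's address plus the API offset.
 */
static struct sketch_address
indirect_draw_address(const struct sketch_gpu_buffer *buffer, uint64_t offset)
{
   struct sketch_address addr = buffer->address;
   addr.offset += offset;
   return addr;
}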
4702 /* Upload the current draw count from the draw parameters buffer to
4800 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
4824 struct anv_address draw = anv_address_add(buffer->address, offset);
4868 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
4892 struct anv_address draw = anv_address_add(buffer->address, offset);
4953 /* If we have a counter buffer, this is a resume so we need to load the
5010 /* If we have a counter buffer, this is a resume so we need to load the
5110 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
5130 struct anv_address draw = anv_address_add(buffer->address, offset);
5152 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
5174 struct anv_address draw = anv_address_add(buffer->address, offset);
5469 /* The num_workgroups buffer goes in the binding table */
5495 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
5498 struct anv_address addr = anv_address_add(buffer->address, offset);
5521 /* The num_workgroups buffer goes in the binding table */
5613 /* The ray query HW computes offsets from the top of the buffer, so
5614 * we point the address at the end of the buffer.
6106 * This is implemented by carefully tracking all vertex and index buffer
6121 * range for each used buffer. This has to be a separate step because
6167 /* We have an index buffer */
6607 /* This image has the auxiliary buffer enabled. We can mark the
7180 /* On gfx7, we have to store a texturable version of the stencil buffer in
7249 ANV_FROM_HANDLE(anv_buffer, buffer, pConditionalRenderingBegin->buffer);
7252 anv_address_add(buffer->address, pConditionalRenderingBegin->offset);
7266 * If the value of the predicate in buffer memory changes
7273 * So it's perfectly fine to read a value from the buffer once.
7434 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
7437 cmd_buffer->state.gfx.index_buffer = buffer;