/*
 * Copyright 2021 Alyssa Rosenzweig
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdio.h>
#include "agx_state.h"
#include "asahi/lib/agx_pack.h"

/* Computes the address for a push uniform, adding referenced BOs to the
 * current batch as necessary. Note that anything uploaded via the batch's
 * pool does not require an update to the BO list, since the entire pool will
 * be added once at submit time. */

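/* Return the GPU address of a constant buffer's contents. Resource-backed
 * buffers are referenced in place; user buffers have no GPU-visible backing,
 * so their contents are copied into the batch's pool. */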
static uint64_t
agx_const_buffer_ptr(struct agx_batch *batch,
                     struct pipe_constant_buffer *cb)
{
   if (cb->buffer) {
      struct agx_bo *bo = agx_resource(cb->buffer)->bo;
      agx_batch_add_bo(batch, bo);

      return bo->ptr.gpu + cb->buffer_offset;
   } else {
      return agx_pool_upload_aligned(&batch->pool,
                                     ((uint8_t *) cb->user_buffer) + cb->buffer_offset,
                                     cb->buffer_size - cb->buffer_offset, 64);
   }
}

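/* Compute the GPU address of the data backing a single push range, staging
 * the data through the batch's pool as needed. */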
static uint64_t
agx_push_location_direct(struct agx_context *ctx, struct agx_push push,
                         enum pipe_shader_type stage)
{
   struct agx_batch *batch = ctx->batch;
   struct agx_stage *st = &ctx->stage[stage];

   switch (push.type) {
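   /* Build a table of GPU base addresses, one per constant buffer slot up to
    * the last bound buffer. */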
   case AGX_PUSH_UBO_BASES: {
      unsigned count = util_last_bit(st->cb_mask);
      struct agx_ptr ptr =
         agx_pool_alloc_aligned(&batch->pool, count * sizeof(uint64_t), 8);
      uint64_t *addresses = ptr.cpu;

      for (unsigned i = 0; i < count; ++i) {
         struct pipe_constant_buffer *cb = &st->cb[i];
         addresses[i] = agx_const_buffer_ptr(batch, cb);
      }

      return ptr.gpu;
   }

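   /* Build a table of GPU base addresses, one per vertex buffer. Only slots
    * set in vb_mask are written, and user vertex buffers are not expected. */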
   case AGX_PUSH_VBO_BASES: {
      unsigned count = util_last_bit(ctx->vb_mask);
      struct agx_ptr ptr =
         agx_pool_alloc_aligned(&batch->pool, count * sizeof(uint64_t), 8);
      uint64_t *addresses = ptr.cpu;

      u_foreach_bit(i, ctx->vb_mask) {
         struct pipe_vertex_buffer vb = ctx->vertex_buffers[i];
         assert(!vb.is_user_buffer);

         struct agx_bo *bo = agx_resource(vb.buffer.resource)->bo;
         agx_batch_add_bo(batch, bo);

         addresses[i] = bo->ptr.gpu + vb.buffer_offset;
      }

      return ptr.gpu;
   }

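   /* Upload the current blend color constant. */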
   case AGX_PUSH_BLEND_CONST: {
      return agx_pool_upload_aligned(&batch->pool, &ctx->blend_color,
                                     sizeof(ctx->blend_color), 8);
   }

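   /* Build a table of 16-bit (array_size - 1) values, one per bound texture.
    * Unbound slots are treated as having an array size of 1. */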
   case AGX_PUSH_ARRAY_SIZE_MINUS_1: {
      unsigned count = st->texture_count;
      struct agx_ptr ptr =
         agx_pool_alloc_aligned(&batch->pool, count * sizeof(uint16_t), 8);
      uint16_t *d1 = ptr.cpu;

      for (unsigned i = 0; i < count; ++i) {
         unsigned array_size = 1;

         if (st->textures[i])
            array_size = st->textures[i]->base.texture->array_size;

         d1[i] = array_size - 1;
      }

      return ptr.gpu;
   }

   default:
      unreachable("todo: push more");
   }
}

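/* Compute the location of a push range. An indirect push adds a level of
 * indirection: the address of the data is itself uploaded to the pool, and
 * the address of that pointer is returned instead. */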
uint64_t
agx_push_location(struct agx_context *ctx, struct agx_push push,
                  enum pipe_shader_type stage)
{
   uint64_t direct = agx_push_location_direct(ctx, push, stage);
   struct agx_pool *pool = &ctx->batch->pool;

   if (push.indirect)
      return agx_pool_upload(pool, &direct, sizeof(direct));
   else
      return direct;
}