/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <math.h>

#include "util/debug.h"
#include "util/macros.h"
#include "util/u_math.h"
#include "compiler/shader_enums.h"

#include "intel_l3_config.h"

/**
 * The following diagram shows how we partition the URB:
 *
 *        16kb or 32kb               Rest of the URB space
 *   __________-__________   _________________-_________________
 *  /                     \ /                                   \
 * +-------------------------------------------------------------+
 * |  VS/HS/DS/GS/FS Push  |           VS/HS/DS/GS URB           |
 * |       Constants       |               Entries               |
 * +-------------------------------------------------------------+
 *
 * Push constants must be stored at the beginning of the URB space,
 * while URB entries can be stored anywhere.  We choose to lay them
 * out in pipeline order (VS -> HS -> DS -> GS).
 */
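
/* A purely illustrative example (hypothetical sizes, not tied to any
 * particular device): with 32kB reserved for push constants out of a 384kB
 * URB, the remaining 352kB would be carved into 8kB chunks and handed out to
 * whichever of VS/HS/DS/GS are active, in pipeline order.
 */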

/**
 * Decide how to partition the URB among the various stages.
 *
 * \param[in] devinfo - device info (push constant size and URB entry limits).
 * \param[in] l3_cfg - the L3 configuration (determines the total URB size).
 * \param[in] tess_present - are tessellation shaders active?
 * \param[in] gs_present - are geometry shaders active?
 * \param[in] entry_size - the URB entry size (from the shader compiler)
 * \param[out] entries - the number of URB entries for each stage
 * \param[out] start - the starting offset for each stage
 * \param[out] deref_block_size - deref block size for 3DSTATE_SF
 * \param[out] constrained - true if we wanted more space than we had
 */
void
intel_get_urb_config(const struct intel_device_info *devinfo,
                     const struct intel_l3_config *l3_cfg,
                     bool tess_present, bool gs_present,
                     const unsigned entry_size[4],
                     unsigned entries[4], unsigned start[4],
                     enum intel_urb_deref_block_size *deref_block_size,
                     bool *constrained)
{
   unsigned urb_size_kB = intel_get_l3_config_urb_size(devinfo, l3_cfg);

   /* RCU_MODE register for Gfx12LP in BSpec says:
    *
    *    "HW reserves 4KB of URB space per bank for Compute Engine out of the
    *    total storage available in L3. SW must consider that 4KB of storage
    *    per bank will be reduced from what is programmed for the URB space
    *    in L3 for Render Engine executed workloads.
    *
    *    Example: When URB space programmed is 64KB (per bank) for Render
    *    Engine, the actual URB space available for operation is only 60KB
    *    (per bank). Similarly when URB space programmed is 128KB (per bank)
    *    for render engine, the actual URB space available for operation is
    *    only 124KB (per bank). More detailed description available in "L3
    *    Cache" section of the B-Spec."
    */
   if (devinfo->verx10 == 120) {
      assert(devinfo->num_slices == 1);
      urb_size_kB -= 4 * devinfo->l3_banks;
   }

   const unsigned push_constant_kB = devinfo->max_constant_urb_size_kb;

   const bool active[4] = { true, tess_present, tess_present, gs_present };

   /* URB allocations must be done in 8k chunks. */
   const unsigned chunk_size_kB = 8;
   const unsigned chunk_size_bytes = chunk_size_kB * 1024;

   const unsigned push_constant_chunks = push_constant_kB / chunk_size_kB;
   const unsigned urb_chunks = urb_size_kB / chunk_size_kB;
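
   /* Illustrative arithmetic (hypothetical sizes): push_constant_kB = 32 and
    * urb_size_kB = 384 would give push_constant_chunks = 32 / 8 = 4 and
    * urb_chunks = 384 / 8 = 48.
    */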

   /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
    *
    *     VS Number of URB Entries must be divisible by 8 if the VS URB Entry
    *     Allocation Size is less than 9 512-bit URB entries.
    *
    * Similar text exists for HS, DS and GS.
    */
   unsigned granularity[4];
   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      granularity[i] = (entry_size[i] < 9) ? 8 : 1;
   }
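
   /* Illustrative example (hypothetical entry sizes): entry_size[] is in
    * 64-byte (512-bit) units, so an entry size of 4 (256 bytes) forces the
    * entry count to a multiple of 8, while an entry size of 16 (1024 bytes)
    * has no such restriction.
    */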

   unsigned min_entries[4] = {
      /* VS has a lower limit on the number of URB entries.
       *
       * From the Broadwell PRM, 3DSTATE_URB_VS instruction:
       * "When tessellation is enabled, the VS Number of URB Entries must be
       *  greater than or equal to 192."
       */
      [MESA_SHADER_VERTEX] = tess_present && devinfo->ver == 8 ?
         192 : devinfo->urb.min_entries[MESA_SHADER_VERTEX],

      /* There are two constraints on the minimum amount of URB space we can
       * allocate:
       *
       * (1) We need room for at least 2 URB entries, since we always operate
       * the GS in DUAL_OBJECT mode.
       *
       * (2) We can't allocate fewer entries than the GS granularity
       * computed above.
       */
      [MESA_SHADER_GEOMETRY] = gs_present ? 2 : 0,

      [MESA_SHADER_TESS_CTRL] = tess_present ? 1 : 0,

      [MESA_SHADER_TESS_EVAL] = tess_present ?
         devinfo->urb.min_entries[MESA_SHADER_TESS_EVAL] : 0,
   };

   /* Min VS Entries isn't a multiple of 8 on Cherryview/Broxton; round up.
    * Round them all up.
    */
   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      min_entries[i] = ALIGN(min_entries[i], granularity[i]);
   }

   unsigned entry_size_bytes[4];
   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      entry_size_bytes[i] = 64 * entry_size[i];
   }

   /* Initially, assign each stage the minimum amount of URB space it needs,
    * and make a note of how much additional space it "wants" (the amount of
    * additional space it could actually make use of).
    */
   unsigned chunks[4];
   unsigned wants[4];
   unsigned total_needs = push_constant_chunks;
   unsigned total_wants = 0;

   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      if (active[i]) {
         chunks[i] = DIV_ROUND_UP(min_entries[i] * entry_size_bytes[i],
                                  chunk_size_bytes);

         wants[i] =
            DIV_ROUND_UP(devinfo->urb.max_entries[i] * entry_size_bytes[i],
                         chunk_size_bytes) - chunks[i];
      } else {
         chunks[i] = 0;
         wants[i] = 0;
      }

      total_needs += chunks[i];
      total_wants += wants[i];
   }
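
   /* Illustrative example (hypothetical limits): with only VS active,
    * min_entries[VS] = 64, max_entries[VS] = 512 and a 256-byte entry,
    * chunks[VS] = DIV_ROUND_UP(64 * 256, 8192) = 2 and
    * wants[VS] = DIV_ROUND_UP(512 * 256, 8192) - 2 = 14.
    */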

   assert(total_needs <= urb_chunks);

   *constrained = total_needs + total_wants > urb_chunks;

   /* Mete out remaining space (if any) in proportion to "wants". */
   unsigned remaining_space = MIN2(urb_chunks - total_needs, total_wants);

   if (remaining_space > 0) {
      for (int i = MESA_SHADER_VERTEX;
           total_wants > 0 && i <= MESA_SHADER_TESS_EVAL; i++) {
         unsigned additional = (unsigned)
            roundf(wants[i] * (((float) remaining_space) / total_wants));
         chunks[i] += additional;
         remaining_space -= additional;
         total_wants -= wants[i];
      }

      chunks[MESA_SHADER_GEOMETRY] += remaining_space;
   }
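
   /* Illustrative example (hypothetical numbers): with wants[] = {6, 0, 0, 4}
    * and remaining_space = 5, VS receives roundf(6 * 5 / 10) = 3 chunks, HS
    * and DS receive nothing, and GS picks up the 2 leftover chunks after the
    * loop, matching its 4/10 share.
    */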

   /* Sanity check that we haven't over-allocated. */
   unsigned total_chunks = push_constant_chunks;
   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      total_chunks += chunks[i];
   }
   assert(total_chunks <= urb_chunks);

   /* Finally, compute the number of entries that can fit in the space
    * allocated to each stage.
    */
   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      entries[i] = chunks[i] * chunk_size_bytes / entry_size_bytes[i];

      /* Since we rounded up when computing wants[], this may be slightly
       * more than the maximum allowed amount, so correct for that.
       */
      entries[i] = MIN2(entries[i], devinfo->urb.max_entries[i]);

      /* Ensure that we program a multiple of the granularity. */
      entries[i] = ROUND_DOWN_TO(entries[i], granularity[i]);

      /* Finally, sanity check to make sure we have at least the minimum
       * number of entries needed for each stage.
       */
      assert(entries[i] >= min_entries[i]);
   }
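
   /* Illustrative example (hypothetical sizes): 3 chunks with a 256-byte
    * entry size yields 3 * 8192 / 256 = 96 entries, which is already a
    * multiple of the granularity of 8.
    */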

   /* Lay out the URB in pipeline order: push constants, VS, HS, DS, GS. */
   int first_urb = push_constant_chunks;

   /* From the BDW PRM: for 3DSTATE_URB_*: VS URB Starting Address
    *
    *    "Value: [4,48] Device [SliceCount] GT 1"
    *
    * From the ICL PRMs and above:
    *
    *    "If CTXT_SR_CTL::POSH_Enable is clear and Push Constants are required
    *     or Device[SliceCount] GT 1, the lower limit is 4."
    *
    *    "If Push Constants are not required and Device[SliceCount] == 1, the
    *     lower limit is 0."
    */
   if ((devinfo->ver == 8 && devinfo->num_slices == 1) ||
       (devinfo->ver >= 11 && push_constant_chunks > 0 && devinfo->num_slices == 1))
      first_urb = MAX2(first_urb, 4);

   int next_urb = first_urb;
   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      if (entries[i]) {
         start[i] = next_urb;
         next_urb += chunks[i];
      } else {
         /* Put disabled stages at the beginning of the valid range */
         start[i] = first_urb;
      }
   }
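
   /* Illustrative layout (hypothetical numbers): with push_constant_chunks =
    * 4 and only VS enabled with 16 chunks, start[VS] = 4 while the disabled
    * HS/DS/GS stages are all parked at start = 4 as well, since they occupy
    * no chunks.
    */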

   if (deref_block_size) {
      if (devinfo->ver >= 12) {
         /* From the Gfx12 BSpec:
          *
          *    "Deref Block size depends on the last enabled shader and number
          *    of handles programmed for that shader
          *
          *       1) For GS last shader enabled cases, the deref block is
          *          always set to a per poly(within hardware)
          *
          *    If the last enabled shader is VS or DS.
          *
          *       1) If DS is last enabled shader then if the number of DS
          *          handles is less than 324, need to set per poly deref.
          *
          *       2) If VS is last enabled shader then if the number of VS
          *          handles is less than 192, need to set per poly deref"
          *
          * The default is 32 so we assume that's the right choice if we're
          * not in one of the explicit cases listed above.
          */
         if (gs_present) {
            *deref_block_size = INTEL_URB_DEREF_BLOCK_SIZE_PER_POLY;
         } else if (tess_present) {
            if (entries[MESA_SHADER_TESS_EVAL] < 324)
               *deref_block_size = INTEL_URB_DEREF_BLOCK_SIZE_PER_POLY;
            else
               *deref_block_size = INTEL_URB_DEREF_BLOCK_SIZE_32;
         } else {
            if (entries[MESA_SHADER_VERTEX] < 192)
               *deref_block_size = INTEL_URB_DEREF_BLOCK_SIZE_PER_POLY;
            else
               *deref_block_size = INTEL_URB_DEREF_BLOCK_SIZE_32;
         }
      } else {
         *deref_block_size = 0;
      }
   }
}

struct intel_mesh_urb_allocation
intel_get_mesh_urb_config(const struct intel_device_info *devinfo,
                          const struct intel_l3_config *l3_cfg,
                          unsigned tue_size_dw, unsigned mue_size_dw)
{
   struct intel_mesh_urb_allocation r = {0};

   /* Allocation Size must be aligned to 64B. */
   r.task_entry_size_64b = DIV_ROUND_UP(tue_size_dw * 4, 64);
   r.mesh_entry_size_64b = DIV_ROUND_UP(mue_size_dw * 4, 64);
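
   /* Illustrative example (hypothetical TUE/MUE sizes): tue_size_dw = 100 is
    * 400 bytes, so task_entry_size_64b = DIV_ROUND_UP(400, 64) = 7; a
    * mue_size_dw of 144 (576 bytes) gives mesh_entry_size_64b = 9.
    */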

   assert(r.task_entry_size_64b <= 1024);
   assert(r.mesh_entry_size_64b <= 1024);

   /* Per-slice URB size. */
   unsigned total_urb_kb = intel_get_l3_config_urb_size(devinfo, l3_cfg);

   /* The Programming Note in the bspec requires all slices to have the same
    * number of entries, so we need to discount the push constant space for
    * all of them.  See 3DSTATE_URB_ALLOC_MESH and 3DSTATE_URB_ALLOC_TASK.
    */
   const unsigned push_constant_kb = devinfo->max_constant_urb_size_kb;
   total_urb_kb -= push_constant_kb;

   /* TODO(mesh): Take the push constant size as a parameter instead of always
    * assuming the max? */

   float task_urb_share = 0.0f;
   if (r.task_entry_size_64b > 0) {
      /* By default, assign 10% to TASK and 90% to MESH, since we expect MESH
       * to use larger URB entries, as it contains all the vertex and
       * primitive data.  An environment variable lets us tweak this split.
       *
       * TODO(mesh): Re-evaluate if this is a good default once there are more
       * workloads.
       */
      static int task_urb_share_percentage = -1;
      if (task_urb_share_percentage < 0) {
         task_urb_share_percentage =
            MIN2(env_var_as_unsigned("INTEL_MESH_TASK_URB_SHARE", 10), 100);
      }
      task_urb_share = task_urb_share_percentage / 100.0f;
   }

   const unsigned one_task_urb_kb = ALIGN(r.task_entry_size_64b * 64, 1024) / 1024;

   const unsigned task_urb_kb = ALIGN(MAX2(total_urb_kb * task_urb_share, one_task_urb_kb), 8);

   const unsigned mesh_urb_kb = total_urb_kb - task_urb_kb;
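
   /* Illustrative split (hypothetical sizes): with 192kB left after push
    * constants and the default 10% TASK share, TASK would be allotted roughly
    * 19kB, which the 8kB alignment rounds up to 24kB, leaving 168kB for MESH.
    */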

   /* TODO(mesh): Could we avoid allocating URB for Mesh if rasterization is
    * disabled? */

   unsigned next_address_8kb = DIV_ROUND_UP(push_constant_kb, 8);

   if (r.task_entry_size_64b > 0) {
      r.task_entries = MIN2((task_urb_kb * 16) / r.task_entry_size_64b, 1548);

      /* 3DSTATE_URB_ALLOC_TASK_BODY says
       *
       *   TASK Number of URB Entries must be divisible by 8 if the TASK URB
       *   Entry Allocation Size is less than 9 512-bit URB entries.
       */
      if (r.task_entry_size_64b < 9)
         r.task_entries = ROUND_DOWN_TO(r.task_entries, 8);

      r.task_starting_address_8kb = next_address_8kb;

      assert(task_urb_kb % 8 == 0);
      next_address_8kb += task_urb_kb / 8;
   }
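
   /* Illustrative example (continuing the hypothetical numbers above): a 24kB
    * TASK allocation holds 24 * 16 = 384 64-byte units; with a 7-unit entry
    * that is 384 / 7 = 54 entries, rounded down to 48 because the entry size
    * is below 9 units.
    */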

   r.mesh_entries = MIN2((mesh_urb_kb * 16) / r.mesh_entry_size_64b, 1548);

   /* Similar restriction to TASK. */
   if (r.mesh_entry_size_64b < 9)
      r.mesh_entries = ROUND_DOWN_TO(r.mesh_entries, 8);

   r.mesh_starting_address_8kb = next_address_8kb;

   r.deref_block_size = r.mesh_entries > 32 ?
      INTEL_URB_DEREF_BLOCK_SIZE_MESH :
      INTEL_URB_DEREF_BLOCK_SIZE_PER_POLY;

   return r;
}