/*
 * Copyright © 2018 Intel Corporation
 * Copyright © 2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_format_convert.h"

/** @file v3d_nir_lower_scratch.c
 *
 * Swizzles the addresses of nir_intrinsic_load_scratch and
 * nir_intrinsic_store_scratch so that each dword of a scratch access ends up
 * occupying a full cacheline (one dword per QPU channel), scalarizing the
 * intrinsics and removing their writemasks in the process.
 */
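
/* As a sketch of the transform (assuming V3D_CHANNELS == 16, i.e. a 64-byte
 * cacheline holding one dword per channel): a two-component load_scratch at
 * byte offset "off" is rewritten into two scalar load_scratch intrinsics at
 * offsets off * 16 and off * 16 + 16 * 4, followed by a vec2 that gathers
 * the results.  The per-channel EIDX * 4 term is not added here; it is
 * already ORed into the spill_offset base register.
 */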

static nir_ssa_def *
v3d_nir_scratch_offset(nir_builder *b, nir_intrinsic_instr *instr)
{
        bool is_store = instr->intrinsic == nir_intrinsic_store_scratch;
        nir_ssa_def *offset = nir_ssa_for_src(b, instr->src[is_store ? 1 : 0], 1);

        assert(nir_intrinsic_align_mul(instr) >= 4);
        assert(nir_intrinsic_align_offset(instr) == 0);

        /* The spill_offset register will already have the subgroup ID (EIDX)
         * shifted and ORed in at bit 2, so all we need to do is to move the
         * dword index up above V3D_CHANNELS.
         */
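        /* Concretely (still assuming V3D_CHANNELS == 16): a 4-byte-aligned
         * NIR byte offset "off" becomes off * 16 here, and with the EIDX * 4
         * term already present in spill_offset, channel EIDX accesses
         * off * 16 + EIDX * 4 relative to the scratch base.  The callers
         * below then add 16 * 4 bytes per extra vector component.
         */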
        return nir_imul_imm(b, offset, V3D_CHANNELS);
}

static void
v3d_nir_lower_load_scratch(nir_builder *b, nir_intrinsic_instr *instr)
{
        b->cursor = nir_before_instr(&instr->instr);

        nir_ssa_def *offset = v3d_nir_scratch_offset(b, instr);

        /* Emit one scalar load per component, with consecutive components
         * placed a cacheline (V3D_CHANNELS dwords) apart.
         */
        nir_ssa_def *chans[NIR_MAX_VEC_COMPONENTS];
        for (int i = 0; i < instr->num_components; i++) {
                nir_ssa_def *chan_offset =
                        nir_iadd_imm(b, offset, V3D_CHANNELS * i * 4);

                nir_intrinsic_instr *chan_instr =
                        nir_intrinsic_instr_create(b->shader, instr->intrinsic);
                chan_instr->num_components = 1;
                nir_ssa_dest_init(&chan_instr->instr, &chan_instr->dest, 1,
                                  instr->dest.ssa.bit_size, NULL);

                chan_instr->src[0] = nir_src_for_ssa(chan_offset);

                nir_intrinsic_set_align(chan_instr, 4, 0);

                nir_builder_instr_insert(b, &chan_instr->instr);

                chans[i] = &chan_instr->dest.ssa;
        }

        /* Rebuild the original vector and replace all uses of the wide load. */
        nir_ssa_def *result = nir_vec(b, chans, instr->num_components);
        nir_ssa_def_rewrite_uses(&instr->dest.ssa, result);
        nir_instr_remove(&instr->instr);
}

static void
v3d_nir_lower_store_scratch(nir_builder *b, nir_intrinsic_instr *instr)
{
        b->cursor = nir_before_instr(&instr->instr);

        nir_ssa_def *offset = v3d_nir_scratch_offset(b, instr);
        nir_ssa_def *value = nir_ssa_for_src(b, instr->src[0],
                                             instr->num_components);

        /* Emit one scalar store per enabled component of the writemask,
         * with consecutive components placed a cacheline apart.
         */
        for (int i = 0; i < instr->num_components; i++) {
                if (!(nir_intrinsic_write_mask(instr) & (1 << i)))
                        continue;

                nir_ssa_def *chan_offset =
                        nir_iadd_imm(b, offset, V3D_CHANNELS * i * 4);

                nir_intrinsic_instr *chan_instr =
                        nir_intrinsic_instr_create(b->shader, instr->intrinsic);
                chan_instr->num_components = 1;

                chan_instr->src[0] =
                        nir_src_for_ssa(nir_channel(b, value, i));
                chan_instr->src[1] = nir_src_for_ssa(chan_offset);
                nir_intrinsic_set_write_mask(chan_instr, 0x1);
                nir_intrinsic_set_align(chan_instr, 4, 0);

                nir_builder_instr_insert(b, &chan_instr->instr);
        }

        nir_instr_remove(&instr->instr);
}

static bool
v3d_nir_lower_scratch_cb(nir_builder *b,
                         nir_instr *instr,
                         void *_state)
{
        if (instr->type != nir_instr_type_intrinsic)
                return false;

        nir_intrinsic_instr *intr =
                nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_scratch:
                v3d_nir_lower_load_scratch(b, intr);
                return true;
        case nir_intrinsic_store_scratch:
                v3d_nir_lower_store_scratch(b, intr);
                return true;
        default:
                return false;
        }
}

bool
v3d_nir_lower_scratch(nir_shader *s)
{
        return nir_shader_instructions_pass(s, v3d_nir_lower_scratch_cb,
                                            nir_metadata_block_index |
                                            nir_metadata_dominance, NULL);
}
