/*
 * Copyright © 2021 Google LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <gtest/gtest.h>
#include "ralloc.h"
#include "register_allocate.h"
#include "register_allocate_internal.h"

#include "util/blob.h"

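/* Test fixture: each test gets its own ralloc memory context, which owns any
 * allocations hung off it and is freed when the test tears down.
 */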
class ra_test : public ::testing::Test {
public:
   void *mem_ctx;

protected:
   ra_test();
   ~ra_test();
};

ra_test::ra_test()
{
   mem_ctx = ralloc_context(NULL);
}

ra_test::~ra_test()
{
   ralloc_free(mem_ctx);
}

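/* Shared checks for the Thumb-style register set built by the two tests
 * below: class indices 0/1/2 must be reg32low/reg64low/reg96, with the
 * expected p/q values and pairwise conflicts.
 */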
void
thumb_checks(struct ra_regs *regs, unsigned reg32_base, unsigned reg64_base)
{
   struct ra_class *reg32low = ra_get_class_from_index(regs, 0);
   struct ra_class *reg64low = ra_get_class_from_index(regs, 1);
   struct ra_class *reg96 = ra_get_class_from_index(regs, 2);

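   /* For each class, p is the number of registers in the class and q[c] is
    * the maximum number of this class's allocations that a single allocation
    * from class c can conflict with.  "Table 4.1" below presumably refers to
    * the Thumb example in the paper this p/q colorability test comes from.
    */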
   /* Table 4.1 */
   ASSERT_EQ(reg32low->p, 8);
   ASSERT_EQ(reg32low->q[reg32low->index], 1);
   ASSERT_EQ(reg32low->q[reg64low->index], 2);
   ASSERT_EQ(reg32low->q[reg96->index], 3);
   ASSERT_EQ(reg64low->p, 8);
   ASSERT_EQ(reg64low->q[reg32low->index], 2);
   ASSERT_EQ(reg64low->q[reg64low->index], 3);
   ASSERT_EQ(reg64low->q[reg96->index], 4);
   ASSERT_EQ(reg96->p, 2);
   ASSERT_EQ(reg96->q[reg32low->index], 2);
   ASSERT_EQ(reg96->q[reg64low->index], 2);
   ASSERT_EQ(reg96->q[reg96->index], 2);

   /* These individual regs should conflict with themselves, but nothing else from their class. */
   for (int i = 0; i < 7; i++) {
      ASSERT_FALSE(ra_class_allocations_conflict(reg32low, reg32_base + i, reg32low, reg32_base + i + 1));
      ASSERT_TRUE(ra_class_allocations_conflict(reg32low, reg32_base + i, reg32low, reg32_base + i));
   }

   /* Check that reg64low conflicts with its pair of reg32low regs but not with their other neighbors. */
   ASSERT_TRUE(ra_class_allocations_conflict(reg64low, reg64_base + 0, reg32low, reg32_base + 0));
   ASSERT_TRUE(ra_class_allocations_conflict(reg64low, reg64_base + 0, reg32low, reg32_base + 1));
   ASSERT_FALSE(ra_class_allocations_conflict(reg64low, reg64_base + 0, reg32low, reg32_base + 2));

   ASSERT_FALSE(ra_class_allocations_conflict(reg64low, reg64_base + 1, reg32low, reg32_base + 0));
   ASSERT_TRUE(ra_class_allocations_conflict(reg64low, reg64_base + 1, reg32low, reg32_base + 1));
   ASSERT_TRUE(ra_class_allocations_conflict(reg64low, reg64_base + 1, reg32low, reg32_base + 2));
   ASSERT_FALSE(ra_class_allocations_conflict(reg64low, reg64_base + 1, reg32low, reg32_base + 3));
}

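/* Builds the Thumb register classes out of extra per-class registers that
 * transitively conflict with the base r0..15 hardware registers, then runs
 * the shared thumb_checks() on the result.
 */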
TEST_F(ra_test, thumb)
{
   struct ra_regs *regs = ra_alloc_reg_set(mem_ctx, 100, true);

   /* r0..15 are the real HW registers. */
   int next_vreg = 16;

   /* reg32low is any of the low 8 registers. */
   unsigned int reg32_base = next_vreg;
   struct ra_class *reg32low = ra_alloc_reg_class(regs);
   for (int i = 0; i < 8; i++) {
      int vreg = next_vreg++;
      ra_class_add_reg(reg32low, vreg);
      ra_add_transitive_reg_conflict(regs, i, vreg);
   }

   /* reg64low is pairs of the low 8 registers (with wraparound!) */
   unsigned int reg64_base = next_vreg;
   struct ra_class *reg64low = ra_alloc_reg_class(regs);
   for (int i = 0; i < 8; i++) {
      int vreg = next_vreg++;
      ra_class_add_reg(reg64low, vreg);
      ra_add_transitive_reg_conflict(regs, i, vreg);
      ra_add_transitive_reg_conflict(regs, (i + 1) % 8, vreg);
   }

   /* reg96 is one of either r[0..2] or r[1..3] */
   struct ra_class *reg96 = ra_alloc_reg_class(regs);
   for (int i = 0; i < 2; i++) {
      int vreg = next_vreg++;
      ra_class_add_reg(reg96, vreg);
      for (int j = 0; j < 3; j++)
         ra_add_transitive_reg_conflict(regs, i + j, vreg);
   }

   ra_set_finalize(regs, NULL);

   thumb_checks(regs, reg32_base, reg64_base);
}

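/* Same checks, but with the classes built as contiguous register classes:
 * each class member N stands for a run of class-sized consecutive base
 * registers starting at N, so no extra conflict registers are needed.
 */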
TEST_F(ra_test, thumb_contigregs)
{
   struct ra_regs *regs = ra_alloc_reg_set(mem_ctx, 16, true);

   /* reg32low is any of the low 8 registers. */
   struct ra_class *reg32low = ra_alloc_contig_reg_class(regs, 1);
   for (int i = 0; i < 8; i++)
      ra_class_add_reg(reg32low, i);

   /* reg64low is pairs of the low 8 registers (we're ignoring the wraparound thing here) */
   struct ra_class *reg64low = ra_alloc_contig_reg_class(regs, 2);
   for (int i = 0; i < 8; i++)
      ra_class_add_reg(reg64low, i);

   /* reg96 is one of either r[0..2] or r[1..3] */
   struct ra_class *reg96 = ra_alloc_contig_reg_class(regs, 3);
   for (int i = 0; i < 2; i++)
      ra_class_add_reg(reg96, i);

   ra_set_finalize(regs, NULL);

   thumb_checks(regs, 0, 0);
}

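/* Two single-register contiguous classes over disjoint register ranges should
 * not penalize each other: the cross-class q values must be 0.
 */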
TEST_F(ra_test, nonintersect_contigregs)
{
   struct ra_regs *regs = ra_alloc_reg_set(mem_ctx, 16, true);

   struct ra_class *low = ra_alloc_contig_reg_class(regs, 1);
   for (int i = 0; i < 8; i++)
      ra_class_add_reg(low, i);

   struct ra_class *high = ra_alloc_contig_reg_class(regs, 1);
   for (int i = 8; i < 16; i++)
      ra_class_add_reg(high, i);

   ra_set_finalize(regs, NULL);

   ASSERT_EQ(low->q[low->index], 1);
   ASSERT_EQ(low->q[high->index], 0);
   ASSERT_EQ(high->q[low->index], 0);
   ASSERT_EQ(high->q[high->index], 1);
}

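/* Contiguous classes whose members are aligned to their size: a single c1 or
 * c2 register overlaps at most one wider aligned allocation, while a c4
 * allocation covers two c2 and four c1 registers, as the q values below assert.
 */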
TEST_F(ra_test, aligned_contigregs)
{
   int base_regs = 32;
   struct ra_regs *regs = ra_alloc_reg_set(mem_ctx, base_regs, true);

   struct ra_class *c1 = ra_alloc_contig_reg_class(regs, 1);
   for (int i = 0; i < base_regs; i++)
      ra_class_add_reg(c1, i);

   struct ra_class *c2 = ra_alloc_contig_reg_class(regs, 2);
   for (int i = 8; i < base_regs; i += 2)
      ra_class_add_reg(c2, i);

   struct ra_class *c4 = ra_alloc_contig_reg_class(regs, 4);
   for (int i = 8; i < base_regs; i += 4)
      ra_class_add_reg(c4, i);

   ra_set_finalize(regs, NULL);

   ASSERT_EQ(c1->q[c1->index], 1);
   ASSERT_EQ(c1->q[c2->index], 2);
   ASSERT_EQ(c1->q[c4->index], 4);
   ASSERT_EQ(c2->q[c1->index], 1);
   ASSERT_EQ(c2->q[c2->index], 1);
   ASSERT_EQ(c2->q[c4->index], 2);
   ASSERT_EQ(c4->q[c1->index], 1);
   ASSERT_EQ(c4->q[c2->index], 1);
   ASSERT_EQ(c4->q[c4->index], 1);

   /* Check conflicts for a c4 allocation at i against other classes. */
   for (int i = 0; i < base_regs / 4; i += 4) {
      for (int j = 0; j < base_regs; j++) {
         ASSERT_EQ(ra_class_allocations_conflict(c4, i, c1, j),
                   j >= i && j < i + 4);
      }

      for (int j = 0; j < base_regs; j += 2) {
         ASSERT_EQ(ra_class_allocations_conflict(c4, i, c2, j),
                   j >= i && j < i + 4);
      }
   }
}

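/* Round-trips a finalized register set through ra_set_serialize() and
 * ra_set_deserialize() and verifies that the original and the deserialized
 * copy report the same classes and conflicts.
 */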
TEST_F(ra_test, serialization_roundtrip)
{
   struct blob blob;
   blob_init(&blob);

   for (int i = 0; i < 2; i++) {
      void *mem_ctx = ralloc_context(this->mem_ctx);
      struct ra_regs *regs;

      if (i == 0) {
         /* Build a register set and serialize it. */
         regs = ra_alloc_reg_set(mem_ctx, 4 + 4 + 4, true);

         struct ra_class *reg8_low = ra_alloc_reg_class(regs);
         struct ra_class *reg8_high = ra_alloc_reg_class(regs);
         struct ra_class *reg16 = ra_alloc_reg_class(regs);

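         /* Registers 0..7 are the 8-bit halves (even = low, odd = high) and
          * registers 8..11 are the 16-bit registers, each conflicting with
          * its two 8-bit halves.
          */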
         for (int i = 0; i < 4; i++) {
            const unsigned low = 2 * i;
            const unsigned high = low + 1;
            ra_class_add_reg(reg8_low, low);
            ra_class_add_reg(reg8_high, high);

            const unsigned both = 8 + i;
            ra_class_add_reg(reg16, both);

            ra_add_reg_conflict(regs, low, both);
            ra_add_reg_conflict(regs, high, both);
         }

         ra_set_finalize(regs, NULL);

         assert(blob.size == 0);
         ra_set_serialize(regs, &blob);
      } else {
         /* Deserialize the register set. */
         assert(blob.size > 0);

         struct blob_reader reader;
         blob_reader_init(&reader, blob.data, blob.size);

         regs = ra_set_deserialize(mem_ctx, &reader);
      }

      /* Verify the register set for each case. */
      {
         struct ra_class *reg8_low = ra_get_class_from_index(regs, 0);
         ASSERT_EQ(ra_class_index(reg8_low), 0);

         struct ra_class *reg8_high = ra_get_class_from_index(regs, 1);
         ASSERT_EQ(ra_class_index(reg8_high), 1);

         struct ra_class *reg16 = ra_get_class_from_index(regs, 2);
         ASSERT_EQ(ra_class_index(reg16), 2);

         for (int i = 0; i < 4; i++) {
            EXPECT_TRUE(ra_class_allocations_conflict(reg8_low, 2 * i, reg16, 8 + i));
            EXPECT_TRUE(ra_class_allocations_conflict(reg8_high, (2 * i) + 1, reg16, 8 + i));
         }
      }

      ralloc_free(mem_ctx);
   }

   blob_finish(&blob);
}