/*
 * Copyright (c) 2012 Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * Helper lib to track gpu buffer contents/addresses, and to map between gpu
 * and host addresses while decoding cmdstreams/crashdumps.
 */
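
/*
 * A minimal usage sketch (illustrative only; 'addr', 'len' and 'data' are
 * placeholder names, the real callers are the cmdstream/crashdump decoders):
 *
 *    void *data = malloc(len);
 *    ... fill 'data' with the dumped buffer contents ...
 *    add_buffer(addr, len, data);        // takes ownership of 'data'
 *
 *    uint32_t *dwords = hostptr(addr);   // gpu -> host address
 *    assert(gpuaddr(dwords) == addr);    // host -> gpu address
 *
 *    reset_buffers();                    // frees 'data' again
 */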

#include <assert.h>
#include <stdlib.h>

#include "util/rb_tree.h"
#include "buffers.h"

struct buffer {
   struct rb_node node;
   void *hostptr;
   unsigned int len;
   uint64_t gpuaddr;

   /* For 'once' mode, for buffers containing cmdstream, keep track (per
    * offset into the buffer) of which modes have already been dumped.
    */
   struct {
      unsigned offset;
      unsigned dumped_mask;
   } offsets[64];
   unsigned noffsets;
};

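/* rb_tree of all known buffers, sorted by gpu address: */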
static struct rb_tree buffers;

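/* rb_tree insert comparison: order buffers by their start (gpu) address. */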
static int
buffer_insert_cmp(const struct rb_node *n1, const struct rb_node *n2)
{
   const struct buffer *buf1 = (const struct buffer *)n1;
   const struct buffer *buf2 = (const struct buffer *)n2;
   /* Note that comparing gpuaddrs by subtraction could overflow an int: */
   if (buf1->gpuaddr > buf2->gpuaddr)
      return 1;
   else if (buf1->gpuaddr < buf2->gpuaddr)
      return -1;
   return 0;
}

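/* rb_tree search comparison: match the buffer whose [gpuaddr, gpuaddr + len)
 * range contains the address being searched for.
 */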
static int
buffer_search_cmp(const struct rb_node *node, const void *addrptr)
{
   const struct buffer *buf = (const struct buffer *)node;
   uint64_t gpuaddr = *(uint64_t *)addrptr;
   if (buf->gpuaddr + buf->len <= gpuaddr)
      return -1;
   else if (buf->gpuaddr > gpuaddr)
      return 1;
   return 0;
}

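/* Find the buffer containing the given gpu address, or NULL if the address
 * is zero or not tracked.
 */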
static struct buffer *
get_buffer(uint64_t gpuaddr)
{
   if (gpuaddr == 0)
      return NULL;
   return (struct buffer *)rb_tree_search(&buffers, &gpuaddr,
                                          buffer_search_cmp);
}

static int
buffer_contains_hostptr(struct buffer *buf, void *hostptr)
{
   return (buf->hostptr <= hostptr) && (hostptr < (buf->hostptr + buf->len));
}

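/**
 * Map a host pointer back to the corresponding gpu address (the reverse of
 * hostptr()), or return 0 if it does not fall within any tracked buffer.
 */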
uint64_t
gpuaddr(void *hostptr)
{
   rb_tree_foreach (struct buffer, buf, &buffers, node) {
      if (buffer_contains_hostptr(buf, hostptr))
         return buf->gpuaddr + (hostptr - buf->hostptr);
   }
   return 0;
}

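/**
 * Return the base (start) gpu address of the buffer containing gpuaddr, or 0
 * if the address is not tracked.
 */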
uint64_t
gpubaseaddr(uint64_t gpuaddr)
{
   struct buffer *buf = get_buffer(gpuaddr);
   if (buf)
      return buf->gpuaddr;
   else
      return 0;
}

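/**
 * Map a gpu address to the corresponding pointer into the tracked buffer
 * contents, or NULL if the address is not tracked.
 */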
void *
hostptr(uint64_t gpuaddr)
{
   struct buffer *buf = get_buffer(gpuaddr);
   if (buf)
      return buf->hostptr + (gpuaddr - buf->gpuaddr);
   else
      return NULL;
}

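/**
 * Return the number of bytes from gpuaddr to the end of its containing
 * buffer (ie. how much can be read via hostptr()), or 0 if the address is
 * not tracked.
 */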
unsigned
hostlen(uint64_t gpuaddr)
{
   struct buffer *buf = get_buffer(gpuaddr);
   if (buf)
      return buf->len + buf->gpuaddr - gpuaddr;
   else
      return 0;
}

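/**
 * For 'once' mode: return true if the cmdstream at gpuaddr has already been
 * dumped for every mode in enable_mask; otherwise record those modes as
 * dumped and return false, so the caller dumps it (at most) once per mode.
 */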
bool
has_dumped(uint64_t gpuaddr, unsigned enable_mask)
{
   if (!gpuaddr)
      return false;

   struct buffer *b = get_buffer(gpuaddr);
   if (!b)
      return false;

   assert(gpuaddr >= b->gpuaddr);
   unsigned offset = gpuaddr - b->gpuaddr;

   unsigned n = 0;
   while (n < b->noffsets) {
      if (offset == b->offsets[n].offset)
         break;
      n++;
   }

   /* if needed, allocate a new offset entry: */
   if (n == b->noffsets) {
      b->noffsets++;
      assert(b->noffsets < ARRAY_SIZE(b->offsets));
      b->offsets[n].dumped_mask = 0;
      b->offsets[n].offset = offset;
   }

   if ((b->offsets[n].dumped_mask & enable_mask) == enable_mask)
      return true;

   b->offsets[n].dumped_mask |= enable_mask;

   return false;
}

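/**
 * Discard all tracked buffers, freeing both the tracking structs and the
 * buffer contents that add_buffer() took ownership of.
 */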
void
reset_buffers(void)
{
   rb_tree_foreach_safe (struct buffer, buf, &buffers, node) {
      rb_tree_remove(&buffers, &buf->node);
      free(buf->hostptr);
      free(buf);
   }
}

/**
 * Record buffer contents; takes ownership of hostptr (freed in
 * reset_buffers()).
 */
void
add_buffer(uint64_t gpuaddr, unsigned int len, void *hostptr)
{
   struct buffer *buf = get_buffer(gpuaddr);

   if (!buf) {
      buf = calloc(1, sizeof(struct buffer));
      buf->gpuaddr = gpuaddr;
      rb_tree_insert(&buffers, &buf->node, buffer_insert_cmp);
   }

   assert(buf->gpuaddr == gpuaddr);

   buf->hostptr = hostptr;
   buf->len = len;
}