/*
 * Copyright (c) 2020 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *   Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

/* Index buffer min/max cache. We need to calculate the min/max for arbitrary
 * slices (start, start + count) of the index buffer at draw time. As this can
 * be quite expensive, we cache the results. Conceptually, this is just a hash
 * table mapping the key (start, count) to the value (min, max). In practice,
 * Mesa's hash table implementation has more overhead than we would like and
 * makes managing memory usage a little complicated, so we use this data
 * structure instead. Searching is O(n) in the cache size, but the size is
 * capped at PANFROST_MINMAX_SIZE (so this is a trade-off between cache hit
 * rate and search speed). Note that the keys are stored contiguously, so
 * lookups benefit from cache-line locality. Insertion is O(1) and in-order
 * until the cache fills up, after which the oldest entry is evicted in
 * ring-buffer fashion, tracked by the index field.
 */
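/* For orientation, the cache declared in pan_minmax_cache.h is assumed to
 * look roughly like the sketch below: parallel key/value arrays plus a fill
 * count and a ring-buffer cursor. Keys pack (count << 32 | start) and values
 * pack (max_index << 32 | min_index), matching the helpers in this file.
 * This is only an illustration; the header is authoritative.
 *
 *    struct panfrost_minmax_cache {
 *            uint64_t keys[PANFROST_MINMAX_SIZE];
 *            uint64_t values[PANFROST_MINMAX_SIZE];
 *            unsigned size;
 *            unsigned index;
 *    };
 */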

#include "pan_minmax_cache.h"

bool
panfrost_minmax_cache_get(struct panfrost_minmax_cache *cache, unsigned start,
                          unsigned count, unsigned *min_index,
                          unsigned *max_index)
{
        uint64_t ht_key = (((uint64_t)count) << 32) | start;
        bool found = false;

        if (!cache)
                return false;

        /* Linear scan is fine here: the cache holds at most
         * PANFROST_MINMAX_SIZE entries and the keys are contiguous. */
        for (unsigned i = 0; i < cache->size; ++i) {
                if (cache->keys[i] == ht_key) {
                        uint64_t hit = cache->values[i];

                        *min_index = hit & 0xffffffff;
                        *max_index = hit >> 32;
                        found = true;
                        break;
                }
        }

        return found;
}
void
panfrost_minmax_cache_add(struct panfrost_minmax_cache *cache, unsigned start,
                          unsigned count, unsigned min_index,
                          unsigned max_index)
{
        uint64_t ht_key = (((uint64_t)count) << 32) | start;
        uint64_t value = min_index | (((uint64_t)max_index) << 32);
        unsigned index = 0;

        if (!cache)
                return;

        if (cache->size == PANFROST_MINMAX_SIZE) {
                /* Cache is full: overwrite the oldest entry, ring-buffer style */
                index = cache->index++;
                cache->index = cache->index % PANFROST_MINMAX_SIZE;
        } else {
                index = cache->size++;
        }

        cache->keys[index] = ht_key;
        cache->values[index] = value;
}
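
/* A sketch of the intended draw-time pattern, for illustration only. The
 * scan_index_buffer() helper is hypothetical and stands in for whatever code
 * actually walks the index buffer to compute the bounds on a cache miss:
 *
 *    unsigned min, max;
 *
 *    if (!panfrost_minmax_cache_get(cache, start, count, &min, &max)) {
 *            scan_index_buffer(indices, start, count, &min, &max);
 *            panfrost_minmax_cache_add(cache, start, count, min, max);
 *    }
 */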
/* If we've been caching min/max indices and we update the index
 * buffer, that may invalidate the min/max. Check what's been cached vs
 * what we've written, and throw out invalid entries. */

void
panfrost_minmax_cache_invalidate(struct panfrost_minmax_cache *cache,
                                 struct pipe_transfer *transfer)
{
        /* Ensure there is a cache to invalidate and that the transfer is a write */
        if (!cache)
                return;

        if (!(transfer->usage & PIPE_MAP_WRITE))
                return;

        unsigned valid_count = 0;

        for (unsigned i = 0; i < cache->size; ++i) {
                uint64_t key = cache->keys[i];

                uint32_t start = key & 0xffffffff;
                uint32_t count = key >> 32;

                /* 1D range intersection: the entry is stale if the written
                 * range overlaps the cached slice at all */
                bool invalid = MAX2(transfer->box.x, start) <
                               MIN2(transfer->box.x + transfer->box.width,
                                    start + count);
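                /* Worked example with illustrative values: a cached slice with
                 * start = 100, count = 50 covers indices [100, 150). A write
                 * with box.x = 120, box.width = 10 covers [120, 130). Then
                 * MAX2(120, 100) = 120 < MIN2(130, 150) = 130, so the ranges
                 * overlap and the entry is dropped below. */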
                if (!invalid) {
                        cache->keys[valid_count] = key;
                        cache->values[valid_count] = cache->values[i];
                        valid_count++;
                }
        }

        cache->size = valid_count;
        cache->index = 0;
}