/*
 * Copyright 2021 Google LLC
 * SPDX-License-Identifier: MIT
 */

#include "vn_renderer_internal.h"

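/*
 * A simple cache of power-of-two-sized shmems, with one free list per size
 * class, so that short-lived shmems can be recycled rather than destroyed
 * and recreated.
 *
 * A sketch of the expected caller pattern (the renderer_create_shmem and
 * renderer_destroy_shmem names here are hypothetical):
 *
 *    struct vn_renderer_shmem *shmem =
 *       vn_renderer_shmem_cache_get(cache, size);
 *    if (!shmem)
 *       shmem = renderer_create_shmem(renderer, size);
 *    ...
 *    on the last unref, try to recycle instead of destroying:
 *    if (!vn_renderer_shmem_cache_add(cache, shmem))
 *       renderer_destroy_shmem(renderer, shmem);
 */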
/* 3 seconds (os_time_get() returns microseconds) */
#define VN_RENDERER_SHMEM_CACHE_EXPIRY (3ll * 1000 * 1000)

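/* Initializes the cache: one free list (bucket) per power-of-two size
 * class, with destroy_func used to free shmems evicted from the cache.
 */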
void
vn_renderer_shmem_cache_init(struct vn_renderer_shmem_cache *cache,
                             struct vn_renderer *renderer,
                             vn_renderer_shmem_cache_destroy_func destroy_func)
{
   /* cache->bucket_mask is 32-bit and u_bit_scan is used */
   static_assert(ARRAY_SIZE(cache->buckets) <= 32, "");

   cache->renderer = renderer;
   cache->destroy_func = destroy_func;

   simple_mtx_init(&cache->mutex, mtx_plain);

   for (uint32_t i = 0; i < ARRAY_SIZE(cache->buckets); i++) {
      struct vn_renderer_shmem_bucket *bucket = &cache->buckets[i];
      list_inithead(&bucket->shmems);
   }

   cache->initialized = true;
}

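/* Destroys all cached shmems.  Note that u_bit_scan() consumes
 * cache->bucket_mask directly, clearing each bucket's bit as that bucket
 * is drained.
 */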
void
vn_renderer_shmem_cache_fini(struct vn_renderer_shmem_cache *cache)
{
   if (!cache->initialized)
      return;

   while (cache->bucket_mask) {
      const int idx = u_bit_scan(&cache->bucket_mask);
      struct vn_renderer_shmem_bucket *bucket = &cache->buckets[idx];

      list_for_each_entry_safe(struct vn_renderer_shmem, shmem,
                               &bucket->shmems, cache_head)
         cache->destroy_func(cache->renderer, shmem);
   }

   simple_mtx_destroy(&cache->mutex);
}

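/* Maps a shmem size to its bucket.  Only power-of-two sizes are cacheable:
 * a size of 1ull << n lands in buckets[n], and sizes at or above
 * 1ull << ARRAY_SIZE(cache->buckets) bytes are rejected.
 */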
static struct vn_renderer_shmem_bucket *
choose_bucket(struct vn_renderer_shmem_cache *cache,
              size_t size,
              int *out_idx)
{
   assert(size);
   if (unlikely(!util_is_power_of_two_or_zero64(size)))
      return NULL;

   const uint32_t idx = ffsll(size) - 1;
   if (unlikely(idx >= ARRAY_SIZE(cache->buckets)))
      return NULL;

   *out_idx = idx;
   return &cache->buckets[idx];
}

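/* Frees expired shmems.  cache_add appends with the current timestamp, so
 * each bucket's list is ordered oldest to newest and the walk can stop at
 * the first unexpired entry.  The newest entry is always kept, so a bucket
 * is never emptied here and cache->bucket_mask needs no update.
 */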
static void
vn_renderer_shmem_cache_remove_expired_locked(
   struct vn_renderer_shmem_cache *cache, int64_t now)
{
   uint32_t bucket_mask = cache->bucket_mask;
   while (bucket_mask) {
      const int idx = u_bit_scan(&bucket_mask);
      struct vn_renderer_shmem_bucket *bucket = &cache->buckets[idx];

      assert(!list_is_empty(&bucket->shmems));
      const struct vn_renderer_shmem *last_shmem = list_last_entry(
         &bucket->shmems, struct vn_renderer_shmem, cache_head);

      /* remove expired shmems but keep at least the last one */
      list_for_each_entry_safe(struct vn_renderer_shmem, shmem,
                               &bucket->shmems, cache_head) {
         if (shmem == last_shmem ||
             now - shmem->cache_timestamp < VN_RENDERER_SHMEM_CACHE_EXPIRY)
            break;

         list_del(&shmem->cache_head);
         cache->destroy_func(cache->renderer, shmem);
      }
   }
}

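/* Tries to hand a no-longer-referenced shmem over to the cache, reaping
 * expired shmems along the way.  Returns false when the size is not
 * cacheable, in which case the caller retains ownership.
 */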
bool
vn_renderer_shmem_cache_add(struct vn_renderer_shmem_cache *cache,
                            struct vn_renderer_shmem *shmem)
{
   assert(!vn_refcount_is_valid(&shmem->refcount));

   int idx;
   struct vn_renderer_shmem_bucket *bucket =
      choose_bucket(cache, shmem->mmap_size, &idx);
   if (!bucket)
      return false;

   const int64_t now = os_time_get();
   shmem->cache_timestamp = now;

   simple_mtx_lock(&cache->mutex);

   vn_renderer_shmem_cache_remove_expired_locked(cache, now);

   list_addtail(&shmem->cache_head, &bucket->shmems);
   cache->bucket_mask |= 1u << idx;

   simple_mtx_unlock(&cache->mutex);

   return true;
}

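/* Returns the oldest cached shmem of exactly size bytes, or NULL when the
 * size is not cacheable or its bucket is empty.
 */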
struct vn_renderer_shmem *
vn_renderer_shmem_cache_get(struct vn_renderer_shmem_cache *cache,
                            size_t size)
{
   int idx;
   struct vn_renderer_shmem_bucket *bucket = choose_bucket(cache, size, &idx);
   if (!bucket) {
      VN_TRACE_SCOPE("shmem cache skip");
      simple_mtx_lock(&cache->mutex);
      cache->debug.cache_skip_count++;
      simple_mtx_unlock(&cache->mutex);
      return NULL;
   }

   struct vn_renderer_shmem *shmem = NULL;

   simple_mtx_lock(&cache->mutex);
   if (cache->bucket_mask & (1u << idx)) {
      assert(!list_is_empty(&bucket->shmems));
      shmem = list_first_entry(&bucket->shmems, struct vn_renderer_shmem,
                               cache_head);
      list_del(&shmem->cache_head);

      if (list_is_empty(&bucket->shmems))
         cache->bucket_mask &= ~(1u << idx);

      cache->debug.cache_hit_count++;
   } else {
      VN_TRACE_SCOPE("shmem cache miss");
      cache->debug.cache_miss_count++;
   }
   simple_mtx_unlock(&cache->mutex);

   return shmem;
}

/* for debugging only */
void
vn_renderer_shmem_cache_debug_dump(struct vn_renderer_shmem_cache *cache)
{
   simple_mtx_lock(&cache->mutex);

   vn_log(NULL, "dumping shmem cache");
   vn_log(NULL, "  cache skip: %u", cache->debug.cache_skip_count);
   vn_log(NULL, "  cache hit: %u", cache->debug.cache_hit_count);
   vn_log(NULL, "  cache miss: %u", cache->debug.cache_miss_count);

   uint32_t bucket_mask = cache->bucket_mask;
   while (bucket_mask) {
      const int idx = u_bit_scan(&bucket_mask);
      const struct vn_renderer_shmem_bucket *bucket = &cache->buckets[idx];
      uint32_t count = 0;
      list_for_each_entry(struct vn_renderer_shmem, shmem, &bucket->shmems,
                          cache_head)
         count++;
      if (count)
         vn_log(NULL, "  buckets[%d]: %u shmems", idx, count);
   }

   simple_mtx_unlock(&cache->mutex);
}