/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * \file
 *
 * Helper library for carving out smaller allocations (called "(slab) entries")
 * from larger buffers (called "slabs").
 *
 * The library supports maintaining separate heaps (e.g. VRAM vs. GTT). The
 * meaning of each heap is treated as opaque by this library.
 *
 * The library allows delaying the re-use of an entry, i.e. an entry may be
 * freed by calling \ref pb_slab_free even while the corresponding buffer
 * region is still in use by the GPU. A callback function is called to
 * determine when it is safe to allocate the entry again; the user of this
 * library is expected to maintain the required fences or similar.
 */

#ifndef PB_SLAB_H
#define PB_SLAB_H

#include "pb_buffer.h"
#include "util/simple_mtx.h"
#include "util/list.h"
#include "os/os_thread.h"

struct pb_slab;
struct pb_slabs;
struct pb_slab_group;

/* Descriptor of a slab entry.
 *
 * The user of this utility library is expected to embed this in a larger
 * structure that describes a buffer object.
 */
struct pb_slab_entry
{
   struct list_head head; /* link in the slab's free list or the reclaim list */
   struct pb_slab *slab; /* the slab that contains this buffer */
   unsigned group_index; /* index into pb_slabs::groups */
   unsigned entry_size; /* size of this entry in bytes */
};

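/* For illustration only: a minimal sketch of how a driver might embed the
 * entry in its own buffer structure. "my_buffer" and "my_fence" are
 * hypothetical names, not part of this library.
 *
 *    struct my_buffer {
 *       struct pb_buffer base;
 *       struct pb_slab_entry entry;
 *       struct my_fence *fence;  // consulted by the can_reclaim callback
 *    };
 */
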
/* Descriptor of a slab from which many entries are carved out.
 *
 * The user of this utility library is expected to embed this in a larger
 * structure that describes the backing buffer from which the entries are
 * carved out.
 */
struct pb_slab
{
   struct list_head head; /* link in the slab group's list of slabs */

   struct list_head free; /* list of free pb_slab_entry structures */
   unsigned num_free; /* number of entries in free list */
   unsigned num_entries; /* total number of entries */
};

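/* For illustration only: a hypothetical wrapper (not part of this library)
 * tying a slab to its backing buffer and to storage for the per-entry
 * descriptors.
 *
 *    struct my_slab {
 *       struct pb_slab base;
 *       struct my_buffer *entries;  // array of num_entries descriptors
 *       struct pb_buffer *buffer;   // the large buffer being carved up
 *    };
 */
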
/* Callback function that is called when a new slab needs to be allocated
 * for fulfilling allocation requests of the given size from the given heap.
 *
 * The callback must allocate a pb_slab structure and the desired number
 * of entries. All entries that belong to the slab must be added to the free
 * list. Entries' pb_slab_entry structures must be initialized with the given
 * group_index.
 *
 * The callback may call pb_slab functions.
 */
typedef struct pb_slab *(slab_alloc_fn)(void *priv,
                                        unsigned heap,
                                        unsigned entry_size,
                                        unsigned group_index);

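/* A rough sketch of such a callback, building on the hypothetical my_slab and
 * my_buffer types above. MY_SLAB_BUFFER_SIZE is a made-up constant;
 * CALLOC_STRUCT/CALLOC come from util/u_memory.h and the list helpers from
 * util/list.h. Error handling and the allocation of the backing pb_buffer
 * are omitted.
 *
 *    static struct pb_slab *
 *    my_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
 *                  unsigned group_index)
 *    {
 *       struct my_slab *slab = CALLOC_STRUCT(my_slab);
 *       unsigned num_entries = MY_SLAB_BUFFER_SIZE / entry_size;
 *
 *       slab->entries = CALLOC(num_entries, sizeof(*slab->entries));
 *       slab->base.num_entries = num_entries;
 *       slab->base.num_free = num_entries;
 *       list_inithead(&slab->base.free);
 *
 *       for (unsigned i = 0; i < num_entries; i++) {
 *          struct pb_slab_entry *entry = &slab->entries[i].entry;
 *
 *          entry->slab = &slab->base;
 *          entry->group_index = group_index;
 *          entry->entry_size = entry_size;
 *          list_addtail(&entry->head, &slab->base.free);
 *       }
 *       return &slab->base;
 *    }
 */
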
/* Callback function that is called when all entries of a slab have been freed.
 *
 * The callback must free the slab and all its entries. It must not call any of
 * the pb_slab functions, or a deadlock (recursive mutex lock) may occur.
 */
typedef void (slab_free_fn)(void *priv, struct pb_slab *);

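/* The matching sketch of a free callback for the hypothetical my_slab type;
 * releasing the slab's backing buffer is left out. FREE comes from
 * util/u_memory.h.
 *
 *    static void
 *    my_slab_free(void *priv, struct pb_slab *pslab)
 *    {
 *       struct my_slab *slab = (struct my_slab *)pslab;
 *
 *       FREE(slab->entries);
 *       FREE(slab);
 *    }
 */
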
/* Callback function to determine whether a given entry can already be reused.
 */
typedef bool (slab_can_reclaim_fn)(void *priv, struct pb_slab_entry *);

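/* A sketch of a reclaim callback under the same assumptions; offsetof comes
 * from <stddef.h>, and my_fence_is_signalled stands in for whatever fencing
 * mechanism the user maintains.
 *
 *    static bool
 *    my_can_reclaim(void *priv, struct pb_slab_entry *entry)
 *    {
 *       struct my_buffer *buf = (struct my_buffer *)
 *          ((char *)entry - offsetof(struct my_buffer, entry));
 *
 *       return my_fence_is_signalled(buf->fence);
 *    }
 */
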
/* Manager of slab allocations. The user of this utility library should embed
 * this in a structure somewhere and call pb_slabs_init/deinit at init/shutdown
 * time.
 */
struct pb_slabs
{
   simple_mtx_t mutex;

   unsigned min_order;
   unsigned num_orders;
   unsigned num_heaps;
   bool allow_three_fourths_allocations;

   /* One group per (heap, order, three_fourths_allocations). */
   struct pb_slab_group *groups;

   /* List of entries waiting to be reclaimed, i.e. they have been passed to
    * pb_slab_free, but may not be safe for re-use yet. The tail points at
    * the most-recently freed entry.
    */
   struct list_head reclaim;

   void *priv;
   slab_can_reclaim_fn *can_reclaim;
   slab_alloc_fn *slab_alloc;
   slab_free_fn *slab_free;
};

/* Allocate an entry of at least the given size from the given heap.
 *
 * pb_slab_alloc is shorthand for pb_slab_alloc_reclaimed with reclaim_all =
 * false; passing true forces the entire reclaim list to be processed before
 * a new slab is allocated.
 */
struct pb_slab_entry *
pb_slab_alloc_reclaimed(struct pb_slabs *slabs, unsigned size, unsigned heap, bool reclaim_all);

struct pb_slab_entry *
pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap);

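/* For illustration, with the hypothetical my_buffer type from above:
 * recovering the embedding structure from a freshly allocated entry.
 *
 *    struct pb_slab_entry *entry = pb_slab_alloc(slabs, size, heap);
 *
 *    if (entry) {
 *       struct my_buffer *buf = (struct my_buffer *)
 *          ((char *)entry - offsetof(struct my_buffer, entry));
 *       // ... use buf; later, pb_slab_free(slabs, entry) ...
 *    }
 */
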
/* Free the given entry.
 *
 * The entry is placed on the reclaim list; it only becomes available for
 * re-use once the can_reclaim callback approves it.
 */
void
pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry);

/* Check the reclaim list and return all entries that can be reclaimed to
 * their slabs' free lists.
 */
void
pb_slabs_reclaim(struct pb_slabs *slabs);

/* Initialize the slabs manager.
 *
 * The minimum and maximum sizes of slab entries are 2^min_order and
 * 2^max_order, respectively. priv will be passed to the given callback
 * functions.
 */
bool
pb_slabs_init(struct pb_slabs *slabs,
              unsigned min_order, unsigned max_order,
              unsigned num_heaps, bool allow_three_fourth_allocations,
              void *priv,
              slab_can_reclaim_fn *can_reclaim,
              slab_alloc_fn *slab_alloc,
              slab_free_fn *slab_free);

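/* A sketch of typical setup and teardown using the hypothetical callbacks
 * above; the orders (8..16, i.e. entries from 256 bytes to 64 KB) and
 * NUM_HEAPS/my_priv are arbitrary example values.
 *
 *    struct pb_slabs slabs;
 *
 *    if (!pb_slabs_init(&slabs, 8, 16, NUM_HEAPS, false, my_priv,
 *                       my_can_reclaim, my_slab_alloc, my_slab_free))
 *       return false;
 *
 *    // ... allocate entries with pb_slab_alloc, free with pb_slab_free ...
 *
 *    pb_slabs_deinit(&slabs);
 */
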
/* Shut down the slabs manager and free all remaining slabs. All entries must
 * have been freed (passed to pb_slab_free) beforehand.
 */
void
pb_slabs_deinit(struct pb_slabs *slabs);

#endif