Lines matching refs: pool
81 static inline const char *pool_name(struct rxe_pool *pool)
83 return rxe_type_info[pool->type].name;
86 static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
91 if ((max - min + 1) < pool->max_elem) {
97 pool->max_index = max;
98 pool->min_index = min;
101 pool->table = kmalloc(size, GFP_KERNEL);
102 if (!pool->table) {
107 pool->table_size = size;
108 bitmap_zero(pool->table, max - min + 1);
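The filter only keeps lines that mention "pool", so the bitmap sizing between lines 98 and 101 is missing here. A plausible reconstruction of that step, assuming pool->table is an ordinary kernel bitmap with one bit per index (BITS_TO_LONGS is the stock idiom; the goto-style error exit is an assumption):

    size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
    pool->table = kmalloc(size, GFP_KERNEL);
    if (!pool->table) {
            err = -ENOMEM;          /* assumed: pairs with a goto-style exit */
            goto out;
    }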
116 struct rxe_pool *pool,
123 memset(pool, 0, sizeof(*pool));
125 pool->rxe = rxe;
126 pool->type = type;
127 pool->max_elem = max_elem;
128 pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
129 pool->flags = rxe_type_info[type].flags;
130 pool->tree = RB_ROOT;
131 pool->cleanup = rxe_type_info[type].cleanup;
133 atomic_set(&pool->num_elem, 0);
135 kref_init(&pool->ref_cnt);
137 rwlock_init(&pool->pool_lock);
140 err = rxe_pool_init_index(pool,
148 pool->key_offset = rxe_type_info[type].key_offset;
149 pool->key_size = rxe_type_info[type].key_size;
152 pool->state = RXE_POOL_STATE_VALID;
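Read together, lines 116-152 are the pool constructor: zero the struct, copy the per-type parameters out of rxe_type_info[], initialize the refcount and lock, then conditionally set up index or key support before marking the pool valid. A condensed sketch of the two conditional blocks elided around lines 139-149 (the RXE_POOL_INDEX/RXE_POOL_KEY flag names and the max_index/min_index fields are assumptions about rxe_type_info):

    if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
            err = rxe_pool_init_index(pool,
                                      rxe_type_info[type].max_index,
                                      rxe_type_info[type].min_index);
            if (err)
                    goto out;
    }

    if (rxe_type_info[type].flags & RXE_POOL_KEY) {
            pool->key_offset = rxe_type_info[type].key_offset;
            pool->key_size = rxe_type_info[type].key_size;
    }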
160 struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);
162 pool->state = RXE_POOL_STATE_INVALID;
163 kfree(pool->table);
166 static void rxe_pool_put(struct rxe_pool *pool)
168 kref_put(&pool->ref_cnt, rxe_pool_release);
171 void rxe_pool_cleanup(struct rxe_pool *pool)
175 write_lock_irqsave(&pool->pool_lock, flags);
176 pool->state = RXE_POOL_STATE_INVALID;
177 if (atomic_read(&pool->num_elem) > 0)
178 pr_warn("%s pool destroyed with unfreed elem\n",
179 pool_name(pool));
180 write_unlock_irqrestore(&pool->pool_lock, flags);
182 rxe_pool_put(pool);
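kref_init() at line 135 starts the count at 1, so rxe_pool_cleanup() both flags the pool invalid under the writer lock and drops that initial reference; pool->table is only freed in rxe_pool_release() once every outstanding element (each holding its own pool reference) is gone. An illustrative teardown call, assuming a pool embedded in the device structure (qp_pool is a hypothetical field name):

    /* After this, new allocations fail, but the bitmap and tree stay
     * live until the last element drops its pool reference.
     */
    rxe_pool_cleanup(&rxe->qp_pool);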
185 static u32 alloc_index(struct rxe_pool *pool)
188 u32 range = pool->max_index - pool->min_index + 1;
190 index = find_next_zero_bit(pool->table, range, pool->last);
192 index = find_first_zero_bit(pool->table, range);
195 set_bit(index, pool->table);
196 pool->last = index;
197 return index + pool->min_index;
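alloc_index() resumes scanning at pool->last so successive allocations walk forward through the index space, then wraps to a full scan once the tail of the bitmap fills up. The range check between lines 190 and 192 is filtered out; a sketch of the complete function, assuming callers guarantee a free bit exists:

    static u32 alloc_index(struct rxe_pool *pool)
    {
            u32 index;
            u32 range = pool->max_index - pool->min_index + 1;

            /* resume after the most recently handed-out index ... */
            index = find_next_zero_bit(pool->table, range, pool->last);
            if (index >= range)
                    /* ... wrapping to a scan from bit 0 when the tail is full */
                    index = find_first_zero_bit(pool->table, range);

            WARN_ON_ONCE(index >= range);   /* assumed: pool->max_elem bounds this */
            set_bit(index, pool->table);
            pool->last = index;
            return index + pool->min_index;
    }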
200 static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
202 struct rb_node **link = &pool->tree.rb_node;
222 rb_insert_color(&new->node, &pool->tree);
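Lines 202-222 elide the standard red-black-tree descent. A plausible reconstruction, ordered by elem->index and assuming duplicates are rejected with a warning (the early-return style is an assumption):

    struct rb_node *parent = NULL;
    struct rxe_pool_entry *elem;

    while (*link) {
            parent = *link;
            elem = rb_entry(parent, struct rxe_pool_entry, node);

            if (elem->index == new->index) {
                    pr_warn("element already exists!\n");
                    return;
            }

            if (elem->index > new->index)
                    link = &(*link)->rb_left;
            else
                    link = &(*link)->rb_right;
    }

    rb_link_node(&new->node, parent, link);
    rb_insert_color(&new->node, &pool->tree);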
227 static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
229 struct rb_node **link = &pool->tree.rb_node;
238 cmp = memcmp((u8 *)elem + pool->key_offset,
239 (u8 *)new + pool->key_offset, pool->key_size);
253 rb_insert_color(&new->node, &pool->tree);
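insert_key() walks the same shape of loop as insert_index(), but orders nodes with memcmp() over key_size raw bytes at key_offset inside each entry. The branch elided between lines 239 and 253 plausibly reads (the label name is an assumption):

    if (cmp == 0) {
            pr_warn("key already exists!\n");
            goto out;
    }

    if (cmp > 0)
            link = &(*link)->rb_left;
    else
            link = &(*link)->rb_right;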
261 struct rxe_pool *pool = elem->pool;
264 write_lock_irqsave(&pool->pool_lock, flags);
265 memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
266 insert_key(pool, elem);
267 write_unlock_irqrestore(&pool->pool_lock, flags);
273 struct rxe_pool *pool = elem->pool;
276 write_lock_irqsave(&pool->pool_lock, flags);
277 rb_erase(&elem->node, &pool->tree);
278 write_unlock_irqrestore(&pool->pool_lock, flags);
284 struct rxe_pool *pool = elem->pool;
287 write_lock_irqsave(&pool->pool_lock, flags);
288 elem->index = alloc_index(pool);
289 insert_index(pool, elem);
290 write_unlock_irqrestore(&pool->pool_lock, flags);
296 struct rxe_pool *pool = elem->pool;
299 write_lock_irqsave(&pool->pool_lock, flags);
300 clear_bit(elem->index - pool->min_index, pool->table);
301 rb_erase(&elem->node, &pool->tree);
302 write_unlock_irqrestore(&pool->pool_lock, flags);
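The four helpers above (rxe_add_key()/rxe_drop_key() around lines 261-278, rxe_add_index()/rxe_drop_index() around lines 284-302) are symmetric: each takes pool_lock as a writer around a single bitmap or tree mutation, so lookups never observe a half-inserted element. A hedged lifecycle sketch; qp_pool, the pelem member, and rxe_drop_ref() are assumed names from the surrounding driver:

    struct rxe_qp *qp = rxe_alloc(&rxe->qp_pool);

    if (!qp)
            return -ENOMEM;

    rxe_add_index(&qp->pelem);      /* now visible to rxe_pool_get_index() */
    /* ... use the QP ... */
    rxe_drop_index(&qp->pelem);     /* remove from bitmap and tree */
    rxe_drop_ref(qp);               /* final kref_put runs rxe_elem_release() */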
305 void *rxe_alloc(struct rxe_pool *pool)
310 might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
312 read_lock_irqsave(&pool->pool_lock, flags);
313 if (pool->state != RXE_POOL_STATE_VALID) {
314 read_unlock_irqrestore(&pool->pool_lock, flags);
317 kref_get(&pool->ref_cnt);
318 read_unlock_irqrestore(&pool->pool_lock, flags);
320 if (!ib_device_try_get(&pool->rxe->ib_dev))
323 if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
326 elem = kzalloc(rxe_type_info[pool->type].size,
327 (pool->flags & RXE_POOL_ATOMIC) ?
332 elem->pool = pool;
338 atomic_dec(&pool->num_elem);
339 ib_device_put(&pool->rxe->ib_dev);
341 rxe_pool_put(pool);
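rxe_alloc() validates the pool under the reader lock, pins both the pool (kref_get) and the ib_device, enforces max_elem, and only then allocates, choosing GFP_ATOMIC or GFP_KERNEL from the pool flags. The elided success path and unwind, reconstructed under the assumption that the function returns the entry pointer and uses goto-style labels (the label name is a guess):

    elem = kzalloc(rxe_type_info[pool->type].size,
                   (pool->flags & RXE_POOL_ATOMIC) ?
                   GFP_ATOMIC : GFP_KERNEL);
    if (!elem)
            goto out_cnt;

    elem->pool = pool;
    kref_init(&elem->ref_cnt);

    return elem;

    out_cnt:                        /* unwind: lines 338-341 */
            atomic_dec(&pool->num_elem);
            ib_device_put(&pool->rxe->ib_dev);
            rxe_pool_put(pool);
            return NULL;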
345 int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
349 might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
351 read_lock_irqsave(&pool->pool_lock, flags);
352 if (pool->state != RXE_POOL_STATE_VALID) {
353 read_unlock_irqrestore(&pool->pool_lock, flags);
356 kref_get(&pool->ref_cnt);
357 read_unlock_irqrestore(&pool->pool_lock, flags);
359 if (!ib_device_try_get(&pool->rxe->ib_dev))
362 if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
365 elem->pool = pool;
371 atomic_dec(&pool->num_elem);
372 ib_device_put(&pool->rxe->ib_dev);
374 rxe_pool_put(pool);
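rxe_add_to_pool() performs the same admission checks as rxe_alloc() but allocates nothing: the entry is embedded in memory the caller already owns, which is the RXE_POOL_NO_ALLOC case tested at line 387 below. Its elided success path is plausibly just:

    elem->pool = pool;
    kref_init(&elem->ref_cnt);

    return 0;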
382 struct rxe_pool *pool = elem->pool;
384 if (pool->cleanup)
385 pool->cleanup(elem);
387 if (!(pool->flags & RXE_POOL_NO_ALLOC))
389 atomic_dec(&pool->num_elem);
390 ib_device_put(&pool->rxe->ib_dev);
391 rxe_pool_put(pool);
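rxe_elem_release() is the kref release callback reached from the final reference drop on an element. The branch at line 387 guards the free; the filtered-out body is almost certainly a bare kfree, skipped when the object's memory belongs to the caller:

    if (!(pool->flags & RXE_POOL_NO_ALLOC))
            kfree(elem);            /* alloc'd by rxe_alloc(); otherwise caller-owned */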
394 void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
400 read_lock_irqsave(&pool->pool_lock, flags);
402 if (pool->state != RXE_POOL_STATE_VALID)
405 node = pool->tree.rb_node;
421 read_unlock_irqrestore(&pool->pool_lock, flags);
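Lines 405-421 elide the lookup walk. A sketch of the descent by index, with the reference taken before pool_lock is released so the element cannot be freed out from under the caller (the elem variable and the hit handling are assumptions):

    while (node) {
            elem = rb_entry(node, struct rxe_pool_entry, node);

            if (elem->index > index)
                    node = node->rb_left;
            else if (elem->index < index)
                    node = node->rb_right;
            else
                    break;
    }

    if (node)
            kref_get(&elem->ref_cnt);       /* pin before dropping pool_lock */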
425 void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
432 read_lock_irqsave(&pool->pool_lock, flags);
434 if (pool->state != RXE_POOL_STATE_VALID)
437 node = pool->tree.rb_node;
442 cmp = memcmp((u8 *)elem + pool->key_offset,
443 key, pool->key_size);
457 read_unlock_irqrestore(&pool->pool_lock, flags);
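rxe_pool_get_key() mirrors the index lookup but branches on the memcmp() result, matching the ordering that insert_key() established. A sketch of the elided walk, under the same assumptions as the index lookup above:

    while (node) {
            elem = rb_entry(node, struct rxe_pool_entry, node);

            cmp = memcmp((u8 *)elem + pool->key_offset,
                         key, pool->key_size);

            if (cmp > 0)
                    node = node->rb_left;
            else if (cmp < 0)
                    node = node->rb_right;
            else
                    break;
    }

    if (node)
            kref_get(&elem->ref_cnt);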