Lines Matching defs:chunk

40 static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
42 return chunk->end_addr - chunk->start_addr + 1;
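The +1 at line 42 works because end_addr is stored inclusively (see line 198 below: end_addr = virt + size - 1). A worked instance with made-up numbers:

    /* A chunk added at virt = 0x1000 with size = 0x100 stores
     * end_addr = 0x10ff, so chunk_size() recovers
     * 0x10ff - 0x1000 + 1 == 0x100 bytes. */
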
171 * gen_pool_add_owner - add a new chunk of special memory to the pool
172 * @pool: pool to add new memory chunk to
173 * @virt: virtual starting address of memory chunk to add to pool
174 * @phys: physical starting address of memory chunk to add to pool
175 * @size: size in bytes of the memory chunk to add to pool
176 * @nid: node id of the node the chunk structure and bitmap should be
180 * Add a new chunk of special memory to the specified pool.
187 struct gen_pool_chunk *chunk;
192 chunk = vzalloc_node(nbytes, nid);
193 if (unlikely(chunk == NULL))
196 chunk->phys_addr = phys;
197 chunk->start_addr = virt;
198 chunk->end_addr = virt + size - 1;
199 chunk->owner = owner;
200 atomic_long_set(&chunk->avail, size);
203 list_add_rcu(&chunk->next_chunk, &pool->chunks);
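Lines 187-203 are the registration path: the chunk header and its allocation bitmap come from a single vzalloc_node() call, and the finished chunk is published onto the RCU-protected list. A minimal caller-side sketch, assuming sram_virt, sram_phys and SRAM_SIZE are hypothetical placeholders for a real memory carveout:

    struct gen_pool *pool;
    int rc;

    /* min_alloc_order = 3: allocations are tracked in 8-byte units */
    pool = gen_pool_create(3, NUMA_NO_NODE);
    if (!pool)
            return -ENOMEM;

    rc = gen_pool_add_virt(pool, (unsigned long)sram_virt, sram_phys,
                           SRAM_SIZE, NUMA_NO_NODE);
    if (rc) {
            gen_pool_destroy(pool);
            return rc;
    }

gen_pool_add_virt() is the thin non-owner wrapper over the gen_pool_add_owner() shown above.
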
219 struct gen_pool_chunk *chunk;
223 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
224 if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
225 paddr = chunk->phys_addr + (addr - chunk->start_addr);
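Lines 219-225 show why the virt-to-phys lookup is cheap: a linear walk of the chunk list plus offset arithmetic within the matching chunk. A hedged usage sketch, where vaddr came from an earlier gen_pool_alloc() on the same pool:

    phys_addr_t paddr = gen_pool_virt_to_phys(pool, vaddr);

    if (paddr == (phys_addr_t)-1)   /* vaddr not covered by any chunk */
            return -EINVAL;
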
245 struct gen_pool_chunk *chunk;
250 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
251 list_del(&chunk->next_chunk);
253 end_bit = chunk_size(chunk) >> order;
254 bit = find_first_bit(chunk->bits, end_bit);
257 vfree(chunk);
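Lines 245-257 are the teardown loop: the find_first_bit() scan over each chunk's bitmap is how gen_pool_destroy() detects allocations that were never freed, and the source BUG()s in that case before the vfree(). So teardown order matters; a sketch with buf and BUF_SZ as placeholders:

    /* Free every outstanding allocation first; destroying a pool
     * with bitmap bits still set is a BUG(), not a graceful error. */
    gen_pool_free(pool, buf, BUF_SZ);
    gen_pool_destroy(pool);
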
270 * @owner: optionally retrieve the chunk owner
280 struct gen_pool_chunk *chunk;
297 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
298 if (size > atomic_long_read(&chunk->avail))
302 end_bit = chunk_size(chunk) >> order;
304 start_bit = algo(chunk->bits, end_bit, start_bit,
305 nbits, data, pool, chunk->start_addr);
308 remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
310 remain = bitmap_clear_ll(chunk->bits, start_bit,
316 addr = chunk->start_addr + ((unsigned long)start_bit << order);
318 atomic_long_sub(size, &chunk->avail);
320 *owner = chunk->owner;
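Lines 280-320 are the heart of the allocator: skip chunks with too little room (line 298), run the fit algorithm over the chunk bitmap (line 304), claim the bits with bitmap_set_ll(), roll back if the bits were raced away (line 310), then convert the winning bit index back into an address (line 316). Callers normally reach this through the wrappers; a hedged sketch:

    unsigned long vaddr = gen_pool_alloc(pool, 256);

    if (!vaddr)     /* 0 is the failure value, not a NULL-pointer check */
            return -ENOMEM;

gen_pool_alloc_algo_owner() itself is what the wrappers funnel into when the caller also wants the owner cookie back.
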
490 struct gen_pool_chunk *chunk;
503 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
504 if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
505 BUG_ON(addr + size - 1 > chunk->end_addr);
506 start_bit = (addr - chunk->start_addr) >> order;
507 remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
510 atomic_long_add(size, &chunk->avail);
512 *owner = chunk->owner;
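Lines 490-512 mirror the allocation path: clear the bits, add the size back onto chunk->avail, and hand back the owner cookie recorded at gen_pool_add_owner() time. A hedged sketch; passing NULL for the owner is what plain gen_pool_free() does:

    void *owner;

    gen_pool_free_owner(pool, vaddr, 256, &owner);
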
523 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
528 * Call @func for every chunk of generic memory pool. The @func is
532 void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
535 struct gen_pool_chunk *chunk;
538 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
539 func(pool, chunk, data);
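Per the RCU iteration at line 538, @func runs under the RCU read lock and therefore must not sleep. A minimal callback sketch that counts chunks:

    static void count_chunk(struct gen_pool *pool,
                            struct gen_pool_chunk *chunk, void *data)
    {
            (*(unsigned int *)data)++;      /* must not sleep here */
    }

    /* ... at the call site: */
    unsigned int nr_chunks = 0;

    gen_pool_for_each_chunk(pool, count_chunk, &nr_chunks);
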
558 struct gen_pool_chunk *chunk;
561 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
562 if (start >= chunk->start_addr && start <= chunk->end_addr) {
563 if (end <= chunk->end_addr) {
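Lines 558-563 show the containment test behind gen_pool_has_addr(): the start must fall inside some chunk (line 562) and the end must not run past that same chunk (line 563), so the whole range has to sit within a single chunk. A hedged sketch:

    /* Validate an externally supplied range before using it */
    if (!gen_pool_has_addr(pool, vaddr, 256))
            return -EINVAL;
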
582 struct gen_pool_chunk *chunk;
586 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
587 avail += atomic_long_read(&chunk->avail);
601 struct gen_pool_chunk *chunk;
605 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
606 size += chunk_size(chunk);
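Lines 582-606: both accounting helpers are linear walks over the chunk list, summing either the per-chunk atomic avail counter or chunk_size(). They are diagnostics rather than reservation primitives; the result can be stale by the time it is used. A sketch:

    size_t used = gen_pool_size(pool) - gen_pool_avail(pool);

    pr_debug("pool: %zu of %zu bytes in use\n", used, gen_pool_size(pool));
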
664 * @start_addr: start addr of allocation chunk
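@start_addr is the chunk base address that the allocation path passes into every fit algorithm (line 305 above); alignment-aware algorithms need it to align relative to the chunk rather than to bit 0. A hedged sketch of selecting such an algorithm:

    struct genpool_data_align align_data = { .align = 64 };

    /* Later gen_pool_alloc() calls return 64-byte-aligned blocks */
    gen_pool_set_algo(pool, gen_pool_first_fit_align, &align_data);
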
875 * Returns the pool that contains the chunk starting at the physical
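This last fragment (line 875) reads like the kernel-doc of a device-tree pool lookup; assuming it is of_gen_pool_get(), a consumer resolves a pool published by another node through a phandle. A hedged sketch, with "sram" as a hypothetical property name:

    struct gen_pool *pool = of_gen_pool_get(dev->of_node, "sram", 0);

    if (!pool)
            return -EPROBE_DEFER;   /* provider may not have probed yet */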