Lines Matching defs:pool
151 /* setup the initial settings for a buffer pool */
152 static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
156 pool->size = pool_size;
157 pool->index = pool_index;
158 pool->buff_size = buff_size;
159 pool->threshold = pool_size * 7 / 8;
160 pool->active = pool_active;
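
Taken together, the members touched here and below sketch the per-pool bookkeeping. A minimal, compilable reconstruction of struct ibmveth_buff_pool built only from the fields this listing actually references; the authoritative definition lives in ibmveth.h, and the kernel-type stand-ins below are illustrative:

#include <stdint.h>

/* Illustrative userspace stand-ins for kernel types. */
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t dma_addr_t;
typedef struct { int counter; } atomic_t;
struct sk_buff;				/* opaque here */

/* Reconstructed from the members used in this listing only; the driver's
 * real definition in ibmveth.h is authoritative and may differ. */
struct ibmveth_buff_pool {
	u32 size;			/* number of buffers in the pool */
	u32 index;			/* pool number, packed into the correlator */
	u32 buff_size;			/* bytes per receive buffer */
	u32 threshold;			/* replenish below size * 7 / 8 */
	u32 active;			/* pool participates in rx */
	atomic_t available;		/* buffers currently posted for rx */
	u32 consumer_index;		/* next free_map slot handed out */
	u32 producer_index;		/* next free_map slot returned to */
	u16 *free_map;			/* ring of free buffer indices */
	dma_addr_t *dma_addr;		/* per-buffer DMA mappings */
	struct sk_buff **skbuff;	/* per-buffer socket buffers */
};
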
163 /* allocate and setup a buffer pool - called during open */
164 static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
168 pool->free_map = kmalloc_array(pool->size, sizeof(u16), GFP_KERNEL);
170 if (!pool->free_map)
173 pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL);
174 if (!pool->dma_addr) {
175 kfree(pool->free_map);
176 pool->free_map = NULL;
180 pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
182 if (!pool->skbuff) {
183 kfree(pool->dma_addr);
184 pool->dma_addr = NULL;
186 kfree(pool->free_map);
187 pool->free_map = NULL;
191 for (i = 0; i < pool->size; ++i)
192 pool->free_map[i] = i;
194 atomic_set(&pool->available, 0);
195 pool->producer_index = 0;
196 pool->consumer_index = 0;
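
The allocation path is the usual staged-unwind pattern: each later failure frees everything allocated so far and resets the pointers to NULL, so a subsequent call to ibmveth_free_buffer_pool() stays safe. A compilable userspace analogue of that pattern, with malloc/calloc standing in for kmalloc_array/kcalloc (the helper name and types here are illustrative, not the driver's):

#include <stdint.h>
#include <stdlib.h>

/* Userspace analogue of ibmveth_alloc_buffer_pool(): allocate the three
 * per-buffer arrays, unwinding earlier allocations on any failure. */
static int pool_alloc_arrays(size_t n, uint16_t **free_map,
			     uint64_t **dma_addr, void ***skbuff)
{
	*free_map = malloc(n * sizeof(**free_map));
	if (!*free_map)
		return -1;

	*dma_addr = calloc(n, sizeof(**dma_addr));
	if (!*dma_addr) {
		free(*free_map);
		*free_map = NULL;
		return -1;
	}

	*skbuff = calloc(n, sizeof(**skbuff));
	if (!*skbuff) {
		free(*dma_addr);
		*dma_addr = NULL;
		free(*free_map);
		*free_map = NULL;
		return -1;
	}

	/* Initially every slot is free: free_map[i] simply holds i. */
	for (size_t i = 0; i < n; i++)
		(*free_map)[i] = (uint16_t)i;
	return 0;
}
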
209 /* replenish the buffers for a pool. note that we don't need to
213 struct ibmveth_buff_pool *pool)
216 u32 count = pool->size - atomic_read(&pool->available);
229 skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
238 free_index = pool->consumer_index;
239 pool->consumer_index++;
240 if (pool->consumer_index >= pool->size)
241 pool->consumer_index = 0;
242 index = pool->free_map[free_index];
245 BUG_ON(pool->skbuff[index] != NULL);
248 pool->buff_size, DMA_FROM_DEVICE);
253 pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
254 pool->dma_addr[index] = dma_addr;
255 pool->skbuff[index] = skb;
257 correlator = ((u64)pool->index << 32) | index;
260 desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
264 unsigned int len = min(pool->buff_size,
281 atomic_add(buffers_added, &(pool->available));
285 pool->free_map[free_index] = index;
286 pool->skbuff[index] = NULL;
287 if (pool->consumer_index == 0)
288 pool->consumer_index = pool->size - 1;
290 pool->consumer_index--;
293 pool->dma_addr[index], pool->buff_size,
299 atomic_add(buffers_added, &(pool->available));
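
The correlator built here packs the pool number into the upper 32 bits and the buffer index into the lower 32 bits; that is how ibmveth_remove_buffer() and ibmveth_rxq_get_buffer() later locate the right slot. A small self-contained check of the encoding (pure arithmetic, nothing driver-specific):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a (pool, buffer) pair the way the replenish path does:
 * correlator = ((u64)pool->index << 32) | index. */
static uint64_t pack_correlator(uint32_t pool_index, uint32_t buf_index)
{
	return ((uint64_t)pool_index << 32) | buf_index;
}

int main(void)
{
	uint64_t c = pack_correlator(3, 712);

	/* The receive-side helpers recover both halves like this. */
	unsigned int pool  = c >> 32;
	unsigned int index = c & 0xffffffffu;

	assert(pool == 3 && index == 712);
	printf("pool=%u index=%u\n", pool, index);
	return 0;
}
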
322 struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
324 if (pool->active &&
325 (atomic_read(&pool->available) < pool->threshold))
326 ibmveth_replenish_buffer_pool(adapter, pool);
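
Replenishing is driven per pool by the 7/8 threshold set in ibmveth_init_buffer_pool(): a pool is only topped up once the number of posted buffers drops below that mark. A compilable illustration of the check, using a hypothetical pool size:

#include <stdio.h>

/* Mirror of the threshold test: replenish when fewer than 7/8 of the
 * pool's buffers are currently available on the receive queue. */
static int needs_replenish(unsigned int size, unsigned int available)
{
	unsigned int threshold = size * 7 / 8;

	return available < threshold;
}

int main(void)
{
	/* Hypothetical pool of 512 buffers: threshold is 448. */
	printf("%d\n", needs_replenish(512, 500)); /* 0: above threshold */
	printf("%d\n", needs_replenish(512, 400)); /* 1: replenish */
	return 0;
}
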
332 /* empty and free a buffer pool - also used to do cleanup in error paths */
334 struct ibmveth_buff_pool *pool)
338 kfree(pool->free_map);
339 pool->free_map = NULL;
341 if (pool->skbuff && pool->dma_addr) {
342 for (i = 0; i < pool->size; ++i) {
343 struct sk_buff *skb = pool->skbuff[i];
346 pool->dma_addr[i],
347 pool->buff_size,
350 pool->skbuff[i] = NULL;
355 if (pool->dma_addr) {
356 kfree(pool->dma_addr);
357 pool->dma_addr = NULL;
360 if (pool->skbuff) {
361 kfree(pool->skbuff);
362 pool->skbuff = NULL;
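
Teardown mirrors allocation in reverse, and every branch tolerates a partially constructed pool (any of the three arrays may be NULL if open failed midway), which is why the same helper doubles as error-path cleanup. A userspace analogue, with free() standing in for the per-buffer dma_unmap_single()/dev_kfree_skb() work:

#include <stdint.h>
#include <stdlib.h>

/* Userspace analogue of ibmveth_free_buffer_pool(): safe to call on a
 * partially constructed pool because every pointer is checked and then
 * reset to NULL. */
static void pool_free_arrays(size_t n, uint16_t **free_map,
			     uint64_t **dma_addr, void ***skbuff)
{
	free(*free_map);
	*free_map = NULL;

	if (*skbuff && *dma_addr) {
		for (size_t i = 0; i < n; i++) {
			/* In the driver this unmaps and frees each buffer
			 * that is still posted. */
			free((*skbuff)[i]);
			(*skbuff)[i] = NULL;
		}
	}

	free(*dma_addr);
	*dma_addr = NULL;
	free(*skbuff);
	*skbuff = NULL;
}
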
366 /* remove a buffer from a pool */
370 unsigned int pool = correlator >> 32;
375 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
376 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
378 skb = adapter->rx_buff_pool[pool].skbuff[index];
382 adapter->rx_buff_pool[pool].skbuff[index] = NULL;
385 adapter->rx_buff_pool[pool].dma_addr[index],
386 adapter->rx_buff_pool[pool].buff_size,
389 free_index = adapter->rx_buff_pool[pool].producer_index;
390 adapter->rx_buff_pool[pool].producer_index++;
391 if (adapter->rx_buff_pool[pool].producer_index >=
392 adapter->rx_buff_pool[pool].size)
393 adapter->rx_buff_pool[pool].producer_index = 0;
394 adapter->rx_buff_pool[pool].free_map[free_index] = index;
398 atomic_dec(&(adapter->rx_buff_pool[pool].available));
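
Returning a received buffer's index to free_map at producer_index, while the replenish path consumes indices at consumer_index, makes free_map a plain ring of free slot numbers. A compilable sketch of those two index movements (INVALID_MAP below is a stand-in for IBM_VETH_INVALID_MAP, and the pool size is arbitrary):

#include <assert.h>
#include <stdint.h>

#define POOL_SIZE 8
#define INVALID_MAP 0xffff	/* stand-in for IBM_VETH_INVALID_MAP */

static uint16_t free_map[POOL_SIZE];
static unsigned int consumer_index, producer_index;

/* Replenish side: take the next free buffer index out of the ring. */
static uint16_t take_free_index(void)
{
	unsigned int slot = consumer_index;
	uint16_t index = free_map[slot];

	free_map[slot] = INVALID_MAP;
	if (++consumer_index >= POOL_SIZE)
		consumer_index = 0;
	return index;
}

/* Receive side: hand a buffer index back once its skb has been consumed. */
static void return_free_index(uint16_t index)
{
	free_map[producer_index] = index;
	if (++producer_index >= POOL_SIZE)
		producer_index = 0;
}

int main(void)
{
	for (unsigned int i = 0; i < POOL_SIZE; i++)
		free_map[i] = (uint16_t)i;

	uint16_t idx = take_free_index();
	return_free_index(idx);
	assert(free_map[0] == idx);
	return 0;
}
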
405 unsigned int pool = correlator >> 32;
408 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
409 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
411 return adapter->rx_buff_pool[pool].skbuff[index];
419 unsigned int pool = correlator >> 32;
425 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
426 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
428 if (!adapter->rx_buff_pool[pool].active) {
430 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
435 adapter->rx_buff_pool[pool].buff_size;
436 desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
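
When a buffer is recycled instead of being handed up the stack, its descriptor is rebuilt from the pool's buff_size and the saved DMA address, with a valid flag OR'd into the length word. A simplified sketch of that packing; the flag value and field widths are assumptions here, not the driver's actual descriptor union from ibmveth.h:

#include <assert.h>
#include <stdint.h>

/* Assumed layout: top bit marks the descriptor valid, low bits carry the
 * buffer length. See ibmveth.h for the real constants and union. */
#define BUF_VALID 0x80000000u

struct buf_desc {
	uint32_t flags_len;
	uint64_t address;	/* width simplified for illustration */
};

static struct buf_desc rebuild_desc(uint32_t buff_size, uint64_t dma_addr)
{
	struct buf_desc d = {
		.flags_len = BUF_VALID | buff_size,
		.address   = dma_addr,
	};
	return d;
}

int main(void)
{
	struct buf_desc d = rebuild_desc(2048, 0x1000);

	assert(d.flags_len == (BUF_VALID | 2048));
	assert(d.address == 0x1000);
	return 0;
}
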
620 netdev_err(netdev, "unable to alloc pool\n");
1535 /* Look for an active buffer pool that can hold the new MTU */
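
The MTU change path succeeds only if at least one active pool's buff_size can hold the new maximum frame plus the driver's per-buffer overhead. A hedged sketch of that scan; the pool count, overhead constant, and names below are illustrative stand-ins, not the driver's definitions:

#include <stdbool.h>

#define NUM_POOLS 5
#define BUFF_OVERHEAD 22	/* illustrative per-buffer overhead */

struct pool_view {
	bool active;
	unsigned int buff_size;
};

/* Return true if some active pool can hold new_mtu plus overhead. */
static bool mtu_fits_some_pool(const struct pool_view pools[NUM_POOLS],
			       unsigned int new_mtu)
{
	unsigned int needed = new_mtu + BUFF_OVERHEAD;

	for (int i = 0; i < NUM_POOLS; i++)
		if (pools[i].active && pools[i].buff_size >= needed)
			return true;
	return false;
}
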
1746 &dev->dev.kobj, "pool%d", i);
1803 struct ibmveth_buff_pool *pool = container_of(kobj,
1808 return sprintf(buf, "%d\n", pool->active);
1810 return sprintf(buf, "%d\n", pool->size);
1812 return sprintf(buf, "%d\n", pool->buff_size);
1819 struct ibmveth_buff_pool *pool = container_of(kobj,
1828 if (value && !pool->active) {
1830 if (ibmveth_alloc_buffer_pool(pool)) {
1832 "unable to alloc pool\n");
1835 pool->active = 1;
1840 pool->active = 1;
1842 } else if (!value && pool->active) {
1845 /* Make sure there is a buffer pool with buffers that
1848 if (pool == &adapter->rx_buff_pool[i])
1857 netdev_err(netdev, "no active pool >= MTU\n");
1863 pool->active = 0;
1867 pool->active = 0;
1875 pool->size = value;
1879 pool->size = value;
1888 pool->buff_size = value;
1892 pool->buff_size = value;
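
The pool store handler above behaves as a small state machine: enabling an inactive pool allocates its buffers first (and on a running interface re-posts them), while disabling an active pool is refused unless some other active pool can still hold an MTU-sized frame. A condensed, hypothetical sketch of just that activate/deactivate decision, with the driver side effects stubbed out:

#include <stdbool.h>
#include <stdio.h>

struct pool_state {
	bool active;
	bool buffers_allocated;
};

/* Hypothetical condensation of the "active" attribute store path:
 * returns 0 on success, -1 if the transition is not allowed. */
static int set_pool_active(struct pool_state *pool, bool value,
			   bool another_pool_covers_mtu)
{
	if (value && !pool->active) {
		/* In the driver: ibmveth_alloc_buffer_pool() and, if the
		 * device is up, posting the new buffers for receive. */
		pool->buffers_allocated = true;
		pool->active = true;
		return 0;
	}

	if (!value && pool->active) {
		/* Refuse to disable the last pool large enough for the MTU. */
		if (!another_pool_covers_mtu) {
			fprintf(stderr, "no active pool >= MTU\n");
			return -1;
		}
		pool->active = false;
	}
	return 0;
}
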