Lines Matching defs:pool (drivers/net/ethernet/ibm/ibmveth.c)
144 /* set up the initial settings for a buffer pool */
145 static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
149 pool->size = pool_size;
150 pool->index = pool_index;
151 pool->buff_size = buff_size;
152 pool->threshold = pool_size * 7 / 8;
153 pool->active = pool_active;
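
The 7/8 threshold set at file line 152 is the low-water mark the replenish task compares against. A minimal standalone sketch of the same arithmetic (the demo_* names and struct are illustrative stand-ins, not the driver's real ibmveth_buff_pool):

#include <stdio.h>

struct demo_pool {
	unsigned int size;	/* number of buffers in the pool */
	unsigned int index;	/* which of the adapter's pools this is */
	unsigned int buff_size;	/* bytes per buffer */
	unsigned int threshold;	/* replenish when available drops below this */
	int active;
};

static void demo_init_pool(struct demo_pool *pool, unsigned int pool_index,
			   unsigned int pool_size, unsigned int buff_size,
			   int pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;	/* 7/8 low-water mark */
	pool->active = pool_active;
}

int main(void)
{
	struct demo_pool p;

	demo_init_pool(&p, 0, 512, 2048, 1);
	printf("threshold = %u\n", p.threshold);	/* prints 448 */
	return 0;
}

With a 512-buffer pool the watermark comes out at 448, so replenishment kicks in once more than 64 buffers are outstanding.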
156 /* allocate and set up a buffer pool - called during open */
157 static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
161 pool->free_map = kmalloc_array(pool->size, sizeof(u16), GFP_KERNEL);
163 if (!pool->free_map)
166 pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL);
167 if (!pool->dma_addr) {
168 kfree(pool->free_map);
169 pool->free_map = NULL;
173 pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
175 if (!pool->skbuff) {
176 kfree(pool->dma_addr);
177 pool->dma_addr = NULL;
179 kfree(pool->free_map);
180 pool->free_map = NULL;
184 for (i = 0; i < pool->size; ++i)
185 pool->free_map[i] = i;
187 atomic_set(&pool->available, 0);
188 pool->producer_index = 0;
189 pool->consumer_index = 0;
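
The allocation path above builds three parallel arrays and unwinds them in reverse order on failure; free_map then starts as the identity mapping, so slot i initially hands out buffer index i. A userspace sketch under that reading, with malloc/calloc standing in for kmalloc_array/kcalloc and plain types standing in for dma_addr_t and struct sk_buff *:

#include <stdint.h>
#include <stdlib.h>

struct demo_pool {
	unsigned int size;
	uint16_t *free_map;
	uint64_t *dma_addr;	/* stands in for dma_addr_t */
	void **skbuff;		/* stands in for struct sk_buff * */
};

static int demo_alloc_pool(struct demo_pool *pool)
{
	unsigned int i;

	pool->free_map = malloc(pool->size * sizeof(uint16_t));
	if (!pool->free_map)
		return -1;

	pool->dma_addr = calloc(pool->size, sizeof(uint64_t));
	if (!pool->dma_addr) {
		free(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = calloc(pool->size, sizeof(void *));
	if (!pool->skbuff) {
		free(pool->dma_addr);
		pool->dma_addr = NULL;
		free(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	/* identity mapping: slot i initially hands out buffer index i */
	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = (uint16_t)i;

	return 0;
}

int main(void)
{
	struct demo_pool p = { .size = 512 };

	return demo_alloc_pool(&p) ? 1 : 0;
}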
202 /* replenish the buffers for a pool. note that we don't need to
206 struct ibmveth_buff_pool *pool)
209 u32 count = pool->size - atomic_read(&pool->available);
222 skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
231 free_index = pool->consumer_index;
232 pool->consumer_index++;
233 if (pool->consumer_index >= pool->size)
234 pool->consumer_index = 0;
235 index = pool->free_map[free_index];
238 BUG_ON(pool->skbuff[index] != NULL);
241 pool->buff_size, DMA_FROM_DEVICE);
246 pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
247 pool->dma_addr[index] = dma_addr;
248 pool->skbuff[index] = skb;
250 correlator = ((u64)pool->index << 32) | index;
253 desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
257 unsigned int len = min(pool->buff_size,
274 atomic_add(buffers_added, &(pool->available));
278 pool->free_map[free_index] = index;
279 pool->skbuff[index] = NULL;
280 if (pool->consumer_index == 0)
281 pool->consumer_index = pool->size - 1;
283 pool->consumer_index--;
286 pool->dma_addr[index], pool->buff_size,
292 atomic_add(buffers_added, &(pool->available));
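
File line 250 is the key to the lookup functions further down: the 64-bit correlator carries the pool index in its high 32 bits and the buffer index in the low 32, which is exactly what the later `correlator >> 32` extractions undo. A round-trip sketch (encode is a hypothetical helper; the driver builds the value inline):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t encode(uint32_t pool, uint32_t index)
{
	return ((uint64_t)pool << 32) | index;
}

int main(void)
{
	uint64_t correlator = encode(2, 137);
	unsigned int pool  = correlator >> 32;          /* recovers 2 */
	unsigned int index = correlator & 0xffffffffu;  /* recovers 137 */

	assert(pool == 2 && index == 137);
	printf("pool=%u index=%u\n", pool, index);
	return 0;
}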
315 struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
317 if (pool->active &&
318 (atomic_read(&pool->available) < pool->threshold))
319 ibmveth_replenish_buffer_pool(adapter, pool);
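
The replenish task only touches pools that are both active and below their watermark. A hedged sketch of that scan, with NUM_POOLS and the demo_* helpers as stand-ins for IBMVETH_NUM_BUFF_POOLS and the real replenish path:

#include <assert.h>

#define NUM_POOLS 5	/* stand-in for IBMVETH_NUM_BUFF_POOLS */

struct demo_pool {
	int active;
	int available;	/* atomic_t in the driver; a plain int here */
	int threshold;
};

/* Refill stand-in: just tops the count back up to the watermark. */
static void demo_replenish(struct demo_pool *pool)
{
	pool->available = pool->threshold;
}

/* Mirrors the scan above: only active pools below the watermark
 * get replenished. */
static void demo_replenish_task(struct demo_pool *pools)
{
	int i;

	for (i = 0; i < NUM_POOLS; i++)
		if (pools[i].active && pools[i].available < pools[i].threshold)
			demo_replenish(&pools[i]);
}

int main(void)
{
	struct demo_pool pools[NUM_POOLS] = {
		[0] = { .active = 1, .available = 10, .threshold = 448 },
		[1] = { .active = 0, .available = 10, .threshold = 448 },
	};

	demo_replenish_task(pools);
	assert(pools[0].available == 448);	/* refilled */
	assert(pools[1].available == 10);	/* inactive pool untouched */
	return 0;
}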
325 /* empty and free a buffer pool - also used to do cleanup in error paths */
327 struct ibmveth_buff_pool *pool)
331 kfree(pool->free_map);
332 pool->free_map = NULL;
334 if (pool->skbuff && pool->dma_addr) {
335 for (i = 0; i < pool->size; ++i) {
336 struct sk_buff *skb = pool->skbuff[i];
339 pool->dma_addr[i],
340 pool->buff_size,
343 pool->skbuff[i] = NULL;
348 if (pool->dma_addr) {
349 kfree(pool->dma_addr);
350 pool->dma_addr = NULL;
353 if (pool->skbuff) {
354 kfree(pool->skbuff);
355 pool->skbuff = NULL;
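
Teardown mirrors allocation: drop the free map, release any buffers still held (the driver also dma_unmap_single()s each one first), then free the parallel arrays, NULLing each pointer so a second call from an error path is harmless. A userspace sketch with free() standing in for kfree() and dev_kfree_skb():

#include <stdlib.h>

struct demo_pool {
	unsigned int size;
	unsigned short *free_map;
	unsigned long long *dma_addr;	/* stands in for dma_addr_t */
	void **skbuff;			/* stands in for struct sk_buff * */
};

static void demo_free_pool(struct demo_pool *pool)
{
	unsigned int i;

	free(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			if (pool->skbuff[i]) {
				/* driver: dma_unmap_single() + dev_kfree_skb() */
				free(pool->skbuff[i]);
				pool->skbuff[i] = NULL;
			}
		}
	}

	free(pool->dma_addr);	/* free(NULL) is a no-op, so no guard needed */
	pool->dma_addr = NULL;
	free(pool->skbuff);
	pool->skbuff = NULL;
}

int main(void)
{
	struct demo_pool p = { 0 };

	demo_free_pool(&p);	/* safe even when nothing was allocated */
	return 0;
}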
359 /* remove a buffer from a pool */
363 unsigned int pool = correlator >> 32;
368 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
369 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
371 skb = adapter->rx_buff_pool[pool].skbuff[index];
375 adapter->rx_buff_pool[pool].skbuff[index] = NULL;
378 adapter->rx_buff_pool[pool].dma_addr[index],
379 adapter->rx_buff_pool[pool].buff_size,
382 free_index = adapter->rx_buff_pool[pool].producer_index;
383 adapter->rx_buff_pool[pool].producer_index++;
384 if (adapter->rx_buff_pool[pool].producer_index >=
385 adapter->rx_buff_pool[pool].size)
386 adapter->rx_buff_pool[pool].producer_index = 0;
387 adapter->rx_buff_pool[pool].free_map[free_index] = index;
391 atomic_dec(&(adapter->rx_buff_pool[pool].available));
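
This is the producer side of the free_map ring: a returned buffer's index is pushed at producer_index, mirroring the consumer_index pop in the replenish path, so free slots circulate between the two cursors. A pure ring-logic sketch (no DMA), with POOL_SIZE as an illustrative constant:

#include <assert.h>

#define POOL_SIZE 8	/* illustrative; the driver uses pool->size */

struct demo_ring {
	unsigned short free_map[POOL_SIZE];
	unsigned int producer_index;
};

static void demo_return_buffer(struct demo_ring *r, unsigned short index)
{
	unsigned int free_index = r->producer_index;

	r->producer_index++;
	if (r->producer_index >= POOL_SIZE)
		r->producer_index = 0;	/* wrap, as at file lines 384-386 */
	r->free_map[free_index] = index;
}

int main(void)
{
	struct demo_ring r = { .producer_index = POOL_SIZE - 1 };

	demo_return_buffer(&r, 3);
	assert(r.producer_index == 0);		/* wrapped around */
	assert(r.free_map[POOL_SIZE - 1] == 3);	/* index pushed back */
	return 0;
}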
398 unsigned int pool = correlator >> 32;
401 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
402 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
404 return adapter->rx_buff_pool[pool].skbuff[index];
412 unsigned int pool = correlator >> 32;
418 BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
419 BUG_ON(index >= adapter->rx_buff_pool[pool].size);
421 if (!adapter->rx_buff_pool[pool].active) {
423 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
428 adapter->rx_buff_pool[pool].buff_size;
429 desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
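
Recycling keeps the existing DMA mapping, so re-posting only needs a rebuilt descriptor: the valid flag OR'd with the buffer length in flags_len, plus the stored DMA address. A sketch of that packing; the union mirrors the shape suggested by the listing's desc.fields accesses but is not the driver's exact definition:

#include <stdint.h>
#include <stdio.h>

#define DEMO_BUF_VALID 0x80000000u	/* stand-in for IBMVETH_BUF_VALID */

union demo_buf_desc {
	uint64_t desc;
	struct {
		uint32_t flags_len;	/* valid bit OR'd with buffer length */
		uint32_t address;	/* DMA address of the buffer */
	} fields;
};

int main(void)
{
	union demo_buf_desc desc;

	desc.fields.flags_len = DEMO_BUF_VALID | 2048;
	desc.fields.address = 0x1000;	/* stand-in for dma_addr[index] */
	printf("flags_len=%#x address=%#x\n",
	       (unsigned)desc.fields.flags_len, (unsigned)desc.fields.address);
	return 0;
}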
587 netdev_err(netdev, "unable to alloc pool\n");
1527 /* Look for an active buffer pool that can hold the new MTU */
1738 &dev->dev.kobj, "pool%d", i);
1785 struct ibmveth_buff_pool *pool = container_of(kobj,
1790 return sprintf(buf, "%d\n", pool->active);
1792 return sprintf(buf, "%d\n", pool->size);
1794 return sprintf(buf, "%d\n", pool->buff_size);
1801 struct ibmveth_buff_pool *pool = container_of(kobj,
1811 if (value && !pool->active) {
1813 if (ibmveth_alloc_buffer_pool(pool)) {
1815 "unable to alloc pool\n");
1818 pool->active = 1;
1825 pool->active = 1;
1827 } else if (!value && pool->active) {
1830 /* Make sure there is a buffer pool with buffers that
1833 if (pool == &adapter->rx_buff_pool[i])
1842 netdev_err(netdev, "no active pool >= MTU\n");
1849 pool->active = 0;
1854 pool->active = 0;
1864 pool->size = value;
1868 pool->size = value;
1879 pool->buff_size = value;
1883 pool->buff_size = value;
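
All of the sysfs show/store handlers from file line 1785 on recover the pool from the kobject embedded in it via container_of. A userspace sketch of that pattern (demo_* types are illustrative; the real macro comes from the kernel headers):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_kobj { int refcount; };	/* stand-in for struct kobject */

struct demo_pool {
	unsigned int size;
	unsigned int buff_size;
	int active;
	struct demo_kobj kobj;	/* embedded, as in ibmveth_buff_pool */
};

static int demo_show_active(struct demo_kobj *kobj)
{
	struct demo_pool *pool = container_of(kobj, struct demo_pool, kobj);

	return pool->active;
}

int main(void)
{
	struct demo_pool p = { .size = 512, .buff_size = 2048, .active = 1 };

	printf("active=%d\n", demo_show_active(&p.kobj));
	return 0;
}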