Lines Matching defs:pool

46  * struct fc_exch_pool - Per cpu exchange pool
49 * @lock: Exch pool lock
56 * assigned range of exchanges to per cpu pool.
75 * @pool_max_index: Max exch array index in exch pool
76 * @pool: Per cpu exch pool
84 struct fc_exch_pool __percpu *pool;
203 * - If the EM pool lock and ex_lock must be taken at the same time, then the
204 * EM pool lock must be taken before the ex_lock.
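
The rule at lines 203-204 fixes the lock ordering: whenever both the per-CPU pool lock and an exchange's ex_lock are needed, the pool lock comes first. A minimal sketch of code honoring that ordering (ep stands for a struct fc_exch taken from the pool; only the two locks named in this listing are assumed):

    /* Sketch only: documented order is EM pool lock first, then ex_lock. */
    spin_lock_bh(&pool->lock);      /* per-cpu exchange pool lock          */
    spin_lock_bh(&ep->ex_lock);     /* then the individual exchange's lock */
    /* ... update both the pool and the exchange here ...                  */
    spin_unlock_bh(&ep->ex_lock);   /* release in reverse order            */
    spin_unlock_bh(&pool->lock);
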
404 * fc_exch_ptr_get() - Return an exchange from an exchange pool
405 * @pool: Exchange Pool to get an exchange from
406 * @index: Index of the exchange within the pool
408 * Use the index to get an exchange from within an exchange pool. exches
412 static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
415 struct fc_exch **exches = (struct fc_exch **)(pool + 1);
420 * fc_exch_ptr_set() - Assign an exchange to a slot in an exchange pool
421 * @pool: The pool to assign the exchange to
422 * @index: The index in the pool where the exchange will be assigned
423 * @ep: The exchange to assign to the pool
425 static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
428 ((struct fc_exch **)(pool + 1))[index] = ep;
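
fc_exch_ptr_get() and fc_exch_ptr_set() above rely on the array of exchange pointers being laid out directly behind struct fc_exch_pool in the same allocation, so casting (pool + 1) yields the first array slot. A self-contained userspace sketch of that "header followed by trailing array" idiom, with generic names that only stand in for the libfc types:

    #include <stdio.h>
    #include <stdlib.h>

    struct item;                        /* stands in for struct fc_exch      */

    struct pool {                       /* stands in for struct fc_exch_pool */
            unsigned int nr_slots;
            /* an array of nr_slots struct item pointers follows this struct */
    };

    static inline struct item **pool_slots(struct pool *p)
    {
            return (struct item **)(p + 1);   /* same trick as (pool + 1) */
    }

    int main(void)
    {
            unsigned int nr = 8;
            /* one allocation: header plus the trailing pointer array */
            struct pool *p = calloc(1, sizeof(*p) + nr * sizeof(struct item *));

            if (!p)
                    return 1;
            p->nr_slots = nr;
            pool_slots(p)[3] = NULL;          /* index into the trailing array */
            printf("slot 3 = %p\n", (void *)pool_slots(p)[3]);
            free(p);
            return 0;
    }
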
437 struct fc_exch_pool *pool;
440 pool = ep->pool;
441 spin_lock_bh(&pool->lock);
442 WARN_ON(pool->total_exches <= 0);
443 pool->total_exches--;
448 if (pool->left == FC_XID_UNKNOWN)
449 pool->left = index;
450 else if (pool->right == FC_XID_UNKNOWN)
451 pool->right = index;
453 pool->next_index = index;
454 fc_exch_ptr_set(pool, index, NULL);
456 fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
459 spin_unlock_bh(&pool->lock);
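
The fc_exch_delete() lines above (437-459) show the release path: take the pool lock, drop total_exches, cache the freed index in the left or right hint (or make it the next_index to try), then either clear the slot or park the fc_quarantine_exch sentinel in it. A condensed sketch reconstructed from those lines; the quarantine condition and the exact branch structure around it are not in this listing and are assumptions:

    /* Sketch of fc_exch_delete(), reconstructed from the lines above. */
    pool = ep->pool;
    spin_lock_bh(&pool->lock);
    WARN_ON(pool->total_exches <= 0);
    pool->total_exches--;

    if (!quarantined) {                          /* placeholder condition     */
            if (pool->left == FC_XID_UNKNOWN)
                    pool->left = index;          /* cache freed slot as hint  */
            else if (pool->right == FC_XID_UNKNOWN)
                    pool->right = index;
            else
                    pool->next_index = index;    /* else restart scan here    */
            fc_exch_ptr_set(pool, index, NULL);  /* slot is free again        */
    } else {
            /* keep the xid out of circulation */
            fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
    }
    spin_unlock_bh(&pool->lock);
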
818 struct fc_exch_pool *pool;
829 pool = per_cpu_ptr(mp->pool, cpu);
830 spin_lock_bh(&pool->lock);
833 if (pool->left != FC_XID_UNKNOWN) {
834 if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) {
835 index = pool->left;
836 pool->left = FC_XID_UNKNOWN;
840 if (pool->right != FC_XID_UNKNOWN) {
841 if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) {
842 index = pool->right;
843 pool->right = FC_XID_UNKNOWN;
848 index = pool->next_index;
849 /* allocate new exch from pool */
850 while (fc_exch_ptr_get(pool, index)) {
852 if (index == pool->next_index)
855 pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
866 fc_exch_ptr_set(pool, index, ep);
867 list_add_tail(&ep->ex_list, &pool->ex_list);
869 pool->total_exches++;
870 spin_unlock_bh(&pool->lock);
877 ep->pool = pool;
888 spin_unlock_bh(&pool->lock);
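
The fc_exch_em_alloc() lines (818-888) show the allocation strategy: pick the current CPU's pool, prefer the cached left hint, then the right hint, otherwise scan from next_index for a free slot, wrapping at pool_max_index. The chosen slot gets the new exchange, which also joins ex_list while total_exches is bumped. A condensed sketch reconstructed from those lines (the WARN_ON checks and the error path behind the goto are trimmed):

    /* Sketch of the index selection in fc_exch_em_alloc(). */
    pool = per_cpu_ptr(mp->pool, cpu);
    spin_lock_bh(&pool->lock);

    if (pool->left != FC_XID_UNKNOWN &&
        !fc_exch_ptr_get(pool, pool->left)) {
            index = pool->left;                   /* reuse cached free slot   */
            pool->left = FC_XID_UNKNOWN;
    } else if (pool->right != FC_XID_UNKNOWN &&
               !fc_exch_ptr_get(pool, pool->right)) {
            index = pool->right;
            pool->right = FC_XID_UNKNOWN;
    } else {
            index = pool->next_index;
            while (fc_exch_ptr_get(pool, index)) {        /* linear scan      */
                    index = index == mp->pool_max_index ? 0 : index + 1;
                    if (index == pool->next_index)
                            goto err;             /* wrapped: pool exhausted  */
            }
            pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
    }

    fc_exch_ptr_set(pool, index, ep);             /* publish the new exchange */
    list_add_tail(&ep->ex_list, &pool->ex_list);
    pool->total_exches++;
    spin_unlock_bh(&pool->lock);
    ep->pool = pool;
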
929 struct fc_exch_pool *pool;
943 pool = per_cpu_ptr(mp->pool, cpu);
944 spin_lock_bh(&pool->lock);
945 ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
954 spin_unlock_bh(&pool->lock);
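
fc_exch_find() (lines 929-954) turns an XID back into an exchange: the low-order bits select the per-CPU pool and the remaining bits, offset by min_xid, index into that pool's array. A short sketch of that mapping; fc_cpu_mask is assumed here as the companion of the fc_cpu_order shift seen above, and the validation step is only outlined:

    /* Sketch: split an xid into (cpu, index) the way fc_exch_find() does. */
    u16 cpu   = xid & fc_cpu_mask;                    /* which per-cpu pool */
    u16 index = (xid - mp->min_xid) >> fc_cpu_order;  /* which array slot   */

    pool = per_cpu_ptr(mp->pool, cpu);
    spin_lock_bh(&pool->lock);
    ep = fc_exch_ptr_get(pool, index);
    /* validate: quarantine sentinel, xid mismatch, take a reference, ...   */
    spin_unlock_bh(&pool->lock);
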
1923 * fc_exch_pool_reset() - Reset a per cpu exchange pool
1924 * @lport: The local port that the exchange pool is on
1925 * @pool: The exchange pool to be reset
1929 * Resets a per cpu exchange pool, releasing all of its sequences
1935 struct fc_exch_pool *pool,
1941 spin_lock_bh(&pool->lock);
1943 list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
1948 spin_unlock_bh(&pool->lock);
1953 spin_lock_bh(&pool->lock);
1962 pool->next_index = 0;
1963 pool->left = FC_XID_UNKNOWN;
1964 pool->right = FC_XID_UNKNOWN;
1965 spin_unlock_bh(&pool->lock);
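
fc_exch_pool_reset() (lines 1923-1965) walks the pool's ex_list under the pool lock, dropping the lock around each per-exchange reset (hence the re-lock at line 1953 and the restart of the walk), and finally restores next_index and the left/right hints to their initial values. A brief sketch of the loop shape; the match filter, the hold/release pair, and the per-exchange teardown helper are not in this listing and are placeholders:

    /* Sketch of the fc_exch_pool_reset() walk. */
    spin_lock_bh(&pool->lock);
    restart:
    list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
            if (!exch_matches(ep, lport))         /* placeholder filter       */
                    continue;
            fc_exch_hold(ep);
            spin_unlock_bh(&pool->lock);          /* drop lock for the reset  */
            fc_reset_one_exch(ep);                /* placeholder helper       */
            fc_exch_release(ep);
            spin_lock_bh(&pool->lock);
            goto restart;         /* list may have changed while unlocked     */
    }
    pool->next_index = 0;         /* restore initial scan state and hints     */
    pool->left = FC_XID_UNKNOWN;
    pool->right = FC_XID_UNKNOWN;
    spin_unlock_bh(&pool->lock);
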
1987 per_cpu_ptr(ema->mp->pool, cpu),
2416 free_percpu(mp->pool);
2471 struct fc_exch_pool *pool;
2492 /* reduce range so the per cpu pool fits into PCPU_MIN_UNIT_SIZE */
2493 pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
2509 * Setup per cpu exch pool with entire exchange id range equally
2511 * allocated for exch range per pool.
2516 * Allocate and initialize per cpu exch pool
2518 pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
2519 mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
2520 if (!mp->pool)
2523 pool = per_cpu_ptr(mp->pool, cpu);
2524 pool->next_index = 0;
2525 pool->left = FC_XID_UNKNOWN;
2526 pool->right = FC_XID_UNKNOWN;
2527 spin_lock_init(&pool->lock);
2528 INIT_LIST_HEAD(&pool->ex_list);
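
fc_exch_mgr_alloc() (lines 2471-2528) first clamps the exchange ID range so that one pool header plus its pointer array fits within PCPU_MIN_UNIT_SIZE, then allocates one such block per CPU with __alloc_percpu() and initializes every pool's indices, hints, lock, and list. A condensed sketch assembled from those lines (the surrounding range arithmetic is abbreviated and the error label is assumed):

    /* Sketch of the per-cpu pool allocation in fc_exch_mgr_alloc(). */
    size_t pool_size;
    unsigned int cpu;

    /* keep the header plus pointer array within one per-cpu chunk */
    pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
                      sizeof(struct fc_exch *);
    /* ... split min_xid..max_xid equally across the pools (abbreviated) ... */

    pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
    mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
    if (!mp->pool)
            goto free_mempool;                    /* error label assumed */

    for_each_possible_cpu(cpu) {
            pool = per_cpu_ptr(mp->pool, cpu);
            pool->next_index = 0;
            pool->left = FC_XID_UNKNOWN;
            pool->right = FC_XID_UNKNOWN;
            spin_lock_init(&pool->lock);
            INIT_LIST_HEAD(&pool->ex_list);
    }
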
2533 free_percpu(mp->pool);
2685 * in per cpu exch pool.
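
The closing comment fragment (line 2685) refers to the mapping between an exchange ID and an array index in the per-CPU pool, which is governed by fc_cpu_order (seen in the shift at line 945) and its companion mask. A likely shape for that setup, assuming the CPU count is rounded up to a power of two as the comment describes; neither line below appears in this listing:

    /* Sketch: derive the xid-splitting constants described above. */
    fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
    fc_cpu_mask = (1 << fc_cpu_order) - 1;
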