Lines Matching refs:bucket
148 held not per host, but per port pair and TW bucket is used as state
151 If the TW bucket has already been destroyed, we fall back to VJ's scheme
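
The two comment fragments above (source lines 148 and 151) describe the timestamp cache being kept per port pair, with the TIME-WAIT bucket itself acting as the state holder, and a per-host fallback once that bucket is gone. The standalone sketch below only illustrates that lookup order; struct tw_bucket, tw_lookup() and peer_ts_lookup() are made-up names, not the kernel's data structures.

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical per-port-pair TIME-WAIT state; not the kernel's struct. */
    struct tw_bucket {
        uint32_t ts_recent;            /* last timestamp seen on this port pair */
    };

    /* Stub lookups standing in for the real TW hash and peer table. */
    static struct tw_bucket *tw_lookup(uint16_t lport, uint16_t rport)
    {
        (void)lport; (void)rport;
        return NULL;                   /* pretend the TW bucket is already gone */
    }

    static uint32_t peer_ts_lookup(uint32_t peer_addr)
    {
        (void)peer_addr;
        return 0;                      /* per-host fallback value */
    }

    /* While the TW bucket lives, the timestamp cache is per port pair and the
     * bucket itself holds the state; once it has been destroyed, fall back to
     * a per-host value in the spirit of VJ's scheme. */
    static uint32_t last_ts_seen(uint32_t peer_addr, uint16_t lport, uint16_t rport)
    {
        struct tw_bucket *tw = tw_lookup(lport, rport);

        if (tw)
            return tw->ts_recent;
        return peer_ts_lookup(peer_addr);
    }
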
2328 /* Clean up a referenced TCP bind bucket. */
2357 /* Find a non-empty bucket (starting from st->bucket)
2366 for (; st->bucket <= hinfo->lhash2_mask; st->bucket++) {
2371 ilb2 = &hinfo->lhash2[st->bucket];
2386 /* Find the next sk of "cur" within the same bucket (i.e. st->bucket).
2387 * If "cur" is the last one in st->bucket,
2389 * non-empty bucket.
2409 ilb2 = &hinfo->lhash2[st->bucket];
2411 ++st->bucket;
2420 st->bucket = 0;
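
The fragments from source lines 2357-2420 come from the listening-hash walk: find the first non-empty bucket at or after st->bucket, hand out entries from it, and fall through to the next non-empty bucket when the current one is exhausted. A minimal self-contained model of that pattern, with illustrative types (struct table, struct iter_state) in place of the kernel's inet_hashinfo:

    #include <stddef.h>

    /* Illustrative types, not the kernel's: an array of singly linked lists
     * plus an iterator that remembers which bucket it is in (st->bucket). */
    struct node { struct node *next; };

    struct table {
        struct node **buckets;         /* buckets[0..mask] */
        size_t mask;
    };

    struct iter_state {
        size_t bucket;                 /* plays the role of st->bucket */
    };

    /* Find the first entry of a non-empty bucket, starting from st->bucket. */
    static struct node *get_first(struct table *t, struct iter_state *st)
    {
        for (; st->bucket <= t->mask; st->bucket++) {
            if (t->buckets[st->bucket])
                return t->buckets[st->bucket];
        }
        return NULL;
    }

    /* Return the entry after "cur" in the same bucket, or the first entry of
     * the next non-empty bucket when "cur" was the last one. */
    static struct node *get_next(struct table *t, struct iter_state *st,
                                 struct node *cur)
    {
        if (cur->next)
            return cur->next;
        ++st->bucket;
        return get_first(t, st);
    }
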
2434 return hlist_nulls_empty(&hinfo->ehash[st->bucket].chain);
2438 * Get first established socket starting from bucket given in st->bucket.
2439 * If st->bucket is zero, the very first socket in the hash is returned.
2447 for (; st->bucket <= hinfo->ehash_mask; ++st->bucket) {
2450 spinlock_t *lock = inet_ehash_lockp(hinfo, st->bucket);
2459 sk_nulls_for_each(sk, node, &hinfo->ehash[st->bucket].chain) {
2486 spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2487 ++st->bucket;
2496 st->bucket = 0;
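
Source lines 2434-2496 show the established-hash variant of the same walk, where each bucket chain has an associated lock (inet_ehash_lockp() maps a bucket index to its spinlock), empty buckets are skipped without locking, and the lock is dropped before advancing to the next bucket. A rough user-space sketch of that locking discipline, using pthread mutexes and illustrative types in place of the kernel's nulls lists and spinlocks:

    #include <pthread.h>
    #include <stddef.h>

    /* Illustrative types: one lock per bucket here, whereas the kernel may
     * share one spinlock among several buckets. */
    struct enode { struct enode *next; };

    struct ehash_table {
        struct enode **buckets;
        pthread_mutex_t *locks;        /* one lock per bucket */
        size_t mask;
    };

    struct eiter {
        size_t bucket;                 /* st->bucket */
    };

    /* Skip empty buckets without locking; return the first entry of a
     * non-empty bucket with that bucket's lock held. */
    static struct enode *established_get_first(struct ehash_table *t,
                                               struct eiter *st)
    {
        for (; st->bucket <= t->mask; ++st->bucket) {
            if (!t->buckets[st->bucket])      /* cheap emptiness check */
                continue;

            pthread_mutex_lock(&t->locks[st->bucket]);
            if (t->buckets[st->bucket])
                return t->buckets[st->bucket];
            pthread_mutex_unlock(&t->locks[st->bucket]);
        }
        return NULL;
    }

    /* Stay in the current bucket while it has more entries; otherwise drop
     * its lock and continue with the next non-empty bucket. */
    static struct enode *established_get_next(struct ehash_table *t,
                                              struct eiter *st,
                                              struct enode *cur)
    {
        if (cur->next)
            return cur->next;

        pthread_mutex_unlock(&t->locks[st->bucket]);
        ++st->bucket;
        return established_get_first(t, st);
    }
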
2526 int bucket = st->bucket;
2533 if (st->bucket > hinfo->lhash2_mask)
2536 while (offset-- && rc && bucket == st->bucket)
2540 st->bucket = 0;
2544 if (st->bucket > hinfo->ehash_mask)
2547 while (offset-- && rc && bucket == st->bucket)
2569 st->bucket = 0;
2594 st->bucket = 0;
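
Source lines 2526-2594 belong to the seek-to-last-position logic: the iterator saves a bucket index and an in-bucket offset, and on resume it re-enters the saved bucket and skips that many entries, but only while it is still inside the saved bucket (the bucket == st->bucket test). A sketch of that resume step, reusing struct table, struct iter_state, get_first() and get_next() from the listening-hash sketch above and simplified to a single table, whereas the kernel falls through from the listening hash to the established hash:

    /* Resume an interrupted walk at a saved (bucket, offset) position. */
    static struct node *seek_last_pos(struct table *t, struct iter_state *st,
                                      size_t offset)
    {
        size_t bucket = st->bucket;    /* remember which bucket was saved */
        struct node *rc;

        if (st->bucket > t->mask)
            return NULL;               /* saved position is past the table */

        rc = get_first(t, st);
        /* Skip the saved number of entries, but only while the walk is still
         * inside the saved bucket; once it crosses into another bucket the
         * old offset no longer means anything. */
        while (offset-- && rc && bucket == st->bucket)
            rc = get_next(t, st, rc);
        return rc;
    }
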
2618 spin_unlock(&hinfo->lhash2[st->bucket].lock);
2622 spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2837 spin_unlock(&hinfo->lhash2[st->bucket].lock);
2865 spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
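
The unlock lines at 2618, 2622, 2837 and 2865 all release the lock of the bucket the walk is currently standing in, either when the sequence stops or before the walk moves elsewhere. A tiny stop-path sketch against the established-hash example above (walk_stop() is an illustrative name):

    /* established_get_first() in the sketch above returns with the current
     * bucket's lock held, so stopping in the middle of a bucket must drop
     * exactly that lock. */
    static void walk_stop(struct ehash_table *t, struct eiter *st,
                          struct enode *cur)
    {
        if (cur)
            pthread_mutex_unlock(&t->locks[st->bucket]);
    }
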
2879 /* The st->bucket is done. Directly advance to the next
2880 * bucket instead of having tcp_seek_last_pos() skip
2881 * one by one in the current bucket and eventually find out
2882 * it has to advance to the next bucket.
2886 st->bucket++;
2888 st->bucket > hinfo->lhash2_mask) {
2890 st->bucket = 0;
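
Source lines 2879-2890 describe an optimisation of the saved position: when the current bucket has been fully consumed, the saved bucket index is advanced (and the offset cleared) right away, so the next tcp_seek_last_pos() does not have to walk through the finished bucket entry by entry; running past the last bucket resets the index to zero. A sketch of just that bookkeeping, with illustrative parameter names:

    #include <stddef.h>

    /* Advance the saved position to the start of the next bucket once the
     * current one is done; wrapping the index back to zero mirrors the point
     * where the kernel's walk moves on from the listening hash to the
     * established hash. */
    static void advance_past_done_bucket(size_t *bucket, size_t *offset,
                                         size_t mask)
    {
        (*bucket)++;
        *offset = 0;
        if (*bucket > mask)
            *bucket = 0;
    }
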
2949 /* Move st->offset to the next sk in the bucket such that
2951 * st->bucket. See tcp_seek_last_pos().
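
The final fragment (source lines 2949-2951) is about keeping st->offset in step with the walk, so that a later tcp_seek_last_pos() resumes right after the last entry handed out. A small sketch of that bookkeeping, with struct resume_pos and note_entry_consumed() as illustrative names:

    #include <stddef.h>

    /* Bump the in-bucket offset for every entry handed out, and restart the
     * count whenever the walk enters a new bucket, so a later resume lands on
     * the entry after the last one returned. */
    struct resume_pos {
        size_t bucket;                 /* bucket the walk is currently in */
        size_t offset;                 /* entries already consumed in it */
    };

    static void note_entry_consumed(struct resume_pos *pos, size_t current_bucket)
    {
        if (pos->bucket != current_bucket) {
            pos->bucket = current_bucket;
            pos->offset = 0;
        }
        pos->offset++;
    }
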