Lines Matching refs:hash (kernel/bpf/hashtab.c)
99 u32 n_buckets; /* number of hash buckets */
121 u32 hash;
236 u32 hash)
238 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
282 offsetof(struct htab_elem, hash) -
446 /* hash table size must be power of 2; roundup_pow_of_two() can overflow
527 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
529 return &htab->buckets[hash & (htab->n_buckets - 1)];
532 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
534 return &__select_bucket(htab, hash)->head;
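
The two helpers above (lines 527-534) reduce a 32-bit hash to a bucket index with a mask rather than a modulo; this only works because n_buckets (line 99) is rounded up to a power of two (see the comment at line 446). A minimal userspace sketch of the idiom, with hypothetical names (toy_select_bucket, TOY_N_BUCKETS) standing in for the kernel's:

    #include <stdint.h>

    #define TOY_N_BUCKETS 64                /* must be a power of two */

    /* hash % n == hash & (n - 1) whenever n is a power of two */
    static inline uint32_t toy_select_bucket(uint32_t hash)
    {
            return hash & (TOY_N_BUCKETS - 1);
    }
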
538 static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
545 if (l->hash == hash && !memcmp(&l->key, key, key_size))
556 u32 hash, void *key,
564 if (l->hash == hash && !memcmp(&l->key, key, key_size))
567 if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
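
lookup_elem_raw() rejects non-matching elements on the cached full hash before paying for a memcmp() of the key; lookup_nulls_elem_raw() additionally re-checks the hlist_nulls terminator (line 567): under RCU an element can be freed and reinserted into another bucket mid-walk, and a terminator encoding the wrong bucket index tells the walker it drifted, so it retries. A minimal sketch of the fast-path comparison only, with hypothetical toy_* names and a plain singly-linked chain in place of hlist_nulls:

    #include <stdint.h>
    #include <string.h>
    #include <stddef.h>

    struct toy_elem {
            struct toy_elem *next;
            uint32_t hash;                  /* full hash cached at insert time */
            char key[16];
    };

    /* Cheap u32 compare first; memcmp() the key only on a hash match,
     * mirroring the l->hash == hash && !memcmp(...) test at line 545. */
    static struct toy_elem *toy_lookup(struct toy_elem *head, uint32_t hash,
                                       const void *key, size_t key_size)
    {
            for (struct toy_elem *l = head; l; l = l->next)
                    if (l->hash == hash && !memcmp(l->key, key, key_size))
                            return l;
            return NULL;
    }
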
583 u32 hash, key_size;
589 hash = htab_map_hash(key, key_size, htab->hashrnd);
591 head = select_bucket(htab, hash);
593 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
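
Putting the lookup flow (lines 583-593) together: the key is hashed once with a per-map seed (hashrnd; in hashtab.c the hash function is jhash), the hash picks the bucket, and the same value drives the chain walk. A hedged continuation of the toy sketches above; toy_hash() is a hypothetical stand-in for htab_map_hash(), not its real mixing:

    /* illustrative hash only, not jhash; seeded like htab_map_hash() */
    static uint32_t toy_hash(const void *key, size_t len, uint32_t seed)
    {
            const unsigned char *p = key;
            uint32_t h = seed;

            while (len--)
                    h = h * 31 + *p++;
            return h;
    }

    /* lookup flow: hash once, mask to a bucket, walk that chain */
    static struct toy_elem *toy_map_lookup(struct toy_elem **buckets,
                                           uint32_t hashrnd,
                                           const void *key, size_t key_size)
    {
            uint32_t hash = toy_hash(key, key_size, hashrnd);

            return toy_lookup(buckets[hash & (TOY_N_BUCKETS - 1)],
                              hash, key, key_size);
    }
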
696 b = __select_bucket(htab, tgt_l->hash);
718 u32 hash, key_size;
728 hash = htab_map_hash(key, key_size, htab->hashrnd);
730 head = select_bucket(htab, hash);
733 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
743 /* if next elem in this hash list is non-zero, just return it */
748 /* no more elements in this hash list, go to the next bucket */
749 i = hash & (htab->n_buckets - 1);
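
The get-next-key path (lines 718-749) resumes iteration from the caller's key: if that key still hashes to a live element, the next element in its chain is returned (line 743); otherwise the walk advances to the following bucket, whose index is recomputed from the hash at line 749. A sketch of the bucket-advance step, again with hypothetical toy_* names:

    struct toy_htab {
            struct toy_elem **buckets;
            uint32_t n_buckets;             /* power of two */
    };

    /* After exhausting bucket i's chain, return the first element of
     * the next non-empty bucket, or NULL once the table is done. */
    static struct toy_elem *toy_next_bucket(struct toy_htab *htab, uint32_t i)
    {
            while (++i < htab->n_buckets)
                    if (htab->buckets[i])
                            return htab->buckets[i];
            return NULL;
    }
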
861 void *value, u32 key_size, u32 hash,
937 l_new->hash = hash;
967 u32 key_size, hash;
978 hash = htab_map_hash(key, key_size, htab->hashrnd);
980 b = __select_bucket(htab, hash);
987 l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
1007 l_old = lookup_elem_raw(head, hash, key, key_size);
1027 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
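
The update path (lines 861-1027) computes the hash once and reuses it for bucket selection, the duplicate-key lookup, and finally the new element itself (line 937), so later lookups never rehash. The kernel links the replacement in before unlinking the old element, so concurrent RCU readers always find the key. A sketch of that insert-then-remove ordering on the plain toy chain, without the bucket lock and RCU list primitives hashtab.c uses:

    static void toy_update(struct toy_htab *htab, struct toy_elem *l_new,
                           struct toy_elem *l_old, uint32_t hash)
    {
            struct toy_elem **pp = &htab->buckets[hash & (htab->n_buckets - 1)];

            l_new->hash = hash;             /* cached for future lookups */

            /* link the new element at the head first ... */
            l_new->next = *pp;
            *pp = l_new;

            /* ... then unlink the old one, if the key already existed */
            if (l_old) {
                    while (*pp != l_old)
                            pp = &(*pp)->next;
                    *pp = l_old->next;
            }
    }
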
1058 u32 key_size, hash;
1069 hash = htab_map_hash(key, key_size, htab->hashrnd);
1071 b = __select_bucket(htab, hash);
1079 l_new = prealloc_lru_pop(htab, key, hash);
1086 l_old = lookup_elem_raw(head, hash, key, key_size);
1122 u32 key_size, hash;
1133 hash = htab_map_hash(key, key_size, htab->hashrnd);
1135 b = __select_bucket(htab, hash);
1140 l_old = lookup_elem_raw(head, hash, key, key_size);
1147 /* per-cpu hash map can update value in-place */
1152 hash, true, onallcpus, NULL);
1174 u32 key_size, hash;
1185 hash = htab_map_hash(key, key_size, htab->hashrnd);
1187 b = __select_bucket(htab, hash);
1196 l_new = prealloc_lru_pop(htab, key, hash);
1203 l_old = lookup_elem_raw(head, hash, key, key_size);
1212 /* per-cpu hash map can update value in-place */
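
Both per-CPU update paths note that an existing value can be updated in place (lines 1147, 1212): when the key is already present, the new value is copied into the live element's per-CPU storage instead of swapping in a replacement element. The onallcpus flag distinguishes a syscall update, where the user buffer carries one value per possible CPU, from an update by a running BPF program, which touches only the current CPU's copy. A userspace sketch of that split, with a plain pointer array standing in for the kernel's percpu allocation:

    #include <stddef.h>
    #include <string.h>

    #define TOY_NR_CPUS 4

    static void toy_pcpu_copy(void *pptr[TOY_NR_CPUS], const char *value,
                              size_t size, int this_cpu, int onallcpus)
    {
            if (!onallcpus) {
                    /* BPF program context: touch only this CPU's copy */
                    memcpy(pptr[this_cpu], value, size);
                    return;
            }

            /* syscall context: value holds one copy per CPU, back to
             * back (the kernel's per-CPU stride is rounded up to 8) */
            for (int cpu = 0; cpu < TOY_NR_CPUS; cpu++)
                    memcpy(pptr[cpu], value + cpu * size, size);
    }
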
1250 u32 hash, key_size;
1257 hash = htab_map_hash(key, key_size, htab->hashrnd);
1258 b = __select_bucket(htab, hash);
1263 l = lookup_elem_raw(head, hash, key, key_size);
1282 u32 hash, key_size;
1289 hash = htab_map_hash(key, key_size, htab->hashrnd);
1290 b = __select_bucket(htab, hash);
1295 l = lookup_elem_raw(head, hash, key, key_size);
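
Both delete paths (lines 1250-1295) repeat the lookup dance: hash the key, mask to a bucket, take the bucket lock, find the element with lookup_elem_raw(), and unlink it; the LRU variant then returns the node to the free list rather than freeing it. A single-threaded sketch of the unlink step, continuing the toy types above:

    #include <errno.h>

    static int toy_delete(struct toy_htab *htab, const void *key,
                          size_t key_size, uint32_t hash)
    {
            struct toy_elem **pp = &htab->buckets[hash & (htab->n_buckets - 1)];

            for (; *pp; pp = &(*pp)->next) {
                    struct toy_elem *l = *pp;

                    if (l->hash == hash && !memcmp(l->key, key, key_size)) {
                            *pp = l->next;  /* hlist_nulls_del_rcu() in the kernel */
                            return 0;
                    }
            }
            return -ENOENT;                 /* key was not present */
    }
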
1423 /* while experimenting with hash tables with sizes ranging from 10 to
1654 void *percpu_value_buf; // non-zero means percpu hash