Lines matching refs:ht in lib/rhashtable.c: every listed line references the hash table handle ht (struct rhashtable *). The leading number on each line is its line number in that file.

37 static u32 head_hashfn(struct rhashtable *ht,
41 return rht_head_hashfn(ht, tbl, he, ht->p);
47 int lockdep_rht_mutex_is_held(struct rhashtable *ht)
49 return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
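
head_hashfn() derives an entry's bucket from its embedded struct rhash_head: rht_head_hashfn() maps the head pointer back to the containing object via ht->p.head_offset and hashes the key bytes found at ht->p.key_offset. A minimal sketch of the object layout this assumes, using a hypothetical struct test_obj and test_params (illustrative names, not from the source):

#include <linux/rhashtable.h>

/* Hypothetical object: an integer key plus the embedded hash head. */
struct test_obj {
        u32                     key;
        struct rhash_head       node;   /* linkage used by the table */
};

/*
 * These offsets are what rht_obj() and rht_head_hashfn() rely on:
 * head_offset maps a struct rhash_head back to its object, and
 * key_offset/key_len locate the bytes fed to the hash function.
 */
static const struct rhashtable_params test_params = {
        .key_len        = sizeof(u32),
        .key_offset     = offsetof(struct test_obj, key),
        .head_offset    = offsetof(struct test_obj, node),
};

With key_len set and no hashfn supplied, rhashtable_init() falls back to jhash (or jhash2 when the key length is a multiple of 4), as the init lines further down show.
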
122 static union nested_table *nested_table_alloc(struct rhashtable *ht,
147 static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
164 if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
175 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
189 tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
211 static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
218 tbl = rht_dereference_rcu(tbl->future_tbl, ht);
224 static int rhashtable_rehash_one(struct rhashtable *ht,
228 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
229 struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
254 new_hash = head_hashfn(ht, new_tbl, entry);
274 static int rhashtable_rehash_chain(struct rhashtable *ht,
277 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
285 while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
295 static int rhashtable_rehash_attach(struct rhashtable *ht,
312 static int rhashtable_rehash_table(struct rhashtable *ht)
314 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
320 new_tbl = rht_dereference(old_tbl->future_tbl, ht);
325 err = rhashtable_rehash_chain(ht, old_hash);
332 rcu_assign_pointer(ht->tbl, new_tbl);
334 spin_lock(&ht->lock);
346 spin_unlock(&ht->lock);
348 return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
351 static int rhashtable_rehash_alloc(struct rhashtable *ht,
358 ASSERT_RHT_MUTEX(ht);
360 new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
364 err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
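
The rehash path above (rhashtable_rehash_one/chain/table) moves entries bucket by bucket from the old table into the table linked through future_tbl, recomputing each entry's bucket with the new table's size and hash seed. A deliberately simplified, single-threaded sketch of that idea in plain C; the kernel code additionally handles bucket locks, RCU and nested tables, so this is only an illustration of the technique, with hypothetical names:

struct entry {
        struct entry *next;
        unsigned int key;
};

struct table {
        struct entry **buckets;
        unsigned int size;              /* power of two */
};

static unsigned int hash_key(unsigned int key)
{
        return key * 0x61C88647u;       /* stand-in for jhash with a seed */
}

/* Move every entry of old->buckets[old_hash] into its new bucket. */
static void rehash_chain(struct table *old, struct table *new,
                         unsigned int old_hash)
{
        struct entry *e, *next;

        for (e = old->buckets[old_hash]; e; e = next) {
                unsigned int new_hash = hash_key(e->key) & (new->size - 1);

                next = e->next;
                e->next = new->buckets[new_hash];       /* link at chain head */
                new->buckets[new_hash] = e;
        }
        old->buckets[old_hash] = NULL;
}
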
373 * @ht: the hash table to shrink
379 * ht->mutex.
387 static int rhashtable_shrink(struct rhashtable *ht)
389 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
390 unsigned int nelems = atomic_read(&ht->nelems);
395 if (size < ht->p.min_size)
396 size = ht->p.min_size;
401 if (rht_dereference(old_tbl->future_tbl, ht))
404 return rhashtable_rehash_alloc(ht, old_tbl, size);
409 struct rhashtable *ht;
413 ht = container_of(work, struct rhashtable, run_work);
414 mutex_lock(&ht->mutex);
416 tbl = rht_dereference(ht->tbl, ht);
417 tbl = rhashtable_last_table(ht, tbl);
419 if (rht_grow_above_75(ht, tbl))
420 err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
421 else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
422 err = rhashtable_shrink(ht);
424 err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
429 nerr = rhashtable_rehash_table(ht);
433 mutex_unlock(&ht->mutex);
436 schedule_work(&ht->run_work);
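
rht_deferred_worker() is where the table is actually resized: it grows when the element count crosses roughly 75% of the bucket count, shrinks below roughly 30% when automatic_shrinking is set, and otherwise just finishes a pending rehash. A hedged restatement of that policy as a standalone helper; the real checks are rht_grow_above_75() and rht_shrink_below_30() in the header, which also honour max_size/min_size:

/*
 * Sketch of the grow/shrink decision, assuming the 3/4 and 3/10
 * thresholds implied by the helper names above; not the exact
 * kernel code.
 */
enum resize_action { RESIZE_NONE, RESIZE_GROW, RESIZE_SHRINK };

static enum resize_action resize_policy(unsigned int nelems,
                                        unsigned int buckets,
                                        bool automatic_shrinking)
{
        if (nelems > buckets / 4 * 3)
                return RESIZE_GROW;             /* double the table */
        if (automatic_shrinking && nelems < buckets * 3 / 10)
                return RESIZE_SHRINK;           /* halve towards min_size */
        return RESIZE_NONE;
}
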
439 static int rhashtable_insert_rehash(struct rhashtable *ht,
447 old_tbl = rht_dereference_rcu(ht->tbl, ht);
453 if (rht_grow_above_75(ht, tbl))
461 new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
465 err = rhashtable_rehash_attach(ht, tbl, new_tbl);
471 schedule_work(&ht->run_work);
482 schedule_work(&ht->run_work);
487 static void *rhashtable_lookup_one(struct rhashtable *ht,
493 .ht = ht,
507 (ht->p.obj_cmpfn ?
508 ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
509 rhashtable_compare(&arg, rht_obj(ht, head)))) {
514 if (!ht->rhlist)
515 return rht_obj(ht, head);
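
rhashtable_lookup_one() is the internal duplicate check used during insertion; the user-facing equivalents are rhashtable_lookup() and rhashtable_lookup_fast() from <linux/rhashtable.h>. A usage sketch reusing the hypothetical test_obj/test_params layout from the earlier sketch; the returned pointer is only safe to use while the RCU read lock is held or the caller otherwise guarantees the object stays alive:

/* Assumes struct test_obj and test_params as sketched above. */
static bool test_obj_exists(struct rhashtable *ht, u32 key)
{
        struct test_obj *obj;
        bool found;

        rcu_read_lock();
        obj = rhashtable_lookup(ht, &key, test_params);
        found = obj != NULL;
        rcu_read_unlock();

        return found;
}
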
539 struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
552 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
559 if (unlikely(rht_grow_above_max(ht, tbl)))
562 if (unlikely(rht_grow_above_100(ht, tbl)))
568 if (ht->rhlist) {
580 atomic_inc(&ht->nelems);
581 if (rht_grow_above_75(ht, tbl))
582 schedule_work(&ht->run_work);
587 static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
596 new_tbl = rcu_dereference(ht->tbl);
600 hash = rht_head_hashfn(ht, tbl, obj, ht->p);
605 bkt = rht_bucket_insert(ht, tbl, hash);
607 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
611 data = rhashtable_lookup_one(ht, bkt, tbl,
613 new_tbl = rhashtable_insert_one(ht, bkt, tbl,
623 data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
629 void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
636 data = rhashtable_try_insert(ht, key, obj);
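
rhashtable_insert_slow() is the fallback taken when the inlined fast path cannot finish (resize in progress, nested buckets, memory pressure); callers normally go through rhashtable_insert_fast(). A sketch under the same hypothetical test_obj/test_params assumptions, handling the errors a caller should expect: -EEXIST for a duplicate key, and, judging from the checks above, -E2BIG once max_elems is exceeded or -ENOMEM when a table allocation fails:

/* Assumes <linux/slab.h> plus struct test_obj and test_params above. */
static int test_obj_add(struct rhashtable *ht, u32 key, gfp_t gfp)
{
        struct test_obj *obj;
        int err;

        obj = kzalloc(sizeof(*obj), gfp);
        if (!obj)
                return -ENOMEM;

        obj->key = key;
        err = rhashtable_insert_fast(ht, &obj->node, test_params);
        if (err)                        /* e.g. -EEXIST for a duplicate key */
                kfree(obj);

        return err;
}
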
646 * @ht: Table to walk over
665 void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
667 iter->ht = ht;
673 spin_lock(&ht->lock);
675 rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
677 spin_unlock(&ht->lock);
689 spin_lock(&iter->ht->lock);
692 spin_unlock(&iter->ht->lock);
717 struct rhashtable *ht = iter->ht;
718 bool rhlist = ht->rhlist;
722 spin_lock(&ht->lock);
725 spin_unlock(&ht->lock);
730 iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
791 struct rhashtable *ht = iter->ht;
793 bool rhlist = ht->rhlist;
824 return rht_obj(ht, rhlist ? &list->rhead : p);
835 iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
862 struct rhashtable *ht = iter->ht;
864 bool rhlist = ht->rhlist;
875 return rht_obj(ht, rhlist ? &list->rhead : p);
901 struct rhashtable *ht = iter->ht;
905 return rht_obj(ht, ht->rhlist ? &list->rhead : p);
933 struct rhashtable *ht;
939 ht = iter->ht;
941 spin_lock(&ht->lock);
947 spin_unlock(&ht->lock);
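
The walker lines above belong to the iteration API (rhashtable_walk_enter/start/next/stop/exit), which keeps iteration stable even while the table is being rehashed; rhashtable_walk_next() can return ERR_PTR(-EAGAIN) when a resize interferes, in which case the walk simply continues and may revisit some entries. A usage sketch under the same hypothetical test_obj assumption (recent kernels have a void rhashtable_walk_start(); older ones returned int):

/* Assumes struct test_obj as sketched above. */
static void test_walk_all(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        struct test_obj *obj;

        rhashtable_walk_enter(ht, &iter);
        rhashtable_walk_start(&iter);

        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj)) {
                        if (PTR_ERR(obj) == -EAGAIN)
                                continue;       /* resize in flight; keep going */
                        break;
                }
                pr_info("key %u\n", obj->key);
        }

        rhashtable_walk_stop(&iter);
        rhashtable_walk_exit(&iter);
}
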
975 * @ht: hash table to be initialized
1015 int rhashtable_init(struct rhashtable *ht,
1025 memset(ht, 0, sizeof(*ht));
1026 mutex_init(&ht->mutex);
1027 spin_lock_init(&ht->lock);
1028 memcpy(&ht->p, params, sizeof(*params));
1031 ht->p.min_size = roundup_pow_of_two(params->min_size);
1034 ht->max_elems = 1u << 31;
1037 ht->p.max_size = rounddown_pow_of_two(params->max_size);
1038 if (ht->p.max_size < ht->max_elems / 2)
1039 ht->max_elems = ht->p.max_size * 2;
1042 ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1044 size = rounded_hashtable_size(&ht->p);
1046 ht->key_len = ht->p.key_len;
1048 ht->p.hashfn = jhash;
1050 if (!(ht->key_len & (sizeof(u32) - 1))) {
1051 ht->key_len /= sizeof(u32);
1052 ht->p.hashfn = rhashtable_jhash2;
1061 tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
1063 size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1064 tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
1067 atomic_set(&ht->nelems, 0);
1069 RCU_INIT_POINTER(ht->tbl, tbl);
1071 INIT_WORK(&ht->run_work, rht_deferred_worker);
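
rhashtable_init() copies the params, rounds min_size/max_size to powers of two, picks jhash/jhash2 as the default hash when none is given, and allocates the first bucket table (retrying at minimum size with __GFP_NOFAIL). An initialization sketch with the sizing knobs these lines act on; test_obj is the same hypothetical layout used in the earlier sketches:

#include <linux/rhashtable.h>

/* Same hypothetical layout as in the earlier sketch. */
struct test_obj {
        u32                     key;
        struct rhash_head       node;
};

static const struct rhashtable_params test_params = {
        .key_len                = sizeof(u32),
        .key_offset             = offsetof(struct test_obj, key),
        .head_offset            = offsetof(struct test_obj, node),
        .min_size               = 16,           /* rounded up to a power of two */
        .max_size               = 1 << 16,
        .automatic_shrinking    = true,         /* let the worker shrink the table */
        /* .hashfn left NULL: jhash/jhash2 is used, per the lines above */
};

static int test_table_setup(struct rhashtable *ht)
{
        return rhashtable_init(ht, &test_params);
}
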
1090 err = rhashtable_init(&hlt->ht, params);
1091 hlt->ht.rhlist = true;
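
rhltable_init() (lines 1090-1091 above) reuses rhashtable_init() and then sets ht->rhlist so that several objects may share one key; such objects embed a struct rhlist_head instead of a struct rhash_head. A sketch of that variant, hedged since the listing itself does not show the rhltable helpers; test_dup and dup_params are illustrative names:

/* Duplicate-friendly variant: objects embed struct rhlist_head. */
struct test_dup {
        u32                     key;
        struct rhlist_head      list;
};

static const struct rhashtable_params dup_params = {
        .key_len        = sizeof(u32),
        .key_offset     = offsetof(struct test_dup, key),
        .head_offset    = offsetof(struct test_dup, list),
};

static int test_dup_add(struct rhltable *hlt, struct test_dup *obj)
{
        /* Unlike rhashtable_insert_fast(), duplicate keys are allowed. */
        return rhltable_insert(hlt, &obj->list, dup_params);
}

static void test_dup_print(struct rhltable *hlt, u32 key)
{
        struct rhlist_head *head, *pos;
        struct test_dup *obj;

        rcu_read_lock();
        head = rhltable_lookup(hlt, &key, dup_params);
        rhl_for_each_entry_rcu(obj, pos, head, list)
                pr_info("dup key %u\n", obj->key);
        rcu_read_unlock();
}
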
1096 static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
1102 if (!ht->rhlist) {
1103 free_fn(rht_obj(ht, obj), arg);
1110 list = rht_dereference(list->next, ht);
1111 free_fn(rht_obj(ht, obj), arg);
1117 * @ht: the hash table to destroy
1130 void rhashtable_free_and_destroy(struct rhashtable *ht,
1137 cancel_work_sync(&ht->run_work);
1139 mutex_lock(&ht->mutex);
1140 tbl = rht_dereference(ht->tbl, ht);
1149 rht_dereference(pos->next, ht) : NULL;
1153 rht_dereference(pos->next, ht) : NULL)
1154 rhashtable_free_one(ht, pos, free_fn, arg);
1158 next_tbl = rht_dereference(tbl->future_tbl, ht);
1164 mutex_unlock(&ht->mutex);
1168 void rhashtable_destroy(struct rhashtable *ht)
1170 return rhashtable_free_and_destroy(ht, NULL, NULL);
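
rhashtable_free_and_destroy() cancels the deferred worker, walks every bucket (including any pending future_tbl) and hands each remaining object to free_fn before releasing the bucket tables; rhashtable_destroy() is the same call with no callback. A teardown sketch under the hypothetical test_obj assumption:

/* Called once per object still in the table; arg is passed through. */
static void test_obj_free(void *ptr, void *arg)
{
        kfree(ptr);     /* ptr is the containing struct test_obj */
}

static void test_table_teardown(struct rhashtable *ht)
{
        rhashtable_free_and_destroy(ht, test_obj_free, NULL);
}
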
1215 struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
1224 ntbl = nested_table_alloc(ht, &ntbl[index].table,
1231 ntbl = nested_table_alloc(ht, &ntbl[index].table,