Lines Matching refs:ht (references to the struct rhashtable *ht parameter in the kernel's resizable hash table implementation, lib/rhashtable.c; each entry below is prefixed with its line number in that file)

37 static u32 head_hashfn(struct rhashtable *ht,
41 return rht_head_hashfn(ht, tbl, he, ht->p);
47 int lockdep_rht_mutex_is_held(struct rhashtable *ht)
49 return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
122 static union nested_table *nested_table_alloc(struct rhashtable *ht,
147 static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
164 if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
175 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
189 tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
211 static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
218 tbl = rht_dereference_rcu(tbl->future_tbl, ht);
224 static int rhashtable_rehash_one(struct rhashtable *ht,
228 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
229 struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
255 new_hash = head_hashfn(ht, new_tbl, entry);
276 static int rhashtable_rehash_chain(struct rhashtable *ht,
279 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
288 while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
298 static int rhashtable_rehash_attach(struct rhashtable *ht,
315 static int rhashtable_rehash_table(struct rhashtable *ht)
317 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
323 new_tbl = rht_dereference(old_tbl->future_tbl, ht);
328 err = rhashtable_rehash_chain(ht, old_hash);
335 rcu_assign_pointer(ht->tbl, new_tbl);
337 spin_lock(&ht->lock);
349 spin_unlock(&ht->lock);
351 return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
354 static int rhashtable_rehash_alloc(struct rhashtable *ht,
361 ASSERT_RHT_MUTEX(ht);
363 new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
367 err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
376 * @ht: the hash table to shrink
382 * ht->mutex.
390 static int rhashtable_shrink(struct rhashtable *ht)
392 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
393 unsigned int nelems = atomic_read(&ht->nelems);
398 if (size < ht->p.min_size)
399 size = ht->p.min_size;
404 if (rht_dereference(old_tbl->future_tbl, ht))
407 return rhashtable_rehash_alloc(ht, old_tbl, size);
412 struct rhashtable *ht;
416 ht = container_of(work, struct rhashtable, run_work);
417 mutex_lock(&ht->mutex);
419 tbl = rht_dereference(ht->tbl, ht);
420 tbl = rhashtable_last_table(ht, tbl);
422 if (rht_grow_above_75(ht, tbl))
423 err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
424 else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
425 err = rhashtable_shrink(ht);
427 err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
432 nerr = rhashtable_rehash_table(ht);
436 mutex_unlock(&ht->mutex);
439 schedule_work(&ht->run_work);
442 static int rhashtable_insert_rehash(struct rhashtable *ht,
450 old_tbl = rht_dereference_rcu(ht->tbl, ht);
456 if (rht_grow_above_75(ht, tbl))
464 new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
468 err = rhashtable_rehash_attach(ht, tbl, new_tbl);
474 schedule_work(&ht->run_work);
485 schedule_work(&ht->run_work);
490 static void *rhashtable_lookup_one(struct rhashtable *ht,
496 .ht = ht,
510 (ht->p.obj_cmpfn ?
511 ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
512 rhashtable_compare(&arg, rht_obj(ht, head)))) {
517 if (!ht->rhlist)
518 return rht_obj(ht, head);
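
The fragments at lines 490-518 come from rhashtable_lookup_one(), the internal loop that walks a bucket chain and compares keys, using obj_cmpfn when the caller supplied one and otherwise a plain memcmp over key_len bytes (rhashtable_compare). Callers normally reach this logic through the inline helpers declared in include/linux/rhashtable.h. Below is a minimal sketch of such a caller; struct test_obj, its key field and test_params are hypothetical names for illustration, not part of this listing.

#include <linux/rhashtable.h>

/* Hypothetical object layout: a u32 key plus the embedded rhash_head. */
struct test_obj {
        u32 key;
        struct rhash_head node;
};

static const struct rhashtable_params test_params = {
        .key_len             = sizeof(u32),
        .key_offset          = offsetof(struct test_obj, key),
        .head_offset         = offsetof(struct test_obj, node),
        .automatic_shrinking = true,
};

/* rhashtable_lookup_fast() takes the RCU read lock itself and compares
 * keys the same way rhashtable_lookup_one() does: obj_cmpfn if set,
 * otherwise a memcmp over key_len bytes. Returns the object or NULL. */
static struct test_obj *test_lookup(struct rhashtable *ht, u32 key)
{
        return rhashtable_lookup_fast(ht, &key, test_params);
}
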
542 struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
555 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
562 if (unlikely(rht_grow_above_max(ht, tbl)))
565 if (unlikely(rht_grow_above_100(ht, tbl)))
571 if (ht->rhlist) {
583 atomic_inc(&ht->nelems);
584 if (rht_grow_above_75(ht, tbl))
585 schedule_work(&ht->run_work);
590 static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
600 new_tbl = rcu_dereference(ht->tbl);
604 hash = rht_head_hashfn(ht, tbl, obj, ht->p);
609 bkt = rht_bucket_insert(ht, tbl, hash);
611 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
615 data = rhashtable_lookup_one(ht, bkt, tbl,
617 new_tbl = rhashtable_insert_one(ht, bkt, tbl,
627 data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
633 void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
640 data = rhashtable_try_insert(ht, key, obj);
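
rhashtable_insert_slow() (lines 590-640, together with rhashtable_try_insert()) is the out-of-line slow path; the usual entry points are the inline fast-path helpers, which fall back to it when they cannot complete the insert themselves (for instance while a resize is in flight or a nested bucket still needs allocating). A hedged sketch of an insertion through rhashtable_lookup_insert_fast(), reusing the hypothetical struct test_obj and test_params from the lookup sketch above:

/* Insert one object, rejecting duplicate keys. */
static int test_insert(struct rhashtable *ht, struct test_obj *obj)
{
        /* Returns 0 on success, -EEXIST if an object with the same key
         * already exists, or another negative errno (e.g. -ENOMEM) if
         * the insert could not be completed. */
        return rhashtable_lookup_insert_fast(ht, &obj->node, test_params);
}
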
650 * @ht: Table to walk over
669 void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
671 iter->ht = ht;
677 spin_lock(&ht->lock);
679 rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
681 spin_unlock(&ht->lock);
693 spin_lock(&iter->ht->lock);
696 spin_unlock(&iter->ht->lock);
721 struct rhashtable *ht = iter->ht;
722 bool rhlist = ht->rhlist;
726 spin_lock(&ht->lock);
729 spin_unlock(&ht->lock);
734 iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
795 struct rhashtable *ht = iter->ht;
797 bool rhlist = ht->rhlist;
828 return rht_obj(ht, rhlist ? &list->rhead : p);
839 iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
866 struct rhashtable *ht = iter->ht;
868 bool rhlist = ht->rhlist;
879 return rht_obj(ht, rhlist ? &list->rhead : p);
905 struct rhashtable *ht = iter->ht;
909 return rht_obj(ht, ht->rhlist ? &list->rhead : p);
937 struct rhashtable *ht;
943 ht = iter->ht;
945 spin_lock(&ht->lock);
951 spin_unlock(&ht->lock);
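
Lines 650-951 belong to the walker API (rhashtable_walk_enter/start/next/stop/exit). The walk is RCU based and may race with a resize; a common pattern is to tolerate ERR_PTR(-EAGAIN) from rhashtable_walk_next() and keep going, accepting that entries may then be seen twice or skipped. A sketch of that pattern, again using the hypothetical struct test_obj:

static void test_walk(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        struct test_obj *obj;

        rhashtable_walk_enter(ht, &iter);
        rhashtable_walk_start(&iter);           /* takes the RCU read lock */

        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj)) {
                        if (PTR_ERR(obj) == -EAGAIN)
                                continue;       /* table resized under us */
                        break;
                }
                pr_info("key %u\n", obj->key);
        }

        rhashtable_walk_stop(&iter);            /* drops the RCU read lock */
        rhashtable_walk_exit(&iter);
}
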
979 * @ht: hash table to be initialized
1019 int rhashtable_init(struct rhashtable *ht,
1029 memset(ht, 0, sizeof(*ht));
1030 mutex_init(&ht->mutex);
1031 spin_lock_init(&ht->lock);
1032 memcpy(&ht->p, params, sizeof(*params));
1035 ht->p.min_size = roundup_pow_of_two(params->min_size);
1038 ht->max_elems = 1u << 31;
1041 ht->p.max_size = rounddown_pow_of_two(params->max_size);
1042 if (ht->p.max_size < ht->max_elems / 2)
1043 ht->max_elems = ht->p.max_size * 2;
1046 ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1048 size = rounded_hashtable_size(&ht->p);
1050 ht->key_len = ht->p.key_len;
1052 ht->p.hashfn = jhash;
1054 if (!(ht->key_len & (sizeof(u32) - 1))) {
1055 ht->key_len /= sizeof(u32);
1056 ht->p.hashfn = rhashtable_jhash2;
1065 tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
1067 size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1068 tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
1071 atomic_set(&ht->nelems, 0);
1073 RCU_INIT_POINTER(ht->tbl, tbl);
1075 INIT_WORK(&ht->run_work, rht_deferred_worker);
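
rhashtable_init() (lines 979-1075) copies the caller's rhashtable_params, rounds min_size up and max_size down to powers of two, defaults the hash function to jhash (or rhashtable_jhash2 when the key length is a multiple of sizeof(u32)), and allocates the initial bucket table. A hedged init sketch for the hypothetical table used above; test_ht and test_ht_init() are made-up names:

static struct rhashtable test_ht;

static int __init test_ht_init(void)
{
        /* test_params sets key_len, so no obj_hashfn/obj_cmpfn pair is
         * required; with no hashfn given, the init path above picks
         * jhash or rhashtable_jhash2 automatically. */
        return rhashtable_init(&test_ht, &test_params);
}
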
1094 err = rhashtable_init(&hlt->ht, params);
1095 hlt->ht.rhlist = true;
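
rhltable_init() (lines 1094-1095) wraps rhashtable_init() and sets ht->rhlist, the flag the insert, lookup and free paths above branch on to allow several objects under one key. A brief, hedged sketch of the rhlist variant; struct test_rhl_obj and test_rhl_params are hypothetical:

/* Objects in an rhltable embed struct rhlist_head rather than
 * struct rhash_head, so multiple objects may share a key. */
struct test_rhl_obj {
        u32 key;
        struct rhlist_head list;
};

static const struct rhashtable_params test_rhl_params = {
        .key_len     = sizeof(u32),
        .key_offset  = offsetof(struct test_rhl_obj, key),
        .head_offset = offsetof(struct test_rhl_obj, list),
};

static int test_rhl_add(struct rhltable *hlt, struct test_rhl_obj *obj)
{
        return rhltable_insert(hlt, &obj->list, test_rhl_params);
}
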
1100 static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
1106 if (!ht->rhlist) {
1107 free_fn(rht_obj(ht, obj), arg);
1114 list = rht_dereference(list->next, ht);
1115 free_fn(rht_obj(ht, obj), arg);
1121 * @ht: the hash table to destroy
1134 void rhashtable_free_and_destroy(struct rhashtable *ht,
1141 cancel_work_sync(&ht->run_work);
1143 mutex_lock(&ht->mutex);
1144 tbl = rht_dereference(ht->tbl, ht);
1153 rht_dereference(pos->next, ht) : NULL;
1157 rht_dereference(pos->next, ht) : NULL)
1158 rhashtable_free_one(ht, pos, free_fn, arg);
1162 next_tbl = rht_dereference(tbl->future_tbl, ht);
1168 mutex_unlock(&ht->mutex);
1172 void rhashtable_destroy(struct rhashtable *ht)
1174 return rhashtable_free_and_destroy(ht, NULL, NULL);
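
rhashtable_free_and_destroy() (lines 1121-1168) cancels the deferred resize work, then walks every bucket of every table still chained through future_tbl and hands each object to free_fn; rhashtable_destroy() (lines 1172-1174) is the same call with no callback. A hedged teardown sketch for the hypothetical test_ht above, assuming the objects were kmalloc'ed and that concurrent use has already stopped:

#include <linux/slab.h>

static void test_obj_free(void *ptr, void *arg)
{
        kfree(ptr);     /* ptr is the struct test_obj itself (rht_obj) */
}

static void test_ht_fini(void)
{
        rhashtable_free_and_destroy(&test_ht, test_obj_free, NULL);
}
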
1219 struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
1228 ntbl = nested_table_alloc(ht, &ntbl[index].table,
1235 ntbl = nested_table_alloc(ht, &ntbl[index].table,