// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "peerlookup.h"
#include "peer.h"
#include "noise.h"

static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
					const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
	/* siphash gives us a secure 64bit number based on a random key. Since
	 * the bits are uniformly distributed, we can then mask off to get the
	 * bits we need.
	 */
	const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key);

	return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];
}
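
/* Worked example (hypothetical bucket count; the real number of buckets is
 * whatever DECLARE_HASHTABLE() in peerlookup.h requests): with 2^11 buckets,
 *
 *	HASH_SIZE(table->hashtable) == 2048
 *	hash & (2048 - 1)  ->  keeps bits 0..10 of the 64-bit siphash output
 *
 * so every bucket is equally likely, and an attacker who does not know
 * table->key cannot craft public keys that all land in one bucket.
 */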

struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void)
{
	struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	get_random_bytes(&table->key, sizeof(table->key));
	hash_init(table->hashtable);
	mutex_init(&table->lock);
	return table;
}
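
/* Usage sketch (illustrative; the surrounding device-setup code is assumed):
 * the table is allocated once per device, and since it comes from kvmalloc()
 * it must be released with kvfree() on teardown:
 *
 *	struct pubkey_hashtable *peers = wg_pubkey_hashtable_alloc();
 *
 *	if (!peers)
 *		return -ENOMEM;
 *	...
 *	kvfree(peers);
 */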

void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
			     struct wg_peer *peer)
{
	mutex_lock(&table->lock);
	hlist_add_head_rcu(&peer->pubkey_hash,
			   pubkey_bucket(table, peer->handshake.remote_static));
	mutex_unlock(&table->lock);
}

void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
				struct wg_peer *peer)
{
	mutex_lock(&table->lock);
	hlist_del_init_rcu(&peer->pubkey_hash);
	mutex_unlock(&table->lock);
}

/* Returns a strong reference to a peer */
struct wg_peer *
wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
			   const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
	struct wg_peer *iter_peer, *peer = NULL;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey),
				    pubkey_hash) {
		if (!memcmp(pubkey, iter_peer->handshake.remote_static,
			    NOISE_PUBLIC_KEY_LEN)) {
			peer = iter_peer;
			break;
		}
	}
	peer = wg_peer_get_maybe_zero(peer);
	rcu_read_unlock_bh();
	return peer;
}
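
/* Caller sketch (illustrative): the reference returned above pins the peer
 * even if it is concurrently removed from the table, so callers are expected
 * to drop it with wg_peer_put() from peer.h once they are done:
 *
 *	struct wg_peer *peer = wg_pubkey_hashtable_lookup(table, pubkey);
 *
 *	if (peer) {
 *		...
 *		wg_peer_put(peer);
 *	}
 */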

static struct hlist_head *index_bucket(struct index_hashtable *table,
				       const __le32 index)
{
	/* Since the indices are random and thus all bits are uniformly
	 * distributed, we can find an index's bucket simply by masking.
	 */
	return &table->hashtable[(__force u32)index &
				 (HASH_SIZE(table->hashtable) - 1)];
}

struct index_hashtable *wg_index_hashtable_alloc(void)
{
	struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	hash_init(table->hashtable);
	spin_lock_init(&table->lock);
	return table;
}

/* At the moment, we limit ourselves to 2^20 total peers, which generally might
 * amount to 2^20*3 items in this hashtable. The algorithm below works by
 * picking a random number and testing it. We can see that these limits mean we
 * usually succeed pretty quickly:
 *
 * >>> def calculation(tries, size):
 * ...     return (size / 2**32)**(tries - 1) *  (1 - (size / 2**32))
 * ...
 * >>> calculation(1, 2**20 * 3)
 * 0.999267578125
 * >>> calculation(2, 2**20 * 3)
 * 0.0007318854331970215
 * >>> calculation(3, 2**20 * 3)
 * 5.360489012673497e-07
 * >>> calculation(4, 2**20 * 3)
 * 3.9261394135792216e-10
 *
 * At the moment, we don't do any masking, so this algorithm isn't exactly
 * constant time in either the random guessing or in the hash list lookup. We
 * could require a minimum of 3 tries, which would successfully mask the
 * guessing. This would not, however, help with the growing hash lengths, which
 * is another thing to consider moving forward.
 */
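/* In other words, calculation(tries, size) is the geometric distribution
 * P(the first unused index appears on attempt number 'tries'), with a
 * per-attempt collision probability of size/2^32. For size = 2^20 * 3 that
 * probability is 3/4096 ~= 0.00073, so the expected number of attempts is
 * 1/(1 - 3/4096) ~= 1.0007, i.e. effectively a single random draw.
 */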

__le32 wg_index_hashtable_insert(struct index_hashtable *table,
				 struct index_hashtable_entry *entry)
{
	struct index_hashtable_entry *existing_entry;

	spin_lock_bh(&table->lock);
	hlist_del_init_rcu(&entry->index_hash);
	spin_unlock_bh(&table->lock);

	rcu_read_lock_bh();

search_unused_slot:
	/* First we try to find an unused slot, randomly, while unlocked. */
	entry->index = (__force __le32)get_random_u32();
	hlist_for_each_entry_rcu_bh(existing_entry,
				    index_bucket(table, entry->index),
				    index_hash) {
		if (existing_entry->index == entry->index)
			/* If it's already in use, we continue searching. */
			goto search_unused_slot;
	}

	/* Once we've found an unused slot, we lock it, and then double-check
	 * that nobody else stole it from us.
	 */
	spin_lock_bh(&table->lock);
	hlist_for_each_entry_rcu_bh(existing_entry,
				    index_bucket(table, entry->index),
				    index_hash) {
		if (existing_entry->index == entry->index) {
			spin_unlock_bh(&table->lock);
			/* If it was stolen, we start over. */
			goto search_unused_slot;
		}
	}
	/* Otherwise, we know we have it exclusively (since we're locked),
	 * so we insert.
	 */
	hlist_add_head_rcu(&entry->index_hash,
			   index_bucket(table, entry->index));
	spin_unlock_bh(&table->lock);

	rcu_read_unlock_bh();

	return entry->index;
}
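
/* Usage sketch (illustrative; the entry/field names below mirror how the
 * handshake code is assumed to use this and are not defined in this file):
 * the caller fills in the entry's type and peer, then publishes it to obtain
 * the sender index that goes on the wire:
 *
 *	handshake->entry.type = INDEX_HASHTABLE_HANDSHAKE;
 *	handshake->entry.peer = peer;
 *	sender_index = wg_index_hashtable_insert(table, &handshake->entry);
 */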

bool wg_index_hashtable_replace(struct index_hashtable *table,
				struct index_hashtable_entry *old,
				struct index_hashtable_entry *new)
{
	bool ret;

	spin_lock_bh(&table->lock);
	ret = !hlist_unhashed(&old->index_hash);
	if (unlikely(!ret))
		goto out;

	new->index = old->index;
	hlist_replace_rcu(&old->index_hash, &new->index_hash);

	/* Calling init here NULLs out index_hash, and in fact after this
	 * function returns, it's theoretically possible for this to get
	 * reinserted elsewhere. That means the RCU lookup below might either
	 * terminate early or jump between buckets, in which case the packet
	 * simply gets dropped, which isn't terrible.
	 */
	INIT_HLIST_NODE(&old->index_hash);
out:
	spin_unlock_bh(&table->lock);
	return ret;
}
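
/* Caller sketch (illustrative; the keypair entry names are assumed): a
 * completed handshake can hand its index over to the freshly derived keypair
 * in one atomic step, so an index already on the wire keeps resolving
 * throughout the transition:
 *
 *	new_keypair->entry.type = INDEX_HASHTABLE_KEYPAIR;
 *	new_keypair->entry.peer = peer;
 *	wg_index_hashtable_replace(table, &handshake->entry,
 *				   &new_keypair->entry);
 */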

void wg_index_hashtable_remove(struct index_hashtable *table,
			       struct index_hashtable_entry *entry)
{
	spin_lock_bh(&table->lock);
	hlist_del_init_rcu(&entry->index_hash);
	spin_unlock_bh(&table->lock);
}

/* Returns a strong reference to an entry->peer */
struct index_hashtable_entry *
wg_index_hashtable_lookup(struct index_hashtable *table,
			  const enum index_hashtable_type type_mask,
			  const __le32 index, struct wg_peer **peer)
{
	struct index_hashtable_entry *iter_entry, *entry = NULL;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index),
				    index_hash) {
		if (iter_entry->index == index) {
			if (likely(iter_entry->type & type_mask))
				entry = iter_entry;
			break;
		}
	}
	if (likely(entry)) {
		entry->peer = wg_peer_get_maybe_zero(entry->peer);
		if (likely(entry->peer))
			*peer = entry->peer;
		else
			entry = NULL;
	}
	rcu_read_unlock_bh();
	return entry;
}
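
/* Caller sketch (illustrative): receive-path code passes the set of entry
 * types it is willing to accept; on success it owns a reference to *peer and
 * must drop it with wg_peer_put() when finished:
 *
 *	struct wg_peer *peer = NULL;
 *	struct index_hashtable_entry *entry =
 *		wg_index_hashtable_lookup(table,
 *					  INDEX_HASHTABLE_HANDSHAKE |
 *					  INDEX_HASHTABLE_KEYPAIR,
 *					  index, &peer);
 *
 *	if (entry) {
 *		...
 *		wg_peer_put(peer);
 *	}
 */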