0001
0002
0003
0004
0005
0006 #include "peerlookup.h"
0007 #include "peer.h"
0008 #include "noise.h"
0009
0010 static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
0011 const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
0012 {
0013
0014
0015
0016
0017 const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key);
0018
0019 return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];
0020 }
0021
0022 struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void)
0023 {
0024 struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);
0025
0026 if (!table)
0027 return NULL;
0028
0029 get_random_bytes(&table->key, sizeof(table->key));
0030 hash_init(table->hashtable);
0031 mutex_init(&table->lock);
0032 return table;
0033 }
0034
0035 void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
0036 struct wg_peer *peer)
0037 {
0038 mutex_lock(&table->lock);
0039 hlist_add_head_rcu(&peer->pubkey_hash,
0040 pubkey_bucket(table, peer->handshake.remote_static));
0041 mutex_unlock(&table->lock);
0042 }
0043
/* Unlink @peer from the pubkey table. hlist_del_init_rcu() leaves the node
 * self-referencing, so removing a peer that was never added (or removing it
 * twice) is a harmless no-op. Concurrent RCU readers may still observe the
 * peer until a grace period elapses; the peer's own refcounting handles that.
 */
void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
				struct wg_peer *peer)
{
	mutex_lock(&table->lock);
	hlist_del_init_rcu(&peer->pubkey_hash);
	mutex_unlock(&table->lock);
}
0051
0052
/* Look up the peer whose static public key equals @pubkey.
 *
 * Returns a strong reference to the matching peer, or NULL if there is no
 * match or the match is already being torn down. The caller must drop the
 * reference when done (presumably via wg_peer_put() — confirm against the
 * peer refcounting API).
 */
struct wg_peer *
wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
			   const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
	struct wg_peer *iter_peer, *peer = NULL;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey),
				    pubkey_hash) {
		if (!memcmp(pubkey, iter_peer->handshake.remote_static,
			    NOISE_PUBLIC_KEY_LEN)) {
			peer = iter_peer;
			break;
		}
	}
	/* Must happen inside the RCU read section: the peer's refcount may
	 * already have hit zero, in which case wg_peer_get_maybe_zero()
	 * returns NULL rather than reviving a dying object.
	 */
	peer = wg_peer_get_maybe_zero(peer);
	rcu_read_unlock_bh();
	return peer;
}
0072
0073 static struct hlist_head *index_bucket(struct index_hashtable *table,
0074 const __le32 index)
0075 {
0076
0077
0078
0079 return &table->hashtable[(__force u32)index &
0080 (HASH_SIZE(table->hashtable) - 1)];
0081 }
0082
0083 struct index_hashtable *wg_index_hashtable_alloc(void)
0084 {
0085 struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);
0086
0087 if (!table)
0088 return NULL;
0089
0090 hash_init(table->hashtable);
0091 spin_lock_init(&table->lock);
0092 return table;
0093 }
0094
0095
0096
0097
0098
0099
0100
0101
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
0115
0116
0117
0118
/* Assign @entry a fresh random session index that is unique within @table,
 * insert it, and return the chosen index.
 *
 * Strategy: pick a random index and probe for collisions without the lock
 * (cheap, optimistic); only once a free slot is found is the spinlock taken,
 * and the slot re-checked under the lock before insertion. If another CPU
 * claimed the index in the window between the unlocked probe and the locked
 * re-check, the whole search restarts.
 */
__le32 wg_index_hashtable_insert(struct index_hashtable *table,
				 struct index_hashtable_entry *entry)
{
	struct index_hashtable_entry *existing_entry;

	/* Unlink any previous placement of this entry first, so it can never
	 * appear in two buckets at once. (No-op if it wasn't hashed.)
	 */
	spin_lock_bh(&table->lock);
	hlist_del_init_rcu(&entry->index_hash);
	spin_unlock_bh(&table->lock);

	rcu_read_lock_bh();

search_unused_slot:
	/* First, probe for an unused slot randomly, without the lock. */
	entry->index = (__force __le32)get_random_u32();
	hlist_for_each_entry_rcu_bh(existing_entry,
				    index_bucket(table, entry->index),
				    index_hash) {
		if (existing_entry->index == entry->index)
			/* Index already in use: draw another one. */
			goto search_unused_slot;
	}

	/* Found a candidate. Take the lock and double-check that nobody
	 * claimed this index between the unlocked probe and now.
	 */
	spin_lock_bh(&table->lock);
	hlist_for_each_entry_rcu_bh(existing_entry,
				    index_bucket(table, entry->index),
				    index_hash) {
		if (existing_entry->index == entry->index) {
			spin_unlock_bh(&table->lock);
			/* The slot was stolen from us; start over. */
			goto search_unused_slot;
		}
	}

	/* The lock is held and the slot is free, so we own it exclusively:
	 * safe to insert.
	 */
	hlist_add_head_rcu(&entry->index_hash,
			   index_bucket(table, entry->index));
	spin_unlock_bh(&table->lock);

	rcu_read_unlock_bh();

	return entry->index;
}
0165
/* Atomically substitute @new for @old in the table, with @new inheriting
 * @old's index. Returns false (and does nothing) if @old is not currently
 * hashed — the unhashed check happens under the lock so it cannot race with
 * a concurrent insert or remove of @old.
 */
bool wg_index_hashtable_replace(struct index_hashtable *table,
				struct index_hashtable_entry *old,
				struct index_hashtable_entry *new)
{
	bool ret;

	spin_lock_bh(&table->lock);
	ret = !hlist_unhashed(&old->index_hash);
	if (unlikely(!ret))
		goto out;

	new->index = old->index;
	hlist_replace_rcu(&old->index_hash, &new->index_hash);

	/* Re-initialize rather than fully delete: hlist_replace_rcu() leaves
	 * old's pprev/next intact for the benefit of concurrent RCU readers,
	 * and INIT_HLIST_NODE() marks old as unhashed so a later insert or
	 * remove of it behaves correctly. A concurrent RCU lookup walking
	 * through old at this instant may terminate early or hop lists, which
	 * merely drops that one lookup — an acceptable outcome under RCU.
	 */
	INIT_HLIST_NODE(&old->index_hash);
out:
	spin_unlock_bh(&table->lock);
	return ret;
}
0191
/* Unlink @entry from the index table. hlist_del_init_rcu() makes this safe
 * to call on an entry that was never inserted (or already removed); RCU
 * readers may still traverse the entry until a grace period passes.
 */
void wg_index_hashtable_remove(struct index_hashtable *table,
			       struct index_hashtable_entry *entry)
{
	spin_lock_bh(&table->lock);
	hlist_del_init_rcu(&entry->index_hash);
	spin_unlock_bh(&table->lock);
}
0199
0200
/* Look up the entry registered under @index, accepting it only if its type
 * is within @type_mask.
 *
 * On success, returns the entry, takes a strong reference on its peer, and
 * stores that peer in *@peer (the caller must drop the reference). Returns
 * NULL if no entry matches, the type is excluded by the mask, or the entry's
 * peer is already mid-destruction. Note *@peer is written only on success.
 */
struct index_hashtable_entry *
wg_index_hashtable_lookup(struct index_hashtable *table,
			  const enum index_hashtable_type type_mask,
			  const __le32 index, struct wg_peer **peer)
{
	struct index_hashtable_entry *iter_entry, *entry = NULL;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index),
				    index_hash) {
		if (iter_entry->index == index) {
			/* Indices are unique, so stop at the first match even
			 * if the type filter rejects it.
			 */
			if (likely(iter_entry->type & type_mask))
				entry = iter_entry;
			break;
		}
	}
	if (likely(entry)) {
		/* Must run inside the RCU read section: the peer's refcount
		 * may already be zero, in which case the entry is treated as
		 * not found rather than reviving a dying peer.
		 */
		entry->peer = wg_peer_get_maybe_zero(entry->peer);
		if (likely(entry->peer))
			*peer = entry->peer;
		else
			entry = NULL;
	}
	rcu_read_unlock_bh();
	return entry;
}