// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "peerlookup.h"
#include "peer.h"
#include "noise.h"

static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table,
                    const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
    /* siphash gives us a secure 64-bit number based on a random key. Since
     * the bits are uniformly distributed, we can then mask off to get the
     * bits we need.
     */
    const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key);

    return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)];
}
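
/* Worked example (a sketch, assuming an 11-bit table as declared via
 * DECLARE_HASHTABLE() in peerlookup.h): HASH_SIZE() would then be 2048, so
 * the mask keeps only the low 11 bits of the siphash output:
 *
 *     u64 hash   = 0xdeadbeefcafef00dULL;
 *     u64 bucket = hash & (2048 - 1);  // 0xf00d & 0x7ff == 0xd, bucket 13
 *
 * Because siphash is keyed with random bytes unknown to remote parties, an
 * attacker cannot choose public keys that all collide into one bucket.
 */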

struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void)
{
    struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);

    if (!table)
        return NULL;

    get_random_bytes(&table->key, sizeof(table->key));
    hash_init(table->hashtable);
    mutex_init(&table->lock);
    return table;
}
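
/* Sizing sketch (assuming the 11-bit table mentioned above; the actual width
 * lives in peerlookup.h): the struct is dominated by the bucket array, i.e.
 * 2048 * 8 bytes = 16 KiB of hlist_head on 64-bit, plus the siphash key and
 * the mutex. kvmalloc() lets the allocation fall back to vmalloc() when that
 * much contiguous memory is not readily available.
 */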

void wg_pubkey_hashtable_add(struct pubkey_hashtable *table,
                 struct wg_peer *peer)
{
    mutex_lock(&table->lock);
    hlist_add_head_rcu(&peer->pubkey_hash,
               pubkey_bucket(table, peer->handshake.remote_static));
    mutex_unlock(&table->lock);
}

void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table,
                struct wg_peer *peer)
{
    mutex_lock(&table->lock);
    hlist_del_init_rcu(&peer->pubkey_hash);
    mutex_unlock(&table->lock);
}
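
/* Note on the scheme above: the mutex only serializes writers; readers walk
 * the buckets under RCU (see wg_pubkey_hashtable_lookup() below). Using
 * hlist_del_init_rcu() rather than hlist_del_rcu() re-initializes the node
 * so that hlist_unhashed() becomes true again, which makes a repeated remove
 * or a later re-add harmless.
 */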

/* Returns a strong reference to a peer */
struct wg_peer *
wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table,
               const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{
    struct wg_peer *iter_peer, *peer = NULL;

    rcu_read_lock_bh();
    hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey),
                    pubkey_hash) {
        if (!memcmp(pubkey, iter_peer->handshake.remote_static,
                NOISE_PUBLIC_KEY_LEN)) {
            peer = iter_peer;
            break;
        }
    }
    peer = wg_peer_get_maybe_zero(peer);
    rcu_read_unlock_bh();
    return peer;
}
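
/* Caller sketch (hypothetical; variable names are illustrative): the returned
 * pointer carries a reference taken via wg_peer_get_maybe_zero(), so callers
 * must pair a successful lookup with wg_peer_put():
 *
 *     struct wg_peer *peer = wg_pubkey_hashtable_lookup(table, their_pubkey);
 *     if (peer) {
 *         ... peer is safe to use outside the RCU read section ...
 *         wg_peer_put(peer);
 *     }
 */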

static struct hlist_head *index_bucket(struct index_hashtable *table,
                       const __le32 index)
{
    /* Since the indices are random and thus all bits are uniformly
     * distributed, we can find an index's bucket simply by masking.
     */
    return &table->hashtable[(__force u32)index &
                 (HASH_SIZE(table->hashtable) - 1)];
}

struct index_hashtable *wg_index_hashtable_alloc(void)
{
    struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL);

    if (!table)
        return NULL;

    hash_init(table->hashtable);
    spin_lock_init(&table->lock);
    return table;
}

/* At the moment, we limit ourselves to 2^20 total peers, which generally might
 * amount to 2^20*3 items in this hashtable. The algorithm below works by
 * picking a random number and testing it. We can see that these limits mean we
 * usually succeed pretty quickly:
 *
 * >>> def calculation(tries, size):
 * ...     return (size / 2**32)**(tries - 1) * (1 - (size / 2**32))
 * ...
 * >>> calculation(1, 2**20 * 3)
 * 0.999267578125
 * >>> calculation(2, 2**20 * 3)
 * 0.0007318854331970215
 * >>> calculation(3, 2**20 * 3)
 * 5.360489012673497e-07
 * >>> calculation(4, 2**20 * 3)
 * 3.9261394135792216e-10
 *
 * At the moment, we don't do any masking, so this algorithm isn't exactly
 * constant time in either the random guessing or in the hash list lookup. We
 * could require a minimum of 3 tries, which would successfully mask the
 * guessing. This would not, however, help with the growing hash lengths, which
 * is another thing to consider moving forward.
 */
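
/* Following the same numbers: with p = (2^20 * 3) / 2^32 = 3/4096, the try
 * count is geometric with per-try success probability 1 - p, so the expected
 * number of tries is 1 / (1 - p) = 4096/4093, roughly 1.00073.
 */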

__le32 wg_index_hashtable_insert(struct index_hashtable *table,
                 struct index_hashtable_entry *entry)
{
    struct index_hashtable_entry *existing_entry;

    spin_lock_bh(&table->lock);
    hlist_del_init_rcu(&entry->index_hash);
    spin_unlock_bh(&table->lock);

    rcu_read_lock_bh();

search_unused_slot:
    /* First we try to find an unused slot, randomly, while unlocked. */
    entry->index = (__force __le32)get_random_u32();
    hlist_for_each_entry_rcu_bh(existing_entry,
                    index_bucket(table, entry->index),
                    index_hash) {
        if (existing_entry->index == entry->index)
            /* If it's already in use, we continue searching. */
            goto search_unused_slot;
    }

    /* Once we've found an unused slot, we lock it, and then double-check
     * that nobody else stole it from us.
     */
    spin_lock_bh(&table->lock);
    hlist_for_each_entry_rcu_bh(existing_entry,
                    index_bucket(table, entry->index),
                    index_hash) {
        if (existing_entry->index == entry->index) {
            spin_unlock_bh(&table->lock);
            /* If it was stolen, we start over. */
            goto search_unused_slot;
        }
    }
    /* Otherwise, we know we have it exclusively (since we're locked),
     * so we insert.
     */
    hlist_add_head_rcu(&entry->index_hash,
               index_bucket(table, entry->index));
    spin_unlock_bh(&table->lock);

    rcu_read_unlock_bh();

    return entry->index;
}
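
/* Usage sketch (hedged; the real call sites are elsewhere in the driver): the
 * handshake code inserts its entry to obtain the random 32-bit sender index
 * that is sent on the wire, along the lines of:
 *
 *     message->sender_index = wg_index_hashtable_insert(table,
 *                                                       &handshake->entry);
 */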

bool wg_index_hashtable_replace(struct index_hashtable *table,
                struct index_hashtable_entry *old,
                struct index_hashtable_entry *new)
{
    bool ret;

    spin_lock_bh(&table->lock);
    ret = !hlist_unhashed(&old->index_hash);
    if (unlikely(!ret))
        goto out;

    new->index = old->index;
    hlist_replace_rcu(&old->index_hash, &new->index_hash);

    /* Calling init here NULLs out index_hash, and in fact after this
     * function returns, it's theoretically possible for this to get
     * reinserted elsewhere. That means the RCU lookup below might either
     * terminate early or jump between buckets, in which case the packet
     * simply gets dropped, which isn't terrible.
     */
    INIT_HLIST_NODE(&old->index_hash);
out:
    spin_unlock_bh(&table->lock);
    return ret;
}
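
/* Context (an inference from how the driver uses this function, not stated in
 * this file): when a handshake completes, the keypair's entry replaces the
 * handshake's entry under a single lock acquisition, so the index already
 * communicated to the remote peer stays valid and switches atomically from
 * resolving the handshake to resolving the keypair.
 */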

void wg_index_hashtable_remove(struct index_hashtable *table,
                   struct index_hashtable_entry *entry)
{
    spin_lock_bh(&table->lock);
    hlist_del_init_rcu(&entry->index_hash);
    spin_unlock_bh(&table->lock);
}

/* Returns a strong reference to an entry->peer */
struct index_hashtable_entry *
wg_index_hashtable_lookup(struct index_hashtable *table,
              const enum index_hashtable_type type_mask,
              const __le32 index, struct wg_peer **peer)
{
    struct index_hashtable_entry *iter_entry, *entry = NULL;

    rcu_read_lock_bh();
    hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index),
                    index_hash) {
        if (iter_entry->index == index) {
            if (likely(iter_entry->type & type_mask))
                entry = iter_entry;
            break;
        }
    }
    if (likely(entry)) {
        entry->peer = wg_peer_get_maybe_zero(entry->peer);
        if (likely(entry->peer))
            *peer = entry->peer;
        else
            entry = NULL;
    }
    rcu_read_unlock_bh();
    return entry;
}
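
/* Receive-path sketch (hypothetical variable names; the type constants come
 * from the enum in peerlookup.h): accept only keypair entries for a given
 * receiver index, then drop the peer reference when done:
 *
 *     struct wg_peer *peer = NULL;
 *     struct index_hashtable_entry *entry =
 *         wg_index_hashtable_lookup(table, INDEX_HASHTABLE_KEYPAIR,
 *                                   receiver_index, &peer);
 *     if (entry) {
 *         ... entry->peer == peer, with a strong reference held ...
 *         wg_peer_put(peer);
 *     }
 */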