// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 *
 * This contains some basic static unit tests for the allowedips data structure.
 * It also has two additional modes that are disabled and meant to be used by
 * folks directly playing with this file. If you define the macro
 * DEBUG_PRINT_TRIE_GRAPHVIZ to be 1, then every time there's a full tree in
 * memory, it will be printed out as KERN_DEBUG in a format that can be passed
 * to graphviz (the dot command) to visualize it. If you define the macro
 * DEBUG_RANDOM_TRIE to be 1, then there will be an extremely costly set of
 * randomized tests done against a trivial implementation, which may take
 * upwards of a half-hour to complete. There's no set of users who should be
 * enabling these, and the only developers that should go anywhere near these
 * knobs are the ones who are reading this comment.
 */

#ifdef DEBUG

#include <linux/siphash.h>

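/*
 * print_node()/print_tree() dump a trie to the kernel log as a Graphviz
 * "digraph": one declaration line per node (dotted if the node carries no
 * peer, otherwise bold and colored by a hash of its peer pointer) and one
 * edge line per child, recursing down both subtrees. One rough way to render
 * the dump from userspace (the exact massaging depends on the local log
 * format) is something like:
 *
 *   dmesg | cut -d ']' -f 2- | sed -n '/digraph trie {/,/^ }/p' > trie.dot
 *   dot -Tsvg trie.dot > trie.svg
 */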
static __init void print_node(struct allowedips_node *node, u8 bits)
{
    char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n";
    char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
    u8 ip1[16], ip2[16], cidr1, cidr2;
    char *style = "dotted";
    u32 color = 0;

    if (node == NULL)
        return;
    if (bits == 32) {
        fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n";
        fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
    } else if (bits == 128) {
        fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n";
        fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
    }
    if (node->peer) {
        hsiphash_key_t key = { { 0 } };

        memcpy(&key, &node->peer, sizeof(node->peer));
        color = hsiphash_1u32(0xdeadbeef, &key) % 200 << 16 |
            hsiphash_1u32(0xbabecafe, &key) % 200 << 8 |
            hsiphash_1u32(0xabad1dea, &key) % 200;
        style = "bold";
    }
    wg_allowedips_read_node(node, ip1, &cidr1);
    printk(fmt_declaration, ip1, cidr1, style, color);
    if (node->bit[0]) {
        wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2);
        printk(fmt_connection, ip1, cidr1, ip2, cidr2);
    }
    if (node->bit[1]) {
        wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2);
        printk(fmt_connection, ip1, cidr1, ip2, cidr2);
    }
    if (node->bit[0])
        print_node(rcu_dereference_raw(node->bit[0]), bits);
    if (node->bit[1])
        print_node(rcu_dereference_raw(node->bit[1]), bits);
}

static __init void print_tree(struct allowedips_node __rcu *top, u8 bits)
{
    printk(KERN_DEBUG "digraph trie {\n");
    print_node(rcu_dereference_raw(top), bits);
    printk(KERN_DEBUG "}\n");
}

enum {
    NUM_PEERS = 2000,
    NUM_RAND_ROUTES = 400,
    NUM_MUTATED_ROUTES = 100,
    NUM_QUERIES = NUM_RAND_ROUTES * NUM_MUTATED_ROUTES * 30
};

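/*
 * The "horrible" allowedips below is a deliberately naive reference
 * implementation: a flat hlist that is scanned linearly on every lookup.
 * It exists only so that randomized_test() can cross-check the real trie
 * against something that is trivially correct.
 */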
struct horrible_allowedips {
    struct hlist_head head;
};

struct horrible_allowedips_node {
    struct hlist_node table;
    union nf_inet_addr ip;
    union nf_inet_addr mask;
    u8 ip_version;
    void *value;
};

static __init void horrible_allowedips_init(struct horrible_allowedips *table)
{
    INIT_HLIST_HEAD(&table->head);
}

static __init void horrible_allowedips_free(struct horrible_allowedips *table)
{
    struct horrible_allowedips_node *node;
    struct hlist_node *h;

    hlist_for_each_entry_safe(node, h, &table->head, table) {
        hlist_del(&node->table);
        kfree(node);
    }
}

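/*
 * Expand a prefix length into a netmask in network byte order: whole 0xff
 * bytes for the fully-covered part, then the 32-bit word containing the
 * prefix boundary gets only its top (cidr % 32) bits set.
 */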
static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
{
    union nf_inet_addr mask;

    memset(&mask, 0, sizeof(mask));
    memset(&mask.all, 0xff, cidr / 8);
    if (cidr % 32)
        mask.all[cidr / 32] = (__force u32)htonl(
            (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL);
    return mask;
}

static __init inline u8 horrible_mask_to_cidr(union nf_inet_addr subnet)
{
    return hweight32(subnet.all[0]) + hweight32(subnet.all[1]) +
           hweight32(subnet.all[2]) + hweight32(subnet.all[3]);
}

static __init inline void
horrible_mask_self(struct horrible_allowedips_node *node)
{
    if (node->ip_version == 4) {
        node->ip.ip &= node->mask.ip;
    } else if (node->ip_version == 6) {
        node->ip.ip6[0] &= node->mask.ip6[0];
        node->ip.ip6[1] &= node->mask.ip6[1];
        node->ip.ip6[2] &= node->mask.ip6[2];
        node->ip.ip6[3] &= node->mask.ip6[3];
    }
}

static __init inline bool
horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip)
{
    return (ip->s_addr & node->mask.ip) == node->ip.ip;
}

static __init inline bool
horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip)
{
    return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] &&
           (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] &&
           (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] &&
           (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
}

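/*
 * Keep the list ordered from longest prefix to shortest; an entry with an
 * identical ip/mask simply has its value updated in place. Since the lookup
 * functions below return the first match, this ordering is what gives the
 * reference implementation its longest-prefix-match semantics.
 */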
static __init void
horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node)
{
    struct horrible_allowedips_node *other = NULL, *where = NULL;
    u8 my_cidr = horrible_mask_to_cidr(node->mask);

    hlist_for_each_entry(other, &table->head, table) {
        if (other->ip_version == node->ip_version &&
            !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) &&
            !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) {
            other->value = node->value;
            kfree(node);
            return;
        }
    }
    hlist_for_each_entry(other, &table->head, table) {
        where = other;
        if (horrible_mask_to_cidr(other->mask) <= my_cidr)
            break;
    }
    if (!other && !where)
        hlist_add_head(&node->table, &table->head);
    else if (!other)
        hlist_add_behind(&node->table, &where->table);
    else
        hlist_add_before(&node->table, &where->table);
}

static __init int
horrible_allowedips_insert_v4(struct horrible_allowedips *table,
                  struct in_addr *ip, u8 cidr, void *value)
{
    struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

    if (unlikely(!node))
        return -ENOMEM;
    node->ip.in = *ip;
    node->mask = horrible_cidr_to_mask(cidr);
    node->ip_version = 4;
    node->value = value;
    horrible_mask_self(node);
    horrible_insert_ordered(table, node);
    return 0;
}

static __init int
horrible_allowedips_insert_v6(struct horrible_allowedips *table,
                  struct in6_addr *ip, u8 cidr, void *value)
{
    struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

    if (unlikely(!node))
        return -ENOMEM;
    node->ip.in6 = *ip;
    node->mask = horrible_cidr_to_mask(cidr);
    node->ip_version = 6;
    node->value = value;
    horrible_mask_self(node);
    horrible_insert_ordered(table, node);
    return 0;
}

static __init void *
horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip)
{
    struct horrible_allowedips_node *node;

    hlist_for_each_entry(node, &table->head, table) {
        if (node->ip_version == 4 && horrible_match_v4(node, ip))
            return node->value;
    }
    return NULL;
}

static __init void *
horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip)
{
    struct horrible_allowedips_node *node;

    hlist_for_each_entry(node, &table->head, table) {
        if (node->ip_version == 6 && horrible_match_v6(node, ip))
            return node->value;
    }
    return NULL;
}

static __init void
horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value)
{
    struct horrible_allowedips_node *node;
    struct hlist_node *h;

    hlist_for_each_entry_safe(node, h, &table->head, table) {
        if (node->value != value)
            continue;
        hlist_del(&node->table);
        kfree(node);
    }
}

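/*
 * Randomized stress test: insert the same set of random v4 and v6 routes
 * (NUM_RAND_ROUTES bases, each with NUM_MUTATED_ROUTES mutated variants,
 * all pointing at randomly chosen peers) into both the real trie and the
 * horrible list, compare lookups of random addresses while removing one
 * peer at a time, and finally verify that the emptied trie has no roots
 * left.
 */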
static __init bool randomized_test(void)
{
    unsigned int i, j, k, mutate_amount, cidr;
    u8 ip[16], mutate_mask[16], mutated[16];
    struct wg_peer **peers, *peer;
    struct horrible_allowedips h;
    DEFINE_MUTEX(mutex);
    struct allowedips t;
    bool ret = false;

    mutex_init(&mutex);

    wg_allowedips_init(&t);
    horrible_allowedips_init(&h);

    peers = kcalloc(NUM_PEERS, sizeof(*peers), GFP_KERNEL);
    if (unlikely(!peers)) {
        pr_err("allowedips random self-test malloc: FAIL\n");
        goto free;
    }
    for (i = 0; i < NUM_PEERS; ++i) {
        peers[i] = kzalloc(sizeof(*peers[i]), GFP_KERNEL);
        if (unlikely(!peers[i])) {
            pr_err("allowedips random self-test malloc: FAIL\n");
            goto free;
        }
        kref_init(&peers[i]->refcount);
        INIT_LIST_HEAD(&peers[i]->allowedips_list);
    }

    mutex_lock(&mutex);

    for (i = 0; i < NUM_RAND_ROUTES; ++i) {
        prandom_bytes(ip, 4);
        cidr = prandom_u32_max(32) + 1;
        peer = peers[prandom_u32_max(NUM_PEERS)];
        if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr,
                        peer, &mutex) < 0) {
            pr_err("allowedips random self-test malloc: FAIL\n");
            goto free_locked;
        }
        if (horrible_allowedips_insert_v4(&h, (struct in_addr *)ip,
                          cidr, peer) < 0) {
            pr_err("allowedips random self-test malloc: FAIL\n");
            goto free_locked;
        }
        for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
            memcpy(mutated, ip, 4);
            prandom_bytes(mutate_mask, 4);
            mutate_amount = prandom_u32_max(32);
            for (k = 0; k < mutate_amount / 8; ++k)
                mutate_mask[k] = 0xff;
            mutate_mask[k] = 0xff
                     << ((8 - (mutate_amount % 8)) % 8);
            for (; k < 4; ++k)
                mutate_mask[k] = 0;
            for (k = 0; k < 4; ++k)
                mutated[k] = (mutated[k] & mutate_mask[k]) |
                         (~mutate_mask[k] &
                          prandom_u32_max(256));
            cidr = prandom_u32_max(32) + 1;
            peer = peers[prandom_u32_max(NUM_PEERS)];
            if (wg_allowedips_insert_v4(&t,
                            (struct in_addr *)mutated,
                            cidr, peer, &mutex) < 0) {
                pr_err("allowedips random self-test malloc: FAIL\n");
                goto free_locked;
            }
            if (horrible_allowedips_insert_v4(&h,
                (struct in_addr *)mutated, cidr, peer)) {
                pr_err("allowedips random self-test malloc: FAIL\n");
                goto free_locked;
            }
        }
    }

    for (i = 0; i < NUM_RAND_ROUTES; ++i) {
        prandom_bytes(ip, 16);
        cidr = prandom_u32_max(128) + 1;
        peer = peers[prandom_u32_max(NUM_PEERS)];
        if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr,
                        peer, &mutex) < 0) {
            pr_err("allowedips random self-test malloc: FAIL\n");
            goto free_locked;
        }
        if (horrible_allowedips_insert_v6(&h, (struct in6_addr *)ip,
                          cidr, peer) < 0) {
            pr_err("allowedips random self-test malloc: FAIL\n");
            goto free_locked;
        }
        for (j = 0; j < NUM_MUTATED_ROUTES; ++j) {
            memcpy(mutated, ip, 16);
            prandom_bytes(mutate_mask, 16);
            mutate_amount = prandom_u32_max(128);
            for (k = 0; k < mutate_amount / 8; ++k)
                mutate_mask[k] = 0xff;
            mutate_mask[k] = 0xff
                     << ((8 - (mutate_amount % 8)) % 8);
            for (; k < 16; ++k)
                mutate_mask[k] = 0;
            for (k = 0; k < 16; ++k)
                mutated[k] = (mutated[k] & mutate_mask[k]) |
                         (~mutate_mask[k] &
                          prandom_u32_max(256));
            cidr = prandom_u32_max(128) + 1;
            peer = peers[prandom_u32_max(NUM_PEERS)];
            if (wg_allowedips_insert_v6(&t,
                            (struct in6_addr *)mutated,
                            cidr, peer, &mutex) < 0) {
                pr_err("allowedips random self-test malloc: FAIL\n");
                goto free_locked;
            }
            if (horrible_allowedips_insert_v6(
                    &h, (struct in6_addr *)mutated, cidr,
                    peer)) {
                pr_err("allowedips random self-test malloc: FAIL\n");
                goto free_locked;
            }
        }
    }

    mutex_unlock(&mutex);

    if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) {
        print_tree(t.root4, 32);
        print_tree(t.root6, 128);
    }

    for (j = 0;; ++j) {
        for (i = 0; i < NUM_QUERIES; ++i) {
            prandom_bytes(ip, 4);
            if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
                horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip);
                pr_err("allowedips random v4 self-test: FAIL\n");
                goto free;
            }
            prandom_bytes(ip, 16);
            if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
                pr_err("allowedips random v6 self-test: FAIL\n");
                goto free;
            }
        }
        if (j >= NUM_PEERS)
            break;
        mutex_lock(&mutex);
        wg_allowedips_remove_by_peer(&t, peers[j], &mutex);
        mutex_unlock(&mutex);
        horrible_allowedips_remove_by_value(&h, peers[j]);
    }

    if (t.root4 || t.root6) {
        pr_err("allowedips random self-test removal: FAIL\n");
        goto free;
    }

    ret = true;

free:
    mutex_lock(&mutex);
free_locked:
    wg_allowedips_free(&t, &mutex);
    mutex_unlock(&mutex);
    horrible_allowedips_free(&h);
    if (peers) {
        for (i = 0; i < NUM_PEERS; ++i)
            kfree(peers[i]);
    }
    kfree(peers);
    return ret;
}

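/*
 * Helpers for building literal test addresses. Both return a pointer to
 * static storage, so every call overwrites the previous result; that is
 * fine here because the self-test is single-threaded and each caller
 * consumes the address immediately.
 */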
static __init inline struct in_addr *ip4(u8 a, u8 b, u8 c, u8 d)
{
    static struct in_addr ip;
    u8 *split = (u8 *)&ip;

    split[0] = a;
    split[1] = b;
    split[2] = c;
    split[3] = d;
    return &ip;
}

static __init inline struct in6_addr *ip6(u32 a, u32 b, u32 c, u32 d)
{
    static struct in6_addr ip;
    __be32 *split = (__be32 *)&ip;

    split[0] = cpu_to_be32(a);
    split[1] = cpu_to_be32(b);
    split[2] = cpu_to_be32(c);
    split[3] = cpu_to_be32(d);
    return &ip;
}

static __init struct wg_peer *init_peer(void)
{
    struct wg_peer *peer = kzalloc(sizeof(*peer), GFP_KERNEL);

    if (!peer)
        return NULL;
    kref_init(&peer->refcount);
    INIT_LIST_HEAD(&peer->allowedips_list);
    return peer;
}

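/*
 * Test scaffolding for wg_allowedips_selftest() below. These macros expand
 * inside that function and rely on its locals: the trie t, the mutex
 * guarding it, the running test counter i and the overall success flag.
 */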
#define insert(version, mem, ipa, ipb, ipc, ipd, cidr)                       \
    wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \
                    cidr, mem, &mutex)

#define maybe_fail() do {                                               \
        ++i;                                                    \
        if (!_s) {                                              \
            pr_info("allowedips self-test %zu: FAIL\n", i); \
            success = false;                                \
        }                                                       \
    } while (0)

#define test(version, mem, ipa, ipb, ipc, ipd) do {                          \
        bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
                 ip##version(ipa, ipb, ipc, ipd)) == (mem);  \
        maybe_fail();                                                \
    } while (0)

#define test_negative(version, mem, ipa, ipb, ipc, ipd) do {                 \
        bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \
                 ip##version(ipa, ipb, ipc, ipd)) != (mem);  \
        maybe_fail();                                                \
    } while (0)

#define test_boolean(cond) do {   \
        bool _s = (cond); \
        maybe_fail();     \
    } while (0)

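/*
 * Static self-test: build a small hand-picked set of v4 and v6 allowed IPs,
 * check a series of known lookups (including entries that replace earlier
 * ones and entries that must be masked down to their prefix), exercise
 * removal by peer, walk a peer's allowedips_list, and optionally run the
 * randomized test above.
 */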
bool __init wg_allowedips_selftest(void)
{
    bool found_a = false, found_b = false, found_c = false, found_d = false,
         found_e = false, found_other = false;
    struct wg_peer *a = init_peer(), *b = init_peer(), *c = init_peer(),
               *d = init_peer(), *e = init_peer(), *f = init_peer(),
               *g = init_peer(), *h = init_peer();
    struct allowedips_node *iter_node;
    bool success = false;
    struct allowedips t;
    DEFINE_MUTEX(mutex);
    struct in6_addr ip;
    size_t i = 0, count = 0;
    __be64 part;

    mutex_init(&mutex);
    mutex_lock(&mutex);
    wg_allowedips_init(&t);

    if (!a || !b || !c || !d || !e || !f || !g || !h) {
        pr_err("allowedips self-test malloc: FAIL\n");
        goto free;
    }

    insert(4, a, 192, 168, 4, 0, 24);
    insert(4, b, 192, 168, 4, 4, 32);
    insert(4, c, 192, 168, 0, 0, 16);
    insert(4, d, 192, 95, 5, 64, 27);
    /* replaces previous entry, and maskself is required */
    insert(4, c, 192, 95, 5, 65, 27);
    insert(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
    insert(6, c, 0x26075300, 0x60006b00, 0, 0, 64);
    insert(4, e, 0, 0, 0, 0, 0);
    insert(6, e, 0, 0, 0, 0, 0);
    /* replaces previous entry */
    insert(6, f, 0, 0, 0, 0, 0);
    insert(6, g, 0x24046800, 0, 0, 0, 32);
    /* maskself is required */
    insert(6, h, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 64);
    insert(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 128);
    insert(6, c, 0x24446800, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128);
    insert(6, b, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98);
    insert(4, g, 64, 15, 112, 0, 20);
    /* maskself is required */
    insert(4, h, 64, 15, 123, 211, 25);
    insert(4, a, 10, 0, 0, 0, 25);
    insert(4, b, 10, 0, 0, 128, 25);
    insert(4, a, 10, 1, 0, 0, 30);
    insert(4, b, 10, 1, 0, 4, 30);
    insert(4, c, 10, 1, 0, 8, 29);
    insert(4, d, 10, 1, 0, 16, 29);

    if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) {
        print_tree(t.root4, 32);
        print_tree(t.root6, 128);
    }

    success = true;

    test(4, a, 192, 168, 4, 20);
    test(4, a, 192, 168, 4, 0);
    test(4, b, 192, 168, 4, 4);
    test(4, c, 192, 168, 200, 182);
    test(4, c, 192, 95, 5, 68);
    test(4, e, 192, 95, 5, 96);
    test(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543);
    test(6, c, 0x26075300, 0x60006b00, 0, 0xc02e01ee);
    test(6, f, 0x26075300, 0x60006b01, 0, 0);
    test(6, g, 0x24046800, 0x40040806, 0, 0x1006);
    test(6, g, 0x24046800, 0x40040806, 0x1234, 0x5678);
    test(6, f, 0x240467ff, 0x40040806, 0x1234, 0x5678);
    test(6, f, 0x24046801, 0x40040806, 0x1234, 0x5678);
    test(6, h, 0x24046800, 0x40040800, 0x1234, 0x5678);
    test(6, h, 0x24046800, 0x40040800, 0, 0);
    test(6, h, 0x24046800, 0x40040800, 0x10101010, 0x10101010);
    test(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef);
    test(4, g, 64, 15, 116, 26);
    test(4, g, 64, 15, 127, 3);
    test(4, g, 64, 15, 123, 1);
    test(4, h, 64, 15, 123, 128);
    test(4, h, 64, 15, 123, 129);
    test(4, a, 10, 0, 0, 52);
    test(4, b, 10, 0, 0, 220);
    test(4, a, 10, 1, 0, 2);
    test(4, b, 10, 1, 0, 6);
    test(4, c, 10, 1, 0, 10);
    test(4, d, 10, 1, 0, 20);

    insert(4, a, 1, 0, 0, 0, 32);
    insert(4, a, 64, 0, 0, 0, 32);
    insert(4, a, 128, 0, 0, 0, 32);
    insert(4, a, 192, 0, 0, 0, 32);
    insert(4, a, 255, 0, 0, 0, 32);
    wg_allowedips_remove_by_peer(&t, a, &mutex);
    test_negative(4, a, 1, 0, 0, 0);
    test_negative(4, a, 64, 0, 0, 0);
    test_negative(4, a, 128, 0, 0, 0);
    test_negative(4, a, 192, 0, 0, 0);
    test_negative(4, a, 255, 0, 0, 0);

    wg_allowedips_free(&t, &mutex);
    wg_allowedips_init(&t);
    insert(4, a, 192, 168, 0, 0, 16);
    insert(4, a, 192, 168, 0, 0, 24);
    wg_allowedips_remove_by_peer(&t, a, &mutex);
    test_negative(4, a, 192, 168, 0, 1);

    /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_BITS) in free_node
     * if something goes wrong.
     */
    for (i = 0; i < MAX_ALLOWEDIPS_BITS; ++i) {
        part = cpu_to_be64(~(1LLU << (i % 64)));
        memset(&ip, 0xff, 16);
        memcpy((u8 *)&ip + (i < 64) * 8, &part, 8);
        wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
    }

    wg_allowedips_free(&t, &mutex);

    wg_allowedips_init(&t);
    insert(4, a, 192, 95, 5, 93, 27);
    insert(6, a, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128);
    insert(4, a, 10, 1, 0, 20, 29);
    insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 83);
    insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 21);
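
    /* The trie stores each node masked down to its prefix, so the five
     * routes just added must read back as their network addresses, e.g.
     * 192.95.5.93/27 comes back as 192.95.5.64/27.
     */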
    list_for_each_entry(iter_node, &a->allowedips_list, peer_list) {
        u8 cidr, ip[16] __aligned(__alignof(u64));
        int family = wg_allowedips_read_node(iter_node, ip, &cidr);

        count++;

        if (cidr == 27 && family == AF_INET &&
            !memcmp(ip, ip4(192, 95, 5, 64), sizeof(struct in_addr)))
            found_a = true;
        else if (cidr == 128 && family == AF_INET6 &&
             !memcmp(ip, ip6(0x26075300, 0x60006b00, 0, 0xc05f0543),
                 sizeof(struct in6_addr)))
            found_b = true;
        else if (cidr == 29 && family == AF_INET &&
             !memcmp(ip, ip4(10, 1, 0, 16), sizeof(struct in_addr)))
            found_c = true;
        else if (cidr == 83 && family == AF_INET6 &&
             !memcmp(ip, ip6(0x26075300, 0x6d8a6bf8, 0xdab1e000, 0),
                 sizeof(struct in6_addr)))
            found_d = true;
        else if (cidr == 21 && family == AF_INET6 &&
             !memcmp(ip, ip6(0x26075000, 0, 0, 0),
                 sizeof(struct in6_addr)))
            found_e = true;
        else
            found_other = true;
    }
    test_boolean(count == 5);
    test_boolean(found_a);
    test_boolean(found_b);
    test_boolean(found_c);
    test_boolean(found_d);
    test_boolean(found_e);
    test_boolean(!found_other);

    if (IS_ENABLED(DEBUG_RANDOM_TRIE) && success)
        success = randomized_test();

    if (success)
        pr_info("allowedips self-tests: pass\n");

free:
    wg_allowedips_free(&t, &mutex);
    kfree(a);
    kfree(b);
    kfree(c);
    kfree(d);
    kfree(e);
    kfree(f);
    kfree(g);
    kfree(h);
    mutex_unlock(&mutex);

    return success;
}

#undef test_boolean
#undef test_negative
#undef test
#undef maybe_fail
#undef insert

#endif