// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <asm/page.h>

#include <kunit/test.h>

#include "../mm/kasan/kasan.h"

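/*
 * Offset added to some out-of-bounds accesses below: generic KASAN detects
 * out-of-bounds accesses byte-precisely, while the tag-based modes only
 * detect accesses beyond the KASAN_GRANULE_SIZE-aligned object, so those
 * modes need the access nudged one granule further.
 */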
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;
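/*
 * For example (see kasan_strings() below):
 *
 *     KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));
 */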

/*
 * Shared state, registered below as the "kasan_status" KUnit resource, through
 * which the KASAN reporting code records detected faults for the checks in
 * KUNIT_EXPECT_KASAN_FAIL().
 */
static struct kunit_resource resource;
static struct kunit_kasan_status test_status;
static bool multishot;

/*
 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
 * first detected bug and panic the kernel if panic_on_warn is enabled. For
 * hardware tag-based KASAN, also allow tag checking to be re-enabled for each
 * test; see the comment for KUNIT_EXPECT_KASAN_FAIL().
 */
static int kasan_test_init(struct kunit *test)
{
    if (!kasan_enabled()) {
        kunit_err(test, "can't run KASAN tests with KASAN disabled");
        return -1;
    }

    multishot = kasan_save_enable_multi_shot();
    test_status.report_found = false;
    test_status.sync_fault = false;
    kunit_add_named_resource(test, NULL, NULL, &resource,
                    "kasan_status", &test_status);
    return 0;
}

static void kasan_test_exit(struct kunit *test)
{
    kasan_restore_multi_shot(multishot);
    KUNIT_EXPECT_FALSE(test, test_status.report_found);
}

/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_status". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can only be disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the test_status
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
 * as false. This allows detecting KASAN reports that happen outside of the
 * checks by asserting !test_status.report_found at the start of
 * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {          \
    if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&             \
        kasan_sync_fault_possible())                \
        migrate_disable();                  \
    KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));  \
    barrier();                          \
    expression;                         \
    barrier();                          \
    if (kasan_async_fault_possible())               \
        kasan_force_async_fault();              \
    if (!READ_ONCE(test_status.report_found)) {         \
        KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "  \
                "expected in \"" #expression        \
                "\", but none occurred");       \
    }                               \
    if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&             \
        kasan_sync_fault_possible()) {              \
        if (READ_ONCE(test_status.report_found) &&      \
            READ_ONCE(test_status.sync_fault))          \
            kasan_enable_tagging();             \
        migrate_enable();                   \
    }                               \
    WRITE_ONCE(test_status.report_found, false);            \
} while (0)
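
/*
 * A minimal usage sketch (a hypothetical test, not part of the suite below;
 * it mirrors kmalloc_uaf()):
 *
 *     static void example_uaf(struct kunit *test)
 *     {
 *         char *ptr = kmalloc(16, GFP_KERNEL);
 *
 *         KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 *         kfree(ptr);
 *         KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
 *     }
 */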

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {           \
    if (!IS_ENABLED(config))                    \
        kunit_skip((test), "Test requires " #config "=y");  \
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {          \
    if (IS_ENABLED(config))                     \
        kunit_skip((test), "Test requires " #config "=n");  \
} while (0)
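
/* Typical use: KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC); */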

static void kmalloc_oob_right(struct kunit *test)
{
    char *ptr;
    size_t size = 128 - KASAN_GRANULE_SIZE - 5;

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    OPTIMIZER_HIDE_VAR(ptr);
    /*
     * An unaligned access past the requested kmalloc size.
     * Only generic KASAN can precisely detect these.
     */
    if (IS_ENABLED(CONFIG_KASAN_GENERIC))
        KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

    /*
     * An aligned access into the first out-of-bounds granule that falls
     * within the aligned kmalloc object.
     */
    KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');

    /* Out-of-bounds access past the aligned kmalloc object. */
    KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
                    ptr[size + KASAN_GRANULE_SIZE + 5]);

    kfree(ptr);
}

static void kmalloc_oob_left(struct kunit *test)
{
    char *ptr;
    size_t size = 15;

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    OPTIMIZER_HIDE_VAR(ptr);
    KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
    kfree(ptr);
}

static void kmalloc_node_oob_right(struct kunit *test)
{
    char *ptr;
    size_t size = 4096;

    ptr = kmalloc_node(size, GFP_KERNEL, 0);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    OPTIMIZER_HIDE_VAR(ptr);
    KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
    kfree(ptr);
}

/*
 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
 * fit into a slab cache and therefore is allocated via the page allocator
 * fallback. Since this kind of fallback is only implemented for SLUB, these
 * tests are limited to that allocator.
 */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
    char *ptr;
    size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    OPTIMIZER_HIDE_VAR(ptr);
    KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

    kfree(ptr);
}

static void kmalloc_pagealloc_uaf(struct kunit *test)
{
    char *ptr;
    size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
    kfree(ptr);

    KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
    char *ptr;
    size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void pagealloc_oob_right(struct kunit *test)
{
    char *ptr;
    struct page *pages;
    size_t order = 4;
    size_t size = (1UL << (PAGE_SHIFT + order));

    /*
     * With generic KASAN page allocations have no redzones, thus
     * out-of-bounds detection is not guaranteed.
     * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
     */
    KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

    pages = alloc_pages(GFP_KERNEL, order);
    ptr = page_address(pages);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
    free_pages((unsigned long)ptr, order);
}

static void pagealloc_uaf(struct kunit *test)
{
    char *ptr;
    struct page *pages;
    size_t order = 4;

    pages = alloc_pages(GFP_KERNEL, order);
    ptr = page_address(pages);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
    free_pages((unsigned long)ptr, order);

    KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_large_oob_right(struct kunit *test)
{
    char *ptr;
    size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

    /*
     * Allocate a chunk that is large enough, but still fits into a slab
     * and does not trigger the page allocator fallback in SLUB.
     */
    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    OPTIMIZER_HIDE_VAR(ptr);
    KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
    kfree(ptr);
}

static void krealloc_more_oob_helper(struct kunit *test,
                    size_t size1, size_t size2)
{
    char *ptr1, *ptr2;
    size_t middle;

    KUNIT_ASSERT_LT(test, size1, size2);
    middle = size1 + (size2 - size1) / 2;

    ptr1 = kmalloc(size1, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

    ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

    /* All offsets up to size2 must be accessible. */
    ptr2[size1 - 1] = 'x';
    ptr2[size1] = 'x';
    ptr2[middle] = 'x';
    ptr2[size2 - 1] = 'x';

    /* Generic mode is precise, so unaligned size2 must be inaccessible. */
    if (IS_ENABLED(CONFIG_KASAN_GENERIC))
        KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

    /* For all modes, the first aligned offset after size2 must be inaccessible. */
    KUNIT_EXPECT_KASAN_FAIL(test,
        ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

    kfree(ptr2);
}

static void krealloc_less_oob_helper(struct kunit *test,
                    size_t size1, size_t size2)
{
    char *ptr1, *ptr2;
    size_t middle;

    KUNIT_ASSERT_LT(test, size2, size1);
    middle = size2 + (size1 - size2) / 2;

    ptr1 = kmalloc(size1, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

    ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

    /* Must be accessible for all modes. */
    ptr2[size2 - 1] = 'x';

    /* Generic mode is precise, so unaligned size2 must be inaccessible. */
    if (IS_ENABLED(CONFIG_KASAN_GENERIC))
        KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

    /* For all modes, the first aligned offset after size2 must be inaccessible. */
    KUNIT_EXPECT_KASAN_FAIL(test,
        ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

    /*
     * For all modes, size2, middle, and size1 should land in separate
     * granules, and thus the latter two offsets should be inaccessible.
     */
    KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
                round_down(middle, KASAN_GRANULE_SIZE));
    KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
                round_down(size1, KASAN_GRANULE_SIZE));
    KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
    KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
    KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

    kfree(ptr2);
}

static void krealloc_more_oob(struct kunit *test)
{
    krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
    krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_pagealloc_more_oob(struct kunit *test)
{
    /* The page_alloc fallback is only implemented for SLUB. */
    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

    krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
                    KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_pagealloc_less_oob(struct kunit *test)
{
    /* The page_alloc fallback is only implemented for SLUB. */
    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

    krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
                    KMALLOC_MAX_CACHE_SIZE + 201);
}

/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
    char *ptr1, *ptr2;
    int size1 = 201;
    int size2 = 235;

    ptr1 = kmalloc(size1, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
    kfree(ptr1);

    KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
    KUNIT_ASSERT_NULL(test, ptr2);
    KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}

static void kmalloc_oob_16(struct kunit *test)
{
    struct {
        u64 words[2];
    } *ptr1, *ptr2;

    /* This test is specifically crafted for the generic mode. */
    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

    ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

    ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

    OPTIMIZER_HIDE_VAR(ptr1);
    OPTIMIZER_HIDE_VAR(ptr2);
    KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
    kfree(ptr1);
    kfree(ptr2);
}

static void kmalloc_uaf_16(struct kunit *test)
{
    struct {
        u64 words[2];
    } *ptr1, *ptr2;

    ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

    ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
    kfree(ptr2);

    KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
    kfree(ptr1);
}

/*
 * Note: in the memset tests below, the written range touches both valid and
 * invalid memory. This makes sure that the instrumentation checks not only
 * the starting address but the whole range.
 */

static void kmalloc_oob_memset_2(struct kunit *test)
{
    char *ptr;
    size_t size = 128 - KASAN_GRANULE_SIZE;

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    OPTIMIZER_HIDE_VAR(size);
    KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));
    kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
    char *ptr;
    size_t size = 128 - KASAN_GRANULE_SIZE;

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    OPTIMIZER_HIDE_VAR(size);
    KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4));
    kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
    char *ptr;
    size_t size = 128 - KASAN_GRANULE_SIZE;

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    OPTIMIZER_HIDE_VAR(size);
    KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8));
    kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
    char *ptr;
    size_t size = 128 - KASAN_GRANULE_SIZE;

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    OPTIMIZER_HIDE_VAR(size);
    KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16));
    kfree(ptr);
}

static void kmalloc_oob_in_memset(struct kunit *test)
{
    char *ptr;
    size_t size = 128 - KASAN_GRANULE_SIZE;

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    OPTIMIZER_HIDE_VAR(ptr);
    OPTIMIZER_HIDE_VAR(size);
    KUNIT_EXPECT_KASAN_FAIL(test,
                memset(ptr, 0, size + KASAN_GRANULE_SIZE));
    kfree(ptr);
}

static void kmalloc_memmove_negative_size(struct kunit *test)
{
    char *ptr;
    size_t size = 64;
    size_t invalid_size = -2;

    /*
     * Hardware tag-based mode doesn't check memmove for negative size.
     * As a result, this test introduces a side-effect memory corruption,
     * which can result in a crash.
     */
    KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    memset((char *)ptr, 0, 64);
    OPTIMIZER_HIDE_VAR(ptr);
    OPTIMIZER_HIDE_VAR(invalid_size);
    KUNIT_EXPECT_KASAN_FAIL(test,
        memmove((char *)ptr, (char *)ptr + 4, invalid_size));
    kfree(ptr);
}

static void kmalloc_memmove_invalid_size(struct kunit *test)
{
    char *ptr;
    size_t size = 64;
    volatile size_t invalid_size = size;

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    memset((char *)ptr, 0, 64);
    OPTIMIZER_HIDE_VAR(ptr);
    KUNIT_EXPECT_KASAN_FAIL(test,
        memmove((char *)ptr, (char *)ptr + 4, invalid_size));
    kfree(ptr);
}

static void kmalloc_uaf(struct kunit *test)
{
    char *ptr;
    size_t size = 10;

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    kfree(ptr);
    KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
}

static void kmalloc_uaf_memset(struct kunit *test)
{
    char *ptr;
    size_t size = 33;

    /*
     * Only generic KASAN uses quarantine, which is required to avoid a
     * kernel memory corruption this test causes.
     */
    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    kfree(ptr);
    KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

static void kmalloc_uaf2(struct kunit *test)
{
    char *ptr1, *ptr2;
    size_t size = 43;
    int counter = 0;

again:
    ptr1 = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

    kfree(ptr1);

    ptr2 = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

    /*
     * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
     * Allow up to 16 attempts at generating different tags.
     */
    if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
        kfree(ptr2);
        goto again;
    }

    KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
    KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

    kfree(ptr2);
}

static void kfree_via_page(struct kunit *test)
{
    char *ptr;
    size_t size = 8;
    struct page *page;
    unsigned long offset;

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    page = virt_to_page(ptr);
    offset = offset_in_page(ptr);
    kfree(page_address(page) + offset);
}

static void kfree_via_phys(struct kunit *test)
{
    char *ptr;
    size_t size = 8;
    phys_addr_t phys;

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    phys = virt_to_phys(ptr);
    kfree(phys_to_virt(phys));
}

static void kmem_cache_oob(struct kunit *test)
{
    char *p;
    size_t size = 200;
    struct kmem_cache *cache;

    cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

    p = kmem_cache_alloc(cache, GFP_KERNEL);
    if (!p) {
        kunit_err(test, "Allocation failed: %s\n", __func__);
        kmem_cache_destroy(cache);
        return;
    }

    KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

    kmem_cache_free(cache, p);
    kmem_cache_destroy(cache);
}

static void kmem_cache_accounted(struct kunit *test)
{
    int i;
    char *p;
    size_t size = 200;
    struct kmem_cache *cache;

    cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

    /*
     * Make several allocations with a delay to allow for lazy per-memcg
     * kmem cache creation.
     */
    for (i = 0; i < 5; i++) {
        p = kmem_cache_alloc(cache, GFP_KERNEL);
        if (!p)
            goto free_cache;

        kmem_cache_free(cache, p);
        msleep(100);
    }

free_cache:
    kmem_cache_destroy(cache);
}

static void kmem_cache_bulk(struct kunit *test)
{
    struct kmem_cache *cache;
    size_t size = 200;
    char *p[10];
    bool ret;
    int i;

    cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

    ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
    if (!ret) {
        kunit_err(test, "Allocation failed: %s\n", __func__);
        kmem_cache_destroy(cache);
        return;
    }

    for (i = 0; i < ARRAY_SIZE(p); i++)
        p[i][0] = p[i][size - 1] = 42;

    kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
    kmem_cache_destroy(cache);
}

static char global_array[10];

static void kasan_global_oob_right(struct kunit *test)
{
    /*
     * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
     * from failing here and panicking the kernel, access the array via a
     * volatile pointer, which will prevent the compiler from being able to
     * determine the array bounds.
     *
     * This access uses a volatile pointer to char (char *volatile) rather
     * than the more conventional pointer to volatile char (volatile char *)
     * because we want to prevent the compiler from making inferences about
     * the pointer itself (i.e. its array bounds), not the data that it
     * refers to.
     */
    char *volatile array = global_array;
    char *p = &array[ARRAY_SIZE(global_array) + 3];

    /* Only generic mode instruments globals. */
    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

    KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_global_oob_left(struct kunit *test)
{
    char *volatile array = global_array;
    char *p = array - 3;

    /*
     * GCC is known to fail this test, skip it.
     * See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
     */
    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
    KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
    char *ptr;
    size_t size = 123, real_size;

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
    real_size = ksize(ptr);

    OPTIMIZER_HIDE_VAR(ptr);

    /* This access shouldn't trigger a KASAN report. */
    ptr[size] = 'x';

    /* This one must. */
    KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);

    kfree(ptr);
}

/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
    char *ptr;
    int size = 128 - KASAN_GRANULE_SIZE;

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
    kfree(ptr);

    OPTIMIZER_HIDE_VAR(ptr);
    KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
    KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
    KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
}

static void kasan_stack_oob(struct kunit *test)
{
    char stack_array[10];
    /* See comment in kasan_global_oob_right. */
    char *volatile array = stack_array;
    char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

    KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
    volatile int i = 10;
    char alloca_array[i];
    /* See comment in kasan_global_oob_right. */
    char *volatile array = alloca_array;
    char *p = array - 1;

    /* Only generic mode instruments dynamic allocas. */
    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

    KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
    volatile int i = 10;
    char alloca_array[i];
    /* See comment in kasan_global_oob_right. */
    char *volatile array = alloca_array;
    char *p = array + i;

    /* Only generic mode instruments dynamic allocas. */
    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

    KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kmem_cache_double_free(struct kunit *test)
{
    char *p;
    size_t size = 200;
    struct kmem_cache *cache;

    cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

    p = kmem_cache_alloc(cache, GFP_KERNEL);
    if (!p) {
        kunit_err(test, "Allocation failed: %s\n", __func__);
        kmem_cache_destroy(cache);
        return;
    }

    kmem_cache_free(cache, p);
    KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
    kmem_cache_destroy(cache);
}

static void kmem_cache_invalid_free(struct kunit *test)
{
    char *p;
    size_t size = 200;
    struct kmem_cache *cache;

    cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
                  NULL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

    p = kmem_cache_alloc(cache, GFP_KERNEL);
    if (!p) {
        kunit_err(test, "Allocation failed: %s\n", __func__);
        kmem_cache_destroy(cache);
        return;
    }

    /* Trigger an invalid free; the object doesn't get freed. */
    KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

    /*
     * Properly free the object to prevent the "Objects remaining in
     * test_cache on __kmem_cache_shutdown" BUG failure.
     */
    kmem_cache_free(cache, p);

    kmem_cache_destroy(cache);
}

static void empty_cache_ctor(void *object) { }

static void kmem_cache_double_destroy(struct kunit *test)
{
    struct kmem_cache *cache;

    /* Provide a constructor to prevent cache merging. */
    cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
    kmem_cache_destroy(cache);
    KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
}

static void kasan_memchr(struct kunit *test)
{
    char *ptr;
    size_t size = 24;

    /*
     * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
     * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
     */
    KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

    if (OOB_TAG_OFF)
        size = round_up(size, OOB_TAG_OFF);

    ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    OPTIMIZER_HIDE_VAR(ptr);
    OPTIMIZER_HIDE_VAR(size);
    KUNIT_EXPECT_KASAN_FAIL(test,
        kasan_ptr_result = memchr(ptr, '1', size + 1));

    kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
    char *ptr;
    size_t size = 24;
    int arr[9];

    /*
     * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
     * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
     */
    KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

    if (OOB_TAG_OFF)
        size = round_up(size, OOB_TAG_OFF);

    ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
    memset(arr, 0, sizeof(arr));

    OPTIMIZER_HIDE_VAR(ptr);
    OPTIMIZER_HIDE_VAR(size);
    KUNIT_EXPECT_KASAN_FAIL(test,
        kasan_int_result = memcmp(ptr, arr, size + 1));
    kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
    char *ptr;
    size_t size = 24;

    /*
     * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
     * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
     */
    KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

    ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    kfree(ptr);

    /*
     * Try to cause only one invalid access (less spam in dmesg).
     * For that, ptr must point to a zeroed byte. Skip the metadata that
     * could be stored in the freed object so that ptr likely points to a
     * zeroed byte.
     */
    ptr += 16;
    KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

    KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

    KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

    KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

    KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

    KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}

static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
    KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
    KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
    KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
    KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
    KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
    KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
    KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
    KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
    KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
    KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
    KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
    KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
    KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
    KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
    KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
    KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

#if defined(clear_bit_unlock_is_negative_byte)
    KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
                clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}

static void kasan_bitops_generic(struct kunit *test)
{
    long *bits;

    /* This test is specifically crafted for the generic mode. */
    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

    /*
     * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
     * this way we do not actually corrupt other memory.
     */
    bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

    /*
     * The calls below try to access a bit within the allocated memory;
     * these accesses are nevertheless out-of-bounds, since bitops are
     * defined to operate on the whole long the bit is in.
     */
    kasan_bitops_modify(test, BITS_PER_LONG, bits);

    /* The calls below try to access a bit beyond the allocated memory. */
    kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

    kfree(bits);
}

static void kasan_bitops_tags(struct kunit *test)
{
    long *bits;

    /* This test is specifically crafted for tag-based modes. */
    KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

    /* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
    bits = kzalloc(48, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

    /* Do the accesses past the 48 allocated bytes, but within the redzone. */
    kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
    kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

    kfree(bits);
}

static void kmalloc_double_kzfree(struct kunit *test)
{
    char *ptr;
    size_t size = 16;

    ptr = kmalloc(size, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    kfree_sensitive(ptr);
    KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

static void vmalloc_helpers_tags(struct kunit *test)
{
    void *ptr;

    /* This test is intended for tag-based modes. */
    KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

    ptr = vmalloc(PAGE_SIZE);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    /* Check that the returned pointer is tagged. */
    KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
    KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

    /* Make sure exported vmalloc helpers handle tagged pointers. */
    KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));

#if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
    {
        int rv;

        /* Make sure vmalloc'ed memory permissions can be changed. */
        rv = set_memory_ro((unsigned long)ptr, 1);
        KUNIT_ASSERT_GE(test, rv, 0);
        rv = set_memory_rw((unsigned long)ptr, 1);
        KUNIT_ASSERT_GE(test, rv, 0);
    }
#endif

    vfree(ptr);
}

static void vmalloc_oob(struct kunit *test)
{
    char *v_ptr, *p_ptr;
    struct page *page;
    size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;

    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

    v_ptr = vmalloc(size);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

    OPTIMIZER_HIDE_VAR(v_ptr);

    /*
     * We have to be careful not to hit the guard page in vmalloc tests.
     * The MMU will catch that and crash us.
     */

    /* Make sure in-bounds accesses are valid. */
    v_ptr[0] = 0;
    v_ptr[size - 1] = 0;

    /*
     * An unaligned access past the requested vmalloc size.
     * Only generic KASAN can precisely detect these.
     */
    if (IS_ENABLED(CONFIG_KASAN_GENERIC))
        KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);

    /* An aligned access into the first out-of-bounds granule. */
    KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);

    /* Check that in-bounds accesses to the physical page are valid. */
    page = vmalloc_to_page(v_ptr);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
    p_ptr = page_address(page);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
    p_ptr[0] = 0;

    vfree(v_ptr);

    /*
     * We can't check for use-after-unmap bugs in this nor in the following
     * vmalloc tests, as the page might be fully unmapped and accessing it
     * will crash the kernel.
     */
}

static void vmap_tags(struct kunit *test)
{
    char *p_ptr, *v_ptr;
    struct page *p_page, *v_page;

    /*
     * This test is specifically crafted for the software tag-based mode,
     * the only tag-based mode that poisons vmap mappings.
     */
    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

    p_page = alloc_pages(GFP_KERNEL, 1);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
    p_ptr = page_address(p_page);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

    v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

    /*
     * We can't check for out-of-bounds bugs in this nor in the following
     * vmalloc tests, as allocations have page granularity and accessing
     * the guard page will crash the kernel.
     */

    KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
    KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

    /* Make sure that in-bounds accesses through both pointers work. */
    *p_ptr = 0;
    *v_ptr = 0;

    /* Make sure vmalloc_to_page() correctly recovers the page pointer. */
    v_page = vmalloc_to_page(v_ptr);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
    KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);

    vunmap(v_ptr);
    free_pages((unsigned long)p_ptr, 1);
}

static void vm_map_ram_tags(struct kunit *test)
{
    char *p_ptr, *v_ptr;
    struct page *page;

    /*
     * This test is specifically crafted for the software tag-based mode,
     * the only tag-based mode that poisons vm_map_ram mappings.
     */
    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

    page = alloc_pages(GFP_KERNEL, 1);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
    p_ptr = page_address(page);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

    v_ptr = vm_map_ram(&page, 1, -1);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

    KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
    KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

    /* Make sure that in-bounds accesses through both pointers work. */
    *p_ptr = 0;
    *v_ptr = 0;

    vm_unmap_ram(v_ptr, 1);
    free_pages((unsigned long)p_ptr, 1);
}

static void vmalloc_percpu(struct kunit *test)
{
    char __percpu *ptr;
    int cpu;

    /*
     * This test is specifically crafted for the software tag-based mode,
     * the only tag-based mode that poisons percpu mappings.
     */
    KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

    ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);

    for_each_possible_cpu(cpu) {
        char *c_ptr = per_cpu_ptr(ptr, cpu);

        KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN);
        KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL);

        /* Make sure that in-bounds accesses don't crash the kernel. */
        *c_ptr = 0;
    }

    free_percpu(ptr);
}

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
    char *ptr;
    struct page *pages;
    int i, size, order;

    KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

    for (i = 0; i < 256; i++) {
        size = (get_random_int() % 1024) + 1;
        ptr = kmalloc(size, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
        KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
        KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
        kfree(ptr);
    }

    for (i = 0; i < 256; i++) {
        order = (get_random_int() % 4) + 1;
        pages = alloc_pages(GFP_KERNEL, order);
        ptr = page_address(pages);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
        KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
        KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
        free_pages((unsigned long)ptr, order);
    }

    if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
        return;

    for (i = 0; i < 256; i++) {
        size = (get_random_int() % 1024) + 1;
        ptr = vmalloc(size);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
        KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
        KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
        vfree(ptr);
    }
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
    char *ptr;
    u8 tag;

    KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

    ptr = kmalloc(128, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

    /* Backup the assigned tag. */
    tag = get_tag(ptr);
    KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

    /* Reset the tag to 0xff. */
    ptr = set_tag(ptr, KASAN_TAG_KERNEL);

    /* This access shouldn't trigger a KASAN report. */
    *ptr = 0;

    /* Recover the pointer tag and free. */
    ptr = set_tag(ptr, tag);
    kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
    char *ptr;
    int tag;

    KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

    ptr = kmalloc(128, GFP_KERNEL);
    KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
    KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

    /* For each possible tag value not matching the pointer tag. */
    for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
        if (tag == get_tag(ptr))
            continue;

        /* Mark the first memory granule with the chosen memory tag. */
        kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

        /* This access must cause a KASAN report. */
        KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
    }

    /* Recover the memory tag and free. */
    kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
    kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
    KUNIT_CASE(kmalloc_oob_right),
    KUNIT_CASE(kmalloc_oob_left),
    KUNIT_CASE(kmalloc_node_oob_right),
    KUNIT_CASE(kmalloc_pagealloc_oob_right),
    KUNIT_CASE(kmalloc_pagealloc_uaf),
    KUNIT_CASE(kmalloc_pagealloc_invalid_free),
    KUNIT_CASE(pagealloc_oob_right),
    KUNIT_CASE(pagealloc_uaf),
    KUNIT_CASE(kmalloc_large_oob_right),
    KUNIT_CASE(krealloc_more_oob),
    KUNIT_CASE(krealloc_less_oob),
    KUNIT_CASE(krealloc_pagealloc_more_oob),
    KUNIT_CASE(krealloc_pagealloc_less_oob),
    KUNIT_CASE(krealloc_uaf),
    KUNIT_CASE(kmalloc_oob_16),
    KUNIT_CASE(kmalloc_uaf_16),
    KUNIT_CASE(kmalloc_oob_in_memset),
    KUNIT_CASE(kmalloc_oob_memset_2),
    KUNIT_CASE(kmalloc_oob_memset_4),
    KUNIT_CASE(kmalloc_oob_memset_8),
    KUNIT_CASE(kmalloc_oob_memset_16),
    KUNIT_CASE(kmalloc_memmove_negative_size),
    KUNIT_CASE(kmalloc_memmove_invalid_size),
    KUNIT_CASE(kmalloc_uaf),
    KUNIT_CASE(kmalloc_uaf_memset),
    KUNIT_CASE(kmalloc_uaf2),
    KUNIT_CASE(kfree_via_page),
    KUNIT_CASE(kfree_via_phys),
    KUNIT_CASE(kmem_cache_oob),
    KUNIT_CASE(kmem_cache_accounted),
    KUNIT_CASE(kmem_cache_bulk),
    KUNIT_CASE(kasan_global_oob_right),
    KUNIT_CASE(kasan_global_oob_left),
    KUNIT_CASE(kasan_stack_oob),
    KUNIT_CASE(kasan_alloca_oob_left),
    KUNIT_CASE(kasan_alloca_oob_right),
    KUNIT_CASE(ksize_unpoisons_memory),
    KUNIT_CASE(ksize_uaf),
    KUNIT_CASE(kmem_cache_double_free),
    KUNIT_CASE(kmem_cache_invalid_free),
    KUNIT_CASE(kmem_cache_double_destroy),
    KUNIT_CASE(kasan_memchr),
    KUNIT_CASE(kasan_memcmp),
    KUNIT_CASE(kasan_strings),
    KUNIT_CASE(kasan_bitops_generic),
    KUNIT_CASE(kasan_bitops_tags),
    KUNIT_CASE(kmalloc_double_kzfree),
    KUNIT_CASE(vmalloc_helpers_tags),
    KUNIT_CASE(vmalloc_oob),
    KUNIT_CASE(vmap_tags),
    KUNIT_CASE(vm_map_ram_tags),
    KUNIT_CASE(vmalloc_percpu),
    KUNIT_CASE(match_all_not_assigned),
    KUNIT_CASE(match_all_ptr_tag),
    KUNIT_CASE(match_all_mem_tag),
    {}
};

static struct kunit_suite kasan_kunit_test_suite = {
    .name = "kasan",
    .init = kasan_test_init,
    .test_cases = kasan_kunit_test_cases,
    .exit = kasan_test_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");