// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for SL[AOU]B/page initialization at alloc/free time.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#define GARBAGE_INT (0x09A7BA9E)
#define GARBAGE_BYTE (0x9E)

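/*
 * Report this function's results. Relies on the local |failures| and
 * |num_tests| counters of the calling function.
 */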
#define REPORT_FAILURES_IN_FN() \
    do {    \
        if (failures)   \
            pr_info("%s failed %d out of %d times\n",   \
                __func__, failures, num_tests);     \
        else        \
            pr_info("all %d tests in %s passed\n",      \
                num_tests, __func__);           \
    } while (0)

/*
 * Count the non-zero bytes in the buffer: on a freshly (re)allocated,
 * supposedly zeroed buffer these indicate uninitialized memory.
 */
static int __init count_nonzero_bytes(void *ptr, size_t size)
{
    int i, ret = 0;
    unsigned char *p = (unsigned char *)ptr;

    for (i = 0; i < size; i++)
        if (p[i])
            ret++;
    return ret;
}

/* Fill a buffer with garbage, skipping |skip| first bytes. */
static void __init fill_with_garbage_skip(void *ptr, int size, size_t skip)
{
    unsigned int *p = (unsigned int *)((char *)ptr + skip);
    int i = 0;

    WARN_ON(skip > size);
    size -= skip;

    while (size >= sizeof(*p)) {
        p[i] = GARBAGE_INT;
        i++;
        size -= sizeof(*p);
    }
    if (size)
        memset(&p[i], GARBAGE_BYTE, size);
}

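/* Convenience wrapper: garbage-fill the entire buffer. */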
static void __init fill_with_garbage(void *ptr, size_t size)
{
    fill_with_garbage_skip(ptr, size, 0);
}

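/*
 * Allocate 2^order pages, fill them with garbage and free them; then allocate
 * the same order again and check that the new pages come back zeroed.
 */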
static int __init do_alloc_pages_order(int order, int *total_failures)
{
    struct page *page;
    void *buf;
    size_t size = PAGE_SIZE << order;

    page = alloc_pages(GFP_KERNEL, order);
    buf = page_address(page);
    fill_with_garbage(buf, size);
    __free_pages(page, order);

    page = alloc_pages(GFP_KERNEL, order);
    buf = page_address(page);
    if (count_nonzero_bytes(buf, size))
        (*total_failures)++;
    fill_with_garbage(buf, size);
    __free_pages(page, order);
    return 1;
}

/* Test the page allocator by calling alloc_pages with different orders. */
static int __init test_pages(int *total_failures)
{
    int failures = 0, num_tests = 0;
    int i;

    for (i = 0; i < 10; i++)
        num_tests += do_alloc_pages_order(i, &failures);

    REPORT_FAILURES_IN_FN();
    *total_failures += failures;
    return num_tests;
}

/* Test kmalloc() with given parameters. */
static int __init do_kmalloc_size(size_t size, int *total_failures)
{
    void *buf;

    buf = kmalloc(size, GFP_KERNEL);
    fill_with_garbage(buf, size);
    kfree(buf);

    buf = kmalloc(size, GFP_KERNEL);
    if (count_nonzero_bytes(buf, size))
        (*total_failures)++;
    fill_with_garbage(buf, size);
    kfree(buf);
    return 1;
}

/* Test vmalloc() with given parameters. */
static int __init do_vmalloc_size(size_t size, int *total_failures)
{
    void *buf;

    buf = vmalloc(size);
    fill_with_garbage(buf, size);
    vfree(buf);

    buf = vmalloc(size);
    if (count_nonzero_bytes(buf, size))
        (*total_failures)++;
    fill_with_garbage(buf, size);
    vfree(buf);
    return 1;
}

/* Test kmalloc()/vmalloc() by allocating objects of different sizes. */
static int __init test_kvmalloc(int *total_failures)
{
    int failures = 0, num_tests = 0;
    int i, size;

    for (i = 0; i < 20; i++) {
        size = 1 << i;
        num_tests += do_kmalloc_size(size, &failures);
        num_tests += do_vmalloc_size(size, &failures);
    }

    REPORT_FAILURES_IN_FN();
    *total_failures += failures;
    return num_tests;
}

#define CTOR_BYTES (sizeof(unsigned int))
#define CTOR_PATTERN (0x41414141)
/* Initialize the first 4 bytes of the object. */
static void test_ctor(void *obj)
{
    *(unsigned int *)obj = CTOR_PATTERN;
}

/*
 * Check the invariants for the buffer allocated from a slab cache.
 * If the cache has a test constructor, the first 4 bytes of the object must
 * always remain equal to CTOR_PATTERN.
 * If the cache isn't an RCU-typesafe one, or if the allocation is done with
 * __GFP_ZERO, then the object contents must be zeroed after allocation.
 * If the cache is an RCU-typesafe one, the object contents must never be
 * zeroed after the first use. This is checked by memcmp() in
 * do_kmem_cache_size().
 */
static bool __init check_buf(void *buf, int size, bool want_ctor,
                 bool want_rcu, bool want_zero)
{
    int bytes;
    bool fail = false;

    bytes = count_nonzero_bytes(buf, size);
    WARN_ON(want_ctor && want_zero);
    if (want_zero)
        return bytes != 0;
    if (want_ctor) {
        if (*(unsigned int *)buf != CTOR_PATTERN)
            fail = true;
    } else {
        if (bytes)
            fail = !want_rcu;
    }
    return fail;
}

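/* Scratch storage shared by the kmem_cache_alloc_bulk() tests. */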
#define BULK_SIZE 100
static void *bulk_array[BULK_SIZE];

/*
 * Test kmem_cache with given parameters:
 *  want_ctor - use a constructor;
 *  want_rcu - use SLAB_TYPESAFE_BY_RCU;
 *  want_zero - use __GFP_ZERO.
 */
static int __init do_kmem_cache_size(size_t size, bool want_ctor,
                     bool want_rcu, bool want_zero,
                     int *total_failures)
{
    struct kmem_cache *c;
    int iter;
    bool fail = false;
    gfp_t alloc_mask = GFP_KERNEL | (want_zero ? __GFP_ZERO : 0);
    void *buf, *buf_copy;

    c = kmem_cache_create("test_cache", size, 1,
                  want_rcu ? SLAB_TYPESAFE_BY_RCU : 0,
                  want_ctor ? test_ctor : NULL);
    for (iter = 0; iter < 10; iter++) {
        /* Do a test of bulk allocations */
        if (!want_rcu && !want_ctor) {
            int ret;

            ret = kmem_cache_alloc_bulk(c, alloc_mask, BULK_SIZE,
                            bulk_array);
            if (!ret) {
                fail = true;
            } else {
                int i;

                for (i = 0; i < ret; i++)
                    fail |= check_buf(bulk_array[i], size,
                              want_ctor, want_rcu,
                              want_zero);
                kmem_cache_free_bulk(c, ret, bulk_array);
            }
        }

        buf = kmem_cache_alloc(c, alloc_mask);
        /* Check that buf is zeroed, if it must be. */
        fail |= check_buf(buf, size, want_ctor, want_rcu, want_zero);
        fill_with_garbage_skip(buf, size, want_ctor ? CTOR_BYTES : 0);

        if (!want_rcu) {
            kmem_cache_free(c, buf);
            continue;
        }

        /*
         * If this is an RCU cache, use a critical section to ensure we
         * can touch objects after they're freed.
         */
        rcu_read_lock();
        /*
         * Copy the buffer to check that it's not wiped on free().
         * Use GFP_ATOMIC: we cannot sleep inside an RCU read-side
         * critical section.
         */
        buf_copy = kmalloc(size, GFP_ATOMIC);
        if (buf_copy)
            memcpy(buf_copy, buf, size);

        kmem_cache_free(c, buf);
        /*
         * Check that |buf| is intact after kmem_cache_free().
         * |want_zero| is false, because we wrote garbage to
         * the buffer already.
         */
        fail |= check_buf(buf, size, want_ctor, want_rcu,
                  false);
        if (buf_copy) {
            fail |= (bool)memcmp(buf, buf_copy, size);
            kfree(buf_copy);
        }
        rcu_read_unlock();
    }
    kmem_cache_destroy(c);

    *total_failures += fail;
    return 1;
}

/*
 * Check that the data written to an RCU-allocated object survives
 * reallocation.
 */
static int __init do_kmem_cache_rcu_persistent(int size, int *total_failures)
{
    struct kmem_cache *c;
    void *buf, *buf_contents, *saved_ptr;
    void **used_objects;
    int i, iter, maxiter = 1024;
    bool fail = false;

    c = kmem_cache_create("test_cache", size, size, SLAB_TYPESAFE_BY_RCU,
                  NULL);
    buf = kmem_cache_alloc(c, GFP_KERNEL);
    if (!buf)
        goto out;
    saved_ptr = buf;
    fill_with_garbage(buf, size);
    buf_contents = kmalloc(size, GFP_KERNEL);
    if (!buf_contents) {
        kmem_cache_free(c, buf);
        goto out;
    }
    used_objects = kmalloc_array(maxiter, sizeof(void *), GFP_KERNEL);
    if (!used_objects) {
        kmem_cache_free(c, buf);
        kfree(buf_contents);
        goto out;
    }
    memcpy(buf_contents, buf, size);
    kmem_cache_free(c, buf);
    /*
     * Run for a fixed number of iterations. If we never hit saved_ptr,
     * assume the test passes.
     */
    for (iter = 0; iter < maxiter; iter++) {
        buf = kmem_cache_alloc(c, GFP_KERNEL);
        used_objects[iter] = buf;
        if (buf == saved_ptr) {
            fail = memcmp(buf_contents, buf, size);
            for (i = 0; i <= iter; i++)
                kmem_cache_free(c, used_objects[i]);
            goto free_out;
        }
    }

    for (iter = 0; iter < maxiter; iter++)
        kmem_cache_free(c, used_objects[iter]);

free_out:
    kfree(buf_contents);
    kfree(used_objects);
out:
    kmem_cache_destroy(c);
    *total_failures += fail;
    return 1;
}

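/*
 * Check that every object returned by kmem_cache_alloc_bulk() from a plain
 * cache is zeroed on each (re)allocation.
 */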
static int __init do_kmem_cache_size_bulk(int size, int *total_failures)
{
    struct kmem_cache *c;
    int i, iter, maxiter = 1024;
    int num, bytes;
    bool fail = false;
    void *objects[10];

    c = kmem_cache_create("test_cache", size, size, 0, NULL);
    for (iter = 0; (iter < maxiter) && !fail; iter++) {
        num = kmem_cache_alloc_bulk(c, GFP_KERNEL, ARRAY_SIZE(objects),
                        objects);
        for (i = 0; i < num; i++) {
            bytes = count_nonzero_bytes(objects[i], size);
            if (bytes)
                fail = true;
            fill_with_garbage(objects[i], size);
        }

        if (num)
            kmem_cache_free_bulk(c, num, objects);
    }
    kmem_cache_destroy(c);
    *total_failures += fail;
    return 1;
}

/*
 * Test kmem_cache allocation by creating caches of different sizes, with and
 * without constructors, with and without SLAB_TYPESAFE_BY_RCU.
 */
static int __init test_kmemcache(int *total_failures)
{
    int failures = 0, num_tests = 0;
    int i, flags, size;
    bool ctor, rcu, zero;

    for (i = 0; i < 10; i++) {
        size = 8 << i;
        for (flags = 0; flags < 8; flags++) {
            ctor = flags & 1;
            rcu = flags & 2;
            zero = flags & 4;
            /* Skip the invalid constructor + __GFP_ZERO combination. */
            if (ctor && zero)
                continue;
            num_tests += do_kmem_cache_size(size, ctor, rcu, zero,
                            &failures);
        }
        num_tests += do_kmem_cache_size_bulk(size, &failures);
    }
    REPORT_FAILURES_IN_FN();
    *total_failures += failures;
    return num_tests;
}

/* Test the behavior of SLAB_TYPESAFE_BY_RCU caches of different sizes. */
static int __init test_rcu_persistent(int *total_failures)
{
    int failures = 0, num_tests = 0;
    int i, size;

    for (i = 0; i < 10; i++) {
        size = 8 << i;
        num_tests += do_kmem_cache_rcu_persistent(size, &failures);
    }
    REPORT_FAILURES_IN_FN();
    *total_failures += failures;
    return num_tests;
}

/*
 * Run the tests. Each test function returns the number of executed tests and
 * updates |failures| with the number of failed tests.
 */
static int __init test_meminit_init(void)
{
    int failures = 0, num_tests = 0;

    num_tests += test_pages(&failures);
    num_tests += test_kvmalloc(&failures);
    num_tests += test_kmemcache(&failures);
    num_tests += test_rcu_persistent(&failures);

    if (failures == 0)
        pr_info("all %d tests passed!\n", num_tests);
    else
        pr_info("failures: %d out of %d\n", failures, num_tests);

    return failures ? -EINVAL : 0;
}
module_init(test_meminit_init);

MODULE_LICENSE("GPL");
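
/*
 * Usage sketch, assuming the module is built as test_meminit.ko (e.g. via a
 * CONFIG_TEST_MEMINIT-style option): load it and read the results from the
 * kernel log:
 *
 *   modprobe test_meminit
 *   dmesg | grep test_meminit
 *
 * The zeroing behavior under test is controlled by the init_on_alloc=1 /
 * init_on_free=1 boot options and their CONFIG_INIT_ON_*_DEFAULT_ON defaults.
 */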