/* NOTE(review): stripped web-viewer (LXR) navigation residue that was
 * accidentally captured with this scraped source file.
 */

// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <sys/mman.h>
#include "test_mmap.skel.h"
0005 
/* User-space view of the BPF data_map contents: 2048 u64 values (16KB),
 * which is at least 4 pages on 4KB-page systems — matching the
 * bpf_map__set_max_entries(data_map, 4 * page_size / sizeof(u64)) resize
 * performed in test_mmap() below.
 */
struct map_data {
    __u64 val[512 * 4];
};
0009 
/* Round @sz up to the next multiple of the system page size.
 * roundup_page(0) == 0; an exact multiple is returned unchanged.
 * NOTE: assumes sysconf(_SC_PAGE_SIZE) succeeds, which holds in the
 * selftest environment.
 */
static size_t roundup_page(size_t sz)
{
    const size_t page = (size_t)sysconf(_SC_PAGE_SIZE);
    size_t rem = sz % page;

    return rem ? sz - rem + page : sz;
}
0015 
/* End-to-end test of mmap() support for BPF array maps.
 *
 * Covers:
 *   - rdonly_map: writable mmap() must be rejected, read-only mmap() must
 *     succeed;
 *   - R/W mmap() of .bss and data_map: writes are visible through the
 *     mapping, the skeleton view, and bpf_map_lookup_elem(), in both
 *     directions (user space writes in, the attached BPF program writes
 *     out);
 *   - bpf_map_freeze(): fails with EBUSY while a writable mapping exists,
 *     succeeds once only read-only mappings remain, and afterwards blocks
 *     new R/W mmap() attempts;
 *   - partial munmap() and MAP_FIXED re-mmap() over an existing mapping;
 *   - refcounting: an active mapping keeps the map alive after all FDs are
 *     closed; the map ID disappears only after the last munmap().
 */
void test_mmap(void)
{
    const size_t bss_sz = roundup_page(sizeof(struct test_mmap__bss));
    const size_t map_sz = roundup_page(sizeof(struct map_data));
    const int zero = 0, one = 1, two = 2, far = 1500;
    const long page_size = sysconf(_SC_PAGE_SIZE);
    /* 'duration' is referenced internally by the CHECK() macro — see
     * test_progs.h. */
    int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd;
    struct bpf_map *data_map, *bss_map;
    void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp0, *tmp1, *tmp2;
    struct test_mmap__bss *bss_data;
    struct bpf_map_info map_info;
    __u32 map_info_sz = sizeof(map_info);
    struct map_data *map_data;
    struct test_mmap *skel;
    __u64 val = 0;

    skel = test_mmap__open();
    if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
        return;

    /* resize maps before load: rdonly_map gets page_size entries */
    err = bpf_map__set_max_entries(skel->maps.rdonly_map, page_size);
    if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
        goto cleanup;

    /* at least 4 pages of data */
    err = bpf_map__set_max_entries(skel->maps.data_map,
                       4 * (page_size / sizeof(u64)));
    if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
        goto cleanup;

    err = test_mmap__load(skel);
    if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
        goto cleanup;

    bss_map = skel->maps.bss;
    data_map = skel->maps.data_map;
    data_map_fd = bpf_map__fd(data_map);

    /* writable mmap() of rdonly_map must be rejected by the kernel */
    rdmap_fd = bpf_map__fd(skel->maps.rdonly_map);
    tmp1 = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
    if (CHECK(tmp1 != MAP_FAILED, "rdonly_write_mmap", "unexpected success\n")) {
        munmap(tmp1, page_size);
        goto cleanup;
    }
    /* now double-check if it's mmap()'able at all */
    tmp1 = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rdmap_fd, 0);
    if (CHECK(tmp1 == MAP_FAILED, "rdonly_read_mmap", "failed: %d\n", errno))
        goto cleanup;

    /* get map's ID: needed later to verify map lifetime via
     * bpf_map_get_fd_by_id() after all FDs are closed */
    memset(&map_info, 0, map_info_sz);
    err = bpf_obj_get_info_by_fd(data_map_fd, &map_info, &map_info_sz);
    if (CHECK(err, "map_get_info", "failed %d\n", errno))
        goto cleanup;
    data_map_id = map_info.id;

    /* mmap BSS map */
    bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
              bpf_map__fd(bss_map), 0);
    if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap",
          ".bss mmap failed: %d\n", errno)) {
        bss_mmaped = NULL;
        goto cleanup;
    }
    /* map as R/W first */
    map_mmaped = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
              data_map_fd, 0);
    if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
          "data_map mmap failed: %d\n", errno)) {
        map_mmaped = NULL;
        goto cleanup;
    }

    bss_data = bss_mmaped;
    map_data = map_mmaped;

    /* everything must start out zeroed, seen both through the mmap()
     * views and through the skeleton's .bss pointer */
    CHECK_FAIL(bss_data->in_val);
    CHECK_FAIL(bss_data->out_val);
    CHECK_FAIL(skel->bss->in_val);
    CHECK_FAIL(skel->bss->out_val);
    CHECK_FAIL(map_data->val[0]);
    CHECK_FAIL(map_data->val[1]);
    CHECK_FAIL(map_data->val[2]);
    CHECK_FAIL(map_data->val[far]);

    err = test_mmap__attach(skel);
    if (CHECK(err, "attach_raw_tp", "err %d\n", err))
        goto cleanup;

    /* feed inputs: in_val through the mmap()'ed .bss, val[0] through
     * the regular update API */
    bss_data->in_val = 123;
    val = 111;
    CHECK_FAIL(bpf_map_update_elem(data_map_fd, &zero, &val, 0));

    /* give the attached raw_tp program a chance to run; per the
     * assertions below it mirrors in_val into out_val and derives
     * val[1], val[2] and val[far] (see progs/test_mmap.c) */
    usleep(1);

    CHECK_FAIL(bss_data->in_val != 123);
    CHECK_FAIL(bss_data->out_val != 123);
    CHECK_FAIL(skel->bss->in_val != 123);
    CHECK_FAIL(skel->bss->out_val != 123);
    CHECK_FAIL(map_data->val[0] != 111);
    CHECK_FAIL(map_data->val[1] != 222);
    CHECK_FAIL(map_data->val[2] != 123);
    CHECK_FAIL(map_data->val[far] != 3 * 123);

    /* same values must be visible through the lookup API, not just
     * through the mapping */
    CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &zero, &val));
    CHECK_FAIL(val != 111);
    CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &one, &val));
    CHECK_FAIL(val != 222);
    CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &two, &val));
    CHECK_FAIL(val != 123);
    CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &far, &val));
    CHECK_FAIL(val != 3 * 123);

    /* data_map freeze should fail due to R/W mmap() */
    err = bpf_map_freeze(data_map_fd);
    if (CHECK(!err || errno != EBUSY, "no_freeze",
          "data_map freeze succeeded: err=%d, errno=%d\n", err, errno))
        goto cleanup;

    /* downgrading the existing mapping to read-only must be allowed */
    err = mprotect(map_mmaped, map_sz, PROT_READ);
    if (CHECK(err, "mprotect_ro", "mprotect to r/o failed %d\n", errno))
        goto cleanup;

    /* unmap R/W mapping */
    err = munmap(map_mmaped, map_sz);
    map_mmaped = NULL;
    if (CHECK(err, "data_map_munmap", "data_map munmap failed: %d\n", errno))
        goto cleanup;

    /* re-map as R/O now */
    map_mmaped = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
    if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
          "data_map R/O mmap failed: %d\n", errno)) {
        map_mmaped = NULL;
        goto cleanup;
    }
    /* a mapping created read-only must not be upgradable to W or X */
    err = mprotect(map_mmaped, map_sz, PROT_WRITE);
    if (CHECK(!err, "mprotect_wr", "mprotect() succeeded unexpectedly!\n"))
        goto cleanup;
    err = mprotect(map_mmaped, map_sz, PROT_EXEC);
    if (CHECK(!err, "mprotect_ex", "mprotect() succeeded unexpectedly!\n"))
        goto cleanup;
    map_data = map_mmaped;

    /* map/unmap in a loop to test ref counting */
    for (i = 0; i < 10; i++) {
        int flags = i % 2 ? PROT_READ : PROT_WRITE;
        void *p;

        p = mmap(NULL, map_sz, flags, MAP_SHARED, data_map_fd, 0);
        if (CHECK_FAIL(p == MAP_FAILED))
            goto cleanup;
        err = munmap(p, map_sz);
        if (CHECK_FAIL(err))
            goto cleanup;
    }

    /* data_map freeze should now succeed due to no R/W mapping */
    err = bpf_map_freeze(data_map_fd);
    if (CHECK(err, "freeze", "data_map freeze failed: err=%d, errno=%d\n",
          err, errno))
        goto cleanup;

    /* mapping as R/W now should fail */
    tmp1 = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
            data_map_fd, 0);
    if (CHECK(tmp1 != MAP_FAILED, "data_mmap", "mmap succeeded\n")) {
        munmap(tmp1, map_sz);
        goto cleanup;
    }

    /* .bss is still writable and the program still runs; frozen
     * data_map keeps reflecting program-side updates through the R/O
     * mapping */
    bss_data->in_val = 321;
    usleep(1);
    CHECK_FAIL(bss_data->in_val != 321);
    CHECK_FAIL(bss_data->out_val != 321);
    CHECK_FAIL(skel->bss->in_val != 321);
    CHECK_FAIL(skel->bss->out_val != 321);
    CHECK_FAIL(map_data->val[0] != 111);
    CHECK_FAIL(map_data->val[1] != 222);
    CHECK_FAIL(map_data->val[2] != 321);
    CHECK_FAIL(map_data->val[far] != 3 * 321);

    /* check some more advanced mmap() manipulations */

    /* reserve a contiguous 4-page region with an anonymous mapping,
     * then carve map pages into it with MAP_FIXED */
    tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS,
              -1, 0);
    if (CHECK(tmp0 == MAP_FAILED, "adv_mmap0", "errno %d\n", errno))
        goto cleanup;

    /* map all but last page: pages 1-3 mapped */
    tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
              data_map_fd, 0);
    if (CHECK(tmp0 != tmp1, "adv_mmap1", "tmp0: %p, tmp1: %p\n", tmp0, tmp1)) {
        munmap(tmp0, 4 * page_size);
        goto cleanup;
    }

    /* unmap second page: pages 1, 3 mapped */
    err = munmap(tmp1 + page_size, page_size);
    if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
        munmap(tmp1, 4 * page_size);
        goto cleanup;
    }

    /* map page 2 back */
    tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ,
            MAP_SHARED | MAP_FIXED, data_map_fd, 0);
    if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
        munmap(tmp1, page_size);
        munmap(tmp1 + 2*page_size, 2 * page_size);
        goto cleanup;
    }
    CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
          "tmp1: %p, tmp2: %p\n", tmp1, tmp2);

    /* re-map all 4 pages */
    tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
            data_map_fd, 0);
    if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
        munmap(tmp1, 4 * page_size); /* unmap page 1 */
        goto cleanup;
    }
    CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);

    /* the re-mapped region must show the same map contents */
    map_data = tmp2;
    CHECK_FAIL(bss_data->in_val != 321);
    CHECK_FAIL(bss_data->out_val != 321);
    CHECK_FAIL(skel->bss->in_val != 321);
    CHECK_FAIL(skel->bss->out_val != 321);
    CHECK_FAIL(map_data->val[0] != 111);
    CHECK_FAIL(map_data->val[1] != 222);
    CHECK_FAIL(map_data->val[2] != 321);
    CHECK_FAIL(map_data->val[far] != 3 * 321);

    munmap(tmp2, 4 * page_size);

    /* map all 4 pages, but with pg_off=1 page, should fail */
    tmp1 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
            data_map_fd, page_size /* initial page shift */);
    if (CHECK(tmp1 != MAP_FAILED, "adv_mmap7", "unexpected success")) {
        munmap(tmp1, 4 * page_size);
        goto cleanup;
    }

    /* keep one last mapping alive past skeleton destruction to test
     * that it pins the map */
    tmp1 = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
    if (CHECK(tmp1 == MAP_FAILED, "last_mmap", "failed %d\n", errno))
        goto cleanup;

    test_mmap__destroy(skel);
    skel = NULL;
    CHECK_FAIL(munmap(bss_mmaped, bss_sz));
    bss_mmaped = NULL;
    CHECK_FAIL(munmap(map_mmaped, map_sz));
    map_mmaped = NULL;

    /* map should be still held by active mmap */
    tmp_fd = bpf_map_get_fd_by_id(data_map_id);
    if (CHECK(tmp_fd < 0, "get_map_by_id", "failed %d\n", errno)) {
        munmap(tmp1, map_sz);
        goto cleanup;
    }
    close(tmp_fd);

    /* this should release data map finally */
    munmap(tmp1, map_sz);

    /* we need to wait for RCU grace period: poll for the smallest live
     * map ID >= data_map_id; once none exists (or it's past ours), the
     * map has really been freed */
    for (i = 0; i < 10000; i++) {
        __u32 id = data_map_id - 1;
        if (bpf_map_get_next_id(id, &id) || id > data_map_id)
            break;
        usleep(1);
    }

    /* should fail to get map FD by non-existing ID */
    tmp_fd = bpf_map_get_fd_by_id(data_map_id);
    if (CHECK(tmp_fd >= 0, "get_map_by_id_after",
          "unexpectedly succeeded %d\n", tmp_fd)) {
        close(tmp_fd);
        goto cleanup;
    }

cleanup:
    if (bss_mmaped)
        CHECK_FAIL(munmap(bss_mmaped, bss_sz));
    if (map_mmaped)
        CHECK_FAIL(munmap(map_mmaped, map_sz));
    /* test_mmap__destroy(NULL) is a safe no-op on the success path */
    test_mmap__destroy(skel);
}