Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 #define _GNU_SOURCE
0003 #include <linux/compiler.h>
0004 #include <asm/barrier.h>
0005 #include <test_progs.h>
0006 #include <sys/mman.h>
0007 #include <sys/epoll.h>
0008 #include <time.h>
0009 #include <sched.h>
0010 #include <signal.h>
0011 #include <pthread.h>
0012 #include <sys/sysinfo.h>
0013 #include <linux/perf_event.h>
0014 #include <linux/ring_buffer.h>
0015 #include "test_ringbuf.lskel.h"
0016 
/* Sentinel "error" code returned by the sample callback once both expected
 * samples have been observed; chosen outside the real errno range so it
 * cannot be confused with a genuine ring_buffer__poll() failure.
 */
#define EDONE 7777

/* Required by the CHECK() macro — presumably accumulates per-check timing;
 * verify against test_progs.h.
 */
static int duration = 0;

/* Layout of one record the BPF program submits into the ring buffer.
 * Assumed to match the struct emitted by progs/test_ringbuf.c — TODO confirm
 * against the BPF side.
 */
struct sample {
    int pid;
    int seq;
    long value;
    char comm[16];
};

/* Count of samples consumed so far; accessed atomically because the consumer
 * callback can run on the background poll thread while the main thread reads
 * and resets it.
 */
static int sample_cnt;
0029 
/* Atomically increment *cnt with sequentially-consistent ordering. */
static void atomic_inc(int *cnt)
{
    (void)__atomic_fetch_add(cnt, 1, __ATOMIC_SEQ_CST);
}
0034 
/* Atomically store @val into *cnt and return the value it replaced. */
static int atomic_xchg(int *cnt, int val)
{
    int old = __atomic_exchange_n(cnt, val, __ATOMIC_SEQ_CST);

    return old;
}
0039 
0040 static int process_sample(void *ctx, void *data, size_t len)
0041 {
0042     struct sample *s = data;
0043 
0044     atomic_inc(&sample_cnt);
0045 
0046     switch (s->seq) {
0047     case 0:
0048         CHECK(s->value != 333, "sample1_value", "exp %ld, got %ld\n",
0049               333L, s->value);
0050         return 0;
0051     case 1:
0052         CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n",
0053               777L, s->value);
0054         return -EDONE;
0055     default:
0056         /* we don't care about the rest */
0057         return 0;
0058     }
0059 }
0060 
/* File-scope so poll_thread() can reach the ring buffer; skel is shared by
 * trigger_samples() and the main test body.
 */
static struct test_ringbuf_lskel *skel;
static struct ring_buffer *ringbuf;
0063 
0064 static void trigger_samples()
0065 {
0066     skel->bss->dropped = 0;
0067     skel->bss->total = 0;
0068     skel->bss->discarded = 0;
0069 
0070     /* trigger exactly two samples */
0071     skel->bss->value = 333;
0072     syscall(__NR_getpgid);
0073     skel->bss->value = 777;
0074     syscall(__NR_getpgid);
0075 }
0076 
0077 static void *poll_thread(void *input)
0078 {
0079     long timeout = (long)input;
0080 
0081     return (void *)(long)ring_buffer__poll(ringbuf, timeout);
0082 }
0083 
0084 void test_ringbuf(void)
0085 {
0086     const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
0087     pthread_t thread;
0088     long bg_ret = -1;
0089     int err, cnt, rb_fd;
0090     int page_size = getpagesize();
0091     void *mmap_ptr, *tmp_ptr;
0092 
0093     skel = test_ringbuf_lskel__open();
0094     if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
0095         return;
0096 
0097     skel->maps.ringbuf.max_entries = page_size;
0098 
0099     err = test_ringbuf_lskel__load(skel);
0100     if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
0101         goto cleanup;
0102 
0103     rb_fd = skel->maps.ringbuf.map_fd;
0104     /* good read/write cons_pos */
0105     mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
0106     ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos");
0107     tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE);
0108     if (!ASSERT_ERR_PTR(tmp_ptr, "rw_extend"))
0109         goto cleanup;
0110     ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
0111     ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");
0112 
0113     /* bad writeable prod_pos */
0114     mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size);
0115     err = -errno;
0116     ASSERT_ERR_PTR(mmap_ptr, "wr_prod_pos");
0117     ASSERT_EQ(err, -EPERM, "wr_prod_pos_err");
0118 
0119     /* bad writeable data pages */
0120     mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
0121     err = -errno;
0122     ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_one");
0123     ASSERT_EQ(err, -EPERM, "wr_data_page_one_err");
0124     mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size);
0125     ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_two");
0126     mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
0127     ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_all");
0128 
0129     /* good read-only pages */
0130     mmap_ptr = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
0131     if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
0132         goto cleanup;
0133 
0134     ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_WRITE), "write_protect");
0135     ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_EXEC), "exec_protect");
0136     ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "ro_remap");
0137     ASSERT_OK(munmap(mmap_ptr, 4 * page_size), "unmap_ro");
0138 
0139     /* good read-only pages with initial offset */
0140     mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, page_size);
0141     if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
0142         goto cleanup;
0143 
0144     ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_protect");
0145     ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_protect");
0146     ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 3 * page_size, MREMAP_MAYMOVE), "ro_remap");
0147     ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro");
0148 
0149     /* only trigger BPF program for current process */
0150     skel->bss->pid = getpid();
0151 
0152     ringbuf = ring_buffer__new(skel->maps.ringbuf.map_fd,
0153                    process_sample, NULL, NULL);
0154     if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
0155         goto cleanup;
0156 
0157     err = test_ringbuf_lskel__attach(skel);
0158     if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
0159         goto cleanup;
0160 
0161     trigger_samples();
0162 
0163     /* 2 submitted + 1 discarded records */
0164     CHECK(skel->bss->avail_data != 3 * rec_sz,
0165           "err_avail_size", "exp %ld, got %ld\n",
0166           3L * rec_sz, skel->bss->avail_data);
0167     CHECK(skel->bss->ring_size != page_size,
0168           "err_ring_size", "exp %ld, got %ld\n",
0169           (long)page_size, skel->bss->ring_size);
0170     CHECK(skel->bss->cons_pos != 0,
0171           "err_cons_pos", "exp %ld, got %ld\n",
0172           0L, skel->bss->cons_pos);
0173     CHECK(skel->bss->prod_pos != 3 * rec_sz,
0174           "err_prod_pos", "exp %ld, got %ld\n",
0175           3L * rec_sz, skel->bss->prod_pos);
0176 
0177     /* poll for samples */
0178     err = ring_buffer__poll(ringbuf, -1);
0179 
0180     /* -EDONE is used as an indicator that we are done */
0181     if (CHECK(err != -EDONE, "err_done", "done err: %d\n", err))
0182         goto cleanup;
0183     cnt = atomic_xchg(&sample_cnt, 0);
0184     CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);
0185 
0186     /* we expect extra polling to return nothing */
0187     err = ring_buffer__poll(ringbuf, 0);
0188     if (CHECK(err != 0, "extra_samples", "poll result: %d\n", err))
0189         goto cleanup;
0190     cnt = atomic_xchg(&sample_cnt, 0);
0191     CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);
0192 
0193     CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
0194           0L, skel->bss->dropped);
0195     CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
0196           2L, skel->bss->total);
0197     CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
0198           1L, skel->bss->discarded);
0199 
0200     /* now validate consumer position is updated and returned */
0201     trigger_samples();
0202     CHECK(skel->bss->cons_pos != 3 * rec_sz,
0203           "err_cons_pos", "exp %ld, got %ld\n",
0204           3L * rec_sz, skel->bss->cons_pos);
0205     err = ring_buffer__poll(ringbuf, -1);
0206     CHECK(err <= 0, "poll_err", "err %d\n", err);
0207     cnt = atomic_xchg(&sample_cnt, 0);
0208     CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);
0209 
0210     /* start poll in background w/ long timeout */
0211     err = pthread_create(&thread, NULL, poll_thread, (void *)(long)10000);
0212     if (CHECK(err, "bg_poll", "pthread_create failed: %d\n", err))
0213         goto cleanup;
0214 
0215     /* turn off notifications now */
0216     skel->bss->flags = BPF_RB_NO_WAKEUP;
0217 
0218     /* give background thread a bit of a time */
0219     usleep(50000);
0220     trigger_samples();
0221     /* sleeping arbitrarily is bad, but no better way to know that
0222      * epoll_wait() **DID NOT** unblock in background thread
0223      */
0224     usleep(50000);
0225     /* background poll should still be blocked */
0226     err = pthread_tryjoin_np(thread, (void **)&bg_ret);
0227     if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
0228         goto cleanup;
0229 
0230     /* BPF side did everything right */
0231     CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
0232           0L, skel->bss->dropped);
0233     CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
0234           2L, skel->bss->total);
0235     CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
0236           1L, skel->bss->discarded);
0237     cnt = atomic_xchg(&sample_cnt, 0);
0238     CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);
0239 
0240     /* clear flags to return to "adaptive" notification mode */
0241     skel->bss->flags = 0;
0242 
0243     /* produce new samples, no notification should be triggered, because
0244      * consumer is now behind
0245      */
0246     trigger_samples();
0247 
0248     /* background poll should still be blocked */
0249     err = pthread_tryjoin_np(thread, (void **)&bg_ret);
0250     if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
0251         goto cleanup;
0252 
0253     /* still no samples, because consumer is behind */
0254     cnt = atomic_xchg(&sample_cnt, 0);
0255     CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);
0256 
0257     skel->bss->dropped = 0;
0258     skel->bss->total = 0;
0259     skel->bss->discarded = 0;
0260 
0261     skel->bss->value = 333;
0262     syscall(__NR_getpgid);
0263     /* now force notifications */
0264     skel->bss->flags = BPF_RB_FORCE_WAKEUP;
0265     skel->bss->value = 777;
0266     syscall(__NR_getpgid);
0267 
0268     /* now we should get a pending notification */
0269     usleep(50000);
0270     err = pthread_tryjoin_np(thread, (void **)&bg_ret);
0271     if (CHECK(err, "join_bg", "err %d\n", err))
0272         goto cleanup;
0273 
0274     if (CHECK(bg_ret <= 0, "bg_ret", "epoll_wait result: %ld", bg_ret))
0275         goto cleanup;
0276 
0277     /* due to timing variations, there could still be non-notified
0278      * samples, so consume them here to collect all the samples
0279      */
0280     err = ring_buffer__consume(ringbuf);
0281     CHECK(err < 0, "rb_consume", "failed: %d\b", err);
0282 
0283     /* 3 rounds, 2 samples each */
0284     cnt = atomic_xchg(&sample_cnt, 0);
0285     CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt);
0286 
0287     /* BPF side did everything right */
0288     CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
0289           0L, skel->bss->dropped);
0290     CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
0291           2L, skel->bss->total);
0292     CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
0293           1L, skel->bss->discarded);
0294 
0295     test_ringbuf_lskel__detach(skel);
0296 cleanup:
0297     ring_buffer__free(ringbuf);
0298     test_ringbuf_lskel__destroy(skel);
0299 }