0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
0033
0034 #include <linux/kernel.h>
0035 #include <linux/gfp.h>
0036 #include <linux/slab.h>
0037 #include <linux/radix-tree.h>
0038 #include <linux/rcupdate.h>
0039 #include <stdlib.h>
0040 #include <pthread.h>
0041 #include <stdio.h>
0042 #include <assert.h>
0043
0044 #include "regression.h"
0045
/* Shared radix tree that all test threads insert into, delete from,
 * and look up concurrently. */
static RADIX_TREE(mt_tree, GFP_KERNEL);

/* Minimal userspace stand-in for the kernel's struct page: just enough
 * state for this test -- a refcount guarded by a mutex, an RCU head for
 * deferred freeing, and the index it was allocated for. */
struct page {
	pthread_mutex_t lock;	/* protects count */
	struct rcu_head rcu;	/* used by page_free() -> call_rcu() */
	int count;		/* refcount; 0 means the page is being freed */
	unsigned long index;	/* radix tree index this page was made for */
};
0054
0055 static struct page *page_alloc(int index)
0056 {
0057 struct page *p;
0058 p = malloc(sizeof(struct page));
0059 p->count = 1;
0060 p->index = index;
0061 pthread_mutex_init(&p->lock, NULL);
0062
0063 return p;
0064 }
0065
0066 static void page_rcu_free(struct rcu_head *rcu)
0067 {
0068 struct page *p = container_of(rcu, struct page, rcu);
0069 assert(!p->count);
0070 pthread_mutex_destroy(&p->lock);
0071 free(p);
0072 }
0073
/*
 * Release a page.  Freeing is deferred through call_rcu() so that a
 * concurrent find_get_pages() still inside rcu_read_lock() never
 * touches freed memory.
 */
static void page_free(struct page *p)
{
	call_rcu(&p->rcu, page_rcu_free);
}
0078
0079 static unsigned find_get_pages(unsigned long start,
0080 unsigned int nr_pages, struct page **pages)
0081 {
0082 XA_STATE(xas, &mt_tree, start);
0083 struct page *page;
0084 unsigned int ret = 0;
0085
0086 rcu_read_lock();
0087 xas_for_each(&xas, page, ULONG_MAX) {
0088 if (xas_retry(&xas, page))
0089 continue;
0090
0091 pthread_mutex_lock(&page->lock);
0092 if (!page->count)
0093 goto unlock;
0094
0095
0096 pthread_mutex_unlock(&page->lock);
0097
0098
0099 if (unlikely(page != xas_reload(&xas)))
0100 goto put_page;
0101
0102 pages[ret] = page;
0103 ret++;
0104 continue;
0105 unlock:
0106 pthread_mutex_unlock(&page->lock);
0107 put_page:
0108 xas_reset(&xas);
0109 }
0110 rcu_read_unlock();
0111 return ret;
0112 }
0113
0114 static pthread_barrier_t worker_barrier;
0115
/*
 * Worker body for both test threads.  All threads rendezvous at the
 * barrier; exactly one (the PTHREAD_BARRIER_SERIAL_THREAD winner)
 * becomes the updater, endlessly inserting pages at indices 0 and 1
 * and then deleting them again, while every other thread hammers
 * find_get_pages() over the same range.
 *
 * NOTE(review): presumably this insert/delete churn racing the RCU
 * lookup path is the original regression being reproduced -- confirm
 * against the (missing here) header comment of the upstream test.
 */
static void *regression1_fn(void *arg)
{
	rcu_register_thread();

	if (pthread_barrier_wait(&worker_barrier) ==
	    PTHREAD_BARRIER_SERIAL_THREAD) {
		int j;

		for (j = 0; j < 1000000; j++) {
			struct page *p;

			/* Insert pages at indices 0 then 1 ... */
			p = page_alloc(0);
			xa_lock(&mt_tree);
			radix_tree_insert(&mt_tree, 0, p);
			xa_unlock(&mt_tree);

			p = page_alloc(1);
			xa_lock(&mt_tree);
			radix_tree_insert(&mt_tree, 1, p);
			xa_unlock(&mt_tree);

			/* ... then delete them in the opposite order,
			 * dropping the refcount under the page lock while
			 * still holding the tree lock. */
			xa_lock(&mt_tree);
			p = radix_tree_delete(&mt_tree, 1);
			pthread_mutex_lock(&p->lock);
			p->count--;
			pthread_mutex_unlock(&p->lock);
			xa_unlock(&mt_tree);
			page_free(p);	/* actual free deferred via RCU */

			xa_lock(&mt_tree);
			p = radix_tree_delete(&mt_tree, 0);
			pthread_mutex_lock(&p->lock);
			p->count--;
			pthread_mutex_unlock(&p->lock);
			xa_unlock(&mt_tree);
			page_free(p);
		}
	} else {
		int j;

		/* Lookup thread: race find_get_pages() against the churn. */
		for (j = 0; j < 100000000; j++) {
			struct page *pages[10];

			find_get_pages(0, 10, pages);
		}
	}

	rcu_unregister_thread();

	return NULL;
}
0167
0168 static pthread_t *threads;
0169 void regression1_test(void)
0170 {
0171 int nr_threads;
0172 int i;
0173 long arg;
0174
0175
0176 printv(1, "running regression test 1, should finish in under a minute\n");
0177 nr_threads = 2;
0178 pthread_barrier_init(&worker_barrier, NULL, nr_threads);
0179
0180 threads = malloc(nr_threads * sizeof(pthread_t *));
0181
0182 for (i = 0; i < nr_threads; i++) {
0183 arg = i;
0184 if (pthread_create(&threads[i], NULL, regression1_fn, (void *)arg)) {
0185 perror("pthread_create");
0186 exit(1);
0187 }
0188 }
0189
0190 for (i = 0; i < nr_threads; i++) {
0191 if (pthread_join(threads[i], NULL)) {
0192 perror("pthread_join");
0193 exit(1);
0194 }
0195 }
0196
0197 free(threads);
0198
0199 printv(1, "regression test 1, done\n");
0200 }