0001
0002
0003
0004
0005
0006
0007
0008
0009 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0010
0011 #include <linux/mm_types.h>
0012 #include <linux/err.h>
0013 #include "binder_alloc.h"
0014
0015 #define BUFFER_NUM 5
0016 #define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
0017
0018 static bool binder_selftest_run = true;
0019 static int binder_selftest_failures;
0020 static DEFINE_MUTEX(binder_selftest_lock);
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
/**
 * enum buf_end_align_type - page alignment of a test buffer's end
 *
 * Each value names where the end of a test buffer falls relative to
 * the end of the previous buffer and to page boundaries.  The end
 * offsets are generated cumulatively in binder_selftest_alloc_offset():
 * even values extend the running end by BUFFER_MIN_SIZE (leaving an
 * unaligned end), odd values round it up to the next page boundary
 * (a page-aligned end).
 */
enum buf_end_align_type {
	/*
	 * End is BUFFER_MIN_SIZE past the previous buffer's end:
	 * on the same page, not page aligned.
	 */
	SAME_PAGE_UNALIGNED = 0,
	/*
	 * End is rounded up from there to the next page boundary:
	 * page aligned, still adjoining the previous buffer's page
	 * when that end was unaligned.
	 */
	SAME_PAGE_ALIGNED,
	/*
	 * End is BUFFER_MIN_SIZE past that boundary: on the page
	 * following the previous buffer's end, not page aligned.
	 */
	NEXT_PAGE_UNALIGNED,
	/*
	 * End is rounded up again: on the following page and page
	 * aligned.
	 */
	NEXT_PAGE_ALIGNED,
	/*
	 * End is BUFFER_MIN_SIZE past yet another boundary: two pages
	 * beyond the previous buffer's end, not page aligned.
	 */
	NEXT_NEXT_UNALIGNED,
	/* Not an alignment case: terminator for iterating the above. */
	LOOP_END,
};
0077
0078 static void pr_err_size_seq(size_t *sizes, int *seq)
0079 {
0080 int i;
0081
0082 pr_err("alloc sizes: ");
0083 for (i = 0; i < BUFFER_NUM; i++)
0084 pr_cont("[%zu]", sizes[i]);
0085 pr_cont("\n");
0086 pr_err("free seq: ");
0087 for (i = 0; i < BUFFER_NUM; i++)
0088 pr_cont("[%d]", seq[i]);
0089 pr_cont("\n");
0090 }
0091
/*
 * Check that every page backing @buffer (from user_data up to
 * user_data + @size, rounded up to a page boundary) is currently
 * allocated: the page_ptr is set and the page is not on the shrinker
 * lru list.  Logs the first offending page -- "lru" when a page is
 * present but parked on the lru, "free" when no page is present --
 * and returns false; returns true when all pages check out.
 */
static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 size_t size)
{
	void __user *page_addr;
	void __user *end;
	int page_index;

	end = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
	page_addr = buffer->user_data;
	for (; page_addr < end; page_addr += PAGE_SIZE) {
		/* index of this page within alloc's page array */
		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
		if (!alloc->pages[page_index].page_ptr ||
		    !list_empty(&alloc->pages[page_index].lru)) {
			pr_err("expect alloc but is %s at page index %d\n",
			       alloc->pages[page_index].page_ptr ?
			       "lru" : "free", page_index);
			return false;
		}
	}
	return true;
}
0114
/*
 * Allocate BUFFER_NUM buffers with the sizes given in @sizes and
 * verify that each allocation succeeds with all of its backing pages
 * resident.  On any failure, dump the size/sequence combination and
 * bump the global failure counter.
 *
 * NOTE(review): the four trailing zero arguments to
 * binder_alloc_new_buf() are presumably the transaction-specific
 * parameters (offsets/extra/async/etc.), unused by the selftest --
 * confirm against the binder_alloc_new_buf() prototype.
 */
static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
				      struct binder_buffer *buffers[],
				      size_t *sizes, int *seq)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0);
		if (IS_ERR(buffers[i]) ||
		    !check_buffer_pages_allocated(alloc, buffers[i],
						  sizes[i])) {
			pr_err_size_seq(sizes, seq);
			binder_selftest_failures++;
		}
	}
}
0131
/*
 * Free all BUFFER_NUM buffers in the order given by @seq, then verify
 * that every page below @end has been moved to the shrinker lru list
 * (non-empty lru entry) rather than freed outright.
 */
static void binder_selftest_free_buf(struct binder_alloc *alloc,
				     struct binder_buffer *buffers[],
				     size_t *sizes, int *seq, size_t end)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++)
		binder_alloc_free_buf(alloc, buffers[seq[i]]);

	/*
	 * Pages below @end were covered by the freed buffers, so each
	 * should now sit on the lru awaiting reclaim.
	 *
	 * NOTE(review): if the binder shrinker can run between the
	 * frees above and this check, a page may already have been
	 * reclaimed, making this error a false positive -- confirm
	 * the locking/serialization guarantees.
	 */
	for (i = 0; i < end / PAGE_SIZE; i++) {
		if (list_empty(&alloc->pages[i].lru)) {
			pr_err_size_seq(sizes, seq);
			pr_err("expect lru but is %s at page index %d\n",
			       alloc->pages[i].page_ptr ? "alloc" : "free", i);
			binder_selftest_failures++;
		}
	}
}
0155
/*
 * Drain the global binder_alloc_lru by repeatedly walking it with
 * binder_alloc_free_page() until it is empty, then verify that every
 * page of @alloc's buffer space has actually been freed (page_ptr
 * cleared).  A page that is still allocated or still on the lru is
 * logged and counted as a failure.
 */
static void binder_selftest_free_page(struct binder_alloc *alloc)
{
	int i;
	unsigned long count;

	while ((count = list_lru_count(&binder_alloc_lru))) {
		list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			      NULL, count);
	}

	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
		if (alloc->pages[i].page_ptr) {
			pr_err("expect free but is %s at page index %d\n",
			       list_empty(&alloc->pages[i].lru) ?
			       "alloc" : "lru", i);
			binder_selftest_failures++;
		}
	}
}
0175
/*
 * One full test pass for a given size set and free order: allocate,
 * free (pages move to the lru), allocate again -- the second round is
 * expected to reuse pages straight off the lru, leaving it empty --
 * then free again and finally reclaim every page via the shrinker
 * callback.
 */
static void binder_selftest_alloc_free(struct binder_alloc *alloc,
				       size_t *sizes, int *seq, size_t end)
{
	struct binder_buffer *buffers[BUFFER_NUM];

	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);

	/* Realloc the same sizes: should be satisfied from the lru. */
	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
	if (list_lru_count(&binder_alloc_lru))
		pr_err("lru list should be empty but is not\n");

	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
	binder_selftest_free_page(alloc);
}
0192
/*
 * Return true if @val already occurs among the first @index entries of
 * @seq, false otherwise.  Used when generating free-order permutations
 * to avoid repeating a buffer index.
 */
static bool is_dup(int *seq, int index, int val)
{
	int pos = 0;

	while (pos < index) {
		if (seq[pos] == val)
			return true;
		pos++;
	}
	return false;
}
0203
0204
/*
 * Recursively generate every permutation of the BUFFER_NUM buffer
 * indices (BUFFER_NUM! orders in total) into @seq, running a full
 * alloc/free pass for each complete permutation.  @index is the
 * position of @seq currently being filled; when it reaches BUFFER_NUM
 * the permutation is complete.
 */
static void binder_selftest_free_seq(struct binder_alloc *alloc,
				     size_t *sizes, int *seq,
				     int index, size_t end)
{
	int i;

	if (index == BUFFER_NUM) {
		binder_selftest_alloc_free(alloc, sizes, seq, end);
		return;
	}
	for (i = 0; i < BUFFER_NUM; i++) {
		if (is_dup(seq, index, i))
			continue;
		seq[index] = i;
		binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
	}
}
0222
/*
 * Derive two buffer-size sets from the end offsets in @end_offset and
 * exercise each with every possible free order.  front_sizes[i] is the
 * gap between consecutive end offsets; back_sizes is the same list
 * reversed.  back_sizes[0] additionally absorbs the slack between the
 * last end offset and the end of the buffer space, so the reversed set
 * fills the region completely.
 */
static void binder_selftest_alloc_size(struct binder_alloc *alloc,
				       size_t *end_offset)
{
	int i;
	int seq[BUFFER_NUM] = {0};
	size_t front_sizes[BUFFER_NUM];
	size_t back_sizes[BUFFER_NUM];
	size_t last_offset, offset = 0;

	for (i = 0; i < BUFFER_NUM; i++) {
		last_offset = offset;
		offset = end_offset[i];
		front_sizes[i] = offset - last_offset;
		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
	}

	/*
	 * Extend the last buffer of the reversed set to the end of
	 * the address space, so both sets together cover buffers
	 * sharing the first and the last pages of the region.
	 */
	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
	binder_selftest_free_seq(alloc, front_sizes, seq, 0,
				 end_offset[BUFFER_NUM - 1]);
	binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
}
0248
/*
 * Recursively choose an end offset for each of the BUFFER_NUM buffers
 * so that every combination of end alignments (see enum
 * buf_end_align_type) is covered: odd alignment values round the
 * running end up to a page boundary, even values extend it by
 * BUFFER_MIN_SIZE.  Once all BUFFER_NUM offsets are fixed, run the
 * size/free-order tests on that layout.
 */
static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
					 size_t *end_offset, int index)
{
	int align;
	size_t end, prev;

	if (index == BUFFER_NUM) {
		binder_selftest_alloc_size(alloc, end_offset);
		return;
	}
	prev = index == 0 ? 0 : end_offset[index - 1];
	end = prev;

	/*
	 * Compile-time guard: BUFFER_NUM buffers of BUFFER_MIN_SIZE
	 * must fit within one page, otherwise the same-page alignment
	 * cases could never occur.
	 */
	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);

	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
		if (align % 2)
			end = ALIGN(end, PAGE_SIZE);
		else
			end += BUFFER_MIN_SIZE;
		end_offset[index] = end;
		binder_selftest_alloc_offset(alloc, end_offset, index + 1);
	}
}
0273
0274
0275
0276
0277
0278
0279
0280
0281
0282
/**
 * binder_selftest_alloc() - Test alloc and free of binder buffer pages.
 * @alloc: binder_alloc struct of the process under test.
 *
 * Runs the allocator selftest exactly once: allocates BUFFER_NUM
 * buffers for every combination of end alignments and frees them in
 * every possible order, checking page state after each step.  The run
 * is deferred until a caller with a mapped vma arrives
 * (alloc->vma_addr set); afterwards binder_selftest_run is cleared so
 * later calls return immediately.  Failures are accumulated in
 * binder_selftest_failures and the result is reported via the kernel
 * log.
 */
void binder_selftest_alloc(struct binder_alloc *alloc)
{
	size_t end_offset[BUFFER_NUM];

	/* Fast path: selftest already completed (or disabled). */
	if (!binder_selftest_run)
		return;
	mutex_lock(&binder_selftest_lock);
	/* Re-check under the lock; also require a mapped vma. */
	if (!binder_selftest_run || !alloc->vma_addr)
		goto done;
	pr_info("STARTED\n");
	binder_selftest_alloc_offset(alloc, end_offset, 0);
	binder_selftest_run = false;
	if (binder_selftest_failures > 0)
		pr_info("%d tests FAILED\n", binder_selftest_failures);
	else
		pr_info("PASSED\n");

done:
	mutex_unlock(&binder_selftest_lock);
}