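/*
 * HMM (Heterogeneous Memory Management) selftests.
 *
 * These tests exercise the kernel's test_hmm module: each /dev/hmm_dmirror*
 * device mirrors a process address space the way an HMM-capable device
 * driver would, and the HMM_DMIRROR_* ioctls simulate a device reading,
 * writing, migrating, and snapshotting that memory.
 */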
#include "../kselftest_harness.h"

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <time.h>
#include <pthread.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/wait.h>

#include "./local_config.h"
#ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
#include <hugetlbfs.h>
#endif

/*
 * This is a private UAPI to the kernel test module so it isn't exported
 * in the usual include/uapi/... directory.
 */
#include "../../../../lib/test_hmm_uapi.h"
#include "../../../../mm/gup_test.h"

struct hmm_buffer {
	void		*ptr;
	void		*mirror;
	unsigned long	size;
	int		fd;
	uint64_t	cpages;
	uint64_t	faults;
};

enum {
	HMM_PRIVATE_DEVICE_ONE,
	HMM_PRIVATE_DEVICE_TWO,
	HMM_COHERENCE_DEVICE_ONE,
	HMM_COHERENCE_DEVICE_TWO,
};

#define TWOMEG		(1 << 21)
#define HMM_BUFFER_SIZE	(1024 << 12)
#define HMM_PATH_MAX	64
#define NTIMES		10

#define ALIGN(x, a) (((x) + ((a) - 1)) & (~((a) - 1)))

/* Just the gup flags we need, values copied from mm.h: */
#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_LONGTERM	0x10000 /* mapping lifetime is indefinite */

FIXTURE(hmm)
{
	int		fd;
	unsigned int	page_size;
	unsigned int	page_shift;
};

FIXTURE_VARIANT(hmm)
{
	int	device_number;
};

FIXTURE_VARIANT_ADD(hmm, hmm_device_private)
{
	.device_number = HMM_PRIVATE_DEVICE_ONE,
};

FIXTURE_VARIANT_ADD(hmm, hmm_device_coherent)
{
	.device_number = HMM_COHERENCE_DEVICE_ONE,
};

FIXTURE(hmm2)
{
	int		fd0;
	int		fd1;
	unsigned int	page_size;
	unsigned int	page_shift;
};

FIXTURE_VARIANT(hmm2)
{
	int	device_number0;
	int	device_number1;
};

FIXTURE_VARIANT_ADD(hmm2, hmm2_device_private)
{
	.device_number0 = HMM_PRIVATE_DEVICE_ONE,
	.device_number1 = HMM_PRIVATE_DEVICE_TWO,
};

FIXTURE_VARIANT_ADD(hmm2, hmm2_device_coherent)
{
	.device_number0 = HMM_COHERENCE_DEVICE_ONE,
	.device_number1 = HMM_COHERENCE_DEVICE_TWO,
};

static int hmm_open(int unit)
{
	char pathname[HMM_PATH_MAX];
	int fd;

	snprintf(pathname, sizeof(pathname), "/dev/hmm_dmirror%d", unit);
	fd = open(pathname, O_RDWR, 0);
	if (fd < 0)
		fprintf(stderr, "could not open hmm dmirror driver (%s)\n",
			pathname);
	return fd;
}

static bool hmm_is_coherent_type(int dev_num)
{
	return (dev_num >= HMM_COHERENCE_DEVICE_ONE);
}

FIXTURE_SETUP(hmm)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	self->page_shift = ffs(self->page_size) - 1;

	self->fd = hmm_open(variant->device_number);
	if (self->fd < 0 && hmm_is_coherent_type(variant->device_number))
		SKIP(exit(0), "DEVICE_COHERENT not available");
	ASSERT_GE(self->fd, 0);
}

FIXTURE_SETUP(hmm2)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	self->page_shift = ffs(self->page_size) - 1;

	self->fd0 = hmm_open(variant->device_number0);
	if (self->fd0 < 0 && hmm_is_coherent_type(variant->device_number0))
		SKIP(exit(0), "DEVICE_COHERENT not available");
	ASSERT_GE(self->fd0, 0);
	self->fd1 = hmm_open(variant->device_number1);
	ASSERT_GE(self->fd1, 0);
}

FIXTURE_TEARDOWN(hmm)
{
	int ret = close(self->fd);

	ASSERT_EQ(ret, 0);
	self->fd = -1;
}

FIXTURE_TEARDOWN(hmm2)
{
	int ret = close(self->fd0);

	ASSERT_EQ(ret, 0);
	self->fd0 = -1;

	ret = close(self->fd1);
	ASSERT_EQ(ret, 0);
	self->fd1 = -1;
}

static int hmm_dmirror_cmd(int fd,
			   unsigned long request,
			   struct hmm_buffer *buffer,
			   unsigned long npages)
{
	struct hmm_dmirror_cmd cmd;
	int ret;

	/* Simulate a device accessing system memory. */
	cmd.addr = (__u64)buffer->ptr;
	cmd.ptr = (__u64)buffer->mirror;
	cmd.npages = npages;

	for (;;) {
		ret = ioctl(fd, request, &cmd);
		if (ret == 0)
			break;
		if (errno == EINTR)
			continue;
		return -errno;
	}
	buffer->cpages = cmd.cpages;
	buffer->faults = cmd.faults;

	return 0;
}

static void hmm_buffer_free(struct hmm_buffer *buffer)
{
	if (buffer == NULL)
		return;

	if (buffer->ptr)
		munmap(buffer->ptr, buffer->size);
	free(buffer->mirror);
	free(buffer);
}

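/*
 * Create a temporary file that will be deleted on close.
 */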
static int hmm_create_file(unsigned long size)
{
	char path[HMM_PATH_MAX];
	int fd;

	strcpy(path, "/tmp");
	fd = open(path, O_TMPFILE | O_EXCL | O_RDWR, 0600);
	if (fd >= 0) {
		int r;

		do {
			r = ftruncate(fd, size);
		} while (r == -1 && errno == EINTR);
		if (!r)
			return fd;
		close(fd);
	}
	return -1;
}

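/*
 * Return a random unsigned number.
 */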
static unsigned int hmm_random(void)
{
	static int fd = -1;
	unsigned int r;

	if (fd < 0) {
		fd = open("/dev/urandom", O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "%s:%d failed to open /dev/urandom\n",
				__FILE__, __LINE__);
			return ~0U;
		}
	}
	if (read(fd, &r, sizeof(r)) != sizeof(r))
		return ~0U;
	return r;
}

static void hmm_nanosleep(unsigned int n)
{
	struct timespec t;

	t.tv_sec = 0;
	t.tv_nsec = n;
	nanosleep(&t, NULL);
}

static int hmm_migrate_sys_to_dev(int fd,
				  struct hmm_buffer *buffer,
				  unsigned long npages)
{
	return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages);
}

static int hmm_migrate_dev_to_sys(int fd,
				  struct hmm_buffer *buffer,
				  unsigned long npages)
{
	return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages);
}

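/*
 * Open and close the test device.
 */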
TEST_F(hmm, open_close)
{
}

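/*
 * Read private anonymous memory.
 */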
TEST_F(hmm, anon_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int val;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/*
	 * Initialize buffer in system memory but leave the first two pages
	 * zero (pte_none and pfn_zero).
	 */
	i = 2 * self->page_size / sizeof(*ptr);
	for (ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Set buffer permission to read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Populate the CPU page table with a special zero page. */
	val = *(int *)(buffer->ptr + self->page_size);
	ASSERT_EQ(val, 0);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	ptr = buffer->mirror;
	for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);
	for (; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

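/*
 * Read private anonymous memory which has been protected with
 * mprotect() PROT_NONE.
 */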
TEST_F(hmm, anon_read_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize mirror buffer so we can verify it isn't written. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	/* Protect buffer from reading. */
	ret = mprotect(buffer->ptr, size, PROT_NONE);
	ASSERT_EQ(ret, 0);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, -EFAULT);

	/* Allow CPU to read the buffer so we can check it. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Check the mirror was not written to. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	hmm_buffer_free(buffer);
}

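/*
 * Write private anonymous memory.
 */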
TEST_F(hmm, anon_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

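/*
 * Write private anonymous memory which has been protected with
 * mprotect() PROT_READ.
 */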
TEST_F(hmm, anon_write_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device reading a zero page of memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);
	ASSERT_EQ(buffer->faults, 1);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory; it should fail. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, -EPERM);

	/* Check it didn't write. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);

	/* Now allow writing and see that the zero page is replaced. */
	ret = mprotect(buffer->ptr, size, PROT_WRITE | PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

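/*
 * Check that a device writing an anonymous private mapping
 * will copy-on-write if a child process inherits the mapping.
 */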
TEST_F(hmm, anon_write_child)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer->ptr so we can tell if it is written. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);
	if (pid != 0) {
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did not change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);
		return;
	}

	/* Check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/* The child process needs its own mirror to its own mm. */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}

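/*
 * Check that a device writing an anonymous shared mapping
 * will not copy-on-write if a child process inherits the mapping.
 */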
TEST_F(hmm, anon_write_child_shared)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer->ptr so we can tell if it is written. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);
	if (pid != 0) {
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], -i);
		return;
	}

	/* Check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/* The child process needs its own mirror to its own mm. */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}

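/*
 * Write private anonymous huge page.
 */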
TEST_F(hmm, anon_write_huge)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	void *old_ptr;
	void *map;
	int *ptr;
	int ret;

	size = 2 * TWOMEG;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Use the 2MB aligned middle of the mapping for a huge page. */
	size = TWOMEG;
	npages = size >> self->page_shift;
	map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
	ret = madvise(map, size, MADV_HUGEPAGE);
	ASSERT_EQ(ret, 0);
	old_ptr = buffer->ptr;
	buffer->ptr = map;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	buffer->ptr = old_ptr;
	hmm_buffer_free(buffer);
}

#ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
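/*
 * Write huge TLBFS page.
 */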
TEST_F(hmm, anon_write_hugetlbfs)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	long pagesizes[4];
	int n, idx;

	/* Skip the test if we can't determine a huge page size. */
	n = gethugepagesizes(pagesizes, 4);
	if (n <= 0)
		SKIP(return, "Huge page size could not be determined");
	/* Pick the smallest supported huge page size. */
	for (idx = 0; --n > 0; ) {
		if (pagesizes[n] < pagesizes[idx])
			idx = n;
	}
	size = ALIGN(TWOMEG, pagesizes[idx]);
	npages = size >> self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->ptr = get_hugepage_region(size, GHR_STRICT);
	if (buffer->ptr == NULL) {
		free(buffer);
		SKIP(return, "Huge page could not be allocated");
	}

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	free_hugepage_region(buffer->ptr);
	buffer->ptr = NULL;
	hmm_buffer_free(buffer);
}
#endif /* LOCAL_CONFIG_HAVE_LIBHUGETLBFS */

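/*
 * Read mmap'ed file memory.
 */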
TEST_F(hmm, file_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Write initial contents of the file. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;
	len = pwrite(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	memset(buffer->mirror, 0, size);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

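/*
 * Write mmap'ed file memory.
 */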
TEST_F(hmm, file_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Check that the device also wrote the file. */
	len = pread(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

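/*
 * Migrate anonymous memory to device private memory.
 */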
TEST_F(hmm, migrate)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

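/*
 * Migrate anonymous memory to device private memory and fault some of it
 * back to system memory, then try migrating the resulting mix of system
 * and device private memory.
 */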
TEST_F(hmm, migrate_fault)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Fault half the pages back to system memory and check them. */
	for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Migrate memory to the device again. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

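/*
 * Migrate anonymous shared memory to device private memory.
 */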
TEST_F(hmm, migrate_shared)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Migrating a shared anonymous mapping should fail. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, -ENOENT);

	hmm_buffer_free(buffer);
}

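/*
 * Try to migrate various memory types to device private memory.
 */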
TEST_F(hmm2, migrate_mixed)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int *ptr;
	unsigned char *p;
	int ret;
	int val;

	npages = 6;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses. */
	buffer->ptr = mmap(NULL, size,
			   PROT_NONE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);
	p = buffer->ptr;

	/* Migrating a protected area should be an error. */
	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
	ASSERT_EQ(ret, -EINVAL);

	/* Punch a hole after the first page address. */
	ret = munmap(buffer->ptr + self->page_size, self->page_size);
	ASSERT_EQ(ret, 0);

	/* We expect an error if the vma doesn't cover the range. */
	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 3);
	ASSERT_EQ(ret, -EINVAL);

	/* Page 2 will be a read-only zero page. */
	ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
		       PROT_READ);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 2 * self->page_size);
	val = *ptr + 3;
	ASSERT_EQ(val, 3);

	/* Page 3 will be read-only. */
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
		       PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 3 * self->page_size);
	*ptr = val;
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
		       PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Pages 4-5 will be read-write. */
	ret = mprotect(buffer->ptr + 4 * self->page_size, 2 * self->page_size,
		       PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 4 * self->page_size);
	*ptr = val;
	ptr = (int *)(buffer->ptr + 5 * self->page_size);
	*ptr = val;

	/* Now try to migrate pages 2-5 to device 1. */
	buffer->ptr = p + 2 * self->page_size;
	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 4);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 4);

	/* Page 5 won't be migrated to device 0 because it's on device 1. */
	buffer->ptr = p + 5 * self->page_size;
	ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
	ASSERT_EQ(ret, -ENOENT);

	buffer->ptr = p;
	hmm_buffer_free(buffer);
}

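/*
 * Migrate anonymous memory to device memory and back to system memory
 * multiple times. In case of private zone configuration, this is done
 * through fault pages accessed by CPU. In case of coherent zone
 * configuration, this is done explicitly migrating back and forth from
 * device to system memory.
 */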
TEST_F(hmm, migrate_multiple)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	unsigned long c;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; c++) {
		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i;

		/* Migrate memory to device. */
		ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
		ASSERT_EQ(ret, 0);
		ASSERT_EQ(buffer->cpages, npages);

		/* Check what the device read. */
		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);

		/* Migrate back to system memory and check them. */
		if (hmm_is_coherent_type(variant->device_number)) {
			ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
			ASSERT_EQ(ret, 0);
			ASSERT_EQ(buffer->cpages, npages);
		}

		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);

		hmm_buffer_free(buffer);
	}
}

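/*
 * Read anonymous memory multiple times.
 */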
TEST_F(hmm, anon_read_multiple)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	unsigned long c;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; c++) {
		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i + c;

		/* Simulate a device reading system memory. */
		ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
				      npages);
		ASSERT_EQ(ret, 0);
		ASSERT_EQ(buffer->cpages, npages);
		ASSERT_EQ(buffer->faults, 1);

		/* Check what the device read. */
		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i + c);

		hmm_buffer_free(buffer);
	}
}

void *unmap_buffer(void *p)
{
	struct hmm_buffer *buffer = p;

	/* Delay for a bit and then unmap buffer while it is being read. */
	hmm_nanosleep(hmm_random() % 32000);
	munmap(buffer->ptr + buffer->size / 2, buffer->size / 2);
	buffer->ptr = NULL;

	return NULL;
}

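/*
 * Try reading anonymous memory while it is being unmapped.
 */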
TEST_F(hmm, anon_teardown)
{
	unsigned long npages;
	unsigned long size;
	unsigned long c;
	void *ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	for (c = 0; c < NTIMES; ++c) {
		pthread_t thread;
		struct hmm_buffer *buffer;
		unsigned long i;
		int *ptr;
		int rc;

		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i + c;

		rc = pthread_create(&thread, NULL, unmap_buffer, buffer);
		ASSERT_EQ(rc, 0);

		/* Simulate a device reading system memory. */
		rc = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
				     npages);
		if (rc == 0) {
			ASSERT_EQ(buffer->cpages, npages);
			ASSERT_EQ(buffer->faults, 1);

			/* Check what the device read. */
			for (i = 0, ptr = buffer->mirror;
			     i < size / sizeof(*ptr);
			     ++i)
				ASSERT_EQ(ptr[i], i + c);
		}

		pthread_join(thread, &ret);
		hmm_buffer_free(buffer);
	}
}

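/*
 * Snapshot a page of the dmirror device file mapped MAP_PRIVATE
 * (a mixed map).
 */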
TEST_F(hmm, mixedmap)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned char *m;
	int ret;

	npages = 1;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(npages);
	ASSERT_NE(buffer->mirror, NULL);

	/* Map the device file itself to get a mixed mapping. */
	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE,
			   self->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device snapshotting CPU pagetables. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	ASSERT_EQ(m[0], HMM_DMIRROR_PROT_READ);

	hmm_buffer_free(buffer);
}

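/*
 * Test memory snapshot without faulting in pages accessed by the device.
 */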
TEST_F(hmm2, snapshot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int *ptr;
	unsigned char *p;
	unsigned char *m;
	int ret;
	int val;

	npages = 7;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(npages);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses. */
	buffer->ptr = mmap(NULL, size,
			   PROT_NONE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);
	p = buffer->ptr;

	/* Punch a hole after the first page address. */
	ret = munmap(buffer->ptr + self->page_size, self->page_size);
	ASSERT_EQ(ret, 0);

	/* Page 2 will be a read-only zero page. */
	ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
		       PROT_READ);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 2 * self->page_size);
	val = *ptr + 3;
	ASSERT_EQ(val, 3);

	/* Page 3 will be read-only. */
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
		       PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 3 * self->page_size);
	*ptr = val;
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
		       PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Pages 4-6 will be read-write. */
	ret = mprotect(buffer->ptr + 4 * self->page_size, 3 * self->page_size,
		       PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 4 * self->page_size);
	*ptr = val;

	/* Page 5 will be migrated to device 0. */
	buffer->ptr = p + 5 * self->page_size;
	ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Page 6 will be migrated to device 1. */
	buffer->ptr = p + 6 * self->page_size;
	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Simulate a device snapshotting CPU pagetables. */
	buffer->ptr = p;
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	ASSERT_EQ(m[0], HMM_DMIRROR_PROT_ERROR);
	ASSERT_EQ(m[1], HMM_DMIRROR_PROT_ERROR);
	ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
	if (!hmm_is_coherent_type(variant->device_number0)) {
		ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
				HMM_DMIRROR_PROT_WRITE);
		ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
	} else {
		ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL |
				HMM_DMIRROR_PROT_WRITE);
		ASSERT_EQ(m[6], HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE |
				HMM_DMIRROR_PROT_WRITE);
	}

	hmm_buffer_free(buffer);
}

#ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
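/*
 * Test the hmm_range_fault() HMM_PFN_PMD flag for large pages that
 * should be mapped by a large page table entry.
 */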
TEST_F(hmm, compound)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int *ptr;
	unsigned char *m;
	int ret;
	long pagesizes[4];
	int n, idx;
	unsigned long i;

	/* Skip the test if we can't determine a huge page size. */
	n = gethugepagesizes(pagesizes, 4);
	if (n <= 0)
		SKIP(return, "Huge page size could not be determined");
	/* Pick the smallest supported huge page size. */
	for (idx = 0; --n > 0; ) {
		if (pagesizes[n] < pagesizes[idx])
			idx = n;
	}
	size = ALIGN(TWOMEG, pagesizes[idx]);
	npages = size >> self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->ptr = get_hugepage_region(size, GHR_STRICT);
	if (buffer->ptr == NULL) {
		free(buffer);
		SKIP(return, "Huge page could not be allocated");
	}

	buffer->size = size;
	buffer->mirror = malloc(npages);
	ASSERT_NE(buffer->mirror, NULL);

	/* Initialize the pages the device will snapshot in buffer->ptr. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device snapshotting CPU pagetables. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	for (i = 0; i < npages; ++i)
		ASSERT_EQ(m[i], HMM_DMIRROR_PROT_WRITE |
				HMM_DMIRROR_PROT_PMD);

	/* Make the region read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate a device snapshotting CPU pagetables. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	for (i = 0; i < npages; ++i)
		ASSERT_EQ(m[i], HMM_DMIRROR_PROT_READ |
				HMM_DMIRROR_PROT_PMD);

	free_hugepage_region(buffer->ptr);
	buffer->ptr = NULL;
	hmm_buffer_free(buffer);
}
#endif /* LOCAL_CONFIG_HAVE_LIBHUGETLBFS */

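/*
 * Test two devices reading the same memory (double mapped).
 */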
TEST_F(hmm2, double_map)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = 6;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	/* The mirror receives full page contents, so it needs size bytes. */
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses. */
	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Make region read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate device 0 reading system memory. */
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Simulate device 1 reading system memory. */
	ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Migrate pages to device 1 and try to read from device 0. */
	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what device 0 read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}

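/*
 * Basic check of exclusive faulting.
 */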
TEST_F(hmm, exclusive)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Map memory exclusively for device access. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Fault pages back to system memory and check them. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i]++, i);

	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i+1);

	/* Check atomic access was revoked. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_CHECK_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);

	hmm_buffer_free(buffer);
}

TEST_F(hmm, exclusive_mprotect)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Map memory exclusively for device access. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Make the region read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate a device writing system memory; it should fail. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, -EPERM);

	hmm_buffer_free(buffer);
}

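/*
 * Check copy-on-write works.
 */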
TEST_F(hmm, exclusive_cow)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Map memory exclusively for device access. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Fork so the pages become copy-on-write. */
	fork();

	/* Fault pages back to system memory and check them. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i]++, i);

	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i+1);

	hmm_buffer_free(buffer);
}

static int gup_test_exec(int gup_fd, unsigned long addr, int cmd,
			 int npages, int size, int flags)
{
	struct gup_test gup = {
		.nr_pages_per_call = npages,
		.addr = addr,
		.gup_flags = FOLL_WRITE | flags,
		.size = size,
	};

	if (ioctl(gup_fd, cmd, &gup)) {
		perror("gup_test ioctl failed");
		return errno;
	}

	return 0;
}

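/*
 * Test get user device pages through gup_test, including the PIN_LONGTERM
 * flag. Long-term pinning should trigger a migration back to system memory
 * for both private and coherent type pages.
 * This test uses the gup_test module; make sure CONFIG_GUP_TEST is in your
 * kernel configuration before you run it.
 */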
TEST_F(hmm, hmm_gup_test)
{
	struct hmm_buffer *buffer;
	int gup_fd;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	unsigned char *m;

	gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR);
	if (gup_fd == -1)
		SKIP(return, "Skipping test, could not find gup_test driver");

	npages = 4;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr,
				GUP_BASIC_TEST, 1, self->page_size, 0), 0);
	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr + 1 * self->page_size,
				GUP_FAST_BENCHMARK, 1, self->page_size, 0), 0);
	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr + 2 * self->page_size,
				PIN_FAST_BENCHMARK, 1, self->page_size, FOLL_LONGTERM), 0);
	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr + 3 * self->page_size,
				PIN_LONGTERM_BENCHMARK, 1, self->page_size, 0), 0);

	/* Take a snapshot of the CPU pagetables. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	m = buffer->mirror;
	if (hmm_is_coherent_type(variant->device_number)) {
		ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[0]);
		ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[1]);
	} else {
		ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[0]);
		ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[1]);
	}
	ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[2]);
	ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[3]);

	/*
	 * Check the content on the pages again. Make sure there's no
	 * corrupted data.
	 */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	close(gup_fd);
	hmm_buffer_free(buffer);
}

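/*
 * Test copy-on-write in device pages.
 * In case of writing to COW private page(s), a page fault will migrate pages
 * back to system memory first. Then, these pages will be duplicated. In case
 * of COW device coherent type, pages are duplicated directly from device
 * memory.
 */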
TEST_F(hmm, hmm_cow_in_device)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	unsigned char *m;
	pid_t pid;
	int status;

	npages = 4;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);
	if (!pid) {
		/* Child process waits for SIGTERM from the parent. */
		while (1) {
		}
		/* Not reached. */
		exit(0);
	}

	/*
	 * Parent process writes to the COW page(s) and gets a new copy in
	 * system memory. In case of device private pages, the write first
	 * causes a migration back to system memory.
	 */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Terminate the child and wait for it. */
	EXPECT_EQ(0, kill(pid, SIGTERM));
	EXPECT_EQ(pid, waitpid(pid, &status, 0));
	EXPECT_NE(0, WIFSIGNALED(status));
	EXPECT_EQ(SIGTERM, WTERMSIG(status));

	/* Take a snapshot of the parent's CPU pagetables. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	m = buffer->mirror;
	for (i = 0; i < npages; i++)
		ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[i]);

	hmm_buffer_free(buffer);
}

TEST_HARNESS_MAIN