Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * HMM stands for Heterogeneous Memory Management, it is a helper layer inside
0004  * the linux kernel to help device drivers mirror a process address space in
0005  * the device. This allows the device to use the same address space which
0006  * makes communication and data exchange a lot easier.
0007  *
0008  * This framework's sole purpose is to exercise various code paths inside
0009  * the kernel to make sure that HMM performs as expected and to flush out any
0010  * bugs.
0011  */
0012 
0013 #include "../kselftest_harness.h"
0014 
0015 #include <errno.h>
0016 #include <fcntl.h>
0017 #include <stdio.h>
0018 #include <stdlib.h>
0019 #include <stdint.h>
0020 #include <unistd.h>
0021 #include <strings.h>
0022 #include <time.h>
0023 #include <pthread.h>
0024 #include <sys/types.h>
0025 #include <sys/stat.h>
0026 #include <sys/mman.h>
0027 #include <sys/ioctl.h>
0028 
0029 #include "./local_config.h"
0030 #ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
0031 #include <hugetlbfs.h>
0032 #endif
0033 
0034 /*
0035  * This is a private UAPI to the kernel test module so it isn't exported
0036  * in the usual include/uapi/... directory.
0037  */
0038 #include "../../../../lib/test_hmm_uapi.h"
0039 #include "../../../../mm/gup_test.h"
0040 
/*
 * Descriptor for one test allocation: a CPU mapping (ptr) plus a
 * process-local shadow (mirror) used as the device-side view by the
 * dmirror driver commands.
 */
struct hmm_buffer {
    void        *ptr;       /* CPU-visible mapping under test */
    void        *mirror;    /* malloc'ed shadow the driver reads/writes */
    unsigned long   size;   /* size of both ptr and mirror, in bytes */
    int     fd;             /* backing file, or -1 for anonymous mappings */
    uint64_t    cpages;     /* pages processed by the last driver command */
    uint64_t    faults;     /* faults taken by the last driver command */
};
0049 
/*
 * Minor numbers of the hmm_dmirror test devices (see hmm_open()).
 * Coherent device numbers deliberately sort after the private ones so
 * hmm_is_coherent_type() can use a simple >= comparison.
 */
enum {
    HMM_PRIVATE_DEVICE_ONE,
    HMM_PRIVATE_DEVICE_TWO,
    HMM_COHERENCE_DEVICE_ONE,
    HMM_COHERENCE_DEVICE_TWO,
};
0056 
#define TWOMEG      (1 << 21)
#define HMM_BUFFER_SIZE (1024 << 12)
#define HMM_PATH_MAX    64
#define NTIMES      10

/*
 * Round x up to the next multiple of a (a must be a power of two).
 * Both occurrences of "a" are fully parenthesized: the original form
 * used a bare (a - 1) for the addend, which mis-expands for expression
 * arguments such as ALIGN(x, 1 << 2) because "-" binds tighter than "<<".
 */
#define ALIGN(x, a) (((x) + ((a) - 1)) & (~((a) - 1)))
/* Just the flags we need, copied from mm.h: */
#define FOLL_WRITE  0x01    /* check pte is writable */
#define FOLL_LONGTERM   0x10000 /* mapping lifetime is indefinite */
0066 
/*
 * Single-device fixture: one open file descriptor to a dmirror device
 * plus cached page geometry.
 */
FIXTURE(hmm)
{
    int     fd;
    unsigned int    page_size;
    unsigned int    page_shift;
};

/* Each variant selects which dmirror device number the fixture opens. */
FIXTURE_VARIANT(hmm)
{
    int     device_number;
};

FIXTURE_VARIANT_ADD(hmm, hmm_device_private)
{
    .device_number = HMM_PRIVATE_DEVICE_ONE,
};

FIXTURE_VARIANT_ADD(hmm, hmm_device_coherent)
{
    .device_number = HMM_COHERENCE_DEVICE_ONE,
};
0088 
/*
 * Two-device fixture: for tests that mirror the same process through
 * two dmirror devices at once.
 */
FIXTURE(hmm2)
{
    int     fd0;
    int     fd1;
    unsigned int    page_size;
    unsigned int    page_shift;
};

/* Each variant selects the pair of dmirror device numbers to open. */
FIXTURE_VARIANT(hmm2)
{
    int     device_number0;
    int     device_number1;
};

FIXTURE_VARIANT_ADD(hmm2, hmm2_device_private)
{
    .device_number0 = HMM_PRIVATE_DEVICE_ONE,
    .device_number1 = HMM_PRIVATE_DEVICE_TWO,
};

FIXTURE_VARIANT_ADD(hmm2, hmm2_device_coherent)
{
    .device_number0 = HMM_COHERENCE_DEVICE_ONE,
    .device_number1 = HMM_COHERENCE_DEVICE_TWO,
};
0114 
0115 static int hmm_open(int unit)
0116 {
0117     char pathname[HMM_PATH_MAX];
0118     int fd;
0119 
0120     snprintf(pathname, sizeof(pathname), "/dev/hmm_dmirror%d", unit);
0121     fd = open(pathname, O_RDWR, 0);
0122     if (fd < 0)
0123         fprintf(stderr, "could not open hmm dmirror driver (%s)\n",
0124             pathname);
0125     return fd;
0126 }
0127 
0128 static bool hmm_is_coherent_type(int dev_num)
0129 {
0130     return (dev_num >= HMM_COHERENCE_DEVICE_ONE);
0131 }
0132 
FIXTURE_SETUP(hmm)
{
    /* page_size is a power of two, so ffs() - 1 yields log2(page_size). */
    self->page_size = sysconf(_SC_PAGE_SIZE);
    self->page_shift = ffs(self->page_size) - 1;

    /* Coherent devices are optional; skip rather than fail if absent. */
    self->fd = hmm_open(variant->device_number);
    if (self->fd < 0 && hmm_is_coherent_type(variant->device_number))
        SKIP(exit(0), "DEVICE_COHERENT not available");
    ASSERT_GE(self->fd, 0);
}
0143 
FIXTURE_SETUP(hmm2)
{
    /* page_size is a power of two, so ffs() - 1 yields log2(page_size). */
    self->page_size = sysconf(_SC_PAGE_SIZE);
    self->page_shift = ffs(self->page_size) - 1;

    /* Only the first device's absence triggers a skip for coherent types. */
    self->fd0 = hmm_open(variant->device_number0);
    if (self->fd0 < 0 && hmm_is_coherent_type(variant->device_number0))
        SKIP(exit(0), "DEVICE_COHERENT not available");
    ASSERT_GE(self->fd0, 0);
    self->fd1 = hmm_open(variant->device_number1);
    ASSERT_GE(self->fd1, 0);
}
0156 
FIXTURE_TEARDOWN(hmm)
{
    int ret = close(self->fd);

    ASSERT_EQ(ret, 0);
    /* Poison the fd so accidental reuse after teardown fails fast. */
    self->fd = -1;
}
0164 
FIXTURE_TEARDOWN(hmm2)
{
    int ret = close(self->fd0);

    ASSERT_EQ(ret, 0);
    /* Poison both fds so accidental reuse after teardown fails fast. */
    self->fd0 = -1;

    ret = close(self->fd1);
    ASSERT_EQ(ret, 0);
    self->fd1 = -1;
}
0176 
0177 static int hmm_dmirror_cmd(int fd,
0178                unsigned long request,
0179                struct hmm_buffer *buffer,
0180                unsigned long npages)
0181 {
0182     struct hmm_dmirror_cmd cmd;
0183     int ret;
0184 
0185     /* Simulate a device reading system memory. */
0186     cmd.addr = (__u64)buffer->ptr;
0187     cmd.ptr = (__u64)buffer->mirror;
0188     cmd.npages = npages;
0189 
0190     for (;;) {
0191         ret = ioctl(fd, request, &cmd);
0192         if (ret == 0)
0193             break;
0194         if (errno == EINTR)
0195             continue;
0196         return -errno;
0197     }
0198     buffer->cpages = cmd.cpages;
0199     buffer->faults = cmd.faults;
0200 
0201     return 0;
0202 }
0203 
0204 static void hmm_buffer_free(struct hmm_buffer *buffer)
0205 {
0206     if (buffer == NULL)
0207         return;
0208 
0209     if (buffer->ptr)
0210         munmap(buffer->ptr, buffer->size);
0211     free(buffer->mirror);
0212     free(buffer);
0213 }
0214 
0215 /*
0216  * Create a temporary file that will be deleted on close.
0217  */
0218 static int hmm_create_file(unsigned long size)
0219 {
0220     char path[HMM_PATH_MAX];
0221     int fd;
0222 
0223     strcpy(path, "/tmp");
0224     fd = open(path, O_TMPFILE | O_EXCL | O_RDWR, 0600);
0225     if (fd >= 0) {
0226         int r;
0227 
0228         do {
0229             r = ftruncate(fd, size);
0230         } while (r == -1 && errno == EINTR);
0231         if (!r)
0232             return fd;
0233         close(fd);
0234     }
0235     return -1;
0236 }
0237 
0238 /*
0239  * Return a random unsigned number.
0240  */
/*
 * Return a random unsigned number read from /dev/urandom, or ~0U if the
 * device cannot be opened or read. The fd is opened once and cached.
 *
 * Fix: the original ignored the return value of read(); a failed or
 * short read would have returned (partially) uninitialized stack data,
 * which is undefined behavior. Treat it like the open failure instead.
 */
static unsigned int hmm_random(void)
{
    static int fd = -1;
    unsigned int r;

    if (fd < 0) {
        fd = open("/dev/urandom", O_RDONLY);
        if (fd < 0) {
            fprintf(stderr, "%s:%d failed to open /dev/urandom\n",
                    __FILE__, __LINE__);
            return ~0U;
        }
    }
    if (read(fd, &r, sizeof(r)) != sizeof(r)) {
        fprintf(stderr, "%s:%d failed to read /dev/urandom\n",
                __FILE__, __LINE__);
        return ~0U;
    }
    return r;
}
0257 
/*
 * Sleep for n nanoseconds (callers are assumed to pass n < 1e9 so
 * tv_nsec stays in range — TODO confirm at call sites).
 */
static void hmm_nanosleep(unsigned int n)
{
    struct timespec req = { .tv_sec = 0, .tv_nsec = n };

    nanosleep(&req, NULL);
}
0266 
/*
 * Migrate the buffer's pages from system memory to device memory.
 * Thin wrapper around hmm_dmirror_cmd(); returns 0 or -errno.
 */
static int hmm_migrate_sys_to_dev(int fd,
                   struct hmm_buffer *buffer,
                   unsigned long npages)
{
    return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages);
}
0273 
/*
 * Migrate the buffer's pages from device memory back to system memory.
 * Thin wrapper around hmm_dmirror_cmd(); returns 0 or -errno.
 */
static int hmm_migrate_dev_to_sys(int fd,
                   struct hmm_buffer *buffer,
                   unsigned long npages)
{
    return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages);
}
0280 
0281 /*
0282  * Simple NULL test of device open/close.
0283  */
TEST_F(hmm, open_close)
{
    /* Intentionally empty: fixture setup/teardown exercise open/close. */
}
0287 
0288 /*
0289  * Read private anonymous memory.
0290  */
TEST_F(hmm, anon_read)
{
    struct hmm_buffer *buffer;
    unsigned long npages;
    unsigned long size;
    unsigned long i;
    int *ptr;
    int ret;
    int val;

    npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
    ASSERT_NE(npages, 0);
    size = npages << self->page_shift;

    buffer = malloc(sizeof(*buffer));
    ASSERT_NE(buffer, NULL);

    buffer->fd = -1;
    buffer->size = size;
    buffer->mirror = malloc(size);
    ASSERT_NE(buffer->mirror, NULL);

    buffer->ptr = mmap(NULL, size,
               PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS,
               buffer->fd, 0);
    ASSERT_NE(buffer->ptr, MAP_FAILED);

    /*
     * Initialize buffer in system memory but leave the first two pages
     * zero (pte_none and pfn_zero).
     */
    i = 2 * self->page_size / sizeof(*ptr);
    for (ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
        ptr[i] = i;

    /* Set buffer permission to read-only. */
    ret = mprotect(buffer->ptr, size, PROT_READ);
    ASSERT_EQ(ret, 0);

    /* Populate the CPU page table with a special zero page. */
    val = *(int *)(buffer->ptr + self->page_size);
    ASSERT_EQ(val, 0);

    /* Simulate a device reading system memory. */
    ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
    ASSERT_EQ(ret, 0);
    ASSERT_EQ(buffer->cpages, npages);
    /* The whole range is expected to be serviced by a single fault. */
    ASSERT_EQ(buffer->faults, 1);

    /* Check what the device read. */
    ptr = buffer->mirror;
    /* Pages 0 and 1 were never written, so the device must see zeroes. */
    for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], 0);
    for (; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], i);

    hmm_buffer_free(buffer);
}
0350 
0351 /*
0352  * Read private anonymous memory which has been protected with
0353  * mprotect() PROT_NONE.
0354  */
TEST_F(hmm, anon_read_prot)
{
    struct hmm_buffer *buffer;
    unsigned long npages;
    unsigned long size;
    unsigned long i;
    int *ptr;
    int ret;

    npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
    ASSERT_NE(npages, 0);
    size = npages << self->page_shift;

    buffer = malloc(sizeof(*buffer));
    ASSERT_NE(buffer, NULL);

    buffer->fd = -1;
    buffer->size = size;
    buffer->mirror = malloc(size);
    ASSERT_NE(buffer->mirror, NULL);

    buffer->ptr = mmap(NULL, size,
               PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS,
               buffer->fd, 0);
    ASSERT_NE(buffer->ptr, MAP_FAILED);

    /* Initialize buffer in system memory. */
    for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
        ptr[i] = i;

    /* Initialize mirror buffer so we can verify it isn't written. */
    for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
        ptr[i] = -i;

    /* Protect buffer from reading. */
    ret = mprotect(buffer->ptr, size, PROT_NONE);
    ASSERT_EQ(ret, 0);

    /* A device read of a PROT_NONE range must fail with -EFAULT. */
    ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
    ASSERT_EQ(ret, -EFAULT);

    /* Allow CPU to read the buffer so we can check it. */
    ret = mprotect(buffer->ptr, size, PROT_READ);
    ASSERT_EQ(ret, 0);
    for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], i);

    /* The mirror must still hold its sentinel values: nothing was read. */
    for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], -i);

    hmm_buffer_free(buffer);
}
0410 
0411 /*
0412  * Write private anonymous memory.
0413  */
TEST_F(hmm, anon_write)
{
    struct hmm_buffer *buffer;
    unsigned long npages;
    unsigned long size;
    unsigned long i;
    int *ptr;
    int ret;

    npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
    ASSERT_NE(npages, 0);
    size = npages << self->page_shift;

    buffer = malloc(sizeof(*buffer));
    ASSERT_NE(buffer, NULL);

    buffer->fd = -1;
    buffer->size = size;
    buffer->mirror = malloc(size);
    ASSERT_NE(buffer->mirror, NULL);

    buffer->ptr = mmap(NULL, size,
               PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS,
               buffer->fd, 0);
    ASSERT_NE(buffer->ptr, MAP_FAILED);

    /* Initialize data that the device will write to buffer->ptr. */
    for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
        ptr[i] = i;

    /* Simulate a device writing system memory. */
    ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
    ASSERT_EQ(ret, 0);
    ASSERT_EQ(buffer->cpages, npages);
    /* The whole range is expected to be serviced by a single fault. */
    ASSERT_EQ(buffer->faults, 1);

    /* Check what the device wrote. */
    for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], i);

    hmm_buffer_free(buffer);
}
0457 
0458 /*
0459  * Write private anonymous memory which has been protected with
0460  * mprotect() PROT_READ.
0461  */
TEST_F(hmm, anon_write_prot)
{
    struct hmm_buffer *buffer;
    unsigned long npages;
    unsigned long size;
    unsigned long i;
    int *ptr;
    int ret;

    npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
    ASSERT_NE(npages, 0);
    size = npages << self->page_shift;

    buffer = malloc(sizeof(*buffer));
    ASSERT_NE(buffer, NULL);

    buffer->fd = -1;
    buffer->size = size;
    buffer->mirror = malloc(size);
    ASSERT_NE(buffer->mirror, NULL);

    /* Map read-only so the device's write attempt must be refused. */
    buffer->ptr = mmap(NULL, size,
               PROT_READ,
               MAP_PRIVATE | MAP_ANONYMOUS,
               buffer->fd, 0);
    ASSERT_NE(buffer->ptr, MAP_FAILED);

    /* Simulate a device reading a zero page of memory. */
    ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, 1);
    ASSERT_EQ(ret, 0);
    ASSERT_EQ(buffer->cpages, 1);
    ASSERT_EQ(buffer->faults, 1);

    /* Initialize data that the device will write to buffer->ptr. */
    for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
        ptr[i] = i;

    /* Writing a read-only mapping must fail with -EPERM. */
    ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
    ASSERT_EQ(ret, -EPERM);

    /* The buffer must be untouched: still all zeroes. */
    for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], 0);

    /* Now allow writing and see that the zero page is replaced. */
    ret = mprotect(buffer->ptr, size, PROT_WRITE | PROT_READ);
    ASSERT_EQ(ret, 0);

    /* Simulate a device writing system memory. */
    ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
    ASSERT_EQ(ret, 0);
    ASSERT_EQ(buffer->cpages, npages);
    ASSERT_EQ(buffer->faults, 1);

    /* Check what the device wrote. */
    for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], i);

    hmm_buffer_free(buffer);
}
0523 
0524 /*
0525  * Check that a device writing an anonymous private mapping
0526  * will copy-on-write if a child process inherits the mapping.
0527  */
TEST_F(hmm, anon_write_child)
{
    struct hmm_buffer *buffer;
    unsigned long npages;
    unsigned long size;
    unsigned long i;
    int *ptr;
    pid_t pid;
    int child_fd;
    int ret;

    npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
    ASSERT_NE(npages, 0);
    size = npages << self->page_shift;

    buffer = malloc(sizeof(*buffer));
    ASSERT_NE(buffer, NULL);

    buffer->fd = -1;
    buffer->size = size;
    buffer->mirror = malloc(size);
    ASSERT_NE(buffer->mirror, NULL);

    buffer->ptr = mmap(NULL, size,
               PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS,
               buffer->fd, 0);
    ASSERT_NE(buffer->ptr, MAP_FAILED);

    /* Initialize buffer->ptr so we can tell if it is written. */
    for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
        ptr[i] = i;

    /* Initialize data that the device will write to buffer->ptr. */
    for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
        ptr[i] = -i;

    pid = fork();
    if (pid == -1)
        /* fork() failed: force an assertion failure. */
        ASSERT_EQ(pid, 0);
    if (pid != 0) {
        /* Parent: wait for the child, then verify COW isolated us. */
        waitpid(pid, &ret, 0);
        ASSERT_EQ(WIFEXITED(ret), 1);

        /* Check that the parent's buffer did not change. */
        for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
            ASSERT_EQ(ptr[i], i);
        return;
    }

    /* Child from here on. Check that we see the parent's values. */
    for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], i);
    for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], -i);

    /* The child process needs its own mirror to its own mm. */
    child_fd = hmm_open(0);
    ASSERT_GE(child_fd, 0);

    /* Simulate a device writing system memory. */
    ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
    ASSERT_EQ(ret, 0);
    ASSERT_EQ(buffer->cpages, npages);
    ASSERT_EQ(buffer->faults, 1);

    /* Check what the device wrote (into the child's COW copy). */
    for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], -i);

    close(child_fd);
    exit(0);
}
0601 
0602 /*
0603  * Check that a device writing an anonymous shared mapping
0604  * will not copy-on-write if a child process inherits the mapping.
0605  */
TEST_F(hmm, anon_write_child_shared)
{
    struct hmm_buffer *buffer;
    unsigned long npages;
    unsigned long size;
    unsigned long i;
    int *ptr;
    pid_t pid;
    int child_fd;
    int ret;

    npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
    ASSERT_NE(npages, 0);
    size = npages << self->page_shift;

    buffer = malloc(sizeof(*buffer));
    ASSERT_NE(buffer, NULL);

    buffer->fd = -1;
    buffer->size = size;
    buffer->mirror = malloc(size);
    ASSERT_NE(buffer->mirror, NULL);

    /* MAP_SHARED: the child's writes must be visible to the parent. */
    buffer->ptr = mmap(NULL, size,
               PROT_READ | PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS,
               buffer->fd, 0);
    ASSERT_NE(buffer->ptr, MAP_FAILED);

    /* Initialize buffer->ptr so we can tell if it is written. */
    for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
        ptr[i] = i;

    /* Initialize data that the device will write to buffer->ptr. */
    for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
        ptr[i] = -i;

    pid = fork();
    if (pid == -1)
        /* fork() failed: force an assertion failure. */
        ASSERT_EQ(pid, 0);
    if (pid != 0) {
        /* Parent: wait for the child, then verify the shared writes. */
        waitpid(pid, &ret, 0);
        ASSERT_EQ(WIFEXITED(ret), 1);

        /* Check that the parent's buffer did change. */
        for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
            ASSERT_EQ(ptr[i], -i);
        return;
    }

    /* Child from here on. Check that we see the parent's values. */
    for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], i);
    for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], -i);

    /* The child process needs its own mirror to its own mm. */
    child_fd = hmm_open(0);
    ASSERT_GE(child_fd, 0);

    /* Simulate a device writing system memory. */
    ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
    ASSERT_EQ(ret, 0);
    ASSERT_EQ(buffer->cpages, npages);
    ASSERT_EQ(buffer->faults, 1);

    /* Check what the device wrote. */
    for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], -i);

    close(child_fd);
    exit(0);
}
0679 
0680 /*
0681  * Write private anonymous huge page.
0682  */
TEST_F(hmm, anon_write_huge)
{
    struct hmm_buffer *buffer;
    unsigned long npages;
    unsigned long size;
    unsigned long i;
    void *old_ptr;
    void *map;
    int *ptr;
    int ret;

    /* Map twice the huge page size so a 2MB-aligned window must exist. */
    size = 2 * TWOMEG;

    buffer = malloc(sizeof(*buffer));
    ASSERT_NE(buffer, NULL);

    buffer->fd = -1;
    buffer->size = size;
    buffer->mirror = malloc(size);
    ASSERT_NE(buffer->mirror, NULL);

    buffer->ptr = mmap(NULL, size,
               PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS,
               buffer->fd, 0);
    ASSERT_NE(buffer->ptr, MAP_FAILED);

    /* Work on the 2MB-aligned window and ask for a transparent hugepage. */
    size = TWOMEG;
    npages = size >> self->page_shift;
    map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
    ret = madvise(map, size, MADV_HUGEPAGE);
    ASSERT_EQ(ret, 0);
    /* Remember the mmap address so hmm_buffer_free() unmaps correctly. */
    old_ptr = buffer->ptr;
    buffer->ptr = map;

    /* Initialize data that the device will write to buffer->ptr. */
    for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
        ptr[i] = i;

    /* Simulate a device writing system memory. */
    ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
    ASSERT_EQ(ret, 0);
    ASSERT_EQ(buffer->cpages, npages);
    ASSERT_EQ(buffer->faults, 1);

    /* Check what the device wrote. */
    for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], i);

    buffer->ptr = old_ptr;
    hmm_buffer_free(buffer);
}
0735 
0736 #ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
0737 /*
0738  * Write huge TLBFS page.
0739  */
TEST_F(hmm, anon_write_hugetlbfs)
{
    struct hmm_buffer *buffer;
    unsigned long npages;
    unsigned long size;
    unsigned long i;
    int *ptr;
    int ret;
    long pagesizes[4];
    int n, idx;

    /* Skip test if we can't allocate a hugetlbfs page. */

    n = gethugepagesizes(pagesizes, 4);
    if (n <= 0)
        SKIP(return, "Huge page size could not be determined");
    /* Pick the smallest huge page size the system reports. */
    for (idx = 0; --n > 0; ) {
        if (pagesizes[n] < pagesizes[idx])
            idx = n;
    }
    size = ALIGN(TWOMEG, pagesizes[idx]);
    npages = size >> self->page_shift;

    buffer = malloc(sizeof(*buffer));
    ASSERT_NE(buffer, NULL);

    /* GHR_STRICT: fail (and skip) rather than fall back to small pages. */
    buffer->ptr = get_hugepage_region(size, GHR_STRICT);
    if (buffer->ptr == NULL) {
        free(buffer);
        SKIP(return, "Huge page could not be allocated");
    }

    buffer->fd = -1;
    buffer->size = size;
    buffer->mirror = malloc(size);
    ASSERT_NE(buffer->mirror, NULL);

    /* Initialize data that the device will write to buffer->ptr. */
    for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
        ptr[i] = i;

    /* Simulate a device writing system memory. */
    ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
    ASSERT_EQ(ret, 0);
    ASSERT_EQ(buffer->cpages, npages);
    ASSERT_EQ(buffer->faults, 1);

    /* Check what the device wrote. */
    for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], i);

    /* The region came from libhugetlbfs, so free it there, not munmap. */
    free_hugepage_region(buffer->ptr);
    buffer->ptr = NULL;
    hmm_buffer_free(buffer);
}
0795 #endif /* LOCAL_CONFIG_HAVE_LIBHUGETLBFS */
0796 
0797 /*
0798  * Read mmap'ed file memory.
0799  */
TEST_F(hmm, file_read)
{
    struct hmm_buffer *buffer;
    unsigned long npages;
    unsigned long size;
    unsigned long i;
    int *ptr;
    int ret;
    int fd;
    ssize_t len;

    npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
    ASSERT_NE(npages, 0);
    size = npages << self->page_shift;

    fd = hmm_create_file(size);
    ASSERT_GE(fd, 0);

    buffer = malloc(sizeof(*buffer));
    ASSERT_NE(buffer, NULL);

    buffer->fd = fd;
    buffer->size = size;
    buffer->mirror = malloc(size);
    ASSERT_NE(buffer->mirror, NULL);

    /* Write initial contents of the file (staged through the mirror). */
    for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
        ptr[i] = i;
    len = pwrite(fd, buffer->mirror, size, 0);
    ASSERT_EQ(len, size);
    /* Clear the mirror so only a real device read can repopulate it. */
    memset(buffer->mirror, 0, size);

    buffer->ptr = mmap(NULL, size,
               PROT_READ,
               MAP_SHARED,
               buffer->fd, 0);
    ASSERT_NE(buffer->ptr, MAP_FAILED);

    /* Simulate a device reading system memory. */
    ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
    ASSERT_EQ(ret, 0);
    ASSERT_EQ(buffer->cpages, npages);
    ASSERT_EQ(buffer->faults, 1);

    /* Check what the device read. */
    for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], i);

    hmm_buffer_free(buffer);
}
0851 
0852 /*
0853  * Write mmap'ed file memory.
0854  */
TEST_F(hmm, file_write)
{
    struct hmm_buffer *buffer;
    unsigned long npages;
    unsigned long size;
    unsigned long i;
    int *ptr;
    int ret;
    int fd;
    ssize_t len;

    npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
    ASSERT_NE(npages, 0);
    size = npages << self->page_shift;

    fd = hmm_create_file(size);
    ASSERT_GE(fd, 0);

    buffer = malloc(sizeof(*buffer));
    ASSERT_NE(buffer, NULL);

    buffer->fd = fd;
    buffer->size = size;
    buffer->mirror = malloc(size);
    ASSERT_NE(buffer->mirror, NULL);

    buffer->ptr = mmap(NULL, size,
               PROT_READ | PROT_WRITE,
               MAP_SHARED,
               buffer->fd, 0);
    ASSERT_NE(buffer->ptr, MAP_FAILED);

    /* Initialize data that the device will write to buffer->ptr. */
    for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
        ptr[i] = i;

    /* Simulate a device writing system memory. */
    ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
    ASSERT_EQ(ret, 0);
    ASSERT_EQ(buffer->cpages, npages);
    ASSERT_EQ(buffer->faults, 1);

    /* Check what the device wrote. */
    for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], i);

    /* MAP_SHARED file mapping: the writes must also reach the file. */
    len = pread(fd, buffer->mirror, size, 0);
    ASSERT_EQ(len, size);
    for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], i);

    hmm_buffer_free(buffer);
}
0909 
0910 /*
0911  * Migrate anonymous memory to device private memory.
0912  */
TEST_F(hmm, migrate)
{
    struct hmm_buffer *buffer;
    unsigned long npages;
    unsigned long size;
    unsigned long i;
    int *ptr;
    int ret;

    npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
    ASSERT_NE(npages, 0);
    size = npages << self->page_shift;

    buffer = malloc(sizeof(*buffer));
    ASSERT_NE(buffer, NULL);

    buffer->fd = -1;
    buffer->size = size;
    buffer->mirror = malloc(size);
    ASSERT_NE(buffer->mirror, NULL);

    buffer->ptr = mmap(NULL, size,
               PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS,
               buffer->fd, 0);
    ASSERT_NE(buffer->ptr, MAP_FAILED);

    /* Initialize buffer in system memory. */
    for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
        ptr[i] = i;

    /* Migrate memory to device; all pages should be migrated. */
    ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
    ASSERT_EQ(ret, 0);
    ASSERT_EQ(buffer->cpages, npages);

    /* The mirror must now hold the data the device copied over. */
    for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], i);

    hmm_buffer_free(buffer);
}
0955 
0956 /*
0957  * Migrate anonymous memory to device private memory and fault some of it back
0958  * to system memory, then try migrating the resulting mix of system and device
0959  * private memory to the device.
0960  */
TEST_F(hmm, migrate_fault)
{
    struct hmm_buffer *buffer;
    unsigned long npages;
    unsigned long size;
    unsigned long i;
    int *ptr;
    int ret;

    npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
    ASSERT_NE(npages, 0);
    size = npages << self->page_shift;

    buffer = malloc(sizeof(*buffer));
    ASSERT_NE(buffer, NULL);

    buffer->fd = -1;
    buffer->size = size;
    buffer->mirror = malloc(size);
    ASSERT_NE(buffer->mirror, NULL);

    buffer->ptr = mmap(NULL, size,
               PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS,
               buffer->fd, 0);
    ASSERT_NE(buffer->ptr, MAP_FAILED);

    /* Initialize buffer in system memory. */
    for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
        ptr[i] = i;

    /* Migrate memory to device. */
    ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
    ASSERT_EQ(ret, 0);
    ASSERT_EQ(buffer->cpages, npages);

    /* Check what the device read. */
    for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], i);

    /*
     * CPU reads of the first half fault those pages back to system
     * memory, leaving a mix of system and device-private pages.
     */
    for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
        ASSERT_EQ(ptr[i], i);

    /* Migrating the mixed range to the device again must still work. */
    ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
    ASSERT_EQ(ret, 0);
    ASSERT_EQ(buffer->cpages, npages);

    /* Check what the device read. */
    for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
        ASSERT_EQ(ptr[i], i);

    hmm_buffer_free(buffer);
}
1016 
1017 /*
1018  * Migrate anonymous shared memory to device private memory.
1019  */
TEST_F(hmm, migrate_shared)
{
    struct hmm_buffer *buffer;
    unsigned long npages;
    unsigned long size;
    int ret;

    npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
    ASSERT_NE(npages, 0);
    size = npages << self->page_shift;

    buffer = malloc(sizeof(*buffer));
    ASSERT_NE(buffer, NULL);

    buffer->fd = -1;
    buffer->size = size;
    buffer->mirror = malloc(size);
    ASSERT_NE(buffer->mirror, NULL);

    buffer->ptr = mmap(NULL, size,
               PROT_READ | PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS,
               buffer->fd, 0);
    ASSERT_NE(buffer->ptr, MAP_FAILED);

    /* Migrating MAP_SHARED memory is expected to fail with -ENOENT. */
    ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
    ASSERT_EQ(ret, -ENOENT);

    hmm_buffer_free(buffer);
}
1051 
1052 /*
1053  * Try to migrate various memory types to device private memory.
1054  */
1055 TEST_F(hmm2, migrate_mixed)
1056 {
1057     struct hmm_buffer *buffer;
1058     unsigned long npages;
1059     unsigned long size;
1060     int *ptr;
1061     unsigned char *p;
1062     int ret;
1063     int val;
1064 
1065     npages = 6;
1066     size = npages << self->page_shift;
1067 
1068     buffer = malloc(sizeof(*buffer));
1069     ASSERT_NE(buffer, NULL);
1070 
1071     buffer->fd = -1;
1072     buffer->size = size;
1073     buffer->mirror = malloc(size);
1074     ASSERT_NE(buffer->mirror, NULL);
1075 
1076     /* Reserve a range of addresses. */
1077     buffer->ptr = mmap(NULL, size,
1078                PROT_NONE,
1079                MAP_PRIVATE | MAP_ANONYMOUS,
1080                buffer->fd, 0);
1081     ASSERT_NE(buffer->ptr, MAP_FAILED);
1082     p = buffer->ptr;
1083 
1084     /* Migrating a protected area should be an error. */
1085     ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
1086     ASSERT_EQ(ret, -EINVAL);
1087 
1088     /* Punch a hole after the first page address. */
1089     ret = munmap(buffer->ptr + self->page_size, self->page_size);
1090     ASSERT_EQ(ret, 0);
1091 
1092     /* We expect an error if the vma doesn't cover the range. */
1093     ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 3);
1094     ASSERT_EQ(ret, -EINVAL);
1095 
1096     /* Page 2 will be a read-only zero page. */
1097     ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
1098                 PROT_READ);
1099     ASSERT_EQ(ret, 0);
1100     ptr = (int *)(buffer->ptr + 2 * self->page_size);
1101     val = *ptr + 3;
1102     ASSERT_EQ(val, 3);
1103 
1104     /* Page 3 will be read-only. */
1105     ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1106                 PROT_READ | PROT_WRITE);
1107     ASSERT_EQ(ret, 0);
1108     ptr = (int *)(buffer->ptr + 3 * self->page_size);
1109     *ptr = val;
1110     ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1111                 PROT_READ);
1112     ASSERT_EQ(ret, 0);
1113 
1114     /* Page 4-5 will be read-write. */
1115     ret = mprotect(buffer->ptr + 4 * self->page_size, 2 * self->page_size,
1116                 PROT_READ | PROT_WRITE);
1117     ASSERT_EQ(ret, 0);
1118     ptr = (int *)(buffer->ptr + 4 * self->page_size);
1119     *ptr = val;
1120     ptr = (int *)(buffer->ptr + 5 * self->page_size);
1121     *ptr = val;
1122 
1123     /* Now try to migrate pages 2-5 to device 1. */
1124     buffer->ptr = p + 2 * self->page_size;
1125     ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 4);
1126     ASSERT_EQ(ret, 0);
1127     ASSERT_EQ(buffer->cpages, 4);
1128 
1129     /* Page 5 won't be migrated to device 0 because it's on device 1. */
1130     buffer->ptr = p + 5 * self->page_size;
1131     ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
1132     ASSERT_EQ(ret, -ENOENT);
1133     buffer->ptr = p;
1134 
1135     buffer->ptr = p;
1136     hmm_buffer_free(buffer);
1137 }
1138 
1139 /*
1140  * Migrate anonymous memory to device memory and back to system memory
1141  * multiple times. In case of private zone configuration, this is done
1142  * through fault pages accessed by CPU. In case of coherent zone configuration,
1143  * the pages from the device should be explicitly migrated back to system memory.
1144  * The reason is Coherent device zone has coherent access by CPU, therefore
1145  * it will not generate any page fault.
1146  */
1147 TEST_F(hmm, migrate_multiple)
1148 {
1149     struct hmm_buffer *buffer;
1150     unsigned long npages;
1151     unsigned long size;
1152     unsigned long i;
1153     unsigned long c;
1154     int *ptr;
1155     int ret;
1156 
1157     npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1158     ASSERT_NE(npages, 0);
1159     size = npages << self->page_shift;
1160 
1161     for (c = 0; c < NTIMES; c++) {
1162         buffer = malloc(sizeof(*buffer));
1163         ASSERT_NE(buffer, NULL);
1164 
1165         buffer->fd = -1;
1166         buffer->size = size;
1167         buffer->mirror = malloc(size);
1168         ASSERT_NE(buffer->mirror, NULL);
1169 
1170         buffer->ptr = mmap(NULL, size,
1171                    PROT_READ | PROT_WRITE,
1172                    MAP_PRIVATE | MAP_ANONYMOUS,
1173                    buffer->fd, 0);
1174         ASSERT_NE(buffer->ptr, MAP_FAILED);
1175 
1176         /* Initialize buffer in system memory. */
1177         for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1178             ptr[i] = i;
1179 
1180         /* Migrate memory to device. */
1181         ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
1182         ASSERT_EQ(ret, 0);
1183         ASSERT_EQ(buffer->cpages, npages);
1184 
1185         /* Check what the device read. */
1186         for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1187             ASSERT_EQ(ptr[i], i);
1188 
1189         /* Migrate back to system memory and check them. */
1190         if (hmm_is_coherent_type(variant->device_number)) {
1191             ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
1192             ASSERT_EQ(ret, 0);
1193             ASSERT_EQ(buffer->cpages, npages);
1194         }
1195 
1196         for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1197             ASSERT_EQ(ptr[i], i);
1198 
1199         hmm_buffer_free(buffer);
1200     }
1201 }
1202 
1203 /*
1204  * Read anonymous memory multiple times.
1205  */
1206 TEST_F(hmm, anon_read_multiple)
1207 {
1208     struct hmm_buffer *buffer;
1209     unsigned long npages;
1210     unsigned long size;
1211     unsigned long i;
1212     unsigned long c;
1213     int *ptr;
1214     int ret;
1215 
1216     npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1217     ASSERT_NE(npages, 0);
1218     size = npages << self->page_shift;
1219 
1220     for (c = 0; c < NTIMES; c++) {
1221         buffer = malloc(sizeof(*buffer));
1222         ASSERT_NE(buffer, NULL);
1223 
1224         buffer->fd = -1;
1225         buffer->size = size;
1226         buffer->mirror = malloc(size);
1227         ASSERT_NE(buffer->mirror, NULL);
1228 
1229         buffer->ptr = mmap(NULL, size,
1230                    PROT_READ | PROT_WRITE,
1231                    MAP_PRIVATE | MAP_ANONYMOUS,
1232                    buffer->fd, 0);
1233         ASSERT_NE(buffer->ptr, MAP_FAILED);
1234 
1235         /* Initialize buffer in system memory. */
1236         for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1237             ptr[i] = i + c;
1238 
1239         /* Simulate a device reading system memory. */
1240         ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
1241                       npages);
1242         ASSERT_EQ(ret, 0);
1243         ASSERT_EQ(buffer->cpages, npages);
1244         ASSERT_EQ(buffer->faults, 1);
1245 
1246         /* Check what the device read. */
1247         for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1248             ASSERT_EQ(ptr[i], i + c);
1249 
1250         hmm_buffer_free(buffer);
1251     }
1252 }
1253 
1254 void *unmap_buffer(void *p)
1255 {
1256     struct hmm_buffer *buffer = p;
1257 
1258     /* Delay for a bit and then unmap buffer while it is being read. */
1259     hmm_nanosleep(hmm_random() % 32000);
1260     munmap(buffer->ptr + buffer->size / 2, buffer->size / 2);
1261     buffer->ptr = NULL;
1262 
1263     return NULL;
1264 }
1265 
1266 /*
1267  * Try reading anonymous memory while it is being unmapped.
1268  */
1269 TEST_F(hmm, anon_teardown)
1270 {
1271     unsigned long npages;
1272     unsigned long size;
1273     unsigned long c;
1274     void *ret;
1275 
1276     npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1277     ASSERT_NE(npages, 0);
1278     size = npages << self->page_shift;
1279 
1280     for (c = 0; c < NTIMES; ++c) {
1281         pthread_t thread;
1282         struct hmm_buffer *buffer;
1283         unsigned long i;
1284         int *ptr;
1285         int rc;
1286 
1287         buffer = malloc(sizeof(*buffer));
1288         ASSERT_NE(buffer, NULL);
1289 
1290         buffer->fd = -1;
1291         buffer->size = size;
1292         buffer->mirror = malloc(size);
1293         ASSERT_NE(buffer->mirror, NULL);
1294 
1295         buffer->ptr = mmap(NULL, size,
1296                    PROT_READ | PROT_WRITE,
1297                    MAP_PRIVATE | MAP_ANONYMOUS,
1298                    buffer->fd, 0);
1299         ASSERT_NE(buffer->ptr, MAP_FAILED);
1300 
1301         /* Initialize buffer in system memory. */
1302         for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1303             ptr[i] = i + c;
1304 
1305         rc = pthread_create(&thread, NULL, unmap_buffer, buffer);
1306         ASSERT_EQ(rc, 0);
1307 
1308         /* Simulate a device reading system memory. */
1309         rc = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
1310                      npages);
1311         if (rc == 0) {
1312             ASSERT_EQ(buffer->cpages, npages);
1313             ASSERT_EQ(buffer->faults, 1);
1314 
1315             /* Check what the device read. */
1316             for (i = 0, ptr = buffer->mirror;
1317                  i < size / sizeof(*ptr);
1318                  ++i)
1319                 ASSERT_EQ(ptr[i], i + c);
1320         }
1321 
1322         pthread_join(thread, &ret);
1323         hmm_buffer_free(buffer);
1324     }
1325 }
1326 
1327 /*
1328  * Test memory snapshot without faulting in pages accessed by the device.
1329  */
1330 TEST_F(hmm, mixedmap)
1331 {
1332     struct hmm_buffer *buffer;
1333     unsigned long npages;
1334     unsigned long size;
1335     unsigned char *m;
1336     int ret;
1337 
1338     npages = 1;
1339     size = npages << self->page_shift;
1340 
1341     buffer = malloc(sizeof(*buffer));
1342     ASSERT_NE(buffer, NULL);
1343 
1344     buffer->fd = -1;
1345     buffer->size = size;
1346     buffer->mirror = malloc(npages);
1347     ASSERT_NE(buffer->mirror, NULL);
1348 
1349 
1350     /* Reserve a range of addresses. */
1351     buffer->ptr = mmap(NULL, size,
1352                PROT_READ | PROT_WRITE,
1353                MAP_PRIVATE,
1354                self->fd, 0);
1355     ASSERT_NE(buffer->ptr, MAP_FAILED);
1356 
1357     /* Simulate a device snapshotting CPU pagetables. */
1358     ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1359     ASSERT_EQ(ret, 0);
1360     ASSERT_EQ(buffer->cpages, npages);
1361 
1362     /* Check what the device saw. */
1363     m = buffer->mirror;
1364     ASSERT_EQ(m[0], HMM_DMIRROR_PROT_READ);
1365 
1366     hmm_buffer_free(buffer);
1367 }
1368 
1369 /*
1370  * Test memory snapshot without faulting in pages accessed by the device.
1371  */
1372 TEST_F(hmm2, snapshot)
1373 {
1374     struct hmm_buffer *buffer;
1375     unsigned long npages;
1376     unsigned long size;
1377     int *ptr;
1378     unsigned char *p;
1379     unsigned char *m;
1380     int ret;
1381     int val;
1382 
1383     npages = 7;
1384     size = npages << self->page_shift;
1385 
1386     buffer = malloc(sizeof(*buffer));
1387     ASSERT_NE(buffer, NULL);
1388 
1389     buffer->fd = -1;
1390     buffer->size = size;
1391     buffer->mirror = malloc(npages);
1392     ASSERT_NE(buffer->mirror, NULL);
1393 
1394     /* Reserve a range of addresses. */
1395     buffer->ptr = mmap(NULL, size,
1396                PROT_NONE,
1397                MAP_PRIVATE | MAP_ANONYMOUS,
1398                buffer->fd, 0);
1399     ASSERT_NE(buffer->ptr, MAP_FAILED);
1400     p = buffer->ptr;
1401 
1402     /* Punch a hole after the first page address. */
1403     ret = munmap(buffer->ptr + self->page_size, self->page_size);
1404     ASSERT_EQ(ret, 0);
1405 
1406     /* Page 2 will be read-only zero page. */
1407     ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
1408                 PROT_READ);
1409     ASSERT_EQ(ret, 0);
1410     ptr = (int *)(buffer->ptr + 2 * self->page_size);
1411     val = *ptr + 3;
1412     ASSERT_EQ(val, 3);
1413 
1414     /* Page 3 will be read-only. */
1415     ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1416                 PROT_READ | PROT_WRITE);
1417     ASSERT_EQ(ret, 0);
1418     ptr = (int *)(buffer->ptr + 3 * self->page_size);
1419     *ptr = val;
1420     ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1421                 PROT_READ);
1422     ASSERT_EQ(ret, 0);
1423 
1424     /* Page 4-6 will be read-write. */
1425     ret = mprotect(buffer->ptr + 4 * self->page_size, 3 * self->page_size,
1426                 PROT_READ | PROT_WRITE);
1427     ASSERT_EQ(ret, 0);
1428     ptr = (int *)(buffer->ptr + 4 * self->page_size);
1429     *ptr = val;
1430 
1431     /* Page 5 will be migrated to device 0. */
1432     buffer->ptr = p + 5 * self->page_size;
1433     ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
1434     ASSERT_EQ(ret, 0);
1435     ASSERT_EQ(buffer->cpages, 1);
1436 
1437     /* Page 6 will be migrated to device 1. */
1438     buffer->ptr = p + 6 * self->page_size;
1439     ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 1);
1440     ASSERT_EQ(ret, 0);
1441     ASSERT_EQ(buffer->cpages, 1);
1442 
1443     /* Simulate a device snapshotting CPU pagetables. */
1444     buffer->ptr = p;
1445     ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1446     ASSERT_EQ(ret, 0);
1447     ASSERT_EQ(buffer->cpages, npages);
1448 
1449     /* Check what the device saw. */
1450     m = buffer->mirror;
1451     ASSERT_EQ(m[0], HMM_DMIRROR_PROT_ERROR);
1452     ASSERT_EQ(m[1], HMM_DMIRROR_PROT_ERROR);
1453     ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
1454     ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
1455     ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
1456     if (!hmm_is_coherent_type(variant->device_number0)) {
1457         ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
1458                 HMM_DMIRROR_PROT_WRITE);
1459         ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
1460     } else {
1461         ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL |
1462                 HMM_DMIRROR_PROT_WRITE);
1463         ASSERT_EQ(m[6], HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE |
1464                 HMM_DMIRROR_PROT_WRITE);
1465     }
1466 
1467     hmm_buffer_free(buffer);
1468 }
1469 
1470 #ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
1471 /*
1472  * Test the hmm_range_fault() HMM_PFN_PMD flag for large pages that
1473  * should be mapped by a large page table entry.
1474  */
1475 TEST_F(hmm, compound)
1476 {
1477     struct hmm_buffer *buffer;
1478     unsigned long npages;
1479     unsigned long size;
1480     int *ptr;
1481     unsigned char *m;
1482     int ret;
1483     long pagesizes[4];
1484     int n, idx;
1485     unsigned long i;
1486 
1487     /* Skip test if we can't allocate a hugetlbfs page. */
1488 
1489     n = gethugepagesizes(pagesizes, 4);
1490     if (n <= 0)
1491         return;
1492     for (idx = 0; --n > 0; ) {
1493         if (pagesizes[n] < pagesizes[idx])
1494             idx = n;
1495     }
1496     size = ALIGN(TWOMEG, pagesizes[idx]);
1497     npages = size >> self->page_shift;
1498 
1499     buffer = malloc(sizeof(*buffer));
1500     ASSERT_NE(buffer, NULL);
1501 
1502     buffer->ptr = get_hugepage_region(size, GHR_STRICT);
1503     if (buffer->ptr == NULL) {
1504         free(buffer);
1505         return;
1506     }
1507 
1508     buffer->size = size;
1509     buffer->mirror = malloc(npages);
1510     ASSERT_NE(buffer->mirror, NULL);
1511 
1512     /* Initialize the pages the device will snapshot in buffer->ptr. */
1513     for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1514         ptr[i] = i;
1515 
1516     /* Simulate a device snapshotting CPU pagetables. */
1517     ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1518     ASSERT_EQ(ret, 0);
1519     ASSERT_EQ(buffer->cpages, npages);
1520 
1521     /* Check what the device saw. */
1522     m = buffer->mirror;
1523     for (i = 0; i < npages; ++i)
1524         ASSERT_EQ(m[i], HMM_DMIRROR_PROT_WRITE |
1525                 HMM_DMIRROR_PROT_PMD);
1526 
1527     /* Make the region read-only. */
1528     ret = mprotect(buffer->ptr, size, PROT_READ);
1529     ASSERT_EQ(ret, 0);
1530 
1531     /* Simulate a device snapshotting CPU pagetables. */
1532     ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1533     ASSERT_EQ(ret, 0);
1534     ASSERT_EQ(buffer->cpages, npages);
1535 
1536     /* Check what the device saw. */
1537     m = buffer->mirror;
1538     for (i = 0; i < npages; ++i)
1539         ASSERT_EQ(m[i], HMM_DMIRROR_PROT_READ |
1540                 HMM_DMIRROR_PROT_PMD);
1541 
1542     free_hugepage_region(buffer->ptr);
1543     buffer->ptr = NULL;
1544     hmm_buffer_free(buffer);
1545 }
1546 #endif /* LOCAL_CONFIG_HAVE_LIBHUGETLBFS */
1547 
1548 /*
1549  * Test two devices reading the same memory (double mapped).
1550  */
1551 TEST_F(hmm2, double_map)
1552 {
1553     struct hmm_buffer *buffer;
1554     unsigned long npages;
1555     unsigned long size;
1556     unsigned long i;
1557     int *ptr;
1558     int ret;
1559 
1560     npages = 6;
1561     size = npages << self->page_shift;
1562 
1563     buffer = malloc(sizeof(*buffer));
1564     ASSERT_NE(buffer, NULL);
1565 
1566     buffer->fd = -1;
1567     buffer->size = size;
1568     buffer->mirror = malloc(npages);
1569     ASSERT_NE(buffer->mirror, NULL);
1570 
1571     /* Reserve a range of addresses. */
1572     buffer->ptr = mmap(NULL, size,
1573                PROT_READ | PROT_WRITE,
1574                MAP_PRIVATE | MAP_ANONYMOUS,
1575                buffer->fd, 0);
1576     ASSERT_NE(buffer->ptr, MAP_FAILED);
1577 
1578     /* Initialize buffer in system memory. */
1579     for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1580         ptr[i] = i;
1581 
1582     /* Make region read-only. */
1583     ret = mprotect(buffer->ptr, size, PROT_READ);
1584     ASSERT_EQ(ret, 0);
1585 
1586     /* Simulate device 0 reading system memory. */
1587     ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
1588     ASSERT_EQ(ret, 0);
1589     ASSERT_EQ(buffer->cpages, npages);
1590     ASSERT_EQ(buffer->faults, 1);
1591 
1592     /* Check what the device read. */
1593     for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1594         ASSERT_EQ(ptr[i], i);
1595 
1596     /* Simulate device 1 reading system memory. */
1597     ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_READ, buffer, npages);
1598     ASSERT_EQ(ret, 0);
1599     ASSERT_EQ(buffer->cpages, npages);
1600     ASSERT_EQ(buffer->faults, 1);
1601 
1602     /* Check what the device read. */
1603     for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1604         ASSERT_EQ(ptr[i], i);
1605 
1606     /* Migrate pages to device 1 and try to read from device 0. */
1607     ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
1608     ASSERT_EQ(ret, 0);
1609     ASSERT_EQ(buffer->cpages, npages);
1610 
1611     ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
1612     ASSERT_EQ(ret, 0);
1613     ASSERT_EQ(buffer->cpages, npages);
1614     ASSERT_EQ(buffer->faults, 1);
1615 
1616     /* Check what device 0 read. */
1617     for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1618         ASSERT_EQ(ptr[i], i);
1619 
1620     hmm_buffer_free(buffer);
1621 }
1622 
1623 /*
1624  * Basic check of exclusive faulting.
1625  */
1626 TEST_F(hmm, exclusive)
1627 {
1628     struct hmm_buffer *buffer;
1629     unsigned long npages;
1630     unsigned long size;
1631     unsigned long i;
1632     int *ptr;
1633     int ret;
1634 
1635     npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1636     ASSERT_NE(npages, 0);
1637     size = npages << self->page_shift;
1638 
1639     buffer = malloc(sizeof(*buffer));
1640     ASSERT_NE(buffer, NULL);
1641 
1642     buffer->fd = -1;
1643     buffer->size = size;
1644     buffer->mirror = malloc(size);
1645     ASSERT_NE(buffer->mirror, NULL);
1646 
1647     buffer->ptr = mmap(NULL, size,
1648                PROT_READ | PROT_WRITE,
1649                MAP_PRIVATE | MAP_ANONYMOUS,
1650                buffer->fd, 0);
1651     ASSERT_NE(buffer->ptr, MAP_FAILED);
1652 
1653     /* Initialize buffer in system memory. */
1654     for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1655         ptr[i] = i;
1656 
1657     /* Map memory exclusively for device access. */
1658     ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
1659     ASSERT_EQ(ret, 0);
1660     ASSERT_EQ(buffer->cpages, npages);
1661 
1662     /* Check what the device read. */
1663     for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1664         ASSERT_EQ(ptr[i], i);
1665 
1666     /* Fault pages back to system memory and check them. */
1667     for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1668         ASSERT_EQ(ptr[i]++, i);
1669 
1670     for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1671         ASSERT_EQ(ptr[i], i+1);
1672 
1673     /* Check atomic access revoked */
1674     ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_CHECK_EXCLUSIVE, buffer, npages);
1675     ASSERT_EQ(ret, 0);
1676 
1677     hmm_buffer_free(buffer);
1678 }
1679 
1680 TEST_F(hmm, exclusive_mprotect)
1681 {
1682     struct hmm_buffer *buffer;
1683     unsigned long npages;
1684     unsigned long size;
1685     unsigned long i;
1686     int *ptr;
1687     int ret;
1688 
1689     npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1690     ASSERT_NE(npages, 0);
1691     size = npages << self->page_shift;
1692 
1693     buffer = malloc(sizeof(*buffer));
1694     ASSERT_NE(buffer, NULL);
1695 
1696     buffer->fd = -1;
1697     buffer->size = size;
1698     buffer->mirror = malloc(size);
1699     ASSERT_NE(buffer->mirror, NULL);
1700 
1701     buffer->ptr = mmap(NULL, size,
1702                PROT_READ | PROT_WRITE,
1703                MAP_PRIVATE | MAP_ANONYMOUS,
1704                buffer->fd, 0);
1705     ASSERT_NE(buffer->ptr, MAP_FAILED);
1706 
1707     /* Initialize buffer in system memory. */
1708     for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1709         ptr[i] = i;
1710 
1711     /* Map memory exclusively for device access. */
1712     ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
1713     ASSERT_EQ(ret, 0);
1714     ASSERT_EQ(buffer->cpages, npages);
1715 
1716     /* Check what the device read. */
1717     for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1718         ASSERT_EQ(ptr[i], i);
1719 
1720     ret = mprotect(buffer->ptr, size, PROT_READ);
1721     ASSERT_EQ(ret, 0);
1722 
1723     /* Simulate a device writing system memory. */
1724     ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
1725     ASSERT_EQ(ret, -EPERM);
1726 
1727     hmm_buffer_free(buffer);
1728 }
1729 
1730 /*
1731  * Check copy-on-write works.
1732  */
1733 TEST_F(hmm, exclusive_cow)
1734 {
1735     struct hmm_buffer *buffer;
1736     unsigned long npages;
1737     unsigned long size;
1738     unsigned long i;
1739     int *ptr;
1740     int ret;
1741 
1742     npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1743     ASSERT_NE(npages, 0);
1744     size = npages << self->page_shift;
1745 
1746     buffer = malloc(sizeof(*buffer));
1747     ASSERT_NE(buffer, NULL);
1748 
1749     buffer->fd = -1;
1750     buffer->size = size;
1751     buffer->mirror = malloc(size);
1752     ASSERT_NE(buffer->mirror, NULL);
1753 
1754     buffer->ptr = mmap(NULL, size,
1755                PROT_READ | PROT_WRITE,
1756                MAP_PRIVATE | MAP_ANONYMOUS,
1757                buffer->fd, 0);
1758     ASSERT_NE(buffer->ptr, MAP_FAILED);
1759 
1760     /* Initialize buffer in system memory. */
1761     for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1762         ptr[i] = i;
1763 
1764     /* Map memory exclusively for device access. */
1765     ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
1766     ASSERT_EQ(ret, 0);
1767     ASSERT_EQ(buffer->cpages, npages);
1768 
1769     fork();
1770 
1771     /* Fault pages back to system memory and check them. */
1772     for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1773         ASSERT_EQ(ptr[i]++, i);
1774 
1775     for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1776         ASSERT_EQ(ptr[i], i+1);
1777 
1778     hmm_buffer_free(buffer);
1779 }
1780 
1781 static int gup_test_exec(int gup_fd, unsigned long addr, int cmd,
1782              int npages, int size, int flags)
1783 {
1784     struct gup_test gup = {
1785         .nr_pages_per_call  = npages,
1786         .addr           = addr,
1787         .gup_flags      = FOLL_WRITE | flags,
1788         .size           = size,
1789     };
1790 
1791     if (ioctl(gup_fd, cmd, &gup)) {
1792         perror("ioctl on error\n");
1793         return errno;
1794     }
1795 
1796     return 0;
1797 }
1798 
1799 /*
1800  * Test get user device pages through gup_test. Setting PIN_LONGTERM flag.
1801  * This should trigger a migration back to system memory for both, private
1802  * and coherent type pages.
1803  * This test makes use of gup_test module. Make sure GUP_TEST_CONFIG is added
1804  * to your configuration before you run it.
1805  */
1806 TEST_F(hmm, hmm_gup_test)
1807 {
1808     struct hmm_buffer *buffer;
1809     int gup_fd;
1810     unsigned long npages;
1811     unsigned long size;
1812     unsigned long i;
1813     int *ptr;
1814     int ret;
1815     unsigned char *m;
1816 
1817     gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR);
1818     if (gup_fd == -1)
1819         SKIP(return, "Skipping test, could not find gup_test driver");
1820 
1821     npages = 4;
1822     size = npages << self->page_shift;
1823 
1824     buffer = malloc(sizeof(*buffer));
1825     ASSERT_NE(buffer, NULL);
1826 
1827     buffer->fd = -1;
1828     buffer->size = size;
1829     buffer->mirror = malloc(size);
1830     ASSERT_NE(buffer->mirror, NULL);
1831 
1832     buffer->ptr = mmap(NULL, size,
1833                PROT_READ | PROT_WRITE,
1834                MAP_PRIVATE | MAP_ANONYMOUS,
1835                buffer->fd, 0);
1836     ASSERT_NE(buffer->ptr, MAP_FAILED);
1837 
1838     /* Initialize buffer in system memory. */
1839     for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1840         ptr[i] = i;
1841 
1842     /* Migrate memory to device. */
1843     ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
1844     ASSERT_EQ(ret, 0);
1845     ASSERT_EQ(buffer->cpages, npages);
1846     /* Check what the device read. */
1847     for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1848         ASSERT_EQ(ptr[i], i);
1849 
1850     ASSERT_EQ(gup_test_exec(gup_fd,
1851                 (unsigned long)buffer->ptr,
1852                 GUP_BASIC_TEST, 1, self->page_size, 0), 0);
1853     ASSERT_EQ(gup_test_exec(gup_fd,
1854                 (unsigned long)buffer->ptr + 1 * self->page_size,
1855                 GUP_FAST_BENCHMARK, 1, self->page_size, 0), 0);
1856     ASSERT_EQ(gup_test_exec(gup_fd,
1857                 (unsigned long)buffer->ptr + 2 * self->page_size,
1858                 PIN_FAST_BENCHMARK, 1, self->page_size, FOLL_LONGTERM), 0);
1859     ASSERT_EQ(gup_test_exec(gup_fd,
1860                 (unsigned long)buffer->ptr + 3 * self->page_size,
1861                 PIN_LONGTERM_BENCHMARK, 1, self->page_size, 0), 0);
1862 
1863     /* Take snapshot to CPU pagetables */
1864     ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1865     ASSERT_EQ(ret, 0);
1866     ASSERT_EQ(buffer->cpages, npages);
1867     m = buffer->mirror;
1868     if (hmm_is_coherent_type(variant->device_number)) {
1869         ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[0]);
1870         ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[1]);
1871     } else {
1872         ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[0]);
1873         ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[1]);
1874     }
1875     ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[2]);
1876     ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[3]);
1877     /*
1878      * Check again the content on the pages. Make sure there's no
1879      * corrupted data.
1880      */
1881     for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1882         ASSERT_EQ(ptr[i], i);
1883 
1884     close(gup_fd);
1885     hmm_buffer_free(buffer);
1886 }
1887 
1888 /*
1889  * Test copy-on-write in device pages.
1890  * In case of writing to COW private page(s), a page fault will migrate pages
1891  * back to system memory first. Then, these pages will be duplicated. In case
1892  * of COW device coherent type, pages are duplicated directly from device
1893  * memory.
1894  */
1895 TEST_F(hmm, hmm_cow_in_device)
1896 {
1897     struct hmm_buffer *buffer;
1898     unsigned long npages;
1899     unsigned long size;
1900     unsigned long i;
1901     int *ptr;
1902     int ret;
1903     unsigned char *m;
1904     pid_t pid;
1905     int status;
1906 
1907     npages = 4;
1908     size = npages << self->page_shift;
1909 
1910     buffer = malloc(sizeof(*buffer));
1911     ASSERT_NE(buffer, NULL);
1912 
1913     buffer->fd = -1;
1914     buffer->size = size;
1915     buffer->mirror = malloc(size);
1916     ASSERT_NE(buffer->mirror, NULL);
1917 
1918     buffer->ptr = mmap(NULL, size,
1919                PROT_READ | PROT_WRITE,
1920                MAP_PRIVATE | MAP_ANONYMOUS,
1921                buffer->fd, 0);
1922     ASSERT_NE(buffer->ptr, MAP_FAILED);
1923 
1924     /* Initialize buffer in system memory. */
1925     for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1926         ptr[i] = i;
1927 
1928     /* Migrate memory to device. */
1929 
1930     ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
1931     ASSERT_EQ(ret, 0);
1932     ASSERT_EQ(buffer->cpages, npages);
1933 
1934     pid = fork();
1935     if (pid == -1)
1936         ASSERT_EQ(pid, 0);
1937     if (!pid) {
1938         /* Child process waitd for SIGTERM from the parent. */
1939         while (1) {
1940         }
1941         perror("Should not reach this\n");
1942         exit(0);
1943     }
1944     /* Parent process writes to COW pages(s) and gets a
1945      * new copy in system. In case of device private pages,
1946      * this write causes a migration to system mem first.
1947      */
1948     for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1949         ptr[i] = i;
1950 
1951     /* Terminate child and wait */
1952     EXPECT_EQ(0, kill(pid, SIGTERM));
1953     EXPECT_EQ(pid, waitpid(pid, &status, 0));
1954     EXPECT_NE(0, WIFSIGNALED(status));
1955     EXPECT_EQ(SIGTERM, WTERMSIG(status));
1956 
1957     /* Take snapshot to CPU pagetables */
1958     ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1959     ASSERT_EQ(ret, 0);
1960     ASSERT_EQ(buffer->cpages, npages);
1961     m = buffer->mirror;
1962     for (i = 0; i < npages; i++)
1963         ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[i]);
1964 
1965     hmm_buffer_free(buffer);
1966 }
1967 TEST_HARNESS_MAIN