// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN: Memory mapping management
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "acrn_drv.h"

/*
 * Wrap a single region operation in a one-entry batch and ask the
 * hypervisor to apply it. Both @region and the batch come from kzalloc(),
 * so virt_to_phys() yields valid physical addresses for the hypercall.
 */
static int modify_region(struct acrn_vm *vm, struct vm_memory_region_op *region)
{
	struct vm_memory_region_batch *regions;
	int ret;

	regions = kzalloc(sizeof(*regions), GFP_KERNEL);
	if (!regions)
		return -ENOMEM;

	regions->vmid = vm->vmid;
	regions->regions_num = 1;
	regions->regions_gpa = virt_to_phys(region);

	ret = hcall_set_memory_regions(virt_to_phys(regions));
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Failed to set memory region for VM[%u]!\n", vm->vmid);

	kfree(regions);
	return ret;
}
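
/*
 * For reference, a sketch of the batch descriptor consumed by
 * hcall_set_memory_regions(). The authoritative definition lives in
 * acrn_drv.h; the field layout below is inferred from its use in this
 * file and is illustrative only:
 *
 *	struct vm_memory_region_batch {
 *		u16				vmid;
 *		u32				regions_num;
 *		u64				regions_gpa;	physical address
 *							of regions_op[]
 *		struct vm_memory_region_op	regions_op[];	flexible array
 *	};
 */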

/**
 * acrn_mm_region_add() - Set up the EPT mapping of a memory region.
 * @vm:			Instance of struct acrn_vm.
 * @user_gpa:		A GPA of the User VM.
 * @service_gpa:	A GPA of the Service VM.
 * @size:		Size of the region.
 * @mem_type:		Combination of ACRN_MEM_TYPE_*.
 * @mem_access_right:	Combination of ACRN_MEM_ACCESS_*.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_mm_region_add(struct acrn_vm *vm, u64 user_gpa, u64 service_gpa,
		       u64 size, u32 mem_type, u32 mem_access_right)
{
	struct vm_memory_region_op *region;
	int ret = 0;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->type = ACRN_MEM_REGION_ADD;
	region->user_vm_pa = user_gpa;
	region->service_vm_pa = service_gpa;
	region->size = size;
	region->attr = ((mem_type & ACRN_MEM_TYPE_MASK) |
			(mem_access_right & ACRN_MEM_ACCESS_RIGHT_MASK));
	ret = modify_region(vm, region);

	dev_dbg(acrn_dev.this_device,
		"%s: user-GPA[%pK] service-GPA[%pK] size[0x%llx].\n",
		__func__, (void *)user_gpa, (void *)service_gpa, size);
	kfree(region);
	return ret;
}
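
/*
 * Usage sketch (illustrative only, not a call site in this driver): map a
 * single uncached page of a hypothetical device, Service VM PA 0xfed00000,
 * into a User VM at GPA 0xe0000000. ACRN_MEM_ACCESS_RWX is assumed to be
 * provided by the ACRN UAPI header:
 *
 *	ret = acrn_mm_region_add(vm, 0xe0000000UL, 0xfed00000UL, PAGE_SIZE,
 *				 ACRN_MEM_TYPE_UC, ACRN_MEM_ACCESS_RWX);
 */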

/**
 * acrn_mm_region_del() - Delete the EPT mapping of a memory region.
 * @vm:		Instance of struct acrn_vm.
 * @user_gpa:	A GPA of the User VM.
 * @size:	Size of the region.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_mm_region_del(struct acrn_vm *vm, u64 user_gpa, u64 size)
{
	struct vm_memory_region_op *region;
	int ret = 0;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->type = ACRN_MEM_REGION_DEL;
	region->user_vm_pa = user_gpa;
	region->service_vm_pa = 0UL;
	region->size = size;
	region->attr = 0U;

	ret = modify_region(vm, region);

	dev_dbg(acrn_dev.this_device, "%s: user-GPA[%pK] size[0x%llx].\n",
		__func__, (void *)user_gpa, size);
	kfree(region);
	return ret;
}
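
/*
 * Usage sketch (illustrative only): tear down the mapping created in the
 * example above by passing the same User VM GPA and size:
 *
 *	ret = acrn_mm_region_del(vm, 0xe0000000UL, PAGE_SIZE);
 */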

/**
 * acrn_vm_memseg_map() - Map a memory segment of a User VM.
 * @vm:		Instance of struct acrn_vm.
 * @memmap:	Info of the memory region.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_vm_memseg_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	int ret;

	if (memmap->type == ACRN_MEMMAP_RAM)
		return acrn_vm_ram_map(vm, memmap);

	if (memmap->type != ACRN_MEMMAP_MMIO) {
		dev_dbg(acrn_dev.this_device,
			"Invalid memmap type: %u\n", memmap->type);
		return -EINVAL;
	}

	ret = acrn_mm_region_add(vm, memmap->user_vm_pa,
				 memmap->service_vm_pa, memmap->len,
				 ACRN_MEM_TYPE_UC, memmap->attr);
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Add memory region failed, VM[%u]!\n", vm->vmid);

	return ret;
}
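
/*
 * Caller context sketch (assumed, based on the HSM ioctl interface rather
 * than code in this file): the ioctl handler copies a struct acrn_vm_memmap
 * from userspace and dispatches here, roughly:
 *
 *	struct acrn_vm_memmap memmap;
 *
 *	if (copy_from_user(&memmap, uptr, sizeof(memmap)))
 *		return -EFAULT;
 *	ret = acrn_vm_memseg_map(vm, &memmap);
 */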

/**
 * acrn_vm_memseg_unmap() - Unmap a memory segment of a User VM.
 * @vm:		Instance of struct acrn_vm.
 * @memmap:	Info of the memory region.
 *
 * Only MMIO regions are unmapped individually; RAM mappings are released
 * in one go by acrn_vm_all_ram_unmap().
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	int ret;

	if (memmap->type != ACRN_MEMMAP_MMIO) {
		dev_dbg(acrn_dev.this_device,
			"Invalid memmap type: %u\n", memmap->type);
		return -EINVAL;
	}

	ret = acrn_mm_region_del(vm, memmap->user_vm_pa, memmap->len);
	if (ret < 0)
		dev_dbg(acrn_dev.this_device,
			"Del memory region failed, VM[%u]!\n", vm->vmid);

	return ret;
}

/**
 * acrn_vm_ram_map() - Create a RAM EPT mapping of a User VM.
 * @vm:		Instance of struct acrn_vm.
 * @memmap:	Info of the memory region.
 *
 * Return: 0 on success, <0 on error.
 */
int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
	struct vm_memory_region_batch *regions_info;
	int nr_pages, i = 0, order, nr_regions = 0;
	struct vm_memory_mapping *region_mapping;
	struct vm_memory_region_op *vm_region;
	struct page **pages = NULL, *page;
	void *remap_vaddr;
	int ret, pinned;
	u64 user_vm_pa;
	unsigned long pfn;
	struct vm_area_struct *vma;

	if (!vm || !memmap)
		return -EINVAL;

	mmap_read_lock(current->mm);
	vma = vma_lookup(current->mm, memmap->vma_base);
	if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
		/*
		 * A VM_PFNMAP VMA is already backed by contiguous physical
		 * memory, so map it directly without pinning pages.
		 */
		if ((memmap->vma_base + memmap->len) > vma->vm_end) {
			mmap_read_unlock(current->mm);
			return -EINVAL;
		}

		ret = follow_pfn(vma, memmap->vma_base, &pfn);
		mmap_read_unlock(current->mm);
		if (ret < 0) {
			dev_dbg(acrn_dev.this_device,
				"Failed to lookup PFN at VMA:%pK.\n",
				(void *)memmap->vma_base);
			return ret;
		}

		return acrn_mm_region_add(vm, memmap->user_vm_pa,
					  PFN_PHYS(pfn), memmap->len,
					  ACRN_MEM_TYPE_WB, memmap->attr);
	}
	mmap_read_unlock(current->mm);

	/* Get the page number of the map region */
	nr_pages = memmap->len >> PAGE_SHIFT;
	pages = vzalloc(array_size(nr_pages, sizeof(*pages)));
	if (!pages)
		return -ENOMEM;

	/* Pin the pages of the user memory region for long-term use */
	pinned = pin_user_pages_fast(memmap->vma_base,
				     nr_pages, FOLL_WRITE | FOLL_LONGTERM,
				     pages);
	if (pinned < 0) {
		ret = pinned;
		goto free_pages;
	} else if (pinned != nr_pages) {
		ret = -EFAULT;
		goto put_pages;
	}

	/* Create a kernel map for the map region */
	remap_vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!remap_vaddr) {
		ret = -ENOMEM;
		goto put_pages;
	}

	/* Record Service VM va <-> User VM pa mapping */
	mutex_lock(&vm->regions_mapping_lock);
	if (vm->regions_mapping_count < ACRN_MEM_MAPPING_MAX) {
		region_mapping = &vm->regions_mapping[vm->regions_mapping_count];
		region_mapping->pages = pages;
		region_mapping->npages = nr_pages;
		region_mapping->size = memmap->len;
		region_mapping->service_vm_va = remap_vaddr;
		region_mapping->user_vm_pa = memmap->user_vm_pa;
		vm->regions_mapping_count++;
	} else {
		dev_warn(acrn_dev.this_device,
			 "Run out of memory mapping slots!\n");
		ret = -ENOMEM;
		mutex_unlock(&vm->regions_mapping_lock);
		goto unmap_no_count;
	}
	mutex_unlock(&vm->regions_mapping_lock);

	/*
	 * Calculate count of vm_memory_region_op. A compound (e.g. THP)
	 * page lets one region cover 1 << order base pages at a time.
	 */
	while (i < nr_pages) {
		page = pages[i];
		VM_BUG_ON_PAGE(PageTail(page), page);
		order = compound_order(page);
		nr_regions++;
		i += 1 << order;
	}
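
	/*
	 * Example of the counting above (assumes THP backing): a 4 MiB
	 * pinned range backed by two 2 MiB compound pages gives order = 9
	 * on each head page, so nr_regions ends up as 2 rather than 1024,
	 * keeping the batch allocated below small.
	 */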

	/* Prepare the vm_memory_region_batch */
	regions_info = kzalloc(struct_size(regions_info, regions_op,
					   nr_regions), GFP_KERNEL);
	if (!regions_info) {
		ret = -ENOMEM;
		goto unmap_kernel_map;
	}

	/* Fill each vm_memory_region_op */
	vm_region = regions_info->regions_op;
	regions_info->vmid = vm->vmid;
	regions_info->regions_num = nr_regions;
	regions_info->regions_gpa = virt_to_phys(vm_region);
	user_vm_pa = memmap->user_vm_pa;
	i = 0;
	while (i < nr_pages) {
		u32 region_size;

		page = pages[i];
		VM_BUG_ON_PAGE(PageTail(page), page);
		order = compound_order(page);
		region_size = PAGE_SIZE << order;
		vm_region->type = ACRN_MEM_REGION_ADD;
		vm_region->user_vm_pa = user_vm_pa;
		vm_region->service_vm_pa = page_to_phys(page);
		vm_region->size = region_size;
		vm_region->attr = (ACRN_MEM_TYPE_WB & ACRN_MEM_TYPE_MASK) |
				  (memmap->attr & ACRN_MEM_ACCESS_RIGHT_MASK);

		vm_region++;
		user_vm_pa += region_size;
		i += 1 << order;
	}

	/* Inform the ACRN Hypervisor to set up the EPT mappings */
	ret = hcall_set_memory_regions(virt_to_phys(regions_info));
	if (ret < 0) {
		dev_dbg(acrn_dev.this_device,
			"Failed to set regions, VM[%u]!\n", vm->vmid);
		goto unset_region;
	}
	kfree(regions_info);

	dev_dbg(acrn_dev.this_device,
		"%s: VM[%u] service-GVA[%pK] user-GPA[%pK] size[0x%llx]\n",
		__func__, vm->vmid,
		remap_vaddr, (void *)memmap->user_vm_pa, memmap->len);
	return ret;

unset_region:
	kfree(regions_info);
unmap_kernel_map:
	mutex_lock(&vm->regions_mapping_lock);
	vm->regions_mapping_count--;
	mutex_unlock(&vm->regions_mapping_lock);
unmap_no_count:
	vunmap(remap_vaddr);
put_pages:
	for (i = 0; i < pinned; i++)
		unpin_user_page(pages[i]);
free_pages:
	vfree(pages);
	return ret;
}
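
/*
 * Userspace-side sketch (illustrative; ioctl and field names assumed from
 * the ACRN UAPI header): a VMM typically backs User VM RAM with an
 * anonymous mmap() and hands the address to this path via the memseg ioctl:
 *
 *	struct acrn_vm_memmap memmap = { 0 };
 *	void *ram = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	memmap.type = ACRN_MEMMAP_RAM;
 *	memmap.vma_base = (__u64)(uintptr_t)ram;
 *	memmap.user_vm_pa = guest_gpa;
 *	memmap.len = len;
 *	ioctl(hsm_fd, ACRN_IOCTL_SET_MEMSEG, &memmap);
 */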

/**
 * acrn_vm_all_ram_unmap() - Destroy all RAM EPT mappings of a User VM.
 * @vm:	The User VM.
 */
void acrn_vm_all_ram_unmap(struct acrn_vm *vm)
{
	struct vm_memory_mapping *region_mapping;
	int i, j;

	mutex_lock(&vm->regions_mapping_lock);
	for (i = 0; i < vm->regions_mapping_count; i++) {
		region_mapping = &vm->regions_mapping[i];
		/* Drop the kernel map before releasing the page pins */
		vunmap(region_mapping->service_vm_va);
		for (j = 0; j < region_mapping->npages; j++)
			unpin_user_page(region_mapping->pages[j]);
		vfree(region_mapping->pages);
	}
	mutex_unlock(&vm->regions_mapping_lock);
}