// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

struct uv_info __bootdata_preserved(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);
EXPORT_SYMBOL(uv_info);

static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}

void __init setup_uv(void)
{
	void *uv_stor_base;

	if (!is_prot_virt_host())
		return;

	uv_stor_base = memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
static int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy_page(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb)) {
		/*
		 * Older firmware uses 107/d as an indication of a non secure
		 * page. Let us emulate the newer variant (no-op).
		 */
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_destroy_owned_page(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_destroy_page(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_convert_owned_from_secure(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_convert_from_secure(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}

/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * page can not be a huge page for example.
 */
static int expected_page_refs(struct page *page)
{
	int res;

	res = page_mapcount(page);
	if (PageSwapCache(page)) {
		res++;
	} else if (page_mapping(page)) {
		res++;
		if (page_has_private(page))
			res++;
	}
	return res;
}
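
/*
 * Worked example (illustrative, not from the original source): a page
 * mapped into two user address spaces (mapcount == 2) that also sits in
 * the swap cache has an expected refcount of 3: one per mapping plus one
 * for the swap cache reference. Any refcount beyond that value means
 * someone else holds a transient pin on the page.
 */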

static int make_secure_pte(pte_t *ptep, unsigned long addr,
			   struct page *exp_page, struct uv_cb_header *uvcb)
{
	pte_t entry = READ_ONCE(*ptep);
	struct page *page;
	int expected, cc = 0;

	if (!pte_present(entry))
		return -ENXIO;
	if (pte_val(entry) & _PAGE_INVALID)
		return -ENXIO;

	page = pte_page(entry);
	if (page != exp_page)
		return -ENXIO;
	if (PageWriteback(page))
		return -EAGAIN;
	expected = expected_page_refs(page);
	if (!page_ref_freeze(page, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &page->flags);
	/*
	 * If the UVC does not succeed or fail immediately, we don't want to
	 * loop for long, or we might get stall notifications.
	 * On the other hand, this is a complex scenario and we are holding a lot of
	 * locks, so we can't easily sleep and reschedule. We try only once,
	 * and if the UVC returned busy or partial completion, we return
	 * -EAGAIN and we let the callers deal with it.
	 */
	cc = __uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
	/*
	 * If the UVC was busy or only partially completed, let the caller
	 * retry (-EAGAIN). rc 0x10a is mapped to -ENXIO so that the caller
	 * faults the page in and retries; any other failure is -EINVAL.
	 */
	if (cc == UVC_CC_OK)
		return 0;
	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
		return -EAGAIN;
	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}

/**
 * should_export_before_import - Determine whether an export is needed
 * before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although considered as one, the Unpin Page UVC is not an actual import,
 * so it is not affected.
 *
 * No export is needed also when there is only one protected VM, because the
 * page cannot belong to the wrong VM in that case (there is no "other VM"
 * it can belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
		return false;
	return atomic_read(&mm->context.protected_count) > 1;
}
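
/*
 * Illustrative example (not from the original source): with two protected
 * configurations attached to the same mm (protected_count == 2), an
 * import-like UVC for a page is preceded by an export, because the page
 * might still be owned by the other configuration. With a single
 * protected guest there is no "other VM" the page could belong to, so
 * the export is skipped.
 */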

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it's brought in the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct page *page;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest is thus never reaching secure mode. If
	 * userspace is playing dirty tricks with mapping huge pages later
	 * on this will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	page = follow_page(vma, uaddr, FOLL_WRITE);
	if (IS_ERR_OR_NULL(page))
		goto out;

	lock_page(page);
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	if (should_export_before_import(uvcb, gmap->mm))
		uv_convert_from_secure(page_to_phys(page));
	rc = make_secure_pte(ptep, uaddr, page, uvcb);
	pte_unmap_unlock(ptep, ptelock);
	unlock_page(page);
out:
	mmap_read_unlock(gmap->mm);

	if (rc == -EAGAIN) {
		/*
		 * If we are here because the UVC returned busy or partial
		 * completion, this is just a useless check, but it is safe.
		 */
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the page refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the page refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);

int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
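
/*
 * Usage sketch (hypothetical caller, for illustration only; the real
 * callers are the KVM intercept handlers): gmap_make_secure() reports
 * transient conditions as -EAGAIN, so a caller is expected to retry:
 *
 *	do {
 *		rc = gmap_convert_to_secure(gmap, gaddr);
 *	} while (rc == -EAGAIN);
 */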

/**
 * gmap_destroy_page - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
	struct vm_area_struct *vma;
	unsigned long uaddr;
	struct page *page;
	int rc;

	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Huge pages should not be able to become secure
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = 0;
	/* we take an extra reference here */
	page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	rc = uv_destroy_owned_page(page_to_phys(page));
	/*
	 * Fault handlers can race; it is possible that two CPUs will fault
	 * on the same secure page. One CPU can destroy the page, reboot,
	 * re-enter secure mode and import it, while the second CPU was
	 * stuck at the beginning of the handler. At some point the second
	 * CPU will be able to progress past the test whether the page is
	 * secure. The first CPU can skip the import and hit the unexpected
	 * second destroy, which will fail. In that case we do not give up,
	 * but try to export the page instead, which also leaves it in a
	 * consistent non-secure state.
	 */
	if (rc)
		rc = uv_convert_owned_from_secure(page_to_phys(page));
	put_page(page);
out:
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_destroy_page);

/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having 2
 * parallel make_page_accessible is fine, as the UV calls will become a
 * no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepage cannot be protected, so nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. As an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    uv_convert_from_secure.
	 * As secure pages are never huge, all 3 variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);
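
/*
 * Calling convention sketch (hypothetical caller, for illustration only):
 * the page must be locked or pinned by an extra reference so that
 * gmap_make_secure() cannot touch it concurrently, e.g.:
 *
 *	get_page(page);
 *	rc = arch_make_page_accessible(page);
 *	put_page(page);
 *	if (rc)
 *		// the page must not be used for host I/O
 */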

#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
			 uv_info.inst_calls_list[0],
			 uv_info.inst_calls_list[1],
			 uv_info.inst_calls_list[2],
			 uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
				     struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.guest_cpu_stor_len);
}

static struct kobj_attribute uv_query_dump_cpu_len_attr =
	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
					       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
					  struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			 uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			 uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
					     struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
					struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	&uv_query_supp_se_hdr_ver_attr.attr,
	&uv_query_supp_se_hdr_pcf_attr.attr,
	&uv_query_dump_storage_state_len_attr.attr,
	&uv_query_dump_finalize_len_attr.attr,
	&uv_query_dump_cpu_len_attr.attr,
	&uv_query_supp_att_req_hdr_ver_attr.attr,
	&uv_query_supp_att_pflags_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
				     struct kobj_attribute *attr, char *page)
{
	int val = 0;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
	val = prot_virt_guest;
#endif
	return scnprintf(page, PAGE_SIZE, "%d\n", val);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
				    struct kobj_attribute *attr, char *page)
{
	int val = 0;

#if IS_ENABLED(CONFIG_KVM)
	val = prot_virt_host;
#endif

	return scnprintf(page, PAGE_SIZE, "%d\n", val);
}

static struct kobj_attribute uv_prot_virt_guest =
	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
	&uv_prot_virt_guest.attr,
	&uv_prot_virt_host.attr,
	NULL,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

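/*
 * On machines with facility 158, the initcall below results in the
 * following sysfs layout (illustrative list, derived from the attributes
 * defined above):
 *
 *   /sys/firmware/uv/prot_virt_guest
 *   /sys/firmware/uv/prot_virt_host
 *   /sys/firmware/uv/query/facilities
 *   /sys/firmware/uv/query/max_cpus
 *   /sys/firmware/uv/query/max_guests
 *   ...
 */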
static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
	if (rc)
		goto out_kobj;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset) {
		rc = -ENOMEM;
		goto out_ind_files;
	}

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_ind_files:
	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
#endif