// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

struct uv_info __bootdata_preserved(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);
EXPORT_SYMBOL(uv_info);

static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
    struct uv_cb_init uvcb = {
        .header.cmd = UVC_CMD_INIT_UV,
        .header.len = sizeof(uvcb),
        .stor_origin = stor_base,
        .stor_len = stor_len,
    };

    if (uv_call(0, (uint64_t)&uvcb)) {
        pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
               uvcb.header.rc, uvcb.header.rrc);
        return -1;
    }
    return 0;
}

void __init setup_uv(void)
{
    void *uv_stor_base;

    if (!is_prot_virt_host())
        return;

    uv_stor_base = memblock_alloc_try_nid(
        uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
        MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
    if (!uv_stor_base) {
        pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
            uv_info.uv_base_stor_len);
        goto fail;
    }

    if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
        memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
        goto fail;
    }

    pr_info("Reserving %luMB as ultravisor base storage\n",
        uv_info.uv_base_stor_len >> 20);
    return;
fail:
    pr_info("Disabling support for protected virtualization\n");
    prot_virt_host = 0;
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
static int uv_pin_shared(unsigned long paddr)
{
    struct uv_cb_cfs uvcb = {
        .header.cmd = UVC_CMD_PIN_PAGE_SHARED,
        .header.len = sizeof(uvcb),
        .paddr = paddr,
    };

    if (uv_call(0, (u64)&uvcb))
        return -EINVAL;
    return 0;
}

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy_page(unsigned long paddr)
{
    struct uv_cb_cfs uvcb = {
        .header.cmd = UVC_CMD_DESTR_SEC_STOR,
        .header.len = sizeof(uvcb),
        .paddr = paddr
    };

    if (uv_call(0, (u64)&uvcb)) {
        /*
         * Older firmware uses 107/d as an indication of a non-secure
         * page. Let us emulate the newer variant (no-op).
         */
        if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
            return 0;
        return -EINVAL;
    }
    return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_destroy_owned_page(unsigned long paddr)
{
    struct page *page = phys_to_page(paddr);
    int rc;

    get_page(page);
    rc = uv_destroy_page(paddr);
    if (!rc)
        clear_bit(PG_arch_1, &page->flags);
    put_page(page);
    return rc;
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
    struct uv_cb_cfs uvcb = {
        .header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
        .header.len = sizeof(uvcb),
        .paddr = paddr
    };

    if (uv_call(0, (u64)&uvcb))
        return -EINVAL;
    return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_convert_owned_from_secure(unsigned long paddr)
{
    struct page *page = phys_to_page(paddr);
    int rc;

    get_page(page);
    rc = uv_convert_from_secure(paddr);
    if (!rc)
        clear_bit(PG_arch_1, &page->flags);
    put_page(page);
    return rc;
}

/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * page cannot be a huge page, for example.
 */
static int expected_page_refs(struct page *page)
{
    int res;

    res = page_mapcount(page);
    if (PageSwapCache(page)) {
        res++;
    } else if (page_mapping(page)) {
        res++;
        if (page_has_private(page))
            res++;
    }
    return res;
}
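
/*
 * Worked example (illustrative only): an anonymous page mapped by exactly
 * one PTE and present in the swap cache is expected to have a refcount of 2
 * when nothing else pins it -- one reference for the mapping
 * (page_mapcount() == 1) plus one for the swap cache. A file-backed page
 * mapped once that still has private data attached (page_has_private())
 * would be expected at 3: mapping + page cache + private data.
 */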

static int make_secure_pte(pte_t *ptep, unsigned long addr,
               struct page *exp_page, struct uv_cb_header *uvcb)
{
    pte_t entry = READ_ONCE(*ptep);
    struct page *page;
    int expected, cc = 0;

    if (!pte_present(entry))
        return -ENXIO;
    if (pte_val(entry) & _PAGE_INVALID)
        return -ENXIO;

    page = pte_page(entry);
    if (page != exp_page)
        return -ENXIO;
    if (PageWriteback(page))
        return -EAGAIN;
    expected = expected_page_refs(page);
    if (!page_ref_freeze(page, expected))
        return -EBUSY;
    set_bit(PG_arch_1, &page->flags);
    /*
     * If the UVC does not succeed or fail immediately, we don't want to
     * loop for long, or we might get stall notifications.
     * On the other hand, this is a complex scenario and we are holding a lot of
     * locks, so we can't easily sleep and reschedule. We try only once,
     * and if the UVC returned busy or partial completion, we return
     * -EAGAIN and we let the callers deal with it.
     */
    cc = __uv_call(0, (u64)uvcb);
    page_ref_unfreeze(page, expected);
    /*
     * Return -ENXIO if the page was not mapped, -EINVAL for other errors.
     * If busy or partially completed, return -EAGAIN.
     */
    if (cc == UVC_CC_OK)
        return 0;
    else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
        return -EAGAIN;
    return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}
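
/*
 * For orientation -- a rough, simplified sketch of the difference between
 * uv_call() and __uv_call() as provided by <asm/uv.h> (not a verbatim copy;
 * the real helpers are inline asm issuing the UV instruction):
 *
 *	int __uv_call(r1, r2);	// issue the UVC once, return the condition code
 *
 *	int uv_call(r1, r2)	// retry the UVC while the Ultravisor is busy
 *	{
 *		int cc;
 *
 *		do {
 *			cc = __uv_call(r1, r2);
 *		} while (cc > 1);	// cc > 1: busy / partial completion
 *		return cc;
 *	}
 *
 * make_secure_pte() deliberately uses the single-shot __uv_call() so that a
 * busy Ultravisor surfaces as -EAGAIN instead of spinning under locks.
 */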

/**
 * should_export_before_import - Determine whether an export is needed
 * before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although considered as one, the Unpin Page UVC is not an actual import,
 * so it is not affected.
 *
 * Likewise, no export is needed when there is only one protected VM, because
 * the page cannot belong to the wrong VM in that case (there is no "other
 * VM" it could belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
    if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
        return false;
    return atomic_read(&mm->context.protected_count) > 1;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it's brought in the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
    struct vm_area_struct *vma;
    bool local_drain = false;
    spinlock_t *ptelock;
    unsigned long uaddr;
    struct page *page;
    pte_t *ptep;
    int rc;

again:
    rc = -EFAULT;
    mmap_read_lock(gmap->mm);

    uaddr = __gmap_translate(gmap, gaddr);
    if (IS_ERR_VALUE(uaddr))
        goto out;
    vma = vma_lookup(gmap->mm, uaddr);
    if (!vma)
        goto out;
    /*
     * Secure pages cannot be huge and userspace should not combine both.
     * In case userspace does it anyway this will result in an -EFAULT for
     * the unpack. The guest thus never reaches secure mode. If
     * userspace plays dirty tricks with mapping huge pages later
     * on, this will result in a segmentation fault.
     */
    if (is_vm_hugetlb_page(vma))
        goto out;

    rc = -ENXIO;
    page = follow_page(vma, uaddr, FOLL_WRITE);
    if (IS_ERR_OR_NULL(page))
        goto out;

    lock_page(page);
    ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
    if (should_export_before_import(uvcb, gmap->mm))
        uv_convert_from_secure(page_to_phys(page));
    rc = make_secure_pte(ptep, uaddr, page, uvcb);
    pte_unmap_unlock(ptep, ptelock);
    unlock_page(page);
out:
    mmap_read_unlock(gmap->mm);

    if (rc == -EAGAIN) {
        /*
         * If we are here because the UVC returned busy or partial
         * completion, this is just a useless check, but it is safe.
         */
        wait_on_page_writeback(page);
    } else if (rc == -EBUSY) {
        /*
         * If we have tried a local drain and the page refcount
         * still does not match our expected safe value, try with a
         * system wide drain. This is needed if the pagevecs holding
         * the page are on a different CPU.
         */
        if (local_drain) {
            lru_add_drain_all();
            /* We give up here, and let the caller try again */
            return -EAGAIN;
        }
        /*
         * We are here if the page refcount does not match the
         * expected safe value. The main culprits are usually
         * pagevecs. With lru_add_drain() we drain the pagevecs
         * on the local CPU so that hopefully the refcount will
         * reach the expected safe value.
         */
        lru_add_drain();
        local_drain = true;
        /* And now we try again immediately after draining */
        goto again;
    } else if (rc == -ENXIO) {
        if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
            return -EFAULT;
        return -EAGAIN;
    }
    return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);

int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
    struct uv_cb_cts uvcb = {
        .header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
        .header.len = sizeof(uvcb),
        .guest_handle = gmap->guest_handle,
        .gaddr = gaddr,
    };

    return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
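
/*
 * Illustrative caller sketch (hypothetical, not part of this file): callers
 * typically retry a conversion while it reports a transient condition, e.g.:
 *
 *	static int convert_with_retry(struct gmap *gmap, unsigned long gaddr)
 *	{
 *		int rc;
 *
 *		do {
 *			rc = gmap_convert_to_secure(gmap, gaddr);
 *		} while (rc == -EAGAIN);
 *		return rc;	// 0 on success, else e.g. -EFAULT or -EINVAL
 *	}
 */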

/**
 * gmap_destroy_page - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
    struct vm_area_struct *vma;
    unsigned long uaddr;
    struct page *page;
    int rc;

    rc = -EFAULT;
    mmap_read_lock(gmap->mm);

    uaddr = __gmap_translate(gmap, gaddr);
    if (IS_ERR_VALUE(uaddr))
        goto out;
    vma = vma_lookup(gmap->mm, uaddr);
    if (!vma)
        goto out;
    /*
     * Huge pages should not be able to become secure
     */
    if (is_vm_hugetlb_page(vma))
        goto out;

    rc = 0;
    /* we take an extra reference here */
    page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
    if (IS_ERR_OR_NULL(page))
        goto out;
    rc = uv_destroy_owned_page(page_to_phys(page));
    /*
     * Fault handlers can race; it is possible that two CPUs will fault
     * on the same secure page. One CPU can destroy the page, reboot,
     * re-enter secure mode and import it, while the second CPU was
     * stuck at the beginning of the handler. At some point the second
     * CPU will be able to progress, and it will not be able to destroy
     * the page. In that case we do not want to terminate the process,
     * we instead try to export the page.
     */
    if (rc)
        rc = uv_convert_owned_from_secure(page_to_phys(page));
    put_page(page);
out:
    mmap_read_unlock(gmap->mm);
    return rc;
}
EXPORT_SYMBOL_GPL(gmap_destroy_page);

/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having 2
 * parallel make_page_accessible is fine, as the UV calls will become a
 * no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
    int rc = 0;

    /* Hugepage cannot be protected, so nothing to do */
    if (PageHuge(page))
        return 0;

    /*
     * PG_arch_1 is used in 3 places:
     * 1. for kernel page tables during early boot
     * 2. for storage keys of huge pages and KVM
     * 3. As an indication that this page might be secure. This can
     *    overindicate, e.g. we set the bit before calling
     *    convert_to_secure.
     * As secure pages are never huge, all 3 variants can co-exist.
     */
    if (!test_bit(PG_arch_1, &page->flags))
        return 0;

    rc = uv_pin_shared(page_to_phys(page));
    if (!rc) {
        clear_bit(PG_arch_1, &page->flags);
        return 0;
    }

    rc = uv_convert_from_secure(page_to_phys(page));
    if (!rc) {
        clear_bit(PG_arch_1, &page->flags);
        return 0;
    }

    return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);

#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
                   struct kobj_attribute *attr, char *page)
{
    return scnprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
            uv_info.inst_calls_list[0],
            uv_info.inst_calls_list[1],
            uv_info.inst_calls_list[2],
            uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
    __ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
                    struct kobj_attribute *attr, char *buf)
{
    return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
    __ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
                    struct kobj_attribute *attr, char *buf)
{
    return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
    __ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
                     struct kobj_attribute *attr, char *page)
{
    return scnprintf(page, PAGE_SIZE, "%lx\n",
            uv_info.guest_cpu_stor_len);
}

static struct kobj_attribute uv_query_dump_cpu_len_attr =
    __ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
                           struct kobj_attribute *attr, char *page)
{
    return scnprintf(page, PAGE_SIZE, "%lx\n",
            uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
    __ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
                      struct kobj_attribute *attr, char *page)
{
    return scnprintf(page, PAGE_SIZE, "%lx\n",
            uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
    __ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
                        struct kobj_attribute *attr, char *buf)
{
    return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
    __ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
                       struct kobj_attribute *attr, char *page)
{
    return scnprintf(page, PAGE_SIZE, "%d\n",
            uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
    __ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
                      struct kobj_attribute *attr, char *page)
{
    return scnprintf(page, PAGE_SIZE, "%d\n",
            uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
    __ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
                       struct kobj_attribute *attr, char *page)
{
    return scnprintf(page, PAGE_SIZE, "%lx\n",
            uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
    __ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
                         struct kobj_attribute *attr, char *page)
{
    return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
    __ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
                    struct kobj_attribute *attr, char *page)
{
    return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
    __ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static struct attribute *uv_query_attrs[] = {
    &uv_query_facilities_attr.attr,
    &uv_query_feature_indications_attr.attr,
    &uv_query_max_guest_cpus_attr.attr,
    &uv_query_max_guest_vms_attr.attr,
    &uv_query_max_guest_addr_attr.attr,
    &uv_query_supp_se_hdr_ver_attr.attr,
    &uv_query_supp_se_hdr_pcf_attr.attr,
    &uv_query_dump_storage_state_len_attr.attr,
    &uv_query_dump_finalize_len_attr.attr,
    &uv_query_dump_cpu_len_attr.attr,
    &uv_query_supp_att_req_hdr_ver_attr.attr,
    &uv_query_supp_att_pflags_attr.attr,
    NULL,
};

static struct attribute_group uv_query_attr_group = {
    .attrs = uv_query_attrs,
};

static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
                     struct kobj_attribute *attr, char *page)
{
    int val = 0;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
    val = prot_virt_guest;
#endif
    return scnprintf(page, PAGE_SIZE, "%d\n", val);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
                    struct kobj_attribute *attr, char *page)
{
    int val = 0;

#if IS_ENABLED(CONFIG_KVM)
    val = prot_virt_host;
#endif

    return scnprintf(page, PAGE_SIZE, "%d\n", val);
}

static struct kobj_attribute uv_prot_virt_guest =
    __ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
    __ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
    &uv_prot_virt_guest.attr,
    &uv_prot_virt_host.attr,
    NULL,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

static int __init uv_info_init(void)
{
    int rc = -ENOMEM;

    if (!test_facility(158))
        return 0;

    uv_kobj = kobject_create_and_add("uv", firmware_kobj);
    if (!uv_kobj)
        return -ENOMEM;

    rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
    if (rc)
        goto out_kobj;

    uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
    if (!uv_query_kset) {
        rc = -ENOMEM;
        goto out_ind_files;
    }

    rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
    if (!rc)
        return 0;

    kset_unregister(uv_query_kset);
out_ind_files:
    sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
    kobject_del(uv_kobj);
    kobject_put(uv_kobj);
    return rc;
}
device_initcall(uv_info_init);
#endif
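
/*
 * Userspace view (illustrative only, not part of this file): the attributes
 * registered above appear under /sys/firmware/uv/, e.g.
 * /sys/firmware/uv/prot_virt_host and /sys/firmware/uv/query/max_cpus.
 * A minimal, hypothetical userspace reader might look like:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/firmware/uv/query/max_cpus", "r");
 *		char buf[64];
 *
 *		if (f && fgets(buf, sizeof(buf), f))
 *			printf("max secure guest CPUs: %s", buf);
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 */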