Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * VFIO PCI Intel Graphics support
0004  *
0005  * Copyright (C) 2016 Red Hat, Inc.  All rights reserved.
0006  *  Author: Alex Williamson <alex.williamson@redhat.com>
0007  *
0008  * Register a device specific region through which to provide read-only
0009  * access to the Intel IGD opregion.  The register defining the opregion
0010  * address is also virtualized to prevent user modification.
0011  */
0012 
0013 #include <linux/io.h>
0014 #include <linux/pci.h>
0015 #include <linux/uaccess.h>
0016 #include <linux/vfio.h>
0017 
0018 #include <linux/vfio_pci_core.h>
0019 
0020 #define OPREGION_SIGNATURE  "IntelGraphicsMem"
0021 #define OPREGION_SIZE       (8 * 1024)
0022 #define OPREGION_PCI_ADDR   0xfc
0023 
0024 #define OPREGION_RVDA       0x3ba
0025 #define OPREGION_RVDS       0x3c2
0026 #define OPREGION_VERSION    0x16
0027 
/*
 * Per-device backing data for the virtual OpRegion region: a mapping of
 * the OpRegion itself plus, when the host uses a non-contiguous extended
 * VBT (OpRegion 2.0+ with non-zero RVDA/RVDS), a separate mapping of that
 * VBT so both can be exposed to the user as one contiguous region.
 */
struct igd_opregion_vbt {
    void *opregion;	/* memremap'd OpRegion (OPREGION_SIZE bytes) */
    void *vbt_ex;	/* memremap'd extended VBT, or NULL if none */
};
0032 
0033 /**
0034  * igd_opregion_shift_copy() - Copy OpRegion to user buffer and shift position.
0035  * @dst: User buffer ptr to copy to.
0036  * @off: Offset to user buffer ptr. Increased by bytes on return.
0037  * @src: Source buffer to copy from.
0038  * @pos: Increased by bytes on return.
0039  * @remaining: Decreased by bytes on return.
0040  * @bytes: Bytes to copy and adjust off, pos and remaining.
0041  *
0042  * Copy OpRegion to offset from specific source ptr and shift the offset.
0043  *
0044  * Return: 0 on success, -EFAULT otherwise.
0045  *
0046  */
0047 static inline unsigned long igd_opregion_shift_copy(char __user *dst,
0048                             loff_t *off,
0049                             void *src,
0050                             loff_t *pos,
0051                             size_t *remaining,
0052                             size_t bytes)
0053 {
0054     if (copy_to_user(dst + (*off), src, bytes))
0055         return -EFAULT;
0056 
0057     *off += bytes;
0058     *pos += bytes;
0059     *remaining -= bytes;
0060 
0061     return 0;
0062 }
0063 
/*
 * Read handler for the virtual OpRegion region (writes are rejected).
 *
 * The region is exposed as OpRegion followed immediately by the extended
 * VBT, regardless of how the host lays them out.  The read is therefore
 * staged: plain OpRegion bytes, a possibly-patched version field, a
 * possibly-rewritten RVDA field, the rest of the OpRegion, then the
 * extended VBT.  Each stage advances pos/off/remaining via
 * igd_opregion_shift_copy() so a read may start and end mid-field.
 *
 * Returns the number of bytes copied, -EINVAL for writes or reads past
 * the region, -EFAULT on copy_to_user() failure.
 */
static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
                   char __user *buf, size_t count, loff_t *ppos,
                   bool iswrite)
{
    /* Device-region index encoded in the upper bits of the offset */
    unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
    struct igd_opregion_vbt *opregionvbt = vdev->region[i].data;
    loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK, off = 0;
    size_t remaining;

    if (pos >= vdev->region[i].size || iswrite)
        return -EINVAL;

    /* Clamp to the region end; short reads are allowed */
    count = min_t(size_t, count, vdev->region[i].size - pos);
    remaining = count;

    /* Copy until OpRegion version */
    if (remaining && pos < OPREGION_VERSION) {
        size_t bytes = min_t(size_t, remaining, OPREGION_VERSION - pos);

        if (igd_opregion_shift_copy(buf, &off,
                        opregionvbt->opregion + pos, &pos,
                        &remaining, bytes))
            return -EFAULT;
    }

    /* Copy patched (if necessary) OpRegion version */
    if (remaining && pos < OPREGION_VERSION + sizeof(__le16)) {
        size_t bytes = min_t(size_t, remaining,
                     OPREGION_VERSION + sizeof(__le16) - pos);
        __le16 version = *(__le16 *)(opregionvbt->opregion +
                         OPREGION_VERSION);

        /* Patch to 2.1 if OpRegion 2.0 has extended VBT */
        if (le16_to_cpu(version) == 0x0200 && opregionvbt->vbt_ex)
            version = cpu_to_le16(0x0201);

        /* Source is the local copy, offset by how far into it we are */
        if (igd_opregion_shift_copy(buf, &off,
                        (u8 *)&version +
                        (pos - OPREGION_VERSION),
                        &pos, &remaining, bytes))
            return -EFAULT;
    }

    /* Copy until RVDA */
    if (remaining && pos < OPREGION_RVDA) {
        size_t bytes = min_t(size_t, remaining, OPREGION_RVDA - pos);

        if (igd_opregion_shift_copy(buf, &off,
                        opregionvbt->opregion + pos, &pos,
                        &remaining, bytes))
            return -EFAULT;
    }

    /* Copy modified (if necessary) RVDA */
    if (remaining && pos < OPREGION_RVDA + sizeof(__le64)) {
        size_t bytes = min_t(size_t, remaining,
                     OPREGION_RVDA + sizeof(__le64) - pos);
        /*
         * In the virtual layout the extended VBT (if any) sits right
         * after the OpRegion, so RVDA is rewritten as the 2.1-style
         * relative offset OPREGION_SIZE, or 0 when there is no
         * extended VBT.
         */
        __le64 rvda = cpu_to_le64(opregionvbt->vbt_ex ?
                      OPREGION_SIZE : 0);

        if (igd_opregion_shift_copy(buf, &off,
                        (u8 *)&rvda + (pos - OPREGION_RVDA),
                        &pos, &remaining, bytes))
            return -EFAULT;
    }

    /* Copy the rest of OpRegion */
    if (remaining && pos < OPREGION_SIZE) {
        size_t bytes = min_t(size_t, remaining, OPREGION_SIZE - pos);

        if (igd_opregion_shift_copy(buf, &off,
                        opregionvbt->opregion + pos, &pos,
                        &remaining, bytes))
            return -EFAULT;
    }

    /*
     * Copy extended VBT if exists.
     * NOTE(review): this dereferences vbt_ex whenever the registered
     * region size exceeds OPREGION_SIZE — presumably the init path only
     * registers a larger size when vbt_ex was mapped; confirm against
     * vfio_pci_igd_opregion_init().
     */
    if (remaining &&
        copy_to_user(buf + off, opregionvbt->vbt_ex + (pos - OPREGION_SIZE),
             remaining))
        return -EFAULT;

    *ppos += count;

    return count;
}
0150 
0151 static void vfio_pci_igd_release(struct vfio_pci_core_device *vdev,
0152                  struct vfio_pci_region *region)
0153 {
0154     struct igd_opregion_vbt *opregionvbt = region->data;
0155 
0156     if (opregionvbt->vbt_ex)
0157         memunmap(opregionvbt->vbt_ex);
0158 
0159     memunmap(opregionvbt->opregion);
0160     kfree(opregionvbt);
0161 }
0162 
/* Ops for the read-only virtual OpRegion+VBT device region */
static const struct vfio_pci_regops vfio_pci_igd_regops = {
    .rw     = vfio_pci_igd_rw,
    .release    = vfio_pci_igd_release,
};
0167 
0168 static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
0169 {
0170     __le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR);
0171     u32 addr, size;
0172     struct igd_opregion_vbt *opregionvbt;
0173     int ret;
0174     u16 version;
0175 
0176     ret = pci_read_config_dword(vdev->pdev, OPREGION_PCI_ADDR, &addr);
0177     if (ret)
0178         return ret;
0179 
0180     if (!addr || !(~addr))
0181         return -ENODEV;
0182 
0183     opregionvbt = kzalloc(sizeof(*opregionvbt), GFP_KERNEL);
0184     if (!opregionvbt)
0185         return -ENOMEM;
0186 
0187     opregionvbt->opregion = memremap(addr, OPREGION_SIZE, MEMREMAP_WB);
0188     if (!opregionvbt->opregion) {
0189         kfree(opregionvbt);
0190         return -ENOMEM;
0191     }
0192 
0193     if (memcmp(opregionvbt->opregion, OPREGION_SIGNATURE, 16)) {
0194         memunmap(opregionvbt->opregion);
0195         kfree(opregionvbt);
0196         return -EINVAL;
0197     }
0198 
0199     size = le32_to_cpu(*(__le32 *)(opregionvbt->opregion + 16));
0200     if (!size) {
0201         memunmap(opregionvbt->opregion);
0202         kfree(opregionvbt);
0203         return -EINVAL;
0204     }
0205 
0206     size *= 1024; /* In KB */
0207 
0208     /*
0209      * OpRegion and VBT:
0210      * When VBT data doesn't exceed 6KB, it's stored in Mailbox #4.
0211      * When VBT data exceeds 6KB size, Mailbox #4 is no longer large enough
0212      * to hold the VBT data, the Extended VBT region is introduced since
0213      * OpRegion 2.0 to hold the VBT data. Since OpRegion 2.0, RVDA/RVDS are
0214      * introduced to define the extended VBT data location and size.
0215      * OpRegion 2.0: RVDA defines the absolute physical address of the
0216      *   extended VBT data, RVDS defines the VBT data size.
0217      * OpRegion 2.1 and above: RVDA defines the relative address of the
0218      *   extended VBT data to OpRegion base, RVDS defines the VBT data size.
0219      *
0220      * Due to the RVDA definition diff in OpRegion VBT (also the only diff
0221      * between 2.0 and 2.1), exposing OpRegion and VBT as a contiguous range
0222      * for OpRegion 2.0 and above makes it possible to support the
0223      * non-contiguous VBT through a single vfio region. From r/w ops view,
0224      * only contiguous VBT after OpRegion with version 2.1+ is exposed,
0225      * regardless the host OpRegion is 2.0 or non-contiguous 2.1+. The r/w
0226      * ops will on-the-fly shift the actural offset into VBT so that data at
0227      * correct position can be returned to the requester.
0228      */
0229     version = le16_to_cpu(*(__le16 *)(opregionvbt->opregion +
0230                       OPREGION_VERSION));
0231     if (version >= 0x0200) {
0232         u64 rvda = le64_to_cpu(*(__le64 *)(opregionvbt->opregion +
0233                            OPREGION_RVDA));
0234         u32 rvds = le32_to_cpu(*(__le32 *)(opregionvbt->opregion +
0235                            OPREGION_RVDS));
0236 
0237         /* The extended VBT is valid only when RVDA/RVDS are non-zero */
0238         if (rvda && rvds) {
0239             size += rvds;
0240 
0241             /*
0242              * Extended VBT location by RVDA:
0243              * Absolute physical addr for 2.0.
0244              * Relative addr to OpRegion header for 2.1+.
0245              */
0246             if (version == 0x0200)
0247                 addr = rvda;
0248             else
0249                 addr += rvda;
0250 
0251             opregionvbt->vbt_ex = memremap(addr, rvds, MEMREMAP_WB);
0252             if (!opregionvbt->vbt_ex) {
0253                 memunmap(opregionvbt->opregion);
0254                 kfree(opregionvbt);
0255                 return -ENOMEM;
0256             }
0257         }
0258     }
0259 
0260     ret = vfio_pci_register_dev_region(vdev,
0261         PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
0262         VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &vfio_pci_igd_regops,
0263         size, VFIO_REGION_INFO_FLAG_READ, opregionvbt);
0264     if (ret) {
0265         if (opregionvbt->vbt_ex)
0266             memunmap(opregionvbt->vbt_ex);
0267 
0268         memunmap(opregionvbt->opregion);
0269         kfree(opregionvbt);
0270         return ret;
0271     }
0272 
0273     /* Fill vconfig with the hw value and virtualize register */
0274     *dwordp = cpu_to_le32(addr);
0275     memset(vdev->pci_config_map + OPREGION_PCI_ADDR,
0276            PCI_CAP_ID_INVALID_VIRT, 4);
0277 
0278     return ret;
0279 }
0280 
/*
 * Read handler for the virtualized host/LPC bridge config space regions
 * (writes are rejected).  Reads are forwarded to the physical bridge via
 * pci_user_read_config_{byte,word,dword}(), staged by alignment: a
 * leading byte to reach word alignment, a leading word to reach dword
 * alignment, then dwords, a trailing word, and a trailing byte.
 * Multi-byte values are converted to little-endian before copy-out, so
 * the user always sees PCI (LE) byte order.
 *
 * Returns bytes copied, -EINVAL for writes/out-of-range reads, -EFAULT
 * on copy_to_user() failure, or the pci_user_read_config_* error.
 */
static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev,
                   char __user *buf, size_t count, loff_t *ppos,
                   bool iswrite)
{
    /* Device-region index encoded in the upper bits of the offset */
    unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
    struct pci_dev *pdev = vdev->region[i].data;
    loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
    size_t size;
    int ret;

    if (pos >= vdev->region[i].size || iswrite)
        return -EINVAL;

    /* 'count' stays the total; 'size' counts down the bytes left */
    size = count = min(count, (size_t)(vdev->region[i].size - pos));

    /* Leading byte to reach 16-bit alignment */
    if ((pos & 1) && size) {
        u8 val;

        ret = pci_user_read_config_byte(pdev, pos, &val);
        if (ret)
            return ret;

        if (copy_to_user(buf + count - size, &val, 1))
            return -EFAULT;

        pos++;
        size--;
    }

    /* Leading word to reach 32-bit alignment (only if >2 bytes remain) */
    if ((pos & 3) && size > 2) {
        u16 val;
        __le16 lval;

        ret = pci_user_read_config_word(pdev, pos, &val);
        if (ret)
            return ret;

        lval = cpu_to_le16(val);
        if (copy_to_user(buf + count - size, &lval, 2))
            return -EFAULT;

        pos += 2;
        size -= 2;
    }

    /* Bulk of the transfer as aligned dwords */
    while (size > 3) {
        u32 val;
        __le32 lval;

        ret = pci_user_read_config_dword(pdev, pos, &val);
        if (ret)
            return ret;

        lval = cpu_to_le32(val);
        if (copy_to_user(buf + count - size, &lval, 4))
            return -EFAULT;

        pos += 4;
        size -= 4;
    }

    /* Trailing word */
    while (size >= 2) {
        u16 val;
        __le16 lval;

        ret = pci_user_read_config_word(pdev, pos, &val);
        if (ret)
            return ret;

        lval = cpu_to_le16(val);
        if (copy_to_user(buf + count - size, &lval, 2))
            return -EFAULT;

        pos += 2;
        size -= 2;
    }

    /* Trailing byte */
    while (size) {
        u8 val;

        ret = pci_user_read_config_byte(pdev, pos, &val);
        if (ret)
            return ret;

        if (copy_to_user(buf + count - size, &val, 1))
            return -EFAULT;

        pos++;
        size--;
    }

    *ppos += count;

    return count;
}
0376 
0377 static void vfio_pci_igd_cfg_release(struct vfio_pci_core_device *vdev,
0378                      struct vfio_pci_region *region)
0379 {
0380     struct pci_dev *pdev = region->data;
0381 
0382     pci_dev_put(pdev);
0383 }
0384 
/* Ops shared by the read-only host-bridge and LPC-bridge config regions */
static const struct vfio_pci_regops vfio_pci_igd_cfg_regops = {
    .rw     = vfio_pci_igd_cfg_rw,
    .release    = vfio_pci_igd_cfg_release,
};
0389 
0390 static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev)
0391 {
0392     struct pci_dev *host_bridge, *lpc_bridge;
0393     int ret;
0394 
0395     host_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
0396     if (!host_bridge)
0397         return -ENODEV;
0398 
0399     if (host_bridge->vendor != PCI_VENDOR_ID_INTEL ||
0400         host_bridge->class != (PCI_CLASS_BRIDGE_HOST << 8)) {
0401         pci_dev_put(host_bridge);
0402         return -EINVAL;
0403     }
0404 
0405     ret = vfio_pci_register_dev_region(vdev,
0406         PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
0407         VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG,
0408         &vfio_pci_igd_cfg_regops, host_bridge->cfg_size,
0409         VFIO_REGION_INFO_FLAG_READ, host_bridge);
0410     if (ret) {
0411         pci_dev_put(host_bridge);
0412         return ret;
0413     }
0414 
0415     lpc_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x1f, 0));
0416     if (!lpc_bridge)
0417         return -ENODEV;
0418 
0419     if (lpc_bridge->vendor != PCI_VENDOR_ID_INTEL ||
0420         lpc_bridge->class != (PCI_CLASS_BRIDGE_ISA << 8)) {
0421         pci_dev_put(lpc_bridge);
0422         return -EINVAL;
0423     }
0424 
0425     ret = vfio_pci_register_dev_region(vdev,
0426         PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
0427         VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG,
0428         &vfio_pci_igd_cfg_regops, lpc_bridge->cfg_size,
0429         VFIO_REGION_INFO_FLAG_READ, lpc_bridge);
0430     if (ret) {
0431         pci_dev_put(lpc_bridge);
0432         return ret;
0433     }
0434 
0435     return 0;
0436 }
0437 
/*
 * Entry point for IGD support: set up the virtual OpRegion region, then
 * the host/LPC bridge config regions.  Returns 0 or the first failing
 * step's negative errno.
 */
int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
{
    int ret = vfio_pci_igd_opregion_init(vdev);

    if (ret)
        return ret;

    return vfio_pci_igd_cfg_init(vdev);
}