0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019 #define pr_fmt(fmt) "ACPI: VIOT: " fmt
0020
0021 #include <linux/acpi_viot.h>
0022 #include <linux/dma-iommu.h>
0023 #include <linux/fwnode.h>
0024 #include <linux/iommu.h>
0025 #include <linux/list.h>
0026 #include <linux/pci.h>
0027 #include <linux/platform_device.h>
0028
/* One IOMMU instance described by the VIOT, cached by its table node offset */
struct viot_iommu {
	/* Node offset within the table, used as the cache key */
	unsigned int offset;
	/* fwnode of the device implementing the IOMMU */
	struct fwnode_handle *fwnode;
	/* Entry in the global viot_iommus list */
	struct list_head list;
};
0035
/*
 * An endpoint described by the VIOT: either a range of PCI devices or a
 * single MMIO platform device, plus the IOMMU that translates it.
 */
struct viot_endpoint {
	union {
		/* PCI range node */
		struct {
			u16 segment_start;
			u16 segment_end;
			u16 bdf_start;
			u16 bdf_end;
		};
		/* MMIO node: base address of the endpoint's first region */
		u64 address;
	};
	/* Endpoint ID of the first device in the range */
	u32 endpoint_id;
	/* IOMMU that translates this endpoint, NULL if none was found */
	struct viot_iommu *viommu;
	/* Entry in viot_pci_ranges or viot_mmio_endpoints */
	struct list_head list;
};
0052
/* The VIOT table being parsed; only dereferenced during early init */
static struct acpi_table_viot *viot;
/* IOMMU instances discovered so far, see viot_get_iommu() */
static LIST_HEAD(viot_iommus);
/* Endpoints parsed from the table, consulted at DMA configuration time */
static LIST_HEAD(viot_pci_ranges);
static LIST_HEAD(viot_mmio_endpoints);
0057
0058 static int __init viot_check_bounds(const struct acpi_viot_header *hdr)
0059 {
0060 struct acpi_viot_header *start, *end, *hdr_end;
0061
0062 start = ACPI_ADD_PTR(struct acpi_viot_header, viot,
0063 max_t(size_t, sizeof(*viot), viot->node_offset));
0064 end = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->header.length);
0065 hdr_end = ACPI_ADD_PTR(struct acpi_viot_header, hdr, sizeof(*hdr));
0066
0067 if (hdr < start || hdr_end > end) {
0068 pr_err(FW_BUG "Node pointer overflows\n");
0069 return -EOVERFLOW;
0070 }
0071 if (hdr->length < sizeof(*hdr)) {
0072 pr_err(FW_BUG "Empty node\n");
0073 return -EINVAL;
0074 }
0075 return 0;
0076 }
0077
/*
 * Resolve the fwnode of an IOMMU implemented by a PCI device, identified by
 * @segment and @bdf, and store it in @viommu. Returns 0 on success.
 */
static int __init viot_get_pci_iommu_fwnode(struct viot_iommu *viommu,
					    u16 segment, u16 bdf)
{
	struct pci_dev *pdev;
	struct fwnode_handle *fwnode;

	pdev = pci_get_domain_bus_and_slot(segment, PCI_BUS_NUM(bdf),
					   bdf & 0xff);
	if (!pdev) {
		pr_err("Could not find PCI IOMMU\n");
		return -ENODEV;
	}

	fwnode = dev_fwnode(&pdev->dev);
	if (!fwnode) {
		/*
		 * The IOMMU PCI device has no fwnode of its own (it isn't
		 * necessarily described by ACPI). Allocate a static fwnode
		 * and attach it so the device can later be identified by
		 * fwnode, e.g. in viot_dev_iommu_init().
		 */
		fwnode = acpi_alloc_fwnode_static();
		if (!fwnode) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
		set_primary_fwnode(&pdev->dev, fwnode);
	}
	viommu->fwnode = dev_fwnode(&pdev->dev);
	/* Drop the reference taken by pci_get_domain_bus_and_slot() */
	pci_dev_put(pdev);
	return 0;
}
0108
0109 static int __init viot_get_mmio_iommu_fwnode(struct viot_iommu *viommu,
0110 u64 address)
0111 {
0112 struct acpi_device *adev;
0113 struct resource res = {
0114 .start = address,
0115 .end = address,
0116 .flags = IORESOURCE_MEM,
0117 };
0118
0119 adev = acpi_resource_consumer(&res);
0120 if (!adev) {
0121 pr_err("Could not find MMIO IOMMU\n");
0122 return -EINVAL;
0123 }
0124 viommu->fwnode = &adev->fwnode;
0125 return 0;
0126 }
0127
/*
 * Return the IOMMU described by the table node at @offset, creating it on
 * first use and caching it in viot_iommus. Returns NULL if the node is out
 * of bounds, of an unknown type, or its backing device cannot be resolved.
 */
static struct viot_iommu * __init viot_get_iommu(unsigned int offset)
{
	int ret;
	struct viot_iommu *viommu;
	struct acpi_viot_header *hdr = ACPI_ADD_PTR(struct acpi_viot_header,
						    viot, offset);
	union {
		struct acpi_viot_virtio_iommu_pci pci;
		struct acpi_viot_virtio_iommu_mmio mmio;
	} *node = (void *)hdr;

	/* One IOMMU may serve many endpoints; reuse a cached instance */
	list_for_each_entry(viommu, &viot_iommus, list)
		if (viommu->offset == offset)
			return viommu;

	if (viot_check_bounds(hdr))
		return NULL;

	viommu = kzalloc(sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return NULL;

	viommu->offset = offset;
	switch (hdr->type) {
	case ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI:
		/* Reject truncated nodes before reading the PCI fields */
		if (hdr->length < sizeof(node->pci))
			goto err_free;

		ret = viot_get_pci_iommu_fwnode(viommu, node->pci.segment,
						node->pci.bdf);
		break;
	case ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO:
		if (hdr->length < sizeof(node->mmio))
			goto err_free;

		ret = viot_get_mmio_iommu_fwnode(viommu,
						 node->mmio.base_address);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		goto err_free;

	list_add(&viommu->list, &viot_iommus);
	return viommu;

err_free:
	kfree(viommu);
	return NULL;
}
0179
/*
 * Parse a single VIOT node. Endpoint nodes (PCI range, MMIO) are added to
 * the corresponding global list; IOMMU nodes are skipped here and resolved
 * lazily via viot_get_iommu() when an endpoint references them.
 *
 * Returns 0 on success or when the node can safely be skipped, a negative
 * errno on a fatal parse error.
 */
static int __init viot_parse_node(const struct acpi_viot_header *hdr)
{
	int ret = -EINVAL;
	struct list_head *list;
	struct viot_endpoint *ep;
	union {
		struct acpi_viot_mmio mmio;
		struct acpi_viot_pci_range pci;
	} *node = (void *)hdr;

	if (viot_check_bounds(hdr))
		return -EINVAL;

	/* IOMMU nodes are parsed on demand, from the endpoints' output_node */
	if (hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI ||
	    hdr->type == ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO)
		return 0;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	switch (hdr->type) {
	case ACPI_VIOT_NODE_PCI_RANGE:
		if (hdr->length < sizeof(node->pci)) {
			pr_err(FW_BUG "Invalid PCI node size\n");
			goto err_free;
		}

		ep->segment_start = node->pci.segment_start;
		ep->segment_end = node->pci.segment_end;
		ep->bdf_start = node->pci.bdf_start;
		ep->bdf_end = node->pci.bdf_end;
		ep->endpoint_id = node->pci.endpoint_start;
		ep->viommu = viot_get_iommu(node->pci.output_node);
		list = &viot_pci_ranges;
		break;
	case ACPI_VIOT_NODE_MMIO:
		if (hdr->length < sizeof(node->mmio)) {
			pr_err(FW_BUG "Invalid MMIO node size\n");
			goto err_free;
		}

		ep->address = node->mmio.base_address;
		ep->endpoint_id = node->mmio.endpoint;
		ep->viommu = viot_get_iommu(node->mmio.output_node);
		list = &viot_mmio_endpoints;
		break;
	default:
		/* Unknown node types are non-fatal: warn and keep parsing */
		pr_warn("Unsupported node %x\n", hdr->type);
		ret = 0;
		goto err_free;
	}

	if (!ep->viommu) {
		pr_warn("No IOMMU node found\n");
		/*
		 * A future version of the table may use the node for other
		 * purposes. Keep parsing.
		 */
		ret = 0;
		goto err_free;
	}

	list_add(&ep->list, list);
	return 0;

err_free:
	kfree(ep);
	return ret;
}
0250
0251
0252
0253
0254
0255
0256
/**
 * acpi_viot_early_init - Test the presence of VIOT and enable ACS
 *
 * If a VIOT table is present, request PCI ACS so that endpoints are properly
 * isolated; this has to happen before the PCI bus scan, which is why it
 * cannot be done from acpi_viot_init().
 */
void __init acpi_viot_early_init(void)
{
#ifdef CONFIG_PCI
	acpi_status status;
	struct acpi_table_header *hdr;

	status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr);
	if (ACPI_FAILURE(status))
		return;
	pci_request_acs();
	acpi_put_table(hdr);
#endif
}
0270
0271
0272
0273
0274
0275
0276
0277 void __init acpi_viot_init(void)
0278 {
0279 int i;
0280 acpi_status status;
0281 struct acpi_table_header *hdr;
0282 struct acpi_viot_header *node;
0283
0284 status = acpi_get_table(ACPI_SIG_VIOT, 0, &hdr);
0285 if (ACPI_FAILURE(status)) {
0286 if (status != AE_NOT_FOUND) {
0287 const char *msg = acpi_format_exception(status);
0288
0289 pr_err("Failed to get table, %s\n", msg);
0290 }
0291 return;
0292 }
0293
0294 viot = (void *)hdr;
0295
0296 node = ACPI_ADD_PTR(struct acpi_viot_header, viot, viot->node_offset);
0297 for (i = 0; i < viot->node_count; i++) {
0298 if (viot_parse_node(node))
0299 return;
0300
0301 node = ACPI_ADD_PTR(struct acpi_viot_header, node,
0302 node->length);
0303 }
0304
0305 acpi_put_table(hdr);
0306 }
0307
/*
 * Associate @dev with the IOMMU described by @viommu, using endpoint ID
 * @epid. Returns -EPROBE_DEFER if the virtio-iommu driver may register its
 * ops later, 0 on success.
 */
static int viot_dev_iommu_init(struct device *dev, struct viot_iommu *viommu,
			       u32 epid)
{
	const struct iommu_ops *ops;

	if (!viommu)
		return -ENODEV;

	/* We're not translating ourselves */
	if (device_match_fwnode(dev, viommu->fwnode))
		return -EINVAL;

	ops = iommu_ops_from_fwnode(viommu->fwnode);
	if (!ops)
		return IS_ENABLED(CONFIG_VIRTIO_IOMMU) ?
			-EPROBE_DEFER : -ENODEV;

	return acpi_iommu_fwspec_init(dev, epid, viommu->fwnode, ops);
}
0327
0328 static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data)
0329 {
0330 u32 epid;
0331 struct viot_endpoint *ep;
0332 u32 domain_nr = pci_domain_nr(pdev->bus);
0333
0334 list_for_each_entry(ep, &viot_pci_ranges, list) {
0335 if (domain_nr >= ep->segment_start &&
0336 domain_nr <= ep->segment_end &&
0337 dev_id >= ep->bdf_start &&
0338 dev_id <= ep->bdf_end) {
0339 epid = ((domain_nr - ep->segment_start) << 16) +
0340 dev_id - ep->bdf_start + ep->endpoint_id;
0341
0342 return viot_dev_iommu_init(&pdev->dev, ep->viommu,
0343 epid);
0344 }
0345 }
0346 return -ENODEV;
0347 }
0348
0349 static int viot_mmio_dev_iommu_init(struct platform_device *pdev)
0350 {
0351 struct resource *mem;
0352 struct viot_endpoint *ep;
0353
0354 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
0355 if (!mem)
0356 return -ENODEV;
0357
0358 list_for_each_entry(ep, &viot_mmio_endpoints, list) {
0359 if (ep->address == mem->start)
0360 return viot_dev_iommu_init(&pdev->dev, ep->viommu,
0361 ep->endpoint_id);
0362 }
0363 return -ENODEV;
0364 }
0365
0366
0367
0368
0369
0370
0371
/**
 * viot_iommu_configure - Setup IOMMU ops for an endpoint described by VIOT
 * @dev: the endpoint
 *
 * Return: 0 on success, <0 on failure
 */
int viot_iommu_configure(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_for_each_dma_alias(to_pci_dev(dev),
					      viot_pci_dev_iommu_init, NULL);
	else if (dev_is_platform(dev))
		return viot_mmio_dev_iommu_init(to_platform_device(dev));
	return -ENODEV;
}