// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016, Semihalf
 *  Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This file implements early detection/parsing of I/O mapping
 * reported to the OS through firmware via the I/O Remapping Table (IORT)
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt) "ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>

#define IORT_TYPE_MASK(type)    (1 << (type))
#define IORT_MSI_TYPE       (1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE     ((1 << ACPI_IORT_NODE_SMMU) |   \
                (1 << ACPI_IORT_NODE_SMMU_V3))

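/*
 * Editor's note: a minimal sketch (not part of the original file) of how
 * these masks are used. A node's type is turned into a single-bit mask and
 * tested against a caller-supplied type_mask, e.g.:
 *
 *	u8 type_mask = IORT_IOMMU_TYPE;
 *
 *	if (IORT_TYPE_MASK(node->type) & type_mask)
 *		;	// node is an SMMU or an SMMUv3
 */
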
struct iort_its_msi_chip {
    struct list_head    list;
    struct fwnode_handle    *fw_node;
    phys_addr_t     base_addr;
    u32         translation_id;
};

struct iort_fwnode {
    struct list_head list;
    struct acpi_iort_node *iort_node;
    struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *             iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
                  struct fwnode_handle *fwnode)
{
    struct iort_fwnode *np;

    np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);

    if (WARN_ON(!np))
        return -ENOMEM;

    INIT_LIST_HEAD(&np->list);
    np->iort_node = iort_node;
    np->fwnode = fwnode;

    spin_lock(&iort_fwnode_lock);
    list_add_tail(&np->list, &iort_fwnode_list);
    spin_unlock(&iort_fwnode_lock);

    return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline struct fwnode_handle *iort_get_fwnode(
            struct acpi_iort_node *node)
{
    struct iort_fwnode *curr;
    struct fwnode_handle *fwnode = NULL;

    spin_lock(&iort_fwnode_lock);
    list_for_each_entry(curr, &iort_fwnode_list, list) {
        if (curr->iort_node == node) {
            fwnode = curr->fwnode;
            break;
        }
    }
    spin_unlock(&iort_fwnode_lock);

    return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
    struct iort_fwnode *curr, *tmp;

    spin_lock(&iort_fwnode_lock);
    list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
        if (curr->iort_node == node) {
            list_del(&curr->list);
            kfree(curr);
            break;
        }
    }
    spin_unlock(&iort_fwnode_lock);
}

/**
 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
 *
 * @fwnode: fwnode associated with device to be looked-up
 *
 * Returns: iort_node pointer on success, NULL on failure
 */
static inline struct acpi_iort_node *iort_get_iort_node(
            struct fwnode_handle *fwnode)
{
    struct iort_fwnode *curr;
    struct acpi_iort_node *iort_node = NULL;

    spin_lock(&iort_fwnode_lock);
    list_for_each_entry(curr, &iort_fwnode_list, list) {
        if (curr->fwnode == fwnode) {
            iort_node = curr->iort_node;
            break;
        }
    }
    spin_unlock(&iort_fwnode_lock);

    return iort_node;
}

typedef acpi_status (*iort_find_node_callback)
    (struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - register domain token along with related
 * ITS ID and base address to the list from where we can get it back later on.
 * @trans_id: ITS ID.
 * @base: ITS base address.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 */
int iort_register_domain_token(int trans_id, phys_addr_t base,
                   struct fwnode_handle *fw_node)
{
    struct iort_its_msi_chip *its_msi_chip;

    its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
    if (!its_msi_chip)
        return -ENOMEM;

    its_msi_chip->fw_node = fw_node;
    its_msi_chip->translation_id = trans_id;
    its_msi_chip->base_addr = base;

    spin_lock(&iort_msi_chip_lock);
    list_add(&its_msi_chip->list, &iort_msi_chip_list);
    spin_unlock(&iort_msi_chip_lock);

    return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
    struct iort_its_msi_chip *its_msi_chip, *t;

    spin_lock(&iort_msi_chip_lock);
    list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
        if (its_msi_chip->translation_id == trans_id) {
            list_del(&its_msi_chip->list);
            kfree(its_msi_chip);
            break;
        }
    }
    spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token when found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
    struct fwnode_handle *fw_node = NULL;
    struct iort_its_msi_chip *its_msi_chip;

    spin_lock(&iort_msi_chip_lock);
    list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
        if (its_msi_chip->translation_id == trans_id) {
            fw_node = its_msi_chip->fw_node;
            break;
        }
    }
    spin_unlock(&iort_msi_chip_lock);

    return fw_node;
}

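/*
 * Editor's note: a hypothetical usage sketch (not part of the original
 * file) of the domain token API above; its_id and its_base are made-up
 * locals, and in practice the GICv3 ITS probe path is the caller:
 *
 *	struct fwnode_handle *fwnode = irq_domain_alloc_fwnode(&its_base);
 *
 *	if (!fwnode || iort_register_domain_token(its_id, its_base, fwnode))
 *		return -ENOMEM;
 *	// ... later, resolve the token when mapping MSIs ...
 *	fwnode = iort_find_domain_token(its_id);
 *	// ... and drop it on teardown:
 *	iort_deregister_domain_token(its_id);
 */
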
static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
                         iort_find_node_callback callback,
                         void *context)
{
    struct acpi_iort_node *iort_node, *iort_end;
    struct acpi_table_iort *iort;
    int i;

    if (!iort_table)
        return NULL;

    /* Get the first IORT node */
    iort = (struct acpi_table_iort *)iort_table;
    iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
                 iort->node_offset);
    iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                iort_table->length);

    for (i = 0; i < iort->node_count; i++) {
        if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
                   "IORT node pointer overflows, bad table!\n"))
            return NULL;

        if (iort_node->type == type &&
            ACPI_SUCCESS(callback(iort_node, context)))
            return iort_node;

        iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
                     iort_node->length);
    }

    return NULL;
}

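/*
 * Editor's note: a minimal sketch (not part of the original file) of an
 * iort_scan_node() callback. Returning AE_OK stops the scan and reports
 * the current node; AE_NOT_FOUND keeps iterating (see
 * iort_match_node_callback() below for the real matching logic):
 *
 *	static acpi_status match_any_smmu(struct acpi_iort_node *node,
 *					  void *context)
 *	{
 *		return AE_OK;	// first node of the requested type wins
 *	}
 *
 *	node = iort_scan_node(ACPI_IORT_NODE_SMMU_V3, match_any_smmu, NULL);
 */
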
static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
                        void *context)
{
    struct device *dev = context;
    acpi_status status = AE_NOT_FOUND;

    if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_device *adev;
        struct acpi_iort_named_component *ncomp;
        struct device *nc_dev = dev;

        /*
         * Walk the device tree to find a device with an
         * ACPI companion; there is no point in scanning
         * IORT for a device matching a named component if
         * the device does not have an ACPI companion to
         * start with.
         */
        do {
            adev = ACPI_COMPANION(nc_dev);
            if (adev)
                break;

            nc_dev = nc_dev->parent;
        } while (nc_dev);

        if (!adev)
            goto out;

        status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
        if (ACPI_FAILURE(status)) {
            dev_warn(nc_dev, "Can't get device full path name\n");
            goto out;
        }

        ncomp = (struct acpi_iort_named_component *)node->node_data;
        status = !strcmp(ncomp->device_name, buf.pointer) ?
                            AE_OK : AE_NOT_FOUND;
        acpi_os_free(buf.pointer);
    } else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
        struct acpi_iort_root_complex *pci_rc;
        struct pci_bus *bus;

        bus = to_pci_bus(dev);
        pci_rc = (struct acpi_iort_root_complex *)node->node_data;

        /*
         * It is assumed that PCI segment numbers map one-to-one to
         * root complexes; each segment number can represent only
         * one root complex.
         */
        status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
                            AE_OK : AE_NOT_FOUND;
    }
out:
    return status;
}

static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
               u32 *rid_out, bool check_overlap)
{
    /* Single mapping does not care for input id */
    if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
        if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
            type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
            *rid_out = map->output_base;
            return 0;
        }

        pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
            map, type);
        return -ENXIO;
    }

    if (rid_in < map->input_base ||
        (rid_in > map->input_base + map->id_count))
        return -ENXIO;

    if (check_overlap) {
        /*
         * We already found a mapping for this input ID at the end of
         * another region. If it coincides with the start of this
         * region, we assume the prior match was due to the off-by-1
         * issue mentioned below, and allow it to be superseded.
         * Otherwise, things are *really* broken, and we just disregard
         * duplicate matches entirely to retain compatibility.
         */
        pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
               map, rid_in);
        if (rid_in != map->input_base)
            return -ENXIO;

        pr_err(FW_BUG "applying workaround.\n");
    }

    *rid_out = map->output_base + (rid_in - map->input_base);

    /*
     * Due to confusion regarding the meaning of the id_count field (which
     * carries the number of IDs *minus 1*), we may have to disregard this
     * match if it is at the end of the range, and overlaps with the start
     * of another one.
     */
    if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
        return -EAGAIN;
    return 0;
}

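/*
 * Editor's note: a worked example (made-up numbers) of the translation
 * above. For a mapping with input_base 0x100, output_base 0x2000 and
 * id_count 0xff (i.e. 0x100 IDs, since id_count is the number of IDs
 * minus one), rid_in 0x142 maps to:
 *
 *	rid_out = 0x2000 + (0x142 - 0x100) = 0x2042
 *
 * A hit exactly at input_base + id_count (0x1ff here) returns -EAGAIN,
 * so that an overlapping mapping starting at 0x1ff may supersede it.
 */
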
static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
                           u32 *id_out, int index)
{
    struct acpi_iort_node *parent;
    struct acpi_iort_id_mapping *map;

    if (!node->mapping_offset || !node->mapping_count ||
                     index >= node->mapping_count)
        return NULL;

    map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
               node->mapping_offset + index * sizeof(*map));

    /* Firmware bug! */
    if (!map->output_reference) {
        pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
               node, node->type);
        return NULL;
    }

    parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                   map->output_reference);

    if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
        if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
            node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
            node->type == ACPI_IORT_NODE_SMMU_V3 ||
            node->type == ACPI_IORT_NODE_PMCG) {
            *id_out = map->output_base;
            return parent;
        }
    }

    return NULL;
}

static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
    struct acpi_iort_smmu_v3 *smmu;
    struct acpi_iort_pmcg *pmcg;

    switch (node->type) {
    case ACPI_IORT_NODE_SMMU_V3:
        /*
         * SMMUv3 dev ID mapping index was introduced in revision 1
         * table, not available in revision 0
         */
        if (node->revision < 1)
            return -EINVAL;

        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
        /*
         * ID mapping index is only ignored if all interrupts are
         * GSIV based
         */
        if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv
            && smmu->sync_gsiv)
            return -EINVAL;

        if (smmu->id_mapping_index >= node->mapping_count) {
            pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
                   node, node->type);
            return -EINVAL;
        }

        return smmu->id_mapping_index;
    case ACPI_IORT_NODE_PMCG:
        pmcg = (struct acpi_iort_pmcg *)node->node_data;
        if (pmcg->overflow_gsiv || node->mapping_count == 0)
            return -EINVAL;

        return 0;
    default:
        return -EINVAL;
    }
}

static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
                           u32 id_in, u32 *id_out,
                           u8 type_mask)
{
    u32 id = id_in;

    /* Parse the ID mapping tree to find specified node type */
    while (node) {
        struct acpi_iort_id_mapping *map;
        int i, index, rc = 0;
        u32 out_ref = 0, map_id = id;

        if (IORT_TYPE_MASK(node->type) & type_mask) {
            if (id_out)
                *id_out = id;
            return node;
        }

        if (!node->mapping_offset || !node->mapping_count)
            goto fail_map;

        map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
                   node->mapping_offset);

        /* Firmware bug! */
        if (!map->output_reference) {
            pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
                   node, node->type);
            goto fail_map;
        }

        /*
         * Get the special ID mapping index (if any) and skip its
         * associated ID map to prevent erroneous multi-stage
         * IORT ID translations.
         */
        index = iort_get_id_mapping_index(node);

        /* Do the ID translation */
        for (i = 0; i < node->mapping_count; i++, map++) {
            /* if it is special mapping index, skip it */
            if (i == index)
                continue;

            rc = iort_id_map(map, node->type, map_id, &id, out_ref);
            if (!rc)
                break;
            if (rc == -EAGAIN)
                out_ref = map->output_reference;
        }

        if (i == node->mapping_count && !out_ref)
            goto fail_map;

        node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                    rc ? out_ref : map->output_reference);
    }

fail_map:
    /* Map input ID to output ID unchanged on mapping failure */
    if (id_out)
        *id_out = id_in;

    return NULL;
}

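/*
 * Editor's note: a sketch (not part of the original file) of the
 * multi-stage walk iort_node_map_id() performs for a hypothetical
 * topology PCI root complex -> SMMUv3 -> ITS group:
 *
 *	u32 dev_id;
 *	struct acpi_iort_node *its;
 *
 *	// Requester ID -> StreamID -> DeviceID, stopping at the ITS group
 *	its = iort_node_map_id(rc_node, rid, &dev_id, IORT_MSI_TYPE);
 *
 * Each iteration either returns the node when its type matches
 * type_mask, or follows map->output_reference up to the parent node.
 */
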
static struct acpi_iort_node *iort_node_map_platform_id(
        struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
        int index)
{
    struct acpi_iort_node *parent;
    u32 id;

    /* step 1: retrieve the initial dev id */
    parent = iort_node_get_id(node, &id, index);
    if (!parent)
        return NULL;

    /*
     * optional step 2: if the parent is not of the target type we
     * want, map the initial dev id again, to handle use cases such
     * as NC (named component) -> SMMU -> ITS. If the type matches,
     * return the initial dev id and its parent pointer directly.
     */
    if (!(IORT_TYPE_MASK(parent->type) & type_mask))
        parent = iort_node_map_id(parent, id, id_out, type_mask);
    else
        if (id_out)
            *id_out = id;

    return parent;
}

static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
    struct pci_bus *pbus;

    if (!dev_is_pci(dev)) {
        struct acpi_iort_node *node;
        /*
         * scan iort_fwnode_list to see if it's an iort platform
         * device (such as an SMMU or PMCG); its iort node was
         * already cached and associated with the fwnode when iort
         * platform devices were initialized.
         */
        node = iort_get_iort_node(dev->fwnode);
        if (node)
            return node;
        /*
         * if not, then it should be a platform device defined in
         * DSDT/SSDT (with a Named Component node in the IORT)
         */
        return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                      iort_match_node_callback, dev);
    }

    pbus = to_pci_dev(dev)->bus;

    return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
                  iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_id() - Map a MSI input ID for a device
 * @dev: The device for which the mapping is to be done.
 * @input_id: The device input ID.
 *
 * Returns: mapped MSI ID on success, input ID otherwise
 */
u32 iort_msi_map_id(struct device *dev, u32 input_id)
{
    struct acpi_iort_node *node;
    u32 dev_id;

    node = iort_find_dev_node(dev);
    if (!node)
        return input_id;

    iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
    return dev_id;
}

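/*
 * Editor's note: a hypothetical caller sketch (not part of the original
 * file) for iort_msi_map_id(); the MSI core performs the real lookup:
 *
 *	u32 rid = pci_dev_id(pdev);
 *	u32 dev_id = iort_msi_map_id(&pdev->dev, rid);
 *
 * On any lookup failure the input ID is returned unchanged, so callers
 * need not special-case missing IORT mappings.
 */
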
/**
 * iort_pmsi_get_dev_id() - Get the device id for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 *
 * Returns: 0 when a dev id is successfully found, -ENODEV on error
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
    int i, index;
    struct acpi_iort_node *node;

    node = iort_find_dev_node(dev);
    if (!node)
        return -ENODEV;

    index = iort_get_id_mapping_index(node);
    /* if there is a valid index, go get the dev_id directly */
    if (index >= 0) {
        if (iort_node_get_id(node, dev_id, index))
            return 0;
    } else {
        for (i = 0; i < node->mapping_count; i++) {
            if (iort_node_map_platform_id(node, dev_id,
                              IORT_MSI_TYPE, i))
                return 0;
        }
    }

    return -ENODEV;
}

static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
{
    struct iort_its_msi_chip *its_msi_chip;
    int ret = -ENODEV;

    spin_lock(&iort_msi_chip_lock);
    list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
        if (its_msi_chip->translation_id == its_id) {
            *base = its_msi_chip->base_addr;
            ret = 0;
            break;
        }
    }
    spin_unlock(&iort_msi_chip_lock);

    return ret;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @id: Device's ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 id,
                unsigned int idx, int *its_id)
{
    struct acpi_iort_its_group *its;
    struct acpi_iort_node *node;

    node = iort_find_dev_node(dev);
    if (!node)
        return -ENXIO;

    node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
    if (!node)
        return -ENXIO;

    /* Move to ITS specific data */
    its = (struct acpi_iort_its_group *)node->node_data;
    if (idx >= its->its_count) {
        dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
            idx, its->its_count);
        return -ENXIO;
    }

    *its_id = its->identifiers[idx];
    return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @id: Requester ID for the device.
 * @bus_token: irq domain bus token.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
                      enum irq_domain_bus_token bus_token)
{
    struct fwnode_handle *handle;
    int its_id;

    if (iort_dev_find_its_id(dev, id, 0, &its_id))
        return NULL;

    handle = iort_find_domain_token(its_id);
    if (!handle)
        return NULL;

    return irq_find_matching_fwnode(handle, bus_token);
}

static void iort_set_device_domain(struct device *dev,
                   struct acpi_iort_node *node)
{
    struct acpi_iort_its_group *its;
    struct acpi_iort_node *msi_parent;
    struct acpi_iort_id_mapping *map;
    struct fwnode_handle *iort_fwnode;
    struct irq_domain *domain;
    int index;

    index = iort_get_id_mapping_index(node);
    if (index < 0)
        return;

    map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
               node->mapping_offset + index * sizeof(*map));

    /* Firmware bug! */
    if (!map->output_reference ||
        !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
        pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
               node, node->type);
        return;
    }

    msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                  map->output_reference);

    if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
        return;

    /* Move to ITS specific data */
    its = (struct acpi_iort_its_group *)msi_parent->node_data;

    iort_fwnode = iort_find_domain_token(its->identifiers[0]);
    if (!iort_fwnode)
        return;

    domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
    if (domain)
        dev_set_msi_domain(dev, domain);
}

/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
    struct acpi_iort_node *node, *msi_parent = NULL;
    struct fwnode_handle *iort_fwnode;
    struct acpi_iort_its_group *its;
    int i;

    /* find its associated iort node */
    node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                  iort_match_node_callback, dev);
    if (!node)
        return NULL;

    /* then find its msi parent node */
    for (i = 0; i < node->mapping_count; i++) {
        msi_parent = iort_node_map_platform_id(node, NULL,
                               IORT_MSI_TYPE, i);
        if (msi_parent)
            break;
    }

    if (!msi_parent)
        return NULL;

    /* Move to ITS specific data */
    its = (struct acpi_iort_its_group *)msi_parent->node_data;

    iort_fwnode = iort_find_domain_token(its->identifiers[0]);
    if (!iort_fwnode)
        return NULL;

    return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}

void acpi_configure_pmsi_domain(struct device *dev)
{
    struct irq_domain *msi_domain;

    msi_domain = iort_get_platform_device_domain(dev);
    if (msi_domain)
        dev_set_msi_domain(dev, msi_domain);
}

#ifdef CONFIG_IOMMU_API
static void iort_rmr_free(struct device *dev,
              struct iommu_resv_region *region)
{
    struct iommu_iort_rmr_data *rmr_data;

    rmr_data = container_of(region, struct iommu_iort_rmr_data, rr);
    kfree(rmr_data->sids);
    kfree(rmr_data);
}

static struct iommu_iort_rmr_data *iort_rmr_alloc(
                    struct acpi_iort_rmr_desc *rmr_desc,
                    int prot, enum iommu_resv_type type,
                    u32 *sids, u32 num_sids)
{
    struct iommu_iort_rmr_data *rmr_data;
    struct iommu_resv_region *region;
    u32 *sids_copy;
    u64 addr = rmr_desc->base_address, size = rmr_desc->length;

    rmr_data = kmalloc(sizeof(*rmr_data), GFP_KERNEL);
    if (!rmr_data)
        return NULL;

    /* Create a copy of SIDs array to associate with this rmr_data */
    sids_copy = kmemdup(sids, num_sids * sizeof(*sids), GFP_KERNEL);
    if (!sids_copy) {
        kfree(rmr_data);
        return NULL;
    }
    rmr_data->sids = sids_copy;
    rmr_data->num_sids = num_sids;

    if (!IS_ALIGNED(addr, SZ_64K) || !IS_ALIGNED(size, SZ_64K)) {
        /* PAGE align base addr and size */
        addr &= PAGE_MASK;
        size = PAGE_ALIGN(size + offset_in_page(rmr_desc->base_address));

        pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] not aligned to 64K, continue with [0x%llx - 0x%llx]\n",
               rmr_desc->base_address,
               rmr_desc->base_address + rmr_desc->length - 1,
               addr, addr + size - 1);
    }

    region = &rmr_data->rr;
    INIT_LIST_HEAD(&region->list);
    region->start = addr;
    region->length = size;
    region->prot = prot;
    region->type = type;
    region->free = iort_rmr_free;

    return rmr_data;
}

static void iort_rmr_desc_check_overlap(struct acpi_iort_rmr_desc *desc,
                    u32 count)
{
    int i, j;

    for (i = 0; i < count; i++) {
        u64 end, start = desc[i].base_address, length = desc[i].length;

        if (!length) {
            pr_err(FW_BUG "RMR descriptor[0x%llx] with zero length, continue anyway\n",
                   start);
            continue;
        }

        end = start + length - 1;

        /* Check for address overlap */
        for (j = i + 1; j < count; j++) {
            u64 e_start = desc[j].base_address;
            u64 e_end = e_start + desc[j].length - 1;

            if (start <= e_end && end >= e_start)
                pr_err(FW_BUG "RMR descriptor[0x%llx - 0x%llx] overlaps, continue anyway\n",
                       start, end);
        }
    }
}

/*
 * Please note, we will keep the already allocated RMR reserve
 * regions in case of a memory allocation failure.
 */
static void iort_get_rmrs(struct acpi_iort_node *node,
              struct acpi_iort_node *smmu,
              u32 *sids, u32 num_sids,
              struct list_head *head)
{
    struct acpi_iort_rmr *rmr = (struct acpi_iort_rmr *)node->node_data;
    struct acpi_iort_rmr_desc *rmr_desc;
    int i;

    rmr_desc = ACPI_ADD_PTR(struct acpi_iort_rmr_desc, node,
                rmr->rmr_offset);

    iort_rmr_desc_check_overlap(rmr_desc, rmr->rmr_count);

    for (i = 0; i < rmr->rmr_count; i++, rmr_desc++) {
        struct iommu_iort_rmr_data *rmr_data;
        enum iommu_resv_type type;
        int prot = IOMMU_READ | IOMMU_WRITE;

        if (rmr->flags & ACPI_IORT_RMR_REMAP_PERMITTED)
            type = IOMMU_RESV_DIRECT_RELAXABLE;
        else
            type = IOMMU_RESV_DIRECT;

        if (rmr->flags & ACPI_IORT_RMR_ACCESS_PRIVILEGE)
            prot |= IOMMU_PRIV;

        /* Attributes 0x00 - 0x03 represent device memory */
        if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) <=
                ACPI_IORT_RMR_ATTR_DEVICE_GRE)
            prot |= IOMMU_MMIO;
        else if (ACPI_IORT_RMR_ACCESS_ATTRIBUTES(rmr->flags) ==
                ACPI_IORT_RMR_ATTR_NORMAL_IWB_OWB)
            prot |= IOMMU_CACHE;

        rmr_data = iort_rmr_alloc(rmr_desc, prot, type,
                      sids, num_sids);
        if (!rmr_data)
            return;

        list_add_tail(&rmr_data->rr.list, head);
    }
}

static u32 *iort_rmr_alloc_sids(u32 *sids, u32 count, u32 id_start,
                u32 new_count)
{
    u32 *new_sids;
    u32 total_count = count + new_count;
    int i;

    new_sids = krealloc_array(sids, count + new_count,
                  sizeof(*new_sids), GFP_KERNEL);
    if (!new_sids)
        return NULL;

    for (i = count; i < total_count; i++)
        new_sids[i] = id_start++;

    return new_sids;
}

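/*
 * Editor's note: a worked example (made-up values) of growing the SID
 * list above across two ID mappings:
 *
 *	sids = iort_rmr_alloc_sids(NULL, 0, 0x40, 4);	// 0x40..0x43
 *	sids = iort_rmr_alloc_sids(sids, 4, 0x80, 2);	// + 0x80..0x81
 *
 * yields num_sids == 6. krealloc_array() returns NULL on failure while
 * leaving the original array allocated, matching the "keep what we have"
 * policy noted above iort_get_rmrs().
 */
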
static bool iort_rmr_has_dev(struct device *dev, u32 id_start,
                 u32 id_count)
{
    int i;
    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

    /*
     * Make sure the kernel has preserved the boot firmware PCIe
     * configuration. This is required to ensure that the RMR PCIe
     * StreamIDs are still valid (Refer: ARM DEN 0049E.d Section 3.1.1.5).
     */
    if (dev_is_pci(dev)) {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);

        if (!host->preserve_config)
            return false;
    }

    for (i = 0; i < fwspec->num_ids; i++) {
        if (fwspec->ids[i] >= id_start &&
            fwspec->ids[i] <= id_start + id_count)
            return true;
    }

    return false;
}

static void iort_node_get_rmr_info(struct acpi_iort_node *node,
                   struct acpi_iort_node *iommu,
                   struct device *dev, struct list_head *head)
{
    struct acpi_iort_node *smmu = NULL;
    struct acpi_iort_rmr *rmr;
    struct acpi_iort_id_mapping *map;
    u32 *sids = NULL;
    u32 num_sids = 0;
    int i;

    if (!node->mapping_offset || !node->mapping_count) {
        pr_err(FW_BUG "Invalid ID mapping, skipping RMR node %p\n",
               node);
        return;
    }

    rmr = (struct acpi_iort_rmr *)node->node_data;
    if (!rmr->rmr_offset || !rmr->rmr_count)
        return;

    map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
               node->mapping_offset);

    /*
     * Go through the ID mappings and see if we have a match for SMMU
     * and dev(if !NULL). If found, get the sids for the Node.
     * Please note, id_count is equal to the number of IDs in the
     * range minus one.
     */
    for (i = 0; i < node->mapping_count; i++, map++) {
        struct acpi_iort_node *parent;

        if (!map->id_count)
            continue;

        parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                      map->output_reference);
        if (parent != iommu)
            continue;

        /* If dev is valid, check RMR node corresponds to the dev SID */
        if (dev && !iort_rmr_has_dev(dev, map->output_base,
                         map->id_count))
            continue;

        /* Retrieve SIDs associated with the Node. */
        sids = iort_rmr_alloc_sids(sids, num_sids, map->output_base,
                       map->id_count + 1);
        if (!sids)
            return;

        num_sids += map->id_count + 1;
    }

    if (!sids)
        return;

    iort_get_rmrs(node, smmu, sids, num_sids, head);
    kfree(sids);
}

static void iort_find_rmrs(struct acpi_iort_node *iommu, struct device *dev,
               struct list_head *head)
{
    struct acpi_table_iort *iort;
    struct acpi_iort_node *iort_node, *iort_end;
    int i;

    /* Only supports ARM DEN 0049E.d onwards */
    if (iort_table->revision < 5)
        return;

    iort = (struct acpi_table_iort *)iort_table;

    iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
                 iort->node_offset);
    iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
                iort_table->length);

    for (i = 0; i < iort->node_count; i++) {
        if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
                   "IORT node pointer overflows, bad table!\n"))
            return;

        if (iort_node->type == ACPI_IORT_NODE_RMR)
            iort_node_get_rmr_info(iort_node, iommu, dev, head);

        iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
                     iort_node->length);
    }
}

/*
 * Populate the RMR list associated with a given IOMMU and dev(if provided).
 * If dev is NULL, the function populates all the RMRs associated with the
 * given IOMMU.
 */
static void iort_iommu_rmr_get_resv_regions(struct fwnode_handle *iommu_fwnode,
                        struct device *dev,
                        struct list_head *head)
{
    struct acpi_iort_node *iommu;

    iommu = iort_get_iort_node(iommu_fwnode);
    if (!iommu)
        return;

    iort_find_rmrs(iommu, dev, head);
}

static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
    struct acpi_iort_node *iommu;
    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

    iommu = iort_get_iort_node(fwspec->iommu_fwnode);

    if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
        struct acpi_iort_smmu_v3 *smmu;

        smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
        if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
            return iommu;
    }

    return NULL;
}

/*
 * Retrieve platform specific HW MSI reserve regions.
 * The ITS interrupt translation spaces (ITS_base + SZ_64K, SZ_64K)
 * associated with the device are the HW MSI reserved regions.
 */
static void iort_iommu_msi_get_resv_regions(struct device *dev,
                        struct list_head *head)
{
    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
    struct acpi_iort_its_group *its;
    struct acpi_iort_node *iommu_node, *its_node = NULL;
    int i;

    iommu_node = iort_get_msi_resv_iommu(dev);
    if (!iommu_node)
        return;

    /*
     * Current logic to reserve ITS regions relies on HW topologies
     * where a given PCI or named component maps its IDs to only one
     * ITS group; if a PCI or named component can map its IDs to
     * different ITS groups through IORT mappings this function has
     * to be reworked to ensure we reserve regions for all ITS groups
     * a given PCI or named component may map IDs to.
     */

    for (i = 0; i < fwspec->num_ids; i++) {
        its_node = iort_node_map_id(iommu_node,
                    fwspec->ids[i],
                    NULL, IORT_MSI_TYPE);
        if (its_node)
            break;
    }

    if (!its_node)
        return;

    /* Move to ITS specific data */
    its = (struct acpi_iort_its_group *)its_node->node_data;

    for (i = 0; i < its->its_count; i++) {
        phys_addr_t base;

        if (!iort_find_its_base(its->identifiers[i], &base)) {
            int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
            struct iommu_resv_region *region;

            region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
                             prot, IOMMU_RESV_MSI);
            if (region)
                list_add_tail(&region->list, head);
        }
    }
}

/**
 * iort_iommu_get_resv_regions - Generic helper to retrieve reserved regions.
 * @dev: Device from iommu_get_resv_regions()
 * @head: Reserved region list from iommu_get_resv_regions()
 */
void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
{
    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

    iort_iommu_msi_get_resv_regions(dev, head);
    iort_iommu_rmr_get_resv_regions(fwspec->iommu_fwnode, dev, head);
}

/**
 * iort_get_rmr_sids - Retrieve IORT RMR node reserved regions with
 *                     associated StreamIDs information.
 * @iommu_fwnode: fwnode associated with IOMMU
 * @head: Reserved region list
 */
void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
               struct list_head *head)
{
    iort_iommu_rmr_get_resv_regions(iommu_fwnode, NULL, head);
}
EXPORT_SYMBOL_GPL(iort_get_rmr_sids);

/**
 * iort_put_rmr_sids - Free memory allocated for RMR reserved regions.
 * @iommu_fwnode: fwnode associated with IOMMU
 * @head: Reserved region list
 */
void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
               struct list_head *head)
{
    struct iommu_resv_region *entry, *next;

    list_for_each_entry_safe(entry, next, head, list)
        entry->free(NULL, entry);
}
EXPORT_SYMBOL_GPL(iort_put_rmr_sids);

static inline bool iort_iommu_driver_enabled(u8 type)
{
    switch (type) {
    case ACPI_IORT_NODE_SMMU_V3:
        return IS_ENABLED(CONFIG_ARM_SMMU_V3);
    case ACPI_IORT_NODE_SMMU:
        return IS_ENABLED(CONFIG_ARM_SMMU);
    default:
        pr_warn("IORT node type %u does not describe an SMMU\n", type);
        return false;
    }
}

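/*
 * Editor's note: IS_ENABLED(CONFIG_FOO) evaluates to 1 for both =y and
 * =m, so the helper above answers "could a matching SMMU driver ever
 * bind?". That feeds the deferral decision in iort_iommu_xlate() below:
 *
 *	if (!ops)
 *		return iort_iommu_driver_enabled(node->type) ?
 *		       -EPROBE_DEFER : -ENODEV;
 *
 * i.e. defer when a matching driver exists but has not probed yet, and
 * fail outright when no driver can ever bind.
 */
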
static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
{
    struct acpi_iort_root_complex *pci_rc;

    pci_rc = (struct acpi_iort_root_complex *)node->node_data;
    return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
}

static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
                u32 streamid)
{
    const struct iommu_ops *ops;
    struct fwnode_handle *iort_fwnode;

    if (!node)
        return -ENODEV;

    iort_fwnode = iort_get_fwnode(node);
    if (!iort_fwnode)
        return -ENODEV;

    /*
     * If the ops look-up fails, it means that either the SMMU
     * drivers have not been probed yet or that the SMMU drivers
     * are not built into the kernel; depending on whether the
     * SMMU drivers are built-in, defer the IOMMU configuration
     * or just abort it.
     */
    ops = iommu_ops_from_fwnode(iort_fwnode);
    if (!ops)
        return iort_iommu_driver_enabled(node->type) ?
               -EPROBE_DEFER : -ENODEV;

    return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode, ops);
}

struct iort_pci_alias_info {
    struct device *dev;
    struct acpi_iort_node *node;
};

static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
    struct iort_pci_alias_info *info = data;
    struct acpi_iort_node *parent;
    u32 streamid;

    parent = iort_node_map_id(info->node, alias, &streamid,
                  IORT_IOMMU_TYPE);
    return iort_iommu_xlate(info->dev, parent, streamid);
}

static void iort_named_component_init(struct device *dev,
                      struct acpi_iort_node *node)
{
    struct property_entry props[3] = {};
    struct acpi_iort_named_component *nc;

    nc = (struct acpi_iort_named_component *)node->node_data;
    props[0] = PROPERTY_ENTRY_U32("pasid-num-bits",
                      FIELD_GET(ACPI_IORT_NC_PASID_BITS,
                        nc->node_flags));
    if (nc->node_flags & ACPI_IORT_NC_STALL_SUPPORTED)
        props[1] = PROPERTY_ENTRY_BOOL("dma-can-stall");

    if (device_create_managed_software_node(dev, props, NULL))
        dev_warn(dev, "Could not add device properties\n");
}

static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
{
    struct acpi_iort_node *parent;
    int err = -ENODEV, i = 0;
    u32 streamid = 0;

    do {
        parent = iort_node_map_platform_id(node, &streamid,
                           IORT_IOMMU_TYPE,
                           i++);

        if (parent)
            err = iort_iommu_xlate(dev, parent, streamid);
    } while (parent && !err);

    return err;
}

static int iort_nc_iommu_map_id(struct device *dev,
                struct acpi_iort_node *node,
                const u32 *in_id)
{
    struct acpi_iort_node *parent;
    u32 streamid;

    parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
    if (parent)
        return iort_iommu_xlate(dev, parent, streamid);

    return -ENODEV;
}

/**
 * iort_iommu_configure_id - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 * @id_in: optional input id const value pointer
 *
 * Returns: 0 on success, <0 on failure
 */
int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
{
    struct acpi_iort_node *node;
    int err = -ENODEV;

    if (dev_is_pci(dev)) {
        struct iommu_fwspec *fwspec;
        struct pci_bus *bus = to_pci_dev(dev)->bus;
        struct iort_pci_alias_info info = { .dev = dev };

        node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
                      iort_match_node_callback, &bus->dev);
        if (!node)
            return -ENODEV;

        info.node = node;
        err = pci_for_each_dma_alias(to_pci_dev(dev),
                         iort_pci_iommu_init, &info);

        fwspec = dev_iommu_fwspec_get(dev);
        if (fwspec && iort_pci_rc_supports_ats(node))
            fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
    } else {
        node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                      iort_match_node_callback, dev);
        if (!node)
            return -ENODEV;

        err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
                  iort_nc_iommu_map(dev, node);

        if (!err)
            iort_named_component_init(dev, node);
    }

    return err;
}

#else
void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
{ }
int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
{ return -ENODEV; }
#endif

static int nc_dma_get_range(struct device *dev, u64 *size)
{
    struct acpi_iort_node *node;
    struct acpi_iort_named_component *ncomp;

    node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                  iort_match_node_callback, dev);
    if (!node)
        return -ENODEV;

    ncomp = (struct acpi_iort_named_component *)node->node_data;

    if (!ncomp->memory_address_limit) {
        pr_warn(FW_BUG "Named component missing memory address limit\n");
        return -EINVAL;
    }

    *size = ncomp->memory_address_limit >= 64 ? U64_MAX :
            1ULL<<ncomp->memory_address_limit;

    return 0;
}

static int rc_dma_get_range(struct device *dev, u64 *size)
{
    struct acpi_iort_node *node;
    struct acpi_iort_root_complex *rc;
    struct pci_bus *pbus = to_pci_dev(dev)->bus;

    node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
                  iort_match_node_callback, &pbus->dev);
    if (!node || node->revision < 1)
        return -ENODEV;

    rc = (struct acpi_iort_root_complex *)node->node_data;

    if (!rc->memory_address_limit) {
        pr_warn(FW_BUG "Root complex missing memory address limit\n");
        return -EINVAL;
    }

    *size = rc->memory_address_limit >= 64 ? U64_MAX :
            1ULL<<rc->memory_address_limit;

    return 0;
}

/**
 * iort_dma_get_ranges() - Look up DMA addressing limit for the device
 * @dev: device to lookup
 * @size: DMA range size result pointer
 *
 * Return: 0 on success, an error otherwise.
 */
int iort_dma_get_ranges(struct device *dev, u64 *size)
{
    if (dev_is_pci(dev))
        return rc_dma_get_range(dev, size);
    else
        return nc_dma_get_range(dev, size);
}

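/*
 * Editor's note: a worked example of the memory_address_limit conversion
 * used by both helpers above. A named component or root complex
 * reporting a 32-bit limit yields:
 *
 *	*size = 1ULL << 32;	// 4 GiB addressable
 *
 * while a limit of 64 or more saturates to U64_MAX, avoiding a shift by
 * the full width of the type (undefined behaviour in C).
 */
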
static void __init acpi_iort_register_irq(int hwirq, const char *name,
                      int trigger,
                      struct resource *res)
{
    int irq = acpi_register_gsi(NULL, hwirq, trigger,
                    ACPI_ACTIVE_HIGH);

    if (irq <= 0) {
        pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
                                      name);
        return;
    }

    res->start = irq;
    res->end = irq;
    res->flags = IORESOURCE_IRQ;
    res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
    struct acpi_iort_smmu_v3 *smmu;
    /* Always present mem resource */
    int num_res = 1;

    /* Retrieve SMMUv3 specific data */
    smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

    if (smmu->event_gsiv)
        num_res++;

    if (smmu->pri_gsiv)
        num_res++;

    if (smmu->gerr_gsiv)
        num_res++;

    if (smmu->sync_gsiv)
        num_res++;

    return num_res;
}

static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{
    /*
     * The Cavium ThunderX2 implementation doesn't support unique irq
     * lines. Use a single irq line for all the SMMUv3 interrupts.
     */
    if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
        return false;

    /*
     * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
     * SPI numbers here.
     */
    return smmu->event_gsiv == smmu->pri_gsiv &&
           smmu->event_gsiv == smmu->gerr_gsiv &&
           smmu->event_gsiv == smmu->sync_gsiv;
}

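/*
 * Editor's note: a made-up configuration that the check above would
 * classify as "combined": a Cavium CN99xx SMMUv3 whose event, PRI,
 * GERROR and sync interrupts all share SPI 100:
 *
 *	smmu->model      = ACPI_IORT_SMMU_V3_CAVIUM_CN99XX;
 *	smmu->event_gsiv = smmu->pri_gsiv  = 100;
 *	smmu->gerr_gsiv  = smmu->sync_gsiv = 100;
 *
 * In that case arm_smmu_v3_init_resources() below registers a single
 * "combined" IRQ resource instead of four separate ones.
 */
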
1486 static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
1487 {
1488     /*
1489      * Override the size, for Cavium ThunderX2 implementation
1490      * which doesn't support the page 1 SMMU register space.
1491      */
1492     if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
1493         return SZ_64K;
1494 
1495     return SZ_128K;
1496 }
1497 
1498 static void __init arm_smmu_v3_init_resources(struct resource *res,
1499                           struct acpi_iort_node *node)
1500 {
1501     struct acpi_iort_smmu_v3 *smmu;
1502     int num_res = 0;
1503 
1504     /* Retrieve SMMUv3 specific data */
1505     smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1506 
1507     res[num_res].start = smmu->base_address;
1508     res[num_res].end = smmu->base_address +
1509                 arm_smmu_v3_resource_size(smmu) - 1;
1510     res[num_res].flags = IORESOURCE_MEM;
1511 
1512     num_res++;
1513     if (arm_smmu_v3_is_combined_irq(smmu)) {
1514         if (smmu->event_gsiv)
1515             acpi_iort_register_irq(smmu->event_gsiv, "combined",
1516                            ACPI_EDGE_SENSITIVE,
1517                            &res[num_res++]);
1518     } else {
1519 
1520         if (smmu->event_gsiv)
1521             acpi_iort_register_irq(smmu->event_gsiv, "eventq",
1522                            ACPI_EDGE_SENSITIVE,
1523                            &res[num_res++]);
1524 
1525         if (smmu->pri_gsiv)
1526             acpi_iort_register_irq(smmu->pri_gsiv, "priq",
1527                            ACPI_EDGE_SENSITIVE,
1528                            &res[num_res++]);
1529 
1530         if (smmu->gerr_gsiv)
1531             acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
1532                            ACPI_EDGE_SENSITIVE,
1533                            &res[num_res++]);
1534 
1535         if (smmu->sync_gsiv)
1536             acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
1537                            ACPI_EDGE_SENSITIVE,
1538                            &res[num_res++]);
1539     }
1540 }
1541 
1542 static void __init arm_smmu_v3_dma_configure(struct device *dev,
1543                          struct acpi_iort_node *node)
1544 {
1545     struct acpi_iort_smmu_v3 *smmu;
1546     enum dev_dma_attr attr;
1547 
1548     /* Retrieve SMMUv3 specific data */
1549     smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1550 
1551     attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
1552             DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
1553 
1554     /* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
1555     dev->dma_mask = &dev->coherent_dma_mask;
1556 
1557     /* Configure DMA for the page table walker */
1558     acpi_dma_configure(dev, attr);
1559 }
1560 
1561 #if defined(CONFIG_ACPI_NUMA)
1562 /*
1563  * set numa proximity domain for smmuv3 device
1564  */
1565 static int  __init arm_smmu_v3_set_proximity(struct device *dev,
1566                           struct acpi_iort_node *node)
1567 {
1568     struct acpi_iort_smmu_v3 *smmu;
1569 
1570     smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1571     if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
1572         int dev_node = pxm_to_node(smmu->pxm);
1573 
1574         if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
1575             return -EINVAL;
1576 
1577         set_dev_node(dev, dev_node);
1578         pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
1579             smmu->base_address,
1580             smmu->pxm);
1581     }
1582     return 0;
1583 }
1584 #else
1585 #define arm_smmu_v3_set_proximity NULL
1586 #endif
1587 
1588 static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
1589 {
1590     struct acpi_iort_smmu *smmu;
1591 
1592     /* Retrieve SMMU specific data */
1593     smmu = (struct acpi_iort_smmu *)node->node_data;
1594 
1595     /*
1596      * Only consider the global fault interrupt and ignore the
1597      * configuration access interrupt.
1598      *
1599      * MMIO address and global fault interrupt resources are always
1600      * present so add them to the context interrupt count as a static
1601      * value.
1602      */
1603     return smmu->context_interrupt_count + 2;
1604 }
1605 
1606 static void __init arm_smmu_init_resources(struct resource *res,
1607                        struct acpi_iort_node *node)
1608 {
1609     struct acpi_iort_smmu *smmu;
1610     int i, hw_irq, trigger, num_res = 0;
1611     u64 *ctx_irq, *glb_irq;
1612 
1613     /* Retrieve SMMU specific data */
1614     smmu = (struct acpi_iort_smmu *)node->node_data;
1615 
1616     res[num_res].start = smmu->base_address;
1617     res[num_res].end = smmu->base_address + smmu->span - 1;
1618     res[num_res].flags = IORESOURCE_MEM;
1619     num_res++;
1620 
1621     glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
1622     /* Global IRQs */
1623     hw_irq = IORT_IRQ_MASK(glb_irq[0]);
1624     trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);
1625 
1626     acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
1627                      &res[num_res++]);
1628 
1629     /* Context IRQs */
1630     ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
1631     for (i = 0; i < smmu->context_interrupt_count; i++) {
1632         hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
1633         trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);
1634 
1635         acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
1636                        &res[num_res++]);
1637     }
1638 }
1639 
1640 static void __init arm_smmu_dma_configure(struct device *dev,
1641                       struct acpi_iort_node *node)
1642 {
1643     struct acpi_iort_smmu *smmu;
1644     enum dev_dma_attr attr;
1645 
1646     /* Retrieve SMMU specific data */
1647     smmu = (struct acpi_iort_smmu *)node->node_data;
1648 
1649     attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
1650             DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;
1651 
1652     /* We expect the dma masks to be equivalent for SMMU set-ups */
1653     dev->dma_mask = &dev->coherent_dma_mask;
1654 
1655     /* Configure DMA for the page table walker */
1656     acpi_dma_configure(dev, attr);
1657 }
1658 
1659 static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
1660 {
1661     struct acpi_iort_pmcg *pmcg;
1662 
1663     /* Retrieve PMCG specific data */
1664     pmcg = (struct acpi_iort_pmcg *)node->node_data;
1665 
1666     /*
1667      * There are always 2 memory resources.
1668      * If the overflow_gsiv is present then add that for a total of 3.
1669      */
1670     return pmcg->overflow_gsiv ? 3 : 2;
1671 }
1672 
1673 static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
1674                            struct acpi_iort_node *node)
1675 {
1676     struct acpi_iort_pmcg *pmcg;
1677 
1678     /* Retrieve PMCG specific data */
1679     pmcg = (struct acpi_iort_pmcg *)node->node_data;
1680 
1681     res[0].start = pmcg->page0_base_address;
1682     res[0].end = pmcg->page0_base_address + SZ_4K - 1;
1683     res[0].flags = IORESOURCE_MEM;
1684     /*
1685      * The initial version in DEN0049C lacked a way to describe register
1686      * page 1, which makes it broken for most PMCG implementations; in
1687      * that case, just let the driver fail gracefully if it expects to
1688      * find a second memory resource.
1689      */
1690     if (node->revision > 0) {
1691         res[1].start = pmcg->page1_base_address;
1692         res[1].end = pmcg->page1_base_address + SZ_4K - 1;
1693         res[1].flags = IORESOURCE_MEM;
1694     }
1695 
1696     if (pmcg->overflow_gsiv)
1697         acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
1698                        ACPI_EDGE_SENSITIVE, &res[2]);
1699 }
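/*
 * For a revision > 0 table, the PMCG resources thus end up as:
 *
 *	res[0]  IORESOURCE_MEM  register page 0 (SZ_4K)
 *	res[1]  IORESOURCE_MEM  register page 1 (SZ_4K)
 *	res[2]  overflow interrupt, only if overflow_gsiv is non-zero
 */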
1700 
1701 static struct acpi_platform_list pmcg_plat_info[] __initdata = {
1702     /* HiSilicon Hip08 Platform */
1703     {"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
1704      "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
1705     { }
1706 };
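/*
 * acpi_match_platform_list() compares the OEM ID, OEM table ID and OEM
 * revision in the IORT header against the entries above, which is how
 * a quirky implementation such as HiSilicon Hip08 is told apart from
 * spec-compliant PMCGs.
 */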
1707 
1708 static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
1709 {
1710     u32 model;
1711     int idx;
1712 
1713     idx = acpi_match_platform_list(pmcg_plat_info);
1714     if (idx >= 0)
1715         model = pmcg_plat_info[idx].data;
1716     else
1717         model = IORT_SMMU_V3_PMCG_GENERIC;
1718 
1719     return platform_device_add_data(pdev, &model, sizeof(model));
1720 }
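/*
 * The matched model value (or IORT_SMMU_V3_PMCG_GENERIC) reaches the
 * PMCG driver as platform_data, where it can key any erratum
 * workarounds.
 */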
1721 
1722 struct iort_dev_config {
1723     const char *name;
1724     int (*dev_init)(struct acpi_iort_node *node);
1725     void (*dev_dma_configure)(struct device *dev,
1726                   struct acpi_iort_node *node);
1727     int (*dev_count_resources)(struct acpi_iort_node *node);
1728     void (*dev_init_resources)(struct resource *res,
1729                      struct acpi_iort_node *node);
1730     int (*dev_set_proximity)(struct device *dev,
1731                     struct acpi_iort_node *node);
1732     int (*dev_add_platdata)(struct platform_device *pdev);
1733 };
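/*
 * One of these op tables exists per IORT node type that is turned into
 * a platform device. Apart from .name, only .dev_count_resources and
 * .dev_init_resources are invoked unconditionally by
 * iort_add_platform_device(); every other hook is optional and
 * NULL-checked before use.
 */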
1734 
1735 static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
1736     .name = "arm-smmu-v3",
1737     .dev_dma_configure = arm_smmu_v3_dma_configure,
1738     .dev_count_resources = arm_smmu_v3_count_resources,
1739     .dev_init_resources = arm_smmu_v3_init_resources,
1740     .dev_set_proximity = arm_smmu_v3_set_proximity,
1741 };
1742 
1743 static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
1744     .name = "arm-smmu",
1745     .dev_dma_configure = arm_smmu_dma_configure,
1746     .dev_count_resources = arm_smmu_count_resources,
1747     .dev_init_resources = arm_smmu_init_resources,
1748 };
1749 
1750 static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
1751     .name = "arm-smmu-v3-pmcg",
1752     .dev_count_resources = arm_smmu_v3_pmcg_count_resources,
1753     .dev_init_resources = arm_smmu_v3_pmcg_init_resources,
1754     .dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
1755 };
1756 
1757 static __init const struct iort_dev_config *iort_get_dev_cfg(
1758             struct acpi_iort_node *node)
1759 {
1760     switch (node->type) {
1761     case ACPI_IORT_NODE_SMMU_V3:
1762         return &iort_arm_smmu_v3_cfg;
1763     case ACPI_IORT_NODE_SMMU:
1764         return &iort_arm_smmu_cfg;
1765     case ACPI_IORT_NODE_PMCG:
1766         return &iort_arm_smmu_v3_pmcg_cfg;
1767     default:
1768         return NULL;
1769     }
1770 }
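/*
 * Only the three node types above become platform devices; every other
 * node type (ITS group, named component, root complex, ...) returns
 * NULL here and is instead parsed in place when devices are mapped
 * through the IORT.
 */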
1771 
1772 /**
1773  * iort_add_platform_device() - Allocate a platform device for IORT node
1774  * @node: Pointer to device ACPI IORT node
1775  * @ops: Pointer to IORT device config struct
1776  *
1777  * Returns: 0 on success, <0 on failure
1778  */
1779 static int __init iort_add_platform_device(struct acpi_iort_node *node,
1780                        const struct iort_dev_config *ops)
1781 {
1782     struct fwnode_handle *fwnode;
1783     struct platform_device *pdev;
1784     struct resource *r;
1785     int ret, count;
1786 
1787     pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
1788     if (!pdev)
1789         return -ENOMEM;
1790 
1791     if (ops->dev_set_proximity) {
1792         ret = ops->dev_set_proximity(&pdev->dev, node);
1793         if (ret)
1794             goto dev_put;
1795     }
1796 
1797     count = ops->dev_count_resources(node);
1798 
1799     r = kcalloc(count, sizeof(*r), GFP_KERNEL);
1800     if (!r) {
1801         ret = -ENOMEM;
1802         goto dev_put;
1803     }
1804 
1805     ops->dev_init_resources(r, node);
1806 
1807     ret = platform_device_add_resources(pdev, r, count);
1808     /*
1809      * Resources are duplicated in platform_device_add_resources,
1810      * free their allocated memory
1811      */
1812     kfree(r);
1813 
1814     if (ret)
1815         goto dev_put;
1816 
1817     /*
1818      * Platform devices based on PMCG nodes use platform_data to
1819      * pass the hardware model info to the driver. For others, add
1820      * a copy of the IORT node pointer to platform_data, which is
1821      * later used to retrieve the IORT data.
1822      */
1823     if (ops->dev_add_platdata)
1824         ret = ops->dev_add_platdata(pdev);
1825     else
1826         ret = platform_device_add_data(pdev, &node, sizeof(node));
1827 
1828     if (ret)
1829         goto dev_put;
1830 
1831     fwnode = iort_get_fwnode(node);
1832 
1833     if (!fwnode) {
1834         ret = -ENODEV;
1835         goto dev_put;
1836     }
1837 
1838     pdev->dev.fwnode = fwnode;
1839 
1840     if (ops->dev_dma_configure)
1841         ops->dev_dma_configure(&pdev->dev, node);
1842 
1843     iort_set_device_domain(&pdev->dev, node);
1844 
1845     ret = platform_device_add(pdev);
1846     if (ret)
1847         goto dma_deconfigure;
1848 
1849     return 0;
1850 
1851 dma_deconfigure:
1852     arch_teardown_dma_ops(&pdev->dev);
1853 dev_put:
1854     platform_device_put(pdev);
1855 
1856     return ret;
1857 }
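/*
 * The error paths above lean on platform_device_put(): dropping the
 * last reference frees the resources and platform_data copies already
 * attached to the pdev. Only the DMA configuration needs explicit
 * teardown, and only once .dev_dma_configure may have installed arch
 * DMA ops.
 */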
1858 
1859 #ifdef CONFIG_PCI
1860 static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
1861 {
1862     static bool acs_enabled __initdata;
1863 
1864     if (acs_enabled)
1865         return;
1866 
1867     if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
1868         struct acpi_iort_node *parent;
1869         struct acpi_iort_id_mapping *map;
1870         int i;
1871 
1872         map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
1873                    iort_node->mapping_offset);
1874 
1875         for (i = 0; i < iort_node->mapping_count; i++, map++) {
1876             if (!map->output_reference)
1877                 continue;
1878 
1879             parent = ACPI_ADD_PTR(struct acpi_iort_node,
1880                     iort_table,  map->output_reference);
1881             /*
1882              * If we detect a RC->SMMU mapping, make sure
1883              * we enable ACS on the system.
1884              */
1885             if ((parent->type == ACPI_IORT_NODE_SMMU) ||
1886                 (parent->type == ACPI_IORT_NODE_SMMU_V3)) {
1887                 pci_request_acs();
1888                 acs_enabled = true;
1889                 return;
1890             }
1891         }
1892     }
1893 }
1894 #else
1895 static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
1896 #endif
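/*
 * ACS (Access Control Services) matters once an SMMU translates PCI
 * DMA: without it, peer-to-peer transactions between endpoints could
 * bypass the IOMMU. pci_request_acs() therefore flags ACS for
 * enablement the first time any RC->SMMU mapping is found.
 */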
1897 
1898 static void __init iort_init_platform_devices(void)
1899 {
1900     struct acpi_iort_node *iort_node, *iort_end;
1901     struct acpi_table_iort *iort;
1902     struct fwnode_handle *fwnode;
1903     int i, ret;
1904     const struct iort_dev_config *ops;
1905 
1906     /*
1907      * iort_table and iort both point to the start of the IORT table,
1908      * but have different struct types.
1909      */
1910     iort = (struct acpi_table_iort *)iort_table;
1911 
1912     /* Get the first IORT node */
1913     iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1914                  iort->node_offset);
1915     iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
1916                 iort_table->length);
1917 
1918     for (i = 0; i < iort->node_count; i++) {
1919         if (iort_node >= iort_end) {
1920             pr_err("iort node pointer overflows, bad table\n");
1921             return;
1922         }
1923 
1924         iort_enable_acs(iort_node);
1925 
1926         ops = iort_get_dev_cfg(iort_node);
1927         if (ops) {
1928             fwnode = acpi_alloc_fwnode_static();
1929             if (!fwnode)
1930                 return;
1931 
1932             iort_set_fwnode(iort_node, fwnode);
1933 
1934             ret = iort_add_platform_device(iort_node, ops);
1935             if (ret) {
1936                 iort_delete_fwnode(iort_node);
1937                 acpi_free_fwnode_static(fwnode);
1938                 return;
1939             }
1940         }
1941 
1942         iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
1943                      iort_node->length);
1944     }
1945 }
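/*
 * The walk above is doubly bounded: node_count from the header drives
 * the loop, while the iort_node >= iort_end check catches a bogus
 * node->length that would step past the end of the table, so a
 * malformed IORT is reported rather than overrun.
 */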
1946 
1947 void __init acpi_iort_init(void)
1948 {
1949     acpi_status status;
1950 
1951     /* iort_table will be used at runtime after the IORT init,
1952      * so we don't need to call acpi_put_table() to release
1953      * the IORT table mapping.
1954      */
1955     status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
1956     if (ACPI_FAILURE(status)) {
1957         if (status != AE_NOT_FOUND) {
1958             const char *msg = acpi_format_exception(status);
1959 
1960             pr_err("Failed to get table, %s\n", msg);
1961         }
1962 
1963         return;
1964     }
1965 
1966     iort_init_platform_devices();
1967 }
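/*
 * AE_NOT_FOUND stays silent on purpose: a system that simply does not
 * provide an IORT skips all of the device creation above; any other
 * ACPI failure is unexpected and gets logged.
 */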
1968 
1969 #ifdef CONFIG_ZONE_DMA
1970 /*
1971  * Extract the highest CPU physical address accessible to all DMA masters in
1972  * the system. PHYS_ADDR_MAX is returned when no constrained device is found.
1973  */
1974 phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void)
1975 {
1976     phys_addr_t limit = PHYS_ADDR_MAX;
1977     struct acpi_iort_node *node, *end;
1978     struct acpi_table_iort *iort;
1979     acpi_status status;
1980     int i;
1981 
1982     if (acpi_disabled)
1983         return limit;
1984 
1985     status = acpi_get_table(ACPI_SIG_IORT, 0,
1986                 (struct acpi_table_header **)&iort);
1987     if (ACPI_FAILURE(status))
1988         return limit;
1989 
1990     node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
1991     end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length);
1992 
1993     for (i = 0; i < iort->node_count; i++) {
1994         if (node >= end)
1995             break;
1996 
1997         switch (node->type) {
1998             struct acpi_iort_named_component *ncomp;
1999             struct acpi_iort_root_complex *rc;
2000             phys_addr_t local_limit;
2001 
2002         case ACPI_IORT_NODE_NAMED_COMPONENT:
2003             ncomp = (struct acpi_iort_named_component *)node->node_data;
2004             local_limit = DMA_BIT_MASK(ncomp->memory_address_limit);
2005             limit = min_not_zero(limit, local_limit);
2006             break;
2007 
2008         case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
2009             if (node->revision < 1)
2010                 break;
2011 
2012             rc = (struct acpi_iort_root_complex *)node->node_data;
2013             local_limit = DMA_BIT_MASK(rc->memory_address_limit);
2014             limit = min_not_zero(limit, local_limit);
2015             break;
2016         }
2017         node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
2018     }
2019     acpi_put_table(&iort->header);
2020     return limit;
2021 }
2022 #endif
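/*
 * Example: a named component with memory_address_limit == 32
 * contributes DMA_BIT_MASK(32) == 0xffffffff; if that is the most
 * constrained master, the function returns a 4 GiB - 1 limit for the
 * arch code to size ZONE_DMA against. A zero limit (field not set)
 * yields DMA_BIT_MASK(0) == 0, which min_not_zero() ignores.
 */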