// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt bus support
 *
 * Copyright (C) 2017, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>

#include "tb.h"

static DEFINE_IDA(tb_domain_ida);

static bool match_service_id(const struct tb_service_id *id,
                 const struct tb_service *svc)
{
    if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
        if (strcmp(id->protocol_key, svc->key))
            return false;
    }

    if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
        if (id->protocol_id != svc->prtcid)
            return false;
    }

    if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
        if (id->protocol_version != svc->prtcvers)
            return false;
    }

    if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
        if (id->protocol_revision != svc->prtcrevs)
            return false;
    }

    return true;
}

static const struct tb_service_id *__tb_service_match(struct device *dev,
                              struct device_driver *drv)
{
    struct tb_service_driver *driver;
    const struct tb_service_id *ids;
    struct tb_service *svc;

    svc = tb_to_service(dev);
    if (!svc)
        return NULL;

    driver = container_of(drv, struct tb_service_driver, driver);
    if (!driver->id_table)
        return NULL;

    for (ids = driver->id_table; ids->match_flags != 0; ids++) {
        if (match_service_id(ids, svc))
            return ids;
    }

    return NULL;
}

static int tb_service_match(struct device *dev, struct device_driver *drv)
{
    return !!__tb_service_match(dev, drv);
}

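/*
 * Illustrative sketch (not part of the original file): a service
 * driver binds to this bus by declaring an id table that the matching
 * code above walks. The names "my_ids", "my_probe" and "my_driver" are
 * hypothetical; TB_SERVICE() and tb_register_service_driver() come
 * from <linux/thunderbolt.h>:
 *
 *	static const struct tb_service_id my_ids[] = {
 *		{ TB_SERVICE("network", 1) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(tbsvc, my_ids);
 *
 *	static struct tb_service_driver my_driver = {
 *		.driver = { .name = "my-service" },
 *		.probe = my_probe,
 *		.id_table = my_ids,
 *	};
 *
 *	ret = tb_register_service_driver(&my_driver);
 */
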
static int tb_service_probe(struct device *dev)
{
    struct tb_service *svc = tb_to_service(dev);
    struct tb_service_driver *driver;
    const struct tb_service_id *id;

    driver = container_of(dev->driver, struct tb_service_driver, driver);
    id = __tb_service_match(dev, &driver->driver);

    return driver->probe(svc, id);
}

static void tb_service_remove(struct device *dev)
{
    struct tb_service *svc = tb_to_service(dev);
    struct tb_service_driver *driver;

    driver = container_of(dev->driver, struct tb_service_driver, driver);
    if (driver->remove)
        driver->remove(svc);
}

static void tb_service_shutdown(struct device *dev)
{
    struct tb_service_driver *driver;
    struct tb_service *svc;

    svc = tb_to_service(dev);
    if (!svc || !dev->driver)
        return;

    driver = container_of(dev->driver, struct tb_service_driver, driver);
    if (driver->shutdown)
        driver->shutdown(svc);
}

static const char * const tb_security_names[] = {
    [TB_SECURITY_NONE] = "none",
    [TB_SECURITY_USER] = "user",
    [TB_SECURITY_SECURE] = "secure",
    [TB_SECURITY_DPONLY] = "dponly",
    [TB_SECURITY_USBONLY] = "usbonly",
    [TB_SECURITY_NOPCIE] = "nopcie",
};

static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
                 char *buf)
{
    struct tb *tb = container_of(dev, struct tb, dev);
    uuid_t *uuids;
    ssize_t ret;
    int i;

    uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
    if (!uuids)
        return -ENOMEM;

    pm_runtime_get_sync(&tb->dev);

    if (mutex_lock_interruptible(&tb->lock)) {
        ret = -ERESTARTSYS;
        goto out;
    }
    ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
    if (ret) {
        mutex_unlock(&tb->lock);
        goto out;
    }
    mutex_unlock(&tb->lock);

    for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
        if (!uuid_is_null(&uuids[i]))
            ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
                    &uuids[i]);

        ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s",
                   i < tb->nboot_acl - 1 ? "," : "\n");
    }

out:
    pm_runtime_mark_last_busy(&tb->dev);
    pm_runtime_put_autosuspend(&tb->dev);
    kfree(uuids);

    return ret;
}

static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
                  const char *buf, size_t count)
{
    struct tb *tb = container_of(dev, struct tb, dev);
    char *str, *s, *uuid_str;
    ssize_t ret = 0;
    uuid_t *acl;
    int i = 0;

    /*
     * Make sure the value is not bigger than tb->nboot_acl * UUID
     * length + commas and optional "\n". Also the smallest allowable
     * string is tb->nboot_acl * ",".
     */
    if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
        return -EINVAL;
    if (count < tb->nboot_acl - 1)
        return -EINVAL;

    str = kstrdup(buf, GFP_KERNEL);
    if (!str)
        return -ENOMEM;

    acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
    if (!acl) {
        ret = -ENOMEM;
        goto err_free_str;
    }

    uuid_str = strim(str);
    while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
        size_t len = strlen(s);

        if (len) {
            if (len != UUID_STRING_LEN) {
                ret = -EINVAL;
                goto err_free_acl;
            }
            ret = uuid_parse(s, &acl[i]);
            if (ret)
                goto err_free_acl;
        }

        i++;
    }

    if (s || i < tb->nboot_acl) {
        ret = -EINVAL;
        goto err_free_acl;
    }

    pm_runtime_get_sync(&tb->dev);

    if (mutex_lock_interruptible(&tb->lock)) {
        ret = -ERESTARTSYS;
        goto err_rpm_put;
    }
    ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
    if (!ret) {
        /* Notify userspace about the change */
        kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
    }
    mutex_unlock(&tb->lock);

err_rpm_put:
    pm_runtime_mark_last_busy(&tb->dev);
    pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
    kfree(acl);
err_free_str:
    kfree(str);

    return ret ?: count;
}
static DEVICE_ATTR_RW(boot_acl);
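
/*
 * Illustrative sketch (not part of the original file): from userspace
 * the boot ACL is read and written as a comma-separated list of UUIDs,
 * one slot per entry; leaving a slot empty clears that entry. Assuming
 * the domain is domain0:
 *
 *	# cat /sys/bus/thunderbolt/devices/domain0/boot_acl
 *	# echo <uuid1>,<uuid2> \
 *		> /sys/bus/thunderbolt/devices/domain0/boot_acl
 */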

static ssize_t deauthorization_show(struct device *dev,
                    struct device_attribute *attr,
                    char *buf)
{
    const struct tb *tb = container_of(dev, struct tb, dev);
    bool deauthorization = false;

    /* Only meaningful if authorization is supported */
    if (tb->security_level == TB_SECURITY_USER ||
        tb->security_level == TB_SECURITY_SECURE)
        deauthorization = !!tb->cm_ops->disapprove_switch;

    return sprintf(buf, "%d\n", deauthorization);
}
static DEVICE_ATTR_RO(deauthorization);

static ssize_t iommu_dma_protection_show(struct device *dev,
                     struct device_attribute *attr,
                     char *buf)
{
    struct tb *tb = container_of(dev, struct tb, dev);

    return sysfs_emit(buf, "%d\n", tb->nhi->iommu_dma_protection);
}
static DEVICE_ATTR_RO(iommu_dma_protection);

static ssize_t security_show(struct device *dev, struct device_attribute *attr,
                 char *buf)
{
    struct tb *tb = container_of(dev, struct tb, dev);
    const char *name = "unknown";

    if (tb->security_level < ARRAY_SIZE(tb_security_names))
        name = tb_security_names[tb->security_level];

    return sprintf(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);

static struct attribute *domain_attrs[] = {
    &dev_attr_boot_acl.attr,
    &dev_attr_deauthorization.attr,
    &dev_attr_iommu_dma_protection.attr,
    &dev_attr_security.attr,
    NULL,
};

static umode_t domain_attr_is_visible(struct kobject *kobj,
                      struct attribute *attr, int n)
{
    struct device *dev = kobj_to_dev(kobj);
    struct tb *tb = container_of(dev, struct tb, dev);

    if (attr == &dev_attr_boot_acl.attr) {
        if (tb->nboot_acl &&
            tb->cm_ops->get_boot_acl &&
            tb->cm_ops->set_boot_acl)
            return attr->mode;
        return 0;
    }

    return attr->mode;
}

static const struct attribute_group domain_attr_group = {
    .is_visible = domain_attr_is_visible,
    .attrs = domain_attrs,
};

static const struct attribute_group *domain_attr_groups[] = {
    &domain_attr_group,
    NULL,
};

struct bus_type tb_bus_type = {
    .name = "thunderbolt",
    .match = tb_service_match,
    .probe = tb_service_probe,
    .remove = tb_service_remove,
    .shutdown = tb_service_shutdown,
};

static void tb_domain_release(struct device *dev)
{
    struct tb *tb = container_of(dev, struct tb, dev);

    tb_ctl_free(tb->ctl);
    destroy_workqueue(tb->wq);
    ida_simple_remove(&tb_domain_ida, tb->index);
    mutex_destroy(&tb->lock);
    kfree(tb);
}

struct device_type tb_domain_type = {
    .name = "thunderbolt_domain",
    .release = tb_domain_release,
};

static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
                   const void *buf, size_t size)
{
    struct tb *tb = data;

    if (!tb->cm_ops->handle_event) {
        tb_warn(tb, "domain does not have event handler\n");
        return true;
    }

    switch (type) {
    case TB_CFG_PKG_XDOMAIN_REQ:
    case TB_CFG_PKG_XDOMAIN_RESP:
        if (tb_is_xdomain_enabled())
            return tb_xdomain_handle_request(tb, type, buf, size);
        break;

    default:
        tb->cm_ops->handle_event(tb, type, buf, size);
    }

    return true;
}

/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @timeout_msec: Control channel timeout for non-raw messages
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize)
{
    struct tb *tb;

    /*
     * Make sure the structure sizes match what the hardware expects
     * because bit-fields are being used.
     */
    BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
    BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
    BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

    tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
    if (!tb)
        return NULL;

    tb->nhi = nhi;
    mutex_init(&tb->lock);

    tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
    if (tb->index < 0)
        goto err_free;

    tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
    if (!tb->wq)
        goto err_remove_ida;

    tb->ctl = tb_ctl_alloc(nhi, timeout_msec, tb_domain_event_cb, tb);
    if (!tb->ctl)
        goto err_destroy_wq;

    tb->dev.parent = &nhi->pdev->dev;
    tb->dev.bus = &tb_bus_type;
    tb->dev.type = &tb_domain_type;
    tb->dev.groups = domain_attr_groups;
    dev_set_name(&tb->dev, "domain%d", tb->index);
    device_initialize(&tb->dev);

    return tb;

err_destroy_wq:
    destroy_workqueue(tb->wq);
err_remove_ida:
    ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
    kfree(tb);

    return NULL;
}

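/*
 * Illustrative sketch (not part of the original file): a connection
 * manager typically allocates the domain, fills in @cm_ops and then
 * adds the domain. "my_cm_ops", "my_priv" and "MY_TIMEOUT" are
 * hypothetical names:
 *
 *	tb = tb_domain_alloc(nhi, MY_TIMEOUT, sizeof(struct my_priv));
 *	if (!tb)
 *		return NULL;
 *
 *	tb->cm_ops = &my_cm_ops;
 *
 *	if (tb_domain_add(tb)) {
 *		tb_domain_put(tb);
 *		return NULL;
 *	}
 */
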
/**
 * tb_domain_add() - Add domain to the system
 * @tb: Domain to add
 *
 * Starts the domain and adds it to the system. Hotplugging devices
 * works once this function has returned successfully. In order to
 * remove and release the domain after this function has been called,
 * call tb_domain_remove().
 *
 * Return: %0 in case of success and negative errno in case of error
 */
int tb_domain_add(struct tb *tb)
{
    int ret;

    if (WARN_ON(!tb->cm_ops))
        return -EINVAL;

    mutex_lock(&tb->lock);
    /*
     * tb_schedule_hotplug_handler may be called as soon as the config
     * channel is started. That's why we have to hold the lock here.
     */
    tb_ctl_start(tb->ctl);

    if (tb->cm_ops->driver_ready) {
        ret = tb->cm_ops->driver_ready(tb);
        if (ret)
            goto err_ctl_stop;
    }

    tb_dbg(tb, "security level set to %s\n",
           tb_security_names[tb->security_level]);

    ret = device_add(&tb->dev);
    if (ret)
        goto err_ctl_stop;

    /* Start the domain */
    if (tb->cm_ops->start) {
        ret = tb->cm_ops->start(tb);
        if (ret)
            goto err_domain_del;
    }

    /* This starts event processing */
    mutex_unlock(&tb->lock);

    device_init_wakeup(&tb->dev, true);

    pm_runtime_no_callbacks(&tb->dev);
    pm_runtime_set_active(&tb->dev);
    pm_runtime_enable(&tb->dev);
    pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
    pm_runtime_mark_last_busy(&tb->dev);
    pm_runtime_use_autosuspend(&tb->dev);

    return 0;

err_domain_del:
    device_del(&tb->dev);
err_ctl_stop:
    tb_ctl_stop(tb->ctl);
    mutex_unlock(&tb->lock);

    return ret;
}

/**
 * tb_domain_remove() - Removes and releases a domain
 * @tb: Domain to remove
 *
 * Stops the domain, removes it from the system and releases all
 * resources once the last reference has been released.
 */
void tb_domain_remove(struct tb *tb)
{
    mutex_lock(&tb->lock);
    if (tb->cm_ops->stop)
        tb->cm_ops->stop(tb);
    /* Stop the domain control traffic */
    tb_ctl_stop(tb->ctl);
    mutex_unlock(&tb->lock);

    flush_workqueue(tb->wq);
    device_unregister(&tb->dev);
}

/**
 * tb_domain_suspend_noirq() - Suspend a domain
 * @tb: Domain to suspend
 *
 * Suspends all devices in the domain and stops the control channel.
 */
int tb_domain_suspend_noirq(struct tb *tb)
{
    int ret = 0;

    /*
     * The control channel interrupt is left enabled during suspend
     * and taking the lock here prevents any events happening before
     * we actually have stopped the domain and the control channel.
     */
    mutex_lock(&tb->lock);
    if (tb->cm_ops->suspend_noirq)
        ret = tb->cm_ops->suspend_noirq(tb);
    if (!ret)
        tb_ctl_stop(tb->ctl);
    mutex_unlock(&tb->lock);

    return ret;
}

/**
 * tb_domain_resume_noirq() - Resume a domain
 * @tb: Domain to resume
 *
 * Re-starts the control channel, and resumes all devices connected to
 * the domain.
 */
int tb_domain_resume_noirq(struct tb *tb)
{
    int ret = 0;

    mutex_lock(&tb->lock);
    tb_ctl_start(tb->ctl);
    if (tb->cm_ops->resume_noirq)
        ret = tb->cm_ops->resume_noirq(tb);
    mutex_unlock(&tb->lock);

    return ret;
}

int tb_domain_suspend(struct tb *tb)
{
    return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
}

int tb_domain_freeze_noirq(struct tb *tb)
{
    int ret = 0;

    mutex_lock(&tb->lock);
    if (tb->cm_ops->freeze_noirq)
        ret = tb->cm_ops->freeze_noirq(tb);
    if (!ret)
        tb_ctl_stop(tb->ctl);
    mutex_unlock(&tb->lock);

    return ret;
}

int tb_domain_thaw_noirq(struct tb *tb)
{
    int ret = 0;

    mutex_lock(&tb->lock);
    tb_ctl_start(tb->ctl);
    if (tb->cm_ops->thaw_noirq)
        ret = tb->cm_ops->thaw_noirq(tb);
    mutex_unlock(&tb->lock);

    return ret;
}

void tb_domain_complete(struct tb *tb)
{
    if (tb->cm_ops->complete)
        tb->cm_ops->complete(tb);
}

int tb_domain_runtime_suspend(struct tb *tb)
{
    if (tb->cm_ops->runtime_suspend) {
        int ret = tb->cm_ops->runtime_suspend(tb);
        if (ret)
            return ret;
    }
    tb_ctl_stop(tb->ctl);
    return 0;
}

int tb_domain_runtime_resume(struct tb *tb)
{
    tb_ctl_start(tb->ctl);
    if (tb->cm_ops->runtime_resume) {
        int ret = tb->cm_ops->runtime_resume(tb);
        if (ret)
            return ret;
    }
    return 0;
}

/**
 * tb_domain_disapprove_switch() - Disapprove switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to disapprove
 *
 * This disconnects the PCIe tunnel from the parent to @sw.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw)
{
    if (!tb->cm_ops->disapprove_switch)
        return -EPERM;

    return tb->cm_ops->disapprove_switch(tb, sw);
}

/**
 * tb_domain_approve_switch() - Approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * This approves the switch by connection manager specific means. On
 * success the connection manager will create a PCIe tunnel from the
 * parent to @sw.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
    struct tb_switch *parent_sw;

    if (!tb->cm_ops->approve_switch)
        return -EPERM;

    /* The parent switch must be authorized before this one */
    parent_sw = tb_to_switch(sw->dev.parent);
    if (!parent_sw || !parent_sw->authorized)
        return -EINVAL;

    return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_approve_switch_key() - Approve switch and add key
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function first adds
 * the key to the switch NVM using connection manager specific means.
 * If adding the key is successful, the switch is approved and
 * connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
{
    struct tb_switch *parent_sw;
    int ret;

    if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
        return -EPERM;

    /* The parent switch must be authorized before this one */
    parent_sw = tb_to_switch(sw->dev.parent);
    if (!parent_sw || !parent_sw->authorized)
        return -EINVAL;

    ret = tb->cm_ops->add_switch_key(tb, sw);
    if (ret)
        return ret;

    return tb->cm_ops->approve_switch(tb, sw);
}

/**
 * tb_domain_challenge_switch_key() - Challenge and approve switch
 * @tb: Domain the switch belongs to
 * @sw: Switch to approve
 *
 * For switches that support secure connect, this function generates a
 * random challenge and sends it to the switch. The switch responds
 * with an HMAC of the challenge, and if that response matches the HMAC
 * we compute locally over the same challenge, the switch is approved
 * and connected.
 *
 * Return: %0 on success and negative errno in case of failure.
 */
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
    u8 challenge[TB_SWITCH_KEY_SIZE];
    u8 response[TB_SWITCH_KEY_SIZE];
    u8 hmac[TB_SWITCH_KEY_SIZE];
    struct tb_switch *parent_sw;
    struct crypto_shash *tfm;
    struct shash_desc *shash;
    int ret;

    if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
        return -EPERM;

    /* The parent switch must be authorized before this one */
    parent_sw = tb_to_switch(sw->dev.parent);
    if (!parent_sw || !parent_sw->authorized)
        return -EINVAL;

    get_random_bytes(challenge, sizeof(challenge));
    ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
    if (ret)
        return ret;

    tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
    if (ret)
        goto err_free_tfm;

    shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
            GFP_KERNEL);
    if (!shash) {
        ret = -ENOMEM;
        goto err_free_tfm;
    }

    shash->tfm = tfm;

    memset(hmac, 0, sizeof(hmac));
    ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
    if (ret)
        goto err_free_shash;

    /* The returned HMAC must match the one we calculated */
    if (memcmp(response, hmac, sizeof(hmac))) {
        ret = -EKEYREJECTED;
        goto err_free_shash;
    }

    crypto_free_shash(tfm);
    kfree(shash);

    return tb->cm_ops->approve_switch(tb, sw);

err_free_shash:
    kfree(shash);
err_free_tfm:
    crypto_free_shash(tfm);

    return ret;
}
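
/*
 * Illustrative note (not part of the original file): the comparison
 * above amounts to verifying that
 *
 *	response == HMAC-SHA256(sw->key, challenge)
 *
 * where sw->key is the key previously stored in the switch NVM and
 * challenge is the random nonce generated by the host above.
 */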

/**
 * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
 * @tb: Domain whose PCIe paths to disconnect
 *
 * This needs to be called in preparation for NVM upgrade of the host
 * controller. Makes sure all PCIe paths are disconnected.
 *
 * Return: %0 on success and negative errno in case of error.
 */
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
    if (!tb->cm_ops->disconnect_pcie_paths)
        return -EPERM;

    return tb->cm_ops->disconnect_pcie_paths(tb);
}

/**
 * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
 * @tb: Domain enabling the DMA paths
 * @xd: XDomain DMA paths are created to
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * Calls connection manager specific method to enable DMA paths to the
 * XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
                    int transmit_path, int transmit_ring,
                    int receive_path, int receive_ring)
{
    if (!tb->cm_ops->approve_xdomain_paths)
        return -ENOTSUPP;

    return tb->cm_ops->approve_xdomain_paths(tb, xd, transmit_path,
            transmit_ring, receive_path, receive_ring);
}

/**
 * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
 * @tb: Domain disabling the DMA paths
 * @xd: XDomain whose DMA paths are disconnected
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * Calls connection manager specific method to disconnect DMA paths to
 * the XDomain in question.
 *
 * Return: %0 in case of success and negative errno otherwise. In
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
                       int transmit_path, int transmit_ring,
                       int receive_path, int receive_ring)
{
    if (!tb->cm_ops->disconnect_xdomain_paths)
        return -ENOTSUPP;

    return tb->cm_ops->disconnect_xdomain_paths(tb, xd, transmit_path,
            transmit_ring, receive_path, receive_ring);
}

static int disconnect_xdomain(struct device *dev, void *data)
{
    struct tb_xdomain *xd;
    struct tb *tb = data;
    int ret = 0;

    xd = tb_to_xdomain(dev);
    if (xd && xd->tb == tb)
        ret = tb_xdomain_disable_all_paths(xd);

    return ret;
}

/**
 * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
 * @tb: Domain whose paths are disconnected
 *
 * This function can be used to disconnect all paths (PCIe, XDomain) for
 * example in preparation for host NVM firmware upgrade. After this is
 * called the paths cannot be established without resetting the switch.
 *
 * Return: %0 in case of success and negative errno otherwise.
 */
int tb_domain_disconnect_all_paths(struct tb *tb)
{
    int ret;

    ret = tb_domain_disconnect_pcie_paths(tb);
    if (ret)
        return ret;

    return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
}

int tb_domain_init(void)
{
    int ret;

    tb_debugfs_init();
    tb_acpi_init();

    ret = tb_xdomain_init();
    if (ret)
        goto err_acpi;
    ret = bus_register(&tb_bus_type);
    if (ret)
        goto err_xdomain;

    return 0;

err_xdomain:
    tb_xdomain_exit();
err_acpi:
    tb_acpi_exit();
    tb_debugfs_exit();

    return ret;
}

void tb_domain_exit(void)
{
    bus_unregister(&tb_bus_type);
    ida_destroy(&tb_domain_ida);
    tb_nvm_exit();
    tb_xdomain_exit();
    tb_acpi_exit();
    tb_debugfs_exit();
}