Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Greybus connections
0004  *
0005  * Copyright 2014 Google Inc.
0006  * Copyright 2014 Linaro Ltd.
0007  */
0008 
0009 #include <linux/workqueue.h>
0010 #include <linux/greybus.h>
0011 
0012 #include "greybus_trace.h"
0013 
0014 #define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT 1000
0015 
0016 static void gb_connection_kref_release(struct kref *kref);
0017 
0018 static DEFINE_SPINLOCK(gb_connections_lock);
0019 static DEFINE_MUTEX(gb_connection_mutex);
0020 
0021 /* Caller holds gb_connection_mutex. */
0022 static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
0023 {
0024     struct gb_host_device *hd = intf->hd;
0025     struct gb_connection *connection;
0026 
0027     list_for_each_entry(connection, &hd->connections, hd_links) {
0028         if (connection->intf == intf &&
0029             connection->intf_cport_id == cport_id)
0030             return true;
0031     }
0032 
0033     return false;
0034 }
0035 
/* Take a reference on a connection; paired with gb_connection_put(). */
static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);

	trace_gb_connection_get(connection);
}
0042 
/* Drop a connection reference; the final put frees the connection. */
static void gb_connection_put(struct gb_connection *connection)
{
	/* Trace before the put, as the last reference frees the structure. */
	trace_gb_connection_put(connection);

	kref_put(&connection->kref, gb_connection_kref_release);
}
0049 
0050 /*
0051  * Returns a reference-counted pointer to the connection if found.
0052  */
0053 static struct gb_connection *
0054 gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
0055 {
0056     struct gb_connection *connection;
0057     unsigned long flags;
0058 
0059     spin_lock_irqsave(&gb_connections_lock, flags);
0060     list_for_each_entry(connection, &hd->connections, hd_links)
0061         if (connection->hd_cport_id == cport_id) {
0062             gb_connection_get(connection);
0063             goto found;
0064         }
0065     connection = NULL;
0066 found:
0067     spin_unlock_irqrestore(&gb_connections_lock, flags);
0068 
0069     return connection;
0070 }
0071 
0072 /*
0073  * Callback from the host driver to let us know that data has been
0074  * received on the bundle.
0075  */
0076 void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
0077                u8 *data, size_t length)
0078 {
0079     struct gb_connection *connection;
0080 
0081     trace_gb_hd_in(hd);
0082 
0083     connection = gb_connection_hd_find(hd, cport_id);
0084     if (!connection) {
0085         dev_err(&hd->dev,
0086             "nonexistent connection (%zu bytes dropped)\n", length);
0087         return;
0088     }
0089     gb_connection_recv(connection, data, length);
0090     gb_connection_put(connection);
0091 }
0092 EXPORT_SYMBOL_GPL(greybus_data_rcvd);
0093 
/* Last-reference callback: frees the connection structure. */
static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	trace_gb_connection_release(connection);

	kfree(connection);
}
0104 
0105 static void gb_connection_init_name(struct gb_connection *connection)
0106 {
0107     u16 hd_cport_id = connection->hd_cport_id;
0108     u16 cport_id = 0;
0109     u8 intf_id = 0;
0110 
0111     if (connection->intf) {
0112         intf_id = connection->intf->interface_id;
0113         cport_id = connection->intf_cport_id;
0114     }
0115 
0116     snprintf(connection->name, sizeof(connection->name),
0117          "%u/%u:%u", hd_cport_id, intf_id, cport_id);
0118 }
0119 
/*
 * _gb_connection_create() - create a Greybus connection
 * @hd:			host device of the connection
 * @hd_cport_id:	host-device cport id, or -1 for dynamic allocation
 * @intf:		remote interface, or NULL for static connections
 * @bundle:		remote-interface bundle (may be NULL)
 * @cport_id:		remote-interface cport id, or 0 for static connections
 * @handler:		request handler (may be NULL)
 * @flags:		connection flags
 *
 * Create a Greybus connection, representing the bidirectional link
 * between a CPort on a (local) Greybus host device and a CPort on
 * another Greybus interface.
 *
 * A connection also maintains the state of operations sent over the
 * connection.
 *
 * Serialised against concurrent create and destroy using the
 * gb_connection_mutex.
 *
 * Return: A pointer to the new connection if successful, or an ERR_PTR
 * otherwise.
 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
		      struct gb_interface *intf,
		      struct gb_bundle *bundle, int cport_id,
		      gb_request_handler_t handler,
		      unsigned long flags)
{
	struct gb_connection *connection;
	int ret;

	mutex_lock(&gb_connection_mutex);

	/* A remote cport can only back one connection at a time. */
	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	/* Returns the allocated id when hd_cport_id was -1 (dynamic). */
	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	/* Interfaces lacking cport features cannot use per-cport flow control. */
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	/* Per-connection workqueue (max_active = 1), named after the cport. */
	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	/* Publish the connection on the host-device (and bundle) lists. */
	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}
0226 
/* Create a connection to a host-device cport with no remote interface. */
struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
			    gb_request_handler_t handler)
{
	/* Fixed hd_cport_id; no interface or bundle; high priority. */
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}
0234 
/* Create the (high-priority) control connection for an interface. */
struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	/* Dynamic host cport; no request handler is installed here. */
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
				     GB_CONNECTION_FLAG_CONTROL |
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}
0242 
0243 struct gb_connection *
0244 gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
0245              gb_request_handler_t handler)
0246 {
0247     struct gb_interface *intf = bundle->intf;
0248 
0249     return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
0250                      handler, 0);
0251 }
0252 EXPORT_SYMBOL_GPL(gb_connection_create);
0253 
0254 struct gb_connection *
0255 gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
0256                gb_request_handler_t handler,
0257                unsigned long flags)
0258 {
0259     struct gb_interface *intf = bundle->intf;
0260 
0261     if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
0262         flags &= ~GB_CONNECTION_FLAG_CORE_MASK;
0263 
0264     return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
0265                      handler, flags);
0266 }
0267 EXPORT_SYMBOL_GPL(gb_connection_create_flags);
0268 
0269 struct gb_connection *
0270 gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
0271                    unsigned long flags)
0272 {
0273     flags |= GB_CONNECTION_FLAG_OFFLOADED;
0274 
0275     return gb_connection_create_flags(bundle, cport_id, NULL, flags);
0276 }
0277 EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
0278 
0279 static int gb_connection_hd_cport_enable(struct gb_connection *connection)
0280 {
0281     struct gb_host_device *hd = connection->hd;
0282     int ret;
0283 
0284     if (!hd->driver->cport_enable)
0285         return 0;
0286 
0287     ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
0288                        connection->flags);
0289     if (ret) {
0290         dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
0291             connection->name, ret);
0292         return ret;
0293     }
0294 
0295     return 0;
0296 }
0297 
0298 static void gb_connection_hd_cport_disable(struct gb_connection *connection)
0299 {
0300     struct gb_host_device *hd = connection->hd;
0301     int ret;
0302 
0303     if (!hd->driver->cport_disable)
0304         return;
0305 
0306     ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
0307     if (ret) {
0308         dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
0309             connection->name, ret);
0310     }
0311 }
0312 
0313 static int gb_connection_hd_cport_connected(struct gb_connection *connection)
0314 {
0315     struct gb_host_device *hd = connection->hd;
0316     int ret;
0317 
0318     if (!hd->driver->cport_connected)
0319         return 0;
0320 
0321     ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
0322     if (ret) {
0323         dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
0324             connection->name, ret);
0325         return ret;
0326     }
0327 
0328     return 0;
0329 }
0330 
0331 static int gb_connection_hd_cport_flush(struct gb_connection *connection)
0332 {
0333     struct gb_host_device *hd = connection->hd;
0334     int ret;
0335 
0336     if (!hd->driver->cport_flush)
0337         return 0;
0338 
0339     ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
0340     if (ret) {
0341         dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
0342             connection->name, ret);
0343         return ret;
0344     }
0345 
0346     return 0;
0347 }
0348 
0349 static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
0350 {
0351     struct gb_host_device *hd = connection->hd;
0352     size_t peer_space;
0353     int ret;
0354 
0355     if (!hd->driver->cport_quiesce)
0356         return 0;
0357 
0358     peer_space = sizeof(struct gb_operation_msg_hdr) +
0359             sizeof(struct gb_cport_shutdown_request);
0360 
0361     if (connection->mode_switch)
0362         peer_space += sizeof(struct gb_operation_msg_hdr);
0363 
0364     ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
0365                     peer_space,
0366                     GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
0367     if (ret) {
0368         dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
0369             connection->name, ret);
0370         return ret;
0371     }
0372 
0373     return 0;
0374 }
0375 
0376 static int gb_connection_hd_cport_clear(struct gb_connection *connection)
0377 {
0378     struct gb_host_device *hd = connection->hd;
0379     int ret;
0380 
0381     if (!hd->driver->cport_clear)
0382         return 0;
0383 
0384     ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
0385     if (ret) {
0386         dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
0387             connection->name, ret);
0388         return ret;
0389     }
0390 
0391     return 0;
0392 }
0393 
0394 /*
0395  * Request the SVC to create a connection from AP's cport to interface's
0396  * cport.
0397  */
0398 static int
0399 gb_connection_svc_connection_create(struct gb_connection *connection)
0400 {
0401     struct gb_host_device *hd = connection->hd;
0402     struct gb_interface *intf;
0403     u8 cport_flags;
0404     int ret;
0405 
0406     if (gb_connection_is_static(connection))
0407         return 0;
0408 
0409     intf = connection->intf;
0410 
0411     /*
0412      * Enable either E2EFC or CSD, unless no flow control is requested.
0413      */
0414     cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
0415     if (gb_connection_flow_control_disabled(connection)) {
0416         cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
0417     } else if (gb_connection_e2efc_enabled(connection)) {
0418         cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
0419                 GB_SVC_CPORT_FLAG_E2EFC;
0420     }
0421 
0422     ret = gb_svc_connection_create(hd->svc,
0423                        hd->svc->ap_intf_id,
0424                        connection->hd_cport_id,
0425                        intf->interface_id,
0426                        connection->intf_cport_id,
0427                        cport_flags);
0428     if (ret) {
0429         dev_err(&connection->hd->dev,
0430             "%s: failed to create svc connection: %d\n",
0431             connection->name, ret);
0432         return ret;
0433     }
0434 
0435     return 0;
0436 }
0437 
0438 static void
0439 gb_connection_svc_connection_destroy(struct gb_connection *connection)
0440 {
0441     if (gb_connection_is_static(connection))
0442         return;
0443 
0444     gb_svc_connection_destroy(connection->hd->svc,
0445                   connection->hd->svc->ap_intf_id,
0446                   connection->hd_cport_id,
0447                   connection->intf->interface_id,
0448                   connection->intf_cport_id);
0449 }
0450 
0451 /* Inform Interface about active CPorts */
0452 static int gb_connection_control_connected(struct gb_connection *connection)
0453 {
0454     struct gb_control *control;
0455     u16 cport_id = connection->intf_cport_id;
0456     int ret;
0457 
0458     if (gb_connection_is_static(connection))
0459         return 0;
0460 
0461     if (gb_connection_is_control(connection))
0462         return 0;
0463 
0464     control = connection->intf->control;
0465 
0466     ret = gb_control_connected_operation(control, cport_id);
0467     if (ret) {
0468         dev_err(&connection->bundle->dev,
0469             "failed to connect cport: %d\n", ret);
0470         return ret;
0471     }
0472 
0473     return 0;
0474 }
0475 
0476 static void
0477 gb_connection_control_disconnecting(struct gb_connection *connection)
0478 {
0479     struct gb_control *control;
0480     u16 cport_id = connection->intf_cport_id;
0481     int ret;
0482 
0483     if (gb_connection_is_static(connection))
0484         return;
0485 
0486     control = connection->intf->control;
0487 
0488     ret = gb_control_disconnecting_operation(control, cport_id);
0489     if (ret) {
0490         dev_err(&connection->hd->dev,
0491             "%s: failed to send disconnecting: %d\n",
0492             connection->name, ret);
0493     }
0494 }
0495 
/*
 * Inform the interface that a cport has been disconnected.  For the
 * control connection itself no disconnected operation is sent; instead a
 * pending mode switch, if any, is triggered here.
 */
static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	/* Static connections have no remote interface to inform. */
	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	if (gb_connection_is_control(connection)) {
		if (connection->mode_switch) {
			ret = gb_control_mode_switch_operation(control);
			if (ret) {
				/*
				 * Allow mode switch to time out waiting for
				 * mailbox event.
				 */
				return;
			}
		}

		/* Control connections never send a disconnected operation. */
		return;
	}

	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}
0529 
0530 static int gb_connection_shutdown_operation(struct gb_connection *connection,
0531                         u8 phase)
0532 {
0533     struct gb_cport_shutdown_request *req;
0534     struct gb_operation *operation;
0535     int ret;
0536 
0537     operation = gb_operation_create_core(connection,
0538                          GB_REQUEST_TYPE_CPORT_SHUTDOWN,
0539                          sizeof(*req), 0, 0,
0540                          GFP_KERNEL);
0541     if (!operation)
0542         return -ENOMEM;
0543 
0544     req = operation->request->payload;
0545     req->phase = phase;
0546 
0547     ret = gb_operation_request_send_sync(operation);
0548 
0549     gb_operation_put(operation);
0550 
0551     return ret;
0552 }
0553 
/*
 * Send the cport-shutdown request for the given phase, either through the
 * host-device driver (offloaded connections) or as a regular Greybus
 * operation on the connection itself.
 */
static int gb_connection_cport_shutdown(struct gb_connection *connection,
					u8 phase)
{
	struct gb_host_device *hd = connection->hd;
	const struct gb_hd_driver *drv = hd->driver;
	int ret;

	/* Static connections take no part in the shutdown handshake. */
	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_offloaded(connection)) {
		/* Offloaded cports shut down via the host driver, if at all. */
		if (!drv->cport_shutdown)
			return 0;

		ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
					  GB_OPERATION_TIMEOUT_DEFAULT);
	} else {
		ret = gb_connection_shutdown_operation(connection, phase);
	}

	if (ret) {
		dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
			connection->name, phase, ret);
		return ret;
	}

	return 0;
}
0582 
/* First phase of the two-step cport-shutdown handshake. */
static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 1);
}
0588 
/* Second (final) phase of the two-step cport-shutdown handshake. */
static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 2);
}
0594 
/*
 * Cancel all active operations on a connection.
 *
 * Locking: Called with connection lock held and state set to DISABLED or
 * DISCONNECTING.
 */
static void gb_connection_cancel_operations(struct gb_connection *connection,
					    int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;

	while (!list_empty(&connection->operations)) {
		operation = list_last_entry(&connection->operations,
					    struct gb_operation, links);
		/* Pin the operation so it survives while the lock is dropped. */
		gb_operation_get(operation);
		/* Drop the lock around the cancel call; reacquired below. */
		spin_unlock_irq(&connection->lock);

		if (gb_operation_is_incoming(operation))
			gb_operation_cancel_incoming(operation, errno);
		else
			gb_operation_cancel(operation, errno);

		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}
0623 
/*
 * Cancel all active incoming operations on a connection.
 *
 * Locking: Called with connection lock held and state set to ENABLED_TX.
 */
static void
gb_connection_flush_incoming_operations(struct gb_connection *connection,
					int errno)
	__must_hold(&connection->lock)
{
	struct gb_operation *operation;
	bool incoming;

	while (!list_empty(&connection->operations)) {
		incoming = false;
		/* Find the next incoming operation, if any, and pin it. */
		list_for_each_entry(operation, &connection->operations,
				    links) {
			if (gb_operation_is_incoming(operation)) {
				gb_operation_get(operation);
				incoming = true;
				break;
			}
		}

		/* Only outgoing operations remain; leave them untouched. */
		if (!incoming)
			break;

		/* Drop the lock around the cancel call; reacquired below. */
		spin_unlock_irq(&connection->lock);

		/* FIXME: flush, not cancel? */
		gb_operation_cancel_incoming(operation, errno);
		gb_operation_put(operation);

		spin_lock_irq(&connection->lock);
	}
}
0660 
/*
 * _gb_connection_enable() - enable a connection
 * @connection:		connection to enable
 * @rx:			whether to enable incoming requests
 *
 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
 * ENABLED_TX->ENABLED state transitions.
 *
 * Locking: Caller holds connection->mutex.
 */
static int _gb_connection_enable(struct gb_connection *connection, bool rx)
{
	int ret;

	/* Handle ENABLED_TX -> ENABLED transitions. */
	if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
		if (!(connection->handler && rx))
			return 0;

		spin_lock_irq(&connection->lock);
		connection->state = GB_CONNECTION_STATE_ENABLED;
		spin_unlock_irq(&connection->lock);

		return 0;
	}

	ret = gb_connection_hd_cport_enable(connection);
	if (ret)
		return ret;

	ret = gb_connection_svc_connection_create(connection);
	if (ret)
		goto err_hd_cport_clear;

	ret = gb_connection_hd_cport_connected(connection);
	if (ret)
		goto err_svc_connection_destroy;

	/* RX is only enabled when there is a handler to dispatch requests to. */
	spin_lock_irq(&connection->lock);
	if (connection->handler && rx)
		connection->state = GB_CONNECTION_STATE_ENABLED;
	else
		connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	spin_unlock_irq(&connection->lock);

	ret = gb_connection_control_connected(connection);
	if (ret)
		goto err_control_disconnecting;

	return 0;

err_control_disconnecting:
	/* Unwind in the same order gb_connection_disable() tears down. */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	/* Transmit queue should already be empty. */
	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);
	connection->state = GB_CONNECTION_STATE_DISABLED;
err_svc_connection_destroy:
	gb_connection_svc_connection_destroy(connection);
err_hd_cport_clear:
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	return ret;
}
0736 
0737 int gb_connection_enable(struct gb_connection *connection)
0738 {
0739     int ret = 0;
0740 
0741     mutex_lock(&connection->mutex);
0742 
0743     if (connection->state == GB_CONNECTION_STATE_ENABLED)
0744         goto out_unlock;
0745 
0746     ret = _gb_connection_enable(connection, true);
0747     if (!ret)
0748         trace_gb_connection_enable(connection);
0749 
0750 out_unlock:
0751     mutex_unlock(&connection->mutex);
0752 
0753     return ret;
0754 }
0755 EXPORT_SYMBOL_GPL(gb_connection_enable);
0756 
0757 int gb_connection_enable_tx(struct gb_connection *connection)
0758 {
0759     int ret = 0;
0760 
0761     mutex_lock(&connection->mutex);
0762 
0763     if (connection->state == GB_CONNECTION_STATE_ENABLED) {
0764         ret = -EINVAL;
0765         goto out_unlock;
0766     }
0767 
0768     if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
0769         goto out_unlock;
0770 
0771     ret = _gb_connection_enable(connection, false);
0772     if (!ret)
0773         trace_gb_connection_enable(connection);
0774 
0775 out_unlock:
0776     mutex_unlock(&connection->mutex);
0777 
0778     return ret;
0779 }
0780 EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
0781 
/*
 * Stop accepting incoming requests while keeping the transmit side
 * operational (ENABLED -> ENABLED_TX).
 */
void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		/* Nothing to do unless the connection was fully enabled. */
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	/* Cancel incoming operations still in flight (lock held as required). */
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	trace_gb_connection_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
0801 
/* Mark the connection so gb_connection_disable() defers final tear-down. */
void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
	connection->mode_switch = true;
}
0806 
/*
 * Perform the tear-down that gb_connection_disable() deferred while a
 * mode switch was pending, then clear the mode-switch flag.
 */
void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	connection->mode_switch = false;
}
0816 
/*
 * Disable a connection: cancel outstanding operations, run the two-phase
 * cport-shutdown handshake with the remote end, and release host-side
 * resources (unless a pending mode switch defers the final tear-down).
 */
void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	/* Stop new traffic and cancel whatever is still in flight. */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	/* Ordered shutdown handshake with the remote end. */
	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);

	connection->state = GB_CONNECTION_STATE_DISABLED;

	/* control-connection tear down is deferred when mode switching */
	if (!connection->mode_switch) {
		gb_connection_svc_connection_destroy(connection);
		gb_connection_hd_cport_clear(connection);

		gb_connection_hd_cport_disable(connection);
	}

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);
0853 
/* Disable a connection without communicating with the remote end. */
void gb_connection_disable_forced(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	/* No handshake: go straight to DISABLED and cancel everything. */
	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);
out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
0879 
/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	if (!connection)
		return;

	/* Last-resort fixup; callers are expected to disable first. */
	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
		gb_connection_disable(connection);

	mutex_lock(&gb_connection_mutex);

	/* Remove the connection from the host-device and bundle lists. */
	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	/* Drop the initial reference; this may free the connection. */
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);
0906 
0907 void gb_connection_latency_tag_enable(struct gb_connection *connection)
0908 {
0909     struct gb_host_device *hd = connection->hd;
0910     int ret;
0911 
0912     if (!hd->driver->latency_tag_enable)
0913         return;
0914 
0915     ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
0916     if (ret) {
0917         dev_err(&connection->hd->dev,
0918             "%s: failed to enable latency tag: %d\n",
0919             connection->name, ret);
0920     }
0921 }
0922 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
0923 
0924 void gb_connection_latency_tag_disable(struct gb_connection *connection)
0925 {
0926     struct gb_host_device *hd = connection->hd;
0927     int ret;
0928 
0929     if (!hd->driver->latency_tag_disable)
0930         return;
0931 
0932     ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
0933     if (ret) {
0934         dev_err(&connection->hd->dev,
0935             "%s: failed to disable latency tag: %d\n",
0936             connection->name, ret);
0937     }
0938 }
0939 EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);