0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * SVC Greybus driver.
0004  *
0005  * Copyright 2015 Google Inc.
0006  * Copyright 2015 Linaro Ltd.
0007  */
0008 
0009 #include <linux/debugfs.h>
0010 #include <linux/workqueue.h>
0011 #include <linux/greybus.h>
0012 
0013 #define SVC_INTF_EJECT_TIMEOUT      9000
0014 #define SVC_INTF_ACTIVATE_TIMEOUT   6000
0015 #define SVC_INTF_RESUME_TIMEOUT     3000
0016 
0017 struct gb_svc_deferred_request {
0018     struct work_struct work;
0019     struct gb_operation *operation;
0020 };
0021 
0022 static int gb_svc_queue_deferred_request(struct gb_operation *operation);
0023 
0024 static ssize_t endo_id_show(struct device *dev,
0025                 struct device_attribute *attr, char *buf)
0026 {
0027     struct gb_svc *svc = to_gb_svc(dev);
0028 
0029     return sprintf(buf, "0x%04x\n", svc->endo_id);
0030 }
0031 static DEVICE_ATTR_RO(endo_id);
0032 
0033 static ssize_t ap_intf_id_show(struct device *dev,
0034                    struct device_attribute *attr, char *buf)
0035 {
0036     struct gb_svc *svc = to_gb_svc(dev);
0037 
0038     return sprintf(buf, "%u\n", svc->ap_intf_id);
0039 }
0040 static DEVICE_ATTR_RO(ap_intf_id);
0041 
0042 // FIXME
0043 // This is a hack, we need to do this "right" and clean the interface up
0044 // properly, not just forcibly yank the thing out of the system and hope for the
0045 // best.  But for now, people want their modules to come out without having to
0046 // throw the thing to the ground or get out a screwdriver.
0047 static ssize_t intf_eject_store(struct device *dev,
0048                 struct device_attribute *attr, const char *buf,
0049                 size_t len)
0050 {
0051     struct gb_svc *svc = to_gb_svc(dev);
0052     unsigned short intf_id;
0053     int ret;
0054 
0055     ret = kstrtou16(buf, 10, &intf_id);
0056     if (ret < 0)
0057         return ret;
0058 
0059     dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
0060 
0061     ret = gb_svc_intf_eject(svc, intf_id);
0062     if (ret < 0)
0063         return ret;
0064 
0065     return len;
0066 }
0067 static DEVICE_ATTR_WO(intf_eject);
0068 
0069 static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
0070                  char *buf)
0071 {
0072     struct gb_svc *svc = to_gb_svc(dev);
0073 
0074     return sprintf(buf, "%s\n",
0075                gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
0076 }
0077 
0078 static ssize_t watchdog_store(struct device *dev,
0079                   struct device_attribute *attr, const char *buf,
0080                   size_t len)
0081 {
0082     struct gb_svc *svc = to_gb_svc(dev);
0083     int retval;
0084     bool user_request;
0085 
0086     retval = strtobool(buf, &user_request);
0087     if (retval)
0088         return retval;
0089 
0090     if (user_request)
0091         retval = gb_svc_watchdog_enable(svc);
0092     else
0093         retval = gb_svc_watchdog_disable(svc);
0094     if (retval)
0095         return retval;
0096     return len;
0097 }
0098 static DEVICE_ATTR_RW(watchdog);
0099 
0100 static ssize_t watchdog_action_show(struct device *dev,
0101                     struct device_attribute *attr, char *buf)
0102 {
0103     struct gb_svc *svc = to_gb_svc(dev);
0104 
0105     if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL)
0106         return sprintf(buf, "panic\n");
0107     else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO)
0108         return sprintf(buf, "reset\n");
0109 
0110     return -EINVAL;
0111 }
0112 
0113 static ssize_t watchdog_action_store(struct device *dev,
0114                      struct device_attribute *attr,
0115                      const char *buf, size_t len)
0116 {
0117     struct gb_svc *svc = to_gb_svc(dev);
0118 
0119     if (sysfs_streq(buf, "panic"))
0120         svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL;
0121     else if (sysfs_streq(buf, "reset"))
0122         svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO;
0123     else
0124         return -EINVAL;
0125 
0126     return len;
0127 }
0128 static DEVICE_ATTR_RW(watchdog_action);
0129 
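/*
 * Illustrative userspace sketch (not part of this driver): the attributes
 * above appear under the SVC device on the greybus bus, so a forced eject
 * of interface 1 can be requested by writing to intf_eject.  The sysfs
 * path below is an assumption based on the "%d-svc" device name set in
 * gb_svc_create().
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/greybus/devices/1-svc/intf_eject";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}
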
0130 static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
0131 {
0132     struct gb_svc_pwrmon_rail_count_get_response response;
0133     int ret;
0134 
0135     ret = gb_operation_sync(svc->connection,
0136                 GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
0137                 &response, sizeof(response));
0138     if (ret) {
0139         dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
0140         return ret;
0141     }
0142 
0143     *value = response.rail_count;
0144 
0145     return 0;
0146 }
0147 
0148 static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
0149         struct gb_svc_pwrmon_rail_names_get_response *response,
0150         size_t bufsize)
0151 {
0152     int ret;
0153 
0154     ret = gb_operation_sync(svc->connection,
0155                 GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
0156                 response, bufsize);
0157     if (ret) {
0158         dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
0159         return ret;
0160     }
0161 
0162     if (response->status != GB_SVC_OP_SUCCESS) {
0163         dev_err(&svc->dev,
0164             "SVC error while getting rail names: %u\n",
0165             response->status);
0166         return -EREMOTEIO;
0167     }
0168 
0169     return 0;
0170 }
0171 
0172 static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
0173                     u8 measurement_type, u32 *value)
0174 {
0175     struct gb_svc_pwrmon_sample_get_request request;
0176     struct gb_svc_pwrmon_sample_get_response response;
0177     int ret;
0178 
0179     request.rail_id = rail_id;
0180     request.measurement_type = measurement_type;
0181 
0182     ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
0183                 &request, sizeof(request),
0184                 &response, sizeof(response));
0185     if (ret) {
0186         dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
0187         return ret;
0188     }
0189 
0190     if (response.result) {
0191         dev_err(&svc->dev,
0192             "UniPro error while getting rail power sample (%d %d): %d\n",
0193             rail_id, measurement_type, response.result);
0194         switch (response.result) {
0195         case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
0196             return -EINVAL;
0197         case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
0198             return -ENOMSG;
0199         default:
0200             return -EREMOTEIO;
0201         }
0202     }
0203 
0204     *value = le32_to_cpu(response.measurement);
0205 
0206     return 0;
0207 }
0208 
0209 int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
0210                   u8 measurement_type, u32 *value)
0211 {
0212     struct gb_svc_pwrmon_intf_sample_get_request request;
0213     struct gb_svc_pwrmon_intf_sample_get_response response;
0214     int ret;
0215 
0216     request.intf_id = intf_id;
0217     request.measurement_type = measurement_type;
0218 
0219     ret = gb_operation_sync(svc->connection,
0220                 GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
0221                 &request, sizeof(request),
0222                 &response, sizeof(response));
0223     if (ret) {
0224         dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
0225         return ret;
0226     }
0227 
0228     if (response.result) {
0229         dev_err(&svc->dev,
0230             "UniPro error while getting intf power sample (%d %d): %d\n",
0231             intf_id, measurement_type, response.result);
0232         switch (response.result) {
0233         case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
0234             return -EINVAL;
0235         case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
0236             return -ENOMSG;
0237         default:
0238             return -EREMOTEIO;
0239         }
0240     }
0241 
0242     *value = le32_to_cpu(response.measurement);
0243 
0244     return 0;
0245 }
0246 
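/*
 * Illustrative sketch (not part of this driver): how a caller such as the
 * interface core might read an instantaneous current sample for an
 * interface.  Assumes a valid struct gb_interface whose host device owns
 * an SVC; the helper and measurement-type constant are the ones used in
 * this file.
 */
static int example_read_intf_current(struct gb_interface *intf, u32 *value)
{
	struct gb_svc *svc = intf->hd->svc;

	/* GB_SVC_PWRMON_TYPE_CURR selects a current measurement. */
	return gb_svc_pwrmon_intf_sample_get(svc, intf->interface_id,
					     GB_SVC_PWRMON_TYPE_CURR, value);
}
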
0247 static struct attribute *svc_attrs[] = {
0248     &dev_attr_endo_id.attr,
0249     &dev_attr_ap_intf_id.attr,
0250     &dev_attr_intf_eject.attr,
0251     &dev_attr_watchdog.attr,
0252     &dev_attr_watchdog_action.attr,
0253     NULL,
0254 };
0255 ATTRIBUTE_GROUPS(svc);
0256 
0257 int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
0258 {
0259     struct gb_svc_intf_device_id_request request;
0260 
0261     request.intf_id = intf_id;
0262     request.device_id = device_id;
0263 
0264     return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
0265                  &request, sizeof(request), NULL, 0);
0266 }
0267 
0268 int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
0269 {
0270     struct gb_svc_intf_eject_request request;
0271     int ret;
0272 
0273     request.intf_id = intf_id;
0274 
0275     /*
0276      * The pulse width for module release in the SVC is long, so we need to
0277      * increase the timeout so the operation will not return too soon.
0278      */
0279     ret = gb_operation_sync_timeout(svc->connection,
0280                     GB_SVC_TYPE_INTF_EJECT, &request,
0281                     sizeof(request), NULL, 0,
0282                     SVC_INTF_EJECT_TIMEOUT);
0283     if (ret) {
0284         dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
0285         return ret;
0286     }
0287 
0288     return 0;
0289 }
0290 
0291 int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
0292 {
0293     struct gb_svc_intf_vsys_request request;
0294     struct gb_svc_intf_vsys_response response;
0295     int type, ret;
0296 
0297     request.intf_id = intf_id;
0298 
0299     if (enable)
0300         type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
0301     else
0302         type = GB_SVC_TYPE_INTF_VSYS_DISABLE;
0303 
0304     ret = gb_operation_sync(svc->connection, type,
0305                 &request, sizeof(request),
0306                 &response, sizeof(response));
0307     if (ret < 0)
0308         return ret;
0309     if (response.result_code != GB_SVC_INTF_VSYS_OK)
0310         return -EREMOTEIO;
0311     return 0;
0312 }
0313 
0314 int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
0315 {
0316     struct gb_svc_intf_refclk_request request;
0317     struct gb_svc_intf_refclk_response response;
0318     int type, ret;
0319 
0320     request.intf_id = intf_id;
0321 
0322     if (enable)
0323         type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
0324     else
0325         type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;
0326 
0327     ret = gb_operation_sync(svc->connection, type,
0328                 &request, sizeof(request),
0329                 &response, sizeof(response));
0330     if (ret < 0)
0331         return ret;
0332     if (response.result_code != GB_SVC_INTF_REFCLK_OK)
0333         return -EREMOTEIO;
0334     return 0;
0335 }
0336 
0337 int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
0338 {
0339     struct gb_svc_intf_unipro_request request;
0340     struct gb_svc_intf_unipro_response response;
0341     int type, ret;
0342 
0343     request.intf_id = intf_id;
0344 
0345     if (enable)
0346         type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
0347     else
0348         type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;
0349 
0350     ret = gb_operation_sync(svc->connection, type,
0351                 &request, sizeof(request),
0352                 &response, sizeof(response));
0353     if (ret < 0)
0354         return ret;
0355     if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
0356         return -EREMOTEIO;
0357     return 0;
0358 }
0359 
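/*
 * Illustrative sketch (not part of this driver): the three helpers above
 * are used in sequence when powering an interface, roughly in the order
 * the interface core uses them: system power first, then the reference
 * clock, then the UniPro link.  Error handling is simplified here.
 */
static int example_intf_power_up(struct gb_svc *svc, u8 intf_id)
{
	int ret;

	ret = gb_svc_intf_vsys_set(svc, intf_id, true);
	if (ret)
		return ret;

	ret = gb_svc_intf_refclk_set(svc, intf_id, true);
	if (ret)
		goto err_vsys_off;

	ret = gb_svc_intf_unipro_set(svc, intf_id, true);
	if (ret)
		goto err_refclk_off;

	return 0;

err_refclk_off:
	gb_svc_intf_refclk_set(svc, intf_id, false);
err_vsys_off:
	gb_svc_intf_vsys_set(svc, intf_id, false);
	return ret;
}
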
0360 int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
0361 {
0362     struct gb_svc_intf_activate_request request;
0363     struct gb_svc_intf_activate_response response;
0364     int ret;
0365 
0366     request.intf_id = intf_id;
0367 
0368     ret = gb_operation_sync_timeout(svc->connection,
0369                     GB_SVC_TYPE_INTF_ACTIVATE,
0370                     &request, sizeof(request),
0371                     &response, sizeof(response),
0372                     SVC_INTF_ACTIVATE_TIMEOUT);
0373     if (ret < 0)
0374         return ret;
0375     if (response.status != GB_SVC_OP_SUCCESS) {
0376         dev_err(&svc->dev, "failed to activate interface %u: %u\n",
0377             intf_id, response.status);
0378         return -EREMOTEIO;
0379     }
0380 
0381     *intf_type = response.intf_type;
0382 
0383     return 0;
0384 }
0385 
0386 int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id)
0387 {
0388     struct gb_svc_intf_resume_request request;
0389     struct gb_svc_intf_resume_response response;
0390     int ret;
0391 
0392     request.intf_id = intf_id;
0393 
0394     ret = gb_operation_sync_timeout(svc->connection,
0395                     GB_SVC_TYPE_INTF_RESUME,
0396                     &request, sizeof(request),
0397                     &response, sizeof(response),
0398                     SVC_INTF_RESUME_TIMEOUT);
0399     if (ret < 0) {
0400         dev_err(&svc->dev, "failed to send interface resume %u: %d\n",
0401             intf_id, ret);
0402         return ret;
0403     }
0404 
0405     if (response.status != GB_SVC_OP_SUCCESS) {
0406         dev_err(&svc->dev, "failed to resume interface %u: %u\n",
0407             intf_id, response.status);
0408         return -EREMOTEIO;
0409     }
0410 
0411     return 0;
0412 }
0413 
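/*
 * Illustrative sketch (not part of this driver): activate an interface
 * after power-up and report the interface type detected by the SVC.
 */
static int example_intf_activate(struct gb_svc *svc, u8 intf_id)
{
	u8 intf_type;
	int ret;

	ret = gb_svc_intf_activate(svc, intf_id, &intf_type);
	if (ret)
		return ret;

	dev_dbg(&svc->dev, "interface %u activated, type %u\n",
		intf_id, intf_type);

	return 0;
}
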
0414 int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
0415             u32 *value)
0416 {
0417     struct gb_svc_dme_peer_get_request request;
0418     struct gb_svc_dme_peer_get_response response;
0419     u16 result;
0420     int ret;
0421 
0422     request.intf_id = intf_id;
0423     request.attr = cpu_to_le16(attr);
0424     request.selector = cpu_to_le16(selector);
0425 
0426     ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
0427                 &request, sizeof(request),
0428                 &response, sizeof(response));
0429     if (ret) {
0430         dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
0431             intf_id, attr, selector, ret);
0432         return ret;
0433     }
0434 
0435     result = le16_to_cpu(response.result_code);
0436     if (result) {
0437         dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
0438             intf_id, attr, selector, result);
0439         return -EREMOTEIO;
0440     }
0441 
0442     if (value)
0443         *value = le32_to_cpu(response.attr_value);
0444 
0445     return 0;
0446 }
0447 
0448 int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
0449             u32 value)
0450 {
0451     struct gb_svc_dme_peer_set_request request;
0452     struct gb_svc_dme_peer_set_response response;
0453     u16 result;
0454     int ret;
0455 
0456     request.intf_id = intf_id;
0457     request.attr = cpu_to_le16(attr);
0458     request.selector = cpu_to_le16(selector);
0459     request.value = cpu_to_le32(value);
0460 
0461     ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
0462                 &request, sizeof(request),
0463                 &response, sizeof(response));
0464     if (ret) {
0465         dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
0466             intf_id, attr, selector, value, ret);
0467         return ret;
0468     }
0469 
0470     result = le16_to_cpu(response.result_code);
0471     if (result) {
0472         dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
0473             intf_id, attr, selector, value, result);
0474         return -EREMOTEIO;
0475     }
0476 
0477     return 0;
0478 }
0479 
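/*
 * Illustrative sketch (not part of this driver): read a peer DME attribute
 * and write the same value back.  The attribute ID and selector index are
 * placeholders; real callers pass the UniPro-defined attribute numbers.
 */
static int example_dme_roundtrip(struct gb_svc *svc, u8 intf_id)
{
	u32 value;
	int ret;

	ret = gb_svc_dme_peer_get(svc, intf_id, 0x5003 /* placeholder */,
				  0, &value);
	if (ret)
		return ret;

	return gb_svc_dme_peer_set(svc, intf_id, 0x5003 /* placeholder */,
				   0, value);
}
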
0480 int gb_svc_connection_create(struct gb_svc *svc,
0481                  u8 intf1_id, u16 cport1_id,
0482                  u8 intf2_id, u16 cport2_id,
0483                  u8 cport_flags)
0484 {
0485     struct gb_svc_conn_create_request request;
0486 
0487     request.intf1_id = intf1_id;
0488     request.cport1_id = cpu_to_le16(cport1_id);
0489     request.intf2_id = intf2_id;
0490     request.cport2_id = cpu_to_le16(cport2_id);
0491     request.tc = 0;     /* TC0 */
0492     request.flags = cport_flags;
0493 
0494     return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
0495                  &request, sizeof(request), NULL, 0);
0496 }
0497 
0498 void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
0499                    u8 intf2_id, u16 cport2_id)
0500 {
0501     struct gb_svc_conn_destroy_request request;
0502     struct gb_connection *connection = svc->connection;
0503     int ret;
0504 
0505     request.intf1_id = intf1_id;
0506     request.cport1_id = cpu_to_le16(cport1_id);
0507     request.intf2_id = intf2_id;
0508     request.cport2_id = cpu_to_le16(cport2_id);
0509 
0510     ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
0511                 &request, sizeof(request), NULL, 0);
0512     if (ret) {
0513         dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
0514             intf1_id, cport1_id, intf2_id, cport2_id, ret);
0515     }
0516 }
0517 
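/*
 * Illustrative sketch (not part of this driver): create and later tear down
 * a connection between an AP CPort and a module CPort.  The CPort IDs and
 * the zero flags value are placeholders; the connection core derives these
 * from the connection being enabled.
 */
static int example_connection_setup(struct gb_svc *svc, u16 ap_cport_id,
				    u8 intf_id, u16 cport_id)
{
	int ret;

	ret = gb_svc_connection_create(svc, svc->ap_intf_id, ap_cport_id,
				       intf_id, cport_id, 0 /* flags */);
	if (ret)
		return ret;

	/* ... the connection is used here ... */

	gb_svc_connection_destroy(svc, svc->ap_intf_id, ap_cport_id,
				  intf_id, cport_id);
	return 0;
}
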
0518 /* Creates bi-directional routes between the devices */
0519 int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
0520             u8 intf2_id, u8 dev2_id)
0521 {
0522     struct gb_svc_route_create_request request;
0523 
0524     request.intf1_id = intf1_id;
0525     request.dev1_id = dev1_id;
0526     request.intf2_id = intf2_id;
0527     request.dev2_id = dev2_id;
0528 
0529     return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
0530                  &request, sizeof(request), NULL, 0);
0531 }
0532 
0533 /* Destroys bi-directional routes between the devices */
0534 void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
0535 {
0536     struct gb_svc_route_destroy_request request;
0537     int ret;
0538 
0539     request.intf1_id = intf1_id;
0540     request.intf2_id = intf2_id;
0541 
0542     ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
0543                 &request, sizeof(request), NULL, 0);
0544     if (ret) {
0545         dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
0546             intf1_id, intf2_id, ret);
0547     }
0548 }
0549 
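/*
 * Illustrative sketch (not part of this driver): assign a device ID to an
 * interface and create the route between the AP and that interface, as is
 * done when an interface is enabled.  The fixed device ID and the literal
 * 0 for the AP's device ID are used only for illustration; the real code
 * allocates IDs from svc->device_id_map and uses a named constant.
 */
static int example_route_setup(struct gb_svc *svc, u8 intf_id, u8 device_id)
{
	int ret;

	ret = gb_svc_intf_device_id(svc, intf_id, device_id);
	if (ret)
		return ret;

	return gb_svc_route_create(svc, svc->ap_intf_id, 0 /* AP device ID */,
				   intf_id, device_id);
}
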
0550 int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
0551                    u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
0552                    u8 tx_amplitude, u8 tx_hs_equalizer,
0553                    u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
0554                    u8 flags, u32 quirks,
0555                    struct gb_svc_l2_timer_cfg *local,
0556                    struct gb_svc_l2_timer_cfg *remote)
0557 {
0558     struct gb_svc_intf_set_pwrm_request request;
0559     struct gb_svc_intf_set_pwrm_response response;
0560     int ret;
0561     u16 result_code;
0562 
0563     memset(&request, 0, sizeof(request));
0564 
0565     request.intf_id = intf_id;
0566     request.hs_series = hs_series;
0567     request.tx_mode = tx_mode;
0568     request.tx_gear = tx_gear;
0569     request.tx_nlanes = tx_nlanes;
0570     request.tx_amplitude = tx_amplitude;
0571     request.tx_hs_equalizer = tx_hs_equalizer;
0572     request.rx_mode = rx_mode;
0573     request.rx_gear = rx_gear;
0574     request.rx_nlanes = rx_nlanes;
0575     request.flags = flags;
0576     request.quirks = cpu_to_le32(quirks);
0577     if (local)
0578         request.local_l2timerdata = *local;
0579     if (remote)
0580         request.remote_l2timerdata = *remote;
0581 
0582     ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
0583                 &request, sizeof(request),
0584                 &response, sizeof(response));
0585     if (ret < 0)
0586         return ret;
0587 
0588     result_code = response.result_code;
0589     if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) {
0590         dev_err(&svc->dev, "set power mode = %d\n", result_code);
0591         return -EIO;
0592     }
0593 
0594     return 0;
0595 }
0596 EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
0597 
0598 int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id)
0599 {
0600     struct gb_svc_intf_set_pwrm_request request;
0601     struct gb_svc_intf_set_pwrm_response response;
0602     int ret;
0603     u16 result_code;
0604 
0605     memset(&request, 0, sizeof(request));
0606 
0607     request.intf_id = intf_id;
0608     request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A;
0609     request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
0610     request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
0611 
0612     ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
0613                 &request, sizeof(request),
0614                 &response, sizeof(response));
0615     if (ret < 0) {
0616         dev_err(&svc->dev,
0617             "failed to send set power mode operation to interface %u: %d\n",
0618             intf_id, ret);
0619         return ret;
0620     }
0621 
0622     result_code = response.result_code;
0623     if (result_code != GB_SVC_SETPWRM_PWR_OK) {
0624         dev_err(&svc->dev,
0625             "failed to hibernate the link for interface %u: %u\n",
0626             intf_id, result_code);
0627         return -EIO;
0628     }
0629 
0630     return 0;
0631 }
0632 
0633 int gb_svc_ping(struct gb_svc *svc)
0634 {
0635     return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
0636                      NULL, 0, NULL, 0,
0637                      GB_OPERATION_TIMEOUT_DEFAULT * 2);
0638 }
0639 
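/*
 * Illustrative sketch (not part of this driver): gb_svc_ping() is the
 * health check used by the SVC watchdog; a failed ping is what leads to
 * svc->action being taken (kernel panic or UniPro reset).  Reduced here
 * to a single check.
 */
static void example_check_svc_alive(struct gb_svc *svc)
{
	if (gb_svc_ping(svc))
		dev_err(&svc->dev, "SVC did not answer ping\n");
}
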
0640 static int gb_svc_version_request(struct gb_operation *op)
0641 {
0642     struct gb_connection *connection = op->connection;
0643     struct gb_svc *svc = gb_connection_get_data(connection);
0644     struct gb_svc_version_request *request;
0645     struct gb_svc_version_response *response;
0646 
0647     if (op->request->payload_size < sizeof(*request)) {
0648         dev_err(&svc->dev, "short version request (%zu < %zu)\n",
0649             op->request->payload_size,
0650             sizeof(*request));
0651         return -EINVAL;
0652     }
0653 
0654     request = op->request->payload;
0655 
0656     if (request->major > GB_SVC_VERSION_MAJOR) {
0657         dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
0658              request->major, GB_SVC_VERSION_MAJOR);
0659         return -ENOTSUPP;
0660     }
0661 
0662     svc->protocol_major = request->major;
0663     svc->protocol_minor = request->minor;
0664 
0665     if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
0666         return -ENOMEM;
0667 
0668     response = op->response->payload;
0669     response->major = svc->protocol_major;
0670     response->minor = svc->protocol_minor;
0671 
0672     return 0;
0673 }
0674 
0675 static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
0676                     size_t len, loff_t *offset)
0677 {
0678     struct svc_debugfs_pwrmon_rail *pwrmon_rails =
0679         file_inode(file)->i_private;
0680     struct gb_svc *svc = pwrmon_rails->svc;
0681     int ret, desc;
0682     u32 value;
0683     char buff[16];
0684 
0685     ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
0686                        GB_SVC_PWRMON_TYPE_VOL, &value);
0687     if (ret) {
0688         dev_err(&svc->dev,
0689             "failed to get voltage sample %u: %d\n",
0690             pwrmon_rails->id, ret);
0691         return ret;
0692     }
0693 
0694     desc = scnprintf(buff, sizeof(buff), "%u\n", value);
0695 
0696     return simple_read_from_buffer(buf, len, offset, buff, desc);
0697 }
0698 
0699 static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
0700                     size_t len, loff_t *offset)
0701 {
0702     struct svc_debugfs_pwrmon_rail *pwrmon_rails =
0703         file_inode(file)->i_private;
0704     struct gb_svc *svc = pwrmon_rails->svc;
0705     int ret, desc;
0706     u32 value;
0707     char buff[16];
0708 
0709     ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
0710                        GB_SVC_PWRMON_TYPE_CURR, &value);
0711     if (ret) {
0712         dev_err(&svc->dev,
0713             "failed to get current sample %u: %d\n",
0714             pwrmon_rails->id, ret);
0715         return ret;
0716     }
0717 
0718     desc = scnprintf(buff, sizeof(buff), "%u\n", value);
0719 
0720     return simple_read_from_buffer(buf, len, offset, buff, desc);
0721 }
0722 
0723 static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
0724                       size_t len, loff_t *offset)
0725 {
0726     struct svc_debugfs_pwrmon_rail *pwrmon_rails =
0727         file_inode(file)->i_private;
0728     struct gb_svc *svc = pwrmon_rails->svc;
0729     int ret, desc;
0730     u32 value;
0731     char buff[16];
0732 
0733     ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
0734                        GB_SVC_PWRMON_TYPE_PWR, &value);
0735     if (ret) {
0736         dev_err(&svc->dev, "failed to get power sample %u: %d\n",
0737             pwrmon_rails->id, ret);
0738         return ret;
0739     }
0740 
0741     desc = scnprintf(buff, sizeof(buff), "%u\n", value);
0742 
0743     return simple_read_from_buffer(buf, len, offset, buff, desc);
0744 }
0745 
0746 static const struct file_operations pwrmon_debugfs_voltage_fops = {
0747     .read       = pwr_debugfs_voltage_read,
0748 };
0749 
0750 static const struct file_operations pwrmon_debugfs_current_fops = {
0751     .read       = pwr_debugfs_current_read,
0752 };
0753 
0754 static const struct file_operations pwrmon_debugfs_power_fops = {
0755     .read       = pwr_debugfs_power_read,
0756 };
0757 
0758 static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
0759 {
0760     int i;
0761     size_t bufsize;
0762     struct dentry *dent;
0763     struct gb_svc_pwrmon_rail_names_get_response *rail_names;
0764     u8 rail_count;
0765 
0766     dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
0767     if (IS_ERR_OR_NULL(dent))
0768         return;
0769 
0770     if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
0771         goto err_pwrmon_debugfs;
0772 
0773     if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
0774         goto err_pwrmon_debugfs;
0775 
0776     bufsize = sizeof(*rail_names) +
0777         GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;
0778 
0779     rail_names = kzalloc(bufsize, GFP_KERNEL);
0780     if (!rail_names)
0781         goto err_pwrmon_debugfs;
0782 
0783     svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
0784                     GFP_KERNEL);
0785     if (!svc->pwrmon_rails)
0786         goto err_pwrmon_debugfs_free;
0787 
0788     if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
0789         goto err_pwrmon_debugfs_free;
0790 
0791     for (i = 0; i < rail_count; i++) {
0792         struct dentry *dir;
0793         struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
0794         char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
0795 
0796         snprintf(fname, sizeof(fname), "%s",
0797              (char *)&rail_names->name[i]);
0798 
0799         rail->id = i;
0800         rail->svc = svc;
0801 
0802         dir = debugfs_create_dir(fname, dent);
0803         debugfs_create_file("voltage_now", 0444, dir, rail,
0804                     &pwrmon_debugfs_voltage_fops);
0805         debugfs_create_file("current_now", 0444, dir, rail,
0806                     &pwrmon_debugfs_current_fops);
0807         debugfs_create_file("power_now", 0444, dir, rail,
0808                     &pwrmon_debugfs_power_fops);
0809     }
0810 
0811     kfree(rail_names);
0812     return;
0813 
0814 err_pwrmon_debugfs_free:
0815     kfree(rail_names);
0816     kfree(svc->pwrmon_rails);
0817     svc->pwrmon_rails = NULL;
0818 
0819 err_pwrmon_debugfs:
0820     debugfs_remove(dent);
0821 }
0822 
0823 static void gb_svc_debugfs_init(struct gb_svc *svc)
0824 {
0825     svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
0826                          gb_debugfs_get());
0827     gb_svc_pwrmon_debugfs_init(svc);
0828 }
0829 
0830 static void gb_svc_debugfs_exit(struct gb_svc *svc)
0831 {
0832     debugfs_remove_recursive(svc->debugfs_dentry);
0833     kfree(svc->pwrmon_rails);
0834     svc->pwrmon_rails = NULL;
0835 }
0836 
0837 static int gb_svc_hello(struct gb_operation *op)
0838 {
0839     struct gb_connection *connection = op->connection;
0840     struct gb_svc *svc = gb_connection_get_data(connection);
0841     struct gb_svc_hello_request *hello_request;
0842     int ret;
0843 
0844     if (op->request->payload_size < sizeof(*hello_request)) {
0845         dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
0846              op->request->payload_size,
0847              sizeof(*hello_request));
0848         return -EINVAL;
0849     }
0850 
0851     hello_request = op->request->payload;
0852     svc->endo_id = le16_to_cpu(hello_request->endo_id);
0853     svc->ap_intf_id = hello_request->interface_id;
0854 
0855     ret = device_add(&svc->dev);
0856     if (ret) {
0857         dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
0858         return ret;
0859     }
0860 
0861     ret = gb_svc_watchdog_create(svc);
0862     if (ret) {
0863         dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
0864         goto err_deregister_svc;
0865     }
0866 
0867     /*
0868      * FIXME: This is a temporary hack to reconfigure the link at HELLO
0869      * (which abuses the deferred request processing mechanism).
0870      */
0871     ret = gb_svc_queue_deferred_request(op);
0872     if (ret)
0873         goto err_destroy_watchdog;
0874 
0875     gb_svc_debugfs_init(svc);
0876 
0877     return 0;
0878 
0879 err_destroy_watchdog:
0880     gb_svc_watchdog_destroy(svc);
0881 err_deregister_svc:
0882     device_del(&svc->dev);
0883 
0884     return ret;
0885 }
0886 
0887 static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
0888                             u8 intf_id)
0889 {
0890     struct gb_host_device *hd = svc->hd;
0891     struct gb_module *module;
0892     size_t num_interfaces;
0893     u8 module_id;
0894 
0895     list_for_each_entry(module, &hd->modules, hd_node) {
0896         module_id = module->module_id;
0897         num_interfaces = module->num_interfaces;
0898 
0899         if (intf_id >= module_id &&
0900             intf_id < module_id + num_interfaces) {
0901             return module->interfaces[intf_id - module_id];
0902         }
0903     }
0904 
0905     return NULL;
0906 }
0907 
0908 static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
0909 {
0910     struct gb_host_device *hd = svc->hd;
0911     struct gb_module *module;
0912 
0913     list_for_each_entry(module, &hd->modules, hd_node) {
0914         if (module->module_id == module_id)
0915             return module;
0916     }
0917 
0918     return NULL;
0919 }
0920 
0921 static void gb_svc_process_hello_deferred(struct gb_operation *operation)
0922 {
0923     struct gb_connection *connection = operation->connection;
0924     struct gb_svc *svc = gb_connection_get_data(connection);
0925     int ret;
0926 
0927     /*
0928      * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
0929      * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
0930      * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
0931      * module.
0932      *
0933      * The code should be removed once SW-2217 ("Heuristic for UniPro
0934      * Power Mode Changes") is resolved.
0935      */
0936     ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
0937                      GB_SVC_UNIPRO_HS_SERIES_A,
0938                      GB_SVC_UNIPRO_SLOW_AUTO_MODE,
0939                      2, 1,
0940                      GB_SVC_SMALL_AMPLITUDE,
0941                      GB_SVC_NO_DE_EMPHASIS,
0942                      GB_SVC_UNIPRO_SLOW_AUTO_MODE,
0943                      2, 1,
0944                      0, 0,
0945                      NULL, NULL);
0946 
0947     if (ret)
0948         dev_warn(&svc->dev,
0949              "power mode change failed on AP to switch link: %d\n",
0950              ret);
0951 }
0952 
0953 static void gb_svc_process_module_inserted(struct gb_operation *operation)
0954 {
0955     struct gb_svc_module_inserted_request *request;
0956     struct gb_connection *connection = operation->connection;
0957     struct gb_svc *svc = gb_connection_get_data(connection);
0958     struct gb_host_device *hd = svc->hd;
0959     struct gb_module *module;
0960     size_t num_interfaces;
0961     u8 module_id;
0962     u16 flags;
0963     int ret;
0964 
0965     /* The request message size has already been verified. */
0966     request = operation->request->payload;
0967     module_id = request->primary_intf_id;
0968     num_interfaces = request->intf_count;
0969     flags = le16_to_cpu(request->flags);
0970 
0971     dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
0972         __func__, module_id, num_interfaces, flags);
0973 
0974     if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
0975         dev_warn(&svc->dev, "no primary interface detected on module %u\n",
0976              module_id);
0977     }
0978 
0979     module = gb_svc_module_lookup(svc, module_id);
0980     if (module) {
0981         dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
0982              module_id);
0983         return;
0984     }
0985 
0986     module = gb_module_create(hd, module_id, num_interfaces);
0987     if (!module) {
0988         dev_err(&svc->dev, "failed to create module\n");
0989         return;
0990     }
0991 
0992     ret = gb_module_add(module);
0993     if (ret) {
0994         gb_module_put(module);
0995         return;
0996     }
0997 
0998     list_add(&module->hd_node, &hd->modules);
0999 }
1000 
1001 static void gb_svc_process_module_removed(struct gb_operation *operation)
1002 {
1003     struct gb_svc_module_removed_request *request;
1004     struct gb_connection *connection = operation->connection;
1005     struct gb_svc *svc = gb_connection_get_data(connection);
1006     struct gb_module *module;
1007     u8 module_id;
1008 
1009     /* The request message size has already been verified. */
1010     request = operation->request->payload;
1011     module_id = request->primary_intf_id;
1012 
1013     dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);
1014 
1015     module = gb_svc_module_lookup(svc, module_id);
1016     if (!module) {
1017         dev_warn(&svc->dev, "unexpected module-removed event %u\n",
1018              module_id);
1019         return;
1020     }
1021 
1022     module->disconnected = true;
1023 
1024     gb_module_del(module);
1025     list_del(&module->hd_node);
1026     gb_module_put(module);
1027 }
1028 
1029 static void gb_svc_process_intf_oops(struct gb_operation *operation)
1030 {
1031     struct gb_svc_intf_oops_request *request;
1032     struct gb_connection *connection = operation->connection;
1033     struct gb_svc *svc = gb_connection_get_data(connection);
1034     struct gb_interface *intf;
1035     u8 intf_id;
1036     u8 reason;
1037 
1038     /* The request message size has already been verified. */
1039     request = operation->request->payload;
1040     intf_id = request->intf_id;
1041     reason = request->reason;
1042 
1043     intf = gb_svc_interface_lookup(svc, intf_id);
1044     if (!intf) {
1045         dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
1046              intf_id);
1047         return;
1048     }
1049 
1050     dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
1051          intf_id, reason);
1052 
1053     mutex_lock(&intf->mutex);
1054     intf->disconnected = true;
1055     gb_interface_disable(intf);
1056     gb_interface_deactivate(intf);
1057     mutex_unlock(&intf->mutex);
1058 }
1059 
1060 static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
1061 {
1062     struct gb_svc_intf_mailbox_event_request *request;
1063     struct gb_connection *connection = operation->connection;
1064     struct gb_svc *svc = gb_connection_get_data(connection);
1065     struct gb_interface *intf;
1066     u8 intf_id;
1067     u16 result_code;
1068     u32 mailbox;
1069 
1070     /* The request message size has already been verified. */
1071     request = operation->request->payload;
1072     intf_id = request->intf_id;
1073     result_code = le16_to_cpu(request->result_code);
1074     mailbox = le32_to_cpu(request->mailbox);
1075 
1076     dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
1077         __func__, intf_id, result_code, mailbox);
1078 
1079     intf = gb_svc_interface_lookup(svc, intf_id);
1080     if (!intf) {
1081         dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
1082         return;
1083     }
1084 
1085     gb_interface_mailbox_event(intf, result_code, mailbox);
1086 }
1087 
1088 static void gb_svc_process_deferred_request(struct work_struct *work)
1089 {
1090     struct gb_svc_deferred_request *dr;
1091     struct gb_operation *operation;
1092     struct gb_svc *svc;
1093     u8 type;
1094 
1095     dr = container_of(work, struct gb_svc_deferred_request, work);
1096     operation = dr->operation;
1097     svc = gb_connection_get_data(operation->connection);
1098     type = operation->request->header->type;
1099 
1100     switch (type) {
1101     case GB_SVC_TYPE_SVC_HELLO:
1102         gb_svc_process_hello_deferred(operation);
1103         break;
1104     case GB_SVC_TYPE_MODULE_INSERTED:
1105         gb_svc_process_module_inserted(operation);
1106         break;
1107     case GB_SVC_TYPE_MODULE_REMOVED:
1108         gb_svc_process_module_removed(operation);
1109         break;
1110     case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
1111         gb_svc_process_intf_mailbox_event(operation);
1112         break;
1113     case GB_SVC_TYPE_INTF_OOPS:
1114         gb_svc_process_intf_oops(operation);
1115         break;
1116     default:
1117         dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
1118     }
1119 
1120     gb_operation_put(operation);
1121     kfree(dr);
1122 }
1123 
1124 static int gb_svc_queue_deferred_request(struct gb_operation *operation)
1125 {
1126     struct gb_svc *svc = gb_connection_get_data(operation->connection);
1127     struct gb_svc_deferred_request *dr;
1128 
1129     dr = kmalloc(sizeof(*dr), GFP_KERNEL);
1130     if (!dr)
1131         return -ENOMEM;
1132 
1133     gb_operation_get(operation);
1134 
1135     dr->operation = operation;
1136     INIT_WORK(&dr->work, gb_svc_process_deferred_request);
1137 
1138     queue_work(svc->wq, &dr->work);
1139 
1140     return 0;
1141 }
1142 
1143 static int gb_svc_intf_reset_recv(struct gb_operation *op)
1144 {
1145     struct gb_svc *svc = gb_connection_get_data(op->connection);
1146     struct gb_message *request = op->request;
1147     struct gb_svc_intf_reset_request *reset;
1148 
1149     if (request->payload_size < sizeof(*reset)) {
1150         dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
1151              request->payload_size, sizeof(*reset));
1152         return -EINVAL;
1153     }
1154     reset = request->payload;
1155 
1156     /* FIXME Reset the interface here */
1157 
1158     return 0;
1159 }
1160 
1161 static int gb_svc_module_inserted_recv(struct gb_operation *op)
1162 {
1163     struct gb_svc *svc = gb_connection_get_data(op->connection);
1164     struct gb_svc_module_inserted_request *request;
1165 
1166     if (op->request->payload_size < sizeof(*request)) {
1167         dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
1168              op->request->payload_size, sizeof(*request));
1169         return -EINVAL;
1170     }
1171 
1172     request = op->request->payload;
1173 
1174     dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1175         request->primary_intf_id);
1176 
1177     return gb_svc_queue_deferred_request(op);
1178 }
1179 
1180 static int gb_svc_module_removed_recv(struct gb_operation *op)
1181 {
1182     struct gb_svc *svc = gb_connection_get_data(op->connection);
1183     struct gb_svc_module_removed_request *request;
1184 
1185     if (op->request->payload_size < sizeof(*request)) {
1186         dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
1187              op->request->payload_size, sizeof(*request));
1188         return -EINVAL;
1189     }
1190 
1191     request = op->request->payload;
1192 
1193     dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1194         request->primary_intf_id);
1195 
1196     return gb_svc_queue_deferred_request(op);
1197 }
1198 
1199 static int gb_svc_intf_oops_recv(struct gb_operation *op)
1200 {
1201     struct gb_svc *svc = gb_connection_get_data(op->connection);
1202     struct gb_svc_intf_oops_request *request;
1203 
1204     if (op->request->payload_size < sizeof(*request)) {
1205         dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
1206              op->request->payload_size, sizeof(*request));
1207         return -EINVAL;
1208     }
1209 
1210     return gb_svc_queue_deferred_request(op);
1211 }
1212 
1213 static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
1214 {
1215     struct gb_svc *svc = gb_connection_get_data(op->connection);
1216     struct gb_svc_intf_mailbox_event_request *request;
1217 
1218     if (op->request->payload_size < sizeof(*request)) {
1219         dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
1220              op->request->payload_size, sizeof(*request));
1221         return -EINVAL;
1222     }
1223 
1224     request = op->request->payload;
1225 
1226     dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
1227 
1228     return gb_svc_queue_deferred_request(op);
1229 }
1230 
1231 static int gb_svc_request_handler(struct gb_operation *op)
1232 {
1233     struct gb_connection *connection = op->connection;
1234     struct gb_svc *svc = gb_connection_get_data(connection);
1235     u8 type = op->type;
1236     int ret = 0;
1237 
1238     /*
0239      * SVC requests need to follow a specific order (at least initially), and
0240      * the code below enforces that. The expected order is:
0241      * - PROTOCOL_VERSION
0242      * - SVC_HELLO
0243      * - Any other request, except the two above.
1244      *
1245      * Incoming requests are guaranteed to be serialized and so we don't
1246      * need to protect 'state' for any races.
1247      */
1248     switch (type) {
1249     case GB_SVC_TYPE_PROTOCOL_VERSION:
1250         if (svc->state != GB_SVC_STATE_RESET)
1251             ret = -EINVAL;
1252         break;
1253     case GB_SVC_TYPE_SVC_HELLO:
1254         if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
1255             ret = -EINVAL;
1256         break;
1257     default:
1258         if (svc->state != GB_SVC_STATE_SVC_HELLO)
1259             ret = -EINVAL;
1260         break;
1261     }
1262 
1263     if (ret) {
1264         dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
1265              type, svc->state);
1266         return ret;
1267     }
1268 
1269     switch (type) {
1270     case GB_SVC_TYPE_PROTOCOL_VERSION:
1271         ret = gb_svc_version_request(op);
1272         if (!ret)
1273             svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
1274         return ret;
1275     case GB_SVC_TYPE_SVC_HELLO:
1276         ret = gb_svc_hello(op);
1277         if (!ret)
1278             svc->state = GB_SVC_STATE_SVC_HELLO;
1279         return ret;
1280     case GB_SVC_TYPE_INTF_RESET:
1281         return gb_svc_intf_reset_recv(op);
1282     case GB_SVC_TYPE_MODULE_INSERTED:
1283         return gb_svc_module_inserted_recv(op);
1284     case GB_SVC_TYPE_MODULE_REMOVED:
1285         return gb_svc_module_removed_recv(op);
1286     case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
1287         return gb_svc_intf_mailbox_event_recv(op);
1288     case GB_SVC_TYPE_INTF_OOPS:
1289         return gb_svc_intf_oops_recv(op);
1290     default:
1291         dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
1292         return -EINVAL;
1293     }
1294 }
1295 
1296 static void gb_svc_release(struct device *dev)
1297 {
1298     struct gb_svc *svc = to_gb_svc(dev);
1299 
1300     if (svc->connection)
1301         gb_connection_destroy(svc->connection);
1302     ida_destroy(&svc->device_id_map);
1303     destroy_workqueue(svc->wq);
1304     kfree(svc);
1305 }
1306 
1307 struct device_type greybus_svc_type = {
1308     .name       = "greybus_svc",
1309     .release    = gb_svc_release,
1310 };
1311 
1312 struct gb_svc *gb_svc_create(struct gb_host_device *hd)
1313 {
1314     struct gb_svc *svc;
1315 
1316     svc = kzalloc(sizeof(*svc), GFP_KERNEL);
1317     if (!svc)
1318         return NULL;
1319 
1320     svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
1321     if (!svc->wq) {
1322         kfree(svc);
1323         return NULL;
1324     }
1325 
1326     svc->dev.parent = &hd->dev;
1327     svc->dev.bus = &greybus_bus_type;
1328     svc->dev.type = &greybus_svc_type;
1329     svc->dev.groups = svc_groups;
1330     svc->dev.dma_mask = svc->dev.parent->dma_mask;
1331     device_initialize(&svc->dev);
1332 
1333     dev_set_name(&svc->dev, "%d-svc", hd->bus_id);
1334 
1335     ida_init(&svc->device_id_map);
1336     svc->state = GB_SVC_STATE_RESET;
1337     svc->hd = hd;
1338 
1339     svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
1340                               gb_svc_request_handler);
1341     if (IS_ERR(svc->connection)) {
1342         dev_err(&svc->dev, "failed to create connection: %ld\n",
1343             PTR_ERR(svc->connection));
1344         goto err_put_device;
1345     }
1346 
1347     gb_connection_set_data(svc->connection, svc);
1348 
1349     return svc;
1350 
1351 err_put_device:
1352     put_device(&svc->dev);
1353     return NULL;
1354 }
1355 
1356 int gb_svc_add(struct gb_svc *svc)
1357 {
1358     int ret;
1359 
1360     /*
1361      * The SVC protocol is currently driven by the SVC, so the SVC device
1362      * is added from the connection request handler when enough
1363      * information has been received.
1364      */
1365     ret = gb_connection_enable(svc->connection);
1366     if (ret)
1367         return ret;
1368 
1369     return 0;
1370 }
1371 
1372 static void gb_svc_remove_modules(struct gb_svc *svc)
1373 {
1374     struct gb_host_device *hd = svc->hd;
1375     struct gb_module *module, *tmp;
1376 
1377     list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
1378         gb_module_del(module);
1379         list_del(&module->hd_node);
1380         gb_module_put(module);
1381     }
1382 }
1383 
1384 void gb_svc_del(struct gb_svc *svc)
1385 {
1386     gb_connection_disable_rx(svc->connection);
1387 
1388     /*
1389      * The SVC device may have been registered from the request handler.
1390      */
1391     if (device_is_registered(&svc->dev)) {
1392         gb_svc_debugfs_exit(svc);
1393         gb_svc_watchdog_destroy(svc);
1394         device_del(&svc->dev);
1395     }
1396 
1397     flush_workqueue(svc->wq);
1398 
1399     gb_svc_remove_modules(svc);
1400 
1401     gb_connection_disable(svc->connection);
1402 }
1403 
1404 void gb_svc_put(struct gb_svc *svc)
1405 {
1406     put_device(&svc->dev);
1407 }