// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus interface code
 */

#include <linux/delay.h>
#include <linux/greybus.h>

#include "greybus_trace.h"

#define GB_INTERFACE_MODE_SWITCH_TIMEOUT	2000

#define GB_INTERFACE_DEVICE_ID_BAD	0xff

#define GB_INTERFACE_AUTOSUSPEND_MS	3000

/* Time to allow the interface to enter standby before disabling REFCLK */
#define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS	20

/* Don't-care selector index */
#define DME_SELECTOR_INDEX_NULL	0

/* DME attributes */
/* T_TstSrcIncrement is used as an init-status stand-in on ES2 bridges */
#define DME_T_TST_SRC_INCREMENT	0x4083

#define DME_DDBL1_MANUFACTURERID	0x5003
#define DME_DDBL1_PRODUCTID		0x5004

#define DME_TOSHIBA_GMP_VID		0x6000
#define DME_TOSHIBA_GMP_PID		0x6001
#define DME_TOSHIBA_GMP_SN0		0x6002
#define DME_TOSHIBA_GMP_SN1		0x6003
#define DME_TOSHIBA_GMP_INIT_STATUS	0x6101

/* DDBL1 Manufacturer and Product ids */
#define TOSHIBA_DMID			0x0126
#define TOSHIBA_ES2_BRIDGE_DPID		0x1000
#define TOSHIBA_ES3_APBRIDGE_DPID	0x1001
#define TOSHIBA_ES3_GBPHY_DPID		0x1002

static int gb_interface_hibernate_link(struct gb_interface *intf);
static int gb_interface_refclk_set(struct gb_interface *intf, bool enable);

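/* Read a single DME attribute of the interface via the SVC. */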
static int gb_interface_dme_attr_get(struct gb_interface *intf,
				     u16 attr, u32 *val)
{
	return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id,
				   attr, DME_SELECTOR_INDEX_NULL, val);
}

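/*
 * Read the vendor id, product id and serial number from the
 * Toshiba-specific GMP DME attributes of the interface.
 */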
static int gb_interface_read_ara_dme(struct gb_interface *intf)
{
	u32 sn0, sn1;
	int ret;

	/*
	 * Only Toshiba bridges are supported for now; bail out for any
	 * other manufacturer until standard GMP attributes are defined.
	 */
	if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) {
		dev_err(&intf->dev, "unknown manufacturer %08x\n",
			intf->ddbl1_manufacturer_id);
		return -ENODEV;
	}

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID,
					&intf->vendor_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID,
					&intf->product_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1);
	if (ret)
		return ret;

	intf->serial_number = (u64)sn1 << 32 | sn0;

	return 0;
}

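/*
 * Read the DDBL1 ids and the GMP identification attributes of the
 * interface, applying ES2-bridge quirks as needed.  The attributes are
 * only read once per interface; subsequent calls return immediately.
 */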
static int gb_interface_read_dme(struct gb_interface *intf)
{
	int ret;

	/* DME attributes have already been read */
	if (intf->dme_read)
		return 0;

	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID,
					&intf->ddbl1_manufacturer_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID,
					&intf->ddbl1_product_id);
	if (ret)
		return ret;

	if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID &&
	    intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) {
		intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS;
		intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS;
	}

	ret = gb_interface_read_ara_dme(intf);
	if (ret)
		return ret;

	intf->dme_read = true;

	return 0;
}

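/*
 * Allocate a device id for the interface and ask the SVC to set up a
 * route between the AP and the interface.
 */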
static int gb_interface_route_create(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;
	u8 intf_id = intf->interface_id;
	u8 device_id;
	int ret;

	/* Allocate an interface device id. */
	ret = ida_simple_get(&svc->device_id_map,
			     GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1,
			     GFP_KERNEL);
	if (ret < 0) {
		dev_err(&intf->dev, "failed to allocate device id: %d\n", ret);
		return ret;
	}
	device_id = ret;

	ret = gb_svc_intf_device_id(svc, intf_id, device_id);
	if (ret) {
		dev_err(&intf->dev, "failed to set device id %u: %d\n",
			device_id, ret);
		goto err_ida_remove;
	}

	/* Create a route between the AP and the new interface. */
	ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP,
				  intf_id, device_id);
	if (ret) {
		dev_err(&intf->dev, "failed to create route: %d\n", ret);
		goto err_svc_id_free;
	}

	intf->device_id = device_id;

	return 0;

err_svc_id_free:
	/*
	 * The device id assigned via the SVC is not revoked here; only the
	 * local ida allocation is released.
	 */
err_ida_remove:
	ida_simple_remove(&svc->device_id_map, device_id);

	return ret;
}

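/* Tear down the AP route and release the interface device id. */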
static void gb_interface_route_destroy(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;

	if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD)
		return;

	gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
	ida_simple_remove(&svc->device_id_map, intf->device_id);
	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
}

/* Locking: Caller holds the interface mutex. */
static int gb_interface_legacy_mode_switch(struct gb_interface *intf)
{
	int ret;

	dev_info(&intf->dev, "legacy mode switch detected\n");

	/* Mark as disconnected to avoid I/O while disabling. */
	intf->disconnected = true;
	gb_interface_disable(intf);
	intf->disconnected = false;

	ret = gb_interface_enable(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret);
		gb_interface_deactivate(intf);
	}

	return ret;
}

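/*
 * Handle an interface mailbox event.  A valid Greybus mailbox value either
 * triggers the legacy mode-switch handling or completes a pending mode
 * switch; anything else disables and deactivates the interface.
 */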
void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
				u32 mailbox)
{
	mutex_lock(&intf->mutex);

	if (result) {
		dev_warn(&intf->dev,
			 "mailbox event with UniPro error: 0x%04x\n",
			 result);
		goto err_disable;
	}

	if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
		dev_warn(&intf->dev,
			 "mailbox event with unexpected value: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) {
		gb_interface_legacy_mode_switch(intf);
		goto out_unlock;
	}

	if (!intf->mode_switch) {
		dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	dev_info(&intf->dev, "mode switch detected\n");

	complete(&intf->mode_switch_completion);

out_unlock:
	mutex_unlock(&intf->mutex);

	return;

err_disable:
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}

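/*
 * Mode-switch work item: disable the interface, wait for the module to
 * signal the switch via a mailbox event, and then re-enable (re-enumerate)
 * the interface if it is still active.
 */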
static void gb_interface_mode_switch_work(struct work_struct *work)
{
	struct gb_interface *intf;
	struct gb_control *control;
	unsigned long timeout;
	int ret;

	intf = container_of(work, struct gb_interface, mode_switch_work);

	mutex_lock(&intf->mutex);

	if (!intf->enabled) {
		dev_dbg(&intf->dev, "mode switch aborted\n");
		intf->mode_switch = false;
		mutex_unlock(&intf->mutex);
		goto out_interface_put;
	}

	/*
	 * Prepare the control connection for mode switch and take an extra
	 * reference to the control device so that it survives the interface
	 * disable below.
	 */
	control = gb_control_get(intf->control);
	gb_control_mode_switch_prepare(control);
	gb_interface_disable(intf);
	mutex_unlock(&intf->mutex);

	timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT);
	ret = wait_for_completion_interruptible_timeout(
			&intf->mode_switch_completion, timeout);

	/* Finalise the control-connection mode switch. */
	gb_control_mode_switch_complete(control);
	gb_control_put(control);

	if (ret < 0) {
		dev_err(&intf->dev, "mode switch interrupted\n");
		goto err_deactivate;
	} else if (ret == 0) {
		dev_err(&intf->dev, "mode switch timed out\n");
		goto err_deactivate;
	}

	/* Re-enable (re-enumerate) the interface if it is still active. */
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	if (intf->active) {
		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev, "failed to re-enable interface: %d\n",
				ret);
			gb_interface_deactivate(intf);
		}
	}
	mutex_unlock(&intf->mutex);

out_interface_put:
	gb_interface_put(intf);

	return;

err_deactivate:
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);

	gb_interface_put(intf);
}

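/*
 * Request a mode switch for the interface.  The switch itself is carried
 * out from a workqueue item; an extra reference to the interface device is
 * held until that work has completed.  Returns -EBUSY if a mode switch is
 * already in progress or the work could not be queued.
 */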
int gb_interface_request_mode_switch(struct gb_interface *intf)
{
	int ret = 0;

	mutex_lock(&intf->mutex);
	if (intf->mode_switch) {
		ret = -EBUSY;
		goto out_unlock;
	}

	intf->mode_switch = true;
	reinit_completion(&intf->mode_switch_completion);

	/*
	 * Get a reference to the interface device, which is put from the
	 * mode-switch work item once the switch has completed.
	 */
	get_device(&intf->dev);

	if (!queue_work(system_long_wq, &intf->mode_switch_work)) {
		put_device(&intf->dev);
		ret = -EBUSY;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&intf->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch);

/*
 * Read and clear the interface init status.
 *
 * The module stores a non-zero init status in a DME attribute once its
 * boot stage has started.  On ES2 bridges the T_TstSrcIncrement attribute
 * is used as a stand-in for the GMP init-status attribute.  The AP reads
 * the attribute, derives the boot stage from it and then clears it.
 */
static int gb_interface_read_and_clear_init_status(struct gb_interface *intf)
{
	struct gb_host_device *hd = intf->hd;
	unsigned long bootrom_quirks;
	unsigned long s2l_quirks;
	int ret;
	u32 value;
	u16 attr;
	u8 init_status;

	/*
	 * ES2 bridges do not support the GMP init-status attribute; use
	 * T_TstSrcIncrement instead.
	 */
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		attr = DME_T_TST_SRC_INCREMENT;
	else
		attr = DME_TOSHIBA_GMP_INIT_STATUS;

	ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
				  DME_SELECTOR_INDEX_NULL, &value);
	if (ret)
		return ret;

	/*
	 * A zero value means the module has not reported a boot stage,
	 * which is treated as an error.
	 */
	if (!value) {
		dev_err(&intf->dev, "invalid init status\n");
		return -ENODEV;
	}

	/*
	 * Extract the init status (boot stage).
	 *
	 * For ES2 the status is stored in the lowest eight bits of the
	 * attribute value; otherwise it is stored in the highest eight
	 * bits.
	 */
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		init_status = value & 0xff;
	else
		init_status = value >> 24;

	/*
	 * Apply quirks based on the reported boot stage (e.g. the bootrom
	 * does not support CPort features or bundle activation).
	 */
	bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES |
		GB_INTERFACE_QUIRK_FORCED_DISABLE |
		GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH |
		GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE;

	s2l_quirks = GB_INTERFACE_QUIRK_NO_PM;

	switch (init_status) {
	case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED:
	case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED:
		intf->quirks |= bootrom_quirks;
		break;
	case GB_INIT_S2_LOADER_BOOT_STARTED:
		/* S2 loader does not support runtime PM */
		intf->quirks &= ~bootrom_quirks;
		intf->quirks |= s2l_quirks;
		break;
	default:
		intf->quirks &= ~bootrom_quirks;
		intf->quirks &= ~s2l_quirks;
	}

	/* Clear the init status. */
	return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
				   DME_SELECTOR_INDEX_NULL, 0);
}

/* Interface sysfs attributes */
#define gb_interface_attr(field, type)					\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_interface *intf = to_gb_interface(dev);		\
	return scnprintf(buf, PAGE_SIZE, type"\n", intf->field);	\
}									\
static DEVICE_ATTR_RO(field)

gb_interface_attr(ddbl1_manufacturer_id, "0x%08x");
gb_interface_attr(ddbl1_product_id, "0x%08x");
gb_interface_attr(interface_id, "%u");
gb_interface_attr(vendor_id, "0x%08x");
gb_interface_attr(product_id, "0x%08x");
gb_interface_attr(serial_number, "0x%016llx");

static ssize_t voltage_now_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_VOL,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(voltage_now);

static ssize_t current_now_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_CURR,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get current sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(current_now);

static ssize_t power_now_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_PWR,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get power sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(power_now);

static ssize_t power_state_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);

	if (intf->active)
		return scnprintf(buf, PAGE_SIZE, "on\n");
	else
		return scnprintf(buf, PAGE_SIZE, "off\n");
}

static ssize_t power_state_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t len)
{
	struct gb_interface *intf = to_gb_interface(dev);
	bool activate;
	int ret = 0;

	if (kstrtobool(buf, &activate))
		return -EINVAL;

	mutex_lock(&intf->mutex);

	if (activate == intf->active)
		goto unlock;

	if (activate) {
		ret = gb_interface_activate(intf);
		if (ret) {
			dev_err(&intf->dev,
				"failed to activate interface: %d\n", ret);
			goto unlock;
		}

		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev,
				"failed to enable interface: %d\n", ret);
			gb_interface_deactivate(intf);
			goto unlock;
		}
	} else {
		gb_interface_disable(intf);
		gb_interface_deactivate(intf);
	}

unlock:
	mutex_unlock(&intf->mutex);

	if (ret)
		return ret;

	return len;
}
static DEVICE_ATTR_RW(power_state);

static const char *gb_interface_type_string(struct gb_interface *intf)
{
	static const char * const types[] = {
		[GB_INTERFACE_TYPE_INVALID] = "invalid",
		[GB_INTERFACE_TYPE_UNKNOWN] = "unknown",
		[GB_INTERFACE_TYPE_DUMMY] = "dummy",
		[GB_INTERFACE_TYPE_UNIPRO] = "unipro",
		[GB_INTERFACE_TYPE_GREYBUS] = "greybus",
	};

	return types[intf->type];
}

static ssize_t interface_type_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);

	return sprintf(buf, "%s\n", gb_interface_type_string(intf));
}
static DEVICE_ATTR_RO(interface_type);

static struct attribute *interface_unipro_attrs[] = {
	&dev_attr_ddbl1_manufacturer_id.attr,
	&dev_attr_ddbl1_product_id.attr,
	NULL
};

static struct attribute *interface_greybus_attrs[] = {
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_serial_number.attr,
	NULL
};

static struct attribute *interface_power_attrs[] = {
	&dev_attr_voltage_now.attr,
	&dev_attr_current_now.attr,
	&dev_attr_power_now.attr,
	&dev_attr_power_state.attr,
	NULL
};

static struct attribute *interface_common_attrs[] = {
	&dev_attr_interface_id.attr,
	&dev_attr_interface_type.attr,
	NULL
};

static umode_t interface_unipro_is_visible(struct kobject *kobj,
					   struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_UNIPRO:
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static umode_t interface_greybus_is_visible(struct kobject *kobj,
					    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static umode_t interface_power_is_visible(struct kobject *kobj,
					  struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_UNIPRO:
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static const struct attribute_group interface_unipro_group = {
	.is_visible	= interface_unipro_is_visible,
	.attrs		= interface_unipro_attrs,
};

static const struct attribute_group interface_greybus_group = {
	.is_visible	= interface_greybus_is_visible,
	.attrs		= interface_greybus_attrs,
};

static const struct attribute_group interface_power_group = {
	.is_visible	= interface_power_is_visible,
	.attrs		= interface_power_attrs,
};

static const struct attribute_group interface_common_group = {
	.attrs		= interface_common_attrs,
};

static const struct attribute_group *interface_groups[] = {
	&interface_unipro_group,
	&interface_greybus_group,
	&interface_power_group,
	&interface_common_group,
	NULL
};

static void gb_interface_release(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);

	trace_gb_interface_release(intf);

	kfree(intf);
}

#ifdef CONFIG_PM
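/*
 * Runtime-suspend the interface: prepare and suspend its control
 * connection, hibernate the UniPro link and finally gate the reference
 * clock.
 */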
static int gb_interface_suspend(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;

	ret = gb_control_interface_suspend_prepare(intf->control);
	if (ret)
		return ret;

	ret = gb_control_suspend(intf->control);
	if (ret)
		goto err_hibernate_abort;

	ret = gb_interface_hibernate_link(intf);
	if (ret)
		return ret;

	/* Delay to allow the interface to enter standby before disabling REFCLK */
	msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS);

	ret = gb_interface_refclk_set(intf, false);
	if (ret)
		return ret;

	return 0;

err_hibernate_abort:
	gb_control_interface_hibernate_abort(intf->control);

	return ret;
}

static int gb_interface_resume(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		return ret;

	ret = gb_svc_intf_resume(svc, intf->interface_id);
	if (ret)
		return ret;

	ret = gb_control_resume(intf->control);
	if (ret)
		return ret;

	return 0;
}

static int gb_interface_runtime_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
#endif

static const struct dev_pm_ops gb_interface_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume,
			   gb_interface_runtime_idle)
};

struct device_type greybus_interface_type = {
	.name		= "greybus_interface",
	.release	= gb_interface_release,
	.pm		= &gb_interface_pm_ops,
};

/*
 * gb_interface_create() - allocate and initialise an interface
 *
 * Create a gb_interface structure to represent a discovered interface.
 * The interface is a child device of the supplied module and its position
 * within the module is given by the interface_id argument.
 *
 * Returns a pointer to the new interface, or NULL if allocation fails.
 */
struct gb_interface *gb_interface_create(struct gb_module *module,
					 u8 interface_id)
{
	struct gb_host_device *hd = module->hd;
	struct gb_interface *intf;

	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return NULL;

	intf->hd = hd;
	intf->module = module;
	intf->interface_id = interface_id;
	INIT_LIST_HEAD(&intf->bundles);
	INIT_LIST_HEAD(&intf->manifest_descs);
	mutex_init(&intf->mutex);
	INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work);
	init_completion(&intf->mode_switch_completion);

	/* Invalid device id to start with */
	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;

	intf->dev.parent = &module->dev;
	intf->dev.bus = &greybus_bus_type;
	intf->dev.type = &greybus_interface_type;
	intf->dev.groups = interface_groups;
	intf->dev.dma_mask = module->dev.dma_mask;
	device_initialize(&intf->dev);
	dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev),
		     interface_id);

	pm_runtime_set_autosuspend_delay(&intf->dev,
					 GB_INTERFACE_AUTOSUSPEND_MS);

	trace_gb_interface_create(intf);

	return intf;
}

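/* Enable or disable V_SYS for the interface via the SVC. */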
static int gb_interface_vsys_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_vsys_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set v_sys: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_refclk_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set refclk: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_unipro_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set UniPro: %d\n", ret);
		return ret;
	}

	return 0;
}

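/*
 * Ask the SVC to activate the interface and translate the reported
 * interface type.  Dummy, UniPro-only and unknown interfaces are treated
 * as errors.
 */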
static int gb_interface_activate_operation(struct gb_interface *intf,
					   enum gb_interface_type *intf_type)
{
	struct gb_svc *svc = intf->hd->svc;
	u8 type;
	int ret;

	dev_dbg(&intf->dev, "%s\n", __func__);

	ret = gb_svc_intf_activate(svc, intf->interface_id, &type);
	if (ret) {
		dev_err(&intf->dev, "failed to activate: %d\n", ret);
		return ret;
	}

	switch (type) {
	case GB_SVC_INTF_TYPE_DUMMY:
		*intf_type = GB_INTERFACE_TYPE_DUMMY;
		/* Dummy interfaces are handled as an error for now. */
		return -ENODEV;
	case GB_SVC_INTF_TYPE_UNIPRO:
		*intf_type = GB_INTERFACE_TYPE_UNIPRO;
		dev_err(&intf->dev, "interface type UniPro not supported\n");

		return -ENODEV;
	case GB_SVC_INTF_TYPE_GREYBUS:
		*intf_type = GB_INTERFACE_TYPE_GREYBUS;
		break;
	default:
		dev_err(&intf->dev, "unknown interface type: %u\n", type);
		*intf_type = GB_INTERFACE_TYPE_UNKNOWN;
		return -ENODEV;
	}

	return 0;
}

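/* Put the interface's UniPro link into hibernate state. */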
static int gb_interface_hibernate_link(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;

	return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id);
}

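/*
 * Activate an interface: enable V_SYS, the reference clock and UniPro,
 * issue the SVC activation operation, read the DME attributes and set up
 * the route to the AP.  On failure everything that was enabled is torn
 * down again.
 */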
static int _gb_interface_activate(struct gb_interface *intf,
				  enum gb_interface_type *type)
{
	int ret;

	*type = GB_INTERFACE_TYPE_UNKNOWN;

	if (intf->ejected || intf->removed)
		return -ENODEV;

	ret = gb_interface_vsys_set(intf, true);
	if (ret)
		return ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		goto err_vsys_disable;

	ret = gb_interface_unipro_set(intf, true);
	if (ret)
		goto err_refclk_disable;

	ret = gb_interface_activate_operation(intf, type);
	if (ret) {
		switch (*type) {
		case GB_INTERFACE_TYPE_UNIPRO:
		case GB_INTERFACE_TYPE_GREYBUS:
			goto err_hibernate_link;
		default:
			goto err_unipro_disable;
		}
	}

	ret = gb_interface_read_dme(intf);
	if (ret)
		goto err_hibernate_link;

	ret = gb_interface_route_create(intf);
	if (ret)
		goto err_hibernate_link;

	intf->active = true;

	trace_gb_interface_activate(intf);

	return 0;

err_hibernate_link:
	gb_interface_hibernate_link(intf);
err_unipro_disable:
	gb_interface_unipro_set(intf, false);
err_refclk_disable:
	gb_interface_refclk_set(intf, false);
err_vsys_disable:
	gb_interface_vsys_set(intf, false);

	return ret;
}

/*
 * An interface that reports type UniPro is assumed here to be a Greybus
 * interface that failed to announce itself properly, which appears to be
 * caused by an ES3 bootrom issue, so activation is retried a few times in
 * that case.
 */
static int _gb_interface_activate_es3_hack(struct gb_interface *intf,
					   enum gb_interface_type *type)
{
	int retries = 3;
	int ret;

	while (retries--) {
		ret = _gb_interface_activate(intf, type);
		if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO)
			continue;

		break;
	}

	return ret;
}

/*
 * Activate an interface.
 *
 * Locking: Caller holds the interface mutex.
 */
int gb_interface_activate(struct gb_interface *intf)
{
	enum gb_interface_type type;
	int ret;

	switch (intf->type) {
	case GB_INTERFACE_TYPE_INVALID:
	case GB_INTERFACE_TYPE_GREYBUS:
		ret = _gb_interface_activate_es3_hack(intf, &type);
		break;
	default:
		ret = _gb_interface_activate(intf, &type);
	}

	/* Make sure the type detected on reactivation matches the original. */
	if (intf->type != GB_INTERFACE_TYPE_INVALID) {
		if (type != intf->type) {
			dev_err(&intf->dev, "failed to detect interface type\n");

			if (!ret)
				gb_interface_deactivate(intf);

			return -EIO;
		}
	} else {
		intf->type = type;
	}

	return ret;
}

/*
 * Deactivate an interface.
 *
 * Locking: Caller holds the interface mutex.
 */
void gb_interface_deactivate(struct gb_interface *intf)
{
	if (!intf->active)
		return;

	trace_gb_interface_deactivate(intf);

	/* Abort any ongoing mode switch. */
	if (intf->mode_switch)
		complete(&intf->mode_switch_completion);

	gb_interface_route_destroy(intf);
	gb_interface_hibernate_link(intf);
	gb_interface_unipro_set(intf, false);
	gb_interface_refclk_set(intf, false);
	gb_interface_vsys_set(intf, false);

	intf->active = false;
}

/*
 * Enable an interface by enabling its control connection, fetching the
 * manifest and other information over it, and finally registering its
 * child devices.
 *
 * Locking: Caller holds the interface mutex.
 */
int gb_interface_enable(struct gb_interface *intf)
{
	struct gb_control *control;
	struct gb_bundle *bundle, *tmp;
	int ret, size;
	void *manifest;

	ret = gb_interface_read_and_clear_init_status(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to clear init status: %d\n", ret);
		return ret;
	}

	/* Establish the control connection. */
	control = gb_control_create(intf);
	if (IS_ERR(control)) {
		dev_err(&intf->dev, "failed to create control device: %ld\n",
			PTR_ERR(control));
		return PTR_ERR(control);
	}
	intf->control = control;

	ret = gb_control_enable(intf->control);
	if (ret)
		goto err_put_control;

	/* Get the manifest size using the control protocol. */
	size = gb_control_get_manifest_size_operation(intf);
	if (size <= 0) {
		dev_err(&intf->dev, "failed to get manifest size: %d\n", size);

		if (size)
			ret = size;
		else
			ret = -EINVAL;

		goto err_disable_control;
	}

	manifest = kmalloc(size, GFP_KERNEL);
	if (!manifest) {
		ret = -ENOMEM;
		goto err_disable_control;
	}

	/* Get the manifest using the control protocol. */
	ret = gb_control_get_manifest_operation(intf, manifest, size);
	if (ret) {
		dev_err(&intf->dev, "failed to get manifest: %d\n", ret);
		goto err_free_manifest;
	}

	/*
	 * Parse the manifest and build up the data structures representing
	 * what's in it.
	 */
	if (!gb_manifest_parse(intf, manifest, size)) {
		dev_err(&intf->dev, "failed to parse manifest\n");
		ret = -EINVAL;
		goto err_destroy_bundles;
	}

	ret = gb_control_get_bundle_versions(intf->control);
	if (ret)
		goto err_destroy_bundles;

	/* Register the control device and any bundles. */
	ret = gb_control_add(intf->control);
	if (ret)
		goto err_destroy_bundles;

	pm_runtime_use_autosuspend(&intf->dev);
	pm_runtime_get_noresume(&intf->dev);
	pm_runtime_set_active(&intf->dev);
	pm_runtime_enable(&intf->dev);

	list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) {
		ret = gb_bundle_add(bundle);
		if (ret) {
			gb_bundle_destroy(bundle);
			continue;
		}
	}

	kfree(manifest);

	intf->enabled = true;

	pm_runtime_put(&intf->dev);

	trace_gb_interface_enable(intf);

	return 0;

err_destroy_bundles:
	list_for_each_entry_safe(bundle, tmp, &intf->bundles, links)
		gb_bundle_destroy(bundle);
err_free_manifest:
	kfree(manifest);
err_disable_control:
	gb_control_disable(intf->control);
err_put_control:
	gb_control_put(intf->control);
	intf->control = NULL;

	return ret;
}

/*
 * Disable an interface and destroy its bundles.
 *
 * Locking: Caller holds the interface mutex.
 */
void gb_interface_disable(struct gb_interface *intf)
{
	struct gb_bundle *bundle;
	struct gb_bundle *next;

	if (!intf->enabled)
		return;

	trace_gb_interface_disable(intf);

	pm_runtime_get_sync(&intf->dev);

	/* Set the disconnected flag to avoid I/O during connection tear down. */
	if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE)
		intf->disconnected = true;

	list_for_each_entry_safe(bundle, next, &intf->bundles, links)
		gb_bundle_destroy(bundle);

	if (!intf->mode_switch && !intf->disconnected)
		gb_control_interface_deactivate_prepare(intf->control);

	gb_control_del(intf->control);
	gb_control_disable(intf->control);
	gb_control_put(intf->control);
	intf->control = NULL;

	intf->enabled = false;

	pm_runtime_disable(&intf->dev);
	pm_runtime_set_suspended(&intf->dev);
	pm_runtime_dont_use_autosuspend(&intf->dev);
	pm_runtime_put_noidle(&intf->dev);
}

/* Register an interface. */
int gb_interface_add(struct gb_interface *intf)
{
	int ret;

	ret = device_add(&intf->dev);
	if (ret) {
		dev_err(&intf->dev, "failed to register interface: %d\n", ret);
		return ret;
	}

	trace_gb_interface_add(intf);

	dev_info(&intf->dev, "Interface added (%s)\n",
		 gb_interface_type_string(intf));

	switch (intf->type) {
	case GB_INTERFACE_TYPE_GREYBUS:
		dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n",
			 intf->vendor_id, intf->product_id);
		fallthrough;
	case GB_INTERFACE_TYPE_UNIPRO:
		dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
			 intf->ddbl1_manufacturer_id,
			 intf->ddbl1_product_id);
		break;
	default:
		break;
	}

	return 0;
}

/* Deregister an interface. */
void gb_interface_del(struct gb_interface *intf)
{
	if (device_is_registered(&intf->dev)) {
		trace_gb_interface_del(intf);

		device_del(&intf->dev);
		dev_info(&intf->dev, "Interface removed\n");
	}
}

void gb_interface_put(struct gb_interface *intf)
{
	put_device(&intf->dev);
}