/*
 * Greybus interface code
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/delay.h>

#include "greybus.h"
#include "greybus_trace.h"
#define GB_INTERFACE_MODE_SWITCH_TIMEOUT	2000

#define GB_INTERFACE_DEVICE_ID_BAD	0xff

#define GB_INTERFACE_AUTOSUSPEND_MS	3000

/* Time required for interface to enter standby before disabling REFCLK */
#define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS	20

/* Don't-care selector index */
#define DME_SELECTOR_INDEX_NULL		0

/* DME attributes */
/* FIXME: remove ES2 support and DME_T_TST_SRC_INCREMENT */
#define DME_T_TST_SRC_INCREMENT		0x4083

#define DME_DDBL1_MANUFACTURERID	0x5003
#define DME_DDBL1_PRODUCTID		0x5004

#define DME_TOSHIBA_GMP_VID		0x6000
#define DME_TOSHIBA_GMP_PID		0x6001
#define DME_TOSHIBA_GMP_SN0		0x6002
#define DME_TOSHIBA_GMP_SN1		0x6003
#define DME_TOSHIBA_GMP_INIT_STATUS	0x6101

/* DDBL1 Manufacturer and Product ids */
#define TOSHIBA_DMID			0x0126
#define TOSHIBA_ES2_BRIDGE_DPID		0x1000
#define TOSHIBA_ES3_APBRIDGE_DPID	0x1001
#define TOSHIBA_ES3_GBPHY_DPID		0x1002
static int gb_interface_hibernate_link(struct gb_interface *intf);
static int gb_interface_refclk_set(struct gb_interface *intf, bool enable);

static int gb_interface_dme_attr_get(struct gb_interface *intf,
				     u16 attr, u32 *val)
{
	return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id,
				   attr, DME_SELECTOR_INDEX_NULL, val);
}

static int gb_interface_read_ara_dme(struct gb_interface *intf)
{
	u32 sn0, sn1;
	int ret;

	/*
	 * Unless this is a Toshiba bridge, bail out until we have defined
	 * standard GMP attributes.
	 */
	if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) {
		dev_err(&intf->dev, "unknown manufacturer %08x\n",
			intf->ddbl1_manufacturer_id);
		return -ENODEV;
	}

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID,
					&intf->vendor_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID,
					&intf->product_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1);
	if (ret)
		return ret;

	intf->serial_number = (u64)sn1 << 32 | sn0;

	return 0;
}
static int gb_interface_read_dme(struct gb_interface *intf)
{
	int ret;

	/* DME attributes have already been read */
	if (intf->dme_read)
		return 0;

	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID,
					&intf->ddbl1_manufacturer_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID,
					&intf->ddbl1_product_id);
	if (ret)
		return ret;

	if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID &&
	    intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) {
		intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS;
		intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS;
	}

	ret = gb_interface_read_ara_dme(intf);
	if (ret)
		return ret;

	intf->dme_read = true;

	return 0;
}
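
/*
 * Allocate a device id for the interface and instruct the SVC to create a
 * route between the AP and the interface.
 */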
static int gb_interface_route_create(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;
	u8 intf_id = intf->interface_id;
	u8 device_id;
	int ret;

	/* Allocate an interface device id. */
	ret = ida_simple_get(&svc->device_id_map,
			     GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1,
			     GFP_KERNEL);
	if (ret < 0) {
		dev_err(&intf->dev, "failed to allocate device id: %d\n", ret);
		return ret;
	}
	device_id = ret;

	ret = gb_svc_intf_device_id(svc, intf_id, device_id);
	if (ret) {
		dev_err(&intf->dev, "failed to set device id %u: %d\n",
			device_id, ret);
		goto err_ida_remove;
	}

	/* FIXME: Hard-coded AP device id. */
	ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP,
				  intf_id, device_id);
	if (ret) {
		dev_err(&intf->dev, "failed to create route: %d\n", ret);
		goto err_svc_id_free;
	}

	intf->device_id = device_id;

	return 0;

err_svc_id_free:
	/*
	 * XXX Should we tell SVC that this id doesn't belong to interface
	 * XXX anymore.
	 */
err_ida_remove:
	ida_simple_remove(&svc->device_id_map, device_id);

	return ret;
}

static void gb_interface_route_destroy(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;

	if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD)
		return;

	gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
	ida_simple_remove(&svc->device_id_map, intf->device_id);
	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
}
/* Locking: Caller holds the interface mutex. */
static int gb_interface_legacy_mode_switch(struct gb_interface *intf)
{
	int ret;

	dev_info(&intf->dev, "legacy mode switch detected\n");

	/* Mark as disconnected to prevent I/O during disable. */
	intf->disconnected = true;
	gb_interface_disable(intf);
	intf->disconnected = false;

	ret = gb_interface_enable(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret);
		gb_interface_deactivate(intf);
	}

	return ret;
}
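
/*
 * Handle a mailbox event reported by the SVC for this interface: disable the
 * interface on a UniPro error or an unexpected mailbox value, and otherwise
 * trigger (or complete) a mode switch.
 */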
void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
				u32 mailbox)
{
	mutex_lock(&intf->mutex);

	if (result) {
		dev_warn(&intf->dev,
			 "mailbox event with UniPro error: 0x%04x\n",
			 result);
		goto err_disable;
	}

	if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
		dev_warn(&intf->dev,
			 "mailbox event with unexpected value: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) {
		gb_interface_legacy_mode_switch(intf);
		goto out_unlock;
	}

	if (!intf->mode_switch) {
		dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	dev_info(&intf->dev, "mode switch detected\n");

	complete(&intf->mode_switch_completion);

out_unlock:
	mutex_unlock(&intf->mutex);

	return;

err_disable:
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}
static void gb_interface_mode_switch_work(struct work_struct *work)
{
	struct gb_interface *intf;
	struct gb_control *control;
	unsigned long timeout;
	int ret;

	intf = container_of(work, struct gb_interface, mode_switch_work);

	mutex_lock(&intf->mutex);
	/* Make sure interface is still enabled. */
	if (!intf->enabled) {
		dev_dbg(&intf->dev, "mode switch aborted\n");
		intf->mode_switch = false;
		mutex_unlock(&intf->mutex);
		goto out_interface_put;
	}

	/*
	 * Prepare the control device for mode switch and make sure to get an
	 * extra reference before it goes away during interface disable.
	 */
	control = gb_control_get(intf->control);
	gb_control_mode_switch_prepare(control);
	gb_interface_disable(intf);
	mutex_unlock(&intf->mutex);

	timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT);
	ret = wait_for_completion_interruptible_timeout(
			&intf->mode_switch_completion, timeout);

	/* Finalise control-connection mode switch. */
	gb_control_mode_switch_complete(control);
	gb_control_put(control);

	if (ret < 0) {
		dev_err(&intf->dev, "mode switch interrupted\n");
		goto err_deactivate;
	} else if (ret == 0) {
		dev_err(&intf->dev, "mode switch timed out\n");
		goto err_deactivate;
	}

	/* Re-enable (re-enumerate) interface if still active. */
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	if (intf->active) {
		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev, "failed to re-enable interface: %d\n",
				ret);
			gb_interface_deactivate(intf);
		}
	}
	mutex_unlock(&intf->mutex);

out_interface_put:
	gb_interface_put(intf);

	return;

err_deactivate:
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);

	gb_interface_put(intf);
}
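
/*
 * Request a mode switch for an interface: mark the switch as pending, take a
 * reference to the interface device and queue the mode-switch work. Returns
 * -EBUSY if a switch is already in progress or the work could not be queued.
 */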
int gb_interface_request_mode_switch(struct gb_interface *intf)
{
	int ret = 0;

	mutex_lock(&intf->mutex);
	if (intf->mode_switch) {
		ret = -EBUSY;
		goto out_unlock;
	}

	intf->mode_switch = true;
	reinit_completion(&intf->mode_switch_completion);

	/*
	 * Get a reference to the interface device, which will be put once the
	 * mode switch is complete.
	 */
	get_device(&intf->dev);

	if (!queue_work(system_long_wq, &intf->mode_switch_work)) {
		put_device(&intf->dev);
		ret = -EBUSY;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&intf->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch);
/*
 * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
 * init-status attribute DME_TOSHIBA_INIT_STATUS. The AP needs to read and
 * clear it after reading a non-zero value from it.
 *
 * FIXME: This is module-hardware dependent and needs to be extended for every
 * type of module we want to support.
 */
static int gb_interface_read_and_clear_init_status(struct gb_interface *intf)
{
	struct gb_host_device *hd = intf->hd;
	unsigned long bootrom_quirks;
	unsigned long s2l_quirks;
	int ret;
	u32 value;
	u16 attr;
	u8 init_status;

	/*
	 * ES2 bridges use T_TstSrcIncrement for the init status.
	 *
	 * FIXME: Remove ES2 support
	 */
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		attr = DME_T_TST_SRC_INCREMENT;
	else
		attr = DME_TOSHIBA_GMP_INIT_STATUS;

	ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
				  DME_SELECTOR_INDEX_NULL, &value);
	if (ret)
		return ret;

	/*
	 * A nonzero init status indicates the module has finished
	 * initializing.
	 */
	if (!value) {
		dev_err(&intf->dev, "invalid init status\n");
		return -ENODEV;
	}

	/*
	 * Extract the init status.
	 *
	 * For ES2: We need to check lowest 8 bits of 'value'.
	 * For ES3: We need to check highest 8 bits out of 32 of 'value'.
	 *
	 * FIXME: Remove ES2 support
	 */
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		init_status = value & 0xff;
	else
		init_status = value >> 24;

	/*
	 * Check if the interface is executing the quirky ES3 bootrom that,
	 * for example, requires E2EFC, CSD and CSV to be disabled.
	 */
	bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES |
				GB_INTERFACE_QUIRK_FORCED_DISABLE |
				GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH |
				GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE;

	s2l_quirks = GB_INTERFACE_QUIRK_NO_PM;

	switch (init_status) {
	case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED:
	case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED:
		intf->quirks |= bootrom_quirks;
		break;
	case GB_INIT_S2_LOADER_BOOT_STARTED:
		/* S2 Loader doesn't support runtime PM */
		intf->quirks &= ~bootrom_quirks;
		intf->quirks |= s2l_quirks;
		break;
	default:
		intf->quirks &= ~bootrom_quirks;
		intf->quirks &= ~s2l_quirks;
	}

	/* Clear the init status. */
	return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
				   DME_SELECTOR_INDEX_NULL, 0);
}
/* interface sysfs attributes */
#define gb_interface_attr(field, type)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_interface *intf = to_gb_interface(dev);		\
	return scnprintf(buf, PAGE_SIZE, type"\n", intf->field);	\
}									\
static DEVICE_ATTR_RO(field)

gb_interface_attr(ddbl1_manufacturer_id, "0x%08x");
gb_interface_attr(ddbl1_product_id, "0x%08x");
gb_interface_attr(interface_id, "%u");
gb_interface_attr(vendor_id, "0x%08x");
gb_interface_attr(product_id, "0x%08x");
gb_interface_attr(serial_number, "0x%016llx");
static ssize_t voltage_now_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_VOL,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(voltage_now);

static ssize_t current_now_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_CURR,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get current sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(current_now);

static ssize_t power_now_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret;
	u32 measurement;

	ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
					    GB_SVC_PWRMON_TYPE_PWR,
					    &measurement);
	if (ret) {
		dev_err(&intf->dev, "failed to get power sample (%d)\n", ret);
		return ret;
	}

	return sprintf(buf, "%u\n", measurement);
}
static DEVICE_ATTR_RO(power_now);
static ssize_t power_state_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);

	if (intf->active)
		return scnprintf(buf, PAGE_SIZE, "on\n");
	else
		return scnprintf(buf, PAGE_SIZE, "off\n");
}

static ssize_t power_state_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t len)
{
	struct gb_interface *intf = to_gb_interface(dev);
	bool activate;
	int ret = 0;

	if (kstrtobool(buf, &activate))
		return -EINVAL;

	mutex_lock(&intf->mutex);

	if (activate == intf->active)
		goto unlock;

	if (activate) {
		ret = gb_interface_activate(intf);
		if (ret) {
			dev_err(&intf->dev,
				"failed to activate interface: %d\n", ret);
			goto unlock;
		}

		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev,
				"failed to enable interface: %d\n", ret);
			gb_interface_deactivate(intf);
			goto unlock;
		}
	} else {
		gb_interface_disable(intf);
		gb_interface_deactivate(intf);
	}

unlock:
	mutex_unlock(&intf->mutex);

	if (ret)
		return ret;

	return len;
}
static DEVICE_ATTR_RW(power_state);
static const char *gb_interface_type_string(struct gb_interface *intf)
{
	static const char * const types[] = {
		[GB_INTERFACE_TYPE_INVALID] = "invalid",
		[GB_INTERFACE_TYPE_UNKNOWN] = "unknown",
		[GB_INTERFACE_TYPE_DUMMY] = "dummy",
		[GB_INTERFACE_TYPE_UNIPRO] = "unipro",
		[GB_INTERFACE_TYPE_GREYBUS] = "greybus",
	};

	return types[intf->type];
}

static ssize_t interface_type_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gb_interface *intf = to_gb_interface(dev);

	return sprintf(buf, "%s\n", gb_interface_type_string(intf));
}
static DEVICE_ATTR_RO(interface_type);
static struct attribute *interface_unipro_attrs[] = {
	&dev_attr_ddbl1_manufacturer_id.attr,
	&dev_attr_ddbl1_product_id.attr,
	NULL
};

static struct attribute *interface_greybus_attrs[] = {
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_serial_number.attr,
	NULL
};

static struct attribute *interface_power_attrs[] = {
	&dev_attr_voltage_now.attr,
	&dev_attr_current_now.attr,
	&dev_attr_power_now.attr,
	&dev_attr_power_state.attr,
	NULL
};

static struct attribute *interface_common_attrs[] = {
	&dev_attr_interface_id.attr,
	&dev_attr_interface_type.attr,
	NULL
};
static umode_t interface_unipro_is_visible(struct kobject *kobj,
					   struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_UNIPRO:
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static umode_t interface_greybus_is_visible(struct kobject *kobj,
					    struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}

static umode_t interface_power_is_visible(struct kobject *kobj,
					  struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct gb_interface *intf = to_gb_interface(dev);

	switch (intf->type) {
	case GB_INTERFACE_TYPE_UNIPRO:
	case GB_INTERFACE_TYPE_GREYBUS:
		return attr->mode;
	default:
		return 0;
	}
}
static const struct attribute_group interface_unipro_group = {
	.is_visible	= interface_unipro_is_visible,
	.attrs		= interface_unipro_attrs,
};

static const struct attribute_group interface_greybus_group = {
	.is_visible	= interface_greybus_is_visible,
	.attrs		= interface_greybus_attrs,
};

static const struct attribute_group interface_power_group = {
	.is_visible	= interface_power_is_visible,
	.attrs		= interface_power_attrs,
};

static const struct attribute_group interface_common_group = {
	.attrs		= interface_common_attrs,
};

static const struct attribute_group *interface_groups[] = {
	&interface_unipro_group,
	&interface_greybus_group,
	&interface_power_group,
	&interface_common_group,
	NULL
};
static void gb_interface_release(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);

	trace_gb_interface_release(intf);

	kfree(intf);
}
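
/*
 * Runtime-PM callbacks: the suspend/resume/idle handlers below are wired up
 * through SET_RUNTIME_PM_OPS() further down, so they run as part of runtime
 * power management of the interface device.
 */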
#ifdef CONFIG_PM
static int gb_interface_suspend(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret, timesync_ret;

	ret = gb_control_interface_suspend_prepare(intf->control);
	if (ret)
		return ret;

	gb_timesync_interface_remove(intf);

	ret = gb_control_suspend(intf->control);
	if (ret)
		goto err_hibernate_abort;

	ret = gb_interface_hibernate_link(intf);
	if (ret)
		return ret;

	/* Delay to allow interface to enter standby before disabling refclk */
	msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS);

	ret = gb_interface_refclk_set(intf, false);
	if (ret)
		return ret;

	return 0;

err_hibernate_abort:
	gb_control_interface_hibernate_abort(intf->control);

	timesync_ret = gb_timesync_interface_add(intf);
	if (timesync_ret) {
		dev_err(dev, "failed to add to timesync: %d\n", timesync_ret);
		return timesync_ret;
	}

	return ret;
}

static int gb_interface_resume(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		return ret;

	ret = gb_svc_intf_resume(svc, intf->interface_id);
	if (ret)
		return ret;

	ret = gb_control_resume(intf->control);
	if (ret)
		return ret;

	ret = gb_timesync_interface_add(intf);
	if (ret) {
		dev_err(dev, "failed to add to timesync: %d\n", ret);
		return ret;
	}

	ret = gb_timesync_schedule_synchronous(intf);
	if (ret) {
		dev_err(dev, "failed to synchronize FrameTime: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_runtime_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
#endif

static const struct dev_pm_ops gb_interface_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume,
			   gb_interface_runtime_idle)
};

struct device_type greybus_interface_type = {
	.name =		"greybus_interface",
	.release =	gb_interface_release,
	.pm =		&gb_interface_pm_ops,
};
/*
 * A Greybus module represents a user-replaceable component on a GMP
 * phone. An interface is the physical connection on that module. A
 * module may have more than one interface.
 *
 * Create a gb_interface structure to represent a discovered interface.
 * The position of the interface within the Endo is encoded in the
 * "interface_id" argument.
 *
 * Returns a pointer to the new interface or a null pointer if a
 * failure occurs due to memory exhaustion.
 */
struct gb_interface *gb_interface_create(struct gb_module *module,
					 u8 interface_id)
{
	struct gb_host_device *hd = module->hd;
	struct gb_interface *intf;

	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return NULL;

	intf->hd = hd;		/* XXX refcount? */
	intf->module = module;
	intf->interface_id = interface_id;
	INIT_LIST_HEAD(&intf->bundles);
	INIT_LIST_HEAD(&intf->manifest_descs);
	mutex_init(&intf->mutex);
	INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work);
	init_completion(&intf->mode_switch_completion);

	/* Invalid device id to start with */
	intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;

	intf->dev.parent = &module->dev;
	intf->dev.bus = &greybus_bus_type;
	intf->dev.type = &greybus_interface_type;
	intf->dev.groups = interface_groups;
	intf->dev.dma_mask = module->dev.dma_mask;
	device_initialize(&intf->dev);
	dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev),
		     interface_id);

	pm_runtime_set_autosuspend_delay(&intf->dev,
					 GB_INTERFACE_AUTOSUSPEND_MS);

	trace_gb_interface_create(intf);

	return intf;
}
static int gb_interface_vsys_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_vsys_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set v_sys: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_refclk_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set refclk: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_unipro_set(struct gb_interface *intf, bool enable)
{
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);

	ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable);
	if (ret) {
		dev_err(&intf->dev, "failed to set UniPro: %d\n", ret);
		return ret;
	}

	return 0;
}

static int gb_interface_activate_operation(struct gb_interface *intf,
					   enum gb_interface_type *intf_type)
{
	struct gb_svc *svc = intf->hd->svc;
	u8 type;
	int ret;

	dev_dbg(&intf->dev, "%s\n", __func__);

	ret = gb_svc_intf_activate(svc, intf->interface_id, &type);
	if (ret) {
		dev_err(&intf->dev, "failed to activate: %d\n", ret);
		return ret;
	}

	switch (type) {
	case GB_SVC_INTF_TYPE_DUMMY:
		*intf_type = GB_INTERFACE_TYPE_DUMMY;
		/* FIXME: handle as an error for now */
		return -ENODEV;
	case GB_SVC_INTF_TYPE_UNIPRO:
		*intf_type = GB_INTERFACE_TYPE_UNIPRO;
		dev_err(&intf->dev, "interface type UniPro not supported\n");
		/* FIXME: handle as an error for now */
		return -ENODEV;
	case GB_SVC_INTF_TYPE_GREYBUS:
		*intf_type = GB_INTERFACE_TYPE_GREYBUS;
		break;
	default:
		dev_err(&intf->dev, "unknown interface type: %u\n", type);
		*intf_type = GB_INTERFACE_TYPE_UNKNOWN;
		return -ENODEV;
	}

	return 0;
}

static int gb_interface_hibernate_link(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;

	return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id);
}
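
/*
 * Power up and activate an interface: enable V_SYS, the reference clock and
 * UniPro, issue the SVC activate operation to detect the interface type, read
 * the DME attributes and create the AP route. Each step is unwound on error.
 */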
static int _gb_interface_activate(struct gb_interface *intf,
				  enum gb_interface_type *type)
{
	int ret;

	*type = GB_INTERFACE_TYPE_UNKNOWN;

	if (intf->ejected || intf->removed)
		return -ENODEV;

	ret = gb_interface_vsys_set(intf, true);
	if (ret)
		return ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		goto err_vsys_disable;

	ret = gb_interface_unipro_set(intf, true);
	if (ret)
		goto err_refclk_disable;

	ret = gb_interface_activate_operation(intf, type);
	if (ret) {
		switch (*type) {
		case GB_INTERFACE_TYPE_UNIPRO:
		case GB_INTERFACE_TYPE_GREYBUS:
			goto err_hibernate_link;
		default:
			goto err_unipro_disable;
		}
	}

	ret = gb_interface_read_dme(intf);
	if (ret)
		goto err_hibernate_link;

	ret = gb_interface_route_create(intf);
	if (ret)
		goto err_hibernate_link;

	intf->active = true;

	trace_gb_interface_activate(intf);

	return 0;

err_hibernate_link:
	gb_interface_hibernate_link(intf);
err_unipro_disable:
	gb_interface_unipro_set(intf, false);
err_refclk_disable:
	gb_interface_refclk_set(intf, false);
err_vsys_disable:
	gb_interface_vsys_set(intf, false);

	return ret;
}
/*
 * At present, we assume a UniPro-only module to be a Greybus module that
 * failed to send its mailbox poke. There is some reason to believe that this
 * is because of a bug in the ES3 bootrom.
 *
 * FIXME: Check if this is a Toshiba bridge before retrying?
 */
static int _gb_interface_activate_es3_hack(struct gb_interface *intf,
					   enum gb_interface_type *type)
{
	int retries = 3;
	int ret;

	while (retries--) {
		ret = _gb_interface_activate(intf, type);
		if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO)
			continue;

		break;
	}

	return ret;
}

/*
 * Activate an interface.
 *
 * Locking: Caller holds the interface mutex.
 */
int gb_interface_activate(struct gb_interface *intf)
{
	enum gb_interface_type type;
	int ret;

	switch (intf->type) {
	case GB_INTERFACE_TYPE_INVALID:
	case GB_INTERFACE_TYPE_GREYBUS:
		ret = _gb_interface_activate_es3_hack(intf, &type);
		break;
	default:
		ret = _gb_interface_activate(intf, &type);
	}

	/* Make sure type is detected correctly during reactivation. */
	if (intf->type != GB_INTERFACE_TYPE_INVALID) {
		if (type != intf->type) {
			dev_err(&intf->dev, "failed to detect interface type\n");

			if (!ret)
				gb_interface_deactivate(intf);

			return -EIO;
		}
	} else {
		intf->type = type;
	}

	return ret;
}
/*
 * Deactivate an interface.
 *
 * Locking: Caller holds the interface mutex.
 */
void gb_interface_deactivate(struct gb_interface *intf)
{
	if (!intf->active)
		return;

	trace_gb_interface_deactivate(intf);

	/* Abort any ongoing mode switch. */
	if (intf->mode_switch)
		complete(&intf->mode_switch_completion);

	gb_interface_route_destroy(intf);
	gb_interface_hibernate_link(intf);
	gb_interface_unipro_set(intf, false);
	gb_interface_refclk_set(intf, false);
	gb_interface_vsys_set(intf, false);

	intf->active = false;
}
/*
 * Enable an interface by enabling its control connection, fetching the
 * manifest and other information over it, and finally registering its child
 * devices.
 *
 * Locking: Caller holds the interface mutex.
 */
int gb_interface_enable(struct gb_interface *intf)
{
	struct gb_control *control;
	struct gb_bundle *bundle, *tmp;
	int ret, size;
	void *manifest;

	ret = gb_interface_read_and_clear_init_status(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to clear init status: %d\n", ret);
		return ret;
	}

	/* Establish control connection */
	control = gb_control_create(intf);
	if (IS_ERR(control)) {
		dev_err(&intf->dev, "failed to create control device: %ld\n",
			PTR_ERR(control));
		return PTR_ERR(control);
	}
	intf->control = control;

	ret = gb_control_enable(intf->control);
	if (ret)
		goto err_put_control;

	/* Get manifest size using control protocol on CPort */
	size = gb_control_get_manifest_size_operation(intf);
	if (size <= 0) {
		dev_err(&intf->dev, "failed to get manifest size: %d\n", size);

		if (size)
			ret = size;
		else
			ret = -EINVAL;

		goto err_disable_control;
	}

	manifest = kmalloc(size, GFP_KERNEL);
	if (!manifest) {
		ret = -ENOMEM;
		goto err_disable_control;
	}

	/* Get manifest using control protocol on CPort */
	ret = gb_control_get_manifest_operation(intf, manifest, size);
	if (ret) {
		dev_err(&intf->dev, "failed to get manifest: %d\n", ret);
		goto err_free_manifest;
	}

	/*
	 * Parse the manifest and build up our data structures representing
	 * what's in it.
	 */
	if (!gb_manifest_parse(intf, manifest, size)) {
		dev_err(&intf->dev, "failed to parse manifest\n");
		ret = -EINVAL;
		goto err_destroy_bundles;
	}

	ret = gb_control_get_bundle_versions(intf->control);
	if (ret)
		goto err_destroy_bundles;

	ret = gb_timesync_interface_add(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to add to timesync: %d\n", ret);
		goto err_destroy_bundles;
	}

	/* Register the control device and any bundles */
	ret = gb_control_add(intf->control);
	if (ret)
		goto err_remove_timesync;

	pm_runtime_use_autosuspend(&intf->dev);
	pm_runtime_get_noresume(&intf->dev);
	pm_runtime_set_active(&intf->dev);
	pm_runtime_enable(&intf->dev);

	list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) {
		ret = gb_bundle_add(bundle);
		if (ret) {
			gb_bundle_destroy(bundle);
			continue;
		}
	}

	kfree(manifest);

	intf->enabled = true;

	pm_runtime_put(&intf->dev);

	trace_gb_interface_enable(intf);

	return 0;

err_remove_timesync:
	gb_timesync_interface_remove(intf);
err_destroy_bundles:
	list_for_each_entry_safe(bundle, tmp, &intf->bundles, links)
		gb_bundle_destroy(bundle);
err_free_manifest:
	kfree(manifest);
err_disable_control:
	gb_control_disable(intf->control);
err_put_control:
	gb_control_put(intf->control);
	intf->control = NULL;

	return ret;
}
/*
 * Disable an interface and destroy its bundles.
 *
 * Locking: Caller holds the interface mutex.
 */
void gb_interface_disable(struct gb_interface *intf)
{
	struct gb_bundle *bundle;
	struct gb_bundle *next;

	if (!intf->enabled)
		return;

	trace_gb_interface_disable(intf);

	pm_runtime_get_sync(&intf->dev);

	/* Set disconnected flag to avoid I/O during connection tear down. */
	if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE)
		intf->disconnected = true;

	list_for_each_entry_safe(bundle, next, &intf->bundles, links)
		gb_bundle_destroy(bundle);

	if (!intf->mode_switch && !intf->disconnected)
		gb_control_interface_deactivate_prepare(intf->control);

	gb_control_del(intf->control);
	gb_timesync_interface_remove(intf);
	gb_control_disable(intf->control);
	gb_control_put(intf->control);
	intf->control = NULL;

	intf->enabled = false;

	pm_runtime_disable(&intf->dev);
	pm_runtime_set_suspended(&intf->dev);
	pm_runtime_dont_use_autosuspend(&intf->dev);
	pm_runtime_put_noidle(&intf->dev);
}
/* Enable TimeSync on an Interface control connection. */
int gb_interface_timesync_enable(struct gb_interface *intf, u8 count,
				 u64 frame_time, u32 strobe_delay, u32 refclk)
{
	return gb_control_timesync_enable(intf->control, count,
					  frame_time, strobe_delay,
					  refclk);
}

/* Disable TimeSync on an Interface control connection. */
int gb_interface_timesync_disable(struct gb_interface *intf)
{
	return gb_control_timesync_disable(intf->control);
}

/* Transmit the Authoritative FrameTime via an Interface control connection. */
int gb_interface_timesync_authoritative(struct gb_interface *intf,
					u64 *frame_time)
{
	return gb_control_timesync_authoritative(intf->control,
						 frame_time);
}
/* Register an interface. */
int gb_interface_add(struct gb_interface *intf)
{
	int ret;

	ret = device_add(&intf->dev);
	if (ret) {
		dev_err(&intf->dev, "failed to register interface: %d\n", ret);
		return ret;
	}

	trace_gb_interface_add(intf);

	dev_info(&intf->dev, "Interface added (%s)\n",
		 gb_interface_type_string(intf));

	switch (intf->type) {
	case GB_INTERFACE_TYPE_GREYBUS:
		dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n",
			 intf->vendor_id, intf->product_id);
		/* fall-through */
	case GB_INTERFACE_TYPE_UNIPRO:
		dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
			 intf->ddbl1_manufacturer_id,
			 intf->ddbl1_product_id);
		break;
	default:
		break;
	}

	return 0;
}
/* Deregister an interface. */
void gb_interface_del(struct gb_interface *intf)
{
	if (device_is_registered(&intf->dev)) {
		trace_gb_interface_del(intf);

		device_del(&intf->dev);
		dev_info(&intf->dev, "Interface removed\n");
	}
}

void gb_interface_put(struct gb_interface *intf)
{
	put_device(&intf->dev);
}