// SPDX-License-Identifier: GPL-2.0
/*
 * Loopback bridge driver for the Greybus loopback module.
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/sizes.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list_sort.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>

#include <asm/div64.h>

#include "greybus.h"
#include "connection.h"

#define NSEC_PER_DAY 86400000000000ULL
struct gb_loopback_stats {
	u32 min;
	u32 max;
	u64 sum;
	u32 count;
};

struct gb_loopback_device {
	struct dentry *root;
	u32 count;
	size_t size_max;

	/* We need to take a lock in atomic context */
	spinlock_t lock;
	struct list_head list;
	struct list_head list_op_async;
};

static struct gb_loopback_device gb_dev;
struct gb_loopback_async_operation {
	struct gb_loopback *gb;
	struct gb_operation *operation;
	ktime_t ts;

	int (*completion)(struct gb_loopback_async_operation *op_async);
};
struct gb_loopback {
	struct gb_connection *connection;

	struct dentry *file;
	struct kfifo kfifo_lat;
	struct mutex mutex;
	struct task_struct *task;
	struct list_head entry;
	struct device *dev;
	wait_queue_head_t wq;
	wait_queue_head_t wq_completion;
	atomic_t outstanding_operations;

	/* Per connection stats */
	ktime_t ts;
	struct gb_loopback_stats latency;
	struct gb_loopback_stats throughput;
	struct gb_loopback_stats requests_per_second;
	struct gb_loopback_stats apbridge_unipro_latency;
	struct gb_loopback_stats gbphy_firmware_latency;

	int type;
	int async;
	int id;
	u32 size;
	u32 iteration_max;
	u32 iteration_count;
	int us_wait;
	u32 error;
	u32 requests_completed;
	u32 requests_timedout;
	u32 timeout;
	u32 jiffy_timeout;
	u32 timeout_min;
	u32 timeout_max;
	u32 outstanding_operations_max;
	u64 elapsed_nsecs;
	u32 apbridge_latency_ts;
	u32 gbphy_latency_ts;

	u32 send_count;
	u32 lbid;
};
static struct class loopback_class = {
	.name		= "gb_loopback",
	.owner		= THIS_MODULE,
};
static DEFINE_IDA(loopback_ida);
/* Min/max values in jiffies */
#define GB_LOOPBACK_TIMEOUT_MIN		1
#define GB_LOOPBACK_TIMEOUT_MAX		10000

#define GB_LOOPBACK_FIFO_DEFAULT	8192

static unsigned int kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
module_param(kfifo_depth, uint, 0444);

/* Maximum size of any one send data buffer we support */
#define MAX_PACKET_SIZE (PAGE_SIZE * 2)

#define GB_LOOPBACK_US_WAIT_MAX		1000000
/* interface sysfs attributes */
#define gb_loopback_ro_attr(field)				\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%u\n", gb->field);			\
}								\
static DEVICE_ATTR_RO(field)
#define gb_loopback_ro_stats_attr(name, field, type)		\
static ssize_t name##_##field##_show(struct device *dev,	\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	/* Report 0 for min and max if no transfer succeeded */\
	if (!gb->requests_completed)				\
		return sprintf(buf, "0\n");			\
	return sprintf(buf, "%"#type"\n", gb->name.field);	\
}								\
static DEVICE_ATTR_RO(name##_##field)
#define gb_loopback_ro_avg_attr(name)				\
static ssize_t name##_avg_show(struct device *dev,		\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback_stats *stats;			\
	struct gb_loopback *gb;					\
	u64 avg, rem;						\
	u32 count;						\
	gb = dev_get_drvdata(dev);				\
	stats = &gb->name;					\
	count = stats->count ? stats->count : 1;		\
	avg = stats->sum + count / 2000000; /* round closest */	\
	rem = do_div(avg, count);				\
	rem *= 1000000;						\
	do_div(rem, count);					\
	return sprintf(buf, "%llu.%06u\n", avg, (u32)rem);	\
}								\
static DEVICE_ATTR_RO(name##_avg)
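
/*
 * Note: each *_avg attribute above prints sum/count as a fixed-point
 * decimal with six fractional digits ("%llu.%06u"). do_div() is used
 * twice so the 64-bit arithmetic also works on 32-bit platforms that
 * lack a native 64-bit divide.
 */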
#define gb_loopback_stats_attrs(field)				\
	gb_loopback_ro_stats_attr(field, min, u);		\
	gb_loopback_ro_stats_attr(field, max, u);		\
	gb_loopback_ro_avg_attr(field)
#define gb_loopback_attr(field, type)				\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%"#type"\n", gb->field);		\
}								\
static ssize_t field##_store(struct device *dev,		\
			    struct device_attribute *attr,	\
			    const char *buf,			\
			    size_t len)				\
{								\
	int ret;						\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	mutex_lock(&gb->mutex);					\
	ret = sscanf(buf, "%"#type, &gb->field);		\
	if (ret != 1)						\
		len = -EINVAL;					\
	else							\
		gb_loopback_check_attr(gb);			\
	mutex_unlock(&gb->mutex);				\
	return len;						\
}								\
static DEVICE_ATTR_RW(field)
#define gb_dev_loopback_ro_attr(field, conn)			\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%u\n", gb->field);			\
}								\
static DEVICE_ATTR_RO(field)
#define gb_dev_loopback_rw_attr(field, type)			\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%"#type"\n", gb->field);		\
}								\
static ssize_t field##_store(struct device *dev,		\
			    struct device_attribute *attr,	\
			    const char *buf,			\
			    size_t len)				\
{								\
	int ret;						\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	mutex_lock(&gb->mutex);					\
	ret = sscanf(buf, "%"#type, &gb->field);		\
	if (ret != 1)						\
		len = -EINVAL;					\
	else							\
		gb_loopback_check_attr(gb);			\
	mutex_unlock(&gb->mutex);				\
	return len;						\
}								\
static DEVICE_ATTR_RW(field)
static void gb_loopback_reset_stats(struct gb_loopback *gb);
static void gb_loopback_check_attr(struct gb_loopback *gb)
{
	if (gb->us_wait > GB_LOOPBACK_US_WAIT_MAX)
		gb->us_wait = GB_LOOPBACK_US_WAIT_MAX;
	if (gb->size > gb_dev.size_max)
		gb->size = gb_dev.size_max;
	gb->requests_timedout = 0;
	gb->requests_completed = 0;
	gb->iteration_count = 0;
	gb->send_count = 0;
	gb->error = 0;

	if (kfifo_depth < gb->iteration_max) {
		dev_warn(gb->dev,
			 "cannot log bytes %u kfifo_depth %u\n",
			 gb->iteration_max, kfifo_depth);
	}
	kfifo_reset_out(&gb->kfifo_lat);

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_TRANSFER:
	case GB_LOOPBACK_TYPE_SINK:
		gb->jiffy_timeout = usecs_to_jiffies(gb->timeout);
		if (!gb->jiffy_timeout)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MIN;
		else if (gb->jiffy_timeout > GB_LOOPBACK_TIMEOUT_MAX)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MAX;
		gb_loopback_reset_stats(gb);
		wake_up(&gb->wq);
	}
}
/* Time to send and receive one message */
gb_loopback_stats_attrs(latency);
/* Number of requests sent per second on this cport */
gb_loopback_stats_attrs(requests_per_second);
/* Quantity of data sent and received on this cport */
gb_loopback_stats_attrs(throughput);
/* Latency across the UniPro link from APBridge's perspective */
gb_loopback_stats_attrs(apbridge_unipro_latency);
/* Firmware induced overhead in the GPBridge */
gb_loopback_stats_attrs(gbphy_firmware_latency);

/* Number of errors encountered during loop */
gb_loopback_ro_attr(error);
/* Number of requests successfully completed async */
gb_loopback_ro_attr(requests_completed);
/* Number of requests timed out async */
gb_loopback_ro_attr(requests_timedout);
/* Timeout minimum in microseconds */
gb_loopback_ro_attr(timeout_min);
/* Timeout maximum in microseconds */
gb_loopback_ro_attr(timeout_max);

/*
 * Type of loopback message to send based on protocol type definitions
 * 0 => Don't send message
 * 2 => Send ping message continuously (message without payload)
 * 3 => Send transfer message continuously (message with payload,
 *	payload returned in response)
 * 4 => Send a sink message (message with payload, no payload in response)
 */
gb_dev_loopback_rw_attr(type, d);
/* Size of transfer message payload: 0-4096 bytes */
gb_dev_loopback_rw_attr(size, u);
/* Time to wait between two messages: 0-1000 ms */
gb_dev_loopback_rw_attr(us_wait, d);
/* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
gb_dev_loopback_rw_attr(iteration_max, u);
/* The current index of the for (i = 0; i < iteration_max; i++) loop */
gb_dev_loopback_ro_attr(iteration_count, false);
/* A flag to indicate synchronous or asynchronous operations */
gb_dev_loopback_rw_attr(async, u);
/* Timeout of an individual asynchronous request */
gb_dev_loopback_rw_attr(timeout, u);
/* Maximum number of in-flight operations before back-off */
gb_dev_loopback_rw_attr(outstanding_operations_max, u);
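
/*
 * Illustrative sysfs usage (a sketch; the device index and the /sys
 * mount point depend on the running system):
 *
 *   echo 128  > /sys/class/gb_loopback/gb_loopback0/size
 *   echo 1000 > /sys/class/gb_loopback/gb_loopback0/iteration_max
 *   echo 3    > /sys/class/gb_loopback/gb_loopback0/type
 *   cat /sys/class/gb_loopback/gb_loopback0/latency_avg
 */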
static struct attribute *loopback_attrs[] = {
	&dev_attr_latency_min.attr,
	&dev_attr_latency_max.attr,
	&dev_attr_latency_avg.attr,
	&dev_attr_requests_per_second_min.attr,
	&dev_attr_requests_per_second_max.attr,
	&dev_attr_requests_per_second_avg.attr,
	&dev_attr_throughput_min.attr,
	&dev_attr_throughput_max.attr,
	&dev_attr_throughput_avg.attr,
	&dev_attr_apbridge_unipro_latency_min.attr,
	&dev_attr_apbridge_unipro_latency_max.attr,
	&dev_attr_apbridge_unipro_latency_avg.attr,
	&dev_attr_gbphy_firmware_latency_min.attr,
	&dev_attr_gbphy_firmware_latency_max.attr,
	&dev_attr_gbphy_firmware_latency_avg.attr,
	&dev_attr_type.attr,
	&dev_attr_size.attr,
	&dev_attr_us_wait.attr,
	&dev_attr_iteration_count.attr,
	&dev_attr_iteration_max.attr,
	&dev_attr_async.attr,
	&dev_attr_error.attr,
	&dev_attr_requests_completed.attr,
	&dev_attr_requests_timedout.attr,
	&dev_attr_timeout.attr,
	&dev_attr_outstanding_operations_max.attr,
	&dev_attr_timeout_min.attr,
	&dev_attr_timeout_max.attr,
	NULL,
};
ATTRIBUTE_GROUPS(loopback);
static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error);
static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
{
	do_div(elapsed_nsecs, NSEC_PER_USEC);
	return elapsed_nsecs;
}
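
/*
 * Latencies are the difference of two ktime samples. If the end
 * timestamp reads lower than the start timestamp, the span is recovered
 * modulo NSEC_PER_DAY instead of going negative.
 */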
static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
{
	if (t2 > t1)
		return t2 - t1;
	else
		return NSEC_PER_DAY - t2 + t1;
}
static u64 gb_loopback_calc_latency(ktime_t ts, ktime_t te)
{
	return __gb_loopback_calc_latency(ktime_to_ns(ts), ktime_to_ns(te));
}
static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
				      void *request, int request_size,
				      void *response, int response_size)
{
	struct gb_operation *operation;
	ktime_t ts, te;
	int ret;

	ts = ktime_get();
	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync(operation);
	if (ret) {
		dev_err(&gb->connection->bundle->dev,
			"synchronous operation failed: %d\n", ret);
		goto out_put_operation;
	} else {
		if (response_size == operation->response->payload_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		} else {
			dev_err(&gb->connection->bundle->dev,
				"response size %zu expected %d\n",
				operation->response->payload_size,
				response_size);
			ret = -EINVAL;
			goto out_put_operation;
		}
	}

	te = ktime_get();

	/* Calculate the total time the message took */
	gb->elapsed_nsecs = gb_loopback_calc_latency(ts, te);

out_put_operation:
	gb_operation_put(operation);

	return ret;
}
static void gb_loopback_async_wait_all(struct gb_loopback *gb)
{
	wait_event(gb->wq_completion,
		   !atomic_read(&gb->outstanding_operations));
}
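
/*
 * outstanding_operations is incremented just before each asynchronous
 * send and decremented in the completion callback below, which also
 * wakes wq_completion so both gb_loopback_async_wait_all() and the
 * back-off in gb_loopback_async_wait_to_send() can make progress.
 */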
static void gb_loopback_async_operation_callback(struct gb_operation *operation)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_loopback *gb;
	ktime_t te;
	int result;

	te = ktime_get();
	result = gb_operation_result(operation);
	op_async = gb_operation_get_data(operation);
	gb = op_async->gb;

	mutex_lock(&gb->mutex);

	if (!result && op_async->completion)
		result = op_async->completion(op_async);

	if (!result) {
		gb->elapsed_nsecs = gb_loopback_calc_latency(op_async->ts, te);
	} else {
		gb->error++;
		if (result == -ETIMEDOUT)
			gb->requests_timedout++;
	}

	gb->iteration_count++;
	gb_loopback_calculate_stats(gb, result);

	mutex_unlock(&gb->mutex);

	dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
		operation->id);

	/* Wake up waiters */
	atomic_dec(&op_async->gb->outstanding_operations);
	wake_up(&gb->wq_completion);

	/* Release resources */
	gb_operation_put(operation);
	kfree(op_async);
}
static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
				       void *request, int request_size,
				       int response_size,
				       void *completion)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_operation *operation;
	int ret;

	op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
	if (!op_async)
		return -ENOMEM;

	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation) {
		ret = -ENOMEM;
		goto error;
	}

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	gb_operation_set_data(operation, op_async);

	op_async->gb = gb;
	op_async->operation = operation;
	op_async->completion = completion;

	op_async->ts = ktime_get();

	atomic_inc(&gb->outstanding_operations);
	ret = gb_operation_request_send(operation,
					gb_loopback_async_operation_callback,
					jiffies_to_msecs(gb->jiffy_timeout),
					GFP_KERNEL);
	if (ret) {
		atomic_dec(&gb->outstanding_operations);
		gb_operation_put(operation);
		goto error;
	}
	return ret;

error:
	kfree(op_async);
	return ret;
}
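
/*
 * Ownership note: on a successful send, the operation reference and the
 * op_async wrapper are released by the completion callback; if the send
 * fails they are released on the error paths above.
 */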
static int gb_loopback_sync_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
					    request, len + sizeof(*request),
					    NULL, 0);
	kfree(request);
	return retval;
}
static int gb_loopback_sync_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	int retval;

	gb->apbridge_latency_ts = 0;
	gb->gbphy_latency_ts = 0;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	response = kmalloc(len + sizeof(*response), GFP_KERNEL);
	if (!response) {
		kfree(request);
		return -ENOMEM;
	}

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
					    request, len + sizeof(*request),
					    response, len + sizeof(*response));
	if (retval)
		goto gb_out;

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match\n");
		retval = -EREMOTEIO;
	}
	gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
	gb->gbphy_latency_ts = (u32)__le32_to_cpu(response->reserved1);

gb_out:
	kfree(request);
	kfree(response);

	return retval;
}
static int gb_loopback_sync_ping(struct gb_loopback *gb)
{
	return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
					  NULL, 0, NULL, 0);
}
static int gb_loopback_async_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_SINK,
					     request, len + sizeof(*request),
					     0, NULL);
	kfree(request);
	return retval;
}
static int gb_loopback_async_transfer_complete(
				struct gb_loopback_async_operation *op_async)
{
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	size_t len;
	int retval = 0;

	gb = op_async->gb;
	operation = op_async->operation;
	request = operation->request->payload;
	response = operation->response->payload;
	len = le32_to_cpu(request->len);

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match operation id %d\n",
			operation->id);
		retval = -EREMOTEIO;
	} else {
		gb->apbridge_latency_ts =
			(u32)__le32_to_cpu(response->reserved0);
		gb->gbphy_latency_ts =
			(u32)__le32_to_cpu(response->reserved1);
	}

	return retval;
}
static int gb_loopback_async_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval, response_len;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	response_len = sizeof(struct gb_loopback_transfer_response);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_TRANSFER,
					     request, len + sizeof(*request),
					     len + response_len,
					     gb_loopback_async_transfer_complete);
	if (retval)
		goto gb_out;

gb_out:
	kfree(request);
	return retval;
}
static int gb_loopback_async_ping(struct gb_loopback *gb)
{
	return gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_PING,
					   NULL, 0, 0, NULL);
}
static int gb_loopback_request_handler(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	struct device *dev = &connection->bundle->dev;
	size_t len;

	/* By convention, the AP initiates the version operation */
	switch (operation->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_SINK:
		return 0;
	case GB_LOOPBACK_TYPE_TRANSFER:
		if (operation->request->payload_size < sizeof(*request)) {
			dev_err(dev, "transfer request too small (%zu < %zu)\n",
				operation->request->payload_size,
				sizeof(*request));
			return -EINVAL;	/* -EMSGSIZE */
		}
		request = operation->request->payload;
		len = le32_to_cpu(request->len);
		if (len > gb_dev.size_max) {
			dev_err(dev, "transfer request too large (%zu > %zu)\n",
				len, gb_dev.size_max);
			return -EINVAL;
		}

		if (!gb_operation_response_alloc(operation,
				len + sizeof(*response), GFP_KERNEL)) {
			dev_err(dev, "error allocating response\n");
			return -ENOMEM;
		}
		response = operation->response->payload;
		response->len = cpu_to_le32(len);
		if (len)
			memcpy(response->data, request->data, len);

		return 0;
	default:
		dev_err(dev, "unsupported request: %u\n", operation->type);
		return -EINVAL;
	}
}
static void gb_loopback_reset_stats(struct gb_loopback *gb)
{
	struct gb_loopback_stats reset = {
		.min = U32_MAX,
	};

	/* Reset per-connection stats */
	memcpy(&gb->latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->throughput, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->requests_per_second, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->apbridge_unipro_latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->gbphy_firmware_latency, &reset,
	       sizeof(struct gb_loopback_stats));

	/* Should be initialized at least once per transaction set */
	gb->apbridge_latency_ts = 0;
	gb->gbphy_latency_ts = 0;
	gb->ts = ktime_set(0, 0);
}
static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
{
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
	stats->sum += val;
	stats->count++;
}
static void gb_loopback_update_stats_window(struct gb_loopback_stats *stats,
					    u64 val, u32 count)
{
	stats->sum += val;
	stats->count += count;

	do_div(val, count);
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
}
static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
{
	u64 req = gb->requests_completed * USEC_PER_SEC;

	gb_loopback_update_stats_window(&gb->requests_per_second, req, latency);
}
static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
{
	u64 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
		break;
	case GB_LOOPBACK_TYPE_SINK:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  gb->size;
		break;
	case GB_LOOPBACK_TYPE_TRANSFER:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  sizeof(struct gb_loopback_transfer_response) +
				  gb->size * 2;
		break;
	default:
		return;
	}

	aggregate_size *= gb->requests_completed;
	aggregate_size *= USEC_PER_SEC;
	gb_loopback_update_stats_window(&gb->throughput, aggregate_size,
					latency);
}
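
/*
 * Throughput accounts for both directions: two operation message
 * headers per round trip plus the type-specific payloads. Scaling by
 * USEC_PER_SEC before dividing by the window latency (in microseconds)
 * yields bytes per second.
 */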
static void gb_loopback_calculate_latency_stats(struct gb_loopback *gb)
{
	u32 lat;

	/* Express latency in terms of microseconds */
	lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);

	/* Log latency statistic */
	gb_loopback_update_stats(&gb->latency, lat);

	/* Raw latency log on a per thread basis */
	kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));

	/* Log the firmware supplied latency values */
	gb_loopback_update_stats(&gb->apbridge_unipro_latency,
				 gb->apbridge_latency_ts);
	gb_loopback_update_stats(&gb->gbphy_firmware_latency,
				 gb->gbphy_latency_ts);
}
static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
{
	u64 nlat;
	u32 lat;
	ktime_t te;

	if (!error) {
		gb->requests_completed++;
		gb_loopback_calculate_latency_stats(gb);
	}

	te = ktime_get();
	nlat = gb_loopback_calc_latency(gb->ts, te);
	if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
		lat = gb_loopback_nsec_to_usec_latency(nlat);

		gb_loopback_throughput_update(gb, lat);
		gb_loopback_requests_update(gb, lat);

		if (gb->iteration_count != gb->iteration_max) {
			gb->ts = te;
			gb->requests_completed = 0;
		}
	}
}
static void gb_loopback_async_wait_to_send(struct gb_loopback *gb)
{
	if (!(gb->async && gb->outstanding_operations_max))
		return;
	wait_event_interruptible(gb->wq_completion,
				 (atomic_read(&gb->outstanding_operations) <
				  gb->outstanding_operations_max) ||
				  kthread_should_stop());
}
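
/*
 * Worker thread: sleeps until user-space writes a non-zero type, then
 * issues ping/transfer/sink operations (synchronously or asynchronously)
 * until iteration_max requests have been sent, waits for all
 * completions and notifies user-space through sysfs.
 */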
static int gb_loopback_fn(void *data)
{
	int error = 0;
	int us_wait = 0;
	int type;
	int ret;
	u32 size;

	struct gb_loopback *gb = data;
	struct gb_bundle *bundle = gb->connection->bundle;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	while (1) {
		if (!gb->type) {
			gb_pm_runtime_put_autosuspend(bundle);
			wait_event_interruptible(gb->wq, gb->type ||
						 kthread_should_stop());
			ret = gb_pm_runtime_get_sync(bundle);
			if (ret)
				return ret;
		}

		if (kthread_should_stop())
			break;

		/* Limit the maximum number of in-flight async operations */
		gb_loopback_async_wait_to_send(gb);
		if (kthread_should_stop())
			break;

		mutex_lock(&gb->mutex);

		/* Optionally terminate */
		if (gb->send_count == gb->iteration_max) {
			mutex_unlock(&gb->mutex);

			/* Wait for synchronous and asynchronous completion */
			gb_loopback_async_wait_all(gb);

			/* Mark complete unless user-space has poked us */
			mutex_lock(&gb->mutex);
			if (gb->iteration_count == gb->iteration_max) {
				gb->type = 0;
				gb->send_count = 0;
				sysfs_notify(&gb->dev->kobj, NULL,
					     "iteration_count");
				dev_dbg(&bundle->dev, "load test complete\n");
			} else {
				dev_dbg(&bundle->dev,
					"continuing on with new test set\n");
			}
			mutex_unlock(&gb->mutex);
			continue;
		}

		size = gb->size;
		us_wait = gb->us_wait;
		type = gb->type;
		if (ktime_to_ns(gb->ts) == 0)
			gb->ts = ktime_get();

		/* Else operations to perform */
		if (gb->async) {
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_async_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_async_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_async_sink(gb, size);

			if (error) {
				gb->error++;
				gb->iteration_count++;
			}
		} else {
			/* We are effectively single threaded here */
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_sync_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_sync_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_sync_sink(gb, size);

			if (error)
				gb->error++;
			gb->iteration_count++;
			gb_loopback_calculate_stats(gb, !!error);
		}
		gb->send_count++;
		mutex_unlock(&gb->mutex);

		if (us_wait) {
			if (us_wait < 20000)
				usleep_range(us_wait, us_wait + 100);
			else
				msleep(us_wait / 1000);
		}
	}

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;
}
static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
						 struct kfifo *kfifo,
						 struct mutex *mutex)
{
	u32 latency;
	int retval;

	if (kfifo_len(kfifo) == 0) {
		retval = -EAGAIN;
		goto done;
	}

	mutex_lock(mutex);
	retval = kfifo_out(kfifo, &latency, sizeof(latency));
	if (retval > 0) {
		seq_printf(s, "%u", latency);
		retval = 0;
	}
	mutex_unlock(mutex);
done:
	return retval;
}
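
/*
 * Each read of a raw_latency_* debugfs file pops a single u32 sample
 * from the kfifo (or returns -EAGAIN once it is empty), so the raw log
 * is consumed in FIFO order.
 */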
static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
{
	struct gb_loopback *gb = s->private;

	return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
						     &gb->mutex);
}
static int gb_loopback_latency_open(struct inode *inode, struct file *file)
{
	return single_open(file, gb_loopback_dbgfs_latency_show,
			   inode->i_private);
}
static const struct file_operations gb_loopback_debugfs_latency_ops = {
	.open		= gb_loopback_latency_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int gb_loopback_bus_id_compare(void *priv, struct list_head *lha,
				      struct list_head *lhb)
{
	struct gb_loopback *a = list_entry(lha, struct gb_loopback, entry);
	struct gb_loopback *b = list_entry(lhb, struct gb_loopback, entry);
	struct gb_connection *ca = a->connection;
	struct gb_connection *cb = b->connection;

	if (ca->bundle->intf->interface_id < cb->bundle->intf->interface_id)
		return -1;
	if (cb->bundle->intf->interface_id < ca->bundle->intf->interface_id)
		return 1;
	if (ca->bundle->id < cb->bundle->id)
		return -1;
	if (cb->bundle->id < ca->bundle->id)
		return 1;
	if (ca->intf_cport_id < cb->intf_cport_id)
		return -1;
	else if (cb->intf_cport_id < ca->intf_cport_id)
		return 1;

	return 0;
}
static void gb_loopback_insert_id(struct gb_loopback *gb)
{
	struct gb_loopback *gb_list;
	u32 new_lbid = 0;

	/* perform an insertion sort */
	list_add_tail(&gb->entry, &gb_dev.list);
	list_sort(NULL, &gb_dev.list, gb_loopback_bus_id_compare);
	list_for_each_entry(gb_list, &gb_dev.list, entry) {
		gb_list->lbid = 1 << new_lbid;
		new_lbid++;
	}
}
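
/*
 * After sorting, each connection is assigned a power-of-two lbid
 * (1 << position in the sorted list), so a set of loopback devices can
 * be described compactly as a bitmask.
 */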
#define DEBUGFS_NAMELEN 32
static int gb_loopback_probe(struct gb_bundle *bundle,
			     const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_loopback *gb;
	struct device *dev;
	int retval;
	char name[DEBUGFS_NAMELEN];
	unsigned long flags;

	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOOPBACK)
		return -ENODEV;

	gb = kzalloc(sizeof(*gb), GFP_KERNEL);
	if (!gb)
		return -ENOMEM;

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  gb_loopback_request_handler);
	if (IS_ERR(connection)) {
		retval = PTR_ERR(connection);
		goto out_kzalloc;
	}

	gb->connection = connection;
	greybus_set_drvdata(bundle, gb);

	init_waitqueue_head(&gb->wq);
	init_waitqueue_head(&gb->wq_completion);
	atomic_set(&gb->outstanding_operations, 0);
	gb_loopback_reset_stats(gb);

	/* Reported values to user-space for min/max timeouts */
	gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
	gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);

	if (!gb_dev.count) {
		/* Calculate maximum payload */
		gb_dev.size_max = gb_operation_get_payload_size_max(connection);
		if (gb_dev.size_max <=
			sizeof(struct gb_loopback_transfer_request)) {
			retval = -EINVAL;
			goto out_connection_destroy;
		}
		gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
	}

	/* Create per-connection sysfs and debugfs data-points */
	snprintf(name, sizeof(name), "raw_latency_%s",
		 dev_name(&connection->bundle->dev));
	gb->file = debugfs_create_file(name, S_IFREG | 0444, gb_dev.root, gb,
				       &gb_loopback_debugfs_latency_ops);

	gb->id = ida_simple_get(&loopback_ida, 0, 0, GFP_KERNEL);
	if (gb->id < 0) {
		retval = gb->id;
		goto out_debugfs_remove;
	}

	retval = gb_connection_enable(connection);
	if (retval)
		goto out_ida_remove;

	dev = device_create_with_groups(&loopback_class,
					&connection->bundle->dev,
					MKDEV(0, 0), gb, loopback_groups,
					"gb_loopback%d", gb->id);
	if (IS_ERR(dev)) {
		retval = PTR_ERR(dev);
		goto out_connection_disable;
	}
	gb->dev = dev;

	/* Allocate kfifo */
	if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
			GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_conn;
	}
	/* Fork worker thread */
	mutex_init(&gb->mutex);
	gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
	if (IS_ERR(gb->task)) {
		retval = PTR_ERR(gb->task);
		goto out_kfifo;
	}

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_loopback_insert_id(gb);
	gb_dev.count++;
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	gb_connection_latency_tag_enable(connection);

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;

out_kfifo:
	kfifo_free(&gb->kfifo_lat);
out_conn:
	device_unregister(dev);
out_connection_disable:
	gb_connection_disable(connection);
out_ida_remove:
	ida_simple_remove(&loopback_ida, gb->id);
out_debugfs_remove:
	debugfs_remove(gb->file);
out_connection_destroy:
	gb_connection_destroy(connection);
out_kzalloc:
	kfree(gb);

	return retval;
}
*bundle
)
1154 struct gb_loopback
*gb
= greybus_get_drvdata(bundle
);
1155 unsigned long flags
;
1158 ret
= gb_pm_runtime_get_sync(bundle
);
1160 gb_pm_runtime_get_noresume(bundle
);
1162 gb_connection_disable(gb
->connection
);
1164 if (!IS_ERR_OR_NULL(gb
->task
))
1165 kthread_stop(gb
->task
);
1167 kfifo_free(&gb
->kfifo_lat
);
1168 gb_connection_latency_tag_disable(gb
->connection
);
1169 debugfs_remove(gb
->file
);
1172 * FIXME: gb_loopback_async_wait_all() is redundant now, as connection
1173 * is disabled at the beginning and so we can't have any more
1174 * incoming/outgoing requests.
1176 gb_loopback_async_wait_all(gb
);
1178 spin_lock_irqsave(&gb_dev
.lock
, flags
);
1180 list_del(&gb
->entry
);
1181 spin_unlock_irqrestore(&gb_dev
.lock
, flags
);
1183 device_unregister(gb
->dev
);
1184 ida_simple_remove(&loopback_ida
, gb
->id
);
1186 gb_connection_destroy(gb
->connection
);
static const struct greybus_bundle_id gb_loopback_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_loopback_id_table);

static struct greybus_driver gb_loopback_driver = {
	.name		= "loopback",
	.probe		= gb_loopback_probe,
	.disconnect	= gb_loopback_disconnect,
	.id_table	= gb_loopback_id_table,
};
static int loopback_init(void)
{
	int retval;

	INIT_LIST_HEAD(&gb_dev.list);
	INIT_LIST_HEAD(&gb_dev.list_op_async);
	spin_lock_init(&gb_dev.lock);
	gb_dev.root = debugfs_create_dir("gb_loopback", NULL);

	retval = class_register(&loopback_class);
	if (retval)
		goto err;

	retval = greybus_register(&gb_loopback_driver);
	if (retval)
		goto err_unregister;

	return 0;

err_unregister:
	class_unregister(&loopback_class);
err:
	debugfs_remove_recursive(gb_dev.root);
	return retval;
}
module_init(loopback_init);
static void __exit loopback_exit(void)
{
	debugfs_remove_recursive(gb_dev.root);
	greybus_deregister(&gb_loopback_driver);
	class_unregister(&loopback_class);
	ida_destroy(&loopback_ida);
}
module_exit(loopback_exit);

MODULE_LICENSE("GPL v2");