/*
 * Loopback bridge driver for the Greybus loopback module.
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/sizes.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list_sort.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>

#include <asm/div64.h>

#include "greybus.h"
#include "connection.h"

#define NSEC_PER_DAY 86400000000000ULL
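/*
 * Latency timestamps are taken with do_gettimeofday() and compared as
 * nanosecond values; NSEC_PER_DAY (the number of nanoseconds in 24 hours)
 * is used by __gb_loopback_calc_latency() below to unwrap the rare case
 * where the end timestamp compares smaller than the start timestamp.
 */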
struct gb_loopback_stats {
	u32 min;
	u32 max;
	u64 sum;
	u32 count;
};

struct gb_loopback_device {
	struct dentry *root;
	u32 count;
	size_t size_max;

	/* We need to take a lock in atomic context */
	spinlock_t lock;
	struct list_head list;
	struct list_head list_op_async;
};

static struct gb_loopback_device gb_dev;
struct gb_loopback_async_operation {
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct timeval ts;
	struct timer_list timer;
	struct list_head entry;
	struct work_struct work;
	struct kref kref;
	bool pending;
	int (*completion)(struct gb_loopback_async_operation *op_async);
};
struct gb_loopback {
	struct gb_connection *connection;

	struct dentry *file;
	struct kfifo kfifo_lat;
	struct kfifo kfifo_ts;
	struct mutex mutex;
	struct task_struct *task;
	struct list_head entry;
	struct device *dev;
	wait_queue_head_t wq;
	wait_queue_head_t wq_completion;
	atomic_t outstanding_operations;

	/* Per connection stats */
	struct timeval ts;
	struct gb_loopback_stats latency;
	struct gb_loopback_stats throughput;
	struct gb_loopback_stats requests_per_second;
	struct gb_loopback_stats apbridge_unipro_latency;
	struct gb_loopback_stats gbphy_firmware_latency;

	int type;
	int async;
	int id;
	u32 size;
	u32 iteration_max;
	u32 iteration_count;
	int us_wait;
	u32 error;
	u32 requests_completed;
	u32 requests_timedout;
	u32 timeout;
	u32 jiffy_timeout;
	u32 timeout_min;
	u32 timeout_max;
	u32 outstanding_operations_max;
	u32 lbid;
	u64 elapsed_nsecs;
	u32 apbridge_latency_ts;
	u32 gbphy_latency_ts;

	u32 send_count;
};
static struct class loopback_class = {
	.name		= "gb_loopback",
	.owner		= THIS_MODULE,
};
static DEFINE_IDA(loopback_ida);
/* Min/max values in jiffies */
#define GB_LOOPBACK_TIMEOUT_MIN				1
#define GB_LOOPBACK_TIMEOUT_MAX				10000

#define GB_LOOPBACK_FIFO_DEFAULT			8192

static unsigned int kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
module_param(kfifo_depth, uint, 0444);
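/*
 * A minimal load-time sketch, assuming the module is built as gb-loopback.ko
 * (the exact module name depends on the build):
 *
 *	modprobe gb-loopback kfifo_depth=16384
 *
 * kfifo_depth bounds how many per-iteration latency samples and timestamp
 * pairs can be buffered for the debugfs log; the 0444 permissions make the
 * parameter read-only after load.
 */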
/* Maximum size of any one send data buffer we support */
#define MAX_PACKET_SIZE (PAGE_SIZE * 2)

#define GB_LOOPBACK_US_WAIT_MAX				1000000
/* interface sysfs attributes */
#define gb_loopback_ro_attr(field)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%u\n", gb->field);				\
}									\
static DEVICE_ATTR_RO(field)
#define gb_loopback_ro_stats_attr(name, field, type)			\
static ssize_t name##_##field##_show(struct device *dev,		\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	/* Report 0 for min and max if no transfer succeeded */	\
	if (!gb->requests_completed)					\
		return sprintf(buf, "0\n");				\
	return sprintf(buf, "%"#type"\n", gb->name.field);		\
}									\
static DEVICE_ATTR_RO(name##_##field)
#define gb_loopback_ro_avg_attr(name)					\
static ssize_t name##_avg_show(struct device *dev,			\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback_stats *stats;				\
	struct gb_loopback *gb;						\
	u64 avg, rem;							\
	u32 count;							\
	gb = dev_get_drvdata(dev);					\
	stats = &gb->name;						\
	count = stats->count ? stats->count : 1;			\
	avg = stats->sum + count / 2000000; /* round closest */	\
	rem = do_div(avg, count);					\
	rem *= 1000000;							\
	do_div(rem, count);						\
	return sprintf(buf, "%llu.%06u\n", avg, (u32)rem);		\
}									\
static DEVICE_ATTR_RO(name##_avg)
#define gb_loopback_stats_attrs(field)					\
	gb_loopback_ro_stats_attr(field, min, u);			\
	gb_loopback_ro_stats_attr(field, max, u);			\
	gb_loopback_ro_avg_attr(field)
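/*
 * For illustration: gb_loopback_stats_attrs(latency) expands to three
 * read-only device attributes named latency_min, latency_max and
 * latency_avg, whose show routines report gb->latency.min, gb->latency.max
 * and gb->latency.sum / gb->latency.count respectively, the last printed
 * as a fixed-point value with six fractional digits.
 */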
#define gb_loopback_attr(field, type)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%"#type"\n", gb->field);			\
}									\
static ssize_t field##_store(struct device *dev,			\
			    struct device_attribute *attr,		\
			    const char *buf,				\
			    size_t len)					\
{									\
	int ret;							\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	mutex_lock(&gb->mutex);						\
	ret = sscanf(buf, "%"#type, &gb->field);			\
	if (ret != 1)							\
		len = -EINVAL;						\
	else								\
		gb_loopback_check_attr(gb);				\
	mutex_unlock(&gb->mutex);					\
	return len;							\
}									\
static DEVICE_ATTR_RW(field)
#define gb_dev_loopback_ro_attr(field, conn)				\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%u\n", gb->field);				\
}									\
static DEVICE_ATTR_RO(field)
#define gb_dev_loopback_rw_attr(field, type)				\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%"#type"\n", gb->field);			\
}									\
static ssize_t field##_store(struct device *dev,			\
			    struct device_attribute *attr,		\
			    const char *buf,				\
			    size_t len)					\
{									\
	int ret;							\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	mutex_lock(&gb->mutex);						\
	ret = sscanf(buf, "%"#type, &gb->field);			\
	if (ret != 1)							\
		len = -EINVAL;						\
	else								\
		gb_loopback_check_attr(gb);				\
	mutex_unlock(&gb->mutex);					\
	return len;							\
}									\
static DEVICE_ATTR_RW(field)
static void gb_loopback_reset_stats(struct gb_loopback *gb);
static void gb_loopback_check_attr(struct gb_loopback *gb)
{
	if (gb->us_wait > GB_LOOPBACK_US_WAIT_MAX)
		gb->us_wait = GB_LOOPBACK_US_WAIT_MAX;
	if (gb->size > gb_dev.size_max)
		gb->size = gb_dev.size_max;
	gb->requests_timedout = 0;
	gb->requests_completed = 0;
	gb->iteration_count = 0;
	gb->send_count = 0;
	gb->error = 0;

	if (kfifo_depth < gb->iteration_max) {
		dev_warn(gb->dev,
			 "cannot log bytes %u kfifo_depth %u\n",
			 gb->iteration_max, kfifo_depth);
	}
	kfifo_reset_out(&gb->kfifo_lat);
	kfifo_reset_out(&gb->kfifo_ts);

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_TRANSFER:
	case GB_LOOPBACK_TYPE_SINK:
		gb->jiffy_timeout = usecs_to_jiffies(gb->timeout);
		if (!gb->jiffy_timeout)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MIN;
		else if (gb->jiffy_timeout > GB_LOOPBACK_TIMEOUT_MAX)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MAX;
		gb_loopback_reset_stats(gb);
		wake_up(&gb->wq);
		break;
	default:
		gb->error = 1;
		break;
	}
}
/* Time to send and receive one message */
gb_loopback_stats_attrs(latency);
/* Number of requests sent per second on this cport */
gb_loopback_stats_attrs(requests_per_second);
/* Quantity of data sent and received on this cport */
gb_loopback_stats_attrs(throughput);
/* Latency across the UniPro link from APBridge's perspective */
gb_loopback_stats_attrs(apbridge_unipro_latency);
/* Firmware induced overhead in the GPBridge */
gb_loopback_stats_attrs(gbphy_firmware_latency);

/* Number of errors encountered during loop */
gb_loopback_ro_attr(error);
/* Number of requests successfully completed async */
gb_loopback_ro_attr(requests_completed);
/* Number of requests timed out async */
gb_loopback_ro_attr(requests_timedout);
/* Timeout minimum in useconds */
gb_loopback_ro_attr(timeout_min);
/* Timeout maximum in useconds */
gb_loopback_ro_attr(timeout_max);
/*
 * Type of loopback message to send based on protocol type definitions
 * 0 => Don't send message
 * 2 => Send ping message continuously (message without payload)
 * 3 => Send transfer message continuously (message with payload,
 *	payload returned in response)
 * 4 => Send a sink message (message with payload, no payload in response)
 */
gb_dev_loopback_rw_attr(type, d);
/* Size of transfer message payload: 0-4096 bytes */
gb_dev_loopback_rw_attr(size, u);
/* Time to wait between two messages: 0-1000 ms */
gb_dev_loopback_rw_attr(us_wait, d);
/* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
gb_dev_loopback_rw_attr(iteration_max, u);
/* The current index of the for (i = 0; i < iteration_max; i++) loop */
gb_dev_loopback_ro_attr(iteration_count, false);
/* A flag to indicate synchronous or asynchronous operations */
gb_dev_loopback_rw_attr(async, u);
/* Timeout of an individual asynchronous request */
gb_dev_loopback_rw_attr(timeout, u);
/* Maximum number of in-flight operations before back-off */
gb_dev_loopback_rw_attr(outstanding_operations_max, u);
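/*
 * A minimal sysfs driving sequence, assuming a class device registered as
 * gb_loopback0 (the "gb_loopback%d" name format used in this driver; the
 * instance number is assigned from loopback_ida at probe time):
 *
 *	cd /sys/class/gb_loopback/gb_loopback0
 *	echo 100 > iteration_max	# run 100 iterations
 *	echo 128 > size			# 128-byte transfer payload
 *	echo 3 > type			# start a transfer test
 *	cat latency_avg			# average round-trip time in usec
 */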
static struct attribute *loopback_attrs[] = {
	&dev_attr_latency_min.attr,
	&dev_attr_latency_max.attr,
	&dev_attr_latency_avg.attr,
	&dev_attr_requests_per_second_min.attr,
	&dev_attr_requests_per_second_max.attr,
	&dev_attr_requests_per_second_avg.attr,
	&dev_attr_throughput_min.attr,
	&dev_attr_throughput_max.attr,
	&dev_attr_throughput_avg.attr,
	&dev_attr_apbridge_unipro_latency_min.attr,
	&dev_attr_apbridge_unipro_latency_max.attr,
	&dev_attr_apbridge_unipro_latency_avg.attr,
	&dev_attr_gbphy_firmware_latency_min.attr,
	&dev_attr_gbphy_firmware_latency_max.attr,
	&dev_attr_gbphy_firmware_latency_avg.attr,
	&dev_attr_type.attr,
	&dev_attr_size.attr,
	&dev_attr_us_wait.attr,
	&dev_attr_iteration_count.attr,
	&dev_attr_iteration_max.attr,
	&dev_attr_async.attr,
	&dev_attr_error.attr,
	&dev_attr_requests_completed.attr,
	&dev_attr_requests_timedout.attr,
	&dev_attr_timeout.attr,
	&dev_attr_outstanding_operations_max.attr,
	&dev_attr_timeout_min.attr,
	&dev_attr_timeout_max.attr,
	NULL,
};
ATTRIBUTE_GROUPS(loopback);
static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error);

static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
{
	u32 lat;

	do_div(elapsed_nsecs, NSEC_PER_USEC);
	lat = elapsed_nsecs;
	return lat;
}

static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
{
	if (t2 > t1)
		return t2 - t1;
	else
		return NSEC_PER_DAY - t2 + t1;
}

static u64 gb_loopback_calc_latency(struct timeval *ts, struct timeval *te)
{
	u64 t1, t2;

	t1 = timeval_to_ns(ts);
	t2 = timeval_to_ns(te);

	return __gb_loopback_calc_latency(t1, t2);
}
static void gb_loopback_push_latency_ts(struct gb_loopback *gb,
					struct timeval *ts, struct timeval *te)
{
	kfifo_in(&gb->kfifo_ts, (unsigned char *)ts, sizeof(*ts));
	kfifo_in(&gb->kfifo_ts, (unsigned char *)te, sizeof(*te));
}
static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
				      void *request, int request_size,
				      void *response, int response_size)
{
	struct gb_operation *operation;
	struct timeval ts, te;
	int ret;

	do_gettimeofday(&ts);
	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync(operation);
	if (ret) {
		dev_err(&gb->connection->bundle->dev,
			"synchronous operation failed: %d\n", ret);
		goto out_put_operation;
	} else {
		if (response_size == operation->response->payload_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		} else {
			dev_err(&gb->connection->bundle->dev,
				"response size %zu expected %d\n",
				operation->response->payload_size,
				response_size);
			ret = -EINVAL;
			goto out_put_operation;
		}
	}

	do_gettimeofday(&te);

	/* Calculate the total time the message took */
	gb_loopback_push_latency_ts(gb, &ts, &te);
	gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);

out_put_operation:
	gb_operation_put(operation);

	return ret;
}
static void __gb_loopback_async_operation_destroy(struct kref *kref)
{
	struct gb_loopback_async_operation *op_async;

	op_async = container_of(kref, struct gb_loopback_async_operation, kref);

	list_del(&op_async->entry);
	if (op_async->operation)
		gb_operation_put(op_async->operation);
	atomic_dec(&op_async->gb->outstanding_operations);
	wake_up(&op_async->gb->wq_completion);
	kfree(op_async);
}

static void gb_loopback_async_operation_get(struct gb_loopback_async_operation
					    *op_async)
{
	kref_get(&op_async->kref);
}

static void gb_loopback_async_operation_put(struct gb_loopback_async_operation
					    *op_async)
{
	unsigned long flags;

	spin_lock_irqsave(&gb_dev.lock, flags);
	kref_put(&op_async->kref, __gb_loopback_async_operation_destroy);
	spin_unlock_irqrestore(&gb_dev.lock, flags);
}
static struct gb_loopback_async_operation *
	gb_loopback_operation_find(u16 id)
{
	struct gb_loopback_async_operation *op_async;
	bool found = false;
	unsigned long flags;

	spin_lock_irqsave(&gb_dev.lock, flags);
	list_for_each_entry(op_async, &gb_dev.list_op_async, entry) {
		if (op_async->operation->id == id) {
			gb_loopback_async_operation_get(op_async);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	return found ? op_async : NULL;
}
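/*
 * Reference-count protocol: gb_loopback_operation_find() returns with an
 * extra kref on the entry, taken under gb_dev.lock, so a concurrent
 * gb_loopback_async_operation_put() cannot free it. The final kref_put()
 * also runs under gb_dev.lock, which is what makes the list_del() in
 * __gb_loopback_async_operation_destroy() safe against the list walk above.
 */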
static void gb_loopback_async_wait_all(struct gb_loopback *gb)
{
	wait_event(gb->wq_completion,
		   !atomic_read(&gb->outstanding_operations));
}
static void gb_loopback_async_operation_callback(struct gb_operation *operation)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_loopback *gb;
	struct timeval te;
	bool err = false;

	do_gettimeofday(&te);
	op_async = gb_loopback_operation_find(operation->id);
	if (!op_async)
		return;

	gb = op_async->gb;
	mutex_lock(&gb->mutex);

	if (!op_async->pending || gb_operation_result(operation)) {
		err = true;
	} else {
		if (op_async->completion)
			if (op_async->completion(op_async))
				err = true;
	}

	if (!err) {
		gb_loopback_push_latency_ts(gb, &op_async->ts, &te);
		gb->elapsed_nsecs = gb_loopback_calc_latency(&op_async->ts,
							     &te);
	}

	if (op_async->pending) {
		if (err)
			gb->error++;
		gb->iteration_count++;
		op_async->pending = false;
		del_timer_sync(&op_async->timer);
		gb_loopback_async_operation_put(op_async);
		gb_loopback_calculate_stats(gb, err);
	}
	mutex_unlock(&gb->mutex);

	dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
		operation->id);

	gb_loopback_async_operation_put(op_async);
}
static void gb_loopback_async_operation_work(struct work_struct *work)
{
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct gb_loopback_async_operation *op_async;

	op_async = container_of(work, struct gb_loopback_async_operation, work);
	gb = op_async->gb;
	operation = op_async->operation;

	mutex_lock(&gb->mutex);
	if (op_async->pending) {
		gb->requests_timedout++;
		gb->error++;
		gb->iteration_count++;
		op_async->pending = false;
		gb_loopback_async_operation_put(op_async);
		gb_loopback_calculate_stats(gb, true);
	}
	mutex_unlock(&gb->mutex);

	dev_dbg(&gb->connection->bundle->dev, "timeout operation %d\n",
		operation->id);

	gb_operation_cancel(operation, -ETIMEDOUT);
	gb_loopback_async_operation_put(op_async);
}
static void gb_loopback_async_operation_timeout(unsigned long data)
{
	struct gb_loopback_async_operation *op_async;
	u16 id = data;

	op_async = gb_loopback_operation_find(id);
	if (!op_async) {
		pr_err("operation %d not found - time out ?\n", id);
		return;
	}
	schedule_work(&op_async->work);
}
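/*
 * Completion and timeout can race: the operation callback and the timeout
 * work handler may both run, in either order, for a request that completes
 * close to its deadline. The op_async->pending flag, checked and cleared
 * under gb->mutex, ensures only the first of the two accounts the result;
 * the callback additionally calls del_timer_sync() so the timer cannot fire
 * after a completed request has been retired.
 */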
static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
				       void *request, int request_size,
				       int response_size,
				       void *completion)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_operation *operation;
	int ret;
	unsigned long flags;

	op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
	if (!op_async)
		return -ENOMEM;

	INIT_WORK(&op_async->work, gb_loopback_async_operation_work);
	kref_init(&op_async->kref);

	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation) {
		kfree(op_async);
		return -ENOMEM;
	}

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	op_async->gb = gb;
	op_async->operation = operation;
	op_async->completion = completion;

	spin_lock_irqsave(&gb_dev.lock, flags);
	list_add_tail(&op_async->entry, &gb_dev.list_op_async);
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	do_gettimeofday(&op_async->ts);
	op_async->pending = true;
	atomic_inc(&gb->outstanding_operations);
	mutex_lock(&gb->mutex);
	ret = gb_operation_request_send(operation,
					gb_loopback_async_operation_callback,
					GFP_KERNEL);
	if (ret)
		goto error;

	setup_timer(&op_async->timer, gb_loopback_async_operation_timeout,
		    (unsigned long)operation->id);
	op_async->timer.expires = jiffies + gb->jiffy_timeout;
	add_timer(&op_async->timer);

	goto done;

error:
	gb_loopback_async_operation_put(op_async);
done:
	mutex_unlock(&gb->mutex);
	return ret;
}
static int gb_loopback_sync_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
					    request, len + sizeof(*request),
					    NULL, 0);
	kfree(request);
	return retval;
}
static int gb_loopback_sync_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	int retval;

	gb->apbridge_latency_ts = 0;
	gb->gbphy_latency_ts = 0;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	response = kmalloc(len + sizeof(*response), GFP_KERNEL);
	if (!response) {
		kfree(request);
		return -ENOMEM;
	}

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
					    request, len + sizeof(*request),
					    response, len + sizeof(*response));
	if (retval)
		goto gb_error;

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match\n");
		retval = -EREMOTEIO;
	}
	gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
	gb->gbphy_latency_ts = (u32)__le32_to_cpu(response->reserved1);

gb_error:
	kfree(request);
	kfree(response);

	return retval;
}
static int gb_loopback_sync_ping(struct gb_loopback *gb)
{
	return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
					  NULL, 0, NULL, 0);
}
static int gb_loopback_async_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_SINK,
					     request, len + sizeof(*request),
					     0, NULL);
	kfree(request);
	return retval;
}
static int gb_loopback_async_transfer_complete(
				struct gb_loopback_async_operation *op_async)
{
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	size_t len;
	int retval = 0;

	gb = op_async->gb;
	operation = op_async->operation;
	request = operation->request->payload;
	response = operation->response->payload;
	len = le32_to_cpu(request->len);

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match operation id %d\n",
			operation->id);
		retval = -EREMOTEIO;
	} else {
		gb->apbridge_latency_ts =
			(u32)__le32_to_cpu(response->reserved0);
		gb->gbphy_latency_ts =
			(u32)__le32_to_cpu(response->reserved1);
	}

	return retval;
}
static int gb_loopback_async_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval, response_len;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	response_len = sizeof(struct gb_loopback_transfer_response);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_TRANSFER,
					     request, len + sizeof(*request),
					     len + response_len,
					     gb_loopback_async_transfer_complete);
	kfree(request);
	return retval;
}
static int gb_loopback_async_ping(struct gb_loopback *gb)
{
	return gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_PING,
					   NULL, 0, 0, NULL);
}
static int gb_loopback_request_handler(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	struct device *dev = &connection->bundle->dev;
	size_t len;

	/* By convention, the AP initiates the version operation */
	switch (operation->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_SINK:
		return 0;
	case GB_LOOPBACK_TYPE_TRANSFER:
		if (operation->request->payload_size < sizeof(*request)) {
			dev_err(dev, "transfer request too small (%zu < %zu)\n",
				operation->request->payload_size,
				sizeof(*request));
			return -EINVAL;	/* -EMSGSIZE */
		}
		request = operation->request->payload;
		len = le32_to_cpu(request->len);
		if (len > gb_dev.size_max) {
			dev_err(dev, "transfer request too large (%zu > %zu)\n",
				len, gb_dev.size_max);
			return -EINVAL;
		}

		if (!gb_operation_response_alloc(operation,
				len + sizeof(*response), GFP_KERNEL)) {
			dev_err(dev, "error allocating response\n");
			return -ENOMEM;
		}
		response = operation->response->payload;
		response->len = cpu_to_le32(len);
		if (len)
			memcpy(response->data, request->data, len);

		return 0;
	default:
		dev_err(dev, "unsupported request: %u\n", operation->type);
		return -EINVAL;
	}
}
static void gb_loopback_reset_stats(struct gb_loopback *gb)
{
	struct gb_loopback_stats reset = {
		.min = U32_MAX,
	};

	/* Reset per-connection stats */
	memcpy(&gb->latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->throughput, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->requests_per_second, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->apbridge_unipro_latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->gbphy_firmware_latency, &reset,
	       sizeof(struct gb_loopback_stats));

	/* Should be initialized at least once per transaction set */
	gb->apbridge_latency_ts = 0;
	gb->gbphy_latency_ts = 0;
	memset(&gb->ts, 0, sizeof(struct timeval));
}
static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
{
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
	stats->sum += val;
	stats->count++;
}
static void gb_loopback_update_stats_window(struct gb_loopback_stats *stats,
					    u64 val, u32 count)
{
	stats->sum += val;
	stats->count += count;

	do_div(val, count);
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
}
static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
{
	u64 req = gb->requests_completed * USEC_PER_SEC;

	gb_loopback_update_stats_window(&gb->requests_per_second, req, latency);
}
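/*
 * Worked example: if 1000 requests completed during a measurement window of
 * 500000 usec, then req = 1000 * USEC_PER_SEC and the windowed update above
 * records 1000 * 1000000 / 500000 = 2000 requests per second for the min/max
 * bounds, while sum and count accumulate across windows so that the _avg
 * attribute reports total requests / total elapsed time.
 */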
static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
{
	u64 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
		break;
	case GB_LOOPBACK_TYPE_SINK:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  gb->size;
		break;
	case GB_LOOPBACK_TYPE_TRANSFER:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  sizeof(struct gb_loopback_transfer_response) +
				  gb->size * 2;
		break;
	default:
		return;
	}

	aggregate_size *= gb->requests_completed;
	aggregate_size *= USEC_PER_SEC;
	gb_loopback_update_stats_window(&gb->throughput, aggregate_size,
					latency);
}
static void gb_loopback_calculate_latency_stats(struct gb_loopback *gb)
{
	u32 lat;

	/* Express latency in terms of microseconds */
	lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);

	/* Log latency statistic */
	gb_loopback_update_stats(&gb->latency, lat);

	/* Raw latency log on a per thread basis */
	kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));

	/* Log the firmware supplied latency values */
	gb_loopback_update_stats(&gb->apbridge_unipro_latency,
				 gb->apbridge_latency_ts);
	gb_loopback_update_stats(&gb->gbphy_firmware_latency,
				 gb->gbphy_latency_ts);
}
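/*
 * Stats are folded into the windowed counters roughly once per second:
 * gb_loopback_calculate_stats() below only updates the throughput and
 * requests-per-second windows once at least NSEC_PER_SEC has elapsed since
 * gb->ts (or when the final iteration completes), then restarts the window
 * by resetting gb->ts and requests_completed.
 */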
static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
{
	u64 nlat;
	u32 lat;
	struct timeval te;

	if (!error) {
		gb->requests_completed++;
		gb_loopback_calculate_latency_stats(gb);
	}

	do_gettimeofday(&te);
	nlat = gb_loopback_calc_latency(&gb->ts, &te);
	if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
		lat = gb_loopback_nsec_to_usec_latency(nlat);

		gb_loopback_throughput_update(gb, lat);
		gb_loopback_requests_update(gb, lat);

		if (gb->iteration_count != gb->iteration_max) {
			gb->ts = te;
			gb->requests_completed = 0;
		}
	}
}
static void gb_loopback_async_wait_to_send(struct gb_loopback *gb)
{
	if (!(gb->async && gb->outstanding_operations_max))
		return;
	wait_event_interruptible(gb->wq_completion,
				 (atomic_read(&gb->outstanding_operations) <
				  gb->outstanding_operations_max) ||
				 kthread_should_stop());
}
static int gb_loopback_fn(void *data)
{
	int error = 0;
	int us_wait = 0;
	int type;
	int ret;
	u32 size;

	struct gb_loopback *gb = data;
	struct gb_bundle *bundle = gb->connection->bundle;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	while (1) {
		if (!gb->type) {
			gb_pm_runtime_put_autosuspend(bundle);
			wait_event_interruptible(gb->wq, gb->type ||
						 kthread_should_stop());
			ret = gb_pm_runtime_get_sync(bundle);
			if (ret)
				return ret;
		}

		if (kthread_should_stop())
			break;

		/* Limit the maximum number of in-flight async operations */
		gb_loopback_async_wait_to_send(gb);
		if (kthread_should_stop())
			break;

		mutex_lock(&gb->mutex);

		/* Optionally terminate */
		if (gb->send_count == gb->iteration_max) {
			if (gb->iteration_count == gb->iteration_max) {
				gb->type = 0;
				gb->send_count = 0;
				sysfs_notify(&gb->dev->kobj, NULL,
					     "iteration_count");
			}
			mutex_unlock(&gb->mutex);
			continue;
		}
		size = gb->size;
		us_wait = gb->us_wait;
		type = gb->type;
		if (gb->ts.tv_usec == 0 && gb->ts.tv_sec == 0)
			do_gettimeofday(&gb->ts);
		mutex_unlock(&gb->mutex);

		/* Else operations to perform */
		if (gb->async) {
			if (type == GB_LOOPBACK_TYPE_PING) {
				error = gb_loopback_async_ping(gb);
			} else if (type == GB_LOOPBACK_TYPE_TRANSFER) {
				error = gb_loopback_async_transfer(gb, size);
			} else if (type == GB_LOOPBACK_TYPE_SINK) {
				error = gb_loopback_async_sink(gb, size);
			}

			if (error)
				gb->error++;
		} else {
			/* We are effectively single threaded here */
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_sync_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_sync_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_sync_sink(gb, size);

			if (error)
				gb->error++;
			gb->iteration_count++;
			gb_loopback_calculate_stats(gb, !!error);
		}
		gb->send_count++;
		if (us_wait) {
			if (us_wait < 20000)
				usleep_range(us_wait, us_wait + 100);
			else
				msleep(us_wait / 1000);
		}
	}

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;
}
static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
						 struct kfifo *kfifo,
						 struct mutex *mutex)
{
	u32 latency;
	int retval;

	if (kfifo_len(kfifo) == 0) {
		retval = -EAGAIN;
		goto done;
	}

	mutex_lock(mutex);
	retval = kfifo_out(kfifo, &latency, sizeof(latency));
	if (retval > 0) {
		seq_printf(s, "%u", latency);
		retval = 0;
	}
	mutex_unlock(mutex);
done:
	return retval;
}
static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
{
	struct gb_loopback *gb = s->private;

	return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
						     &gb->mutex);
}

static int gb_loopback_latency_open(struct inode *inode, struct file *file)
{
	return single_open(file, gb_loopback_dbgfs_latency_show,
			   inode->i_private);
}
static const struct file_operations gb_loopback_debugfs_latency_ops = {
	.open		= gb_loopback_latency_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
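/*
 * Each read of the per-connection debugfs file created in
 * gb_loopback_probe() pops one raw per-iteration latency sample (in usec)
 * from kfifo_lat. A usage sketch, assuming debugfs is mounted at
 * /sys/kernel/debug and a hypothetical bundle device named 1-2.2:
 *
 *	cat /sys/kernel/debug/gb_loopback/raw_latency_1-2.2
 */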
static int gb_loopback_bus_id_compare(void *priv, struct list_head *lha,
				      struct list_head *lhb)
{
	struct gb_loopback *a = list_entry(lha, struct gb_loopback, entry);
	struct gb_loopback *b = list_entry(lhb, struct gb_loopback, entry);
	struct gb_connection *ca = a->connection;
	struct gb_connection *cb = b->connection;

	if (ca->bundle->intf->interface_id < cb->bundle->intf->interface_id)
		return -1;
	if (cb->bundle->intf->interface_id < ca->bundle->intf->interface_id)
		return 1;
	if (ca->bundle->id < cb->bundle->id)
		return -1;
	if (cb->bundle->id < ca->bundle->id)
		return 1;
	if (ca->intf_cport_id < cb->intf_cport_id)
		return -1;
	else if (cb->intf_cport_id < ca->intf_cport_id)
		return 1;

	return 0;
}
static void gb_loopback_insert_id(struct gb_loopback *gb)
{
	struct gb_loopback *gb_list;
	u32 new_lbid = 0;

	/* perform an insertion sort */
	list_add_tail(&gb->entry, &gb_dev.list);
	list_sort(NULL, &gb_dev.list, gb_loopback_bus_id_compare);
	list_for_each_entry(gb_list, &gb_dev.list, entry) {
		gb_list->lbid = 1 << new_lbid;
		new_lbid++;
	}
}
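/*
 * Note that every connection on the list is renumbered here, so lbid is a
 * single-bit mask (1 << position) reflecting the connection's rank in
 * (interface_id, bundle_id, cport_id) order rather than probe order.
 */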
#define DEBUGFS_NAMELEN 32
static int gb_loopback_probe(struct gb_bundle *bundle,
			     const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_loopback *gb;
	struct device *dev;
	int retval;
	char name[DEBUGFS_NAMELEN];
	unsigned long flags;

	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOOPBACK)
		return -ENODEV;

	gb = kzalloc(sizeof(*gb), GFP_KERNEL);
	if (!gb)
		return -ENOMEM;

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  gb_loopback_request_handler);
	if (IS_ERR(connection)) {
		retval = PTR_ERR(connection);
		goto out_kzalloc;
	}

	gb->connection = connection;
	greybus_set_drvdata(bundle, gb);

	init_waitqueue_head(&gb->wq);
	init_waitqueue_head(&gb->wq_completion);
	atomic_set(&gb->outstanding_operations, 0);
	gb_loopback_reset_stats(gb);

	/* Reported values to user-space for min/max timeouts */
	gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
	gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);

	if (!gb_dev.count) {
		/* Calculate maximum payload */
		gb_dev.size_max = gb_operation_get_payload_size_max(connection);
		if (gb_dev.size_max <=
			sizeof(struct gb_loopback_transfer_request)) {
			retval = -EINVAL;
			goto out_connection_destroy;
		}
		gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
	}

	/* Create per-connection sysfs and debugfs data-points */
	snprintf(name, sizeof(name), "raw_latency_%s",
		 dev_name(&connection->bundle->dev));
	gb->file = debugfs_create_file(name, S_IFREG | S_IRUGO, gb_dev.root, gb,
				       &gb_loopback_debugfs_latency_ops);

	gb->id = ida_simple_get(&loopback_ida, 0, 0, GFP_KERNEL);
	if (gb->id < 0) {
		retval = gb->id;
		goto out_debugfs_remove;
	}

	retval = gb_connection_enable(connection);
	if (retval)
		goto out_ida_remove;

	dev = device_create_with_groups(&loopback_class,
					&connection->bundle->dev,
					MKDEV(0, 0), gb, loopback_groups,
					"gb_loopback%d", gb->id);
	if (IS_ERR(dev)) {
		retval = PTR_ERR(dev);
		goto out_connection_disable;
	}
	gb->dev = dev;

	/* Allocate kfifo */
	if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
			GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_conn;
	}
	if (kfifo_alloc(&gb->kfifo_ts, kfifo_depth * sizeof(struct timeval) * 2,
			GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_kfifo0;
	}

	/* Fork worker thread */
	mutex_init(&gb->mutex);
	gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
	if (IS_ERR(gb->task)) {
		retval = PTR_ERR(gb->task);
		goto out_kfifo1;
	}

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_loopback_insert_id(gb);
	gb_dev.count++;
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	gb_connection_latency_tag_enable(connection);

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;

out_kfifo1:
	kfifo_free(&gb->kfifo_ts);
out_kfifo0:
	kfifo_free(&gb->kfifo_lat);
out_conn:
	device_unregister(dev);
out_connection_disable:
	gb_connection_disable(connection);
out_ida_remove:
	ida_simple_remove(&loopback_ida, gb->id);
out_debugfs_remove:
	debugfs_remove(gb->file);
out_connection_destroy:
	gb_connection_destroy(connection);
out_kzalloc:
	kfree(gb);

	return retval;
}
static void gb_loopback_disconnect(struct gb_bundle *bundle)
{
	struct gb_loopback *gb = greybus_get_drvdata(bundle);
	unsigned long flags;
	int ret;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		gb_pm_runtime_get_noresume(bundle);

	gb_connection_disable(gb->connection);

	if (!IS_ERR_OR_NULL(gb->task))
		kthread_stop(gb->task);

	kfifo_free(&gb->kfifo_lat);
	kfifo_free(&gb->kfifo_ts);
	gb_connection_latency_tag_disable(gb->connection);
	debugfs_remove(gb->file);

	/*
	 * FIXME: gb_loopback_async_wait_all() is redundant now, as connection
	 * is disabled at the beginning and so we can't have any more
	 * incoming/outgoing requests.
	 */
	gb_loopback_async_wait_all(gb);

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_dev.count--;
	list_del(&gb->entry);
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	device_unregister(gb->dev);
	ida_simple_remove(&loopback_ida, gb->id);

	gb_connection_destroy(gb->connection);
	kfree(gb);
}
static const struct greybus_bundle_id gb_loopback_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_loopback_id_table);

static struct greybus_driver gb_loopback_driver = {
	.name		= "loopback",
	.probe		= gb_loopback_probe,
	.disconnect	= gb_loopback_disconnect,
	.id_table	= gb_loopback_id_table,
};
static int loopback_init(void)
{
	int retval;

	INIT_LIST_HEAD(&gb_dev.list);
	INIT_LIST_HEAD(&gb_dev.list_op_async);
	spin_lock_init(&gb_dev.lock);
	gb_dev.root = debugfs_create_dir("gb_loopback", NULL);

	retval = class_register(&loopback_class);
	if (retval)
		goto err;

	retval = greybus_register(&gb_loopback_driver);
	if (retval)
		goto err_unregister;

	return 0;

err_unregister:
	class_unregister(&loopback_class);
err:
	debugfs_remove_recursive(gb_dev.root);
	return retval;
}
module_init(loopback_init);
static void __exit loopback_exit(void)
{
	debugfs_remove_recursive(gb_dev.root);
	greybus_deregister(&gb_loopback_driver);
	class_unregister(&loopback_class);
	ida_destroy(&loopback_ida);
}
module_exit(loopback_exit);
MODULE_LICENSE("GPL v2");