// SPDX-License-Identifier: GPL-2.0
/*
 * Loopback bridge driver for the Greybus loopback module.
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/slab.h>
15 #include <linux/kthread.h>
16 #include <linux/delay.h>
17 #include <linux/random.h>
18 #include <linux/sizes.h>
19 #include <linux/cdev.h>
21 #include <linux/kfifo.h>
22 #include <linux/debugfs.h>
23 #include <linux/list_sort.h>
24 #include <linux/spinlock.h>
25 #include <linux/workqueue.h>
26 #include <linux/atomic.h>
27 #include <linux/pm_runtime.h>
29 #include <asm/div64.h>
32 #include "connection.h"
34 #define NSEC_PER_DAY 86400000000000ULL
36 struct gb_loopback_stats
{
43 struct gb_loopback_device
{
48 /* We need to take a lock in atomic context */
53 static struct gb_loopback_device gb_dev
;
55 struct gb_loopback_async_operation
{
56 struct gb_loopback
*gb
;
57 struct gb_operation
*operation
;
59 int (*completion
)(struct gb_loopback_async_operation
*op_async
);
63 struct gb_connection
*connection
;
66 struct kfifo kfifo_lat
;
68 struct task_struct
*task
;
71 wait_queue_head_t wq_completion
;
72 atomic_t outstanding_operations
;
74 /* Per connection stats */
76 struct gb_loopback_stats latency
;
77 struct gb_loopback_stats throughput
;
78 struct gb_loopback_stats requests_per_second
;
79 struct gb_loopback_stats apbridge_unipro_latency
;
80 struct gb_loopback_stats gbphy_firmware_latency
;
90 u32 requests_completed
;
91 u32 requests_timedout
;
96 u32 outstanding_operations_max
;
98 u32 apbridge_latency_ts
;
104 static struct class loopback_class
= {
105 .name
= "gb_loopback",
106 .owner
= THIS_MODULE
,
108 static DEFINE_IDA(loopback_ida
);
110 /* Min/max values in jiffies */
111 #define GB_LOOPBACK_TIMEOUT_MIN 1
112 #define GB_LOOPBACK_TIMEOUT_MAX 10000
114 #define GB_LOOPBACK_FIFO_DEFAULT 8192
116 static unsigned int kfifo_depth
= GB_LOOPBACK_FIFO_DEFAULT
;
117 module_param(kfifo_depth
, uint
, 0444);
119 /* Maximum size of any one send data buffer we support */
120 #define MAX_PACKET_SIZE (PAGE_SIZE * 2)
122 #define GB_LOOPBACK_US_WAIT_MAX 1000000
124 /* interface sysfs attributes */
125 #define gb_loopback_ro_attr(field) \
126 static ssize_t field##_show(struct device *dev, \
127 struct device_attribute *attr, \
130 struct gb_loopback *gb = dev_get_drvdata(dev); \
131 return sprintf(buf, "%u\n", gb->field); \
133 static DEVICE_ATTR_RO(field)
135 #define gb_loopback_ro_stats_attr(name, field, type) \
136 static ssize_t name##_##field##_show(struct device *dev, \
137 struct device_attribute *attr, \
140 struct gb_loopback *gb = dev_get_drvdata(dev); \
141 /* Report 0 for min and max if no transfer successed */ \
142 if (!gb->requests_completed) \
143 return sprintf(buf, "0\n"); \
144 return sprintf(buf, "%" #type "\n", gb->name.field); \
146 static DEVICE_ATTR_RO(name##_##field)
148 #define gb_loopback_ro_avg_attr(name) \
149 static ssize_t name##_avg_show(struct device *dev, \
150 struct device_attribute *attr, \
153 struct gb_loopback_stats *stats; \
154 struct gb_loopback *gb; \
157 gb = dev_get_drvdata(dev); \
159 count = stats->count ? stats->count : 1; \
160 avg = stats->sum + count / 2000000; /* round closest */ \
161 rem = do_div(avg, count); \
163 do_div(rem, count); \
164 return sprintf(buf, "%llu.%06u\n", avg, (u32)rem); \
166 static DEVICE_ATTR_RO(name##_avg)
168 #define gb_loopback_stats_attrs(field) \
169 gb_loopback_ro_stats_attr(field, min, u); \
170 gb_loopback_ro_stats_attr(field, max, u); \
171 gb_loopback_ro_avg_attr(field)
173 #define gb_loopback_attr(field, type) \
174 static ssize_t field##_show(struct device *dev, \
175 struct device_attribute *attr, \
178 struct gb_loopback *gb = dev_get_drvdata(dev); \
179 return sprintf(buf, "%" #type "\n", gb->field); \
181 static ssize_t field##_store(struct device *dev, \
182 struct device_attribute *attr, \
187 struct gb_loopback *gb = dev_get_drvdata(dev); \
188 mutex_lock(&gb->mutex); \
189 ret = sscanf(buf, "%"#type, &gb->field); \
193 gb_loopback_check_attr(gb, bundle); \
194 mutex_unlock(&gb->mutex); \
197 static DEVICE_ATTR_RW(field)
199 #define gb_dev_loopback_ro_attr(field, conn) \
200 static ssize_t field##_show(struct device *dev, \
201 struct device_attribute *attr, \
204 struct gb_loopback *gb = dev_get_drvdata(dev); \
205 return sprintf(buf, "%u\n", gb->field); \
207 static DEVICE_ATTR_RO(field)
209 #define gb_dev_loopback_rw_attr(field, type) \
210 static ssize_t field##_show(struct device *dev, \
211 struct device_attribute *attr, \
214 struct gb_loopback *gb = dev_get_drvdata(dev); \
215 return sprintf(buf, "%" #type "\n", gb->field); \
217 static ssize_t field##_store(struct device *dev, \
218 struct device_attribute *attr, \
223 struct gb_loopback *gb = dev_get_drvdata(dev); \
224 mutex_lock(&gb->mutex); \
225 ret = sscanf(buf, "%"#type, &gb->field); \
229 gb_loopback_check_attr(gb); \
230 mutex_unlock(&gb->mutex); \
233 static DEVICE_ATTR_RW(field)
235 static void gb_loopback_reset_stats(struct gb_loopback
*gb
);
236 static void gb_loopback_check_attr(struct gb_loopback
*gb
)
238 if (gb
->us_wait
> GB_LOOPBACK_US_WAIT_MAX
)
239 gb
->us_wait
= GB_LOOPBACK_US_WAIT_MAX
;
240 if (gb
->size
> gb_dev
.size_max
)
241 gb
->size
= gb_dev
.size_max
;
242 gb
->requests_timedout
= 0;
243 gb
->requests_completed
= 0;
244 gb
->iteration_count
= 0;
248 if (kfifo_depth
< gb
->iteration_max
) {
250 "cannot log bytes %u kfifo_depth %u\n",
251 gb
->iteration_max
, kfifo_depth
);
253 kfifo_reset_out(&gb
->kfifo_lat
);
256 case GB_LOOPBACK_TYPE_PING
:
257 case GB_LOOPBACK_TYPE_TRANSFER
:
258 case GB_LOOPBACK_TYPE_SINK
:
259 gb
->jiffy_timeout
= usecs_to_jiffies(gb
->timeout
);
260 if (!gb
->jiffy_timeout
)
261 gb
->jiffy_timeout
= GB_LOOPBACK_TIMEOUT_MIN
;
262 else if (gb
->jiffy_timeout
> GB_LOOPBACK_TIMEOUT_MAX
)
263 gb
->jiffy_timeout
= GB_LOOPBACK_TIMEOUT_MAX
;
264 gb_loopback_reset_stats(gb
);
273 /* Time to send and receive one message */
274 gb_loopback_stats_attrs(latency
);
275 /* Number of requests sent per second on this cport */
276 gb_loopback_stats_attrs(requests_per_second
);
277 /* Quantity of data sent and received on this cport */
278 gb_loopback_stats_attrs(throughput
);
279 /* Latency across the UniPro link from APBridge's perspective */
280 gb_loopback_stats_attrs(apbridge_unipro_latency
);
281 /* Firmware induced overhead in the GPBridge */
282 gb_loopback_stats_attrs(gbphy_firmware_latency
);
284 /* Number of errors encountered during loop */
285 gb_loopback_ro_attr(error
);
286 /* Number of requests successfully completed async */
287 gb_loopback_ro_attr(requests_completed
);
288 /* Number of requests timed out async */
289 gb_loopback_ro_attr(requests_timedout
);
290 /* Timeout minimum in useconds */
291 gb_loopback_ro_attr(timeout_min
);
292 /* Timeout minimum in useconds */
293 gb_loopback_ro_attr(timeout_max
);
296 * Type of loopback message to send based on protocol type definitions
297 * 0 => Don't send message
298 * 2 => Send ping message continuously (message without payload)
299 * 3 => Send transfer message continuously (message with payload,
300 * payload returned in response)
301 * 4 => Send a sink message (message with payload, no payload in response)
303 gb_dev_loopback_rw_attr(type
, d
);
304 /* Size of transfer message payload: 0-4096 bytes */
305 gb_dev_loopback_rw_attr(size
, u
);
306 /* Time to wait between two messages: 0-1000 ms */
307 gb_dev_loopback_rw_attr(us_wait
, d
);
308 /* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
309 gb_dev_loopback_rw_attr(iteration_max
, u
);
310 /* The current index of the for (i = 0; i < iteration_max; i++) loop */
311 gb_dev_loopback_ro_attr(iteration_count
, false);
312 /* A flag to indicate synchronous or asynchronous operations */
313 gb_dev_loopback_rw_attr(async
, u
);
314 /* Timeout of an individual asynchronous request */
315 gb_dev_loopback_rw_attr(timeout
, u
);
316 /* Maximum number of in-flight operations before back-off */
317 gb_dev_loopback_rw_attr(outstanding_operations_max
, u
);
319 static struct attribute
*loopback_attrs
[] = {
320 &dev_attr_latency_min
.attr
,
321 &dev_attr_latency_max
.attr
,
322 &dev_attr_latency_avg
.attr
,
323 &dev_attr_requests_per_second_min
.attr
,
324 &dev_attr_requests_per_second_max
.attr
,
325 &dev_attr_requests_per_second_avg
.attr
,
326 &dev_attr_throughput_min
.attr
,
327 &dev_attr_throughput_max
.attr
,
328 &dev_attr_throughput_avg
.attr
,
329 &dev_attr_apbridge_unipro_latency_min
.attr
,
330 &dev_attr_apbridge_unipro_latency_max
.attr
,
331 &dev_attr_apbridge_unipro_latency_avg
.attr
,
332 &dev_attr_gbphy_firmware_latency_min
.attr
,
333 &dev_attr_gbphy_firmware_latency_max
.attr
,
334 &dev_attr_gbphy_firmware_latency_avg
.attr
,
337 &dev_attr_us_wait
.attr
,
338 &dev_attr_iteration_count
.attr
,
339 &dev_attr_iteration_max
.attr
,
340 &dev_attr_async
.attr
,
341 &dev_attr_error
.attr
,
342 &dev_attr_requests_completed
.attr
,
343 &dev_attr_requests_timedout
.attr
,
344 &dev_attr_timeout
.attr
,
345 &dev_attr_outstanding_operations_max
.attr
,
346 &dev_attr_timeout_min
.attr
,
347 &dev_attr_timeout_max
.attr
,
350 ATTRIBUTE_GROUPS(loopback
);
352 static void gb_loopback_calculate_stats(struct gb_loopback
*gb
, bool error
);
354 static u32
gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs
)
356 do_div(elapsed_nsecs
, NSEC_PER_USEC
);
357 return elapsed_nsecs
;
360 static u64
__gb_loopback_calc_latency(u64 t1
, u64 t2
)
365 return NSEC_PER_DAY
- t2
+ t1
;
368 static u64
gb_loopback_calc_latency(ktime_t ts
, ktime_t te
)
370 return __gb_loopback_calc_latency(ktime_to_ns(ts
), ktime_to_ns(te
));
373 static int gb_loopback_operation_sync(struct gb_loopback
*gb
, int type
,
374 void *request
, int request_size
,
375 void *response
, int response_size
)
377 struct gb_operation
*operation
;
382 operation
= gb_operation_create(gb
->connection
, type
, request_size
,
383 response_size
, GFP_KERNEL
);
388 memcpy(operation
->request
->payload
, request
, request_size
);
390 ret
= gb_operation_request_send_sync(operation
);
392 dev_err(&gb
->connection
->bundle
->dev
,
393 "synchronous operation failed: %d\n", ret
);
394 goto out_put_operation
;
396 if (response_size
== operation
->response
->payload_size
) {
397 memcpy(response
, operation
->response
->payload
,
400 dev_err(&gb
->connection
->bundle
->dev
,
401 "response size %zu expected %d\n",
402 operation
->response
->payload_size
,
405 goto out_put_operation
;
411 /* Calculate the total time the message took */
412 gb
->elapsed_nsecs
= gb_loopback_calc_latency(ts
, te
);
415 gb_operation_put(operation
);
420 static void gb_loopback_async_wait_all(struct gb_loopback
*gb
)
422 wait_event(gb
->wq_completion
,
423 !atomic_read(&gb
->outstanding_operations
));
426 static void gb_loopback_async_operation_callback(struct gb_operation
*operation
)
428 struct gb_loopback_async_operation
*op_async
;
429 struct gb_loopback
*gb
;
434 result
= gb_operation_result(operation
);
435 op_async
= gb_operation_get_data(operation
);
438 mutex_lock(&gb
->mutex
);
440 if (!result
&& op_async
->completion
)
441 result
= op_async
->completion(op_async
);
444 gb
->elapsed_nsecs
= gb_loopback_calc_latency(op_async
->ts
, te
);
447 if (result
== -ETIMEDOUT
)
448 gb
->requests_timedout
++;
451 gb
->iteration_count
++;
452 gb_loopback_calculate_stats(gb
, result
);
454 mutex_unlock(&gb
->mutex
);
456 dev_dbg(&gb
->connection
->bundle
->dev
, "complete operation %d\n",
459 /* Wake up waiters */
460 atomic_dec(&op_async
->gb
->outstanding_operations
);
461 wake_up(&gb
->wq_completion
);
463 /* Release resources */
464 gb_operation_put(operation
);
468 static int gb_loopback_async_operation(struct gb_loopback
*gb
, int type
,
469 void *request
, int request_size
,
473 struct gb_loopback_async_operation
*op_async
;
474 struct gb_operation
*operation
;
477 op_async
= kzalloc(sizeof(*op_async
), GFP_KERNEL
);
481 operation
= gb_operation_create(gb
->connection
, type
, request_size
,
482 response_size
, GFP_KERNEL
);
489 memcpy(operation
->request
->payload
, request
, request_size
);
491 gb_operation_set_data(operation
, op_async
);
494 op_async
->operation
= operation
;
495 op_async
->completion
= completion
;
497 op_async
->ts
= ktime_get();
499 atomic_inc(&gb
->outstanding_operations
);
500 ret
= gb_operation_request_send(operation
,
501 gb_loopback_async_operation_callback
,
502 jiffies_to_msecs(gb
->jiffy_timeout
),
505 atomic_dec(&gb
->outstanding_operations
);
506 gb_operation_put(operation
);
512 static int gb_loopback_sync_sink(struct gb_loopback
*gb
, u32 len
)
514 struct gb_loopback_transfer_request
*request
;
517 request
= kmalloc(len
+ sizeof(*request
), GFP_KERNEL
);
521 request
->len
= cpu_to_le32(len
);
522 retval
= gb_loopback_operation_sync(gb
, GB_LOOPBACK_TYPE_SINK
,
523 request
, len
+ sizeof(*request
),
529 static int gb_loopback_sync_transfer(struct gb_loopback
*gb
, u32 len
)
531 struct gb_loopback_transfer_request
*request
;
532 struct gb_loopback_transfer_response
*response
;
535 gb
->apbridge_latency_ts
= 0;
536 gb
->gbphy_latency_ts
= 0;
538 request
= kmalloc(len
+ sizeof(*request
), GFP_KERNEL
);
541 response
= kmalloc(len
+ sizeof(*response
), GFP_KERNEL
);
547 memset(request
->data
, 0x5A, len
);
549 request
->len
= cpu_to_le32(len
);
550 retval
= gb_loopback_operation_sync(gb
, GB_LOOPBACK_TYPE_TRANSFER
,
551 request
, len
+ sizeof(*request
),
552 response
, len
+ sizeof(*response
));
556 if (memcmp(request
->data
, response
->data
, len
)) {
557 dev_err(&gb
->connection
->bundle
->dev
,
558 "Loopback Data doesn't match\n");
561 gb
->apbridge_latency_ts
= (u32
)__le32_to_cpu(response
->reserved0
);
562 gb
->gbphy_latency_ts
= (u32
)__le32_to_cpu(response
->reserved1
);
571 static int gb_loopback_sync_ping(struct gb_loopback
*gb
)
573 return gb_loopback_operation_sync(gb
, GB_LOOPBACK_TYPE_PING
,
577 static int gb_loopback_async_sink(struct gb_loopback
*gb
, u32 len
)
579 struct gb_loopback_transfer_request
*request
;
582 request
= kmalloc(len
+ sizeof(*request
), GFP_KERNEL
);
586 request
->len
= cpu_to_le32(len
);
587 retval
= gb_loopback_async_operation(gb
, GB_LOOPBACK_TYPE_SINK
,
588 request
, len
+ sizeof(*request
),
594 static int gb_loopback_async_transfer_complete(
595 struct gb_loopback_async_operation
*op_async
)
597 struct gb_loopback
*gb
;
598 struct gb_operation
*operation
;
599 struct gb_loopback_transfer_request
*request
;
600 struct gb_loopback_transfer_response
*response
;
605 operation
= op_async
->operation
;
606 request
= operation
->request
->payload
;
607 response
= operation
->response
->payload
;
608 len
= le32_to_cpu(request
->len
);
610 if (memcmp(request
->data
, response
->data
, len
)) {
611 dev_err(&gb
->connection
->bundle
->dev
,
612 "Loopback Data doesn't match operation id %d\n",
616 gb
->apbridge_latency_ts
=
617 (u32
)__le32_to_cpu(response
->reserved0
);
618 gb
->gbphy_latency_ts
=
619 (u32
)__le32_to_cpu(response
->reserved1
);
625 static int gb_loopback_async_transfer(struct gb_loopback
*gb
, u32 len
)
627 struct gb_loopback_transfer_request
*request
;
628 int retval
, response_len
;
630 request
= kmalloc(len
+ sizeof(*request
), GFP_KERNEL
);
634 memset(request
->data
, 0x5A, len
);
636 request
->len
= cpu_to_le32(len
);
637 response_len
= sizeof(struct gb_loopback_transfer_response
);
638 retval
= gb_loopback_async_operation(gb
, GB_LOOPBACK_TYPE_TRANSFER
,
639 request
, len
+ sizeof(*request
),
641 gb_loopback_async_transfer_complete
);
650 static int gb_loopback_async_ping(struct gb_loopback
*gb
)
652 return gb_loopback_async_operation(gb
, GB_LOOPBACK_TYPE_PING
,
656 static int gb_loopback_request_handler(struct gb_operation
*operation
)
658 struct gb_connection
*connection
= operation
->connection
;
659 struct gb_loopback_transfer_request
*request
;
660 struct gb_loopback_transfer_response
*response
;
661 struct device
*dev
= &connection
->bundle
->dev
;
664 /* By convention, the AP initiates the version operation */
665 switch (operation
->type
) {
666 case GB_LOOPBACK_TYPE_PING
:
667 case GB_LOOPBACK_TYPE_SINK
:
669 case GB_LOOPBACK_TYPE_TRANSFER
:
670 if (operation
->request
->payload_size
< sizeof(*request
)) {
671 dev_err(dev
, "transfer request too small (%zu < %zu)\n",
672 operation
->request
->payload_size
,
674 return -EINVAL
; /* -EMSGSIZE */
676 request
= operation
->request
->payload
;
677 len
= le32_to_cpu(request
->len
);
678 if (len
> gb_dev
.size_max
) {
679 dev_err(dev
, "transfer request too large (%zu > %zu)\n",
680 len
, gb_dev
.size_max
);
684 if (!gb_operation_response_alloc(operation
,
685 len
+ sizeof(*response
), GFP_KERNEL
)) {
686 dev_err(dev
, "error allocating response\n");
689 response
= operation
->response
->payload
;
690 response
->len
= cpu_to_le32(len
);
692 memcpy(response
->data
, request
->data
, len
);
696 dev_err(dev
, "unsupported request: %u\n", operation
->type
);
701 static void gb_loopback_reset_stats(struct gb_loopback
*gb
)
703 struct gb_loopback_stats reset
= {
707 /* Reset per-connection stats */
708 memcpy(&gb
->latency
, &reset
,
709 sizeof(struct gb_loopback_stats
));
710 memcpy(&gb
->throughput
, &reset
,
711 sizeof(struct gb_loopback_stats
));
712 memcpy(&gb
->requests_per_second
, &reset
,
713 sizeof(struct gb_loopback_stats
));
714 memcpy(&gb
->apbridge_unipro_latency
, &reset
,
715 sizeof(struct gb_loopback_stats
));
716 memcpy(&gb
->gbphy_firmware_latency
, &reset
,
717 sizeof(struct gb_loopback_stats
));
719 /* Should be initialized at least once per transaction set */
720 gb
->apbridge_latency_ts
= 0;
721 gb
->gbphy_latency_ts
= 0;
722 gb
->ts
= ktime_set(0, 0);
725 static void gb_loopback_update_stats(struct gb_loopback_stats
*stats
, u32 val
)
727 if (stats
->min
> val
)
729 if (stats
->max
< val
)
735 static void gb_loopback_update_stats_window(struct gb_loopback_stats
*stats
,
739 stats
->count
+= count
;
742 if (stats
->min
> val
)
744 if (stats
->max
< val
)
748 static void gb_loopback_requests_update(struct gb_loopback
*gb
, u32 latency
)
750 u64 req
= gb
->requests_completed
* USEC_PER_SEC
;
752 gb_loopback_update_stats_window(&gb
->requests_per_second
, req
, latency
);
755 static void gb_loopback_throughput_update(struct gb_loopback
*gb
, u32 latency
)
757 u64 aggregate_size
= sizeof(struct gb_operation_msg_hdr
) * 2;
760 case GB_LOOPBACK_TYPE_PING
:
762 case GB_LOOPBACK_TYPE_SINK
:
763 aggregate_size
+= sizeof(struct gb_loopback_transfer_request
) +
766 case GB_LOOPBACK_TYPE_TRANSFER
:
767 aggregate_size
+= sizeof(struct gb_loopback_transfer_request
) +
768 sizeof(struct gb_loopback_transfer_response
) +
775 aggregate_size
*= gb
->requests_completed
;
776 aggregate_size
*= USEC_PER_SEC
;
777 gb_loopback_update_stats_window(&gb
->throughput
, aggregate_size
,
781 static void gb_loopback_calculate_latency_stats(struct gb_loopback
*gb
)
785 /* Express latency in terms of microseconds */
786 lat
= gb_loopback_nsec_to_usec_latency(gb
->elapsed_nsecs
);
788 /* Log latency stastic */
789 gb_loopback_update_stats(&gb
->latency
, lat
);
791 /* Raw latency log on a per thread basis */
792 kfifo_in(&gb
->kfifo_lat
, (unsigned char *)&lat
, sizeof(lat
));
794 /* Log the firmware supplied latency values */
795 gb_loopback_update_stats(&gb
->apbridge_unipro_latency
,
796 gb
->apbridge_latency_ts
);
797 gb_loopback_update_stats(&gb
->gbphy_firmware_latency
,
798 gb
->gbphy_latency_ts
);
801 static void gb_loopback_calculate_stats(struct gb_loopback
*gb
, bool error
)
808 gb
->requests_completed
++;
809 gb_loopback_calculate_latency_stats(gb
);
813 nlat
= gb_loopback_calc_latency(gb
->ts
, te
);
814 if (nlat
>= NSEC_PER_SEC
|| gb
->iteration_count
== gb
->iteration_max
) {
815 lat
= gb_loopback_nsec_to_usec_latency(nlat
);
817 gb_loopback_throughput_update(gb
, lat
);
818 gb_loopback_requests_update(gb
, lat
);
820 if (gb
->iteration_count
!= gb
->iteration_max
) {
822 gb
->requests_completed
= 0;
827 static void gb_loopback_async_wait_to_send(struct gb_loopback
*gb
)
829 if (!(gb
->async
&& gb
->outstanding_operations_max
))
831 wait_event_interruptible(gb
->wq_completion
,
832 (atomic_read(&gb
->outstanding_operations
) <
833 gb
->outstanding_operations_max
) ||
834 kthread_should_stop());
837 static int gb_loopback_fn(void *data
)
845 struct gb_loopback
*gb
= data
;
846 struct gb_bundle
*bundle
= gb
->connection
->bundle
;
848 ret
= gb_pm_runtime_get_sync(bundle
);
854 gb_pm_runtime_put_autosuspend(bundle
);
855 wait_event_interruptible(gb
->wq
, gb
->type
||
856 kthread_should_stop());
857 ret
= gb_pm_runtime_get_sync(bundle
);
862 if (kthread_should_stop())
865 /* Limit the maximum number of in-flight async operations */
866 gb_loopback_async_wait_to_send(gb
);
867 if (kthread_should_stop())
870 mutex_lock(&gb
->mutex
);
872 /* Optionally terminate */
873 if (gb
->send_count
== gb
->iteration_max
) {
874 mutex_unlock(&gb
->mutex
);
876 /* Wait for synchronous and asynchronus completion */
877 gb_loopback_async_wait_all(gb
);
879 /* Mark complete unless user-space has poked us */
880 mutex_lock(&gb
->mutex
);
881 if (gb
->iteration_count
== gb
->iteration_max
) {
884 sysfs_notify(&gb
->dev
->kobj
, NULL
,
886 dev_dbg(&bundle
->dev
, "load test complete\n");
888 dev_dbg(&bundle
->dev
,
889 "continuing on with new test set\n");
891 mutex_unlock(&gb
->mutex
);
895 us_wait
= gb
->us_wait
;
897 if (ktime_to_ns(gb
->ts
) == 0)
898 gb
->ts
= ktime_get();
900 /* Else operations to perform */
902 if (type
== GB_LOOPBACK_TYPE_PING
)
903 error
= gb_loopback_async_ping(gb
);
904 else if (type
== GB_LOOPBACK_TYPE_TRANSFER
)
905 error
= gb_loopback_async_transfer(gb
, size
);
906 else if (type
== GB_LOOPBACK_TYPE_SINK
)
907 error
= gb_loopback_async_sink(gb
, size
);
911 gb
->iteration_count
++;
914 /* We are effectively single threaded here */
915 if (type
== GB_LOOPBACK_TYPE_PING
)
916 error
= gb_loopback_sync_ping(gb
);
917 else if (type
== GB_LOOPBACK_TYPE_TRANSFER
)
918 error
= gb_loopback_sync_transfer(gb
, size
);
919 else if (type
== GB_LOOPBACK_TYPE_SINK
)
920 error
= gb_loopback_sync_sink(gb
, size
);
924 gb
->iteration_count
++;
925 gb_loopback_calculate_stats(gb
, !!error
);
928 mutex_unlock(&gb
->mutex
);
932 usleep_range(us_wait
, us_wait
+ 100);
934 msleep(us_wait
/ 1000);
938 gb_pm_runtime_put_autosuspend(bundle
);
943 static int gb_loopback_dbgfs_latency_show_common(struct seq_file
*s
,
950 if (kfifo_len(kfifo
) == 0) {
956 retval
= kfifo_out(kfifo
, &latency
, sizeof(latency
));
958 seq_printf(s
, "%u", latency
);
966 static int gb_loopback_dbgfs_latency_show(struct seq_file
*s
, void *unused
)
968 struct gb_loopback
*gb
= s
->private;
970 return gb_loopback_dbgfs_latency_show_common(s
, &gb
->kfifo_lat
,
973 DEFINE_SHOW_ATTRIBUTE(gb_loopback_dbgfs_latency
);
975 #define DEBUGFS_NAMELEN 32
977 static int gb_loopback_probe(struct gb_bundle
*bundle
,
978 const struct greybus_bundle_id
*id
)
980 struct greybus_descriptor_cport
*cport_desc
;
981 struct gb_connection
*connection
;
982 struct gb_loopback
*gb
;
985 char name
[DEBUGFS_NAMELEN
];
988 if (bundle
->num_cports
!= 1)
991 cport_desc
= &bundle
->cport_desc
[0];
992 if (cport_desc
->protocol_id
!= GREYBUS_PROTOCOL_LOOPBACK
)
995 gb
= kzalloc(sizeof(*gb
), GFP_KERNEL
);
999 connection
= gb_connection_create(bundle
, le16_to_cpu(cport_desc
->id
),
1000 gb_loopback_request_handler
);
1001 if (IS_ERR(connection
)) {
1002 retval
= PTR_ERR(connection
);
1006 gb
->connection
= connection
;
1007 greybus_set_drvdata(bundle
, gb
);
1009 init_waitqueue_head(&gb
->wq
);
1010 init_waitqueue_head(&gb
->wq_completion
);
1011 atomic_set(&gb
->outstanding_operations
, 0);
1012 gb_loopback_reset_stats(gb
);
1014 /* Reported values to user-space for min/max timeouts */
1015 gb
->timeout_min
= jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN
);
1016 gb
->timeout_max
= jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX
);
1018 if (!gb_dev
.count
) {
1019 /* Calculate maximum payload */
1020 gb_dev
.size_max
= gb_operation_get_payload_size_max(connection
);
1021 if (gb_dev
.size_max
<=
1022 sizeof(struct gb_loopback_transfer_request
)) {
1024 goto out_connection_destroy
;
1026 gb_dev
.size_max
-= sizeof(struct gb_loopback_transfer_request
);
1029 /* Create per-connection sysfs and debugfs data-points */
1030 snprintf(name
, sizeof(name
), "raw_latency_%s",
1031 dev_name(&connection
->bundle
->dev
));
1032 gb
->file
= debugfs_create_file(name
, S_IFREG
| 0444, gb_dev
.root
, gb
,
1033 &gb_loopback_dbgfs_latency_fops
);
1035 gb
->id
= ida_simple_get(&loopback_ida
, 0, 0, GFP_KERNEL
);
1038 goto out_debugfs_remove
;
1041 retval
= gb_connection_enable(connection
);
1043 goto out_ida_remove
;
1045 dev
= device_create_with_groups(&loopback_class
,
1046 &connection
->bundle
->dev
,
1047 MKDEV(0, 0), gb
, loopback_groups
,
1048 "gb_loopback%d", gb
->id
);
1050 retval
= PTR_ERR(dev
);
1051 goto out_connection_disable
;
1055 /* Allocate kfifo */
1056 if (kfifo_alloc(&gb
->kfifo_lat
, kfifo_depth
* sizeof(u32
),
1061 /* Fork worker thread */
1062 mutex_init(&gb
->mutex
);
1063 gb
->task
= kthread_run(gb_loopback_fn
, gb
, "gb_loopback");
1064 if (IS_ERR(gb
->task
)) {
1065 retval
= PTR_ERR(gb
->task
);
1069 spin_lock_irqsave(&gb_dev
.lock
, flags
);
1071 spin_unlock_irqrestore(&gb_dev
.lock
, flags
);
1073 gb_connection_latency_tag_enable(connection
);
1075 gb_pm_runtime_put_autosuspend(bundle
);
1080 kfifo_free(&gb
->kfifo_lat
);
1082 device_unregister(dev
);
1083 out_connection_disable
:
1084 gb_connection_disable(connection
);
1086 ida_simple_remove(&loopback_ida
, gb
->id
);
1088 debugfs_remove(gb
->file
);
1089 out_connection_destroy
:
1090 gb_connection_destroy(connection
);
1097 static void gb_loopback_disconnect(struct gb_bundle
*bundle
)
1099 struct gb_loopback
*gb
= greybus_get_drvdata(bundle
);
1100 unsigned long flags
;
1103 ret
= gb_pm_runtime_get_sync(bundle
);
1105 gb_pm_runtime_get_noresume(bundle
);
1107 gb_connection_disable(gb
->connection
);
1109 if (!IS_ERR_OR_NULL(gb
->task
))
1110 kthread_stop(gb
->task
);
1112 kfifo_free(&gb
->kfifo_lat
);
1113 gb_connection_latency_tag_disable(gb
->connection
);
1114 debugfs_remove(gb
->file
);
1117 * FIXME: gb_loopback_async_wait_all() is redundant now, as connection
1118 * is disabled at the beginning and so we can't have any more
1119 * incoming/outgoing requests.
1121 gb_loopback_async_wait_all(gb
);
1123 spin_lock_irqsave(&gb_dev
.lock
, flags
);
1125 spin_unlock_irqrestore(&gb_dev
.lock
, flags
);
1127 device_unregister(gb
->dev
);
1128 ida_simple_remove(&loopback_ida
, gb
->id
);
1130 gb_connection_destroy(gb
->connection
);
1134 static const struct greybus_bundle_id gb_loopback_id_table
[] = {
1135 { GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK
) },
1138 MODULE_DEVICE_TABLE(greybus
, gb_loopback_id_table
);
1140 static struct greybus_driver gb_loopback_driver
= {
1142 .probe
= gb_loopback_probe
,
1143 .disconnect
= gb_loopback_disconnect
,
1144 .id_table
= gb_loopback_id_table
,
1147 static int loopback_init(void)
1151 spin_lock_init(&gb_dev
.lock
);
1152 gb_dev
.root
= debugfs_create_dir("gb_loopback", NULL
);
1154 retval
= class_register(&loopback_class
);
1158 retval
= greybus_register(&gb_loopback_driver
);
1160 goto err_unregister
;
1165 class_unregister(&loopback_class
);
1167 debugfs_remove_recursive(gb_dev
.root
);
1170 module_init(loopback_init
);
1172 static void __exit
loopback_exit(void)
1174 debugfs_remove_recursive(gb_dev
.root
);
1175 greybus_deregister(&gb_loopback_driver
);
1176 class_unregister(&loopback_class
);
1177 ida_destroy(&loopback_ida
);
1179 module_exit(loopback_exit
);
1181 MODULE_LICENSE("GPL v2");