// SPDX-License-Identifier: GPL-2.0
/*
 * Loopback bridge driver for the Greybus loopback module.
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/sizes.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list_sort.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>

#include <asm/div64.h>

#include "greybus.h"
#include "connection.h"

#define NSEC_PER_DAY 86400000000000ULL

struct gb_loopback_stats {
	u32 min;
	u32 max;
	u64 sum;
	u32 count;
};

struct gb_loopback_device {
	struct dentry *root;
	u32 count;
	size_t size_max;

	/* We need to take a lock in atomic context */
	spinlock_t lock;
	wait_queue_head_t wq;
};

static struct gb_loopback_device gb_dev;

struct gb_loopback_async_operation {
	struct gb_loopback *gb;
	struct gb_operation *operation;
	ktime_t ts;
	int (*completion)(struct gb_loopback_async_operation *op_async);
};

struct gb_loopback {
	struct gb_connection *connection;

	struct dentry *file;
	struct kfifo kfifo_lat;
	struct mutex mutex;
	struct task_struct *task;
	struct device *dev;
	wait_queue_head_t wq;
	wait_queue_head_t wq_completion;
	atomic_t outstanding_operations;

	/* Per connection stats */
	ktime_t ts;
	struct gb_loopback_stats latency;
	struct gb_loopback_stats throughput;
	struct gb_loopback_stats requests_per_second;
	struct gb_loopback_stats apbridge_unipro_latency;
	struct gb_loopback_stats gbphy_firmware_latency;

	int type;
	int async;
	int id;
	u32 size;
	u32 iteration_max;
	u32 iteration_count;
	int us_wait;
	u32 error;
	u32 requests_completed;
	u32 requests_timedout;
	u32 timeout;
	u32 jiffy_timeout;
	u32 timeout_min;
	u32 timeout_max;
	u32 outstanding_operations_max;
	u64 elapsed_nsecs;
	u32 apbridge_latency_ts;
	u32 gbphy_latency_ts;

	u32 send_count;
};

static struct class loopback_class = {
	.name		= "gb_loopback",
	.owner		= THIS_MODULE,
};
static DEFINE_IDA(loopback_ida);

/* Min/max values in jiffies */
#define GB_LOOPBACK_TIMEOUT_MIN		1
#define GB_LOOPBACK_TIMEOUT_MAX		10000

#define GB_LOOPBACK_FIFO_DEFAULT	8192

static unsigned int kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
module_param(kfifo_depth, uint, 0444);

/* Maximum size of any one send data buffer we support */
#define MAX_PACKET_SIZE (PAGE_SIZE * 2)

#define GB_LOOPBACK_US_WAIT_MAX		1000000

/* interface sysfs attributes */
#define gb_loopback_ro_attr(field)				\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%u\n", gb->field);			\
}								\
static DEVICE_ATTR_RO(field)

#define gb_loopback_ro_stats_attr(name, field, type)		\
static ssize_t name##_##field##_show(struct device *dev,	\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	/* Report 0 for min and max if no transfer succeeded */\
	if (!gb->requests_completed)				\
		return sprintf(buf, "0\n");			\
	return sprintf(buf, "%" #type "\n", gb->name.field);	\
}								\
static DEVICE_ATTR_RO(name##_##field)

#define gb_loopback_ro_avg_attr(name)				\
static ssize_t name##_avg_show(struct device *dev,		\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback_stats *stats;			\
	struct gb_loopback *gb;					\
	u64 avg, rem;						\
	u32 count;						\
	gb = dev_get_drvdata(dev);				\
	stats = &gb->name;					\
	count = stats->count ? stats->count : 1;		\
	avg = stats->sum + count / 2000000; /* round closest */	\
	rem = do_div(avg, count);				\
	rem *= 1000000;						\
	do_div(rem, count);					\
	return sprintf(buf, "%llu.%06u\n", avg, (u32)rem);	\
}								\
static DEVICE_ATTR_RO(name##_avg)

#define gb_loopback_stats_attrs(field)				\
	gb_loopback_ro_stats_attr(field, min, u);		\
	gb_loopback_ro_stats_attr(field, max, u);		\
	gb_loopback_ro_avg_attr(field)

#define gb_loopback_attr(field, type)				\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%" #type "\n", gb->field);		\
}								\
static ssize_t field##_store(struct device *dev,		\
			    struct device_attribute *attr,	\
			    const char *buf,			\
			    size_t len)				\
{								\
	int ret;						\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	mutex_lock(&gb->mutex);					\
	ret = sscanf(buf, "%"#type, &gb->field);		\
	if (ret != 1)						\
		len = -EINVAL;					\
	else							\
		gb_loopback_check_attr(gb);			\
	mutex_unlock(&gb->mutex);				\
	return len;						\
}								\
static DEVICE_ATTR_RW(field)

#define gb_dev_loopback_ro_attr(field, conn)			\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%u\n", gb->field);			\
}								\
static DEVICE_ATTR_RO(field)

#define gb_dev_loopback_rw_attr(field, type)			\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%" #type "\n", gb->field);		\
}								\
static ssize_t field##_store(struct device *dev,		\
			    struct device_attribute *attr,	\
			    const char *buf,			\
			    size_t len)				\
{								\
	int ret;						\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	mutex_lock(&gb->mutex);					\
	ret = sscanf(buf, "%"#type, &gb->field);		\
	if (ret != 1)						\
		len = -EINVAL;					\
	else							\
		gb_loopback_check_attr(gb);			\
	mutex_unlock(&gb->mutex);				\
	return len;						\
}								\
static DEVICE_ATTR_RW(field)

static void gb_loopback_reset_stats(struct gb_loopback *gb);
static void gb_loopback_check_attr(struct gb_loopback *gb)
{
	if (gb->us_wait > GB_LOOPBACK_US_WAIT_MAX)
		gb->us_wait = GB_LOOPBACK_US_WAIT_MAX;
	if (gb->size > gb_dev.size_max)
		gb->size = gb_dev.size_max;
	gb->requests_timedout = 0;
	gb->requests_completed = 0;
	gb->iteration_count = 0;
	gb->send_count = 0;
	gb->error = 0;

	if (kfifo_depth < gb->iteration_max) {
		dev_warn(gb->dev,
			 "cannot log bytes %u kfifo_depth %u\n",
			 gb->iteration_max, kfifo_depth);
	}
	kfifo_reset_out(&gb->kfifo_lat);

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_TRANSFER:
	case GB_LOOPBACK_TYPE_SINK:
		gb->jiffy_timeout = usecs_to_jiffies(gb->timeout);
		if (!gb->jiffy_timeout)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MIN;
		else if (gb->jiffy_timeout > GB_LOOPBACK_TIMEOUT_MAX)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MAX;
		gb_loopback_reset_stats(gb);
		wake_up(&gb->wq);
		break;
	default:
		gb->type = 0;
		break;
	}
}

/* Time to send and receive one message */
gb_loopback_stats_attrs(latency);
/* Number of requests sent per second on this cport */
gb_loopback_stats_attrs(requests_per_second);
/* Quantity of data sent and received on this cport */
gb_loopback_stats_attrs(throughput);
/* Latency across the UniPro link from APBridge's perspective */
gb_loopback_stats_attrs(apbridge_unipro_latency);
/* Firmware induced overhead in the GPBridge */
gb_loopback_stats_attrs(gbphy_firmware_latency);

/* Number of errors encountered during loop */
gb_loopback_ro_attr(error);
/* Number of requests successfully completed async */
gb_loopback_ro_attr(requests_completed);
/* Number of requests timed out async */
gb_loopback_ro_attr(requests_timedout);
/* Timeout minimum in useconds */
gb_loopback_ro_attr(timeout_min);
/* Timeout maximum in useconds */
gb_loopback_ro_attr(timeout_max);

/*
 * Type of loopback message to send based on protocol type definitions
 * 0 => Don't send message
 * 2 => Send ping message continuously (message without payload)
 * 3 => Send transfer message continuously (message with payload,
 *	payload returned in response)
 * 4 => Send a sink message (message with payload, no payload in response)
 */
gb_dev_loopback_rw_attr(type, d);
/* Size of transfer message payload: 0-4096 bytes */
gb_dev_loopback_rw_attr(size, u);
/* Time to wait between two messages: 0-1000 ms */
gb_dev_loopback_rw_attr(us_wait, d);
/* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
gb_dev_loopback_rw_attr(iteration_max, u);
/* The current index of the for (i = 0; i < iteration_max; i++) loop */
gb_dev_loopback_ro_attr(iteration_count, false);
/* A flag to indicate synchronous or asynchronous operations */
gb_dev_loopback_rw_attr(async, u);
/* Timeout of an individual asynchronous request */
gb_dev_loopback_rw_attr(timeout, u);
/* Maximum number of in-flight operations before back-off */
gb_dev_loopback_rw_attr(outstanding_operations_max, u);

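/*
 * Illustrative user-space usage (not part of this driver; the device
 * index shown is an example and depends on the id allocated in
 * gb_loopback_probe()):
 *
 *	echo 100 > /sys/class/gb_loopback/gb_loopback0/iteration_max
 *	echo 128 > /sys/class/gb_loopback/gb_loopback0/size
 *	echo 3 > /sys/class/gb_loopback/gb_loopback0/type
 *	cat /sys/class/gb_loopback/gb_loopback0/latency_avg
 *
 * Writing a supported value to "type" wakes the worker thread and starts
 * a test run; results are exposed through the attributes grouped below.
 */
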
static struct attribute *loopback_attrs[] = {
	&dev_attr_latency_min.attr,
	&dev_attr_latency_max.attr,
	&dev_attr_latency_avg.attr,
	&dev_attr_requests_per_second_min.attr,
	&dev_attr_requests_per_second_max.attr,
	&dev_attr_requests_per_second_avg.attr,
	&dev_attr_throughput_min.attr,
	&dev_attr_throughput_max.attr,
	&dev_attr_throughput_avg.attr,
	&dev_attr_apbridge_unipro_latency_min.attr,
	&dev_attr_apbridge_unipro_latency_max.attr,
	&dev_attr_apbridge_unipro_latency_avg.attr,
	&dev_attr_gbphy_firmware_latency_min.attr,
	&dev_attr_gbphy_firmware_latency_max.attr,
	&dev_attr_gbphy_firmware_latency_avg.attr,
	&dev_attr_type.attr,
	&dev_attr_size.attr,
	&dev_attr_us_wait.attr,
	&dev_attr_iteration_count.attr,
	&dev_attr_iteration_max.attr,
	&dev_attr_async.attr,
	&dev_attr_error.attr,
	&dev_attr_requests_completed.attr,
	&dev_attr_requests_timedout.attr,
	&dev_attr_timeout.attr,
	&dev_attr_outstanding_operations_max.attr,
	&dev_attr_timeout_min.attr,
	&dev_attr_timeout_max.attr,
	NULL,
};
ATTRIBUTE_GROUPS(loopback);

static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error);

static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
{
	do_div(elapsed_nsecs, NSEC_PER_USEC);
	return elapsed_nsecs;
}

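/*
 * Raw latency helper: returns t2 - t1 when the clock moved forward and a
 * NSEC_PER_DAY based fallback otherwise, so callers never see an
 * underflowed u64 difference.
 */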
static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
{
	if (t2 > t1)
		return t2 - t1;
	else
		return NSEC_PER_DAY - t2 + t1;
}

static u64 gb_loopback_calc_latency(ktime_t ts, ktime_t te)
{
	return __gb_loopback_calc_latency(ktime_to_ns(ts), ktime_to_ns(te));
}

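/*
 * Synchronous request path: the operation is created, sent and waited on
 * inline; the wall-clock time from just before creation until the
 * response has been handled is stored in gb->elapsed_nsecs for the stats
 * code below.
 */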
static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
				      void *request, int request_size,
				      void *response, int response_size)
{
	struct gb_operation *operation;
	ktime_t ts, te;
	int ret;

	ts = ktime_get();
	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync(operation);
	if (ret) {
		dev_err(&gb->connection->bundle->dev,
			"synchronous operation failed: %d\n", ret);
		goto out_put_operation;
	} else {
		if (response_size == operation->response->payload_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		} else {
			dev_err(&gb->connection->bundle->dev,
				"response size %zu expected %d\n",
				operation->response->payload_size,
				response_size);
			ret = -EINVAL;
			goto out_put_operation;
		}
	}

	te = ktime_get();

	/* Calculate the total time the message took */
	gb->elapsed_nsecs = gb_loopback_calc_latency(ts, te);

out_put_operation:
	gb_operation_put(operation);

	return ret;
}

static void gb_loopback_async_wait_all(struct gb_loopback *gb)
{
	wait_event(gb->wq_completion,
		   !atomic_read(&gb->outstanding_operations));
}

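/*
 * Completion handler for asynchronous loopback operations: folds the
 * per-operation latency into the connection stats, counts errors and
 * timeouts, then drops the outstanding-operation count and wakes anyone
 * blocked in gb_loopback_async_wait_all() or the worker thread.
 */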
static void gb_loopback_async_operation_callback(struct gb_operation *operation)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_loopback *gb;
	ktime_t te;
	int result;

	te = ktime_get();
	result = gb_operation_result(operation);
	op_async = gb_operation_get_data(operation);
	gb = op_async->gb;

	mutex_lock(&gb->mutex);

	if (!result && op_async->completion)
		result = op_async->completion(op_async);

	if (!result) {
		gb->elapsed_nsecs = gb_loopback_calc_latency(op_async->ts, te);
	} else {
		gb->error++;
		if (result == -ETIMEDOUT)
			gb->requests_timedout++;
	}

	gb->iteration_count++;
	gb_loopback_calculate_stats(gb, result);

	mutex_unlock(&gb->mutex);

	dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
		operation->id);

	/* Wake up waiters */
	atomic_dec(&op_async->gb->outstanding_operations);
	wake_up(&gb->wq_completion);

	/* Release resources */
	gb_operation_put(operation);
	kfree(op_async);
}

static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
				       void *request, int request_size,
				       int response_size,
				       void *completion)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_operation *operation;
	int ret;

	op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
	if (!op_async)
		return -ENOMEM;

	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation) {
		kfree(op_async);
		return -ENOMEM;
	}

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	gb_operation_set_data(operation, op_async);

	op_async->gb = gb;
	op_async->operation = operation;
	op_async->completion = completion;

	op_async->ts = ktime_get();

	atomic_inc(&gb->outstanding_operations);
	ret = gb_operation_request_send(operation,
					gb_loopback_async_operation_callback,
					jiffies_to_msecs(gb->jiffy_timeout),
					GFP_KERNEL);
	if (ret) {
		atomic_dec(&gb->outstanding_operations);
		gb_operation_put(operation);
		kfree(op_async);
	}
	return ret;
}

static int gb_loopback_sync_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
					    request, len + sizeof(*request),
					    NULL, 0);
	kfree(request);
	return retval;
}

static int gb_loopback_sync_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	int retval;

	gb->apbridge_latency_ts = 0;
	gb->gbphy_latency_ts = 0;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	response = kmalloc(len + sizeof(*response), GFP_KERNEL);
	if (!response) {
		kfree(request);
		return -ENOMEM;
	}

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
					    request, len + sizeof(*request),
					    response, len + sizeof(*response));
	if (retval)
		goto gb_error;

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match\n");
		retval = -EREMOTEIO;
	}
	gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
	gb->gbphy_latency_ts = (u32)__le32_to_cpu(response->reserved1);

gb_error:
	kfree(request);
	kfree(response);

	return retval;
}

static int gb_loopback_sync_ping(struct gb_loopback *gb)
{
	return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
					  NULL, 0, NULL, 0);
}

static int gb_loopback_async_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_SINK,
					     request, len + sizeof(*request),
					     0, NULL);
	kfree(request);
	return retval;
}

static int gb_loopback_async_transfer_complete(
				struct gb_loopback_async_operation *op_async)
{
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	size_t len;
	int retval = 0;

	gb = op_async->gb;
	operation = op_async->operation;
	request = operation->request->payload;
	response = operation->response->payload;
	len = le32_to_cpu(request->len);

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match operation id %d\n",
			operation->id);
		retval = -EREMOTEIO;
	} else {
		gb->apbridge_latency_ts =
			(u32)__le32_to_cpu(response->reserved0);
		gb->gbphy_latency_ts =
			(u32)__le32_to_cpu(response->reserved1);
	}

	return retval;
}

static int gb_loopback_async_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval, response_len;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	response_len = sizeof(struct gb_loopback_transfer_response);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_TRANSFER,
					     request, len + sizeof(*request),
					     len + response_len,
					     gb_loopback_async_transfer_complete);
	if (retval)
		goto gb_error;

gb_error:
	kfree(request);
	return retval;
}

static int gb_loopback_async_ping(struct gb_loopback *gb)
{
	return gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_PING,
					   NULL, 0, 0, NULL);
}

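/*
 * Incoming request handler: services loopback requests initiated by the
 * remote end of the connection.  Ping and sink requests are simply
 * acknowledged; transfer requests echo the received payload back after
 * validating its length against gb_dev.size_max.
 */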
static int gb_loopback_request_handler(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	struct device *dev = &connection->bundle->dev;
	size_t len;

	/* By convention, the AP initiates the version operation */
	switch (operation->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_SINK:
		return 0;
	case GB_LOOPBACK_TYPE_TRANSFER:
		if (operation->request->payload_size < sizeof(*request)) {
			dev_err(dev, "transfer request too small (%zu < %zu)\n",
				operation->request->payload_size,
				sizeof(*request));
			return -EINVAL;	/* -EMSGSIZE */
		}
		request = operation->request->payload;
		len = le32_to_cpu(request->len);
		if (len > gb_dev.size_max) {
			dev_err(dev, "transfer request too large (%zu > %zu)\n",
				len, gb_dev.size_max);
			return -EINVAL;
		}

		if (!gb_operation_response_alloc(operation,
				len + sizeof(*response), GFP_KERNEL)) {
			dev_err(dev, "error allocating response\n");
			return -ENOMEM;
		}
		response = operation->response->payload;
		response->len = cpu_to_le32(len);
		if (len)
			memcpy(response->data, request->data, len);

		return 0;
	default:
		dev_err(dev, "unsupported request: %u\n", operation->type);
		return -EINVAL;
	}
}

static void gb_loopback_reset_stats(struct gb_loopback *gb)
{
	struct gb_loopback_stats reset = {
		.min = U32_MAX,
	};

	/* Reset per-connection stats */
	memcpy(&gb->latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->throughput, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->requests_per_second, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->apbridge_unipro_latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->gbphy_firmware_latency, &reset,
	       sizeof(struct gb_loopback_stats));

	/* Should be initialized at least once per transaction set */
	gb->apbridge_latency_ts = 0;
	gb->gbphy_latency_ts = 0;
	gb->ts = ktime_set(0, 0);
}

static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
{
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
	stats->sum += val;
	stats->count++;
}

static void gb_loopback_update_stats_window(struct gb_loopback_stats *stats,
					    u64 val, u32 count)
{
	stats->sum += val;
	stats->count += count;

	do_div(val, count);
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
}

static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
{
	u64 req = gb->requests_completed * USEC_PER_SEC;

	gb_loopback_update_stats_window(&gb->requests_per_second, req, latency);
}

static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
{
	u64 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
		break;
	case GB_LOOPBACK_TYPE_SINK:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  gb->size;
		break;
	case GB_LOOPBACK_TYPE_TRANSFER:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  sizeof(struct gb_loopback_transfer_response) +
				  gb->size * 2;
		break;
	default:
		return;
	}

	aggregate_size *= gb->requests_completed;
	aggregate_size *= USEC_PER_SEC;
	gb_loopback_update_stats_window(&gb->throughput, aggregate_size,
					latency);
}

static void gb_loopback_calculate_latency_stats(struct gb_loopback *gb)
{
	u32 lat;

	/* Express latency in terms of microseconds */
	lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);

	/* Log latency statistic */
	gb_loopback_update_stats(&gb->latency, lat);

	/* Raw latency log on a per thread basis */
	kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));

	/* Log the firmware supplied latency values */
	gb_loopback_update_stats(&gb->apbridge_unipro_latency,
				 gb->apbridge_latency_ts);
	gb_loopback_update_stats(&gb->gbphy_firmware_latency,
				 gb->gbphy_latency_ts);
}

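/*
 * Per-iteration bookkeeping: latency is logged for every successful
 * request, while throughput and requests-per-second are only folded in
 * once at least a second has elapsed (or the run has finished), using
 * that elapsed window as the divisor.
 */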
static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
{
	u64 nlat;
	u32 lat;
	ktime_t te;

	if (!error) {
		gb->requests_completed++;
		gb_loopback_calculate_latency_stats(gb);
	}

	te = ktime_get();
	nlat = gb_loopback_calc_latency(gb->ts, te);
	if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
		lat = gb_loopback_nsec_to_usec_latency(nlat);

		gb_loopback_throughput_update(gb, lat);
		gb_loopback_requests_update(gb, lat);

		if (gb->iteration_count != gb->iteration_max) {
			gb->ts = te;
			gb->requests_completed = 0;
		}
	}
}

static void gb_loopback_async_wait_to_send(struct gb_loopback *gb)
{
	if (!(gb->async && gb->outstanding_operations_max))
		return;
	wait_event_interruptible(gb->wq_completion,
				 (atomic_read(&gb->outstanding_operations) <
				  gb->outstanding_operations_max) ||
				  kthread_should_stop());
}

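/*
 * Worker thread: sleeps until user space selects a test type, then issues
 * ping/transfer/sink operations (synchronously or asynchronously) until
 * send_count reaches iteration_max, throttling on us_wait and on the
 * outstanding_operations_max limit along the way.
 */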
static int gb_loopback_fn(void *data)
{
	int error = 0;
	int us_wait = 0;
	int type;
	int ret;
	u32 size;

	struct gb_loopback *gb = data;
	struct gb_bundle *bundle = gb->connection->bundle;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	while (1) {
		if (!gb->type) {
			gb_pm_runtime_put_autosuspend(bundle);
			wait_event_interruptible(gb->wq, gb->type ||
						 kthread_should_stop());
			ret = gb_pm_runtime_get_sync(bundle);
			if (ret)
				return ret;
		}

		if (kthread_should_stop())
			break;

		/* Limit the maximum number of in-flight async operations */
		gb_loopback_async_wait_to_send(gb);
		if (kthread_should_stop())
			break;

		mutex_lock(&gb->mutex);

		/* Optionally terminate */
		if (gb->send_count == gb->iteration_max) {
			mutex_unlock(&gb->mutex);

			/* Wait for synchronous and asynchronous completion */
			gb_loopback_async_wait_all(gb);

			/* Mark complete unless user-space has poked us */
			mutex_lock(&gb->mutex);
			if (gb->iteration_count == gb->iteration_max) {
				gb->type = 0;
				gb->send_count = 0;
				sysfs_notify(&gb->dev->kobj, NULL,
					     "iteration_count");
				dev_dbg(&bundle->dev, "load test complete\n");
			} else {
				dev_dbg(&bundle->dev,
					"continuing on with new test set\n");
			}
			mutex_unlock(&gb->mutex);
			continue;
		}
		size = gb->size;
		us_wait = gb->us_wait;
		type = gb->type;
		if (ktime_to_ns(gb->ts) == 0)
			gb->ts = ktime_get();

		/* Else operations to perform */
		if (gb->async) {
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_async_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_async_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_async_sink(gb, size);

			if (error) {
				gb->error++;
				gb->iteration_count++;
			}
		} else {
			/* We are effectively single threaded here */
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_sync_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_sync_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_sync_sink(gb, size);

			if (error)
				gb->error++;
			gb->iteration_count++;
			gb_loopback_calculate_stats(gb, !!error);
		}
		gb->send_count++;
		mutex_unlock(&gb->mutex);

		if (us_wait) {
			if (us_wait < 20000)
				usleep_range(us_wait, us_wait + 100);
			else
				msleep(us_wait / 1000);
		}
	}

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;
}

static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
						 struct kfifo *kfifo,
						 struct mutex *mutex)
{
	u32 latency;
	int retval;

	if (kfifo_len(kfifo) == 0) {
		retval = -EAGAIN;
		goto done;
	}

	mutex_lock(mutex);
	retval = kfifo_out(kfifo, &latency, sizeof(latency));
	if (retval > 0) {
		seq_printf(s, "%u", latency);
		retval = 0;
	}
	mutex_unlock(mutex);
done:
	return retval;
}

static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
{
	struct gb_loopback *gb = s->private;

	return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
						     &gb->mutex);
}
DEFINE_SHOW_ATTRIBUTE(gb_loopback_dbgfs_latency);

#define DEBUGFS_NAMELEN 32

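/*
 * Bundle probe: sets up the loopback connection, the per-connection
 * debugfs raw-latency file, the gb_loopback%d class device carrying the
 * sysfs attributes above, the latency kfifo and the worker thread; the
 * error path unwinds these in reverse order.
 */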
static int gb_loopback_probe(struct gb_bundle *bundle,
			     const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_loopback *gb;
	struct device *dev;
	int retval;
	char name[DEBUGFS_NAMELEN];
	unsigned long flags;

	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOOPBACK)
		return -ENODEV;

	gb = kzalloc(sizeof(*gb), GFP_KERNEL);
	if (!gb)
		return -ENOMEM;

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  gb_loopback_request_handler);
	if (IS_ERR(connection)) {
		retval = PTR_ERR(connection);
		goto out_kzalloc;
	}

	gb->connection = connection;
	greybus_set_drvdata(bundle, gb);

	init_waitqueue_head(&gb->wq);
	init_waitqueue_head(&gb->wq_completion);
	atomic_set(&gb->outstanding_operations, 0);
	gb_loopback_reset_stats(gb);

	/* Reported values to user-space for min/max timeouts */
	gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
	gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);

	if (!gb_dev.count) {
		/* Calculate maximum payload */
		gb_dev.size_max = gb_operation_get_payload_size_max(connection);
		if (gb_dev.size_max <=
			sizeof(struct gb_loopback_transfer_request)) {
			retval = -EINVAL;
			goto out_connection_destroy;
		}
		gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
	}

	/* Create per-connection sysfs and debugfs data-points */
	snprintf(name, sizeof(name), "raw_latency_%s",
		 dev_name(&connection->bundle->dev));
	gb->file = debugfs_create_file(name, S_IFREG | 0444, gb_dev.root, gb,
				       &gb_loopback_dbgfs_latency_fops);

	gb->id = ida_simple_get(&loopback_ida, 0, 0, GFP_KERNEL);
	if (gb->id < 0) {
		retval = gb->id;
		goto out_debugfs_remove;
	}

	retval = gb_connection_enable(connection);
	if (retval)
		goto out_ida_remove;

	dev = device_create_with_groups(&loopback_class,
					&connection->bundle->dev,
					MKDEV(0, 0), gb, loopback_groups,
					"gb_loopback%d", gb->id);
	if (IS_ERR(dev)) {
		retval = PTR_ERR(dev);
		goto out_connection_disable;
	}
	gb->dev = dev;

	/* Allocate kfifo */
	if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
			GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_conn;
	}
	/* Fork worker thread */
	mutex_init(&gb->mutex);
	gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
	if (IS_ERR(gb->task)) {
		retval = PTR_ERR(gb->task);
		goto out_kfifo;
	}

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_dev.count++;
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	gb_connection_latency_tag_enable(connection);

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;

out_kfifo:
	kfifo_free(&gb->kfifo_lat);
out_conn:
	device_unregister(dev);
out_connection_disable:
	gb_connection_disable(connection);
out_ida_remove:
	ida_simple_remove(&loopback_ida, gb->id);
out_debugfs_remove:
	debugfs_remove(gb->file);
out_connection_destroy:
	gb_connection_destroy(connection);
out_kzalloc:
	kfree(gb);

	return retval;
}

static void gb_loopback_disconnect(struct gb_bundle *bundle)
{
	struct gb_loopback *gb = greybus_get_drvdata(bundle);
	unsigned long flags;
	int ret;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		gb_pm_runtime_get_noresume(bundle);

	gb_connection_disable(gb->connection);

	if (!IS_ERR_OR_NULL(gb->task))
		kthread_stop(gb->task);

	kfifo_free(&gb->kfifo_lat);
	gb_connection_latency_tag_disable(gb->connection);
	debugfs_remove(gb->file);

	/*
	 * FIXME: gb_loopback_async_wait_all() is redundant now, as connection
	 * is disabled at the beginning and so we can't have any more
	 * incoming/outgoing requests.
	 */
	gb_loopback_async_wait_all(gb);

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_dev.count--;
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	device_unregister(gb->dev);
	ida_simple_remove(&loopback_ida, gb->id);

	gb_connection_destroy(gb->connection);
	kfree(gb);
}

static const struct greybus_bundle_id gb_loopback_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_loopback_id_table);

static struct greybus_driver gb_loopback_driver = {
	.name		= "loopback",
	.probe		= gb_loopback_probe,
	.disconnect	= gb_loopback_disconnect,
	.id_table	= gb_loopback_id_table,
};

static int loopback_init(void)
{
	int retval;

	spin_lock_init(&gb_dev.lock);
	gb_dev.root = debugfs_create_dir("gb_loopback", NULL);

	retval = class_register(&loopback_class);
	if (retval)
		goto err;

	retval = greybus_register(&gb_loopback_driver);
	if (retval)
		goto err_unregister;

	return 0;

err_unregister:
	class_unregister(&loopback_class);
err:
	debugfs_remove_recursive(gb_dev.root);
	return retval;
}
module_init(loopback_init);

static void __exit loopback_exit(void)
{
	debugfs_remove_recursive(gb_dev.root);
	greybus_deregister(&gb_loopback_driver);
	class_unregister(&loopback_class);
	ida_destroy(&loopback_ida);
}
module_exit(loopback_exit);

MODULE_LICENSE("GPL v2");