Linux 4.13.16: arch/tile/gxio/mpipe.c

/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

/*
 * Implementation of mpipe gxio calls.
 */

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/string.h>

#include <gxio/iorpc_globals.h>
#include <gxio/iorpc_mpipe.h>
#include <gxio/iorpc_mpipe_info.h>
#include <gxio/kiorpc.h>
#include <gxio/mpipe.h>

/* HACK: Avoid pointless "shadow" warnings. */
#define link link_shadow
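
/*
 * Open the hypervisor IORPC device for mPIPE shim "mpipe_index", map its
 * config and fast MMIO regions, and mark every buffer stack slot as
 * unassigned (255).
 */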
int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
{
        char file[32];

        int fd;
        int i;

        if (mpipe_index >= GXIO_MPIPE_INSTANCE_MAX)
                return -EINVAL;

        snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index);
        fd = hv_dev_open((HV_VirtAddr) file, 0);

        context->fd = fd;

        if (fd < 0) {
                if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
                        return fd;
                else
                        return -ENODEV;
        }

        /* Map in the MMIO space. */
        context->mmio_cfg_base = (void __force *)
                iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET,
                              HV_MPIPE_CONFIG_MMIO_SIZE);
        if (context->mmio_cfg_base == NULL)
                goto cfg_failed;

        context->mmio_fast_base = (void __force *)
                iorpc_ioremap(fd, HV_MPIPE_FAST_MMIO_OFFSET,
                              HV_MPIPE_FAST_MMIO_SIZE);
        if (context->mmio_fast_base == NULL)
                goto fast_failed;

        /* Initialize the stacks. */
        for (i = 0; i < 8; i++)
                context->__stacks.stacks[i] = 255;

        context->instance = mpipe_index;

        return 0;

fast_failed:
        iounmap((void __force __iomem *)(context->mmio_cfg_base));
cfg_failed:
        hv_dev_close(context->fd);
        context->fd = -1;
        return -ENODEV;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_init);
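
/*
 * Undo gxio_mpipe_init(): unmap both MMIO regions and close the
 * hypervisor file descriptor.
 */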
int gxio_mpipe_destroy(gxio_mpipe_context_t *context)
{
        iounmap((void __force __iomem *)(context->mmio_cfg_base));
        iounmap((void __force __iomem *)(context->mmio_fast_base));
        return hv_dev_close(context->fd);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_destroy);
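
/* The fixed buffer sizes supported by mPIPE, indexed by buffer_size_enum. */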
static int16_t gxio_mpipe_buffer_sizes[8] =
        { 128, 256, 512, 1024, 1664, 4096, 10368, 16384 };

gxio_mpipe_buffer_size_enum_t gxio_mpipe_buffer_size_to_buffer_size_enum(size_t
                                                                         size)
{
        int i;
        for (i = 0; i < 7; i++)
                if (size <= gxio_mpipe_buffer_sizes[i])
                        break;
        return i;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_to_buffer_size_enum);

size_t gxio_mpipe_buffer_size_enum_to_buffer_size(gxio_mpipe_buffer_size_enum_t
                                                  buffer_size_enum)
{
        if (buffer_size_enum > 7)
                buffer_size_enum = 7;

        return gxio_mpipe_buffer_sizes[buffer_size_enum];
}

EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_enum_to_buffer_size);
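
/*
 * Compute the bytes needed for a buffer stack that holds "buffers"
 * entries, at 12 entries per cache line.
 */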
size_t gxio_mpipe_calc_buffer_stack_bytes(unsigned long buffers)
{
        const int BUFFERS_PER_LINE = 12;

        /* Count the number of cachelines. */
        unsigned long lines =
                (buffers + BUFFERS_PER_LINE - 1) / BUFFERS_PER_LINE;

        /* Convert to bytes. */
        return lines * CHIP_L2_LINE_SIZE();
}

EXPORT_SYMBOL_GPL(gxio_mpipe_calc_buffer_stack_bytes);
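
/*
 * Zero the backing memory, register it with the hypervisor as buffer
 * stack "stack", and remember which stack serves this buffer_size_enum.
 */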
int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context,
                                 unsigned int stack,
                                 gxio_mpipe_buffer_size_enum_t
                                 buffer_size_enum, void *mem, size_t mem_size,
                                 unsigned int mem_flags)
{
        int result;

        memset(mem, 0, mem_size);

        result = gxio_mpipe_init_buffer_stack_aux(context, mem, mem_size,
                                                  mem_flags, stack,
                                                  buffer_size_enum);
        if (result < 0)
                return result;

        /* Save the stack. */
        context->__stacks.stacks[buffer_size_enum] = stack;

        return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_init_buffer_stack);

int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context,
                               unsigned int ring,
                               void *mem, size_t mem_size,
                               unsigned int mem_flags)
{
        return gxio_mpipe_init_notif_ring_aux(context, mem, mem_size,
                                              mem_flags, ring);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_ring);
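
/*
 * Add rings [ring, ring + num_rings) to NotifGroup "group", then set up
 * "num_buckets" buckets, spreading them across the rings round-robin.
 */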
int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t *context,
                                            unsigned int group,
                                            unsigned int ring,
                                            unsigned int num_rings,
                                            unsigned int bucket,
                                            unsigned int num_buckets,
                                            gxio_mpipe_bucket_mode_t mode)
{
        int i;
        int result;

        gxio_mpipe_bucket_info_t bucket_info = { {
                .group = group,
                .mode = mode,
        } };

        gxio_mpipe_notif_group_bits_t bits = { {0} };

        for (i = 0; i < num_rings; i++)
                gxio_mpipe_notif_group_add_ring(&bits, ring + i);

        result = gxio_mpipe_init_notif_group(context, group, bits);
        if (result != 0)
                return result;

        for (i = 0; i < num_buckets; i++) {
                bucket_info.notifring = ring + (i % num_rings);

                result = gxio_mpipe_init_bucket(context, bucket + i,
                                                bucket_info);
                if (result != 0)
                        return result;
        }

        return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_group_and_buckets);

int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
                              unsigned int ring, unsigned int channel,
                              void *mem, size_t mem_size,
                              unsigned int mem_flags)
{
        memset(mem, 0, mem_size);

        return gxio_mpipe_init_edma_ring_aux(context, mem, mem_size, mem_flags,
                                             ring, channel);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_init_edma_ring);

void gxio_mpipe_rules_init(gxio_mpipe_rules_t *rules,
                           gxio_mpipe_context_t *context)
{
        rules->context = context;
        memset(&rules->list, 0, sizeof(rules->list));
}

EXPORT_SYMBOL_GPL(gxio_mpipe_rules_init);
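
/*
 * Start a new classifier rule: pad the list for alignment, verify there
 * is room, fill in default fields, and record the bucket range and the
 * buffer stacks the rule will use.
 */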
int gxio_mpipe_rules_begin(gxio_mpipe_rules_t *rules,
                           unsigned int bucket, unsigned int num_buckets,
                           gxio_mpipe_rules_stacks_t *stacks)
{
        int i;
        int stack = 255;

        gxio_mpipe_rules_list_t *list = &rules->list;

        /* Current rule. */
        gxio_mpipe_rules_rule_t *rule =
                (gxio_mpipe_rules_rule_t *) (list->rules + list->head);

        unsigned int head = list->tail;

        /*
         * Align next rule properly.
         * Note that "dmacs_and_vlans" will also be aligned.
         */
        unsigned int pad = 0;
        while (((head + pad) % __alignof__(gxio_mpipe_rules_rule_t)) != 0)
                pad++;

        /*
         * Verify room.
         * ISSUE: Mark rules as broken on error?
         */
        if (head + pad + sizeof(*rule) >= sizeof(list->rules))
                return GXIO_MPIPE_ERR_RULES_FULL;

        /* Verify num_buckets is a power of 2. */
        if (__builtin_popcount(num_buckets) != 1)
                return GXIO_MPIPE_ERR_RULES_INVALID;

        /* Add padding to previous rule. */
        rule->size += pad;

        /* Start a new rule. */
        list->head = head + pad;

        rule = (gxio_mpipe_rules_rule_t *) (list->rules + list->head);

        /* Default some values. */
        rule->headroom = 2;
        rule->tailroom = 0;
        rule->capacity = 16384;

        /* Save the bucket info. */
        rule->bucket_mask = num_buckets - 1;
        rule->bucket_first = bucket;

        for (i = 8 - 1; i >= 0; i--) {
                int maybe = stacks ? stacks->stacks[i] :
                        rules->context->__stacks.stacks[i];
                if (maybe != 255)
                        stack = maybe;
                rule->stacks.stacks[i] = stack;
        }

        if (stack == 255)
                return GXIO_MPIPE_ERR_RULES_INVALID;

        /* NOTE: Only entries at the end of the array can be 255. */
        for (i = 8 - 1; i > 0; i--) {
                if (rule->stacks.stacks[i] == 255) {
                        rule->stacks.stacks[i] = stack;
                        rule->capacity =
                                gxio_mpipe_buffer_size_enum_to_buffer_size(i - 1);
                }
        }

        rule->size = sizeof(*rule);
        list->tail = list->head + rule->size;

        return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_rules_begin);

int gxio_mpipe_rules_add_channel(gxio_mpipe_rules_t *rules,
                                 unsigned int channel)
{
        gxio_mpipe_rules_list_t *list = &rules->list;

        gxio_mpipe_rules_rule_t *rule =
                (gxio_mpipe_rules_rule_t *) (list->rules + list->head);

        /* Verify channel. */
        if (channel >= 32)
                return GXIO_MPIPE_ERR_RULES_INVALID;

        /* Verify begun. */
        if (list->tail == 0)
                return GXIO_MPIPE_ERR_RULES_EMPTY;

        rule->channel_bits |= (1UL << channel);

        return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_rules_add_channel);

int gxio_mpipe_rules_set_headroom(gxio_mpipe_rules_t *rules, uint8_t headroom)
{
        gxio_mpipe_rules_list_t *list = &rules->list;

        gxio_mpipe_rules_rule_t *rule =
                (gxio_mpipe_rules_rule_t *) (list->rules + list->head);

        /* Verify begun. */
        if (list->tail == 0)
                return GXIO_MPIPE_ERR_RULES_EMPTY;

        rule->headroom = headroom;

        return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_rules_set_headroom);
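
/* Hand the accumulated rule list to the hypervisor classifier. */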
int gxio_mpipe_rules_commit(gxio_mpipe_rules_t *rules)
{
        gxio_mpipe_rules_list_t *list = &rules->list;
        unsigned int size =
                offsetof(gxio_mpipe_rules_list_t, rules) + list->tail;
        return gxio_mpipe_commit_rules(rules->context, list, size);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_rules_commit);
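
/*
 * Fill in the iqueue software state, write the initial tail into the
 * ring memory, and register the memory as a NotifRing.
 */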
int gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t *iqueue,
                           gxio_mpipe_context_t *context,
                           unsigned int ring,
                           void *mem, size_t mem_size, unsigned int mem_flags)
{
        /* The init call below will verify that "mem_size" is legal. */
        unsigned int num_entries = mem_size / sizeof(gxio_mpipe_idesc_t);

        iqueue->context = context;
        iqueue->idescs = (gxio_mpipe_idesc_t *)mem;
        iqueue->ring = ring;
        iqueue->num_entries = num_entries;
        iqueue->mask_num_entries = num_entries - 1;
        iqueue->log2_num_entries = __builtin_ctz(num_entries);
        iqueue->head = 1;
#ifdef __BIG_ENDIAN__
        iqueue->swapped = 0;
#endif

        /* Initialize the "tail". */
        __gxio_mmio_write(mem, iqueue->head);

        return gxio_mpipe_init_notif_ring(context, ring, mem, mem_size,
                                          mem_flags);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init);
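
/*
 * Register the memory as an eDMA ring for "channel", then point the
 * software DMA queue at the ring's post region, which reports how many
 * commands have completed.
 */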
int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
                           gxio_mpipe_context_t *context,
                           unsigned int ering,
                           unsigned int channel,
                           void *mem, unsigned int mem_size,
                           unsigned int mem_flags)
{
        /* The init call below will verify that "mem_size" is legal. */
        unsigned int num_entries = mem_size / sizeof(gxio_mpipe_edesc_t);

        /* Offset used to read number of completed commands. */
        MPIPE_EDMA_POST_REGION_ADDR_t offset;

        int result = gxio_mpipe_init_edma_ring(context, ering, channel,
                                               mem, mem_size, mem_flags);
        if (result < 0)
                return result;

        memset(equeue, 0, sizeof(*equeue));

        offset.word = 0;
        offset.region =
                MPIPE_MMIO_ADDR__REGION_VAL_EDMA -
                MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
        offset.ring = ering;

        __gxio_dma_queue_init(&equeue->dma_queue,
                              context->mmio_fast_base + offset.word,
                              num_entries);
        equeue->edescs = mem;
        equeue->mask_num_entries = num_entries - 1;
        equeue->log2_num_entries = __builtin_ctz(num_entries);
        equeue->context = context;
        equeue->ering = ering;
        equeue->channel = channel;

        return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_equeue_init);
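
/* Pass the wall-clock time and the current cycle count to the hypervisor. */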
int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
                             const struct timespec64 *ts)
{
        cycles_t cycles = get_cycles();
        return gxio_mpipe_set_timestamp_aux(context, (uint64_t)ts->tv_sec,
                                            (uint64_t)ts->tv_nsec,
                                            (uint64_t)cycles);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_set_timestamp);
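
/* Read the timestamp from the hypervisor, then adjust it backwards by
 * the cycles that elapsed since the call was issued.
 */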
int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
                             struct timespec64 *ts)
{
        int ret;
        cycles_t cycles_prev, cycles_now, clock_rate;
        cycles_prev = get_cycles();
        ret = gxio_mpipe_get_timestamp_aux(context, (uint64_t *)&ts->tv_sec,
                                           (uint64_t *)&ts->tv_nsec,
                                           (uint64_t *)&cycles_now);
        if (ret < 0) {
                return ret;
        }

        clock_rate = get_clock_rate();
        ts->tv_nsec -= (cycles_now - cycles_prev) * 1000000000LL / clock_rate;
        if (ts->tv_nsec < 0) {
                ts->tv_nsec += 1000000000LL;
                ts->tv_sec -= 1;
        }
        return ret;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_get_timestamp);

int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta)
{
        return gxio_mpipe_adjust_timestamp_aux(context, delta);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_adjust_timestamp);

/* Get our internal context used for link name access. This context is
 * special in that it is not associated with an mPIPE service domain.
 */
static gxio_mpipe_context_t *_gxio_get_link_context(void)
{
        static gxio_mpipe_context_t context;
        static gxio_mpipe_context_t *contextp;
        static int tried_open = 0;
        static DEFINE_MUTEX(mutex);

        mutex_lock(&mutex);

        if (!tried_open) {
                int i = 0;
                tried_open = 1;

                /*
                 * "4" here is the maximum possible number of mPIPE shims; it's
                 * an exaggeration but we shouldn't ever go beyond 2 anyway.
                 */
                for (i = 0; i < 4; i++) {
                        char file[80];

                        snprintf(file, sizeof(file), "mpipe/%d/iorpc_info", i);
                        context.fd = hv_dev_open((HV_VirtAddr) file, 0);
                        if (context.fd < 0)
                                continue;

                        contextp = &context;
                        break;
                }
        }

        mutex_unlock(&mutex);

        return contextp;
}
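
/* Return the mPIPE instance that provides "link_name", using the shared
 * link-name context.
 */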
int gxio_mpipe_link_instance(const char *link_name)
{
        _gxio_mpipe_link_name_t name;
        gxio_mpipe_context_t *context = _gxio_get_link_context();

        if (!context)
                return GXIO_ERR_NO_DEVICE;

        if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
                return GXIO_ERR_NO_DEVICE;

        return gxio_mpipe_info_instance_aux(context, name);
}
EXPORT_SYMBOL_GPL(gxio_mpipe_link_instance);

int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
{
        int rv;
        _gxio_mpipe_link_name_t name;
        _gxio_mpipe_link_mac_t mac;

        gxio_mpipe_context_t *context = _gxio_get_link_context();
        if (!context)
                return GXIO_ERR_NO_DEVICE;

        rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
        if (rv >= 0) {
                if (strscpy(link_name, name.name, sizeof(name.name)) < 0)
                        return GXIO_ERR_INVAL_MEMORY_SIZE;
                memcpy(link_mac, mac.mac, sizeof(mac.mac));
        }

        return rv;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_link_enumerate_mac);
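
/* Open "link_name" via the hypervisor; the return value packs the
 * channel in the upper bits and the MAC index in the low 8 bits.
 */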
int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
                         gxio_mpipe_context_t *context, const char *link_name,
                         unsigned int flags)
{
        _gxio_mpipe_link_name_t name;
        int rv;

        if (strscpy(name.name, link_name, sizeof(name.name)) < 0)
                return GXIO_ERR_NO_DEVICE;

        rv = gxio_mpipe_link_open_aux(context, name, flags);
        if (rv < 0)
                return rv;

        link->context = context;
        link->channel = rv >> 8;
        link->mac = rv & 0xFF;

        return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_link_open);

int gxio_mpipe_link_close(gxio_mpipe_link_t *link)
{
        return gxio_mpipe_link_close_aux(link->context, link->mac);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_link_close);

int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
                             int64_t val)
{
        return gxio_mpipe_link_set_attr_aux(link->context, link->mac, attr,
                                            val);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_link_set_attr);