/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

/*
 * Implementation of mpipe gxio calls.
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>

#include <gxio/iorpc_globals.h>
#include <gxio/iorpc_mpipe.h>
#include <gxio/iorpc_mpipe_info.h>
#include <gxio/kiorpc.h>
#include <gxio/mpipe.h>
29 /* HACK: Avoid pointless "shadow" warnings. */
30 #define link link_shadow
32 int gxio_mpipe_init(gxio_mpipe_context_t
*context
, unsigned int mpipe_index
)
39 snprintf(file
, sizeof(file
), "mpipe/%d/iorpc", mpipe_index
);
40 fd
= hv_dev_open((HV_VirtAddr
) file
, 0);
42 if (fd
>= GXIO_ERR_MIN
&& fd
<= GXIO_ERR_MAX
)
50 /* Map in the MMIO space. */
51 context
->mmio_cfg_base
= (void __force
*)
52 iorpc_ioremap(fd
, HV_MPIPE_CONFIG_MMIO_OFFSET
,
53 HV_MPIPE_CONFIG_MMIO_SIZE
);
54 if (context
->mmio_cfg_base
== NULL
)
57 context
->mmio_fast_base
= (void __force
*)
58 iorpc_ioremap(fd
, HV_MPIPE_FAST_MMIO_OFFSET
,
59 HV_MPIPE_FAST_MMIO_SIZE
);
60 if (context
->mmio_fast_base
== NULL
)
63 /* Initialize the stacks. */
64 for (i
= 0; i
< 8; i
++)
65 context
->__stacks
.stacks
[i
] = 255;
70 iounmap((void __force __iomem
*)(context
->mmio_cfg_base
));
72 hv_dev_close(context
->fd
);
76 EXPORT_SYMBOL_GPL(gxio_mpipe_init
);
78 int gxio_mpipe_destroy(gxio_mpipe_context_t
*context
)
80 iounmap((void __force __iomem
*)(context
->mmio_cfg_base
));
81 iounmap((void __force __iomem
*)(context
->mmio_fast_base
));
82 return hv_dev_close(context
->fd
);
85 EXPORT_SYMBOL_GPL(gxio_mpipe_destroy
);
/* Buffer size (in bytes) for each mPIPE buffer-size enum value. */
static int16_t gxio_mpipe_buffer_sizes[8] =
	{ 128, 256, 512, 1024, 1664, 4096, 10368, 16384 };
90 gxio_mpipe_buffer_size_enum_t
gxio_mpipe_buffer_size_to_buffer_size_enum(size_t
94 for (i
= 0; i
< 7; i
++)
95 if (size
<= gxio_mpipe_buffer_sizes
[i
])
100 EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_to_buffer_size_enum
);
102 size_t gxio_mpipe_buffer_size_enum_to_buffer_size(gxio_mpipe_buffer_size_enum_t
105 if (buffer_size_enum
> 7)
106 buffer_size_enum
= 7;
108 return gxio_mpipe_buffer_sizes
[buffer_size_enum
];
111 EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_enum_to_buffer_size
);
113 size_t gxio_mpipe_calc_buffer_stack_bytes(unsigned long buffers
)
115 const int BUFFERS_PER_LINE
= 12;
117 /* Count the number of cachlines. */
118 unsigned long lines
=
119 (buffers
+ BUFFERS_PER_LINE
- 1) / BUFFERS_PER_LINE
;
121 /* Convert to bytes. */
122 return lines
* CHIP_L2_LINE_SIZE();
125 EXPORT_SYMBOL_GPL(gxio_mpipe_calc_buffer_stack_bytes
);
127 int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t
*context
,
129 gxio_mpipe_buffer_size_enum_t
130 buffer_size_enum
, void *mem
, size_t mem_size
,
131 unsigned int mem_flags
)
135 memset(mem
, 0, mem_size
);
137 result
= gxio_mpipe_init_buffer_stack_aux(context
, mem
, mem_size
,
143 /* Save the stack. */
144 context
->__stacks
.stacks
[buffer_size_enum
] = stack
;
149 EXPORT_SYMBOL_GPL(gxio_mpipe_init_buffer_stack
);
151 int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t
*context
,
153 void *mem
, size_t mem_size
,
154 unsigned int mem_flags
)
156 return gxio_mpipe_init_notif_ring_aux(context
, mem
, mem_size
,
160 EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_ring
);
162 int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t
*context
,
165 unsigned int num_rings
,
167 unsigned int num_buckets
,
168 gxio_mpipe_bucket_mode_t mode
)
173 gxio_mpipe_bucket_info_t bucket_info
= { {
179 gxio_mpipe_notif_group_bits_t bits
= { {0} };
181 for (i
= 0; i
< num_rings
; i
++)
182 gxio_mpipe_notif_group_add_ring(&bits
, ring
+ i
);
184 result
= gxio_mpipe_init_notif_group(context
, group
, bits
);
188 for (i
= 0; i
< num_buckets
; i
++) {
189 bucket_info
.notifring
= ring
+ (i
% num_rings
);
191 result
= gxio_mpipe_init_bucket(context
, bucket
+ i
,
200 EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_group_and_buckets
);
202 int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t
*context
,
203 unsigned int ring
, unsigned int channel
,
204 void *mem
, size_t mem_size
,
205 unsigned int mem_flags
)
207 memset(mem
, 0, mem_size
);
209 return gxio_mpipe_init_edma_ring_aux(context
, mem
, mem_size
, mem_flags
,
213 EXPORT_SYMBOL_GPL(gxio_mpipe_init_edma_ring
);
215 void gxio_mpipe_rules_init(gxio_mpipe_rules_t
*rules
,
216 gxio_mpipe_context_t
*context
)
218 rules
->context
= context
;
219 memset(&rules
->list
, 0, sizeof(rules
->list
));
222 EXPORT_SYMBOL_GPL(gxio_mpipe_rules_init
);
224 int gxio_mpipe_rules_begin(gxio_mpipe_rules_t
*rules
,
225 unsigned int bucket
, unsigned int num_buckets
,
226 gxio_mpipe_rules_stacks_t
*stacks
)
231 gxio_mpipe_rules_list_t
*list
= &rules
->list
;
234 gxio_mpipe_rules_rule_t
*rule
=
235 (gxio_mpipe_rules_rule_t
*) (list
->rules
+ list
->head
);
237 unsigned int head
= list
->tail
;
240 * Align next rule properly.
241 *Note that "dmacs_and_vlans" will also be aligned.
243 unsigned int pad
= 0;
244 while (((head
+ pad
) % __alignof__(gxio_mpipe_rules_rule_t
)) != 0)
249 * ISSUE: Mark rules as broken on error?
251 if (head
+ pad
+ sizeof(*rule
) >= sizeof(list
->rules
))
252 return GXIO_MPIPE_ERR_RULES_FULL
;
254 /* Verify num_buckets is a power of 2. */
255 if (__builtin_popcount(num_buckets
) != 1)
256 return GXIO_MPIPE_ERR_RULES_INVALID
;
258 /* Add padding to previous rule. */
261 /* Start a new rule. */
262 list
->head
= head
+ pad
;
264 rule
= (gxio_mpipe_rules_rule_t
*) (list
->rules
+ list
->head
);
266 /* Default some values. */
269 rule
->capacity
= 16384;
271 /* Save the bucket info. */
272 rule
->bucket_mask
= num_buckets
- 1;
273 rule
->bucket_first
= bucket
;
275 for (i
= 8 - 1; i
>= 0; i
--) {
277 stacks
? stacks
->stacks
[i
] : rules
->context
->__stacks
.
281 rule
->stacks
.stacks
[i
] = stack
;
285 return GXIO_MPIPE_ERR_RULES_INVALID
;
287 /* NOTE: Only entries at the end of the array can be 255. */
288 for (i
= 8 - 1; i
> 0; i
--) {
289 if (rule
->stacks
.stacks
[i
] == 255) {
290 rule
->stacks
.stacks
[i
] = stack
;
292 gxio_mpipe_buffer_size_enum_to_buffer_size(i
-
297 rule
->size
= sizeof(*rule
);
298 list
->tail
= list
->head
+ rule
->size
;
303 EXPORT_SYMBOL_GPL(gxio_mpipe_rules_begin
);
305 int gxio_mpipe_rules_add_channel(gxio_mpipe_rules_t
*rules
,
306 unsigned int channel
)
308 gxio_mpipe_rules_list_t
*list
= &rules
->list
;
310 gxio_mpipe_rules_rule_t
*rule
=
311 (gxio_mpipe_rules_rule_t
*) (list
->rules
+ list
->head
);
313 /* Verify channel. */
315 return GXIO_MPIPE_ERR_RULES_INVALID
;
319 return GXIO_MPIPE_ERR_RULES_EMPTY
;
321 rule
->channel_bits
|= (1UL << channel
);
326 EXPORT_SYMBOL_GPL(gxio_mpipe_rules_add_channel
);
328 int gxio_mpipe_rules_set_headroom(gxio_mpipe_rules_t
*rules
, uint8_t headroom
)
330 gxio_mpipe_rules_list_t
*list
= &rules
->list
;
332 gxio_mpipe_rules_rule_t
*rule
=
333 (gxio_mpipe_rules_rule_t
*) (list
->rules
+ list
->head
);
337 return GXIO_MPIPE_ERR_RULES_EMPTY
;
339 rule
->headroom
= headroom
;
344 EXPORT_SYMBOL_GPL(gxio_mpipe_rules_set_headroom
);
346 int gxio_mpipe_rules_commit(gxio_mpipe_rules_t
*rules
)
348 gxio_mpipe_rules_list_t
*list
= &rules
->list
;
350 offsetof(gxio_mpipe_rules_list_t
, rules
) + list
->tail
;
351 return gxio_mpipe_commit_rules(rules
->context
, list
, size
);
354 EXPORT_SYMBOL_GPL(gxio_mpipe_rules_commit
);
356 int gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t
*iqueue
,
357 gxio_mpipe_context_t
*context
,
359 void *mem
, size_t mem_size
, unsigned int mem_flags
)
361 /* The init call below will verify that "mem_size" is legal. */
362 unsigned int num_entries
= mem_size
/ sizeof(gxio_mpipe_idesc_t
);
364 iqueue
->context
= context
;
365 iqueue
->idescs
= (gxio_mpipe_idesc_t
*)mem
;
367 iqueue
->num_entries
= num_entries
;
368 iqueue
->mask_num_entries
= num_entries
- 1;
369 iqueue
->log2_num_entries
= __builtin_ctz(num_entries
);
371 #ifdef __BIG_ENDIAN__
375 /* Initialize the "tail". */
376 __gxio_mmio_write(mem
, iqueue
->head
);
378 return gxio_mpipe_init_notif_ring(context
, ring
, mem
, mem_size
,
382 EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init
);
384 int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t
*equeue
,
385 gxio_mpipe_context_t
*context
,
386 unsigned int edma_ring_id
,
387 unsigned int channel
,
388 void *mem
, unsigned int mem_size
,
389 unsigned int mem_flags
)
391 /* The init call below will verify that "mem_size" is legal. */
392 unsigned int num_entries
= mem_size
/ sizeof(gxio_mpipe_edesc_t
);
394 /* Offset used to read number of completed commands. */
395 MPIPE_EDMA_POST_REGION_ADDR_t offset
;
397 int result
= gxio_mpipe_init_edma_ring(context
, edma_ring_id
, channel
,
398 mem
, mem_size
, mem_flags
);
402 memset(equeue
, 0, sizeof(*equeue
));
406 MPIPE_MMIO_ADDR__REGION_VAL_EDMA
-
407 MPIPE_MMIO_ADDR__REGION_VAL_IDMA
;
408 offset
.ring
= edma_ring_id
;
410 __gxio_dma_queue_init(&equeue
->dma_queue
,
411 context
->mmio_fast_base
+ offset
.word
,
413 equeue
->edescs
= mem
;
414 equeue
->mask_num_entries
= num_entries
- 1;
415 equeue
->log2_num_entries
= __builtin_ctz(num_entries
);
420 EXPORT_SYMBOL_GPL(gxio_mpipe_equeue_init
);
422 int gxio_mpipe_set_timestamp(gxio_mpipe_context_t
*context
,
423 const struct timespec
*ts
)
425 cycles_t cycles
= get_cycles();
426 return gxio_mpipe_set_timestamp_aux(context
, (uint64_t)ts
->tv_sec
,
427 (uint64_t)ts
->tv_nsec
,
431 int gxio_mpipe_get_timestamp(gxio_mpipe_context_t
*context
,
435 cycles_t cycles_prev
, cycles_now
, clock_rate
;
436 cycles_prev
= get_cycles();
437 ret
= gxio_mpipe_get_timestamp_aux(context
, (uint64_t *)&ts
->tv_sec
,
438 (uint64_t *)&ts
->tv_nsec
,
439 (uint64_t *)&cycles_now
);
444 clock_rate
= get_clock_rate();
445 ts
->tv_nsec
-= (cycles_now
- cycles_prev
) * 1000000000LL / clock_rate
;
446 if (ts
->tv_nsec
< 0) {
447 ts
->tv_nsec
+= 1000000000LL;
453 int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t
*context
, int64_t delta
)
455 return gxio_mpipe_adjust_timestamp_aux(context
, delta
);
458 /* Get our internal context used for link name access. This context is
459 * special in that it is not associated with an mPIPE service domain.
461 static gxio_mpipe_context_t
*_gxio_get_link_context(void)
463 static gxio_mpipe_context_t context
;
464 static gxio_mpipe_context_t
*contextp
;
465 static int tried_open
= 0;
466 static DEFINE_MUTEX(mutex
);
475 * "4" here is the maximum possible number of mPIPE shims; it's
476 * an exaggeration but we shouldn't ever go beyond 2 anyway.
478 for (i
= 0; i
< 4; i
++) {
481 snprintf(file
, sizeof(file
), "mpipe/%d/iorpc_info", i
);
482 context
.fd
= hv_dev_open((HV_VirtAddr
) file
, 0);
491 mutex_unlock(&mutex
);
496 int gxio_mpipe_link_enumerate_mac(int idx
, char *link_name
, uint8_t *link_mac
)
499 _gxio_mpipe_link_name_t name
;
500 _gxio_mpipe_link_mac_t mac
;
502 gxio_mpipe_context_t
*context
= _gxio_get_link_context();
504 return GXIO_ERR_NO_DEVICE
;
506 rv
= gxio_mpipe_info_enumerate_aux(context
, idx
, &name
, &mac
);
508 strncpy(link_name
, name
.name
, sizeof(name
.name
));
509 memcpy(link_mac
, mac
.mac
, sizeof(mac
.mac
));
515 EXPORT_SYMBOL_GPL(gxio_mpipe_link_enumerate_mac
);
517 int gxio_mpipe_link_open(gxio_mpipe_link_t
*link
,
518 gxio_mpipe_context_t
*context
, const char *link_name
,
521 _gxio_mpipe_link_name_t name
;
524 strncpy(name
.name
, link_name
, sizeof(name
.name
));
525 name
.name
[GXIO_MPIPE_LINK_NAME_LEN
- 1] = '\0';
527 rv
= gxio_mpipe_link_open_aux(context
, name
, flags
);
531 link
->context
= context
;
532 link
->channel
= rv
>> 8;
533 link
->mac
= rv
& 0xFF;
538 EXPORT_SYMBOL_GPL(gxio_mpipe_link_open
);
540 int gxio_mpipe_link_close(gxio_mpipe_link_t
*link
)
542 return gxio_mpipe_link_close_aux(link
->context
, link
->mac
);
545 EXPORT_SYMBOL_GPL(gxio_mpipe_link_close
);