/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 * -----------------------------------------------------------------------------
 */

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/errno.h>
#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozcdev.h"
#include "ozusbsvc.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>

#define OZ_MAX_TX_POOL_SIZE	6
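/*
 * Each PD keeps a small free list of retired oz_tx_frame structures
 * (pd->tx_pool) so the tx path does not always hit kmalloc;
 * oz_tx_frame_free() below returns frames to this pool until it holds
 * OZ_MAX_TX_POOL_SIZE entries and frees them outright after that.
 */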

static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
static void oz_isoc_destructor(struct sk_buff *skb);
static int oz_def_app_init(void);
static void oz_def_app_term(void);
static int oz_def_app_start(struct oz_pd *pd, int resume);
static void oz_def_app_stop(struct oz_pd *pd, int pause);
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);

/*
 * Counts the uncompleted isoc frames submitted to netcard.
 */
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);

/* Application handler functions.
 */
static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
	{oz_usb_init,
	oz_usb_term,
	oz_usb_start,
	oz_usb_stop,
	oz_usb_rx,
	oz_usb_heartbeat,
	oz_usb_farewell,
	OZ_APPID_USB},

	{oz_def_app_init,
	oz_def_app_term,
	oz_def_app_start,
	oz_def_app_stop,
	oz_def_app_rx,
	NULL,
	NULL,
	OZ_APPID_UNUSED1},

	{oz_def_app_init,
	oz_def_app_term,
	oz_def_app_start,
	oz_def_app_stop,
	oz_def_app_rx,
	NULL,
	NULL,
	OZ_APPID_UNUSED2},

	{oz_cdev_init,
	oz_cdev_term,
	oz_cdev_start,
	oz_cdev_stop,
	oz_cdev_rx,
	NULL,
	NULL,
	OZ_APPID_SERIAL},
};
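/*
 * Service bits: pd->total_apps and pd->paused_apps are bit masks keyed by
 * app_id (1<<app_id), so each entry in g_app_if can be started, paused and
 * stopped independently.  The oz_def_app_* handlers below are no-op
 * placeholders for table slots with no real service behind them.
 */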
static int oz_def_app_init(void)
{
	return 0;
}

static void oz_def_app_term(void)
{
}

static int oz_def_app_start(struct oz_pd *pd, int resume)
{
	return 0;
}

static void oz_def_app_stop(struct oz_pd *pd, int pause)
{
}

static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
{
}

/*
 * Context: softirq or process
 */
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
	pd->state = state;
	switch (state) {
	case OZ_PD_S_IDLE:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_IDLE\n");
		break;
	case OZ_PD_S_CONNECTED:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_CONNECTED\n");
		break;
	case OZ_PD_S_STOPPED:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_STOPPED\n");
		break;
	case OZ_PD_S_SLEEP:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_SLEEP\n");
		break;
	}
}

/*
 * Context: softirq or process
 */
void oz_pd_get(struct oz_pd *pd)
{
	atomic_inc(&pd->ref_count);
}

/*
 * Context: softirq or process
 */
void oz_pd_put(struct oz_pd *pd)
{
	if (atomic_dec_and_test(&pd->ref_count))
		oz_pd_destroy(pd);
}
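/*
 * Note: oz_pd_alloc() below starts ref_count at 2, presumably one reference
 * for the protocol layer's PD list and one for the caller; the PD is only
 * torn down once both have been dropped through oz_pd_put().
 */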

/*
 * Context: softirq-serialized
 */
struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
{
	struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);

	if (pd) {
		int i;

		atomic_set(&pd->ref_count, 2);
		for (i = 0; i < OZ_APPID_MAX; i++)
			spin_lock_init(&pd->app_lock[i]);
		pd->last_rx_pkt_num = 0xffffffff;
		oz_pd_set_state(pd, OZ_PD_S_IDLE);
		pd->max_tx_size = OZ_MAX_TX_SIZE;
		ether_addr_copy(pd->mac_addr, mac_addr);
		if (0 != oz_elt_buf_init(&pd->elt_buff)) {
			kfree(pd);
			return NULL;
		}
		spin_lock_init(&pd->tx_frame_lock);
		INIT_LIST_HEAD(&pd->tx_queue);
		INIT_LIST_HEAD(&pd->farewell_list);
		pd->last_sent_frame = &pd->tx_queue;
		spin_lock_init(&pd->stream_lock);
		INIT_LIST_HEAD(&pd->stream_list);
		tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
			     (unsigned long)pd);
		tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
			     (unsigned long)pd);
		hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		pd->heartbeat.function = oz_pd_heartbeat_event;
		pd->timeout.function = oz_pd_timeout_event;
	}
	return pd;
}
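/*
 * PD teardown is deferred to a work item so it runs in process context;
 * the final oz_pd_put() may happen in softirq context, where calls made
 * below (e.g. tasklet_kill()) must not be used.
 */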

/*
 * Context: softirq or process
 */
static void oz_pd_free(struct work_struct *work)
{
	struct list_head *e;
	struct oz_tx_frame *f;
	struct oz_isoc_stream *st;
	struct oz_farewell *fwell;
	struct oz_pd *pd = container_of(work, struct oz_pd, workitem);

	oz_pd_dbg(pd, ON, "Destroying PD\n");
	/* Disable timer tasklets. */
	tasklet_kill(&pd->heartbeat_tasklet);
	tasklet_kill(&pd->timeout_tasklet);
	/* Delete any streams.
	 */
	e = pd->stream_list.next;
	while (e != &pd->stream_list) {
		st = container_of(e, struct oz_isoc_stream, link);
		e = e->next;
		oz_isoc_stream_free(st);
	}
	/* Free any queued tx frames.
	 */
	e = pd->tx_queue.next;
	while (e != &pd->tx_queue) {
		f = container_of(e, struct oz_tx_frame, link);
		e = e->next;
		if (f->skb)
			kfree_skb(f->skb);
		oz_retire_frame(pd, f);
	}
	oz_elt_buf_term(&pd->elt_buff);
	/* Free any farewells.
	 */
	e = pd->farewell_list.next;
	while (e != &pd->farewell_list) {
		fwell = container_of(e, struct oz_farewell, link);
		e = e->next;
		kfree(fwell);
	}
	/* Deallocate all frames in tx pool.
	 */
	while (pd->tx_pool) {
		e = pd->tx_pool;
		pd->tx_pool = e->next;
		kfree(container_of(e, struct oz_tx_frame, link));
	}
	if (pd->net_dev)
		dev_put(pd->net_dev);
	kfree(pd);
}

/*
 * Context: softirq or process
 */
void oz_pd_destroy(struct oz_pd *pd)
{
	if (hrtimer_active(&pd->timeout))
		hrtimer_cancel(&pd->timeout);
	if (hrtimer_active(&pd->heartbeat))
		hrtimer_cancel(&pd->heartbeat);

	INIT_WORK(&pd->workitem, oz_pd_free);
	if (!schedule_work(&pd->workitem))
		oz_pd_dbg(pd, ON, "failed to schedule workitem\n");
}

/*
 * Context: softirq-serialized
 */
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
	const struct oz_app_if *ai;
	int rc = 0;

	oz_pd_dbg(pd, ON, "%s: (0x%x) resume(%d)\n", __func__, apps, resume);
	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (apps & (1<<ai->app_id)) {
			if (ai->start(pd, resume)) {
				rc = -1;
				oz_pd_dbg(pd, ON,
					  "Unable to start service %d\n",
					  ai->app_id);
				break;
			}
			spin_lock_bh(&g_polling_lock);
			pd->total_apps |= (1<<ai->app_id);
			if (resume)
				pd->paused_apps &= ~(1<<ai->app_id);
			spin_unlock_bh(&g_polling_lock);
		}
	}
	return rc;
}

/*
 * Context: softirq or process
 */
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
	const struct oz_app_if *ai;

	oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause);
	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (apps & (1<<ai->app_id)) {
			spin_lock_bh(&g_polling_lock);
			if (pause) {
				pd->paused_apps |= (1<<ai->app_id);
			} else {
				pd->total_apps &= ~(1<<ai->app_id);
				pd->paused_apps &= ~(1<<ai->app_id);
			}
			spin_unlock_bh(&g_polling_lock);
			ai->stop(pd, pause);
		}
	}
}

/*
 * Context: softirq
 */
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
	const struct oz_app_if *ai;
	int more = 0;

	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (ai->heartbeat && (apps & (1<<ai->app_id))) {
			if (ai->heartbeat(pd))
				more = 1;
		}
	}
	if ((!more) && (hrtimer_active(&pd->heartbeat)))
		hrtimer_cancel(&pd->heartbeat);
	if (pd->mode & OZ_F_ISOC_ANYTIME) {
		int count = 8;

		while (count-- && (oz_send_isoc_frame(pd) >= 0))
			;
	}
}

/*
 * Context: softirq or process
 */
void oz_pd_stop(struct oz_pd *pd)
{
	u16 stop_apps;

	oz_dbg(ON, "oz_pd_stop() State = 0x%x\n", pd->state);
	oz_pd_indicate_farewells(pd);
	spin_lock_bh(&g_polling_lock);
	stop_apps = pd->total_apps;
	pd->total_apps = 0;
	pd->paused_apps = 0;
	spin_unlock_bh(&g_polling_lock);
	oz_services_stop(pd, stop_apps, 0);
	spin_lock_bh(&g_polling_lock);
	oz_pd_set_state(pd, OZ_PD_S_STOPPED);
	/* Remove from PD list. */
	list_del(&pd->link);
	spin_unlock_bh(&g_polling_lock);
	oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count));
	oz_pd_put(pd);
}

/*
 * Context: softirq
 */
int oz_pd_sleep(struct oz_pd *pd)
{
	int do_stop = 0;
	u16 stop_apps;

	spin_lock_bh(&g_polling_lock);
	if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
		spin_unlock_bh(&g_polling_lock);
		return 0;
	}
	if (pd->keep_alive && pd->session_id)
		oz_pd_set_state(pd, OZ_PD_S_SLEEP);
	else
		do_stop = 1;

	stop_apps = pd->total_apps;
	spin_unlock_bh(&g_polling_lock);
	if (do_stop) {
		oz_pd_stop(pd);
	} else {
		oz_services_stop(pd, stop_apps, 1);
		oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive);
	}
	return do_stop;
}

/*
 * Context: softirq
 */
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
	struct oz_tx_frame *f = NULL;

	spin_lock_bh(&pd->tx_frame_lock);
	if (pd->tx_pool) {
		f = container_of(pd->tx_pool, struct oz_tx_frame, link);
		pd->tx_pool = pd->tx_pool->next;
		pd->tx_pool_count--;
	}
	spin_unlock_bh(&pd->tx_frame_lock);
	if (f == NULL)
		f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
	if (f) {
		f->total_size = sizeof(struct oz_hdr);
		INIT_LIST_HEAD(&f->link);
		INIT_LIST_HEAD(&f->elt_list);
	}
	return f;
}

/*
 * Context: softirq or process
 */
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	pd->nb_queued_isoc_frames--;
	list_del_init(&f->link);
	if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
		f->link.next = pd->tx_pool;
		pd->tx_pool = &f->link;
		pd->tx_pool_count++;
	} else {
		kfree(f);
	}
	oz_dbg(TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
	       pd->nb_queued_isoc_frames);
}

/*
 * Context: softirq or process
 */
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	spin_lock_bh(&pd->tx_frame_lock);
	if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
		f->link.next = pd->tx_pool;
		pd->tx_pool = &f->link;
		pd->tx_pool_count++;
		f = NULL;
	}
	spin_unlock_bh(&pd->tx_frame_lock);
	kfree(f);
}

/*
 * Context: softirq-serialized
 */
static void oz_set_more_bit(struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

	oz_hdr->control |= OZ_F_MORE_DATA;
}

/*
 * Context: softirq-serialized
 */
static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
}

/*
 * Context: softirq
 */
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
	struct oz_tx_frame *f;

	if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
		return -1;
	if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
		return -1;
	if (!empty && !oz_are_elts_available(&pd->elt_buff))
		return -1;
	f = oz_tx_frame_alloc(pd);
	if (f == NULL)
		return -1;
	f->skb = NULL;
	f->hdr.control =
		(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
	++pd->last_tx_pkt_num;
	put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
	if (empty == 0)
		oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
				      pd->max_tx_size, &f->elt_list);
	spin_lock(&pd->tx_frame_lock);
	list_add_tail(&f->link, &pd->tx_queue);
	pd->nb_queued_frames++;
	spin_unlock(&pd->tx_frame_lock);
	return 0;
}
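/*
 * Wire format built below: a struct oz_hdr followed immediately by the
 * selected elements copied back to back; oz_next_elt() steps over each
 * element header plus its payload to find the next copy position.
 */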

/*
 * Context: softirq-serialized
 */
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct list_head *e;

	/* Allocate skb with enough space for the lower layers as well
	 * as the space we need.
	 */
	skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;
	/* Reserve the head room for lower layers.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
			    dev->dev_addr, skb->len) < 0)
		goto fail;
	/* Push the tail to the end of the area we are going to copy to.
	 */
	oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
	f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
	/* Copy the elements into the frame body.
	 */
	elt = (struct oz_elt *)(oz_hdr+1);
	for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
		struct oz_elt_info *ei;

		ei = container_of(e, struct oz_elt_info, link);
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}

/*
 * Context: softirq or process
 */
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct list_head *e;
	struct oz_elt_info *ei;

	e = f->elt_list.next;
	while (e != &f->elt_list) {
		ei = container_of(e, struct oz_elt_info, link);
		e = e->next;
		list_del_init(&ei->link);
		if (ei->callback)
			ei->callback(pd, ei->context);
		spin_lock_bh(&pd->elt_buff.lock);
		oz_elt_info_free(&pd->elt_buff, ei);
		spin_unlock_bh(&pd->elt_buff.lock);
	}
	oz_tx_frame_free(pd, f);
	if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
		oz_trim_elt_pool(&pd->elt_buff);
}

/*
 * Context: softirq-serialized
 */
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
{
	struct sk_buff *skb;
	struct oz_tx_frame *f;
	struct list_head *e;

	spin_lock(&pd->tx_frame_lock);
	e = pd->last_sent_frame->next;
	if (e == &pd->tx_queue) {
		spin_unlock(&pd->tx_frame_lock);
		return -1;
	}
	f = container_of(e, struct oz_tx_frame, link);

	if (f->skb != NULL) {
		skb = f->skb;
		oz_tx_isoc_free(pd, f);
		spin_unlock(&pd->tx_frame_lock);
		if (more_data)
			oz_set_more_bit(skb);
		oz_set_last_pkt_nb(pd, skb);
		if ((int)atomic_read(&g_submitted_isoc) <
		    OZ_MAX_SUBMITTED_ISOC) {
			if (dev_queue_xmit(skb) < 0) {
				oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
				return -1;
			}
			atomic_inc(&g_submitted_isoc);
			oz_dbg(TX_FRAMES, "Sending ISOC Frame, nb_isoc= %d\n",
			       pd->nb_queued_isoc_frames);
			return 0;
		}
		kfree_skb(skb);
		oz_dbg(TX_FRAMES, "Dropping ISOC Frame>\n");
		return -1;
	}

	pd->last_sent_frame = e;
	skb = oz_build_frame(pd, f);
	spin_unlock(&pd->tx_frame_lock);
	if (skb == NULL)
		return -1;
	if (more_data)
		oz_set_more_bit(skb);
	oz_dbg(TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
	if (dev_queue_xmit(skb) < 0)
		return -1;

	return 0;
}
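/*
 * The backlog argument is, roughly, the number of frames the current
 * trigger allows us to burst out; for the isoc modes below it is first
 * adjusted against the queued isoc frames and clamped to
 * OZ_MAX_SUBMITTED_ISOC before the send loop runs.
 */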

/*
 * Context: softirq-serialized
 */
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
{
	while (oz_prepare_frame(pd, 0) >= 0)
		backlog++;

	switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {
	case OZ_F_ISOC_NO_ELTS:
		backlog += pd->nb_queued_isoc_frames;
		if (backlog <= 0)
			goto out;
		if (backlog > OZ_MAX_SUBMITTED_ISOC)
			backlog = OZ_MAX_SUBMITTED_ISOC;
		break;
	case OZ_NO_ELTS_ANYTIME:
		if ((backlog <= 0) && (pd->isoc_sent == 0))
			goto out;
		break;
	default:
		if (backlog <= 0)
			goto out;
		break;
	}
	while (backlog--) {
		if (oz_send_next_queued_frame(pd, backlog) < 0)
			break;
	}
	return;

out:
	oz_prepare_frame(pd, 1);
	oz_send_next_queued_frame(pd, 0);
}

/*
 * Context: softirq
 */
static int oz_send_isoc_frame(struct oz_pd *pd)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct list_head *e;
	struct list_head list;
	int total_size = sizeof(struct oz_hdr);

	INIT_LIST_HEAD(&list);

	oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
			      pd->max_tx_size, &list);
	if (list.next == &list)
		return 0;
	skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL) {
		oz_dbg(ON, "Cannot alloc skb\n");
		oz_elt_info_free_chain(&pd->elt_buff, &list);
		return -1;
	}
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
			    dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -1;
	}
	oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
	oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	elt = (struct oz_elt *)(oz_hdr+1);

	for (e = list.next; e != &list; e = e->next) {
		struct oz_elt_info *ei;

		ei = container_of(e, struct oz_elt_info, link);
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	dev_queue_xmit(skb);
	oz_elt_info_free_chain(&pd->elt_buff, &list);
	return 0;
}
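/*
 * lpn is the last packet number the peer has acknowledged.  The loop
 * below treats packet numbers as a window modulo OZ_LAST_PN_MASK: a
 * frame is retired only while (lpn - pkt_num) stays within half a
 * cycle, so a stale acknowledgment cannot release frames sent after it.
 */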

/*
 * Context: softirq-serialized
 */
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
	struct list_head *e;
	struct oz_tx_frame *f;
	struct list_head *first = NULL;
	struct list_head *last = NULL;
	u8 diff;
	u32 pkt_num;

	spin_lock(&pd->tx_frame_lock);
	e = pd->tx_queue.next;
	while (e != &pd->tx_queue) {
		f = container_of(e, struct oz_tx_frame, link);
		pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
		diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
		if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
			break;
		oz_dbg(TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
		       pkt_num, pd->nb_queued_frames);
		if (first == NULL)
			first = e;
		last = e;
		e = e->next;
		pd->nb_queued_frames--;
	}
	if (first) {
		last->next->prev = &pd->tx_queue;
		pd->tx_queue.next = last->next;
		last->next = NULL;
	}
	pd->last_sent_frame = &pd->tx_queue;
	spin_unlock(&pd->tx_frame_lock);
	while (first) {
		f = container_of(first, struct oz_tx_frame, link);
		first = first->next;
		oz_retire_frame(pd, f);
	}
}

/*
 * Precondition: stream_lock must be held.
 * Context: softirq
 */
static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
{
	struct list_head *e;
	struct oz_isoc_stream *st;

	list_for_each(e, &pd->stream_list) {
		st = container_of(e, struct oz_isoc_stream, link);
		if (st->ep_num == ep_num)
			return st;
	}
	return NULL;
}

/*
 * Context: softirq
 */
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st =
		kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);

	if (!st)
		return -ENOMEM;
	st->ep_num = ep_num;
	spin_lock_bh(&pd->stream_lock);
	if (!pd_stream_find(pd, ep_num)) {
		list_add(&st->link, &pd->stream_list);
		st = NULL;
	}
	spin_unlock_bh(&pd->stream_lock);
	kfree(st);
	return 0;
}

/*
 * Context: softirq or process
 */
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
{
	kfree_skb(st->skb);
	kfree(st);
}

/*
 * Context: softirq
 */
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st;

	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st)
		list_del(&st->link);
	spin_unlock_bh(&pd->stream_lock);
	if (st)
		oz_isoc_stream_free(st);
	return 0;
}
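/*
 * Paired with the atomic_inc() done when an isoc skb is handed to
 * dev_queue_xmit(): the network layer invokes this destructor when it
 * finally frees the skb, which keeps g_submitted_isoc an accurate count
 * of isoc frames still owned by the netcard.
 */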

/*
 * Context: any
 */
static void oz_isoc_destructor(struct sk_buff *skb)
{
	atomic_dec(&g_submitted_isoc);
}
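/*
 * Isoc units are aggregated: each call appends one unit of audio data to
 * the stream's pending skb, and only once pd->ms_per_isoc units have
 * accumulated is the frame stamped with its oz_hdr/oz_isoc_large headers
 * and transmitted (or queued, depending on the isoc mode).
 */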

/*
 * Context: softirq
 */
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
{
	struct net_device *dev = pd->net_dev;
	struct oz_isoc_stream *st;
	u8 nb_units = 0;
	struct sk_buff *skb = NULL;
	struct oz_hdr *oz_hdr = NULL;
	int size = 0;

	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st) {
		skb = st->skb;
		st->skb = NULL;
		nb_units = st->nb_units;
		st->nb_units = 0;
		oz_hdr = st->oz_hdr;
		size = st->size;
	}
	spin_unlock_bh(&pd->stream_lock);
	if (!st)
		return 0;
	if (!skb) {
		/* Allocate enough space for max size frame. */
		skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
				GFP_ATOMIC);
		if (skb == NULL)
			return 0;
		/* Reserve the head room for lower layers. */
		skb_reserve(skb, LL_RESERVED_SPACE(dev));
		skb_reset_network_header(skb);
		skb->dev = dev;
		skb->protocol = htons(OZ_ETHERTYPE);
		/* For audio packet set priority to AC_VO */
		skb->priority = 0x7;
		size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
		oz_hdr = (struct oz_hdr *)skb_put(skb, size);
	}
	memcpy(skb_put(skb, len), data, len);
	size += len;
	if (++nb_units < pd->ms_per_isoc) {
		spin_lock_bh(&pd->stream_lock);
		st->skb = skb;
		st->nb_units = nb_units;
		st->oz_hdr = oz_hdr;
		st->size = size;
		spin_unlock_bh(&pd->stream_lock);
	} else {
		struct oz_hdr oz;
		struct oz_isoc_large iso;

		spin_lock_bh(&pd->stream_lock);
		iso.frame_number = st->frame_num;
		st->frame_num += nb_units;
		spin_unlock_bh(&pd->stream_lock);
		oz.control =
			(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
		oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
		oz.pkt_num = 0;
		iso.endpoint = ep_num;
		iso.format = OZ_DATA_F_ISOC_LARGE;
		iso.ms_data = nb_units;
		memcpy(oz_hdr, &oz, sizeof(oz));
		memcpy(oz_hdr+1, &iso, sizeof(iso));
		if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
				    dev->dev_addr, skb->len) < 0)
			goto out;

		skb->destructor = oz_isoc_destructor;
		/* Queue for Xmit if mode is not ANYTIME. */
		if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
			struct oz_tx_frame *isoc_unit = NULL;
			int nb = pd->nb_queued_isoc_frames;

			if (nb >= pd->isoc_latency) {
				struct list_head *e;
				struct oz_tx_frame *f;

				oz_dbg(TX_FRAMES,
				       "Dropping ISOC Unit nb= %d\n", nb);
				spin_lock(&pd->tx_frame_lock);
				list_for_each(e, &pd->tx_queue) {
					f = container_of(e, struct oz_tx_frame,
							 link);
					if (f->skb != NULL) {
						oz_tx_isoc_free(pd, f);
						break;
					}
				}
				spin_unlock(&pd->tx_frame_lock);
			}
			isoc_unit = oz_tx_frame_alloc(pd);
			if (isoc_unit == NULL)
				goto out;
			isoc_unit->hdr = oz;
			isoc_unit->skb = skb;
			spin_lock_bh(&pd->tx_frame_lock);
			list_add_tail(&isoc_unit->link, &pd->tx_queue);
			pd->nb_queued_isoc_frames++;
			spin_unlock_bh(&pd->tx_frame_lock);
			oz_dbg(TX_FRAMES,
			       "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
			       pd->nb_queued_isoc_frames, pd->nb_queued_frames);
			return 0;
		}

		/* In ANYTIME mode Xmit unit immediately. */
		if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
			atomic_inc(&g_submitted_isoc);
			if (dev_queue_xmit(skb) < 0)
				return -1;
			return 0;
		}

out:
		kfree_skb(skb);
		return -1;
	}
	return 0;
}

/*
 * Context: process
 */
void oz_apps_init(void)
{
	int i;

	for (i = 0; i < OZ_APPID_MAX; i++)
		if (g_app_if[i].init)
			g_app_if[i].init();
}

/*
 * Context: process
 */
void oz_apps_term(void)
{
	int i;

	/* Terminate all the apps. */
	for (i = 0; i < OZ_APPID_MAX; i++)
		if (g_app_if[i].term)
			g_app_if[i].term();
}

/*
 * Context: softirq-serialized
 */
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
	const struct oz_app_if *ai;

	if (app_id == 0 || app_id > OZ_APPID_MAX)
		return;
	ai = &g_app_if[app_id-1];
	ai->rx(pd, elt);
}
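/*
 * Farewell reports are only ever delivered to the USB service
 * (g_app_if[OZ_APPID_USB-1]); see the ai lookup below.
 */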

/*
 * Context: softirq or process
 */
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
	struct oz_farewell *f;
	const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];

	while (1) {
		spin_lock_bh(&g_polling_lock);
		if (list_empty(&pd->farewell_list)) {
			spin_unlock_bh(&g_polling_lock);
			break;
		}
		f = list_first_entry(&pd->farewell_list,
				     struct oz_farewell, link);
		list_del(&f->link);
		spin_unlock_bh(&g_polling_lock);
		if (ai->farewell)
			ai->farewell(pd, f->ep_num, f->report, f->len);
		kfree(f);
	}
}