/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 * -----------------------------------------------------------------------------
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
#include "ozconfig.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "oztrace.h"
#include "ozevent.h"
#include "ozcdev.h"
#include "ozusbsvc.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>
/*------------------------------------------------------------------------------
 */
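/* Number of retired tx frames a PD keeps cached on its tx_pool list for
 * reuse by oz_tx_frame_alloc()/oz_tx_frame_free().
 */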
#define OZ_MAX_TX_POOL_SIZE	6
/* Maximum number of uncompleted isoc frames that can be pending.
 */
#define OZ_MAX_SUBMITTED_ISOC	16
/*------------------------------------------------------------------------------
 */
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int *more_data);
static void oz_isoc_destructor(struct sk_buff *skb);
static int oz_def_app_init(void);
static void oz_def_app_term(void);
static int oz_def_app_start(struct oz_pd *pd, int resume);
static void oz_def_app_stop(struct oz_pd *pd, int pause);
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
/*------------------------------------------------------------------------------
 * Counts the uncompleted isoc frames submitted to netcard.
 */
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
/* Application handler functions.
 */
static struct oz_app_if g_app_if[OZ_APPID_MAX] = {
        {oz_usb_init,
        oz_usb_term,
        oz_usb_start,
        oz_usb_stop,
        oz_usb_rx,
        oz_usb_heartbeat,
        oz_usb_farewell,
        OZ_APPID_USB},

        {oz_def_app_init,
        oz_def_app_term,
        oz_def_app_start,
        oz_def_app_stop,
        oz_def_app_rx,
        0,
        0,
        OZ_APPID_UNUSED1},

        {oz_def_app_init,
        oz_def_app_term,
        oz_def_app_start,
        oz_def_app_stop,
        oz_def_app_rx,
        0,
        0,
        OZ_APPID_UNUSED2},

        {oz_cdev_init,
        oz_cdev_term,
        oz_cdev_start,
        oz_cdev_stop,
        oz_cdev_rx,
        0,
        0,
        OZ_APPID_SERIAL},
};
/*------------------------------------------------------------------------------
 * Context: process
 */
static int oz_def_app_init(void)
{
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: process
 */
static void oz_def_app_term(void)
{
}
/*------------------------------------------------------------------------------
 * Context: softirq
 */
static int oz_def_app_start(struct oz_pd *pd, int resume)
{
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq
 */
static void oz_def_app_stop(struct oz_pd *pd, int pause)
{
}
/*------------------------------------------------------------------------------
 * Context: softirq
 */
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
{
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
        pd->state = state;
        oz_event_log(OZ_EVT_PD_STATE, 0, 0, 0, state);
#ifdef WANT_TRACE
        switch (state) {
        case OZ_PD_S_IDLE:
                oz_trace("PD State: OZ_PD_S_IDLE\n");
                break;
        case OZ_PD_S_CONNECTED:
                oz_trace("PD State: OZ_PD_S_CONNECTED\n");
                break;
        case OZ_PD_S_STOPPED:
                oz_trace("PD State: OZ_PD_S_STOPPED\n");
                break;
        case OZ_PD_S_SLEEP:
                oz_trace("PD State: OZ_PD_S_SLEEP\n");
                break;
        }
#endif /* WANT_TRACE */
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
void oz_pd_get(struct oz_pd *pd)
{
        atomic_inc(&pd->ref_count);
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
void oz_pd_put(struct oz_pd *pd)
{
        if (atomic_dec_and_test(&pd->ref_count))
                oz_pd_destroy(pd);
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
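/* Allocate and initialise a PD. The reference count starts at 2: one
 * reference for the caller and one held on behalf of the protocol's PD
 * list, dropped when oz_pd_stop() removes the PD from that list.
 */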
struct oz_pd *oz_pd_alloc(u8 *mac_addr)
{
        struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
        if (pd) {
                int i;
                atomic_set(&pd->ref_count, 2);
                for (i = 0; i < OZ_APPID_MAX; i++)
                        spin_lock_init(&pd->app_lock[i]);
                pd->last_rx_pkt_num = 0xffffffff;
                oz_pd_set_state(pd, OZ_PD_S_IDLE);
                pd->max_tx_size = OZ_MAX_TX_SIZE;
                memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
                if (0 != oz_elt_buf_init(&pd->elt_buff)) {
                        kfree(pd);
                        return 0;
                }
                spin_lock_init(&pd->tx_frame_lock);
                INIT_LIST_HEAD(&pd->tx_queue);
                INIT_LIST_HEAD(&pd->farewell_list);
                pd->last_sent_frame = &pd->tx_queue;
                spin_lock_init(&pd->stream_lock);
                INIT_LIST_HEAD(&pd->stream_list);
        }
        return pd;
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
void oz_pd_destroy(struct oz_pd *pd)
{
        struct list_head *e;
        struct oz_tx_frame *f;
        struct oz_isoc_stream *st;
        struct oz_farewell *fwell;
        oz_trace("Destroying PD\n");
        /* Delete any streams.
         */
        e = pd->stream_list.next;
        while (e != &pd->stream_list) {
                st = container_of(e, struct oz_isoc_stream, link);
                e = e->next;
                oz_isoc_stream_free(st);
        }
        /* Free any queued tx frames.
         */
        e = pd->tx_queue.next;
        while (e != &pd->tx_queue) {
                f = container_of(e, struct oz_tx_frame, link);
                e = e->next;
                oz_retire_frame(pd, f);
        }
        oz_elt_buf_term(&pd->elt_buff);
        /* Free any farewells.
         */
        e = pd->farewell_list.next;
        while (e != &pd->farewell_list) {
                fwell = container_of(e, struct oz_farewell, link);
                e = e->next;
                kfree(fwell);
        }
        /* Deallocate all frames in tx pool.
         */
        while (pd->tx_pool) {
                e = pd->tx_pool;
                pd->tx_pool = e->next;
                kfree(container_of(e, struct oz_tx_frame, link));
        }
        if (pd->net_dev)
                dev_put(pd->net_dev);
        kfree(pd);
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
        struct oz_app_if *ai;
        int rc = 0;
        oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
        for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
                if (apps & (1<<ai->app_id)) {
                        if (ai->start(pd, resume)) {
                                rc = -1;
                                oz_trace("Unable to start service %d\n",
                                        ai->app_id);
                                break;
                        }
                        oz_polling_lock_bh();
                        pd->total_apps |= (1<<ai->app_id);
                        if (resume)
                                pd->paused_apps &= ~(1<<ai->app_id);
                        oz_polling_unlock_bh();
                }
        }
        return rc;
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
        struct oz_app_if *ai;
        oz_trace("oz_stop_services(0x%x) pause(%d)\n", apps, pause);
        for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
                if (apps & (1<<ai->app_id)) {
                        oz_polling_lock_bh();
                        if (pause) {
                                pd->paused_apps |= (1<<ai->app_id);
                        } else {
                                pd->total_apps &= ~(1<<ai->app_id);
                                pd->paused_apps &= ~(1<<ai->app_id);
                        }
                        oz_polling_unlock_bh();
                        ai->stop(pd, pause);
                }
        }
}
/*------------------------------------------------------------------------------
 * Context: softirq
 */
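/* Call the heartbeat handler of every application in 'apps' that has one;
 * if any handler reports more work, another heartbeat is requested. In
 * anytime-isoc mode the heartbeat also flushes pending isoc frames.
 */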
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
        struct oz_app_if *ai;
        int more = 0;
        for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
                if (ai->heartbeat && (apps & (1<<ai->app_id))) {
                        if (ai->heartbeat(pd))
                                more = 1;
                }
        }
        if (more)
                oz_pd_request_heartbeat(pd);
        if (pd->mode & OZ_F_ISOC_ANYTIME) {
                int count = 8;
                while (count-- && (oz_send_isoc_frame(pd) >= 0))
                        ;
        }
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
void oz_pd_stop(struct oz_pd *pd)
{
        u16 stop_apps = 0;
        oz_trace("oz_pd_stop() State = 0x%x\n", pd->state);
        oz_pd_indicate_farewells(pd);
        oz_polling_lock_bh();
        stop_apps = pd->total_apps;
        pd->total_apps = 0;
        pd->paused_apps = 0;
        oz_polling_unlock_bh();
        oz_services_stop(pd, stop_apps, 0);
        oz_polling_lock_bh();
        oz_pd_set_state(pd, OZ_PD_S_STOPPED);
        /* Remove from PD list.*/
        list_del(&pd->link);
        oz_polling_unlock_bh();
        oz_trace("pd ref count = %d\n", atomic_read(&pd->ref_count));
        oz_timer_delete(pd, 0);
        oz_pd_put(pd);
}
/*------------------------------------------------------------------------------
 * Context: softirq
 */
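/* Put the PD to sleep if it has a session and a keep-alive interval;
 * otherwise stop it completely. Returns non-zero if the PD was stopped.
 */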
int oz_pd_sleep(struct oz_pd *pd)
{
        int do_stop = 0;
        u16 stop_apps = 0;
        oz_polling_lock_bh();
        if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
                oz_polling_unlock_bh();
                return 0;
        }
        if (pd->keep_alive_j && pd->session_id) {
                oz_pd_set_state(pd, OZ_PD_S_SLEEP);
                pd->pulse_time_j = jiffies + pd->keep_alive_j;
                oz_trace("Sleep Now %lu until %lu\n",
                        jiffies, pd->pulse_time_j);
        } else {
                do_stop = 1;
        }
        stop_apps = pd->total_apps;
        oz_polling_unlock_bh();
        if (do_stop) {
                oz_pd_stop(pd);
        } else {
                oz_services_stop(pd, stop_apps, 1);
                oz_timer_add(pd, OZ_TIMER_STOP, jiffies + pd->keep_alive_j, 1);
        }
        return do_stop;
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
        struct oz_tx_frame *f = 0;
        spin_lock_bh(&pd->tx_frame_lock);
        if (pd->tx_pool) {
                f = container_of(pd->tx_pool, struct oz_tx_frame, link);
                pd->tx_pool = pd->tx_pool->next;
                pd->tx_pool_count--;
        }
        spin_unlock_bh(&pd->tx_frame_lock);
        if (f == 0)
                f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
        if (f) {
                f->total_size = sizeof(struct oz_hdr);
                INIT_LIST_HEAD(&f->link);
                INIT_LIST_HEAD(&f->elt_list);
        }
        return f;
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
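/* Return a tx frame to the per-PD pool, or free it if the pool is full. */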
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
        spin_lock_bh(&pd->tx_frame_lock);
        if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
                f->link.next = pd->tx_pool;
                pd->tx_pool = &f->link;
                pd->tx_pool_count++;
                f = 0;
        }
        spin_unlock_bh(&pd->tx_frame_lock);
        if (f)
                kfree(f);
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
        struct oz_tx_frame *f;
        if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
                return -1;
        if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
                return -1;
        if (!empty && !oz_are_elts_available(&pd->elt_buff))
                return -1;
        f = oz_tx_frame_alloc(pd);
        if (f == 0)
                return -1;
        f->hdr.control =
                (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
        ++pd->last_tx_pkt_num;
        put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
        if (empty == 0) {
                oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
                        pd->max_tx_size, &f->elt_list);
        }
        spin_lock(&pd->tx_frame_lock);
        list_add_tail(&f->link, &pd->tx_queue);
        pd->nb_queued_frames++;
        spin_unlock(&pd->tx_frame_lock);
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
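/* Build an sk_buff for transmission: link-layer header, then the oz_hdr,
 * then the frame's data elements copied back to back (see oz_next_elt()).
 */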
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
        struct sk_buff *skb = 0;
        struct net_device *dev = pd->net_dev;
        struct oz_hdr *oz_hdr;
        struct oz_elt *elt;
        struct list_head *e;
        /* Allocate skb with enough space for the lower layers as well
         * as the space we need.
         */
        skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
        if (skb == 0)
                return 0;
        /* Reserve the head room for lower layers.
         */
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        skb_reset_network_header(skb);
        skb->dev = dev;
        skb->protocol = htons(OZ_ETHERTYPE);
        if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
                dev->dev_addr, skb->len) < 0)
                goto fail;
        /* Push the tail to the end of the area we are going to copy to.
         */
        oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
        f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
        memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
        /* Copy the elements into the frame body.
         */
        elt = (struct oz_elt *)(oz_hdr+1);
        for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
                struct oz_elt_info *ei;
                ei = container_of(e, struct oz_elt_info, link);
                memcpy(elt, ei->data, ei->length);
                elt = oz_next_elt(elt);
        }
        return skb;
fail:
        kfree_skb(skb);
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
        struct list_head *e;
        struct oz_elt_info *ei;
        e = f->elt_list.next;
        while (e != &f->elt_list) {
                ei = container_of(e, struct oz_elt_info, link);
                e = e->next;
                list_del_init(&ei->link);
                if (ei->callback)
                        ei->callback(pd, ei->context);
                spin_lock_bh(&pd->elt_buff.lock);
                oz_elt_info_free(&pd->elt_buff, ei);
                spin_unlock_bh(&pd->elt_buff.lock);
        }
        oz_tx_frame_free(pd, f);
        if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
                oz_trim_elt_pool(&pd->elt_buff);
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
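/* pd->last_sent_frame marks the queue position of the most recently
 * transmitted frame: entries after it are unsent, entries before it are
 * awaiting acknowledgement (see oz_retire_tx_frames()).
 */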
static int oz_send_next_queued_frame(struct oz_pd *pd, int *more_data)
{
        struct sk_buff *skb;
        struct oz_tx_frame *f;
        struct list_head *e;
        *more_data = 0;
        spin_lock(&pd->tx_frame_lock);
        e = pd->last_sent_frame->next;
        if (e == &pd->tx_queue) {
                spin_unlock(&pd->tx_frame_lock);
                return -1;
        }
        pd->last_sent_frame = e;
        if (e->next != &pd->tx_queue)
                *more_data = 1;
        f = container_of(e, struct oz_tx_frame, link);
        skb = oz_build_frame(pd, f);
        spin_unlock(&pd->tx_frame_lock);
        oz_trace2(OZ_TRACE_TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
        if (skb) {
                oz_event_log(OZ_EVT_TX_FRAME,
                        0,
                        (((u16)f->hdr.control)<<8)|f->hdr.last_pkt_num,
                        0, f->hdr.pkt_num);
                if (dev_queue_xmit(skb) < 0)
                        return -1;
        }
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
{
        int more;
        if (backlog < OZ_MAX_QUEUED_FRAMES) {
                if (oz_send_next_queued_frame(pd, &more) >= 0) {
                        while (more && oz_send_next_queued_frame(pd, &more))
                                ;
                } else {
                        if (((pd->mode & OZ_F_ISOC_ANYTIME) == 0)
                                || (pd->isoc_sent == 0)) {
                                if (oz_prepare_frame(pd, 1) >= 0)
                                        oz_send_next_queued_frame(pd, &more);
                        }
                }
        } else {
                oz_send_next_queued_frame(pd, &more);
        }
}
/*------------------------------------------------------------------------------
 * Context: softirq
 */
static int oz_send_isoc_frame(struct oz_pd *pd)
{
        struct sk_buff *skb = 0;
        struct net_device *dev = pd->net_dev;
        struct oz_hdr *oz_hdr;
        struct oz_elt *elt;
        struct list_head *e;
        struct list_head list;
        int total_size = sizeof(struct oz_hdr);
        INIT_LIST_HEAD(&list);

        oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
                pd->max_tx_size, &list);
        if (list.next == &list)
                return 0;
        skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
        if (skb == 0) {
                oz_trace("Cannot alloc skb\n");
                oz_elt_info_free_chain(&pd->elt_buff, &list);
                return -1;
        }
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        skb_reset_network_header(skb);
        skb->dev = dev;
        skb->protocol = htons(OZ_ETHERTYPE);
        if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
                dev->dev_addr, skb->len) < 0) {
                kfree_skb(skb);
                return -1;
        }
        oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
        oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
        oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
        elt = (struct oz_elt *)(oz_hdr+1);

        for (e = list.next; e != &list; e = e->next) {
                struct oz_elt_info *ei;
                ei = container_of(e, struct oz_elt_info, link);
                memcpy(elt, ei->data, ei->length);
                elt = oz_next_elt(elt);
        }
        oz_event_log(OZ_EVT_TX_ISOC, 0, 0, 0, 0);
        dev_queue_xmit(skb);
        oz_elt_info_free_chain(&pd->elt_buff, &list);
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
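/* Called when the remote device acknowledges packet number 'lpn'. All queued
 * frames up to and including that packet number are unlinked and retired;
 * the half-cycle test handles wrap-around of the packet-number window.
 */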
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
        struct list_head *e;
        struct oz_tx_frame *f;
        struct list_head *first = 0;
        struct list_head *last = 0;
        u8 diff;
        u32 pkt_num;

        spin_lock(&pd->tx_frame_lock);
        e = pd->tx_queue.next;
        while (e != &pd->tx_queue) {
                f = container_of(e, struct oz_tx_frame, link);
                pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
                diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
                if (diff > OZ_LAST_PN_HALF_CYCLE)
                        break;
                if (first == 0)
                        first = e;
                last = e;
                e = e->next;
                pd->nb_queued_frames--;
        }
        if (last) {
                last->next->prev = &pd->tx_queue;
                pd->tx_queue.next = last->next;
                last->next = 0;
        }
        pd->last_sent_frame = &pd->tx_queue;
        spin_unlock(&pd->tx_frame_lock);
        while (first) {
                f = container_of(first, struct oz_tx_frame, link);
                first = first->next;
                oz_retire_frame(pd, f);
        }
}
/*------------------------------------------------------------------------------
 * Precondition: stream_lock must be held.
 * Context: softirq
 */
static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
{
        struct list_head *e;
        struct oz_isoc_stream *st;
        list_for_each(e, &pd->stream_list) {
                st = container_of(e, struct oz_isoc_stream, link);
                if (st->ep_num == ep_num)
                        return st;
        }
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq
 */
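/* Create an isoc stream for endpoint ep_num; if one already exists the new
 * allocation is discarded.
 */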
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
{
        struct oz_isoc_stream *st =
                kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
        if (!st)
                return -ENOMEM;
        st->ep_num = ep_num;
        spin_lock_bh(&pd->stream_lock);
        if (!pd_stream_find(pd, ep_num)) {
                list_add(&st->link, &pd->stream_list);
                st = 0;
        }
        spin_unlock_bh(&pd->stream_lock);
        if (st)
                oz_isoc_stream_free(st);
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
{
        if (st->skb)
                kfree_skb(st->skb);
        kfree(st);
}
/*------------------------------------------------------------------------------
 * Context: softirq
 */
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
{
        struct oz_isoc_stream *st;
        spin_lock_bh(&pd->stream_lock);
        st = pd_stream_find(pd, ep_num);
        if (st)
                list_del(&st->link);
        spin_unlock_bh(&pd->stream_lock);
        if (st)
                oz_isoc_stream_free(st);
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: any
 */
static void oz_isoc_destructor(struct sk_buff *skb)
{
        atomic_dec(&g_submitted_isoc);
        oz_event_log(OZ_EVT_TX_ISOC_DONE, atomic_read(&g_submitted_isoc),
                0, skb, 0);
}
/*------------------------------------------------------------------------------
 * Context: softirq
 */
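/* Append one isochronous data unit to the stream's pending sk_buff. Once
 * pd->ms_per_isoc units have accumulated, the oz_hdr and oz_isoc_large
 * headers are filled in and the frame is transmitted, subject to the
 * OZ_MAX_SUBMITTED_ISOC limit on frames outstanding at the netcard.
 */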
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, u8 *data, int len)
{
        struct net_device *dev = pd->net_dev;
        struct oz_isoc_stream *st;
        u8 nb_units = 0;
        struct sk_buff *skb = 0;
        struct oz_hdr *oz_hdr = 0;
        int size = 0;
        spin_lock_bh(&pd->stream_lock);
        st = pd_stream_find(pd, ep_num);
        if (st) {
                skb = st->skb;
                st->skb = 0;
                nb_units = st->nb_units;
                st->nb_units = 0;
                oz_hdr = st->oz_hdr;
                size = st->size;
        }
        spin_unlock_bh(&pd->stream_lock);
        if (!st)
                return 0;
        if (!skb) {
                /* Allocate enough space for max size frame. */
                skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
                                GFP_ATOMIC);
                if (skb == 0)
                        return 0;
                /* Reserve the head room for lower layers. */
                skb_reserve(skb, LL_RESERVED_SPACE(dev));
                skb_reset_network_header(skb);
                skb->dev = dev;
                skb->protocol = htons(OZ_ETHERTYPE);
                size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
                oz_hdr = (struct oz_hdr *)skb_put(skb, size);
        }
        memcpy(skb_put(skb, len), data, len);
        size += len;
        if (++nb_units < pd->ms_per_isoc) {
                spin_lock_bh(&pd->stream_lock);
                st->skb = skb;
                st->nb_units = nb_units;
                st->oz_hdr = oz_hdr;
                st->size = size;
                spin_unlock_bh(&pd->stream_lock);
        } else {
                struct oz_hdr oz;
                struct oz_isoc_large iso;
                spin_lock_bh(&pd->stream_lock);
                iso.frame_number = st->frame_num;
                st->frame_num += nb_units;
                spin_unlock_bh(&pd->stream_lock);
                oz.control =
                        (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
                oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
                oz.pkt_num = 0;
                iso.endpoint = ep_num;
                iso.format = OZ_DATA_F_ISOC_LARGE;
                iso.ms_data = nb_units;
                memcpy(oz_hdr, &oz, sizeof(oz));
                memcpy(oz_hdr+1, &iso, sizeof(iso));
                if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
                                dev->dev_addr, skb->len) < 0) {
                        kfree_skb(skb);
                        return -1;
                }
                if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
                        skb->destructor = oz_isoc_destructor;
                        atomic_inc(&g_submitted_isoc);
                        oz_event_log(OZ_EVT_TX_ISOC, nb_units,
                                iso.frame_number,
                                skb, atomic_read(&g_submitted_isoc));
                        if (dev_queue_xmit(skb) < 0)
                                return -1;
                } else {
                        oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
                        kfree_skb(skb);
                }
        }
        return 0;
}
/*------------------------------------------------------------------------------
 * Context: process
 */
void oz_apps_init(void)
{
        int i;
        for (i = 0; i < OZ_APPID_MAX; i++)
                if (g_app_if[i].init)
                        g_app_if[i].init();
}
/*------------------------------------------------------------------------------
 * Context: process
 */
void oz_apps_term(void)
{
        int i;
        /* Terminate all the apps. */
        for (i = 0; i < OZ_APPID_MAX; i++)
                if (g_app_if[i].term)
                        g_app_if[i].term();
}
/*------------------------------------------------------------------------------
 * Context: softirq-serialized
 */
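/* Dispatch a received element to the rx handler of the application it is
 * addressed to; app_id is 1-based, so 0 and out-of-range ids are ignored.
 */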
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
        struct oz_app_if *ai;
        if (app_id == 0 || app_id > OZ_APPID_MAX)
                return;
        ai = &g_app_if[app_id-1];
        ai->rx(pd, elt);
}
/*------------------------------------------------------------------------------
 * Context: softirq or process
 */
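/* Deliver every queued farewell report to the USB service's farewell
 * handler, unlinking each entry from the farewell list under the polling
 * lock before the callback runs.
 */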
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
        struct oz_farewell *f;
        struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
        while (1) {
                oz_polling_lock_bh();
                if (list_empty(&pd->farewell_list)) {
                        oz_polling_unlock_bh();
                        break;
                }
                f = list_first_entry(&pd->farewell_list,
                                struct oz_farewell, link);
                list_del(&f->link);
                oz_polling_unlock_bh();
                if (ai->farewell)
                        ai->farewell(pd, f->ep_num, f->report, f->len);
                kfree(f);
        }
}