/* drivers/staging/ozwpan/ozpd.c */

/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 * -----------------------------------------------------------------------------
 */

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/errno.h>
#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozcdev.h"
#include "ozusbsvc.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>

#define OZ_MAX_TX_POOL_SIZE 6

static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
static void oz_isoc_destructor(struct sk_buff *skb);
static int oz_def_app_init(void);
static void oz_def_app_term(void);
static int oz_def_app_start(struct oz_pd *pd, int resume);
static void oz_def_app_stop(struct oz_pd *pd, int pause);
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);

/*
 * Counts the uncompleted isoc frames submitted to netcard.
 */
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);

/*
 * Application handler functions.
 */
static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
	{oz_usb_init,
	 oz_usb_term,
	 oz_usb_start,
	 oz_usb_stop,
	 oz_usb_rx,
	 oz_usb_heartbeat,
	 oz_usb_farewell,
	 OZ_APPID_USB},

	{oz_def_app_init,
	 oz_def_app_term,
	 oz_def_app_start,
	 oz_def_app_stop,
	 oz_def_app_rx,
	 NULL,
	 NULL,
	 OZ_APPID_UNUSED1},

	{oz_def_app_init,
	 oz_def_app_term,
	 oz_def_app_start,
	 oz_def_app_stop,
	 oz_def_app_rx,
	 NULL,
	 NULL,
	 OZ_APPID_UNUSED2},

	{oz_cdev_init,
	 oz_cdev_term,
	 oz_cdev_start,
	 oz_cdev_stop,
	 oz_cdev_rx,
	 NULL,
	 NULL,
	 OZ_APPID_SERIAL},
};

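/* The oz_def_app_* functions below are stub handlers bound to the unused
 * application ids in the table above; they accept everything and do nothing.
 */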
/*
 * Context: process
 */
static int oz_def_app_init(void)
{
	return 0;
}

/*
 * Context: process
 */
static void oz_def_app_term(void)
{
}

/*
 * Context: softirq
 */
static int oz_def_app_start(struct oz_pd *pd, int resume)
{
	return 0;
}

/*
 * Context: softirq
 */
static void oz_def_app_stop(struct oz_pd *pd, int pause)
{
}

/*
 * Context: softirq
 */
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
{
}

/*
 * Context: softirq or process
 */
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
	pd->state = state;
	switch (state) {
	case OZ_PD_S_IDLE:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_IDLE\n");
		break;
	case OZ_PD_S_CONNECTED:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_CONNECTED\n");
		break;
	case OZ_PD_S_STOPPED:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_STOPPED\n");
		break;
	case OZ_PD_S_SLEEP:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_SLEEP\n");
		break;
	}
}

/*
 * Context: softirq or process
 */
void oz_pd_get(struct oz_pd *pd)
{
	atomic_inc(&pd->ref_count);
}

/*
 * Context: softirq or process
 */
void oz_pd_put(struct oz_pd *pd)
{
	if (atomic_dec_and_test(&pd->ref_count))
		oz_pd_destroy(pd);
}

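/* oz_pd_alloc() creates and initialises the PD (peripheral device) state for
 * the device with the given MAC address. The reference count starts at 2,
 * presumably one reference for the caller and one for the protocol's PD list.
 */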
/*
 * Context: softirq-serialized
 */
struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
{
	struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);

	if (pd) {
		int i;

		atomic_set(&pd->ref_count, 2);
		for (i = 0; i < OZ_APPID_MAX; i++)
			spin_lock_init(&pd->app_lock[i]);
		pd->last_rx_pkt_num = 0xffffffff;
		oz_pd_set_state(pd, OZ_PD_S_IDLE);
		pd->max_tx_size = OZ_MAX_TX_SIZE;
		ether_addr_copy(pd->mac_addr, mac_addr);
		if (0 != oz_elt_buf_init(&pd->elt_buff)) {
			/* Return early rather than fall through and
			 * dereference the freed pd below.
			 */
			kfree(pd);
			return NULL;
		}
		spin_lock_init(&pd->tx_frame_lock);
		INIT_LIST_HEAD(&pd->tx_queue);
		INIT_LIST_HEAD(&pd->farewell_list);
		pd->last_sent_frame = &pd->tx_queue;
		spin_lock_init(&pd->stream_lock);
		INIT_LIST_HEAD(&pd->stream_list);
		tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
			     (unsigned long)pd);
		tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
			     (unsigned long)pd);
		hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		pd->heartbeat.function = oz_pd_heartbeat_event;
		pd->timeout.function = oz_pd_timeout_event;
	}
	return pd;
}

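/* oz_pd_free() runs from the work item scheduled by oz_pd_destroy() and
 * releases everything still attached to the PD: isoc streams, queued tx
 * frames, element buffers, pending farewells and the tx frame pool.
 */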
/*
 * Context: softirq or process
 */
static void oz_pd_free(struct work_struct *work)
{
	struct list_head *e;
	struct oz_tx_frame *f;
	struct oz_isoc_stream *st;
	struct oz_farewell *fwell;
	struct oz_pd *pd;

	pd = container_of(work, struct oz_pd, workitem);
	oz_pd_dbg(pd, ON, "Destroying PD\n");
	/* Disable timer tasklets. */
	tasklet_kill(&pd->heartbeat_tasklet);
	tasklet_kill(&pd->timeout_tasklet);
	/* Delete any streams. */
	e = pd->stream_list.next;
	while (e != &pd->stream_list) {
		st = container_of(e, struct oz_isoc_stream, link);
		e = e->next;
		oz_isoc_stream_free(st);
	}
	/* Free any queued tx frames. */
	e = pd->tx_queue.next;
	while (e != &pd->tx_queue) {
		f = container_of(e, struct oz_tx_frame, link);
		e = e->next;
		if (f->skb != NULL)
			kfree_skb(f->skb);
		oz_retire_frame(pd, f);
	}
	oz_elt_buf_term(&pd->elt_buff);
	/* Free any farewells. */
	e = pd->farewell_list.next;
	while (e != &pd->farewell_list) {
		fwell = container_of(e, struct oz_farewell, link);
		e = e->next;
		kfree(fwell);
	}
	/* Deallocate all frames in tx pool. */
	while (pd->tx_pool) {
		e = pd->tx_pool;
		pd->tx_pool = e->next;
		kfree(container_of(e, struct oz_tx_frame, link));
	}
	if (pd->net_dev)
		dev_put(pd->net_dev);
	kfree(pd);
}

/*
 * Context: softirq or process
 */
void oz_pd_destroy(struct oz_pd *pd)
{
	if (hrtimer_active(&pd->timeout))
		hrtimer_cancel(&pd->timeout);
	if (hrtimer_active(&pd->heartbeat))
		hrtimer_cancel(&pd->heartbeat);

	INIT_WORK(&pd->workitem, oz_pd_free);
	if (!schedule_work(&pd->workitem))
		oz_pd_dbg(pd, ON, "failed to schedule workitem\n");
}

/*
 * Context: softirq-serialized
 */
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
	const struct oz_app_if *ai;
	int rc = 0;

	oz_pd_dbg(pd, ON, "%s: (0x%x) resume(%d)\n", __func__, apps, resume);
	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (apps & (1<<ai->app_id)) {
			if (ai->start(pd, resume)) {
				rc = -1;
				oz_pd_dbg(pd, ON,
					  "Unable to start service %d\n",
					  ai->app_id);
				break;
			}
			spin_lock_bh(&g_polling_lock);
			pd->total_apps |= (1<<ai->app_id);
			if (resume)
				pd->paused_apps &= ~(1<<ai->app_id);
			spin_unlock_bh(&g_polling_lock);
		}
	}
	return rc;
}

/*
 * Context: softirq or process
 */
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
	const struct oz_app_if *ai;

	oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause);
	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (apps & (1<<ai->app_id)) {
			spin_lock_bh(&g_polling_lock);
			if (pause) {
				pd->paused_apps |= (1<<ai->app_id);
			} else {
				pd->total_apps &= ~(1<<ai->app_id);
				pd->paused_apps &= ~(1<<ai->app_id);
			}
			spin_unlock_bh(&g_polling_lock);
			ai->stop(pd, pause);
		}
	}
}

/*
 * Context: softirq
 */
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
	const struct oz_app_if *ai;
	int more = 0;

	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (ai->heartbeat && (apps & (1<<ai->app_id))) {
			if (ai->heartbeat(pd))
				more = 1;
		}
	}
	if ((!more) && (hrtimer_active(&pd->heartbeat)))
		hrtimer_cancel(&pd->heartbeat);
	if (pd->mode & OZ_F_ISOC_ANYTIME) {
		int count = 8;

		while (count-- && (oz_send_isoc_frame(pd) >= 0))
			;
	}
}

/*
 * Context: softirq or process
 */
void oz_pd_stop(struct oz_pd *pd)
{
	u16 stop_apps;

	oz_dbg(ON, "oz_pd_stop() State = 0x%x\n", pd->state);
	oz_pd_indicate_farewells(pd);
	spin_lock_bh(&g_polling_lock);
	stop_apps = pd->total_apps;
	pd->total_apps = 0;
	pd->paused_apps = 0;
	spin_unlock_bh(&g_polling_lock);
	oz_services_stop(pd, stop_apps, 0);
	spin_lock_bh(&g_polling_lock);
	oz_pd_set_state(pd, OZ_PD_S_STOPPED);
	/* Remove from PD list. */
	list_del(&pd->link);
	spin_unlock_bh(&g_polling_lock);
	oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count));
	oz_pd_put(pd);
}

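/* oz_pd_sleep() puts the PD into the sleep state if it has a keep-alive
 * interval and a session; otherwise the PD is stopped outright. Returns
 * non-zero when the PD was stopped rather than slept.
 */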
/*
 * Context: softirq
 */
int oz_pd_sleep(struct oz_pd *pd)
{
	int do_stop = 0;
	u16 stop_apps;

	spin_lock_bh(&g_polling_lock);
	if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
		spin_unlock_bh(&g_polling_lock);
		return 0;
	}
	if (pd->keep_alive && pd->session_id)
		oz_pd_set_state(pd, OZ_PD_S_SLEEP);
	else
		do_stop = 1;

	stop_apps = pd->total_apps;
	spin_unlock_bh(&g_polling_lock);
	if (do_stop) {
		oz_pd_stop(pd);
	} else {
		oz_services_stop(pd, stop_apps, 1);
		oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive);
	}
	return do_stop;
}

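/* Tx frames are recycled through a small free list (at most
 * OZ_MAX_TX_POOL_SIZE entries) so steady-state traffic avoids a kmalloc per
 * frame; the pool is singly linked through the frames' link.next pointers.
 */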
/*
 * Context: softirq
 */
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
	struct oz_tx_frame *f = NULL;

	spin_lock_bh(&pd->tx_frame_lock);
	if (pd->tx_pool) {
		f = container_of(pd->tx_pool, struct oz_tx_frame, link);
		pd->tx_pool = pd->tx_pool->next;
		pd->tx_pool_count--;
	}
	spin_unlock_bh(&pd->tx_frame_lock);
	if (f == NULL)
		f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
	if (f) {
		f->total_size = sizeof(struct oz_hdr);
		INIT_LIST_HEAD(&f->link);
		INIT_LIST_HEAD(&f->elt_list);
	}
	return f;
}

/*
 * Context: softirq or process
 */
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	pd->nb_queued_isoc_frames--;
	list_del_init(&f->link);
	if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
		f->link.next = pd->tx_pool;
		pd->tx_pool = &f->link;
		pd->tx_pool_count++;
	} else {
		kfree(f);
	}
	oz_dbg(TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
	       pd->nb_queued_isoc_frames);
}

/*
 * Context: softirq or process
 */
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	spin_lock_bh(&pd->tx_frame_lock);
	if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
		f->link.next = pd->tx_pool;
		pd->tx_pool = &f->link;
		pd->tx_pool_count++;
		f = NULL;
	}
	spin_unlock_bh(&pd->tx_frame_lock);
	kfree(f);
}

/*
 * Context: softirq-serialized
 */
static void oz_set_more_bit(struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

	oz_hdr->control |= OZ_F_MORE_DATA;
}

/*
 * Context: softirq-serialized
 */
static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
}

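/* oz_prepare_frame() moves buffered elements into a new tx frame and puts it
 * on the tx queue. With empty != 0 a frame is queued even when no elements
 * are waiting, e.g. from the out: path of oz_send_queued_frames().
 */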
/*
 * Context: softirq
 */
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
	struct oz_tx_frame *f;

	if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
		return -1;
	if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
		return -1;
	if (!empty && !oz_are_elts_available(&pd->elt_buff))
		return -1;
	f = oz_tx_frame_alloc(pd);
	if (f == NULL)
		return -1;
	f->skb = NULL;
	f->hdr.control =
		(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
	++pd->last_tx_pkt_num;
	put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
	if (empty == 0) {
		oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
				      pd->max_tx_size, &f->elt_list);
	}
	spin_lock(&pd->tx_frame_lock);
	list_add_tail(&f->link, &pd->tx_queue);
	pd->nb_queued_frames++;
	spin_unlock(&pd->tx_frame_lock);
	return 0;
}

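/* oz_build_frame() turns a queued tx frame into an sk_buff ready for
 * dev_queue_xmit(): head room is reserved for the lower layers, the Ozmo
 * header is written, and each element on the frame's element list is copied
 * into the frame body.
 */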
/*
 * Context: softirq-serialized
 */
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct list_head *e;

	/* Allocate skb with enough space for the lower layers as well
	 * as the space we need.
	 */
	skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;
	/* Reserve the head room for lower layers. */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
			    dev->dev_addr, skb->len) < 0)
		goto fail;
	/* Push the tail to the end of the area we are going to copy to. */
	oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
	f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
	/* Copy the elements into the frame body. */
	elt = (struct oz_elt *)(oz_hdr+1);
	for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
		struct oz_elt_info *ei;

		ei = container_of(e, struct oz_elt_info, link);
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}

/*
 * Context: softirq or process
 */
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct list_head *e;
	struct oz_elt_info *ei;

	e = f->elt_list.next;
	while (e != &f->elt_list) {
		ei = container_of(e, struct oz_elt_info, link);
		e = e->next;
		list_del_init(&ei->link);
		if (ei->callback)
			ei->callback(pd, ei->context);
		spin_lock_bh(&pd->elt_buff.lock);
		oz_elt_info_free(&pd->elt_buff, ei);
		spin_unlock_bh(&pd->elt_buff.lock);
	}
	oz_tx_frame_free(pd, f);
	if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
		oz_trim_elt_pool(&pd->elt_buff);
}

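/* Sends the frame after last_sent_frame in the tx queue. Isoc frames
 * (f->skb != NULL) are consumed immediately and throttled against
 * g_submitted_isoc; ordinary frames are built on the fly and stay queued,
 * with last_sent_frame advanced past them, until oz_retire_tx_frames()
 * releases them on acknowledgement.
 */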
/*
 * Context: softirq-serialized
 */
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
{
	struct sk_buff *skb;
	struct oz_tx_frame *f;
	struct list_head *e;

	spin_lock(&pd->tx_frame_lock);
	e = pd->last_sent_frame->next;
	if (e == &pd->tx_queue) {
		spin_unlock(&pd->tx_frame_lock);
		return -1;
	}
	f = container_of(e, struct oz_tx_frame, link);

	if (f->skb != NULL) {
		skb = f->skb;
		oz_tx_isoc_free(pd, f);
		spin_unlock(&pd->tx_frame_lock);
		if (more_data)
			oz_set_more_bit(skb);
		oz_set_last_pkt_nb(pd, skb);
		if ((int)atomic_read(&g_submitted_isoc) <
		    OZ_MAX_SUBMITTED_ISOC) {
			if (dev_queue_xmit(skb) < 0) {
				oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
				return -1;
			}
			atomic_inc(&g_submitted_isoc);
			oz_dbg(TX_FRAMES, "Sending ISOC Frame, nb_isoc= %d\n",
			       pd->nb_queued_isoc_frames);
			return 0;
		} else {
			kfree_skb(skb);
			oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
			return -1;
		}
	}

	pd->last_sent_frame = e;
	skb = oz_build_frame(pd, f);
	spin_unlock(&pd->tx_frame_lock);
	if (!skb)
		return -1;
	if (more_data)
		oz_set_more_bit(skb);
	oz_dbg(TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
	if (dev_queue_xmit(skb) < 0)
		return -1;

	return 0;
}

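/* oz_send_queued_frames() drains the tx queue. The backlog hint from the
 * caller is topped up with whatever oz_prepare_frame() can build, then
 * clamped according to the PD's isoc mode before that many frames are sent.
 */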
/*
 * Context: softirq-serialized
 */
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
{
	while (oz_prepare_frame(pd, 0) >= 0)
		backlog++;

	switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {

	case OZ_F_ISOC_NO_ELTS: {
		backlog += pd->nb_queued_isoc_frames;
		if (backlog <= 0)
			goto out;
		if (backlog > OZ_MAX_SUBMITTED_ISOC)
			backlog = OZ_MAX_SUBMITTED_ISOC;
		break;
	}
	case OZ_NO_ELTS_ANYTIME: {
		if ((backlog <= 0) && (pd->isoc_sent == 0))
			goto out;
		break;
	}
	default: {
		if (backlog <= 0)
			goto out;
		break;
	}
	}
	while (backlog--) {
		if (oz_send_next_queued_frame(pd, backlog) < 0)
			break;
	}
	return;

out:
	oz_prepare_frame(pd, 1);
	oz_send_next_queued_frame(pd, 0);
}

/*
 * Context: softirq
 */
static int oz_send_isoc_frame(struct oz_pd *pd)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct list_head *e;
	struct list_head list;
	int total_size = sizeof(struct oz_hdr);

	INIT_LIST_HEAD(&list);

	oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
			      pd->max_tx_size, &list);
	if (list.next == &list)
		return 0;
	skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL) {
		oz_dbg(ON, "Cannot alloc skb\n");
		oz_elt_info_free_chain(&pd->elt_buff, &list);
		return -1;
	}
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
			    dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -1;
	}
	oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
	oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	elt = (struct oz_elt *)(oz_hdr+1);

	for (e = list.next; e != &list; e = e->next) {
		struct oz_elt_info *ei;

		ei = container_of(e, struct oz_elt_info, link);
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	dev_queue_xmit(skb);
	oz_elt_info_free_chain(&pd->elt_buff, &list);
	return 0;
}

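/* oz_retire_tx_frames() releases every queued frame whose packet number has
 * been acknowledged, i.e. everything up to but not including the first frame
 * past lpn (the last packet number reported by the device).
 */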
/*
 * Context: softirq-serialized
 */
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
	struct list_head *e;
	struct oz_tx_frame *f;
	struct list_head *first = NULL;
	struct list_head *last = NULL;
	u8 diff;
	u32 pkt_num;

	spin_lock(&pd->tx_frame_lock);
	e = pd->tx_queue.next;
	while (e != &pd->tx_queue) {
		f = container_of(e, struct oz_tx_frame, link);
		pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
		diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
		if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
			break;
		oz_dbg(TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
		       pkt_num, pd->nb_queued_frames);
		if (first == NULL)
			first = e;
		last = e;
		e = e->next;
		pd->nb_queued_frames--;
	}
	if (first) {
		last->next->prev = &pd->tx_queue;
		pd->tx_queue.next = last->next;
		last->next = NULL;
	}
	pd->last_sent_frame = &pd->tx_queue;
	spin_unlock(&pd->tx_frame_lock);
	while (first) {
		f = container_of(first, struct oz_tx_frame, link);
		first = first->next;
		oz_retire_frame(pd, f);
	}
}

/*
 * Precondition: stream_lock must be held.
 * Context: softirq
 */
static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
{
	struct list_head *e;
	struct oz_isoc_stream *st;

	list_for_each(e, &pd->stream_list) {
		st = container_of(e, struct oz_isoc_stream, link);
		if (st->ep_num == ep_num)
			return st;
	}
	return NULL;
}

/*
 * Context: softirq
 */
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st =
		kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);

	if (!st)
		return -ENOMEM;
	st->ep_num = ep_num;
	spin_lock_bh(&pd->stream_lock);
	if (!pd_stream_find(pd, ep_num)) {
		list_add(&st->link, &pd->stream_list);
		st = NULL;
	}
	spin_unlock_bh(&pd->stream_lock);
	kfree(st);
	return 0;
}

/*
 * Context: softirq or process
 */
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
{
	kfree_skb(st->skb);
	kfree(st);
}

/*
 * Context: softirq
 */
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st;

	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st)
		list_del(&st->link);
	spin_unlock_bh(&pd->stream_lock);
	if (st)
		oz_isoc_stream_free(st);
	return 0;
}

/*
 * Context: any
 */
static void oz_isoc_destructor(struct sk_buff *skb)
{
	atomic_dec(&g_submitted_isoc);
}

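/* oz_send_isoc_unit() accumulates isoc data units for an endpoint in a
 * single skb until ms_per_isoc units have been collected, then either queues
 * the completed frame on the tx queue (first dropping the oldest queued isoc
 * frame if isoc_latency is exceeded) or, in ANYTIME mode, transmits it
 * immediately.
 */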
/*
 * Context: softirq
 */
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
{
	struct net_device *dev = pd->net_dev;
	struct oz_isoc_stream *st;
	u8 nb_units = 0;
	struct sk_buff *skb = NULL;
	struct oz_hdr *oz_hdr = NULL;
	int size = 0;

	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st) {
		skb = st->skb;
		st->skb = NULL;
		nb_units = st->nb_units;
		st->nb_units = 0;
		oz_hdr = st->oz_hdr;
		size = st->size;
	}
	spin_unlock_bh(&pd->stream_lock);
	if (!st)
		return 0;
	if (!skb) {
		/* Allocate enough space for a max size frame. */
		skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
				GFP_ATOMIC);
		if (skb == NULL)
			return 0;
		/* Reserve the head room for lower layers. */
		skb_reserve(skb, LL_RESERVED_SPACE(dev));
		skb_reset_network_header(skb);
		skb->dev = dev;
		skb->protocol = htons(OZ_ETHERTYPE);
		/* For audio packets set the priority to AC_VO. */
		skb->priority = 0x7;
		size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
		oz_hdr = (struct oz_hdr *)skb_put(skb, size);
	}
	memcpy(skb_put(skb, len), data, len);
	size += len;
	if (++nb_units < pd->ms_per_isoc) {
		spin_lock_bh(&pd->stream_lock);
		st->skb = skb;
		st->nb_units = nb_units;
		st->oz_hdr = oz_hdr;
		st->size = size;
		spin_unlock_bh(&pd->stream_lock);
	} else {
		struct oz_hdr oz;
		struct oz_isoc_large iso;

		spin_lock_bh(&pd->stream_lock);
		iso.frame_number = st->frame_num;
		st->frame_num += nb_units;
		spin_unlock_bh(&pd->stream_lock);
		oz.control =
			(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
		oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
		oz.pkt_num = 0;
		iso.endpoint = ep_num;
		iso.format = OZ_DATA_F_ISOC_LARGE;
		iso.ms_data = nb_units;
		memcpy(oz_hdr, &oz, sizeof(oz));
		memcpy(oz_hdr+1, &iso, sizeof(iso));
		if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
				    dev->dev_addr, skb->len) < 0)
			goto out;

		skb->destructor = oz_isoc_destructor;
		/* Queue for xmit if mode is not ANYTIME. */
		if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
			struct oz_tx_frame *isoc_unit = NULL;
			int nb = pd->nb_queued_isoc_frames;

			if (nb >= pd->isoc_latency) {
				struct list_head *e;
				struct oz_tx_frame *f;

				oz_dbg(TX_FRAMES,
				       "Dropping ISOC Unit nb= %d\n", nb);
				spin_lock(&pd->tx_frame_lock);
				list_for_each(e, &pd->tx_queue) {
					f = container_of(e, struct oz_tx_frame,
							 link);
					if (f->skb != NULL) {
						oz_tx_isoc_free(pd, f);
						break;
					}
				}
				spin_unlock(&pd->tx_frame_lock);
			}
			isoc_unit = oz_tx_frame_alloc(pd);
			if (isoc_unit == NULL)
				goto out;
			isoc_unit->hdr = oz;
			isoc_unit->skb = skb;
			spin_lock_bh(&pd->tx_frame_lock);
			list_add_tail(&isoc_unit->link, &pd->tx_queue);
			pd->nb_queued_isoc_frames++;
			spin_unlock_bh(&pd->tx_frame_lock);
			oz_dbg(TX_FRAMES,
			       "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
			       pd->nb_queued_isoc_frames, pd->nb_queued_frames);
			return 0;
		}

		/* In ANYTIME mode xmit the unit immediately. */
		if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
			atomic_inc(&g_submitted_isoc);
			if (dev_queue_xmit(skb) < 0)
				return -1;
			return 0;
		}

out:
		kfree_skb(skb);
		return -1;
	}
	return 0;
}

/*
 * Context: process
 */
void oz_apps_init(void)
{
	int i;

	for (i = 0; i < OZ_APPID_MAX; i++)
		if (g_app_if[i].init)
			g_app_if[i].init();
}

/*
 * Context: process
 */
void oz_apps_term(void)
{
	int i;

	/* Terminate all the apps. */
	for (i = 0; i < OZ_APPID_MAX; i++)
		if (g_app_if[i].term)
			g_app_if[i].term();
}

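/* Dispatches an application element to the rx handler registered for the
 * given application id. Note that app ids are 1-based, hence the
 * g_app_if[app_id-1] lookup.
 */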
/*
 * Context: softirq-serialized
 */
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
	const struct oz_app_if *ai;

	if (app_id == 0 || app_id > OZ_APPID_MAX)
		return;
	ai = &g_app_if[app_id-1];
	ai->rx(pd, elt);
}

/*
 * Context: softirq or process
 */
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
	struct oz_farewell *f;
	const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];

	while (1) {
		spin_lock_bh(&g_polling_lock);
		if (list_empty(&pd->farewell_list)) {
			spin_unlock_bh(&g_polling_lock);
			break;
		}
		f = list_first_entry(&pd->farewell_list,
				     struct oz_farewell, link);
		list_del(&f->link);
		spin_unlock_bh(&g_polling_lock);
		if (ai->farewell)
			ai->farewell(pd, f->ep_num, f->report, f->len);
		kfree(f);
	}
}