/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 * -----------------------------------------------------------------------------
 */
7 #include <linux/init.h>
8 #include <linux/module.h>
9 #include <linux/timer.h>
10 #include <linux/sched.h>
11 #include <linux/netdevice.h>
12 #include <linux/errno.h>
13 #include <linux/ieee80211.h>
15 #include "ozprotocol.h"
22 #include <asm/unaligned.h>
23 #include <linux/uaccess.h>
24 #include <net/psnap.h>
26 #define OZ_CF_CONN_SUCCESS 1
27 #define OZ_CF_CONN_FAILURE 2
32 #define OZ_MAX_TIMER_POOL_SIZE 16
35 struct packet_type ptype
;
36 char name
[OZ_MAX_BINDING_LEN
];
37 struct list_head link
;
41 * Static external variables.
43 static DEFINE_SPINLOCK(g_polling_lock
);
44 static LIST_HEAD(g_pd_list
);
45 static LIST_HEAD(g_binding
);
46 static DEFINE_SPINLOCK(g_binding_lock
);
47 static struct sk_buff_head g_rx_queue
;
48 static u8 g_session_id
;
49 static u16 g_apps
= 0x1;
50 static int g_processing_rx
;
53 * Context: softirq-serialized
55 static u8
oz_get_new_session_id(u8 exclude
)
57 if (++g_session_id
== 0)
59 if (g_session_id
== exclude
) {
60 if (++g_session_id
== 0)
67 * Context: softirq-serialized
69 static void oz_send_conn_rsp(struct oz_pd
*pd
, u8 status
)
72 struct net_device
*dev
= pd
->net_dev
;
73 struct oz_hdr
*oz_hdr
;
75 struct oz_elt_connect_rsp
*body
;
77 int sz
= sizeof(struct oz_hdr
) + sizeof(struct oz_elt
) +
78 sizeof(struct oz_elt_connect_rsp
);
79 skb
= alloc_skb(sz
+ OZ_ALLOCATED_SPACE(dev
), GFP_ATOMIC
);
82 skb_reserve(skb
, LL_RESERVED_SPACE(dev
));
83 skb_reset_network_header(skb
);
84 oz_hdr
= (struct oz_hdr
*)skb_put(skb
, sz
);
85 elt
= (struct oz_elt
*)(oz_hdr
+1);
86 body
= (struct oz_elt_connect_rsp
*)(elt
+1);
88 skb
->protocol
= htons(OZ_ETHERTYPE
);
89 /* Fill in device header */
90 if (dev_hard_header(skb
, dev
, OZ_ETHERTYPE
, pd
->mac_addr
,
91 dev
->dev_addr
, skb
->len
) < 0) {
95 oz_hdr
->control
= (OZ_PROTOCOL_VERSION
<<OZ_VERSION_SHIFT
);
96 oz_hdr
->last_pkt_num
= 0;
97 put_unaligned(0, &oz_hdr
->pkt_num
);
98 elt
->type
= OZ_ELT_CONNECT_RSP
;
99 elt
->length
= sizeof(struct oz_elt_connect_rsp
);
100 memset(body
, 0, sizeof(struct oz_elt_connect_rsp
));
101 body
->status
= status
;
103 body
->mode
= pd
->mode
;
104 body
->session_id
= pd
->session_id
;
105 put_unaligned(cpu_to_le16(pd
->total_apps
), &body
->apps
);
107 oz_dbg(ON
, "TX: OZ_ELT_CONNECT_RSP %d", status
);
113 * Context: softirq-serialized
115 static void pd_set_keepalive(struct oz_pd
*pd
, u8 kalive
)
117 unsigned long keep_alive
= kalive
& OZ_KALIVE_VALUE_MASK
;
119 switch (kalive
& OZ_KALIVE_TYPE_MASK
) {
120 case OZ_KALIVE_SPECIAL
:
121 pd
->keep_alive
= keep_alive
* 1000*60*60*24*20;
124 pd
->keep_alive
= keep_alive
*1000;
127 pd
->keep_alive
= keep_alive
*1000*60;
129 case OZ_KALIVE_HOURS
:
130 pd
->keep_alive
= keep_alive
*1000*60*60;
135 oz_dbg(ON
, "Keepalive = %lu mSec\n", pd
->keep_alive
);
139 * Context: softirq-serialized
141 static void pd_set_presleep(struct oz_pd
*pd
, u8 presleep
, u8 start_timer
)
144 pd
->presleep
= presleep
*100;
146 pd
->presleep
= OZ_PRESLEEP_TOUT
;
148 spin_unlock(&g_polling_lock
);
149 oz_timer_add(pd
, OZ_TIMER_TOUT
, pd
->presleep
);
150 spin_lock(&g_polling_lock
);
152 oz_dbg(ON
, "Presleep time = %lu mSec\n", pd
->presleep
);
156 * Context: softirq-serialized
158 static struct oz_pd
*oz_connect_req(struct oz_pd
*cur_pd
, struct oz_elt
*elt
,
159 const u8
*pd_addr
, struct net_device
*net_dev
)
162 struct oz_elt_connect_req
*body
=
163 (struct oz_elt_connect_req
*)(elt
+1);
164 u8 rsp_status
= OZ_STATUS_SUCCESS
;
166 u16 new_apps
= g_apps
;
167 struct net_device
*old_net_dev
= NULL
;
168 struct oz_pd
*free_pd
= NULL
;
172 spin_lock_bh(&g_polling_lock
);
174 struct oz_pd
*pd2
= NULL
;
176 pd
= oz_pd_alloc(pd_addr
);
179 getnstimeofday(&pd
->last_rx_timestamp
);
180 spin_lock_bh(&g_polling_lock
);
181 list_for_each(e
, &g_pd_list
) {
182 pd2
= container_of(e
, struct oz_pd
, link
);
183 if (memcmp(pd2
->mac_addr
, pd_addr
, ETH_ALEN
) == 0) {
190 list_add_tail(&pd
->link
, &g_pd_list
);
193 spin_unlock_bh(&g_polling_lock
);
196 if (pd
->net_dev
!= net_dev
) {
197 old_net_dev
= pd
->net_dev
;
199 pd
->net_dev
= net_dev
;
201 oz_dbg(ON
, "Host vendor: %d\n", body
->host_vendor
);
202 pd
->max_tx_size
= OZ_MAX_TX_SIZE
;
203 pd
->mode
= body
->mode
;
204 pd
->pd_info
= body
->pd_info
;
205 if (pd
->mode
& OZ_F_ISOC_NO_ELTS
) {
206 pd
->ms_per_isoc
= body
->ms_per_isoc
;
207 if (!pd
->ms_per_isoc
)
210 switch (body
->ms_isoc_latency
& OZ_LATENCY_MASK
) {
211 case OZ_ONE_MS_LATENCY
:
212 pd
->isoc_latency
= (body
->ms_isoc_latency
&
213 ~OZ_LATENCY_MASK
) / pd
->ms_per_isoc
;
215 case OZ_TEN_MS_LATENCY
:
216 pd
->isoc_latency
= ((body
->ms_isoc_latency
&
217 ~OZ_LATENCY_MASK
) * 10) / pd
->ms_per_isoc
;
220 pd
->isoc_latency
= OZ_MAX_TX_QUEUE_ISOC
;
223 if (body
->max_len_div16
)
224 pd
->max_tx_size
= ((u16
)body
->max_len_div16
)<<4;
225 oz_dbg(ON
, "Max frame:%u Ms per isoc:%u\n",
226 pd
->max_tx_size
, pd
->ms_per_isoc
);
227 pd
->max_stream_buffering
= 3*1024;
228 pd
->pulse_period
= OZ_QUANTUM
;
229 pd_set_presleep(pd
, body
->presleep
, 0);
230 pd_set_keepalive(pd
, body
->keep_alive
);
232 new_apps
&= le16_to_cpu(get_unaligned(&body
->apps
));
233 if ((new_apps
& 0x1) && (body
->session_id
)) {
234 if (pd
->session_id
) {
235 if (pd
->session_id
!= body
->session_id
) {
236 rsp_status
= OZ_STATUS_SESSION_MISMATCH
;
240 new_apps
&= ~0x1; /* Resume not permitted */
242 oz_get_new_session_id(body
->session_id
);
245 if (pd
->session_id
&& !body
->session_id
) {
246 rsp_status
= OZ_STATUS_SESSION_TEARDOWN
;
249 new_apps
&= ~0x1; /* Resume not permitted */
251 oz_get_new_session_id(body
->session_id
);
255 if (rsp_status
== OZ_STATUS_SUCCESS
) {
256 u16 start_apps
= new_apps
& ~pd
->total_apps
& ~0x1;
257 u16 stop_apps
= pd
->total_apps
& ~new_apps
& ~0x1;
258 u16 resume_apps
= new_apps
& pd
->paused_apps
& ~0x1;
259 spin_unlock_bh(&g_polling_lock
);
260 oz_pd_set_state(pd
, OZ_PD_S_CONNECTED
);
261 oz_dbg(ON
, "new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
262 new_apps
, pd
->total_apps
, pd
->paused_apps
);
264 if (oz_services_start(pd
, start_apps
, 0))
265 rsp_status
= OZ_STATUS_TOO_MANY_PDS
;
268 if (oz_services_start(pd
, resume_apps
, 1))
269 rsp_status
= OZ_STATUS_TOO_MANY_PDS
;
271 oz_services_stop(pd
, stop_apps
, 0);
272 oz_pd_request_heartbeat(pd
);
274 spin_unlock_bh(&g_polling_lock
);
276 oz_send_conn_rsp(pd
, rsp_status
);
277 if (rsp_status
!= OZ_STATUS_SUCCESS
) {
284 dev_put(old_net_dev
);
286 oz_pd_destroy(free_pd
);
291 * Context: softirq-serialized
293 static void oz_add_farewell(struct oz_pd
*pd
, u8 ep_num
, u8 index
,
294 const u8
*report
, u8 len
)
296 struct oz_farewell
*f
;
297 struct oz_farewell
*f2
;
300 f
= kmalloc(sizeof(struct oz_farewell
) + len
, GFP_ATOMIC
);
306 memcpy(f
->report
, report
, len
);
307 oz_dbg(ON
, "RX: Adding farewell report\n");
308 spin_lock(&g_polling_lock
);
309 list_for_each_entry(f2
, &pd
->farewell_list
, link
) {
310 if ((f2
->ep_num
== ep_num
) && (f2
->index
== index
)) {
316 list_add_tail(&f
->link
, &pd
->farewell_list
);
317 spin_unlock(&g_polling_lock
);
323 * Context: softirq-serialized
325 static void oz_rx_frame(struct sk_buff
*skb
)
331 struct oz_pd
*pd
= NULL
;
332 struct oz_hdr
*oz_hdr
= (struct oz_hdr
*)skb_network_header(skb
);
333 struct timespec current_time
;
337 oz_dbg(RX_FRAMES
, "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
338 oz_hdr
->pkt_num
, oz_hdr
->last_pkt_num
, oz_hdr
->control
);
339 mac_hdr
= skb_mac_header(skb
);
340 src_addr
= &mac_hdr
[ETH_ALEN
] ;
343 /* Check the version field */
344 if (oz_get_prot_ver(oz_hdr
->control
) != OZ_PROTOCOL_VERSION
) {
345 oz_dbg(ON
, "Incorrect protocol version: %d\n",
346 oz_get_prot_ver(oz_hdr
->control
));
350 pkt_num
= le32_to_cpu(get_unaligned(&oz_hdr
->pkt_num
));
352 pd
= oz_pd_find(src_addr
);
354 if (!(pd
->state
& OZ_PD_S_CONNECTED
))
355 oz_pd_set_state(pd
, OZ_PD_S_CONNECTED
);
356 getnstimeofday(¤t_time
);
357 if ((current_time
.tv_sec
!= pd
->last_rx_timestamp
.tv_sec
) ||
358 (pd
->presleep
< MSEC_PER_SEC
)) {
359 oz_timer_add(pd
, OZ_TIMER_TOUT
, pd
->presleep
);
360 pd
->last_rx_timestamp
= current_time
;
362 if (pkt_num
!= pd
->last_rx_pkt_num
) {
363 pd
->last_rx_pkt_num
= pkt_num
;
366 oz_dbg(ON
, "Duplicate frame\n");
370 if (pd
&& !dup
&& ((pd
->mode
& OZ_MODE_MASK
) == OZ_MODE_TRIGGERED
)) {
371 oz_dbg(RX_FRAMES
, "Received TRIGGER Frame\n");
372 pd
->last_sent_frame
= &pd
->tx_queue
;
373 if (oz_hdr
->control
& OZ_F_ACK
) {
374 /* Retire completed frames */
375 oz_retire_tx_frames(pd
, oz_hdr
->last_pkt_num
);
377 if ((oz_hdr
->control
& OZ_F_ACK_REQUESTED
) &&
378 (pd
->state
== OZ_PD_S_CONNECTED
)) {
379 int backlog
= pd
->nb_queued_frames
;
380 pd
->trigger_pkt_num
= pkt_num
;
381 /* Send queued frames */
382 oz_send_queued_frames(pd
, backlog
);
386 length
-= sizeof(struct oz_hdr
);
387 elt
= (struct oz_elt
*)((u8
*)oz_hdr
+ sizeof(struct oz_hdr
));
389 while (length
>= sizeof(struct oz_elt
)) {
390 length
-= sizeof(struct oz_elt
) + elt
->length
;
394 case OZ_ELT_CONNECT_REQ
:
395 oz_dbg(ON
, "RX: OZ_ELT_CONNECT_REQ\n");
396 pd
= oz_connect_req(pd
, elt
, src_addr
, skb
->dev
);
398 case OZ_ELT_DISCONNECT
:
399 oz_dbg(ON
, "RX: OZ_ELT_DISCONNECT\n");
403 case OZ_ELT_UPDATE_PARAM_REQ
: {
404 struct oz_elt_update_param
*body
=
405 (struct oz_elt_update_param
*)(elt
+ 1);
406 oz_dbg(ON
, "RX: OZ_ELT_UPDATE_PARAM_REQ\n");
407 if (pd
&& (pd
->state
& OZ_PD_S_CONNECTED
)) {
408 spin_lock(&g_polling_lock
);
409 pd_set_keepalive(pd
, body
->keepalive
);
410 pd_set_presleep(pd
, body
->presleep
, 1);
411 spin_unlock(&g_polling_lock
);
415 case OZ_ELT_FAREWELL_REQ
: {
416 struct oz_elt_farewell
*body
=
417 (struct oz_elt_farewell
*)(elt
+ 1);
418 oz_dbg(ON
, "RX: OZ_ELT_FAREWELL_REQ\n");
419 oz_add_farewell(pd
, body
->ep_num
,
420 body
->index
, body
->report
,
421 elt
->length
+ 1 - sizeof(*body
));
424 case OZ_ELT_APP_DATA
:
425 if (pd
&& (pd
->state
& OZ_PD_S_CONNECTED
)) {
426 struct oz_app_hdr
*app_hdr
=
427 (struct oz_app_hdr
*)(elt
+1);
430 oz_handle_app_elt(pd
, app_hdr
->app_id
, elt
);
434 oz_dbg(ON
, "RX: Unknown elt %02x\n", elt
->type
);
436 elt
= oz_next_elt(elt
);
447 void oz_protocol_term(void)
449 struct oz_binding
*b
, *t
;
451 /* Walk the list of bindings and remove each one.
453 spin_lock_bh(&g_binding_lock
);
454 list_for_each_entry_safe(b
, t
, &g_binding
, link
) {
456 spin_unlock_bh(&g_binding_lock
);
457 dev_remove_pack(&b
->ptype
);
459 dev_put(b
->ptype
.dev
);
461 spin_lock_bh(&g_binding_lock
);
463 spin_unlock_bh(&g_binding_lock
);
464 /* Walk the list of PDs and stop each one. This causes the PD to be
465 * removed from the list so we can just pull each one from the head
468 spin_lock_bh(&g_polling_lock
);
469 while (!list_empty(&g_pd_list
)) {
471 list_first_entry(&g_pd_list
, struct oz_pd
, link
);
473 spin_unlock_bh(&g_polling_lock
);
476 spin_lock_bh(&g_polling_lock
);
478 spin_unlock_bh(&g_polling_lock
);
479 oz_dbg(ON
, "Protocol stopped\n");
485 void oz_pd_heartbeat_handler(unsigned long data
)
487 struct oz_pd
*pd
= (struct oz_pd
*)data
;
490 spin_lock_bh(&g_polling_lock
);
491 if (pd
->state
& OZ_PD_S_CONNECTED
)
492 apps
= pd
->total_apps
;
493 spin_unlock_bh(&g_polling_lock
);
495 oz_pd_heartbeat(pd
, apps
);
502 void oz_pd_timeout_handler(unsigned long data
)
505 struct oz_pd
*pd
= (struct oz_pd
*)data
;
507 spin_lock_bh(&g_polling_lock
);
508 type
= pd
->timeout_type
;
509 spin_unlock_bh(&g_polling_lock
);
524 enum hrtimer_restart
oz_pd_heartbeat_event(struct hrtimer
*timer
)
528 pd
= container_of(timer
, struct oz_pd
, heartbeat
);
529 hrtimer_forward_now(timer
, ktime_set(pd
->pulse_period
/
530 MSEC_PER_SEC
, (pd
->pulse_period
% MSEC_PER_SEC
) * NSEC_PER_MSEC
));
532 tasklet_schedule(&pd
->heartbeat_tasklet
);
533 return HRTIMER_RESTART
;
539 enum hrtimer_restart
oz_pd_timeout_event(struct hrtimer
*timer
)
543 pd
= container_of(timer
, struct oz_pd
, timeout
);
545 tasklet_schedule(&pd
->timeout_tasklet
);
546 return HRTIMER_NORESTART
;
550 * Context: softirq or process
552 void oz_timer_add(struct oz_pd
*pd
, int type
, unsigned long due_time
)
554 spin_lock_bh(&g_polling_lock
);
558 if (hrtimer_active(&pd
->timeout
)) {
559 hrtimer_set_expires(&pd
->timeout
, ktime_set(due_time
/
560 MSEC_PER_SEC
, (due_time
% MSEC_PER_SEC
) *
562 hrtimer_start_expires(&pd
->timeout
, HRTIMER_MODE_REL
);
564 hrtimer_start(&pd
->timeout
, ktime_set(due_time
/
565 MSEC_PER_SEC
, (due_time
% MSEC_PER_SEC
) *
566 NSEC_PER_MSEC
), HRTIMER_MODE_REL
);
568 pd
->timeout_type
= type
;
570 case OZ_TIMER_HEARTBEAT
:
571 if (!hrtimer_active(&pd
->heartbeat
))
572 hrtimer_start(&pd
->heartbeat
, ktime_set(due_time
/
573 MSEC_PER_SEC
, (due_time
% MSEC_PER_SEC
) *
574 NSEC_PER_MSEC
), HRTIMER_MODE_REL
);
577 spin_unlock_bh(&g_polling_lock
);
581 * Context: softirq or process
583 void oz_pd_request_heartbeat(struct oz_pd
*pd
)
585 oz_timer_add(pd
, OZ_TIMER_HEARTBEAT
, pd
->pulse_period
> 0 ?
586 pd
->pulse_period
: OZ_QUANTUM
);
590 * Context: softirq or process
592 struct oz_pd
*oz_pd_find(const u8
*mac_addr
)
597 spin_lock_bh(&g_polling_lock
);
598 list_for_each(e
, &g_pd_list
) {
599 pd
= container_of(e
, struct oz_pd
, link
);
600 if (memcmp(pd
->mac_addr
, mac_addr
, ETH_ALEN
) == 0) {
601 atomic_inc(&pd
->ref_count
);
602 spin_unlock_bh(&g_polling_lock
);
606 spin_unlock_bh(&g_polling_lock
);
613 void oz_app_enable(int app_id
, int enable
)
615 if (app_id
<= OZ_APPID_MAX
) {
616 spin_lock_bh(&g_polling_lock
);
618 g_apps
|= (1<<app_id
);
620 g_apps
&= ~(1<<app_id
);
621 spin_unlock_bh(&g_polling_lock
);
628 static int oz_pkt_recv(struct sk_buff
*skb
, struct net_device
*dev
,
629 struct packet_type
*pt
, struct net_device
*orig_dev
)
631 skb
= skb_share_check(skb
, GFP_ATOMIC
);
634 spin_lock_bh(&g_rx_queue
.lock
);
635 if (g_processing_rx
) {
636 /* We already hold the lock so use __ variant.
638 __skb_queue_head(&g_rx_queue
, skb
);
639 spin_unlock_bh(&g_rx_queue
.lock
);
644 spin_unlock_bh(&g_rx_queue
.lock
);
646 spin_lock_bh(&g_rx_queue
.lock
);
647 if (skb_queue_empty(&g_rx_queue
)) {
649 spin_unlock_bh(&g_rx_queue
.lock
);
652 /* We already hold the lock so use __ variant.
654 skb
= __skb_dequeue(&g_rx_queue
);
663 void oz_binding_add(const char *net_dev
)
665 struct oz_binding
*binding
;
667 binding
= kmalloc(sizeof(struct oz_binding
), GFP_KERNEL
);
669 binding
->ptype
.type
= __constant_htons(OZ_ETHERTYPE
);
670 binding
->ptype
.func
= oz_pkt_recv
;
671 memcpy(binding
->name
, net_dev
, OZ_MAX_BINDING_LEN
);
672 if (net_dev
&& *net_dev
) {
673 oz_dbg(ON
, "Adding binding: %s\n", net_dev
);
675 dev_get_by_name(&init_net
, net_dev
);
676 if (binding
->ptype
.dev
== NULL
) {
677 oz_dbg(ON
, "Netdev %s not found\n", net_dev
);
682 oz_dbg(ON
, "Binding to all netcards\n");
683 binding
->ptype
.dev
= NULL
;
686 dev_add_pack(&binding
->ptype
);
687 spin_lock_bh(&g_binding_lock
);
688 list_add_tail(&binding
->link
, &g_binding
);
689 spin_unlock_bh(&g_binding_lock
);
697 static void pd_stop_all_for_device(struct net_device
*net_dev
)
704 spin_lock_bh(&g_polling_lock
);
705 list_for_each_entry_safe(pd
, n
, &g_pd_list
, link
) {
706 if (pd
->net_dev
== net_dev
) {
707 list_move(&pd
->link
, &h
);
711 spin_unlock_bh(&g_polling_lock
);
712 while (!list_empty(&h
)) {
713 pd
= list_first_entry(&h
, struct oz_pd
, link
);
722 void oz_binding_remove(const char *net_dev
)
724 struct oz_binding
*binding
;
727 oz_dbg(ON
, "Removing binding: %s\n", net_dev
);
728 spin_lock_bh(&g_binding_lock
);
729 list_for_each_entry(binding
, &g_binding
, link
) {
730 if (strncmp(binding
->name
, net_dev
, OZ_MAX_BINDING_LEN
) == 0) {
731 oz_dbg(ON
, "Binding '%s' found\n", net_dev
);
736 spin_unlock_bh(&g_binding_lock
);
738 dev_remove_pack(&binding
->ptype
);
739 if (binding
->ptype
.dev
) {
740 dev_put(binding
->ptype
.dev
);
741 pd_stop_all_for_device(binding
->ptype
.dev
);
743 list_del(&binding
->link
);
/*
 * Extract the next comma-separated device name from s into dname
 * (NUL-terminated, truncated to max_size-1 characters) and return a
 * pointer to the first unconsumed character of s.
 * Context: process
 */
static char *oz_get_next_device_name(char *s, char *dname, int max_size)
{
	while (*s == ',')
		s++;
	while (*s && (*s != ',') && max_size > 1) {
		*dname++ = *s++;
		max_size--;
	}
	*dname = 0;
	return s;
}
766 int oz_protocol_init(char *devs
)
768 skb_queue_head_init(&g_rx_queue
);
769 if (devs
&& (devs
[0] == '*')) {
770 oz_binding_add(NULL
);
774 devs
= oz_get_next_device_name(devs
, d
, sizeof(d
));
785 int oz_get_pd_list(struct oz_mac_addr
*addr
, int max_count
)
791 spin_lock_bh(&g_polling_lock
);
792 list_for_each(e
, &g_pd_list
) {
793 if (count
>= max_count
)
795 pd
= container_of(e
, struct oz_pd
, link
);
796 memcpy(&addr
[count
++], pd
->mac_addr
, ETH_ALEN
);
798 spin_unlock_bh(&g_polling_lock
);
802 void oz_polling_lock_bh(void)
804 spin_lock_bh(&g_polling_lock
);
807 void oz_polling_unlock_bh(void)
809 spin_unlock_bh(&g_polling_lock
);