/*
 * The NFC Controller Interface is the communication protocol between an
 * NFC Controller (NFCC) and a Device Host (DH).
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * Written by Ilan Elias <ilane@ti.com>
 *
 * This file is based on hci_core.c, which was written
 * by Maxim Krasnyansky.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__

#include <linux/module.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>

#include "../nfc.h"
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
#include <linux/nfc.h>

static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);

/* ---- NCI requests ---- */

void nci_req_complete(struct nci_dev *ndev, int result)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = result;
		ndev->req_status = NCI_REQ_DONE;
		complete(&ndev->req_completion);
	}
}

static void nci_req_cancel(struct nci_dev *ndev, int err)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = err;
		ndev->req_status = NCI_REQ_CANCELED;
		complete(&ndev->req_completion);
	}
}

/* Execute request and wait for completion. */
static int __nci_request(struct nci_dev *ndev,
			 void (*req)(struct nci_dev *ndev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	int rc = 0;
	long completion_rc;

	ndev->req_status = NCI_REQ_PEND;

	init_completion(&ndev->req_completion);
	req(ndev, opt);
	completion_rc =
		wait_for_completion_interruptible_timeout(&ndev->req_completion,
							  timeout);

	pr_debug("wait_for_completion return %ld\n", completion_rc);

	if (completion_rc > 0) {
		switch (ndev->req_status) {
		case NCI_REQ_DONE:
			rc = nci_to_errno(ndev->req_result);
			break;

		case NCI_REQ_CANCELED:
			rc = -ndev->req_result;
			break;

		default:
			rc = -ETIMEDOUT;
			break;
		}
	} else {
		pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
		       completion_rc);

		rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
	}

	ndev->req_status = ndev->req_result = 0;

	return rc;
}

static inline int nci_request(struct nci_dev *ndev,
			      void (*req)(struct nci_dev *ndev,
					  unsigned long opt),
			      unsigned long opt, __u32 timeout)
{
	int rc;

	if (!test_bit(NCI_UP, &ndev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	mutex_lock(&ndev->req_lock);
	rc = __nci_request(ndev, req, opt, timeout);
	mutex_unlock(&ndev->req_lock);

	return rc;
}

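/*
 * Editorial note: each request callback below only queues a command; the
 * matching response handler (rsp.c) calls nci_req_complete() to wake the
 * waiter blocked in __nci_request().  A minimal sketch of adding a new
 * synchronous request, assuming a hypothetical NCI_OP_FOO_CMD opcode and
 * struct nci_foo_cmd payload (not part of this file):
 *
 *	static void nci_foo_req(struct nci_dev *ndev, unsigned long opt)
 *	{
 *		struct nci_foo_cmd cmd;
 *
 *		cmd.param = opt;
 *		nci_send_cmd(ndev, NCI_OP_FOO_CMD, sizeof(cmd), &cmd);
 *	}
 *
 *	rc = nci_request(ndev, nci_foo_req, param,
 *			 msecs_to_jiffies(NCI_CMD_TIMEOUT));
 */
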
static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_core_reset_cmd cmd;

	cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
	nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
}

static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
{
	nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
}

static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_map_cmd cmd;
	struct disc_map_config *cfg = cmd.mapping_configs;
	__u8 *num = &cmd.num_mapping_configs;
	int i;

	/* set rf mapping configurations */
	*num = 0;

	/* by default mapping is set to NCI_RF_INTERFACE_FRAME */
	for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
		if (ndev->supported_rf_interfaces[i] ==
		    NCI_RF_INTERFACE_ISO_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
				NCI_DISC_MAP_MODE_LISTEN;
			cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP;
			(*num)++;
		} else if (ndev->supported_rf_interfaces[i] ==
			   NCI_RF_INTERFACE_NFC_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
				NCI_DISC_MAP_MODE_LISTEN;
			cfg[*num].rf_interface = NCI_RF_INTERFACE_NFC_DEP;
			(*num)++;
		}

		if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
			break;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
		     (1 + ((*num) * sizeof(struct disc_map_config))), &cmd);
}

static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_cmd cmd;
	__u32 protocols = opt;

	cmd.num_disc_configs = 0;

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_JEWEL_MASK
	     || protocols & NFC_PROTO_MIFARE_MASK
	     || protocols & NFC_PROTO_ISO14443_MASK
	     || protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_A_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_ISO14443_B_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_B_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_FELICA_MASK
	     || protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_F_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
		     (1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
		     &cmd);
}

struct nci_rf_discover_select_param {
	__u8	rf_discovery_id;
	__u8	rf_protocol;
};

static void nci_rf_discover_select_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_discover_select_param *param =
		(struct nci_rf_discover_select_param *)opt;
	struct nci_rf_discover_select_cmd cmd;

	cmd.rf_discovery_id = param->rf_discovery_id;
	cmd.rf_protocol = param->rf_protocol;

	switch (cmd.rf_protocol) {
	case NCI_RF_PROTOCOL_ISO_DEP:
		cmd.rf_interface = NCI_RF_INTERFACE_ISO_DEP;
		break;

	case NCI_RF_PROTOCOL_NFC_DEP:
		cmd.rf_interface = NCI_RF_INTERFACE_NFC_DEP;
		break;

	default:
		cmd.rf_interface = NCI_RF_INTERFACE_FRAME;
		break;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_SELECT_CMD,
		     sizeof(struct nci_rf_discover_select_cmd), &cmd);
}

static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_deactivate_cmd cmd;

	cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;

	nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
		     sizeof(struct nci_rf_deactivate_cmd), &cmd);
}

static int nci_open_device(struct nci_dev *ndev)
{
	int rc = 0;

	mutex_lock(&ndev->req_lock);

	if (test_bit(NCI_UP, &ndev->flags)) {
		rc = -EALREADY;
		goto done;
	}

	if (ndev->ops->open(ndev)) {
		rc = -EIO;
		goto done;
	}

	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);

	rc = __nci_request(ndev, nci_reset_req, 0,
			   msecs_to_jiffies(NCI_RESET_TIMEOUT));

	if (!rc) {
		rc = __nci_request(ndev, nci_init_req, 0,
				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	if (!rc) {
		rc = __nci_request(ndev, nci_init_complete_req, 0,
				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	clear_bit(NCI_INIT, &ndev->flags);

	if (!rc) {
		set_bit(NCI_UP, &ndev->flags);
		nci_clear_target_list(ndev);
		atomic_set(&ndev->state, NCI_IDLE);
	} else {
		/* Init failed, cleanup */
		skb_queue_purge(&ndev->cmd_q);
		skb_queue_purge(&ndev->rx_q);
		skb_queue_purge(&ndev->tx_q);

		ndev->ops->close(ndev);
	}

done:
	mutex_unlock(&ndev->req_lock);
	return rc;
}

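/*
 * Editorial note: the bring-up above follows the NCI initialization flow,
 * executed synchronously while NCI_INIT is set so that responses are
 * accepted before NCI_UP: CORE_RESET_CMD (nci_reset_req), then
 * CORE_INIT_CMD (nci_init_req), then RF_DISCOVER_MAP_CMD
 * (nci_init_complete_req).
 */
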
static int nci_close_device(struct nci_dev *ndev)
{
	nci_req_cancel(ndev, ENODEV);
	mutex_lock(&ndev->req_lock);

	if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
		del_timer_sync(&ndev->cmd_timer);
		del_timer_sync(&ndev->data_timer);
		mutex_unlock(&ndev->req_lock);
		return 0;
	}

	/* Drop RX and TX queues */
	skb_queue_purge(&ndev->rx_q);
	skb_queue_purge(&ndev->tx_q);

	/* Flush RX and TX wq */
	flush_workqueue(ndev->rx_wq);
	flush_workqueue(ndev->tx_wq);

	skb_queue_purge(&ndev->cmd_q);
	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);
	__nci_request(ndev, nci_reset_req, 0,
		      msecs_to_jiffies(NCI_RESET_TIMEOUT));
	clear_bit(NCI_INIT, &ndev->flags);

	flush_workqueue(ndev->cmd_wq);

	/* After this point our queues are empty
	 * and no works are scheduled. */
	ndev->ops->close(ndev);

	mutex_unlock(&ndev->req_lock);

	return 0;
}

/* NCI command timer function */
static void nci_cmd_timer(unsigned long arg)
{
	struct nci_dev *ndev = (void *) arg;

	atomic_set(&ndev->cmd_cnt, 1);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);
}

/* NCI data exchange timer function */
static void nci_data_timer(unsigned long arg)
{
	struct nci_dev *ndev = (void *) arg;

	set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
	queue_work(ndev->rx_wq, &ndev->rx_work);
}

static int nci_dev_up(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_open_device(ndev);
}

static int nci_dev_down(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_close_device(ndev);
}

static int nci_start_poll(struct nfc_dev *nfc_dev,
			  __u32 im_protocols, __u32 tm_protocols)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	if ((atomic_read(&ndev->state) == NCI_DISCOVERY) ||
	    (atomic_read(&ndev->state) == NCI_W4_ALL_DISCOVERIES)) {
		pr_err("unable to start poll, since poll is already active\n");
		return -EBUSY;
	}

	if (ndev->target_active_prot) {
		pr_err("there is an active target\n");
		return -EBUSY;
	}

	if ((atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) ||
	    (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) {
		pr_debug("target active or w4 select, implicitly deactivate\n");

		rc = nci_request(ndev, nci_rf_deactivate_req, 0,
				 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
		if (rc)
			return -EBUSY;
	}

	rc = nci_request(ndev, nci_rf_discover_req, im_protocols,
			 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));

	if (!rc)
		ndev->poll_prots = im_protocols;

	return rc;
}

static void nci_stop_poll(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	if ((atomic_read(&ndev->state) != NCI_DISCOVERY) &&
	    (atomic_read(&ndev->state) != NCI_W4_ALL_DISCOVERIES)) {
		pr_err("unable to stop poll, since poll is not active\n");
		return;
	}

	nci_request(ndev, nci_rf_deactivate_req, 0,
		    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}

static int nci_activate_target(struct nfc_dev *nfc_dev,
			       struct nfc_target *target, __u32 protocol)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	struct nci_rf_discover_select_param param;
	struct nfc_target *nci_target = NULL;
	int i;
	int rc = 0;

	pr_debug("target_idx %d, protocol 0x%x\n", target->idx, protocol);

	if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
	    (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
		pr_err("there is no available target to activate\n");
		return -EINVAL;
	}

	if (ndev->target_active_prot) {
		pr_err("there is already an active target\n");
		return -EBUSY;
	}

	for (i = 0; i < ndev->n_targets; i++) {
		if (ndev->targets[i].idx == target->idx) {
			nci_target = &ndev->targets[i];
			break;
		}
	}

	if (!nci_target) {
		pr_err("unable to find the selected target\n");
		return -EINVAL;
	}

	if (!(nci_target->supported_protocols & (1 << protocol))) {
		pr_err("target does not support the requested protocol 0x%x\n",
		       protocol);
		return -EINVAL;
	}

	if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
		param.rf_discovery_id = nci_target->logical_idx;

		if (protocol == NFC_PROTO_JEWEL)
			param.rf_protocol = NCI_RF_PROTOCOL_T1T;
		else if (protocol == NFC_PROTO_MIFARE)
			param.rf_protocol = NCI_RF_PROTOCOL_T2T;
		else if (protocol == NFC_PROTO_FELICA)
			param.rf_protocol = NCI_RF_PROTOCOL_T3T;
		else if (protocol == NFC_PROTO_ISO14443 ||
			 protocol == NFC_PROTO_ISO14443_B)
			param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
		else
			param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;

		rc = nci_request(ndev, nci_rf_discover_select_req,
				 (unsigned long)&param,
				 msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT));
	}

	if (!rc)
		ndev->target_active_prot = protocol;

	return rc;
}

static void nci_deactivate_target(struct nfc_dev *nfc_dev,
				  struct nfc_target *target)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	pr_debug("target_idx %d\n", target->idx);

	if (!ndev->target_active_prot) {
		pr_err("unable to deactivate target, no active target\n");
		return;
	}

	ndev->target_active_prot = 0;

	if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
		nci_request(ndev, nci_rf_deactivate_req, 0,
			    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
	}
}

static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
			  struct sk_buff *skb,
			  data_exchange_cb_t cb, void *cb_context)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	pr_debug("target_idx %d, len %d\n", target->idx, skb->len);

	if (!ndev->target_active_prot) {
		pr_err("unable to exchange data, no active target\n");
		return -EINVAL;
	}

	if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
		return -EBUSY;

	/* store cb and context to be used on receiving data */
	ndev->data_exchange_cb = cb;
	ndev->data_exchange_cb_context = cb_context;

	rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
	if (rc)
		clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);

	return rc;
}

static struct nfc_ops nci_nfc_ops = {
	.dev_up = nci_dev_up,
	.dev_down = nci_dev_down,
	.start_poll = nci_start_poll,
	.stop_poll = nci_stop_poll,
	.activate_target = nci_activate_target,
	.deactivate_target = nci_deactivate_target,
	.im_transceive = nci_transceive,
};

/* ---- Interface to NCI drivers ---- */

/**
 * nci_allocate_device - allocate a new nci device
 *
 * @ops: device operations
 * @supported_protocols: NFC protocols supported by the device
 */
struct nci_dev *nci_allocate_device(struct nci_ops *ops,
				    __u32 supported_protocols,
				    int tx_headroom, int tx_tailroom)
{
	struct nci_dev *ndev;

	pr_debug("supported_protocols 0x%x\n", supported_protocols);

	if (!ops->open || !ops->close || !ops->send)
		return NULL;

	if (!supported_protocols)
		return NULL;

	ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
	if (!ndev)
		return NULL;

	ndev->ops = ops;
	ndev->tx_headroom = tx_headroom;
	ndev->tx_tailroom = tx_tailroom;

	ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
					    supported_protocols,
					    tx_headroom + NCI_DATA_HDR_SIZE,
					    tx_tailroom);
	if (!ndev->nfc_dev)
		goto free_exit;

	nfc_set_drvdata(ndev->nfc_dev, ndev);

	return ndev;

free_exit:
	kfree(ndev);
	return NULL;
}
EXPORT_SYMBOL(nci_allocate_device);

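/*
 * Editorial example (hypothetical transport driver, not part of this file):
 * a vendor driver typically allocates the NCI device from its probe routine
 * and then registers it with nci_register_device() below; my_nci_ops,
 * my_open, my_close, my_send, MY_HEADROOM and MY_TAILROOM are assumed names.
 *
 *	static struct nci_ops my_nci_ops = {
 *		.open  = my_open,
 *		.close = my_close,
 *		.send  = my_send,
 *	};
 *
 *	ndev = nci_allocate_device(&my_nci_ops, NFC_PROTO_ISO14443_MASK,
 *				   MY_HEADROOM, MY_TAILROOM);
 *	if (!ndev)
 *		return -ENOMEM;
 *
 *	rc = nci_register_device(ndev);
 *	if (rc)
 *		nci_free_device(ndev);
 */
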
/**
 * nci_free_device - deallocate nci device
 *
 * @ndev: The nci device to deallocate
 */
void nci_free_device(struct nci_dev *ndev)
{
	nfc_free_device(ndev->nfc_dev);
	kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);

/**
 * nci_register_device - register a nci device in the nfc subsystem
 *
 * @ndev: The nci device to register
 */
int nci_register_device(struct nci_dev *ndev)
{
	int rc;
	struct device *dev = &ndev->nfc_dev->dev;
	char name[32];

	rc = nfc_register_device(ndev->nfc_dev);
	if (rc)
		goto exit;

	INIT_WORK(&ndev->cmd_work, nci_cmd_work);
	snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
	ndev->cmd_wq = create_singlethread_workqueue(name);
	if (!ndev->cmd_wq) {
		rc = -ENOMEM;
		goto unreg_exit;
	}

	INIT_WORK(&ndev->rx_work, nci_rx_work);
	snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
	ndev->rx_wq = create_singlethread_workqueue(name);
	if (!ndev->rx_wq) {
		rc = -ENOMEM;
		goto destroy_cmd_wq_exit;
	}

	INIT_WORK(&ndev->tx_work, nci_tx_work);
	snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
	ndev->tx_wq = create_singlethread_workqueue(name);
	if (!ndev->tx_wq) {
		rc = -ENOMEM;
		goto destroy_rx_wq_exit;
	}

	skb_queue_head_init(&ndev->cmd_q);
	skb_queue_head_init(&ndev->rx_q);
	skb_queue_head_init(&ndev->tx_q);

	setup_timer(&ndev->cmd_timer, nci_cmd_timer,
		    (unsigned long) ndev);
	setup_timer(&ndev->data_timer, nci_data_timer,
		    (unsigned long) ndev);

	mutex_init(&ndev->req_lock);

	goto exit;

destroy_rx_wq_exit:
	destroy_workqueue(ndev->rx_wq);

destroy_cmd_wq_exit:
	destroy_workqueue(ndev->cmd_wq);

unreg_exit:
	nfc_unregister_device(ndev->nfc_dev);

exit:
	return rc;
}
EXPORT_SYMBOL(nci_register_device);

/**
 * nci_unregister_device - unregister a nci device in the nfc subsystem
 *
 * @ndev: The nci device to unregister
 */
void nci_unregister_device(struct nci_dev *ndev)
{
	nci_close_device(ndev);

	destroy_workqueue(ndev->cmd_wq);
	destroy_workqueue(ndev->rx_wq);
	destroy_workqueue(ndev->tx_wq);

	nfc_unregister_device(ndev->nfc_dev);
}
EXPORT_SYMBOL(nci_unregister_device);

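/*
 * Editorial example (hypothetical driver remove path): teardown mirrors the
 * probe path, unregister first, then free:
 *
 *	nci_unregister_device(ndev);
 *	nci_free_device(ndev);
 */
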
/**
 * nci_recv_frame - receive frame from NCI drivers
 *
 * @skb: The sk_buff to receive
 */
int nci_recv_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	pr_debug("len %d\n", skb->len);

	if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
	    && !test_bit(NCI_INIT, &ndev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Queue frame for rx worker thread */
	skb_queue_tail(&ndev->rx_q, skb);
	queue_work(ndev->rx_wq, &ndev->rx_work);

	return 0;
}
EXPORT_SYMBOL(nci_recv_frame);

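/*
 * Editorial example (hypothetical driver RX path): in this version the
 * driver must point skb->dev at its struct nci_dev before handing the frame
 * over, since nci_recv_frame() reads the device back from there;
 * my_read_frame() is an assumed helper returning one complete NCI packet:
 *
 *	skb = my_read_frame(drv);
 *	if (skb) {
 *		skb->dev = (void *) drv->ndev;
 *		nci_recv_frame(skb);
 *	}
 */
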
static int nci_send_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	pr_debug("len %d\n", skb->len);

	if (!ndev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return ndev->ops->send(skb);
}

/* Send NCI command */
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
{
	struct nci_ctrl_hdr *hdr;
	struct sk_buff *skb;

	pr_debug("opcode 0x%x, plen %d\n", opcode, plen);

	skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
	if (!skb) {
		pr_err("no memory for command\n");
		return -ENOMEM;
	}

	hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
	hdr->gid = nci_opcode_gid(opcode);
	hdr->oid = nci_opcode_oid(opcode);
	hdr->plen = plen;

	nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
	nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);

	if (plen)
		memcpy(skb_put(skb, plen), payload, plen);

	skb->dev = (void *) ndev;

	skb_queue_tail(&ndev->cmd_q, skb);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);

	return 0;
}

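/*
 * Editorial note: per the NCI specification, the 3-octet control header
 * built above is laid out as follows (nci_mt_set/nci_pbf_set write into
 * octet 0):
 *
 *	octet 0: MT (bits 7:5) | PBF (bit 4) | GID (bits 3:0)
 *	octet 1: OID (bits 5:0)
 *	octet 2: payload length (plen)
 */
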
/* ---- NCI TX Data worker thread ---- */

static void nci_tx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
	struct sk_buff *skb;

	pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));

	/* Send queued tx data */
	while (atomic_read(&ndev->credits_cnt)) {
		skb = skb_dequeue(&ndev->tx_q);
		if (!skb)
			return;

		/* Check if data flow control is used */
		if (atomic_read(&ndev->credits_cnt) !=
		    NCI_DATA_FLOW_CONTROL_NOT_USED)
			atomic_dec(&ndev->credits_cnt);

		pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
			 nci_pbf(skb->data),
			 nci_conn_id(skb->data),
			 nci_plen(skb->data));

		nci_send_frame(skb);

		mod_timer(&ndev->data_timer,
			  jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
	}
}

/* ----- NCI RX worker thread (data & control) ----- */

static void nci_rx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ndev->rx_q))) {
		/* Process frame */
		switch (nci_mt(skb->data)) {
		case NCI_MT_RSP_PKT:
			nci_rsp_packet(ndev, skb);
			break;

		case NCI_MT_NTF_PKT:
			nci_ntf_packet(ndev, skb);
			break;

		case NCI_MT_DATA_PKT:
			nci_rx_data_packet(ndev, skb);
			break;

		default:
			pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
			kfree_skb(skb);
			break;
		}
	}

	/* check if a data exchange timeout has occurred */
	if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) {
		/* complete the data exchange transaction, if exists */
		if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
			nci_data_exchange_complete(ndev, NULL, -ETIMEDOUT);

		clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
	}
}

/* ----- NCI TX CMD worker thread ----- */

static void nci_cmd_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
	struct sk_buff *skb;

	pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));

	/* Send queued command */
	if (atomic_read(&ndev->cmd_cnt)) {
		skb = skb_dequeue(&ndev->cmd_q);
		if (!skb)
			return;

		atomic_dec(&ndev->cmd_cnt);

		pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
			 nci_pbf(skb->data),
			 nci_opcode_gid(nci_opcode(skb->data)),
			 nci_opcode_oid(nci_opcode(skb->data)),
			 nci_plen(skb->data));

		nci_send_frame(skb);

		mod_timer(&ndev->cmd_timer,
			  jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
	}
}

MODULE_LICENSE("GPL");