/*
 * The NFC Controller Interface is the communication protocol between an
 * NFC Controller (NFCC) and a Device Host (DH).
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * Written by Ilan Elias <ilane@ti.com>
 *
 * This file is based on hci_core.c, which was written
 * by Maxim Krasnyansky.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>

#include "../nfc.h"
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
#include <linux/nfc.h>
static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);

/* ---- NCI requests ---- */
void nci_req_complete(struct nci_dev *ndev, int result)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = result;
		ndev->req_status = NCI_REQ_DONE;
		complete(&ndev->req_completion);
	}
}
static void nci_req_cancel(struct nci_dev *ndev, int err)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = err;
		ndev->req_status = NCI_REQ_CANCELED;
		complete(&ndev->req_completion);
	}
}
/* Execute request and wait for completion. */
static int __nci_request(struct nci_dev *ndev,
	void (*req)(struct nci_dev *ndev, unsigned long opt),
	unsigned long opt,
	__u32 timeout)
{
	int rc = 0;
	unsigned long completion_rc;

	ndev->req_status = NCI_REQ_PEND;

	init_completion(&ndev->req_completion);
	req(ndev, opt);
	completion_rc = wait_for_completion_interruptible_timeout(
						&ndev->req_completion,
						timeout);

	nfc_dbg("wait_for_completion return %ld", completion_rc);

	if (completion_rc > 0) {
		switch (ndev->req_status) {
		case NCI_REQ_DONE:
			rc = nci_to_errno(ndev->req_result);
			break;

		case NCI_REQ_CANCELED:
			rc = -ndev->req_result;
			break;

		default:
			rc = -ETIMEDOUT;
			break;
		}
	} else {
		nfc_err("wait_for_completion_interruptible_timeout failed %ld",
			completion_rc);

		rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
	}

	ndev->req_status = ndev->req_result = 0;

	return rc;
}
static inline int nci_request(struct nci_dev *ndev,
		void (*req)(struct nci_dev *ndev, unsigned long opt),
		unsigned long opt, __u32 timeout)
{
	int rc;

	if (!test_bit(NCI_UP, &ndev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	mutex_lock(&ndev->req_lock);
	rc = __nci_request(ndev, req, opt, timeout);
	mutex_unlock(&ndev->req_lock);

	return rc;
}
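
/*
 * Usage note (illustrative): request callbacks such as nci_reset_req()
 * below only queue an NCI command; nci_request() runs the callback and
 * then sleeps on req_completion until the matching response arrives or
 * the timeout fires.  A typical call, as used elsewhere in this file:
 *
 *	rc = nci_request(ndev, nci_rf_deactivate_req, 0,
 *		msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
 *
 * The opt argument is passed through untouched to the callback (e.g. the
 * protocol bitmask handed to nci_rf_discover_req()).
 */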
static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
{
	nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL);
}
static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
{
	nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
}
static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_core_conn_create_cmd conn_cmd;
	struct nci_rf_disc_map_cmd cmd;
	struct disc_map_config *cfg = cmd.mapping_configs;
	__u8 *num = &cmd.num_mapping_configs;
	int i;

	/* create static rf connection */
	conn_cmd.target_handle = 0;
	conn_cmd.num_target_specific_params = 0;
	nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd);

	/* set rf mapping configurations */
	*num = 0;

	/* by default mapping is set to NCI_RF_INTERFACE_FRAME */
	for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
		if (ndev->supported_rf_interfaces[i] ==
			NCI_RF_INTERFACE_ISO_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
			cfg[*num].rf_interface_type = NCI_RF_INTERFACE_ISO_DEP;
			(*num)++;
		} else if (ndev->supported_rf_interfaces[i] ==
			NCI_RF_INTERFACE_NFC_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
			cfg[*num].rf_interface_type = NCI_RF_INTERFACE_NFC_DEP;
			(*num)++;
		}

		if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
			break;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
		(1 + ((*num) * sizeof(struct disc_map_config))),
		&cmd);
}
static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_cmd cmd;
	__u32 protocols = opt;

	cmd.num_disc_configs = 0;

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
		(protocols & NFC_PROTO_JEWEL_MASK
		|| protocols & NFC_PROTO_MIFARE_MASK
		|| protocols & NFC_PROTO_ISO14443_MASK
		|| protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].type =
			NCI_DISCOVERY_TYPE_POLL_A_PASSIVE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
		(protocols & NFC_PROTO_ISO14443_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].type =
			NCI_DISCOVERY_TYPE_POLL_B_PASSIVE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
		(protocols & NFC_PROTO_FELICA_MASK
		|| protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].type =
			NCI_DISCOVERY_TYPE_POLL_F_PASSIVE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
		(1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
		&cmd);
}
static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_deactivate_cmd cmd;

	cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;

	nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
			sizeof(struct nci_rf_deactivate_cmd),
			&cmd);
}
static int nci_open_device(struct nci_dev *ndev)
{
	int rc = 0;

	mutex_lock(&ndev->req_lock);

	if (test_bit(NCI_UP, &ndev->flags)) {
		rc = -EALREADY;
		goto done;
	}

	if (ndev->ops->open(ndev)) {
		rc = -EIO;
		goto done;
	}

	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);

	rc = __nci_request(ndev, nci_reset_req, 0,
			msecs_to_jiffies(NCI_RESET_TIMEOUT));

	if (!rc) {
		rc = __nci_request(ndev, nci_init_req, 0,
				msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	if (!rc) {
		rc = __nci_request(ndev, nci_init_complete_req, 0,
				msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	clear_bit(NCI_INIT, &ndev->flags);

	if (!rc) {
		set_bit(NCI_UP, &ndev->flags);
	} else {
		/* Init failed, cleanup */
		skb_queue_purge(&ndev->cmd_q);
		skb_queue_purge(&ndev->rx_q);
		skb_queue_purge(&ndev->tx_q);

		ndev->ops->close(ndev);
		ndev->flags = 0;
	}

done:
	mutex_unlock(&ndev->req_lock);
	return rc;
}
static int nci_close_device(struct nci_dev *ndev)
{
	nci_req_cancel(ndev, ENODEV);
	mutex_lock(&ndev->req_lock);

	if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
		del_timer_sync(&ndev->cmd_timer);
		mutex_unlock(&ndev->req_lock);
		return 0;
	}

	/* Drop RX and TX queues */
	skb_queue_purge(&ndev->rx_q);
	skb_queue_purge(&ndev->tx_q);

	/* Flush RX and TX wq */
	flush_workqueue(ndev->rx_wq);
	flush_workqueue(ndev->tx_wq);

	/* Reset device */
	skb_queue_purge(&ndev->cmd_q);
	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);
	__nci_request(ndev, nci_reset_req, 0,
			msecs_to_jiffies(NCI_RESET_TIMEOUT));
	clear_bit(NCI_INIT, &ndev->flags);

	/* Flush cmd wq */
	flush_workqueue(ndev->cmd_wq);

	/* After this point our queues are empty
	 * and no works are scheduled. */
	ndev->ops->close(ndev);

	/* Clear flags */
	ndev->flags = 0;

	mutex_unlock(&ndev->req_lock);

	return 0;
}
/* NCI command timer function */
static void nci_cmd_timer(unsigned long arg)
{
	struct nci_dev *ndev = (void *) arg;

	atomic_set(&ndev->cmd_cnt, 1);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);
}
static int nci_dev_up(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_open_device(ndev);
}
static int nci_dev_down(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_close_device(ndev);
}
static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
		nfc_err("unable to start poll, since poll is already active");
		return -EBUSY;
	}

	if (ndev->target_active_prot) {
		nfc_err("there is an active target");
		return -EBUSY;
	}

	if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
		nfc_dbg("target is active, implicitly deactivate...");

		rc = nci_request(ndev, nci_rf_deactivate_req, 0,
			msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
		if (rc)
			return -EBUSY;
	}

	rc = nci_request(ndev, nci_rf_discover_req, protocols,
		msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));

	if (!rc)
		ndev->poll_prots = protocols;

	return rc;
}
static void nci_stop_poll(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
		nfc_err("unable to stop poll, since poll is not active");
		return;
	}

	nci_request(ndev, nci_rf_deactivate_req, 0,
		msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}
static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
				__u32 protocol)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx, protocol);

	if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
		nfc_err("there is no available target to activate");
		return -EINVAL;
	}

	if (ndev->target_active_prot) {
		nfc_err("there is already an active target");
		return -EBUSY;
	}

	if (!(ndev->target_available_prots & (1 << protocol))) {
		nfc_err("target does not support the requested protocol 0x%x",
			protocol);
		return -EINVAL;
	}

	ndev->target_active_prot = protocol;
	ndev->target_available_prots = 0;

	return 0;
}
static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	nfc_dbg("entry, target_idx %d", target_idx);

	if (!ndev->target_active_prot) {
		nfc_err("unable to deactivate target, no active target");
		return;
	}

	ndev->target_active_prot = 0;

	if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
		nci_request(ndev, nci_rf_deactivate_req, 0,
			msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
	}
}
static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
					struct sk_buff *skb,
					data_exchange_cb_t cb,
					void *cb_context)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	nfc_dbg("entry, target_idx %d, len %d", target_idx, skb->len);

	if (!ndev->target_active_prot) {
		nfc_err("unable to exchange data, no active target");
		return -EINVAL;
	}

	if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
		return -EBUSY;

	/* store cb and context to be used on receiving data */
	ndev->data_exchange_cb = cb;
	ndev->data_exchange_cb_context = cb_context;

	rc = nci_send_data(ndev, ndev->conn_id, skb);
	if (rc)
		clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);

	return rc;
}
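
/*
 * Illustrative note: the cb/cb_context pair saved above is invoked from
 * the NCI RX path once the response (or an error) comes back, conceptually
 * cb(cb_context, resp_skb, err).  Only one exchange may be pending at a
 * time, which is what the NCI_DATA_EXCHANGE flag enforces.
 */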
static struct nfc_ops nci_nfc_ops = {
	.dev_up = nci_dev_up,
	.dev_down = nci_dev_down,
	.start_poll = nci_start_poll,
	.stop_poll = nci_stop_poll,
	.activate_target = nci_activate_target,
	.deactivate_target = nci_deactivate_target,
	.data_exchange = nci_data_exchange,
};

/* ---- Interface to NCI drivers ---- */
/**
 * nci_allocate_device - allocate a new nci device
 *
 * @ops: device operations
 * @supported_protocols: NFC protocols supported by the device
 */
struct nci_dev *nci_allocate_device(struct nci_ops *ops,
					__u32 supported_protocols,
					int tx_headroom,
					int tx_tailroom)
{
	struct nci_dev *ndev;

	nfc_dbg("entry, supported_protocols 0x%x", supported_protocols);

	if (!ops->open || !ops->close || !ops->send)
		return NULL;

	if (!supported_protocols)
		return NULL;

	ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
	if (!ndev)
		return NULL;

	ndev->ops = ops;
	ndev->tx_headroom = tx_headroom;
	ndev->tx_tailroom = tx_tailroom;

	ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
					supported_protocols,
					tx_headroom + NCI_DATA_HDR_SIZE,
					tx_tailroom);
	if (!ndev->nfc_dev)
		goto free_exit;

	nfc_set_drvdata(ndev->nfc_dev, ndev);

	return ndev;

free_exit:
	kfree(ndev);
	return NULL;
}
EXPORT_SYMBOL(nci_allocate_device);
/**
 * nci_free_device - deallocate nci device
 *
 * @ndev: The nci device to deallocate
 */
void nci_free_device(struct nci_dev *ndev)
{
	nfc_free_device(ndev->nfc_dev);
	kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);
/**
 * nci_register_device - register a nci device in the nfc subsystem
 *
 * @ndev: The nci device to register
 */
int nci_register_device(struct nci_dev *ndev)
{
	int rc;
	struct device *dev = &ndev->nfc_dev->dev;
	char name[32];

	rc = nfc_register_device(ndev->nfc_dev);
	if (rc)
		goto exit;

	INIT_WORK(&ndev->cmd_work, nci_cmd_work);
	snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
	ndev->cmd_wq = create_singlethread_workqueue(name);
	if (!ndev->cmd_wq) {
		rc = -ENOMEM;
		goto unreg_exit;
	}

	INIT_WORK(&ndev->rx_work, nci_rx_work);
	snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
	ndev->rx_wq = create_singlethread_workqueue(name);
	if (!ndev->rx_wq) {
		rc = -ENOMEM;
		goto destroy_cmd_wq_exit;
	}

	INIT_WORK(&ndev->tx_work, nci_tx_work);
	snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
	ndev->tx_wq = create_singlethread_workqueue(name);
	if (!ndev->tx_wq) {
		rc = -ENOMEM;
		goto destroy_rx_wq_exit;
	}

	skb_queue_head_init(&ndev->cmd_q);
	skb_queue_head_init(&ndev->rx_q);
	skb_queue_head_init(&ndev->tx_q);

	setup_timer(&ndev->cmd_timer, nci_cmd_timer,
			(unsigned long) ndev);

	mutex_init(&ndev->req_lock);

	goto exit;

destroy_rx_wq_exit:
	destroy_workqueue(ndev->rx_wq);

destroy_cmd_wq_exit:
	destroy_workqueue(ndev->cmd_wq);

unreg_exit:
	nfc_unregister_device(ndev->nfc_dev);

exit:
	return rc;
}
EXPORT_SYMBOL(nci_register_device);
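
/*
 * Illustrative driver-side sketch (not part of this file): a transport
 * driver typically pairs the exported setup calls above, e.g.
 *
 *	ndev = nci_allocate_device(&my_nci_ops, protocols,
 *			MY_HDR_LEN, MY_TAIL_LEN);
 *	if (!ndev)
 *		return -ENOMEM;
 *	rc = nci_register_device(ndev);
 *	if (rc)
 *		nci_free_device(ndev);
 *
 * my_nci_ops, MY_HDR_LEN and MY_TAIL_LEN are placeholder names; the ops
 * struct must provide at least open, close and send, as checked in
 * nci_allocate_device().
 */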
/**
 * nci_unregister_device - unregister a nci device in the nfc subsystem
 *
 * @ndev: The nci device to unregister
 */
void nci_unregister_device(struct nci_dev *ndev)
{
	nci_close_device(ndev);

	destroy_workqueue(ndev->cmd_wq);
	destroy_workqueue(ndev->rx_wq);
	destroy_workqueue(ndev->tx_wq);

	nfc_unregister_device(ndev->nfc_dev);
}
EXPORT_SYMBOL(nci_unregister_device);
/**
 * nci_recv_frame - receive frame from NCI drivers
 *
 * @skb: The sk_buff to receive
 */
int nci_recv_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	nfc_dbg("entry, len %d", skb->len);

	if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
		&& !test_bit(NCI_INIT, &ndev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Queue frame for rx worker thread */
	skb_queue_tail(&ndev->rx_q, skb);
	queue_work(ndev->rx_wq, &ndev->rx_work);

	return 0;
}
EXPORT_SYMBOL(nci_recv_frame);
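
/*
 * Illustrative driver-side sketch (not part of this file): before calling
 * nci_recv_frame(), the transport driver is expected to stash its
 * struct nci_dev pointer in skb->dev, mirroring what nci_send_cmd() does
 * on the TX side, e.g.
 *
 *	skb = nci_skb_alloc(ndev, frame_len, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, frame_len), frame_buf, frame_len);
 *	skb->dev = (void *) ndev;
 *	nci_recv_frame(skb);
 *
 * frame_buf and frame_len are placeholder names for the raw bytes read
 * from the controller.
 */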
static int nci_send_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	nfc_dbg("entry, len %d", skb->len);

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return ndev->ops->send(skb);
}
/* Send NCI command */
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
{
	struct nci_ctrl_hdr *hdr;
	struct sk_buff *skb;

	nfc_dbg("entry, opcode 0x%x, plen %d", opcode, plen);

	skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
	if (!skb) {
		nfc_err("no memory for command");
		return -ENOMEM;
	}

	hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
	hdr->gid = nci_opcode_gid(opcode);
	hdr->oid = nci_opcode_oid(opcode);
	hdr->plen = plen;

	nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
	nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);

	if (plen)
		memcpy(skb_put(skb, plen), payload, plen);

	skb->dev = (void *) ndev;

	skb_queue_tail(&ndev->cmd_q, skb);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);

	return 0;
}
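
/*
 * Illustrative note on the header built above: per the NCI control packet
 * format, the first 3 octets carry the message type (MT), packet boundary
 * flag (PBF) and GID in octet 0, the OID in octet 1, and the payload
 * length in octet 2.  That is why nci_mt_set()/nci_pbf_set() patch the
 * first octet while gid, oid and plen are plain struct fields here.
 */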
/* ---- NCI TX Data worker thread ---- */

static void nci_tx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
	struct sk_buff *skb;

	nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev->credits_cnt));

	/* Send queued tx data */
	while (atomic_read(&ndev->credits_cnt)) {
		skb = skb_dequeue(&ndev->tx_q);
		if (!skb)
			return;

		atomic_dec(&ndev->credits_cnt);

		nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
				nci_pbf(skb->data),
				nci_conn_id(skb->data),
				nci_plen(skb->data));

		nci_send_frame(skb);
	}
}
/* ----- NCI RX worker thread (data & control) ----- */

static void nci_rx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ndev->rx_q))) {
		/* Process frame */
		switch (nci_mt(skb->data)) {
		case NCI_MT_RSP_PKT:
			nci_rsp_packet(ndev, skb);
			break;

		case NCI_MT_NTF_PKT:
			nci_ntf_packet(ndev, skb);
			break;

		case NCI_MT_DATA_PKT:
			nci_rx_data_packet(ndev, skb);
			break;

		default:
			nfc_err("unknown MT 0x%x", nci_mt(skb->data));
			kfree_skb(skb);
			break;
		}
	}
}
/* ----- NCI TX CMD worker thread ----- */

static void nci_cmd_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
	struct sk_buff *skb;

	nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev->cmd_cnt));

	/* Send queued command */
	if (atomic_read(&ndev->cmd_cnt)) {
		skb = skb_dequeue(&ndev->cmd_q);
		if (!skb)
			return;

		atomic_dec(&ndev->cmd_cnt);

		nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
				nci_pbf(skb->data),
				nci_opcode_gid(nci_opcode(skb->data)),
				nci_opcode_oid(nci_opcode(skb->data)),
				nci_plen(skb->data));

		nci_send_frame(skb);

		mod_timer(&ndev->cmd_timer,
			jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
	}
}