net/nfc/nci/core.c
/*
 * The NFC Controller Interface is the communication protocol between an
 * NFC Controller (NFCC) and a Device Host (DH).
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * Written by Ilan Elias <ilane@ti.com>
 *
 * Acknowledgements:
 * This file is based on hci_core.c, which was written
 * by Maxim Krasnyansky.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__

#include <linux/module.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>

#include "../nfc.h"
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
#include <linux/nfc.h>
static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);

/* ---- NCI requests ---- */

void nci_req_complete(struct nci_dev *ndev, int result)
{
        if (ndev->req_status == NCI_REQ_PEND) {
                ndev->req_result = result;
                ndev->req_status = NCI_REQ_DONE;
                complete(&ndev->req_completion);
        }
}

static void nci_req_cancel(struct nci_dev *ndev, int err)
{
        if (ndev->req_status == NCI_REQ_PEND) {
                ndev->req_result = err;
                ndev->req_status = NCI_REQ_CANCELED;
                complete(&ndev->req_completion);
        }
}
/* Execute request and wait for completion. */
static int __nci_request(struct nci_dev *ndev,
                         void (*req)(struct nci_dev *ndev, unsigned long opt),
                         unsigned long opt, __u32 timeout)
{
        int rc = 0;
        long completion_rc;

        ndev->req_status = NCI_REQ_PEND;

        reinit_completion(&ndev->req_completion);
        req(ndev, opt);
        completion_rc =
                wait_for_completion_interruptible_timeout(&ndev->req_completion,
                                                          timeout);

        pr_debug("wait_for_completion return %ld\n", completion_rc);

        if (completion_rc > 0) {
                switch (ndev->req_status) {
                case NCI_REQ_DONE:
                        rc = nci_to_errno(ndev->req_result);
                        break;

                case NCI_REQ_CANCELED:
                        rc = -ndev->req_result;
                        break;

                default:
                        rc = -ETIMEDOUT;
                        break;
                }
        } else {
                pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
                       completion_rc);

                rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
        }

        ndev->req_status = ndev->req_result = 0;

        return rc;
}
static inline int nci_request(struct nci_dev *ndev,
                              void (*req)(struct nci_dev *ndev,
                                          unsigned long opt),
                              unsigned long opt, __u32 timeout)
{
        int rc;

        if (!test_bit(NCI_UP, &ndev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        mutex_lock(&ndev->req_lock);
        rc = __nci_request(ndev, req, opt, timeout);
        mutex_unlock(&ndev->req_lock);

        return rc;
}
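/*
 * Illustrative sketch, not part of this file: how the request machinery
 * above is used.  A request callback only queues an NCI command; the
 * matching response/notification handler later calls nci_req_complete(),
 * which wakes the waiter in __nci_request().  The callback name below is
 * hypothetical.
 *
 *	static void my_example_req(struct nci_dev *ndev, unsigned long opt)
 *	{
 *		nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
 *	}
 *
 *	rc = nci_request(ndev, my_example_req, 0,
 *			 msecs_to_jiffies(NCI_INIT_TIMEOUT));
 */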
static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
{
        struct nci_core_reset_cmd cmd;

        cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
        nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
}

static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
{
        nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
}

static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
{
        struct nci_rf_disc_map_cmd cmd;
        struct disc_map_config *cfg = cmd.mapping_configs;
        __u8 *num = &cmd.num_mapping_configs;
        int i;

        /* set rf mapping configurations */
        *num = 0;

        /* by default mapping is set to NCI_RF_INTERFACE_FRAME */
        for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
                if (ndev->supported_rf_interfaces[i] ==
                    NCI_RF_INTERFACE_ISO_DEP) {
                        cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
                        cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
                                NCI_DISC_MAP_MODE_LISTEN;
                        cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP;
                        (*num)++;
                } else if (ndev->supported_rf_interfaces[i] ==
                           NCI_RF_INTERFACE_NFC_DEP) {
                        cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
                        cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
                                NCI_DISC_MAP_MODE_LISTEN;
                        cfg[*num].rf_interface = NCI_RF_INTERFACE_NFC_DEP;
                        (*num)++;
                }

                if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
                        break;
        }

        nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
                     (1 + ((*num) * sizeof(struct disc_map_config))), &cmd);
}
struct nci_set_config_param {
        __u8    id;
        size_t  len;
        __u8    *val;
};

static void nci_set_config_req(struct nci_dev *ndev, unsigned long opt)
{
        struct nci_set_config_param *param = (struct nci_set_config_param *)opt;
        struct nci_core_set_config_cmd cmd;

        BUG_ON(param->len > NCI_MAX_PARAM_LEN);

        cmd.num_params = 1;
        cmd.param.id = param->id;
        cmd.param.len = param->len;
        memcpy(cmd.param.val, param->val, param->len);

        nci_send_cmd(ndev, NCI_OP_CORE_SET_CONFIG_CMD, (3 + param->len), &cmd);
}
static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
{
        struct nci_rf_disc_cmd cmd;
        __u32 protocols = opt;

        cmd.num_disc_configs = 0;

        if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
            (protocols & NFC_PROTO_JEWEL_MASK ||
             protocols & NFC_PROTO_MIFARE_MASK ||
             protocols & NFC_PROTO_ISO14443_MASK ||
             protocols & NFC_PROTO_NFC_DEP_MASK)) {
                cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
                        NCI_NFC_A_PASSIVE_POLL_MODE;
                cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
                cmd.num_disc_configs++;
        }

        if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
            (protocols & NFC_PROTO_ISO14443_B_MASK)) {
                cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
                        NCI_NFC_B_PASSIVE_POLL_MODE;
                cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
                cmd.num_disc_configs++;
        }

        if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
            (protocols & NFC_PROTO_FELICA_MASK ||
             protocols & NFC_PROTO_NFC_DEP_MASK)) {
                cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
                        NCI_NFC_F_PASSIVE_POLL_MODE;
                cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
                cmd.num_disc_configs++;
        }

        if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
            (protocols & NFC_PROTO_ISO15693_MASK)) {
                cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
                        NCI_NFC_V_PASSIVE_POLL_MODE;
                cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
                cmd.num_disc_configs++;
        }

        nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
                     (1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
                     &cmd);
}
struct nci_rf_discover_select_param {
        __u8    rf_discovery_id;
        __u8    rf_protocol;
};

static void nci_rf_discover_select_req(struct nci_dev *ndev, unsigned long opt)
{
        struct nci_rf_discover_select_param *param =
                (struct nci_rf_discover_select_param *)opt;
        struct nci_rf_discover_select_cmd cmd;

        cmd.rf_discovery_id = param->rf_discovery_id;
        cmd.rf_protocol = param->rf_protocol;

        switch (cmd.rf_protocol) {
        case NCI_RF_PROTOCOL_ISO_DEP:
                cmd.rf_interface = NCI_RF_INTERFACE_ISO_DEP;
                break;

        case NCI_RF_PROTOCOL_NFC_DEP:
                cmd.rf_interface = NCI_RF_INTERFACE_NFC_DEP;
                break;

        default:
                cmd.rf_interface = NCI_RF_INTERFACE_FRAME;
                break;
        }

        nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_SELECT_CMD,
                     sizeof(struct nci_rf_discover_select_cmd), &cmd);
}

static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
{
        struct nci_rf_deactivate_cmd cmd;

        cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;

        nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
                     sizeof(struct nci_rf_deactivate_cmd), &cmd);
}
static int nci_open_device(struct nci_dev *ndev)
{
        int rc = 0;

        mutex_lock(&ndev->req_lock);

        if (test_bit(NCI_UP, &ndev->flags)) {
                rc = -EALREADY;
                goto done;
        }

        if (ndev->ops->open(ndev)) {
                rc = -EIO;
                goto done;
        }

        atomic_set(&ndev->cmd_cnt, 1);

        set_bit(NCI_INIT, &ndev->flags);

        rc = __nci_request(ndev, nci_reset_req, 0,
                           msecs_to_jiffies(NCI_RESET_TIMEOUT));

        if (ndev->ops->setup)
                ndev->ops->setup(ndev);

        if (!rc) {
                rc = __nci_request(ndev, nci_init_req, 0,
                                   msecs_to_jiffies(NCI_INIT_TIMEOUT));
        }

        if (!rc) {
                rc = __nci_request(ndev, nci_init_complete_req, 0,
                                   msecs_to_jiffies(NCI_INIT_TIMEOUT));
        }

        clear_bit(NCI_INIT, &ndev->flags);

        if (!rc) {
                set_bit(NCI_UP, &ndev->flags);
                nci_clear_target_list(ndev);
                atomic_set(&ndev->state, NCI_IDLE);
        } else {
                /* Init failed, cleanup */
                skb_queue_purge(&ndev->cmd_q);
                skb_queue_purge(&ndev->rx_q);
                skb_queue_purge(&ndev->tx_q);

                ndev->ops->close(ndev);
                ndev->flags = 0;
        }

done:
        mutex_unlock(&ndev->req_lock);
        return rc;
}
static int nci_close_device(struct nci_dev *ndev)
{
        nci_req_cancel(ndev, ENODEV);
        mutex_lock(&ndev->req_lock);

        if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
                del_timer_sync(&ndev->cmd_timer);
                del_timer_sync(&ndev->data_timer);
                mutex_unlock(&ndev->req_lock);
                return 0;
        }

        /* Drop RX and TX queues */
        skb_queue_purge(&ndev->rx_q);
        skb_queue_purge(&ndev->tx_q);

        /* Flush RX and TX wq */
        flush_workqueue(ndev->rx_wq);
        flush_workqueue(ndev->tx_wq);

        /* Reset device */
        skb_queue_purge(&ndev->cmd_q);
        atomic_set(&ndev->cmd_cnt, 1);

        set_bit(NCI_INIT, &ndev->flags);
        __nci_request(ndev, nci_reset_req, 0,
                      msecs_to_jiffies(NCI_RESET_TIMEOUT));
        clear_bit(NCI_INIT, &ndev->flags);

        del_timer_sync(&ndev->cmd_timer);

        /* Flush cmd wq */
        flush_workqueue(ndev->cmd_wq);

        /* After this point our queues are empty
         * and no works are scheduled.
         */
        ndev->ops->close(ndev);

        /* Clear flags */
        ndev->flags = 0;

        mutex_unlock(&ndev->req_lock);

        return 0;
}
/* NCI command timer function */
static void nci_cmd_timer(unsigned long arg)
{
        struct nci_dev *ndev = (void *) arg;

        atomic_set(&ndev->cmd_cnt, 1);
        queue_work(ndev->cmd_wq, &ndev->cmd_work);
}

/* NCI data exchange timer function */
static void nci_data_timer(unsigned long arg)
{
        struct nci_dev *ndev = (void *) arg;

        set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
        queue_work(ndev->rx_wq, &ndev->rx_work);
}

static int nci_dev_up(struct nfc_dev *nfc_dev)
{
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

        return nci_open_device(ndev);
}

static int nci_dev_down(struct nfc_dev *nfc_dev)
{
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

        return nci_close_device(ndev);
}
int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val)
{
        struct nci_set_config_param param;

        if (!val || !len)
                return 0;

        param.id = id;
        param.len = len;
        param.val = val;

        return __nci_request(ndev, nci_set_config_req, (unsigned long)&param,
                             msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
}
EXPORT_SYMBOL(nci_set_config);
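/*
 * Illustrative sketch, not part of this file: a vendor driver using the
 * exported nci_set_config() helper after init.  The parameter id and value
 * below are placeholders for this example, not real NCI definitions.
 *
 *	__u8 val = 0x01;
 *	int rc;
 *
 *	rc = nci_set_config(ndev, 0x00, 1, &val);
 *	if (rc)
 *		pr_err("failed to set config param (%d)\n", rc);
 */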
static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev)
{
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
        struct nci_set_config_param param;

        param.val = nfc_get_local_general_bytes(nfc_dev, &param.len);
        if ((param.val == NULL) || (param.len == 0))
                return 0;

        if (param.len > NFC_MAX_GT_LEN)
                return -EINVAL;

        param.id = NCI_PN_ATR_REQ_GEN_BYTES;

        return nci_request(ndev, nci_set_config_req, (unsigned long)&param,
                           msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
}
static int nci_start_poll(struct nfc_dev *nfc_dev,
                          __u32 im_protocols, __u32 tm_protocols)
{
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
        int rc;

        if ((atomic_read(&ndev->state) == NCI_DISCOVERY) ||
            (atomic_read(&ndev->state) == NCI_W4_ALL_DISCOVERIES)) {
                pr_err("unable to start poll, since poll is already active\n");
                return -EBUSY;
        }

        if (ndev->target_active_prot) {
                pr_err("there is an active target\n");
                return -EBUSY;
        }

        if ((atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) ||
            (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) {
                pr_debug("target active or w4 select, implicitly deactivate\n");

                rc = nci_request(ndev, nci_rf_deactivate_req, 0,
                                 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
                if (rc)
                        return -EBUSY;
        }

        if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
                rc = nci_set_local_general_bytes(nfc_dev);
                if (rc) {
                        pr_err("failed to set local general bytes\n");
                        return rc;
                }
        }

        rc = nci_request(ndev, nci_rf_discover_req, im_protocols,
                         msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));

        if (!rc)
                ndev->poll_prots = im_protocols;

        return rc;
}
static void nci_stop_poll(struct nfc_dev *nfc_dev)
{
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

        if ((atomic_read(&ndev->state) != NCI_DISCOVERY) &&
            (atomic_read(&ndev->state) != NCI_W4_ALL_DISCOVERIES)) {
                pr_err("unable to stop poll, since poll is not active\n");
                return;
        }

        nci_request(ndev, nci_rf_deactivate_req, 0,
                    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}
static int nci_activate_target(struct nfc_dev *nfc_dev,
                               struct nfc_target *target, __u32 protocol)
{
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
        struct nci_rf_discover_select_param param;
        struct nfc_target *nci_target = NULL;
        int i;
        int rc = 0;

        pr_debug("target_idx %d, protocol 0x%x\n", target->idx, protocol);

        if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
            (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
                pr_err("there is no available target to activate\n");
                return -EINVAL;
        }

        if (ndev->target_active_prot) {
                pr_err("there is already an active target\n");
                return -EBUSY;
        }

        for (i = 0; i < ndev->n_targets; i++) {
                if (ndev->targets[i].idx == target->idx) {
                        nci_target = &ndev->targets[i];
                        break;
                }
        }

        if (!nci_target) {
                pr_err("unable to find the selected target\n");
                return -EINVAL;
        }

        if (!(nci_target->supported_protocols & (1 << protocol))) {
                pr_err("target does not support the requested protocol 0x%x\n",
                       protocol);
                return -EINVAL;
        }

        if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
                param.rf_discovery_id = nci_target->logical_idx;

                if (protocol == NFC_PROTO_JEWEL)
                        param.rf_protocol = NCI_RF_PROTOCOL_T1T;
                else if (protocol == NFC_PROTO_MIFARE)
                        param.rf_protocol = NCI_RF_PROTOCOL_T2T;
                else if (protocol == NFC_PROTO_FELICA)
                        param.rf_protocol = NCI_RF_PROTOCOL_T3T;
                else if (protocol == NFC_PROTO_ISO14443 ||
                         protocol == NFC_PROTO_ISO14443_B)
                        param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
                else
                        param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;

                rc = nci_request(ndev, nci_rf_discover_select_req,
                                 (unsigned long)&param,
                                 msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT));
        }

        if (!rc)
                ndev->target_active_prot = protocol;

        return rc;
}
static void nci_deactivate_target(struct nfc_dev *nfc_dev,
                                  struct nfc_target *target)
{
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

        pr_debug("entry\n");

        if (!ndev->target_active_prot) {
                pr_err("unable to deactivate target, no active target\n");
                return;
        }

        ndev->target_active_prot = 0;

        if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
                nci_request(ndev, nci_rf_deactivate_req, 0,
                            msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
        }
}
static int nci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
                           __u8 comm_mode, __u8 *gb, size_t gb_len)
{
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
        int rc;

        pr_debug("target_idx %d, comm_mode %d\n", target->idx, comm_mode);

        rc = nci_activate_target(nfc_dev, target, NFC_PROTO_NFC_DEP);
        if (rc)
                return rc;

        rc = nfc_set_remote_general_bytes(nfc_dev, ndev->remote_gb,
                                          ndev->remote_gb_len);
        if (!rc)
                rc = nfc_dep_link_is_up(nfc_dev, target->idx, NFC_COMM_PASSIVE,
                                        NFC_RF_INITIATOR);

        return rc;
}

static int nci_dep_link_down(struct nfc_dev *nfc_dev)
{
        pr_debug("entry\n");

        nci_deactivate_target(nfc_dev, NULL);

        return 0;
}
static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
                          struct sk_buff *skb,
                          data_exchange_cb_t cb, void *cb_context)
{
        struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
        int rc;

        pr_debug("target_idx %d, len %d\n", target->idx, skb->len);

        if (!ndev->target_active_prot) {
                pr_err("unable to exchange data, no active target\n");
                return -EINVAL;
        }

        if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
                return -EBUSY;

        /* store cb and context to be used on receiving data */
        ndev->data_exchange_cb = cb;
        ndev->data_exchange_cb_context = cb_context;

        rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
        if (rc)
                clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);

        return rc;
}

static int nci_enable_se(struct nfc_dev *nfc_dev, u32 se_idx)
{
        return 0;
}

static int nci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx)
{
        return 0;
}

static int nci_discover_se(struct nfc_dev *nfc_dev)
{
        return 0;
}
static struct nfc_ops nci_nfc_ops = {
        .dev_up = nci_dev_up,
        .dev_down = nci_dev_down,
        .start_poll = nci_start_poll,
        .stop_poll = nci_stop_poll,
        .dep_link_up = nci_dep_link_up,
        .dep_link_down = nci_dep_link_down,
        .activate_target = nci_activate_target,
        .deactivate_target = nci_deactivate_target,
        .im_transceive = nci_transceive,
        .enable_se = nci_enable_se,
        .disable_se = nci_disable_se,
        .discover_se = nci_discover_se,
};
/* ---- Interface to NCI drivers ---- */

/**
 * nci_allocate_device - allocate a new nci device
 *
 * @ops: device operations
 * @supported_protocols: NFC protocols supported by the device
 */
struct nci_dev *nci_allocate_device(struct nci_ops *ops,
                                    __u32 supported_protocols,
                                    int tx_headroom, int tx_tailroom)
{
        struct nci_dev *ndev;

        pr_debug("supported_protocols 0x%x\n", supported_protocols);

        if (!ops->open || !ops->close || !ops->send)
                return NULL;

        if (!supported_protocols)
                return NULL;

        ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
        if (!ndev)
                return NULL;

        ndev->ops = ops;
        ndev->tx_headroom = tx_headroom;
        ndev->tx_tailroom = tx_tailroom;
        init_completion(&ndev->req_completion);

        ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
                                            supported_protocols,
                                            tx_headroom + NCI_DATA_HDR_SIZE,
                                            tx_tailroom);
        if (!ndev->nfc_dev)
                goto free_exit;

        nfc_set_drvdata(ndev->nfc_dev, ndev);

        return ndev;

free_exit:
        kfree(ndev);
        return NULL;
}
EXPORT_SYMBOL(nci_allocate_device);
/**
 * nci_free_device - deallocate nci device
 *
 * @ndev: The nci device to deallocate
 */
void nci_free_device(struct nci_dev *ndev)
{
        nfc_free_device(ndev->nfc_dev);
        kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);
/**
 * nci_register_device - register a nci device in the nfc subsystem
 *
 * @ndev: The nci device to register
 */
int nci_register_device(struct nci_dev *ndev)
{
        int rc;
        struct device *dev = &ndev->nfc_dev->dev;
        char name[32];

        ndev->flags = 0;

        INIT_WORK(&ndev->cmd_work, nci_cmd_work);
        snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
        ndev->cmd_wq = create_singlethread_workqueue(name);
        if (!ndev->cmd_wq) {
                rc = -ENOMEM;
                goto exit;
        }

        INIT_WORK(&ndev->rx_work, nci_rx_work);
        snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
        ndev->rx_wq = create_singlethread_workqueue(name);
        if (!ndev->rx_wq) {
                rc = -ENOMEM;
                goto destroy_cmd_wq_exit;
        }

        INIT_WORK(&ndev->tx_work, nci_tx_work);
        snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
        ndev->tx_wq = create_singlethread_workqueue(name);
        if (!ndev->tx_wq) {
                rc = -ENOMEM;
                goto destroy_rx_wq_exit;
        }

        skb_queue_head_init(&ndev->cmd_q);
        skb_queue_head_init(&ndev->rx_q);
        skb_queue_head_init(&ndev->tx_q);

        setup_timer(&ndev->cmd_timer, nci_cmd_timer,
                    (unsigned long) ndev);
        setup_timer(&ndev->data_timer, nci_data_timer,
                    (unsigned long) ndev);

        mutex_init(&ndev->req_lock);

        rc = nfc_register_device(ndev->nfc_dev);
        if (rc)
                goto destroy_tx_wq_exit;

        goto exit;

destroy_tx_wq_exit:
        destroy_workqueue(ndev->tx_wq);

destroy_rx_wq_exit:
        destroy_workqueue(ndev->rx_wq);

destroy_cmd_wq_exit:
        destroy_workqueue(ndev->cmd_wq);

exit:
        return rc;
}
EXPORT_SYMBOL(nci_register_device);
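/*
 * Illustrative sketch, not part of this file: the minimal shape of a driver
 * registering with this NCI core.  The names example_ops and
 * example_nci_open/close/send are hypothetical; a real driver would also
 * wire up its transport (SPI, I2C, UART, ...) and feed received packets to
 * nci_recv_frame().
 *
 *	static struct nci_ops example_ops = {
 *		.open  = example_nci_open,
 *		.close = example_nci_close,
 *		.send  = example_nci_send,
 *	};
 *
 *	ndev = nci_allocate_device(&example_ops, NFC_PROTO_ISO14443_MASK,
 *				   0, 0);
 *	if (!ndev)
 *		return -ENOMEM;
 *
 *	rc = nci_register_device(ndev);
 *	if (rc)
 *		nci_free_device(ndev);
 */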
/**
 * nci_unregister_device - unregister a nci device in the nfc subsystem
 *
 * @ndev: The nci device to unregister
 */
void nci_unregister_device(struct nci_dev *ndev)
{
        nci_close_device(ndev);

        destroy_workqueue(ndev->cmd_wq);
        destroy_workqueue(ndev->rx_wq);
        destroy_workqueue(ndev->tx_wq);

        nfc_unregister_device(ndev->nfc_dev);
}
EXPORT_SYMBOL(nci_unregister_device);
/**
 * nci_recv_frame - receive frame from NCI drivers
 *
 * @ndev: The nci device
 * @skb: The sk_buff to receive
 */
int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
{
        pr_debug("len %d\n", skb->len);

        if (!ndev || (!test_bit(NCI_UP, &ndev->flags) &&
            !test_bit(NCI_INIT, &ndev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Queue frame for rx worker thread */
        skb_queue_tail(&ndev->rx_q, skb);
        queue_work(ndev->rx_wq, &ndev->rx_work);

        return 0;
}
EXPORT_SYMBOL(nci_recv_frame);
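/*
 * Illustrative sketch, not part of this file: a driver's RX path handing a
 * received NCI packet to the core.  The buffer and length below are
 * placeholders for this example.
 *
 *	skb = alloc_skb(len, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, len), buf, len);
 *	rc = nci_recv_frame(ndev, skb);
 */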
static int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb)
{
        pr_debug("len %d\n", skb->len);

        if (!ndev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        /* Send copy to sniffer */
        nfc_send_to_raw_sock(ndev->nfc_dev, skb,
                             RAW_PAYLOAD_NCI, NFC_DIRECTION_TX);

        return ndev->ops->send(ndev, skb);
}
/* Send NCI command */
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
{
        struct nci_ctrl_hdr *hdr;
        struct sk_buff *skb;

        pr_debug("opcode 0x%x, plen %d\n", opcode, plen);

        skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
        if (!skb) {
                pr_err("no memory for command\n");
                return -ENOMEM;
        }

        hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
        hdr->gid = nci_opcode_gid(opcode);
        hdr->oid = nci_opcode_oid(opcode);
        hdr->plen = plen;

        nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
        nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);

        if (plen)
                memcpy(skb_put(skb, plen), payload, plen);

        skb_queue_tail(&ndev->cmd_q, skb);
        queue_work(ndev->cmd_wq, &ndev->cmd_work);

        return 0;
}
/* ---- NCI TX Data worker thread ---- */

static void nci_tx_work(struct work_struct *work)
{
        struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
        struct sk_buff *skb;

        pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));

        /* Send queued tx data */
        while (atomic_read(&ndev->credits_cnt)) {
                skb = skb_dequeue(&ndev->tx_q);
                if (!skb)
                        return;

                /* Check if data flow control is used */
                if (atomic_read(&ndev->credits_cnt) !=
                    NCI_DATA_FLOW_CONTROL_NOT_USED)
                        atomic_dec(&ndev->credits_cnt);

                pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
                         nci_pbf(skb->data),
                         nci_conn_id(skb->data),
                         nci_plen(skb->data));

                nci_send_frame(ndev, skb);

                mod_timer(&ndev->data_timer,
                          jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
        }
}
/* ----- NCI RX worker thread (data & control) ----- */

static void nci_rx_work(struct work_struct *work)
{
        struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&ndev->rx_q))) {

                /* Send copy to sniffer */
                nfc_send_to_raw_sock(ndev->nfc_dev, skb,
                                     RAW_PAYLOAD_NCI, NFC_DIRECTION_RX);

                /* Process frame */
                switch (nci_mt(skb->data)) {
                case NCI_MT_RSP_PKT:
                        nci_rsp_packet(ndev, skb);
                        break;

                case NCI_MT_NTF_PKT:
                        nci_ntf_packet(ndev, skb);
                        break;

                case NCI_MT_DATA_PKT:
                        nci_rx_data_packet(ndev, skb);
                        break;

                default:
                        pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
                        kfree_skb(skb);
                        break;
                }
        }

        /* check if a data exchange timeout has occurred */
        if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) {
                /* complete the data exchange transaction, if exists */
                if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
                        nci_data_exchange_complete(ndev, NULL, -ETIMEDOUT);

                clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
        }
}
/* ----- NCI TX CMD worker thread ----- */

static void nci_cmd_work(struct work_struct *work)
{
        struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
        struct sk_buff *skb;

        pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));

        /* Send queued command */
        if (atomic_read(&ndev->cmd_cnt)) {
                skb = skb_dequeue(&ndev->cmd_q);
                if (!skb)
                        return;

                atomic_dec(&ndev->cmd_cnt);

                pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
                         nci_pbf(skb->data),
                         nci_opcode_gid(nci_opcode(skb->data)),
                         nci_opcode_oid(nci_opcode(skb->data)),
                         nci_plen(skb->data));

                nci_send_frame(ndev, skb);

                mod_timer(&ndev->cmd_timer,
                          jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
        }
}

MODULE_LICENSE("GPL");