/*
 *  The NFC Controller Interface is the communication protocol between an
 *  NFC Controller (NFCC) and a Device Host (DH).
 *
 *  Copyright (C) 2011 Texas Instruments, Inc.
 *
 *  Written by Ilan Elias <ilane@ti.com>
 *
 *  Acknowledgements:
 *  This file is based on hci_core.c, which was written
 *  by Maxim Krasnyansky.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__

#include <linux/module.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>

#include "../nfc.h"
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
#include <linux/nfc.h>
static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);

/* ---- NCI requests ---- */
void nci_req_complete(struct nci_dev *ndev, int result)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = result;
		ndev->req_status = NCI_REQ_DONE;
		complete(&ndev->req_completion);
	}
}
static void nci_req_cancel(struct nci_dev *ndev, int err)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = err;
		ndev->req_status = NCI_REQ_CANCELED;
		complete(&ndev->req_completion);
	}
}
/* Execute request and wait for completion. */
static int __nci_request(struct nci_dev *ndev,
			 void (*req)(struct nci_dev *ndev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	int rc = 0;
	long completion_rc;

	ndev->req_status = NCI_REQ_PEND;

	init_completion(&ndev->req_completion);
	req(ndev, opt);
	completion_rc =
		wait_for_completion_interruptible_timeout(&ndev->req_completion,
							  timeout);

	pr_debug("wait_for_completion return %ld\n", completion_rc);

	if (completion_rc > 0) {
		switch (ndev->req_status) {
		case NCI_REQ_DONE:
			rc = nci_to_errno(ndev->req_result);
			break;

		case NCI_REQ_CANCELED:
			rc = -ndev->req_result;
			break;

		default:
			rc = -ETIMEDOUT;
			break;
		}
	} else {
		pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
		       completion_rc);

		rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
	}

	ndev->req_status = ndev->req_result = 0;

	return rc;
}
static inline int nci_request(struct nci_dev *ndev,
			      void (*req)(struct nci_dev *ndev,
					  unsigned long opt),
			      unsigned long opt, __u32 timeout)
{
	int rc;

	if (!test_bit(NCI_UP, &ndev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	mutex_lock(&ndev->req_lock);
	rc = __nci_request(ndev, req, opt, timeout);
	mutex_unlock(&ndev->req_lock);

	return rc;
}
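
/*
 * Usage sketch (illustrative only, mirroring how the request helpers below
 * are used elsewhere in this file): a request callback merely queues one NCI
 * command; nci_request() runs it under req_lock and sleeps on req_completion
 * until the response handler calls nci_req_complete(), or the timeout fires:
 *
 *	rc = nci_request(ndev, nci_rf_deactivate_req, 0,
 *			 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
 */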

static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_core_reset_cmd cmd;

	cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
	nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
}

static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
{
	nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
}

static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_map_cmd cmd;
	struct disc_map_config *cfg = cmd.mapping_configs;
	__u8 *num = &cmd.num_mapping_configs;
	int i;

	/* set rf mapping configurations */
	*num = 0;

	/* by default mapping is set to NCI_RF_INTERFACE_FRAME */
	for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
		if (ndev->supported_rf_interfaces[i] ==
		    NCI_RF_INTERFACE_ISO_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
				NCI_DISC_MAP_MODE_LISTEN;
			cfg[*num].rf_interface = NCI_RF_INTERFACE_ISO_DEP;
			(*num)++;
		} else if (ndev->supported_rf_interfaces[i] ==
			   NCI_RF_INTERFACE_NFC_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_POLL |
				NCI_DISC_MAP_MODE_LISTEN;
			cfg[*num].rf_interface = NCI_RF_INTERFACE_NFC_DEP;
			(*num)++;
		}

		if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
			break;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
		     (1 + ((*num) * sizeof(struct disc_map_config))), &cmd);
}

struct nci_set_config_param {
	__u8	id;
	size_t	len;
	__u8	*val;
};

static void nci_set_config_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_set_config_param *param = (struct nci_set_config_param *)opt;
	struct nci_core_set_config_cmd cmd;

	BUG_ON(param->len > NCI_MAX_PARAM_LEN);

	cmd.num_params = 1;
	cmd.param.id = param->id;
	cmd.param.len = param->len;
	memcpy(cmd.param.val, param->val, param->len);

	nci_send_cmd(ndev, NCI_OP_CORE_SET_CONFIG_CMD, (3 + param->len), &cmd);
}

static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_cmd cmd;
	__u32 protocols = opt;

	cmd.num_disc_configs = 0;

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_JEWEL_MASK ||
	     protocols & NFC_PROTO_MIFARE_MASK ||
	     protocols & NFC_PROTO_ISO14443_MASK ||
	     protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_A_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_ISO14443_B_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_B_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_FELICA_MASK ||
	     protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
			NCI_NFC_F_PASSIVE_POLL_MODE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
		     (1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
		     &cmd);
}

struct nci_rf_discover_select_param {
	__u8	rf_discovery_id;
	__u8	rf_protocol;
};

static void nci_rf_discover_select_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_discover_select_param *param =
		(struct nci_rf_discover_select_param *)opt;
	struct nci_rf_discover_select_cmd cmd;

	cmd.rf_discovery_id = param->rf_discovery_id;
	cmd.rf_protocol = param->rf_protocol;

	switch (cmd.rf_protocol) {
	case NCI_RF_PROTOCOL_ISO_DEP:
		cmd.rf_interface = NCI_RF_INTERFACE_ISO_DEP;
		break;

	case NCI_RF_PROTOCOL_NFC_DEP:
		cmd.rf_interface = NCI_RF_INTERFACE_NFC_DEP;
		break;

	default:
		cmd.rf_interface = NCI_RF_INTERFACE_FRAME;
		break;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_SELECT_CMD,
		     sizeof(struct nci_rf_discover_select_cmd), &cmd);
}

static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_deactivate_cmd cmd;

	cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;

	nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
		     sizeof(struct nci_rf_deactivate_cmd), &cmd);
}

static int nci_open_device(struct nci_dev *ndev)
{
	int rc = 0;

	mutex_lock(&ndev->req_lock);

	if (test_bit(NCI_UP, &ndev->flags)) {
		rc = -EALREADY;
		goto done;
	}

	if (ndev->ops->open(ndev)) {
		rc = -EIO;
		goto done;
	}

	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);

	rc = __nci_request(ndev, nci_reset_req, 0,
			   msecs_to_jiffies(NCI_RESET_TIMEOUT));

	if (!rc) {
		rc = __nci_request(ndev, nci_init_req, 0,
				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	if (!rc) {
		rc = __nci_request(ndev, nci_init_complete_req, 0,
				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	clear_bit(NCI_INIT, &ndev->flags);

	if (!rc) {
		set_bit(NCI_UP, &ndev->flags);
		nci_clear_target_list(ndev);
		atomic_set(&ndev->state, NCI_IDLE);
	} else {
		/* Init failed, cleanup */
		skb_queue_purge(&ndev->cmd_q);
		skb_queue_purge(&ndev->rx_q);
		skb_queue_purge(&ndev->tx_q);

		ndev->ops->close(ndev);
		ndev->flags = 0;
	}

done:
	mutex_unlock(&ndev->req_lock);
	return rc;
}

static int nci_close_device(struct nci_dev *ndev)
{
	nci_req_cancel(ndev, ENODEV);
	mutex_lock(&ndev->req_lock);

	if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
		del_timer_sync(&ndev->cmd_timer);
		del_timer_sync(&ndev->data_timer);
		mutex_unlock(&ndev->req_lock);
		return 0;
	}

	/* Drop RX and TX queues */
	skb_queue_purge(&ndev->rx_q);
	skb_queue_purge(&ndev->tx_q);

	/* Flush RX and TX wq */
	flush_workqueue(ndev->rx_wq);
	flush_workqueue(ndev->tx_wq);

	/* Reset device */
	skb_queue_purge(&ndev->cmd_q);
	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);
	__nci_request(ndev, nci_reset_req, 0,
		      msecs_to_jiffies(NCI_RESET_TIMEOUT));
	clear_bit(NCI_INIT, &ndev->flags);

	/* Flush cmd wq */
	flush_workqueue(ndev->cmd_wq);

	/* After this point our queues are empty
	 * and no works are scheduled. */
	ndev->ops->close(ndev);

	/* Clear flags */
	ndev->flags = 0;

	mutex_unlock(&ndev->req_lock);

	return 0;
}

/* NCI command timer function */
static void nci_cmd_timer(unsigned long arg)
{
	struct nci_dev *ndev = (void *) arg;

	atomic_set(&ndev->cmd_cnt, 1);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);
}

/* NCI data exchange timer function */
static void nci_data_timer(unsigned long arg)
{
	struct nci_dev *ndev = (void *) arg;

	set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
	queue_work(ndev->rx_wq, &ndev->rx_work);
}

static int nci_dev_up(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_open_device(ndev);
}

static int nci_dev_down(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_close_device(ndev);
}

static int nci_set_local_general_bytes(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	struct nci_set_config_param param;
	__u8 local_gb[NFC_MAX_GT_LEN];
	int i;

	param.val = nfc_get_local_general_bytes(nfc_dev, &param.len);
	if ((param.val == NULL) || (param.len == 0))
		return 0;

	if (param.len > NFC_MAX_GT_LEN)
		return -EINVAL;

	for (i = 0; i < param.len; i++)
		local_gb[param.len-1-i] = param.val[i];

	param.id = NCI_PN_ATR_REQ_GEN_BYTES;
	param.val = local_gb;

	return nci_request(ndev, nci_set_config_req, (unsigned long)&param,
			   msecs_to_jiffies(NCI_SET_CONFIG_TIMEOUT));
}

static int nci_start_poll(struct nfc_dev *nfc_dev,
			  __u32 im_protocols, __u32 tm_protocols)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	if ((atomic_read(&ndev->state) == NCI_DISCOVERY) ||
	    (atomic_read(&ndev->state) == NCI_W4_ALL_DISCOVERIES)) {
		pr_err("unable to start poll, since poll is already active\n");
		return -EBUSY;
	}

	if (ndev->target_active_prot) {
		pr_err("there is an active target\n");
		return -EBUSY;
	}

	if ((atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) ||
	    (atomic_read(&ndev->state) == NCI_POLL_ACTIVE)) {
		pr_debug("target active or w4 select, implicitly deactivate\n");

		rc = nci_request(ndev, nci_rf_deactivate_req, 0,
				 msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
		if (rc)
			return -EBUSY;
	}

	if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
		rc = nci_set_local_general_bytes(nfc_dev);
		if (rc) {
			pr_err("failed to set local general bytes\n");
			return rc;
		}
	}

	rc = nci_request(ndev, nci_rf_discover_req, im_protocols,
			 msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));

	if (!rc)
		ndev->poll_prots = im_protocols;

	return rc;
}

static void nci_stop_poll(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	if ((atomic_read(&ndev->state) != NCI_DISCOVERY) &&
	    (atomic_read(&ndev->state) != NCI_W4_ALL_DISCOVERIES)) {
		pr_err("unable to stop poll, since poll is not active\n");
		return;
	}

	nci_request(ndev, nci_rf_deactivate_req, 0,
		    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}

static int nci_activate_target(struct nfc_dev *nfc_dev,
			       struct nfc_target *target, __u32 protocol)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	struct nci_rf_discover_select_param param;
	struct nfc_target *nci_target = NULL;
	int i;
	int rc = 0;

	pr_debug("target_idx %d, protocol 0x%x\n", target->idx, protocol);

	if ((atomic_read(&ndev->state) != NCI_W4_HOST_SELECT) &&
	    (atomic_read(&ndev->state) != NCI_POLL_ACTIVE)) {
		pr_err("there is no available target to activate\n");
		return -EINVAL;
	}

	if (ndev->target_active_prot) {
		pr_err("there is already an active target\n");
		return -EBUSY;
	}

	for (i = 0; i < ndev->n_targets; i++) {
		if (ndev->targets[i].idx == target->idx) {
			nci_target = &ndev->targets[i];
			break;
		}
	}

	if (!nci_target) {
		pr_err("unable to find the selected target\n");
		return -EINVAL;
	}

	if (!(nci_target->supported_protocols & (1 << protocol))) {
		pr_err("target does not support the requested protocol 0x%x\n",
		       protocol);
		return -EINVAL;
	}

	if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
		param.rf_discovery_id = nci_target->logical_idx;

		if (protocol == NFC_PROTO_JEWEL)
			param.rf_protocol = NCI_RF_PROTOCOL_T1T;
		else if (protocol == NFC_PROTO_MIFARE)
			param.rf_protocol = NCI_RF_PROTOCOL_T2T;
		else if (protocol == NFC_PROTO_FELICA)
			param.rf_protocol = NCI_RF_PROTOCOL_T3T;
		else if (protocol == NFC_PROTO_ISO14443 ||
			 protocol == NFC_PROTO_ISO14443_B)
			param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
		else
			param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;

		rc = nci_request(ndev, nci_rf_discover_select_req,
				 (unsigned long)&param,
				 msecs_to_jiffies(NCI_RF_DISC_SELECT_TIMEOUT));
	}

	if (!rc)
		ndev->target_active_prot = protocol;

	return rc;
}

static void nci_deactivate_target(struct nfc_dev *nfc_dev,
				  struct nfc_target *target)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	pr_debug("entry\n");

	if (!ndev->target_active_prot) {
		pr_err("unable to deactivate target, no active target\n");
		return;
	}

	ndev->target_active_prot = 0;

	if (atomic_read(&ndev->state) == NCI_POLL_ACTIVE) {
		nci_request(ndev, nci_rf_deactivate_req, 0,
			    msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
	}
}

static int nci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
			   __u8 comm_mode, __u8 *gb, size_t gb_len)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	pr_debug("target_idx %d, comm_mode %d\n", target->idx, comm_mode);

	rc = nci_activate_target(nfc_dev, target, NFC_PROTO_NFC_DEP);
	if (rc)
		return rc;

	rc = nfc_set_remote_general_bytes(nfc_dev, ndev->remote_gb,
					  ndev->remote_gb_len);
	if (!rc)
		rc = nfc_dep_link_is_up(nfc_dev, target->idx, NFC_COMM_PASSIVE,
					NFC_RF_INITIATOR);

	return rc;
}

static int nci_dep_link_down(struct nfc_dev *nfc_dev)
{
	pr_debug("entry\n");

	nci_deactivate_target(nfc_dev, NULL);

	return 0;
}

static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
			  struct sk_buff *skb,
			  data_exchange_cb_t cb, void *cb_context)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	pr_debug("target_idx %d, len %d\n", target->idx, skb->len);

	if (!ndev->target_active_prot) {
		pr_err("unable to exchange data, no active target\n");
		return -EINVAL;
	}

	if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
		return -EBUSY;

	/* store cb and context to be used on receiving data */
	ndev->data_exchange_cb = cb;
	ndev->data_exchange_cb_context = cb_context;

	rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
	if (rc)
		clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);

	return rc;
}

static int nci_enable_se(struct nfc_dev *nfc_dev, u32 se_idx)
{
	return 0;
}

static int nci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx)
{
	return 0;
}

static int nci_discover_se(struct nfc_dev *nfc_dev)
{
	return 0;
}

static struct nfc_ops nci_nfc_ops = {
	.dev_up = nci_dev_up,
	.dev_down = nci_dev_down,
	.start_poll = nci_start_poll,
	.stop_poll = nci_stop_poll,
	.dep_link_up = nci_dep_link_up,
	.dep_link_down = nci_dep_link_down,
	.activate_target = nci_activate_target,
	.deactivate_target = nci_deactivate_target,
	.im_transceive = nci_transceive,
	.enable_se = nci_enable_se,
	.disable_se = nci_disable_se,
	.discover_se = nci_discover_se,
};

/* ---- Interface to NCI drivers ---- */

/**
 * nci_allocate_device - allocate a new nci device
 *
 * @ops: device operations
 * @supported_protocols: NFC protocols supported by the device
 */
struct nci_dev *nci_allocate_device(struct nci_ops *ops,
				    __u32 supported_protocols,
				    int tx_headroom, int tx_tailroom)
{
	struct nci_dev *ndev;

	pr_debug("supported_protocols 0x%x\n", supported_protocols);

	if (!ops->open || !ops->close || !ops->send)
		return NULL;

	if (!supported_protocols)
		return NULL;

	ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
	if (!ndev)
		return NULL;

	ndev->ops = ops;
	ndev->tx_headroom = tx_headroom;
	ndev->tx_tailroom = tx_tailroom;

	ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
					    supported_protocols,
					    tx_headroom + NCI_DATA_HDR_SIZE,
					    tx_tailroom);
	if (!ndev->nfc_dev)
		goto free_exit;

	nfc_set_drvdata(ndev->nfc_dev, ndev);

	return ndev;

free_exit:
	kfree(ndev);
	return NULL;
}
EXPORT_SYMBOL(nci_allocate_device);
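
/*
 * Illustrative allocation sketch for a hypothetical transport driver; the
 * names foo_open, foo_close, foo_send and FOO_TX_HEADROOM are examples and
 * not part of this file:
 *
 *	static struct nci_ops foo_nci_ops = {
 *		.open  = foo_open,
 *		.close = foo_close,
 *		.send  = foo_send,
 *	};
 *
 *	ndev = nci_allocate_device(&foo_nci_ops, NFC_PROTO_ISO14443_MASK,
 *				   FOO_TX_HEADROOM, 0);
 *	if (!ndev)
 *		return -ENOMEM;
 */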

/**
 * nci_free_device - deallocate nci device
 *
 * @ndev: The nci device to deallocate
 */
void nci_free_device(struct nci_dev *ndev)
{
	nfc_free_device(ndev->nfc_dev);
	kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);

/**
 * nci_register_device - register a nci device in the nfc subsystem
 *
 * @ndev: The nci device to register
 */
int nci_register_device(struct nci_dev *ndev)
{
	int rc;
	struct device *dev = &ndev->nfc_dev->dev;
	char name[32];

	rc = nfc_register_device(ndev->nfc_dev);
	if (rc)
		goto exit;

	ndev->flags = 0;

	INIT_WORK(&ndev->cmd_work, nci_cmd_work);
	snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
	ndev->cmd_wq = create_singlethread_workqueue(name);
	if (!ndev->cmd_wq) {
		rc = -ENOMEM;
		goto unreg_exit;
	}

	INIT_WORK(&ndev->rx_work, nci_rx_work);
	snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
	ndev->rx_wq = create_singlethread_workqueue(name);
	if (!ndev->rx_wq) {
		rc = -ENOMEM;
		goto destroy_cmd_wq_exit;
	}

	INIT_WORK(&ndev->tx_work, nci_tx_work);
	snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
	ndev->tx_wq = create_singlethread_workqueue(name);
	if (!ndev->tx_wq) {
		rc = -ENOMEM;
		goto destroy_rx_wq_exit;
	}

	skb_queue_head_init(&ndev->cmd_q);
	skb_queue_head_init(&ndev->rx_q);
	skb_queue_head_init(&ndev->tx_q);

	setup_timer(&ndev->cmd_timer, nci_cmd_timer,
		    (unsigned long) ndev);
	setup_timer(&ndev->data_timer, nci_data_timer,
		    (unsigned long) ndev);

	mutex_init(&ndev->req_lock);

	goto exit;

destroy_rx_wq_exit:
	destroy_workqueue(ndev->rx_wq);

destroy_cmd_wq_exit:
	destroy_workqueue(ndev->cmd_wq);

unreg_exit:
	nfc_unregister_device(ndev->nfc_dev);

exit:
	return rc;
}
EXPORT_SYMBOL(nci_register_device);
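
/*
 * Illustrative register/unregister sketch for a hypothetical driver probe
 * and remove path (the error handling shown is an assumption of typical
 * usage, not something mandated by this file):
 *
 *	rc = nci_register_device(ndev);
 *	if (rc) {
 *		nci_free_device(ndev);
 *		return rc;
 *	}
 *
 *	and on removal:
 *
 *	nci_unregister_device(ndev);
 *	nci_free_device(ndev);
 */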

/**
 * nci_unregister_device - unregister a nci device in the nfc subsystem
 *
 * @ndev: The nci device to unregister
 */
void nci_unregister_device(struct nci_dev *ndev)
{
	nci_close_device(ndev);

	destroy_workqueue(ndev->cmd_wq);
	destroy_workqueue(ndev->rx_wq);
	destroy_workqueue(ndev->tx_wq);

	nfc_unregister_device(ndev->nfc_dev);
}
EXPORT_SYMBOL(nci_unregister_device);

/**
 * nci_recv_frame - receive frame from NCI drivers
 *
 * @ndev: The nci device
 * @skb: The sk_buff to receive
 */
int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
{
	pr_debug("len %d\n", skb->len);

	if (!ndev || (!test_bit(NCI_UP, &ndev->flags) &&
	    !test_bit(NCI_INIT, &ndev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Queue frame for rx worker thread */
	skb_queue_tail(&ndev->rx_q, skb);
	queue_work(ndev->rx_wq, &ndev->rx_work);

	return 0;
}
EXPORT_SYMBOL(nci_recv_frame);
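
/*
 * Illustrative RX-path sketch: once a transport driver has assembled a
 * complete NCI packet into an sk_buff, it hands it to the core; the skb is
 * consumed in all cases.  foo_irq_handler and foo_read_packet below are
 * hypothetical driver helpers:
 *
 *	static void foo_irq_handler(struct foo_dev *fdev)
 *	{
 *		struct sk_buff *skb = foo_read_packet(fdev);
 *
 *		if (skb)
 *			nci_recv_frame(fdev->ndev, skb);
 *	}
 */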

static int nci_send_frame(struct nci_dev *ndev, struct sk_buff *skb)
{
	pr_debug("len %d\n", skb->len);

	if (!ndev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return ndev->ops->send(ndev, skb);
}

/* Send NCI command */
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
{
	struct nci_ctrl_hdr *hdr;
	struct sk_buff *skb;

	pr_debug("opcode 0x%x, plen %d\n", opcode, plen);

	skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
	if (!skb) {
		pr_err("no memory for command\n");
		return -ENOMEM;
	}

	hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
	hdr->gid = nci_opcode_gid(opcode);
	hdr->oid = nci_opcode_oid(opcode);
	hdr->plen = plen;

	nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
	nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);

	if (plen)
		memcpy(skb_put(skb, plen), payload, plen);

	skb_queue_tail(&ndev->cmd_q, skb);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);

	return 0;
}

/* ---- NCI TX Data worker thread ---- */

static void nci_tx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
	struct sk_buff *skb;

	pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));

	/* Send queued tx data */
	while (atomic_read(&ndev->credits_cnt)) {
		skb = skb_dequeue(&ndev->tx_q);
		if (!skb)
			return;

		/* Check if data flow control is used */
		if (atomic_read(&ndev->credits_cnt) !=
		    NCI_DATA_FLOW_CONTROL_NOT_USED)
			atomic_dec(&ndev->credits_cnt);

		pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
			 nci_pbf(skb->data),
			 nci_conn_id(skb->data),
			 nci_plen(skb->data));

		nci_send_frame(ndev, skb);

		mod_timer(&ndev->data_timer,
			  jiffies + msecs_to_jiffies(NCI_DATA_TIMEOUT));
	}
}

/* ----- NCI RX worker thread (data & control) ----- */

static void nci_rx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ndev->rx_q))) {
		/* Process frame */
		switch (nci_mt(skb->data)) {
		case NCI_MT_RSP_PKT:
			nci_rsp_packet(ndev, skb);
			break;

		case NCI_MT_NTF_PKT:
			nci_ntf_packet(ndev, skb);
			break;

		case NCI_MT_DATA_PKT:
			nci_rx_data_packet(ndev, skb);
			break;

		default:
			pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
			kfree_skb(skb);
			break;
		}
	}

	/* check if a data exchange timeout has occurred */
	if (test_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags)) {
		/* complete the data exchange transaction, if exists */
		if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
			nci_data_exchange_complete(ndev, NULL, -ETIMEDOUT);

		clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
	}
}

/* ----- NCI TX CMD worker thread ----- */

static void nci_cmd_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
	struct sk_buff *skb;

	pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));

	/* Send queued command */
	if (atomic_read(&ndev->cmd_cnt)) {
		skb = skb_dequeue(&ndev->cmd_q);
		if (!skb)
			return;

		atomic_dec(&ndev->cmd_cnt);

		pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
			 nci_pbf(skb->data),
			 nci_opcode_gid(nci_opcode(skb->data)),
			 nci_opcode_oid(nci_opcode(skb->data)),
			 nci_plen(skb->data));

		nci_send_frame(ndev, skb);

		mod_timer(&ndev->cmd_timer,
			  jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
	}
}

MODULE_LICENSE("GPL");