Merge tag 'for-linus-20190706' of git://git.kernel.dk/linux-block
[linux/fpc-iii.git] / drivers / net / ethernet / netronome / nfp / ccm.c
blob94476e41e261ab86889c9d7a54b3e3be8ee0b0e9
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2016-2019 Netronome Systems, Inc. */
4 #include <linux/bitops.h>
6 #include "ccm.h"
7 #include "nfp_app.h"
8 #include "nfp_net.h"
10 #define NFP_CCM_TYPE_REPLY_BIT 7
11 #define __NFP_CCM_REPLY(req) (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req))
13 #define ccm_warn(app, msg...) nn_dp_warn(&(app)->ctrl->dp, msg)
15 #define NFP_CCM_TAG_ALLOC_SPAN (U16_MAX / 4)
17 static bool nfp_ccm_all_tags_busy(struct nfp_ccm *ccm)
19 u16 used_tags;
21 used_tags = ccm->tag_alloc_next - ccm->tag_alloc_last;
23 return used_tags > NFP_CCM_TAG_ALLOC_SPAN;
26 static int nfp_ccm_alloc_tag(struct nfp_ccm *ccm)
28 /* CCM is for FW communication which is request-reply. To make sure
29 * we don't reuse the message ID too early after timeout - limit the
30 * number of requests in flight.
32 if (unlikely(nfp_ccm_all_tags_busy(ccm))) {
33 ccm_warn(ccm->app, "all FW request contexts busy!\n");
34 return -EAGAIN;
37 WARN_ON(__test_and_set_bit(ccm->tag_alloc_next, ccm->tag_allocator));
38 return ccm->tag_alloc_next++;
41 static void nfp_ccm_free_tag(struct nfp_ccm *ccm, u16 tag)
43 WARN_ON(!__test_and_clear_bit(tag, ccm->tag_allocator));
45 while (!test_bit(ccm->tag_alloc_last, ccm->tag_allocator) &&
46 ccm->tag_alloc_last != ccm->tag_alloc_next)
47 ccm->tag_alloc_last++;
50 static struct sk_buff *__nfp_ccm_reply(struct nfp_ccm *ccm, u16 tag)
52 unsigned int msg_tag;
53 struct sk_buff *skb;
55 skb_queue_walk(&ccm->replies, skb) {
56 msg_tag = nfp_ccm_get_tag(skb);
57 if (msg_tag == tag) {
58 nfp_ccm_free_tag(ccm, tag);
59 __skb_unlink(skb, &ccm->replies);
60 return skb;
64 return NULL;
67 static struct sk_buff *
68 nfp_ccm_reply(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
70 struct sk_buff *skb;
72 nfp_ctrl_lock(app->ctrl);
73 skb = __nfp_ccm_reply(ccm, tag);
74 nfp_ctrl_unlock(app->ctrl);
76 return skb;
79 static struct sk_buff *
80 nfp_ccm_reply_drop_tag(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
82 struct sk_buff *skb;
84 nfp_ctrl_lock(app->ctrl);
85 skb = __nfp_ccm_reply(ccm, tag);
86 if (!skb)
87 nfp_ccm_free_tag(ccm, tag);
88 nfp_ctrl_unlock(app->ctrl);
90 return skb;
93 static struct sk_buff *
94 nfp_ccm_wait_reply(struct nfp_ccm *ccm, struct nfp_app *app,
95 enum nfp_ccm_type type, int tag)
97 struct sk_buff *skb;
98 int i, err;
100 for (i = 0; i < 50; i++) {
101 udelay(4);
102 skb = nfp_ccm_reply(ccm, app, tag);
103 if (skb)
104 return skb;
107 err = wait_event_interruptible_timeout(ccm->wq,
108 skb = nfp_ccm_reply(ccm, app,
109 tag),
110 msecs_to_jiffies(5000));
111 /* We didn't get a response - try last time and atomically drop
112 * the tag even if no response is matched.
114 if (!skb)
115 skb = nfp_ccm_reply_drop_tag(ccm, app, tag);
116 if (err < 0) {
117 ccm_warn(app, "%s waiting for response to 0x%02x: %d\n",
118 err == ERESTARTSYS ? "interrupted" : "error",
119 type, err);
120 return ERR_PTR(err);
122 if (!skb) {
123 ccm_warn(app, "timeout waiting for response to 0x%02x\n", type);
124 return ERR_PTR(-ETIMEDOUT);
127 return skb;
/* Send request @skb of @type to the FW and wait for the matching reply.
 *
 * Consumes @skb on every path.  Returns the reply skb on success (the
 * caller owns and must free it), or an ERR_PTR() on failure.  A
 * @reply_size of 0 means the caller validates the reply length itself.
 * @skb must already have room for the CCM header at its head - the
 * header is written in place over skb->data.
 */
struct sk_buff *
nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
		    enum nfp_ccm_type type, unsigned int reply_size)
{
	struct nfp_app *app = ccm->app;
	struct nfp_ccm_hdr *hdr;
	int reply_type, tag;

	nfp_ctrl_lock(app->ctrl);
	tag = nfp_ccm_alloc_tag(ccm);
	if (tag < 0) {
		nfp_ctrl_unlock(app->ctrl);
		dev_kfree_skb_any(skb);
		return ERR_PTR(tag);
	}

	/* Fill in the CCM header at the front of the message. */
	hdr = (void *)skb->data;
	hdr->ver = NFP_CCM_ABI_VERSION;
	hdr->type = type;
	hdr->tag = cpu_to_be16(tag);

	/* TX happens under the same lock as tag allocation, so the tag
	 * can't be recycled before the request is actually sent.
	 */
	__nfp_app_ctrl_tx(app, skb);

	nfp_ctrl_unlock(app->ctrl);

	skb = nfp_ccm_wait_reply(ccm, app, type, tag);
	if (IS_ERR(skb))
		return skb;

	/* Replies carry the request type with the reply bit set. */
	reply_type = nfp_ccm_get_type(skb);
	if (reply_type != __NFP_CCM_REPLY(type)) {
		ccm_warn(app, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
			 reply_type, __NFP_CCM_REPLY(type));
		goto err_free;
	}
	/* 0 reply_size means caller will do the validation */
	if (reply_size && skb->len != reply_size) {
		ccm_warn(app, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
			 type, skb->len, reply_size);
		goto err_free;
	}

	return skb;
err_free:
	dev_kfree_skb_any(skb);
	return ERR_PTR(-EIO);
}
/* RX handler for control messages coming back from the FW.
 *
 * Queues @skb on the reply list and wakes the waiters when somebody is
 * waiting for its tag; otherwise (runt message, or tag not allocated)
 * the message is dropped and freed here.
 */
void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb)
{
	struct nfp_app *app = ccm->app;
	unsigned int tag;

	/* Must at least hold a full CCM header to be parseable. */
	if (unlikely(skb->len < sizeof(struct nfp_ccm_hdr))) {
		ccm_warn(app, "cmsg drop - too short %d!\n", skb->len);
		goto err_free;
	}

	nfp_ctrl_lock(app->ctrl);

	/* Only queue replies somebody is waiting for - the tag must
	 * still be marked allocated in the tag bitmap.
	 */
	tag = nfp_ccm_get_tag(skb);
	if (unlikely(!test_bit(tag, ccm->tag_allocator))) {
		ccm_warn(app, "cmsg drop - no one is waiting for tag %u!\n",
			 tag);
		goto err_unlock;
	}

	__skb_queue_tail(&ccm->replies, skb);
	wake_up_interruptible_all(&ccm->wq);

	nfp_ctrl_unlock(app->ctrl);
	return;

err_unlock:
	nfp_ctrl_unlock(app->ctrl);
err_free:
	dev_kfree_skb_any(skb);
}
/* Initialize the CCM state embedded in @ccm for use with @app.
 * Always returns 0.
 */
int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app)
{
	ccm->app = app;
	skb_queue_head_init(&ccm->replies);
	init_waitqueue_head(&ccm->wq);
	return 0;
}
/* Tear down CCM state - every queued reply should already have been
 * consumed by its waiter; warn if any are left behind (they'd leak).
 */
void nfp_ccm_clean(struct nfp_ccm *ccm)
{
	WARN_ON(!skb_queue_empty(&ccm->replies));
}