/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/dmapool.h>
34 #include "hns_roce_common.h"
35 #include "hns_roce_device.h"
36 #include "hns_roce_cmd.h"
38 #define CMD_POLL_TOKEN 0xffff
39 #define CMD_MAX_NUM 32
41 static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev
*hr_dev
,
42 struct hns_roce_mbox_msg
*mbox_msg
)
46 ret
= hr_dev
->hw
->post_mbox(hr_dev
, mbox_msg
);
50 atomic64_inc(&hr_dev
->dfx_cnt
[HNS_ROCE_DFX_MBX_POSTED_CNT
]);
55 /* this should be called with "poll_sem" */
56 static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev
*hr_dev
,
57 struct hns_roce_mbox_msg
*mbox_msg
)
61 ret
= hns_roce_cmd_mbox_post_hw(hr_dev
, mbox_msg
);
63 dev_err_ratelimited(hr_dev
->dev
,
64 "failed to post mailbox 0x%x in poll mode, ret = %d.\n",
69 ret
= hr_dev
->hw
->poll_mbox_done(hr_dev
);
73 atomic64_inc(&hr_dev
->dfx_cnt
[HNS_ROCE_DFX_MBX_POLLED_CNT
]);
78 static int hns_roce_cmd_mbox_poll(struct hns_roce_dev
*hr_dev
,
79 struct hns_roce_mbox_msg
*mbox_msg
)
83 down(&hr_dev
->cmd
.poll_sem
);
84 ret
= __hns_roce_cmd_mbox_poll(hr_dev
, mbox_msg
);
85 up(&hr_dev
->cmd
.poll_sem
);
90 void hns_roce_cmd_event(struct hns_roce_dev
*hr_dev
, u16 token
, u8 status
,
93 struct hns_roce_cmd_context
*context
=
94 &hr_dev
->cmd
.context
[token
% hr_dev
->cmd
.max_cmds
];
96 if (unlikely(token
!= context
->token
)) {
97 dev_err_ratelimited(hr_dev
->dev
,
98 "[cmd] invalid ae token 0x%x, context token is 0x%x.\n",
99 token
, context
->token
);
103 context
->result
= (status
== HNS_ROCE_CMD_SUCCESS
) ? 0 : (-EIO
);
104 context
->out_param
= out_param
;
105 complete(&context
->done
);
106 atomic64_inc(&hr_dev
->dfx_cnt
[HNS_ROCE_DFX_MBX_EVENT_CNT
]);
109 static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev
*hr_dev
,
110 struct hns_roce_mbox_msg
*mbox_msg
)
112 struct hns_roce_cmdq
*cmd
= &hr_dev
->cmd
;
113 struct hns_roce_cmd_context
*context
;
114 struct device
*dev
= hr_dev
->dev
;
117 spin_lock(&cmd
->context_lock
);
120 context
= &cmd
->context
[cmd
->free_head
];
121 cmd
->free_head
= context
->next
;
122 } while (context
->busy
);
125 context
->token
+= cmd
->max_cmds
;
127 spin_unlock(&cmd
->context_lock
);
129 reinit_completion(&context
->done
);
131 mbox_msg
->token
= context
->token
;
132 ret
= hns_roce_cmd_mbox_post_hw(hr_dev
, mbox_msg
);
134 dev_err_ratelimited(dev
,
135 "failed to post mailbox 0x%x in event mode, ret = %d.\n",
140 if (!wait_for_completion_timeout(&context
->done
,
141 msecs_to_jiffies(HNS_ROCE_CMD_TIMEOUT_MSECS
))) {
142 dev_err_ratelimited(dev
, "[cmd] token 0x%x mailbox 0x%x timeout.\n",
143 context
->token
, mbox_msg
->cmd
);
148 ret
= context
->result
;
150 dev_err_ratelimited(dev
, "[cmd] token 0x%x mailbox 0x%x error %d.\n",
151 context
->token
, mbox_msg
->cmd
, ret
);
158 static int hns_roce_cmd_mbox_wait(struct hns_roce_dev
*hr_dev
,
159 struct hns_roce_mbox_msg
*mbox_msg
)
163 down(&hr_dev
->cmd
.event_sem
);
164 ret
= __hns_roce_cmd_mbox_wait(hr_dev
, mbox_msg
);
165 up(&hr_dev
->cmd
.event_sem
);
170 int hns_roce_cmd_mbox(struct hns_roce_dev
*hr_dev
, u64 in_param
, u64 out_param
,
171 u8 cmd
, unsigned long tag
)
173 struct hns_roce_mbox_msg mbox_msg
= {};
176 if (hr_dev
->hw
->chk_mbox_avail
)
177 if (!hr_dev
->hw
->chk_mbox_avail(hr_dev
, &is_busy
))
178 return is_busy
? -EBUSY
: 0;
180 mbox_msg
.in_param
= in_param
;
181 mbox_msg
.out_param
= out_param
;
185 if (hr_dev
->cmd
.use_events
) {
186 mbox_msg
.event_en
= 1;
188 return hns_roce_cmd_mbox_wait(hr_dev
, &mbox_msg
);
190 mbox_msg
.event_en
= 0;
191 mbox_msg
.token
= CMD_POLL_TOKEN
;
193 return hns_roce_cmd_mbox_poll(hr_dev
, &mbox_msg
);
197 int hns_roce_cmd_init(struct hns_roce_dev
*hr_dev
)
199 sema_init(&hr_dev
->cmd
.poll_sem
, 1);
200 hr_dev
->cmd
.use_events
= 0;
201 hr_dev
->cmd
.max_cmds
= CMD_MAX_NUM
;
202 hr_dev
->cmd
.pool
= dma_pool_create("hns_roce_cmd", hr_dev
->dev
,
203 HNS_ROCE_MAILBOX_SIZE
,
204 HNS_ROCE_MAILBOX_SIZE
, 0);
205 if (!hr_dev
->cmd
.pool
)
211 void hns_roce_cmd_cleanup(struct hns_roce_dev
*hr_dev
)
213 dma_pool_destroy(hr_dev
->cmd
.pool
);
216 int hns_roce_cmd_use_events(struct hns_roce_dev
*hr_dev
)
218 struct hns_roce_cmdq
*hr_cmd
= &hr_dev
->cmd
;
222 kcalloc(hr_cmd
->max_cmds
, sizeof(*hr_cmd
->context
), GFP_KERNEL
);
223 if (!hr_cmd
->context
) {
228 for (i
= 0; i
< hr_cmd
->max_cmds
; ++i
) {
229 hr_cmd
->context
[i
].token
= i
;
230 hr_cmd
->context
[i
].next
= i
+ 1;
231 init_completion(&hr_cmd
->context
[i
].done
);
233 hr_cmd
->context
[hr_cmd
->max_cmds
- 1].next
= 0;
234 hr_cmd
->free_head
= 0;
236 sema_init(&hr_cmd
->event_sem
, hr_cmd
->max_cmds
);
237 spin_lock_init(&hr_cmd
->context_lock
);
239 hr_cmd
->use_events
= 1;
244 void hns_roce_cmd_use_polling(struct hns_roce_dev
*hr_dev
)
246 struct hns_roce_cmdq
*hr_cmd
= &hr_dev
->cmd
;
248 kfree(hr_cmd
->context
);
249 hr_cmd
->use_events
= 0;
252 struct hns_roce_cmd_mailbox
*
253 hns_roce_alloc_cmd_mailbox(struct hns_roce_dev
*hr_dev
)
255 struct hns_roce_cmd_mailbox
*mailbox
;
257 mailbox
= kmalloc(sizeof(*mailbox
), GFP_KERNEL
);
259 return ERR_PTR(-ENOMEM
);
262 dma_pool_alloc(hr_dev
->cmd
.pool
, GFP_KERNEL
, &mailbox
->dma
);
265 return ERR_PTR(-ENOMEM
);
271 void hns_roce_free_cmd_mailbox(struct hns_roce_dev
*hr_dev
,
272 struct hns_roce_cmd_mailbox
*mailbox
)
277 dma_pool_free(hr_dev
->cmd
.pool
, mailbox
->buf
, mailbox
->dma
);
281 int hns_roce_create_hw_ctx(struct hns_roce_dev
*dev
,
282 struct hns_roce_cmd_mailbox
*mailbox
,
283 u8 cmd
, unsigned long idx
)
285 return hns_roce_cmd_mbox(dev
, mailbox
->dma
, 0, cmd
, idx
);
288 int hns_roce_destroy_hw_ctx(struct hns_roce_dev
*dev
, u8 cmd
, unsigned long idx
)
290 return hns_roce_cmd_mbox(dev
, 0, 0, cmd
, idx
);