/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/dmapool.h>
#include <linux/platform_device.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#define CMD_POLL_TOKEN		0xffff
#define CMD_MAX_NUM		32
#define STATUS_MASK		0xff
#define CMD_TOKEN_MASK		0x1f
#define GO_BIT_TIMEOUT_MSECS	10000
enum {
	HCR_TOKEN_OFFSET	= 0x14,
	HCR_STATUS_OFFSET	= 0x18,
	HCR_GO_BIT		= 15,
};
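/*
 * Layout of the HCR mailbox window, as programmed by
 * __hns_roce_cmd_mbox_post_hw() below: the 64-bit in_param at offset 0x00,
 * the 64-bit out_param at 0x08, in_modifier at 0x10, the
 * cmd/modifier/event/go/token word at 0x14 (HCR_TOKEN_OFFSET) and the
 * status word at 0x18 (HCR_STATUS_OFFSET).
 */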
static int cmd_pending(struct hns_roce_dev *hr_dev)
{
	u32 status = readl(hr_dev->cmd.hcr + HCR_TOKEN_OFFSET);

	return (!!(status & (1 << HCR_GO_BIT)));
}
/* this function should be serialized with "hcr_mutex" */
static int __hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
				       u64 in_param, u64 out_param,
				       u32 in_modifier, u8 op_modifier, u16 op,
				       u16 token, int event)
{
	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
	struct device *dev = &hr_dev->pdev->dev;
	u32 __iomem *hcr = (u32 *)cmd->hcr;
	int ret = -EAGAIN;
	unsigned long end;
	u32 val = 0;

	end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
	while (cmd_pending(hr_dev)) {
		if (time_after(jiffies, end)) {
			dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
				(int)end);
			goto out;
		}
		cond_resched();
	}

	roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
		       op);
	roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
		       ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
	roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
	roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
	roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
		       ROCEE_MB6_ROCEE_MB_TOKEN_S, token);

	__raw_writeq(cpu_to_le64(in_param), hcr + 0);
	__raw_writeq(cpu_to_le64(out_param), hcr + 2);
	__raw_writel(cpu_to_le32(in_modifier), hcr + 4);
	/* Memory barrier */
	wmb();

	__raw_writel(cpu_to_le32(val), hcr + 5);

	mmiowb();
	ret = 0;

out:
	return ret;
}
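/* Serialize on "hcr_mutex", then post one mailbox command to the HCR. */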
static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
				     u64 out_param, u32 in_modifier,
				     u8 op_modifier, u16 op, u16 token,
				     int event)
{
	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
	int ret;

	mutex_lock(&cmd->hcr_mutex);
	ret = __hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
					  in_modifier, op_modifier, op, token,
					  event);
	mutex_unlock(&cmd->hcr_mutex);

	return ret;
}
/* this should be called while holding "poll_sem" */
static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
				    u64 out_param, unsigned long in_modifier,
				    u8 op_modifier, u16 op,
				    unsigned long timeout)
{
	struct device *dev = &hr_dev->pdev->dev;
	u8 __iomem *hcr = hr_dev->cmd.hcr;
	unsigned long end = 0;
	u32 status = 0;
	int ret;

	ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
					in_modifier, op_modifier, op,
					CMD_POLL_TOKEN, 0);
	if (ret) {
		dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
		goto out;
	}

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(hr_dev) && time_before(jiffies, end))
		cond_resched();

	if (cmd_pending(hr_dev)) {
		dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	status = le32_to_cpu((__force __be32)
			      __raw_readl(hcr + HCR_STATUS_OFFSET));
	if ((status & STATUS_MASK) != 0x1) {
		dev_err(dev, "mailbox status 0x%x!\n", status);
		ret = -EBUSY;
	}

out:
	return ret;
}
static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
				  u64 out_param, unsigned long in_modifier,
				  u8 op_modifier, u16 op, unsigned long timeout)
{
	int ret;

	down(&hr_dev->cmd.poll_sem);
	ret = __hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, in_modifier,
				       op_modifier, op, timeout);
	up(&hr_dev->cmd.poll_sem);

	return ret;
}
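/*
 * Completion handler for event-mode mailbox commands; it is expected to be
 * called from the event queue handling path when a mailbox completion
 * arrives. A stale token is silently ignored.
 */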
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param)
{
	struct hns_roce_cmd_context
		*context = &hr_dev->cmd.context[token & hr_dev->cmd.token_mask];

	if (token != context->token)
		return;

	context->result = (status == HNS_ROCE_CMD_SUCCESS) ? 0 : (-EIO);
	context->out_param = out_param;
	complete(&context->done);
}
/* this should be called with "use_events" enabled */
static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
				    u64 out_param, unsigned long in_modifier,
				    u8 op_modifier, u16 op,
				    unsigned long timeout)
{
	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_cmd_context *context;
	int ret = 0;

	spin_lock(&cmd->context_lock);
	WARN_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	init_completion(&context->done);

	ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
					in_modifier, op_modifier, op,
					context->token, 1);
	if (ret)
		goto out;

	/*
	 * wait_for_completion_timeout() returns 0 on timeout; otherwise it
	 * returns the remaining part of the timeout that was passed in.
	 */
	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		dev_err(dev, "[cmd]wait_for_completion_timeout timeout\n");
		ret = -EBUSY;
		goto out;
	}

	ret = context->result;
	if (ret)
		dev_err(dev, "[cmd]event mod cmd process error!err=%d\n", ret);

out:
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	return ret;
}
static int hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
				  u64 out_param, unsigned long in_modifier,
				  u8 op_modifier, u16 op, unsigned long timeout)
{
	int ret;

	down(&hr_dev->cmd.event_sem);
	ret = __hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
				       in_modifier, op_modifier, op, timeout);
	up(&hr_dev->cmd.event_sem);

	return ret;
}
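/*
 * Post a mailbox command and wait for it to finish, using the completion
 * event path when "use_events" is set and the polling path otherwise.
 */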
int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
		      unsigned long in_modifier, u8 op_modifier, u16 op,
		      unsigned long timeout)
{
	if (hr_dev->cmd.use_events)
		return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
					      in_modifier, op_modifier, op,
					      timeout);

	return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
				      in_modifier, op_modifier, op,
				      timeout);
}
int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;

	mutex_init(&hr_dev->cmd.hcr_mutex);
	sema_init(&hr_dev->cmd.poll_sem, 1);
	hr_dev->cmd.use_events = 0;
	hr_dev->cmd.toggle = 1;
	hr_dev->cmd.max_cmds = CMD_MAX_NUM;
	hr_dev->cmd.hcr = hr_dev->reg_base + ROCEE_MB1_REG;
	hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
					   HNS_ROCE_MAILBOX_SIZE,
					   HNS_ROCE_MAILBOX_SIZE, 0);
	if (!hr_dev->cmd.pool)
		return -ENOMEM;

	return 0;
}
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev)
{
	dma_pool_destroy(hr_dev->cmd.pool);
}
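/*
 * Switch the command interface to event mode: set up the per-token context
 * array and its free list, then take "poll_sem" so that pollers are blocked
 * while events are in use.
 */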
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
	int i;

	hr_cmd->context = kmalloc(hr_cmd->max_cmds *
				  sizeof(struct hns_roce_cmd_context),
				  GFP_KERNEL);
	if (!hr_cmd->context)
		return -ENOMEM;

	for (i = 0; i < hr_cmd->max_cmds; ++i) {
		hr_cmd->context[i].token = i;
		hr_cmd->context[i].next = i + 1;
	}

	hr_cmd->context[hr_cmd->max_cmds - 1].next = -1;
	hr_cmd->free_head = 0;

	sema_init(&hr_cmd->event_sem, hr_cmd->max_cmds);
	spin_lock_init(&hr_cmd->context_lock);

	hr_cmd->token_mask = CMD_TOKEN_MASK;
	hr_cmd->use_events = 1;

	down(&hr_cmd->poll_sem);

	return 0;
}
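/*
 * Switch back to polling mode: drain "event_sem" so that no event-mode
 * command is still outstanding, free the context array and release
 * "poll_sem" again.
 */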
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
	int i;

	hr_cmd->use_events = 0;

	for (i = 0; i < hr_cmd->max_cmds; ++i)
		down(&hr_cmd->event_sem);

	kfree(hr_cmd->context);
	up(&hr_cmd->poll_sem);
}
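/*
 * Allocate a mailbox descriptor and a buffer from the command DMA pool;
 * returns ERR_PTR(-ENOMEM) on failure.
 */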
struct hns_roce_cmd_mailbox
	*hns_roce_alloc_cmd_mailbox(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = dma_pool_alloc(hr_dev->cmd.pool, GFP_KERNEL,
				      &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	return mailbox;
}
void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}