/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2020 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"

static u64 hwrm_calc_sentinel(struct bnxt_hwrm_ctx *ctx, u16 req_type)
{
	return (((uintptr_t)ctx) + req_type) ^ BNXT_HWRM_SENTINEL;
}

/**
 * __hwrm_req_init() - Initialize an HWRM request.
 * @bp: The driver context.
 * @req: A pointer to the request pointer to initialize.
 * @req_type: The request type. This will be converted to little endian
 *	before being written to the req_type field of the returned request.
 * @req_len: The length of the request to be allocated.
 *
 * Allocate DMA resources and initialize a new HWRM request object of the
 * given type. The response address field in the request is configured with
 * the DMA bus address that has been mapped for the response and the passed
 * request is pointed to kernel virtual memory mapped for the request (such
 * that short_input indirection can be accomplished without copying). The
 * request's target and completion ring are initialized to default values and
 * can be overridden by writing to the returned request object directly.
 *
 * The initialized request can be further customized by writing to its fields
 * directly, taking care to convert such fields to little endian. The request
 * object will be consumed (and all its associated resources released) upon
 * passing it to hwrm_req_send() unless ownership of the request has been
 * claimed by the caller via a call to hwrm_req_hold(). If the request is not
 * consumed, either because it is never sent or because ownership has been
 * claimed, then it must be released by a call to hwrm_req_drop().
 *
 * Return: zero on success, negative error code otherwise:
 *	E2BIG: the type of request pointer is too large to fit.
 *	ENOMEM: an allocation failure occurred.
 */
int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len)
{
	struct bnxt_hwrm_ctx *ctx;
	dma_addr_t dma_handle;
	u8 *req_addr;

	if (req_len > BNXT_HWRM_CTX_OFFSET)
		return -E2BIG;

	req_addr = dma_pool_alloc(bp->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO,
				  &dma_handle);
	if (!req_addr)
		return -ENOMEM;

	ctx = (struct bnxt_hwrm_ctx *)(req_addr + BNXT_HWRM_CTX_OFFSET);
	/* safety first, sentinel used to check for invalid requests */
	ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);
	ctx->req_len = req_len;
	ctx->req = (struct input *)req_addr;
	ctx->resp = (struct output *)(req_addr + BNXT_HWRM_RESP_OFFSET);
	ctx->dma_handle = dma_handle;
	ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */
	ctx->timeout = bp->hwrm_cmd_timeout ?: DFLT_HWRM_CMD_TIMEOUT;
	ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
	ctx->gfp = GFP_KERNEL;
	ctx->slice_addr = NULL;

	/* initialize common request fields */
	ctx->req->req_type = cpu_to_le16(req_type);
	ctx->req->resp_addr = cpu_to_le64(dma_handle + BNXT_HWRM_RESP_OFFSET);
	ctx->req->cmpl_ring = cpu_to_le16(BNXT_HWRM_NO_CMPL_RING);
	ctx->req->target_id = cpu_to_le16(BNXT_HWRM_TARGET);
	*req = ctx->req;

	return 0;
}

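/* Usage sketch (illustrative; error handling trimmed): the hwrm_req_init()
 * macro in bnxt_hwrm.h wraps __hwrm_req_init() and derives req_len from the
 * request pointer type. The send consumes the request, so no explicit
 * cleanup is needed:
 *
 *	struct hwrm_func_reset_input *req;
 *	int rc;
 *
 *	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
 *	if (rc)
 *		return rc;
 *	req->enables = 0;
 *	return hwrm_req_send(bp, req);
 */
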
static struct bnxt_hwrm_ctx *__hwrm_ctx(struct bnxt *bp, u8 *req_addr)
{
	void *ctx_addr = req_addr + BNXT_HWRM_CTX_OFFSET;
	struct input *req = (struct input *)req_addr;
	struct bnxt_hwrm_ctx *ctx = ctx_addr;
	u64 sentinel;

	if (!req) {
		/* can only be due to software bug, be loud */
		netdev_err(bp->dev, "null HWRM request");
		dump_stack();
		return NULL;
	}

	/* HWRM API has no type safety, verify sentinel to validate address */
	sentinel = hwrm_calc_sentinel(ctx, le16_to_cpu(req->req_type));
	if (ctx->sentinel != sentinel) {
		/* can only be due to software bug, be loud */
		netdev_err(bp->dev, "HWRM sentinel mismatch, req_type = %u\n",
			   (u32)le16_to_cpu(req->req_type));
		dump_stack();
		return NULL;
	}

	return ctx;
}

/**
 * hwrm_req_timeout() - Set the completion timeout for the request.
 * @bp: The driver context.
 * @req: The request to set the timeout.
 * @timeout: The timeout in milliseconds.
 *
 * Set the timeout associated with the request for subsequent calls to
 * hwrm_req_send(). Some requests are long running and require a different
 * timeout than the default.
 */
void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		ctx->timeout = timeout;
}

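/* Usage sketch: given a managed req from hwrm_req_init(), a slow command
 * (NVM writes, for example) can raise its timeout before sending. The bound
 * shown here reuses the driver's configured maximum and is illustrative.
 *
 *	hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
 *	rc = hwrm_req_send(bp, req);
 */
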
/**
 * hwrm_req_alloc_flags() - Sets GFP allocation flags for slices.
 * @bp: The driver context.
 * @req: The request for which calls to hwrm_req_dma_slice() will have altered
 *	allocation flags.
 * @gfp: A bitmask of GFP flags. These flags are passed to dma_alloc_coherent()
 *	whenever it is used to allocate backing memory for slices. Note that
 *	calls to hwrm_req_dma_slice() will not always result in new allocations,
 *	however, memory suballocated from the request buffer is already
 *	__GFP_ZERO.
 *
 * Sets the GFP allocation flags associated with the request for subsequent
 * calls to hwrm_req_dma_slice(). This can be useful for specifying __GFP_ZERO
 * for slice allocations.
 */
void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t gfp)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		ctx->gfp = gfp;
}

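/* Usage sketch: request zeroed slice memory before mapping an external
 * buffer with hwrm_req_dma_slice(); buf_len is a caller-supplied size.
 *
 *	hwrm_req_alloc_flags(bp, req, GFP_KERNEL | __GFP_ZERO);
 *	buf = hwrm_req_dma_slice(bp, req, buf_len, &dma_handle);
 */
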
/**
 * hwrm_req_replace() - Replace request data.
 * @bp: The driver context.
 * @req: The request to modify. A call to hwrm_req_replace() is conceptually
 *	an assignment of new_req to req. Subsequent calls to HWRM API functions,
 *	such as hwrm_req_send(), should thus use req and not new_req (in fact,
 *	calls to HWRM API functions will fail if non-managed request objects
 *	are passed).
 * @len: The length of new_req.
 * @new_req: The pre-built request to copy or reference.
 *
 * Replaces the request data in req with that of new_req. This is useful in
 * scenarios where a request object has already been constructed by a third
 * party prior to creating a resource managed request using hwrm_req_init().
 * Depending on the length, hwrm_req_replace() will either copy the new
 * request data into the DMA memory allocated for req, or it will simply
 * reference the new request and use it in lieu of req during subsequent
 * calls to hwrm_req_send(). The resource management is associated with
 * req and is independent of and does not apply to new_req. The caller must
 * ensure that the lifetime of new_req is at least as long as req. Any slices
 * that may have been associated with the original request are released.
 *
 * Return: zero on success, negative error code otherwise:
 *	E2BIG: Request is too large.
 *	EINVAL: Invalid request to modify.
 */
int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
	struct input *internal_req = req;
	u16 req_type;

	if (!ctx)
		return -EINVAL;

	if (len > BNXT_HWRM_CTX_OFFSET)
		return -E2BIG;

	/* free any existing slices */
	ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
	if (ctx->slice_addr) {
		dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
				  ctx->slice_addr, ctx->slice_handle);
		ctx->slice_addr = NULL;
	}
	ctx->gfp = GFP_KERNEL;

	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || len > BNXT_HWRM_MAX_REQ_LEN) {
		memcpy(internal_req, new_req, len);
	} else {
		internal_req->req_type = ((struct input *)new_req)->req_type;
		ctx->req = new_req;
	}

	ctx->req_len = len;
	ctx->req->resp_addr = cpu_to_le64(ctx->dma_handle +
					  BNXT_HWRM_RESP_OFFSET);

	/* update sentinel for potentially new request type */
	req_type = le16_to_cpu(internal_req->req_type);
	ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);

	return 0;
}

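/* Usage sketch (modeled on the ULP pass-through pattern): msg and msg_len
 * stand for a hypothetical caller-built command buffer. The req_type passed
 * to init does not matter because the replace overwrites it.
 *
 *	struct input *req;
 *	int rc;
 *
 *	rc = __hwrm_req_init(bp, (void **)&req, 0, msg_len);
 *	if (rc)
 *		return rc;
 *	rc = hwrm_req_replace(bp, req, msg, msg_len);
 *	if (rc) {
 *		hwrm_req_drop(bp, req);
 *		return rc;
 *	}
 *	return hwrm_req_send(bp, req);
 */
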
/**
 * hwrm_req_flags() - Set non internal flags of the ctx
 * @bp: The driver context.
 * @req: The request containing the HWRM command
 * @flags: ctx flags that don't have BNXT_HWRM_INTERNAL_FLAG set
 *
 * ctx flags can be used by the callers to instruct how the subsequent
 * hwrm_req_send() should behave. Example: callers can use hwrm_req_flags
 * with BNXT_HWRM_CTX_SILENT to omit kernel prints of errors of hwrm_req_send()
 * or with BNXT_HWRM_FULL_WAIT to force hwrm_req_send() to wait for the full
 * timeout even if FW is not responding.
 * This generic function can be used to set any flag that is not an internal
 * flag of the HWRM module.
 */
void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		ctx->flags |= (flags & HWRM_API_FLAGS);
}

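/* Usage sketch: silence expected failures when probing an optional command;
 * hwrm_req_send_silent() further below is shorthand for this pairing.
 *
 *	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
 *	rc = hwrm_req_send(bp, req);
 */
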
/**
 * hwrm_req_hold() - Claim ownership of the request's resources.
 * @bp: The driver context.
 * @req: A pointer to the request to own. The request will no longer be
 *	consumed by calls to hwrm_req_send().
 *
 * Take ownership of the request. Ownership places responsibility on the
 * caller to free the resources associated with the request via a call to
 * hwrm_req_drop(). The caller taking ownership implies that a subsequent
 * call to hwrm_req_send() will not consume the request (ie. sending will
 * not free the associated resources if the request is owned by the caller).
 * Taking ownership returns a reference to the response. Retaining and
 * accessing the response data is the most common reason to take ownership
 * of the request. Ownership can also be acquired in order to reuse the same
 * request object across multiple invocations of hwrm_req_send().
 *
 * Return: A pointer to the response object.
 *
 * The resources associated with the response will remain available to the
 * caller until ownership of the request is relinquished via a call to
 * hwrm_req_drop(). It is not possible for hwrm_req_hold() to return NULL if
 * a valid request is provided. A returned NULL value would imply a driver
 * bug and the implementation will complain loudly in the logs to aid in
 * detection. It should not be necessary to check the result for NULL.
 */
void *hwrm_req_hold(struct bnxt *bp, void *req)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
	struct input *input = (struct input *)req;

	if (!ctx)
		return NULL;

	if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED) {
		/* can only be due to software bug, be loud */
		netdev_err(bp->dev, "HWRM context already owned, req_type = %u\n",
			   (u32)le16_to_cpu(input->req_type));
		dump_stack();
		return NULL;
	}

	ctx->flags |= BNXT_HWRM_INTERNAL_CTX_OWNED;
	return ((u8 *)req) + BNXT_HWRM_RESP_OFFSET;
}

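/* Usage sketch: hold the request so the response stays valid across the
 * send, then drop it when done. Struct and field names come from
 * bnxt_hsi.h; error handling is trimmed.
 *
 *	struct hwrm_ver_get_output *resp;
 *	struct hwrm_ver_get_input *req;
 *	int rc;
 *
 *	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
 *	if (rc)
 *		return rc;
 *	resp = hwrm_req_hold(bp, req);
 *	rc = hwrm_req_send(bp, req);
 *	if (!rc)
 *		ver = resp->hwrm_intf_maj_8b;
 *	hwrm_req_drop(bp, req);
 */
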
static void __hwrm_ctx_drop(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
	void *addr = ((u8 *)ctx) - BNXT_HWRM_CTX_OFFSET;
	dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */

	/* unmap any auxiliary DMA slice */
	if (ctx->slice_addr)
		dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
				  ctx->slice_addr, ctx->slice_handle);

	/* invalidate, ensure ownership, sentinel and dma_handle are cleared */
	memset(ctx, 0, sizeof(struct bnxt_hwrm_ctx));

	/* return the buffer to the DMA pool */
	if (dma_handle)
		dma_pool_free(bp->hwrm_dma_pool, addr, dma_handle);
}

/**
 * hwrm_req_drop() - Release all resources associated with the request.
 * @bp: The driver context.
 * @req: The request to consume, releasing the associated resources. The
 *	request object, any slices, and its associated response are no
 *	longer valid.
 *
 * It is legal to call hwrm_req_drop() on an unowned request, provided it
 * has not already been consumed by hwrm_req_send() (for example, to release
 * an aborted request). A given request should not be dropped more than once,
 * nor should it be dropped after having been consumed by hwrm_req_send(). To
 * do so is an error (the context will not be found and a stack trace will be
 * rendered in the kernel log).
 */
void hwrm_req_drop(struct bnxt *bp, void *req)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		__hwrm_ctx_drop(bp, ctx);
}

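/* Usage sketch: an initialized request that is never sent must still be
 * dropped on the early-exit path (the precondition here is hypothetical).
 *
 *	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
 *	if (rc)
 *		return rc;
 *	if (precondition_failed) {
 *		hwrm_req_drop(bp, req);
 *		return -EINVAL;
 *	}
 *	return hwrm_req_send(bp, req);
 */
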
static int __hwrm_to_stderr(u32 hwrm_err)
{
	switch (hwrm_err) {
	case HWRM_ERR_CODE_SUCCESS:
		return 0;
	case HWRM_ERR_CODE_RESOURCE_LOCKED:
		return -EROFS;
	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
		return -EACCES;
	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
		return -ENOSPC;
	case HWRM_ERR_CODE_INVALID_PARAMS:
	case HWRM_ERR_CODE_INVALID_FLAGS:
	case HWRM_ERR_CODE_INVALID_ENABLES:
	case HWRM_ERR_CODE_UNSUPPORTED_TLV:
	case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
		return -EINVAL;
	case HWRM_ERR_CODE_NO_BUFFER:
		return -ENOMEM;
	case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
	case HWRM_ERR_CODE_BUSY:
		return -EAGAIN;
	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case HWRM_ERR_CODE_PF_UNAVAILABLE:
		return -ENODEV;
	default:
		return -EIO;
	}
}

static struct bnxt_hwrm_wait_token *
__hwrm_acquire_token(struct bnxt *bp, enum bnxt_hwrm_chnl dst)
	__acquires(&bp->hwrm_cmd_lock)
{
	struct bnxt_hwrm_wait_token *token;

	token = kzalloc(sizeof(*token), GFP_KERNEL);
	if (!token)
		return NULL;

	mutex_lock(&bp->hwrm_cmd_lock);

	token->dst = dst;
	token->state = BNXT_HWRM_PENDING;
	if (dst == BNXT_HWRM_CHNL_CHIMP) {
		token->seq_id = bp->hwrm_cmd_seq++;
		hlist_add_head_rcu(&token->node, &bp->hwrm_pending_list);
	} else {
		token->seq_id = bp->hwrm_cmd_kong_seq++;
	}

	return token;
}

static void __hwrm_release_token(struct bnxt *bp,
				 struct bnxt_hwrm_wait_token *token)
	__releases(&bp->hwrm_cmd_lock)
{
	if (token->dst == BNXT_HWRM_CHNL_CHIMP) {
		hlist_del_rcu(&token->node);
		kfree_rcu(token, rcu);
	} else {
		kfree(token);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
}

void
hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state)
{
	struct bnxt_hwrm_wait_token *token;

	rcu_read_lock();
	hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) {
		if (token->seq_id == seq_id) {
			WRITE_ONCE(token->state, state);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
}

static void hwrm_req_dbg(struct bnxt *bp, struct input *req)
{
	u32 ring = le16_to_cpu(req->cmpl_ring);
	u32 type = le16_to_cpu(req->req_type);
	u32 tgt = le16_to_cpu(req->target_id);
	u32 seq = le16_to_cpu(req->seq_id);
	char opt[32] = "\n";

	if (unlikely(ring != (u16)BNXT_HWRM_NO_CMPL_RING))
		snprintf(opt, 16, " ring %d\n", ring);

	if (unlikely(tgt != BNXT_HWRM_TARGET))
		snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt);

	netdev_dbg(bp->dev, "sent hwrm req_type 0x%x seq id 0x%x%s",
		   type, seq, opt);
}

#define hwrm_err(bp, ctx, fmt, ...)				       \
	do {							       \
		if ((ctx)->flags & BNXT_HWRM_CTX_SILENT)	       \
			netdev_dbg((bp)->dev, fmt, __VA_ARGS__);       \
		else						       \
			netdev_err((bp)->dev, fmt, __VA_ARGS__);       \
	} while (0)

static bool hwrm_wait_must_abort(struct bnxt *bp, u32 req_type, u32 *fw_status)
{
	if (req_type == HWRM_VER_GET)
		return false;

	if (!bp->fw_health || !bp->fw_health->status_reliable)
		return false;

	*fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
	return *fw_status && !BNXT_FW_IS_HEALTHY(*fw_status);
}

static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
	u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
	enum bnxt_hwrm_chnl dst = BNXT_HWRM_CHNL_CHIMP;
	u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
	struct bnxt_hwrm_wait_token *token = NULL;
	struct hwrm_short_input short_input = {0};
	u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
	unsigned int i, timeout, tmo_count;
	u32 *data = (u32 *)ctx->req;
	u32 msg_len = ctx->req_len;
	u32 req_type, sts;
	int rc = -EBUSY;
	u16 len = 0;
	u8 *valid;

	if (ctx->flags & BNXT_HWRM_INTERNAL_RESP_DIRTY)
		memset(ctx->resp, 0, PAGE_SIZE);

	req_type = le16_to_cpu(ctx->req->req_type);
	if (BNXT_NO_FW_ACCESS(bp) &&
	    (req_type != HWRM_FUNC_RESET && req_type != HWRM_VER_GET)) {
		netdev_dbg(bp->dev, "hwrm req_type 0x%x skipped, FW channel down\n",
			   req_type);
		goto exit;
	}

	if (msg_len > BNXT_HWRM_MAX_REQ_LEN &&
	    msg_len > bp->hwrm_max_ext_req_len) {
		netdev_warn(bp->dev, "oversized hwrm request, req_type 0x%x",
			    req_type);
		rc = -E2BIG;
		goto exit;
	}

	if (bnxt_kong_hwrm_message(bp, ctx->req)) {
		dst = BNXT_HWRM_CHNL_KONG;
		bar_offset = BNXT_GRCPF_REG_KONG_COMM;
		doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
		if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
			netdev_err(bp->dev, "Ring completions not supported for KONG commands, req_type = %d\n",
				   req_type);
			rc = -EINVAL;
			goto exit;
		}
	}

	token = __hwrm_acquire_token(bp, dst);
	if (!token) {
		rc = -ENOMEM;
		goto exit;
	}
	ctx->req->seq_id = cpu_to_le16(token->seq_id);

	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
	    msg_len > BNXT_HWRM_MAX_REQ_LEN) {
		short_input.req_type = ctx->req->req_type;
		short_input.signature =
			cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = cpu_to_le16(msg_len);
		short_input.req_addr = cpu_to_le64(ctx->dma_handle);

		data = (u32 *)&short_input;
		msg_len = sizeof(short_input);

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Ensure any associated DMA buffers are written before doorbell */
	wmb();

	/* Write request msg to hwrm channel */
	__iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);

	for (i = msg_len; i < max_req_len; i += 4)
		writel(0, bp->bar0 + bar_offset + i);

	/* Ring channel doorbell */
	writel(1, bp->bar0 + doorbell_offset);

	hwrm_req_dbg(bp, ctx->req);

	if (!pci_is_enabled(bp->pdev)) {
		rc = -ENODEV;
		goto exit;
	}

	/* Limit timeout to an upper limit */
	timeout = min(ctx->timeout,
		      bp->hwrm_cmd_max_timeout ?: HWRM_CMD_MAX_TIMEOUT);
	/* convert timeout to usec */
	timeout *= 1000;

	i = 0;
	/* Short timeout for the first few iterations:
	 * number of loops = number of loops for short timeout +
	 * number of loops for standard timeout.
	 */
	tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
	timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
	tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);

	if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
		/* Wait until hwrm response cmpl interrupt is processed */
		while (READ_ONCE(token->state) < BNXT_HWRM_COMPLETE &&
		       i++ < tmo_count) {
			/* Abort the wait for completion if the FW health
			 * check has failed.
			 */
			if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
				goto exit;
			/* on first few passes, just barely sleep */
			if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
					     HWRM_SHORT_MAX_TIMEOUT);
			} else {
				if (hwrm_wait_must_abort(bp, req_type, &sts)) {
					hwrm_err(bp, ctx, "Resp cmpl intr abandoning msg: 0x%x due to firmware status: 0x%x\n",
						 req_type, sts);
					goto exit;
				}
				usleep_range(HWRM_MIN_TIMEOUT,
					     HWRM_MAX_TIMEOUT);
			}
		}

		if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) {
			hwrm_err(bp, ctx, "Resp cmpl intr err msg: 0x%x\n",
				 req_type);
			goto exit;
		}
		len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
		valid = ((u8 *)ctx->resp) + len - 1;
	} else {
		__le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */
		int j;

		/* Check if response len is updated */
		for (i = 0; i < tmo_count; i++) {
			/* Abort the wait for completion if the FW health
			 * check has failed.
			 */
			if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
				goto exit;

			if (token &&
			    READ_ONCE(token->state) == BNXT_HWRM_DEFERRED) {
				__hwrm_release_token(bp, token);
				token = NULL;
			}

			len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
			if (len) {
				__le16 resp_seq = READ_ONCE(ctx->resp->seq_id);

				if (resp_seq == ctx->req->seq_id)
					break;
				if (resp_seq != seen_out_of_seq) {
					netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
						    le16_to_cpu(resp_seq),
						    req_type,
						    le16_to_cpu(ctx->req->seq_id));
					seen_out_of_seq = resp_seq;
				}
			}

			/* on first few passes, just barely sleep */
			if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
					     HWRM_SHORT_MAX_TIMEOUT);
			} else {
				if (hwrm_wait_must_abort(bp, req_type, &sts)) {
					hwrm_err(bp, ctx, "Abandoning msg {0x%x 0x%x} len: %d due to firmware status: 0x%x\n",
						 req_type,
						 le16_to_cpu(ctx->req->seq_id),
						 len, sts);
					goto exit;
				}
				usleep_range(HWRM_MIN_TIMEOUT,
					     HWRM_MAX_TIMEOUT);
			}
		}

		if (i >= tmo_count) {
			hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
				 hwrm_total_timeout(i), req_type,
				 le16_to_cpu(ctx->req->seq_id), len);
			goto exit;
		}

		/* Last byte of resp contains valid bit */
		valid = ((u8 *)ctx->resp) + len - 1;
		for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) {
			/* make sure we read from updated DMA memory */
			dma_rmb();
			if (*valid)
				break;
			if (j < 10) {
				udelay(1);
				j++;
			} else {
				usleep_range(20, 30);
				j += 25;
			}
		}

		if (j >= HWRM_VALID_BIT_DELAY_USEC) {
			hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
				 hwrm_total_timeout(i) + j, req_type,
				 le16_to_cpu(ctx->req->seq_id), len, *valid);
			goto exit;
		}
	}

	/* Zero valid bit for compatibility. Valid bit in an older spec
	 * may become a new field in a newer spec. We must make sure that
	 * a new field not implemented by old spec will read zero.
	 */
	*valid = 0;
	rc = le16_to_cpu(ctx->resp->error_code);
	if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNXT_HWRM_CTX_SILENT))
		netdev_warn(bp->dev, "FW returned busy, hwrm req_type 0x%x\n",
			    req_type);
	else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
		hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
			 req_type, le16_to_cpu(ctx->req->seq_id), rc);
	rc = __hwrm_to_stderr(rc);
exit:
	if (token)
		__hwrm_release_token(bp, token);
	if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED)
		ctx->flags |= BNXT_HWRM_INTERNAL_RESP_DIRTY;
	else
		__hwrm_ctx_drop(bp, ctx);
	return rc;
}

/**
 * hwrm_req_send() - Execute an HWRM command.
 * @bp: The driver context.
 * @req: A pointer to the request to send. The DMA resources associated with
 *	the request will be released (ie. the request will be consumed) unless
 *	ownership of the request has been assumed by the caller via a call to
 *	hwrm_req_hold().
 *
 * Send an HWRM request to the device and wait for a response. The request is
 * consumed if it is not owned by the caller. This function will block until
 * the request has either completed or times out due to an error.
 *
 * Return: A result code.
 *
 * The result is zero on success, otherwise the negative error code indicates
 * one of the following errors:
 *	E2BIG: The request was too large.
 *	EBUSY: The firmware is in a fatal state or the request timed out.
 *	EACCES: HWRM access denied.
 *	ENOSPC: HWRM resource allocation error.
 *	EINVAL: Request parameters are invalid.
 *	ENOMEM: HWRM has no buffers.
 *	EAGAIN: HWRM busy or reset in progress.
 *	EOPNOTSUPP: Invalid request type.
 *	EIO: Any other error.
 * Error handling is orthogonal to request ownership. An unowned request will
 * still be consumed on error. If the caller owns the request, then the caller
 * is responsible for releasing the resources. Otherwise, hwrm_req_send() will
 * always consume the request.
 */
int hwrm_req_send(struct bnxt *bp, void *req)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (!ctx)
		return -EINVAL;

	return __hwrm_send(bp, ctx);
}

/**
 * hwrm_req_send_silent() - A silent version of hwrm_req_send().
 * @bp: The driver context.
 * @req: The request to send without logging.
 *
 * The same as hwrm_req_send(), except that the request is silenced using
 * BNXT_HWRM_CTX_SILENT prior to the call. This version of the function is
 * provided solely to preserve the legacy API's flavor for this functionality.
 *
 * Return: A result code, see hwrm_req_send().
 */
int hwrm_req_send_silent(struct bnxt *bp, void *req)
{
	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
	return hwrm_req_send(bp, req);
}

/**
 * hwrm_req_dma_slice() - Allocate a slice of DMA mapped memory.
 * @bp: The driver context.
 * @req: The request for which indirect data will be associated.
 * @size: The size of the allocation.
 * @dma_handle: The bus address associated with the allocation. The HWRM API has
 *	no knowledge about the type of the request and so cannot infer how the
 *	caller intends to use the indirect data. Thus, the caller is
 *	responsible for configuring the request object appropriately to
 *	point to the associated indirect memory. Note, DMA handle has the
 *	same definition as it does in dma_alloc_coherent(), the caller is
 *	responsible for endian conversions via cpu_to_le64() before assigning
 *	this address.
 *
 * Allocates DMA mapped memory for indirect data related to a request. The
 * lifetime of the DMA resources will be bound to that of the request (ie.
 * they will be automatically released when the request is either consumed by
 * hwrm_req_send() or dropped by hwrm_req_drop()). Small allocations are
 * efficiently suballocated out of the request buffer space, hence the name
 * slice, while larger requests are satisfied via an underlying call to
 * dma_alloc_coherent(). Multiple suballocations are supported, however, only
 * one externally mapped region is.
 *
 * Return: The kernel virtual address of the DMA mapping.
 */
void *
hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma_handle)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
	u8 *end = ((u8 *)req) + BNXT_HWRM_DMA_SIZE;
	struct input *input = req;
	u8 *addr, *req_addr = req;
	u32 max_offset, offset;

	if (!ctx)
		return NULL;

	max_offset = BNXT_HWRM_DMA_SIZE - ctx->allocated;
	offset = max_offset - size;
	offset = ALIGN_DOWN(offset, BNXT_HWRM_DMA_ALIGN);
	addr = req_addr + offset;

	if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) {
		ctx->allocated = end - addr;
		*dma_handle = ctx->dma_handle + offset;
		return addr;
	}

	/* could not suballocate from ctx buffer, try create a new mapping */
	if (ctx->slice_addr) {
		/* if one exists, can only be due to software bug, be loud */
		netdev_err(bp->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n",
			   (u32)le16_to_cpu(input->req_type));
		dump_stack();
		return NULL;
	}

	addr = dma_alloc_coherent(&bp->pdev->dev, size, dma_handle, ctx->gfp);
	if (!addr)
		return NULL;

	ctx->slice_addr = addr;
	ctx->slice_size = size;
	ctx->slice_handle = *dma_handle;

	return addr;
}

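/* Usage sketch (modeled on the NVM read path; struct and field names from
 * bnxt_hsi.h, with buf_len and data supplied by the caller): the caller
 * points the request at the slice in little endian and holds the request so
 * the slice stays mapped while the result is copied out.
 *
 *	struct hwrm_nvm_get_dir_entries_input *req;
 *	dma_addr_t dma_handle;
 *	u8 *buf;
 *	int rc;
 *
 *	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
 *	if (rc)
 *		return rc;
 *	buf = hwrm_req_dma_slice(bp, req, buf_len, &dma_handle);
 *	if (!buf) {
 *		hwrm_req_drop(bp, req);
 *		return -ENOMEM;
 *	}
 *	req->dest_data_addr = cpu_to_le64(dma_handle);
 *	hwrm_req_hold(bp, req);
 *	rc = hwrm_req_send(bp, req);
 *	if (!rc)
 *		memcpy(data, buf, buf_len);
 *	hwrm_req_drop(bp, req);
 *	return rc;
 */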