// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2014 Cisco Systems, Inc. All rights reserved.

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>

#include "snic_io.h"
#include "snic.h"
#include "cq_enet_desc.h"
#include "snic_fwint.h"

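/*
 * snic_wq_cmpl_frame_send : callback run by svnic_wq_service() for each WQ
 * descriptor the firmware has acknowledged. It only logs and traces the ack;
 * unmapping and freeing of the request memory is handled elsewhere (see
 * snic_free_wq_buf() and snic_req_free() below).
 */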
static void
snic_wq_cmpl_frame_send(struct vnic_wq *wq,
			struct cq_desc *cq_desc,
			struct vnic_wq_buf *buf,
			void *opaque)
{
	struct snic *snic = svnic_dev_priv(wq->vdev);

	SNIC_BUG_ON(buf->os_buf == NULL);

	if (snic_log_level & SNIC_DESC_LOGGING)
		SNIC_HOST_INFO(snic->shost,
			       "Ack received for snic_host_req %p.\n",
			       buf->os_buf);

	SNIC_TRC(snic->shost->host_no, 0, 0,
		 ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0,
		 0);

	buf->os_buf = NULL;
}

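/*
 * snic_wq_cmpl_handler_cont : continuation callback passed to
 * svnic_cq_service(); services the WQ under wq_lock for every completion
 * descriptor found on the completion queue.
 */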
static int
snic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
			  struct cq_desc *cq_desc,
			  u8 type,
			  u16 q_num,
			  u16 cmpl_idx,
			  void *opaque)
{
	struct snic *snic = svnic_dev_priv(vdev);
	unsigned long flags;

	SNIC_BUG_ON(q_num != 0);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	svnic_wq_service(&snic->wq[q_num],
			 cq_desc,
			 cmpl_idx,
			 snic_wq_cmpl_frame_send,
			 NULL);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	return 0;
} /* end of snic_wq_cmpl_handler_cont */

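/*
 * snic_wq_cmpl_handler : typically invoked from the completion interrupt
 * path to ack firmware-completed WQ descriptors. Returns the number of
 * completions processed, bounded by work_to_do.
 */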
int
snic_wq_cmpl_handler(struct snic *snic, int work_to_do)
{
	unsigned int work_done = 0;
	unsigned int i;

	snic->s_stats.misc.last_ack_time = jiffies;
	for (i = 0; i < snic->wq_count; i++) {
		work_done += svnic_cq_service(&snic->cq[i],
					      work_to_do,
					      snic_wq_cmpl_handler_cont,
					      NULL);
	}

	return work_done;
} /* end of snic_wq_cmpl_handler */

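/*
 * snic_free_wq_buf : unmaps a posted WQ buffer and, if the owning request
 * is still linked on spl_cmd_list, unlinks and frees it. Used as the
 * buffer-release callback when the WQ is drained (e.g. at driver teardown).
 */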
void
snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct snic_host_req *req = buf->os_buf;
	struct snic *snic = svnic_dev_priv(wq->vdev);
	struct snic_req_info *rqi = NULL;
	unsigned long flags;

	dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);

	rqi = req_to_rqi(req);
	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

		goto end;
	}

	SNIC_BUG_ON(rqi->list.next == NULL); /* if not added to spl_cmd_list */
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

	if (rqi->sge_va) {
		snic_pci_unmap_rsp_buf(snic, rqi);
		kfree((void *)rqi->sge_va);
		rqi->sge_va = 0;
	}
	snic_req_free(snic, rqi);
	SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");

end:
	return;
}

/* Criteria to select work queue in multi queue mode */
static int
snic_select_wq(struct snic *snic)
{
	/* No multi queue support for now */
	BUILD_BUG_ON(SNIC_WQ_MAX > 1);

	return 0;
}

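/*
 * snic_wqdesc_avail : number of WQ descriptors still usable for a new
 * request. One descriptor is kept in reserve so that an HBA reset can
 * always be queued; only SNIC_REQ_HBA_RESET may consume that last slot.
 */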
static int
snic_wqdesc_avail(struct snic *snic, int q_num, int req_type)
{
	int nr_wqdesc = snic->config.wq_enet_desc_count;

	if (q_num > 0) {
		/*
		 * Multi Queue case, additional care is required.
		 * Per WQ active requests need to be maintained.
		 */
		SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n");
		SNIC_BUG_ON(q_num > 0);

		return -1;
	}

	nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs);

	return ((req_type == SNIC_REQ_HBA_RESET) ? nr_wqdesc : nr_wqdesc - 1);
}

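/*
 * snic_queue_wq_desc : DMA-maps the request buffer and posts it on the
 * selected WQ. Returns 0 on success, -ENOMEM if the mapping fails or the
 * WQ is full. Typical use in an issue path (a sketch only; caller-side
 * error handling is elided):
 *
 *	snic_handle_untagged_req(snic, rqi);
 *	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
 *	if (ret)
 *		snic_release_untagged_req(snic, rqi);
 */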
int
snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
{
	dma_addr_t pa = 0;
	unsigned long flags;
	struct snic_fw_stats *fwstats = &snic->s_stats.fw;
	struct snic_host_req *req = (struct snic_host_req *) os_buf;
	long act_reqs;
	long desc_avail = 0;
	int q_num = 0;

	snic_print_desc(__func__, os_buf, len);

	/* Map request buffer */
	pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&snic->pdev->dev, pa)) {
		SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");

		return -ENOMEM;
	}

	req->req_pa = (ulong)pa;

	q_num = snic_select_wq(snic);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
	if (desc_avail <= 0) {
		dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
		req->req_pa = 0;
		spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
		atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
		SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);

		return -ENOMEM;
	}

	snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
	/*
	 * note: when multi queue enabled, fw actv_reqs should be per queue.
	 */
	act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	if (act_reqs > atomic64_read(&fwstats->max_actv_reqs))
		atomic64_set(&fwstats->max_actv_reqs, act_reqs);

	return 0;
} /* end of snic_queue_wq_desc() */

/*
 * snic_handle_untagged_req: Adds snic specific requests to spl_cmd_list.
 * Purpose : Used during driver unload to clean up the requests.
 */
void
snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long flags;

	INIT_LIST_HEAD(&rqi->list);

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	list_add_tail(&rqi->list, &snic->spl_cmd_list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}

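/*
 * Request memory layout (a sketch; assumes each req_pool element is sized
 * for the bookkeeping struct plus the firmware request and its inline SG
 * descriptors, which is why rqi->req is simply (rqi + 1)):
 *
 *	+---------------+----------------+---------------------------+
 *	| snic_req_info | snic_host_req  | sg_cnt * snic_sg_desc     |
 *	+---------------+----------------+---------------------------+
 */
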
/*
 * snic_req_init:
 * Allocates snic_req_info + snic_host_req + sgl data, and initializes.
 */
struct snic_req_info *
snic_req_init(struct snic *snic, int sg_cnt)
{
	u8 typ;
	struct snic_req_info *rqi = NULL;

	typ = (sg_cnt <= SNIC_REQ_CACHE_DFLT_SGL) ?
		SNIC_REQ_CACHE_DFLT_SGL : SNIC_REQ_CACHE_MAX_SGL;

	rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.alloc_fail);
		SNIC_HOST_ERR(snic->shost,
			      "Failed to allocate memory from snic req pool id = %d\n",
			      typ);
		return rqi;
	}

	memset(rqi, 0, sizeof(*rqi));
	rqi->rq_pool_type = typ;
	rqi->start_time = jiffies;
	rqi->req = (struct snic_host_req *) (rqi + 1);
	rqi->req_len = sizeof(struct snic_host_req);
	rqi->snic = snic;

	rqi->req = (struct snic_host_req *)(rqi + 1);

	if (sg_cnt == 0)
		goto end;

	rqi->req_len += (sg_cnt * sizeof(struct snic_sg_desc));

	if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
		atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);

	SNIC_BUG_ON(sg_cnt > SNIC_MAX_SG_DESC_CNT);
	atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);

end:
	memset(rqi->req, 0, rqi->req_len);

	/* pre initialization of init_ctx to support req_to_rqi */
	rqi->req->hdr.init_ctx = (ulong) rqi;

	SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocated.\n", rqi);

	return rqi;
} /* end of snic_req_init */

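/*
 * The task-management requests below come from a dedicated pool
 * (SNIC_REQ_TM_CACHE) and hang off the original rqi, so they can be
 * released together with it in snic_req_free().
 */
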
/*
 * snic_abort_req_init : Inits abort request.
 */
struct snic_host_req *
snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	/* If abort to be issued second time, then reuse */
	if (rqi->abort_req)
		return rqi->abort_req;

	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "abts:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	rqi->abort_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_abort_req_init */

/*
 * snic_dr_req_init : Inits device reset req
 */
struct snic_host_req *
snic_dr_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "dr:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	SNIC_BUG_ON(rqi->dr_req != NULL);
	rqi->dr_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_dr_req_init */

/* frees snic_req_info and snic_host_req */
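/*
 * Note: any of the three requests that is still DMA-mapped (req_pa != 0)
 * is unmapped here before its memory goes back to the owning mempool.
 */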
void
snic_req_free(struct snic *snic, struct snic_req_info *rqi)
{
	SNIC_BUG_ON(rqi->req == rqi->abort_req);
	SNIC_BUG_ON(rqi->req == rqi->dr_req);
	SNIC_BUG_ON(rqi->sge_va != 0);

	SNIC_SCSI_DBG(snic->shost,
		      "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n",
		      rqi, rqi->req, rqi->abort_req, rqi->dr_req);

	if (rqi->abort_req) {
		if (rqi->abort_req->req_pa)
			dma_unmap_single(&snic->pdev->dev,
					 rqi->abort_req->req_pa,
					 sizeof(struct snic_host_req),
					 DMA_TO_DEVICE);

		mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
	}

	if (rqi->dr_req) {
		if (rqi->dr_req->req_pa)
			dma_unmap_single(&snic->pdev->dev,
					 rqi->dr_req->req_pa,
					 sizeof(struct snic_host_req),
					 DMA_TO_DEVICE);

		mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
	}

	if (rqi->req->req_pa)
		dma_unmap_single(&snic->pdev->dev,
				 rqi->req->req_pa,
				 rqi->req_len,
				 DMA_TO_DEVICE);

	mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
}

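/*
 * snic_pci_unmap_rsp_buf : unmaps the response buffer described by the
 * first SG descriptor of the request (used by requests that carry a
 * DMA-mapped response there, e.g. the report-targets path).
 */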
void
snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_sg_desc *sgd;

	sgd = req_to_sgl(rqi_to_req(rqi));
	SNIC_BUG_ON(sgd[0].addr == 0);
	dma_unmap_single(&snic->pdev->dev,
			 le64_to_cpu(sgd[0].addr),
			 le32_to_cpu(sgd[0].len),
			 DMA_FROM_DEVICE);
}

/*
 * snic_free_all_untagged_reqs: Walks through untagged reqs and frees them.
 */
void
snic_free_all_untagged_reqs(struct snic *snic)
{
	struct snic_req_info *rqi;
	struct list_head *cur, *nxt;
	unsigned long flags;

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
		rqi = list_entry(cur, struct snic_req_info, list);
		list_del_init(&rqi->list);
		if (rqi->sge_va) {
			snic_pci_unmap_rsp_buf(snic, rqi);
			kfree((void *)rqi->sge_va);
			rqi->sge_va = 0;
		}

		snic_req_free(snic, rqi);
	}
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}

/*
 * snic_release_untagged_req : Unlinks the untagged req and frees it.
 */
void
snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long flags;

	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->in_remove) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		goto end;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
		goto end;
	}
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
	snic_req_free(snic, rqi);

end:
	return;
}

/* dump buf in hex fmt */
void
snic_hex_dump(char *pfx, char *data, int len)
{
	SNIC_INFO("%s Dumping Data of Len = %d\n", pfx, len);
	print_hex_dump_bytes(pfx, DUMP_PREFIX_NONE, data, len);
}

#define	LINE_BUFSZ	128	/* for snic_print_desc fn */
static void
snic_dump_desc(const char *fn, char *os_buf, int len)
{
	struct snic_host_req *req = (struct snic_host_req *) os_buf;
	struct snic_fw_req *fwreq = (struct snic_fw_req *) os_buf;
	struct snic_req_info *rqi = NULL;
	char line[LINE_BUFSZ] = { '\0' };
	char *cmd_str = NULL;

	if (req->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL)
		rqi = (struct snic_req_info *) fwreq->hdr.init_ctx;
	else
		rqi = (struct snic_req_info *) req->hdr.init_ctx;

	SNIC_BUG_ON(rqi == NULL || rqi->req == NULL);
	switch (req->hdr.type) {
	case SNIC_REQ_REPORT_TGTS:
		cmd_str = "report-tgt : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_REPORT_TGTS :");
		break;

	case SNIC_REQ_ICMND:
		cmd_str = "icmnd : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ICMND : 0x%x :",
			 req->u.icmnd.cdb[0]);
		break;

	case SNIC_REQ_ITMF:
		cmd_str = "itmf : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ITMF :");
		break;

	case SNIC_REQ_HBA_RESET:
		cmd_str = "hba reset :";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_HBA_RESET :");
		break;

	case SNIC_REQ_EXCH_VER:
		cmd_str = "exch ver : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_EXCH_VER :");
		break;

	case SNIC_REQ_TGT_INFO:
		cmd_str = "tgt info : ";
		break;

	case SNIC_RSP_REPORT_TGTS_CMPL:
		cmd_str = "report tgt cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_REPORT_TGTS_CMPL :");
		break;

	case SNIC_RSP_ICMND_CMPL:
		cmd_str = "icmnd_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ICMND_CMPL : 0x%x :",
			 rqi->req->u.icmnd.cdb[0]);
		break;

	case SNIC_RSP_ITMF_CMPL:
		cmd_str = "itmf_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ITMF_CMPL :");
		break;

	case SNIC_RSP_HBA_RESET_CMPL:
		cmd_str = "hba_reset_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_HBA_RESET_CMPL :");
		break;

	case SNIC_RSP_EXCH_VER_CMPL:
		cmd_str = "exch_ver_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_EXCH_VER_CMPL :");
		break;

	case SNIC_MSG_ACK:
		cmd_str = "msg ack : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ACK :");
		break;

	case SNIC_MSG_ASYNC_EVNOTIFY:
		cmd_str = "async notify : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ASYNC_EVNOTIFY :");
		break;

	default:
		cmd_str = "unknown : ";
		SNIC_BUG_ON(1);
		break;
	}

	SNIC_INFO("%s:%s >>cmndid=%x:sg_cnt = %x:status = %x:ctx = %lx.\n",
		  fn, line, req->hdr.cmnd_id, req->hdr.sg_cnt, req->hdr.status,
		  req->hdr.init_ctx);

	/* Enable it, to dump byte stream */
	if (snic_log_level & 0x20)
		snic_hex_dump(cmd_str, os_buf, len);
} /* end of snic_dump_desc */

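/*
 * snic_print_desc : dumps the descriptor only when SNIC_DESC_LOGGING is
 * set in snic_log_level, so the fast path stays quiet by default.
 */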
void
snic_print_desc(const char *fn, char *os_buf, int len)
{
	if (snic_log_level & SNIC_DESC_LOGGING)
		snic_dump_desc(fn, os_buf, len);
}

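/*
 * snic_calc_io_process_time : tracks the worst-case I/O latency in jiffies;
 * the duration is measured from rqi->start_time, set at request allocation.
 */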
void
snic_calc_io_process_time(struct snic *snic, struct snic_req_info *rqi)
{
	u64 duration;

	duration = jiffies - rqi->start_time;

	if (duration > atomic64_read(&snic->s_stats.io.max_time))
		atomic64_set(&snic->s_stats.io.max_time, duration);
}