/*
 * Copyright 2014 Cisco Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>

#include "snic_io.h"
#include "snic.h"
#include "cq_enet_desc.h"
#include "snic_fwint.h"
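
/*
 * snic_wq_cmpl_frame_send : Per-buffer completion callback, invoked via
 * svnic_wq_service() for each acked send descriptor. Logs and traces the
 * ack, then clears the os_buf reference.
 */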
static void
snic_wq_cmpl_frame_send(struct vnic_wq *wq,
			struct cq_desc *cq_desc,
			struct vnic_wq_buf *buf,
			void *opaque)
{
	struct snic *snic = svnic_dev_priv(wq->vdev);

	SNIC_BUG_ON(buf->os_buf == NULL);

	if (snic_log_level & SNIC_DESC_LOGGING)
		SNIC_HOST_INFO(snic->shost,
			       "Ack received for snic_host_req %p.\n",
			       buf->os_buf);

	SNIC_TRC(snic->shost->host_no, 0, 0,
		 ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0,
		 0);

	buf->os_buf = NULL;
}
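
/*
 * snic_wq_cmpl_handler_cont : CQ service callback; drains the completed
 * descriptors of WQ[q_num] under wq_lock.
 */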
static int
snic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
			  struct cq_desc *cq_desc,
			  u8 type,
			  u16 q_num,
			  u16 cmpl_idx,
			  void *opaque)
{
	struct snic *snic = svnic_dev_priv(vdev);
	unsigned long flags;

	SNIC_BUG_ON(q_num != 0);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	svnic_wq_service(&snic->wq[q_num],
			 cq_desc,
			 cmpl_idx,
			 snic_wq_cmpl_frame_send,
			 NULL);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	return 0;
} /* end of snic_wq_cmpl_handler_cont */
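
/*
 * snic_wq_cmpl_handler : Services all send completion queues, handling at
 * most work_to_do entries per CQ. Returns the number of entries processed.
 */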
int
snic_wq_cmpl_handler(struct snic *snic, int work_to_do)
{
	unsigned int work_done = 0;
	unsigned int i;

	snic->s_stats.misc.last_ack_time = jiffies;
	for (i = 0; i < snic->wq_count; i++) {
		work_done += svnic_cq_service(&snic->cq[i],
					      work_to_do,
					      snic_wq_cmpl_handler_cont,
					      NULL);
	}

	return work_done;
} /* end of snic_wq_cmpl_handler */
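
/*
 * snic_free_wq_buf : Releases a WQ buffer at queue-cleanup time; unmaps
 * the request, unlinks it from spl_cmd_list if present, and frees the
 * request back to its mempool.
 */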
void
snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct snic_host_req *req = buf->os_buf;
	struct snic *snic = svnic_dev_priv(wq->vdev);
	struct snic_req_info *rqi = NULL;
	unsigned long flags;

	pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);

	rqi = req_to_rqi(req);
	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

		goto end;
	}

	SNIC_BUG_ON(rqi->list.next == NULL); /* if not added to spl_cmd_list */
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

	if (rqi->sge_va) {
		snic_pci_unmap_rsp_buf(snic, rqi);
		kfree((void *)rqi->sge_va);
		rqi->sge_va = 0;
	}
	snic_req_free(snic, rqi);
	SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");

end:
	return;
}

/* Criteria to select work queue in multi queue mode */
static int
snic_select_wq(struct snic *snic)
{
	/* No multi queue support for now */
	BUILD_BUG_ON(SNIC_WQ_MAX > 1);

	return 0;
}
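
/*
 * snic_wqdesc_avail : Returns the number of WQ descriptors available for
 * posting; one descriptor is held in reserve for HBA reset requests.
 */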
static int
snic_wqdesc_avail(struct snic *snic, int q_num, int req_type)
{
	int nr_wqdesc = snic->config.wq_enet_desc_count;

	if (q_num > 0) {
		/*
		 * Multi Queue case, additional care is required.
		 * Per WQ active requests need to be maintained.
		 */
		SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n");
		SNIC_BUG_ON(q_num > 0);

		return -1;
	}

	nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs);

	return ((req_type == SNIC_REQ_HBA_RESET) ? nr_wqdesc : nr_wqdesc - 1);
}
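
/*
 * snic_queue_wq_desc : DMA-maps the host request and posts it to the
 * selected work queue. Returns -ENOMEM if the mapping fails or the WQ
 * is full; updates active-request statistics on success.
 */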
int
snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
{
	dma_addr_t pa = 0;
	unsigned long flags;
	struct snic_fw_stats *fwstats = &snic->s_stats.fw;
	struct snic_host_req *req = (struct snic_host_req *) os_buf;
	long act_reqs;
	long desc_avail = 0;
	int q_num = 0;

	snic_print_desc(__func__, os_buf, len);

	/* Map request buffer */
	pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(snic->pdev, pa)) {
		SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");

		return -ENOMEM;
	}

	req->req_pa = (ulong)pa;

	q_num = snic_select_wq(snic);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
	if (desc_avail <= 0) {
		pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
		req->req_pa = 0;
		spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
		atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
		SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);

		return -ENOMEM;
	}

	snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
	/*
	 * note: when multi queue enabled, fw actv_reqs should be per queue.
	 */
	act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	if (act_reqs > atomic64_read(&fwstats->max_actv_reqs))
		atomic64_set(&fwstats->max_actv_reqs, act_reqs);

	return 0;
} /* end of snic_queue_wq_desc() */

/*
 * snic_handle_untagged_req : Adds snic specific requests to spl_cmd_list.
 * Purpose : Used during driver unload to clean up the requests.
 */
void
snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long flags;

	INIT_LIST_HEAD(&rqi->list);

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	list_add_tail(&rqi->list, &snic->spl_cmd_list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}

/*
 * snic_req_init :
 * Allocates snic_req_info + snic_host_req + sgl data, and initializes.
 */
struct snic_req_info *
snic_req_init(struct snic *snic, int sg_cnt)
{
	u8 typ;
	struct snic_req_info *rqi = NULL;

	typ = (sg_cnt <= SNIC_REQ_CACHE_DFLT_SGL) ?
		SNIC_REQ_CACHE_DFLT_SGL : SNIC_REQ_CACHE_MAX_SGL;

	rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.alloc_fail);
		SNIC_HOST_ERR(snic->shost,
			      "Failed to allocate memory from snic req pool id = %d\n",
			      typ);
		return rqi;
	}

	memset(rqi, 0, sizeof(*rqi));
	rqi->rq_pool_type = typ;
	rqi->start_time = jiffies;
	rqi->req = (struct snic_host_req *) (rqi + 1);
	rqi->req_len = sizeof(struct snic_host_req);
	rqi->snic = snic;

	if (sg_cnt == 0)
		goto end;

	rqi->req_len += (sg_cnt * sizeof(struct snic_sg_desc));

	if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
		atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);

	SNIC_BUG_ON(sg_cnt > SNIC_MAX_SG_DESC_CNT);
	atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);

end:
	memset(rqi->req, 0, rqi->req_len);

	/* pre initialization of init_ctx to support req_to_rqi */
	rqi->req->hdr.init_ctx = (ulong) rqi;

	SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocated.\n", rqi);

	return rqi;
} /* end of snic_req_init */

/*
 * snic_abort_req_init : Inits abort request.
 */
struct snic_host_req *
snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	/* If abort to be issued second time, then reuse */
	if (rqi->abort_req)
		return rqi->abort_req;

	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "abts:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	rqi->abort_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_abort_req_init */

/*
 * snic_dr_req_init : Inits device reset req
 */
struct snic_host_req *
snic_dr_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "dr:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	SNIC_BUG_ON(rqi->dr_req != NULL);
	rqi->dr_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_dr_req_init */

/* frees snic_req_info and snic_host_req */
void
snic_req_free(struct snic *snic, struct snic_req_info *rqi)
{
	SNIC_BUG_ON(rqi->req == rqi->abort_req);
	SNIC_BUG_ON(rqi->req == rqi->dr_req);
	SNIC_BUG_ON(rqi->sge_va != 0);

	SNIC_SCSI_DBG(snic->shost,
		      "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n",
		      rqi, rqi->req, rqi->abort_req, rqi->dr_req);

	if (rqi->abort_req) {
		if (rqi->abort_req->req_pa)
			pci_unmap_single(snic->pdev,
					 rqi->abort_req->req_pa,
					 sizeof(struct snic_host_req),
					 PCI_DMA_TODEVICE);

		mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
	}

	if (rqi->dr_req) {
		if (rqi->dr_req->req_pa)
			pci_unmap_single(snic->pdev,
					 rqi->dr_req->req_pa,
					 sizeof(struct snic_host_req),
					 PCI_DMA_TODEVICE);

		mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
	}

	if (rqi->req->req_pa)
		pci_unmap_single(snic->pdev,
				 rqi->req->req_pa,
				 rqi->req_len,
				 PCI_DMA_TODEVICE);

	mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
}
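
/*
 * snic_pci_unmap_rsp_buf : Unmaps the response buffer described by the
 * first SG descriptor of the request.
 */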
void
snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_sg_desc *sgd;

	sgd = req_to_sgl(rqi_to_req(rqi));
	SNIC_BUG_ON(sgd[0].addr == 0);
	pci_unmap_single(snic->pdev,
			 le64_to_cpu(sgd[0].addr),
			 le32_to_cpu(sgd[0].len),
			 PCI_DMA_FROMDEVICE);
}

/*
 * snic_free_all_untagged_reqs : Walks through untagged reqs and frees them.
 */
void
snic_free_all_untagged_reqs(struct snic *snic)
{
	struct snic_req_info *rqi;
	struct list_head *cur, *nxt;
	unsigned long flags;

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
		rqi = list_entry(cur, struct snic_req_info, list);
		list_del_init(&rqi->list);
		if (rqi->sge_va) {
			snic_pci_unmap_rsp_buf(snic, rqi);
			kfree((void *)rqi->sge_va);
			rqi->sge_va = 0;
		}

		snic_req_free(snic, rqi);
	}
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}

/*
 * snic_release_untagged_req : Unlinks the untagged req and frees it.
 */
void
snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long flags;

	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->in_remove) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);

		return;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

		return;
	}
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
	snic_req_free(snic, rqi);
}

/* dump buf in hex fmt */
void
snic_hex_dump(char *pfx, char *data, int len)
{
	SNIC_INFO("%s Dumping Data of Len = %d\n", pfx, len);
	print_hex_dump_bytes(pfx, DUMP_PREFIX_NONE, data, len);
}

#define LINE_BUFSZ	128	/* for snic_print_desc fn */
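
/*
 * snic_dump_desc : Decodes the descriptor header type into a readable
 * line and logs it; hex-dumps the raw bytes when snic_log_level has the
 * 0x20 bit set.
 */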
static void
snic_dump_desc(const char *fn, char *os_buf, int len)
{
	struct snic_host_req *req = (struct snic_host_req *) os_buf;
	struct snic_fw_req *fwreq = (struct snic_fw_req *) os_buf;
	struct snic_req_info *rqi = NULL;
	char line[LINE_BUFSZ] = { '\0' };
	char *cmd_str = NULL;

	if (req->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL)
		rqi = (struct snic_req_info *) fwreq->hdr.init_ctx;
	else
		rqi = (struct snic_req_info *) req->hdr.init_ctx;

	SNIC_BUG_ON(rqi == NULL || rqi->req == NULL);
	switch (req->hdr.type) {
	case SNIC_REQ_REPORT_TGTS:
		cmd_str = "report-tgt : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_REPORT_TGTS :");
		break;

	case SNIC_REQ_ICMND:
		cmd_str = "icmnd : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ICMND : 0x%x :",
			 req->u.icmnd.cdb[0]);
		break;

	case SNIC_REQ_ITMF:
		cmd_str = "itmf : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ITMF :");
		break;

	case SNIC_REQ_HBA_RESET:
		cmd_str = "hba reset :";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_HBA_RESET :");
		break;

	case SNIC_REQ_EXCH_VER:
		cmd_str = "exch ver : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_EXCH_VER :");
		break;

	case SNIC_REQ_TGT_INFO:
		cmd_str = "tgt info : ";
		break;

	case SNIC_RSP_REPORT_TGTS_CMPL:
		cmd_str = "report tgt cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_REPORT_TGTS_CMPL :");
		break;

	case SNIC_RSP_ICMND_CMPL:
		cmd_str = "icmnd_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ICMND_CMPL : 0x%x :",
			 rqi->req->u.icmnd.cdb[0]);
		break;

	case SNIC_RSP_ITMF_CMPL:
		cmd_str = "itmf_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ITMF_CMPL :");
		break;

	case SNIC_RSP_HBA_RESET_CMPL:
		cmd_str = "hba_reset_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_HBA_RESET_CMPL :");
		break;

	case SNIC_RSP_EXCH_VER_CMPL:
		cmd_str = "exch_ver_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_EXCH_VER_CMPL :");
		break;

	case SNIC_MSG_ACK:
		cmd_str = "msg ack : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ACK :");
		break;

	case SNIC_MSG_ASYNC_EVNOTIFY:
		cmd_str = "async notify : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ASYNC_EVNOTIFY :");
		break;

	default:
		cmd_str = "unknown : ";
		break;
	}

	SNIC_INFO("%s:%s >>cmndid=%x:sg_cnt = %x:status = %x:ctx = %lx.\n",
		  fn, line, req->hdr.cmnd_id, req->hdr.sg_cnt, req->hdr.status,
		  req->hdr.init_ctx);

	/* Enable it, to dump byte stream */
	if (snic_log_level & 0x20)
		snic_hex_dump(cmd_str, os_buf, len);
} /* end of snic_dump_desc */
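
/*
 * snic_print_desc : Wrapper that dumps the descriptor only when
 * SNIC_DESC_LOGGING is enabled in snic_log_level.
 */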
void
snic_print_desc(const char *fn, char *os_buf, int len)
{
	if (snic_log_level & SNIC_DESC_LOGGING)
		snic_dump_desc(fn, os_buf, len);
}
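
/*
 * snic_calc_io_process_time : Tracks the longest IO processing time, in
 * jiffies, measured from the request's start_time.
 */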
void
snic_calc_io_process_time(struct snic *snic, struct snic_req_info *rqi)
{
	u64 duration;

	duration = jiffies - rqi->start_time;

	if (duration > atomic64_read(&snic->s_stats.io.max_time))
		atomic64_set(&snic->s_stats.io.max_time, duration);
}