/*
 * Copyright 2014 Cisco Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>

#include "snic_io.h"
#include "snic.h"
#include "cq_enet_desc.h"
#include "snic_fwint.h"

static void
snic_wq_cmpl_frame_send(struct vnic_wq *wq,
			struct cq_desc *cq_desc,
			struct vnic_wq_buf *buf,
			void *opaque)
{
	struct snic *snic = svnic_dev_priv(wq->vdev);

	SNIC_BUG_ON(buf->os_buf == NULL);

	if (snic_log_level & SNIC_DESC_LOGGING)
		SNIC_HOST_INFO(snic->shost,
			       "Ack received for snic_host_req %p.\n",
			       buf->os_buf);

	SNIC_TRC(snic->shost->host_no, 0, 0,
		 ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0,
		 0);

	pci_unmap_single(snic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_TODEVICE);
	buf->os_buf = NULL;
}

static int
snic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
			  struct cq_desc *cq_desc,
			  u8 type,
			  u16 q_num,
			  u16 cmpl_idx,
			  void *opaque)
{
	struct snic *snic = svnic_dev_priv(vdev);
	unsigned long flags;

	SNIC_BUG_ON(q_num != 0);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	svnic_wq_service(&snic->wq[q_num],
			 cq_desc,
			 cmpl_idx,
			 snic_wq_cmpl_frame_send,
			 NULL);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	return 0;
} /* end of snic_wq_cmpl_handler_cont */

int
snic_wq_cmpl_handler(struct snic *snic, int work_to_do)
{
	unsigned int work_done = 0;
	unsigned int i;

	snic->s_stats.misc.last_ack_time = jiffies;
	for (i = 0; i < snic->wq_count; i++) {
		work_done += svnic_cq_service(&snic->cq[i],
					      work_to_do,
					      snic_wq_cmpl_handler_cont,
					      NULL);
	}

	return work_done;
} /* end of snic_wq_cmpl_handler */
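
/*
 * Usage sketch (assumption, not defined in this file):
 * snic_wq_cmpl_handler() is expected to be driven from the WQ interrupt
 * path, roughly:
 *
 *	static irqreturn_t snic_isr_msix_wq(int irq, void *data)
 *	{
 *		struct snic *snic = data;
 *		unsigned int wq_work_done;
 *
 *		wq_work_done = snic_wq_cmpl_handler(snic, -1);
 *		svnic_intr_return_credits(&snic->intr[SNIC_MSIX_WQ],
 *					  wq_work_done, 1, 1);
 *		return IRQ_HANDLED;
 *	}
 *
 * The handler name and SNIC_MSIX_WQ index are illustrative, modeled on the
 * driver's ISR code (snic_isr.c); only snic_wq_cmpl_handler() lives here.
 */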

void
snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct snic_host_req *req = buf->os_buf;
	struct snic *snic = svnic_dev_priv(wq->vdev);
	struct snic_req_info *rqi = NULL;
	unsigned long flags;

	pci_unmap_single(snic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_TODEVICE);

	rqi = req_to_rqi(req);
	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

		return;
	}

	SNIC_BUG_ON(rqi->list.next == NULL); /* if not added to spl_cmd_list */
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

	if (rqi->sge_va) {
		snic_pci_unmap_rsp_buf(snic, rqi);
		kfree((void *)rqi->sge_va);
		rqi->sge_va = 0;
	}
	snic_req_free(snic, rqi);
	SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");
}

/* Criteria to select work queue in multi queue mode */
static int
snic_select_wq(struct snic *snic)
{
	/* No multi queue support for now */
	BUILD_BUG_ON(SNIC_WQ_MAX > 1);

	return 0;
}
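
/*
 * Illustrative only: if multi queue support were added, the BUILD_BUG_ON
 * above would go away and the selection could, for example, spread
 * submissions across queues by CPU:
 *
 *	return raw_smp_processor_id() % snic->wq_count;
 *
 * That policy is an assumption sketched here for clarity; the driver
 * currently always uses queue 0.
 */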

int
snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
{
	dma_addr_t pa = 0;
	unsigned long flags;
	struct snic_fw_stats *fwstats = &snic->s_stats.fw;
	long act_reqs;
	int q_num = 0;

	snic_print_desc(__func__, os_buf, len);

	/* Map request buffer */
	pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(snic->pdev, pa)) {
		SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");

		return -ENOMEM;
	}

	q_num = snic_select_wq(snic);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	if (!svnic_wq_desc_avail(&snic->wq[q_num])) {
		pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
		spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
		atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
		SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);

		return -ENOMEM;
	}

	snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	/* Update stats */
	act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
	if (act_reqs > atomic64_read(&fwstats->max_actv_reqs))
		atomic64_set(&fwstats->max_actv_reqs, act_reqs);

	return 0;
} /* end of snic_queue_wq_desc() */
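
/*
 * Note on lock scope in snic_queue_wq_desc(): wq_lock[q_num] covers both
 * the descriptor-available check and the post via snic_queue_wq_eth_desc(),
 * so a competing submitter cannot consume the last descriptor between the
 * check and the post. The actv_reqs/max_actv_reqs accounting runs outside
 * the lock, so max_actv_reqs is a best-effort high-water mark: two CPUs can
 * race between the atomic64_read() and atomic64_set().
 */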

/*
 * snic_handle_untagged_req: Adds snic specific requests to spl_cmd_list.
 * Purpose : Used during driver unload to clean up the requests.
 */
void
snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long flags;

	INIT_LIST_HEAD(&rqi->list);

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	list_add_tail(&rqi->list, &snic->spl_cmd_list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}
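
/*
 * Usage sketch (hedged): a caller issuing an untagged (non-SCSI-tagged)
 * request would pair this helper with the alloc/queue routines in this
 * file, for instance:
 *
 *	rqi = snic_req_init(snic, 0);
 *	if (!rqi)
 *		return -ENOMEM;
 *	... fill in rqi->req ...
 *	snic_handle_untagged_req(snic, rqi);
 *	if (snic_queue_wq_desc(snic, rqi->req, rqi->req_len)) {
 *		snic_release_untagged_req(snic, rqi);
 *		return -EIO;
 *	}
 *
 * The exact ordering and error handling at real call sites (e.g. the
 * discovery path) may differ; this only shows how the list helpers fit
 * together.
 */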

/*
 * snic_req_init:
 * Allocates snic_req_info + snic_host_req + sgl data, and initializes.
 */
struct snic_req_info *
snic_req_init(struct snic *snic, int sg_cnt)
{
	u8 typ;
	struct snic_req_info *rqi = NULL;

	typ = (sg_cnt <= SNIC_REQ_CACHE_DFLT_SGL) ?
		SNIC_REQ_CACHE_DFLT_SGL : SNIC_REQ_CACHE_MAX_SGL;

	rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.alloc_fail);
		SNIC_HOST_ERR(snic->shost,
			      "Failed to allocate memory from snic req pool id = %d\n",
			      typ);

		return NULL;
	}

	memset(rqi, 0, sizeof(*rqi));
	rqi->rq_pool_type = typ;
	rqi->start_time = jiffies;
	/* snic_host_req is carved out of the same allocation, just past rqi */
	rqi->req = (struct snic_host_req *) (rqi + 1);
	rqi->req_len = sizeof(struct snic_host_req);
	rqi->snic = snic;

	if (sg_cnt == 0)
		goto end;

	rqi->req_len += (sg_cnt * sizeof(struct snic_sg_desc));

	if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
		atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);

	SNIC_BUG_ON(sg_cnt > SNIC_MAX_SG_DESC_CNT);
	atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);

end:
	memset(rqi->req, 0, rqi->req_len);

	/* pre initialization of init_ctx to support req_to_rqi */
	rqi->req->hdr.init_ctx = (ulong) rqi;

	SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocated.\n", rqi);

	return rqi;
} /* end of snic_req_init */
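
/*
 * Layout note, derived from the pointer arithmetic above: one mempool
 * element holds the bookkeeping and the firmware request back to back,
 *
 *	+----------------+----------------+------------------------+
 *	| snic_req_info  | snic_host_req  | sg_cnt * snic_sg_desc  |
 *	+----------------+----------------+------------------------+
 *	rqi              rqi->req = rqi+1
 *
 * and hdr.init_ctx stores rqi, which is what lets req_to_rqi() recover
 * the bookkeeping struct from a bare request pointer.
 */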

/*
 * snic_abort_req_init : Inits abort request.
 */
struct snic_host_req *
snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	/* If abort to be issued second time, then reuse */
	if (rqi->abort_req)
		return rqi->abort_req;

	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "abts:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	rqi->abort_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_abort_req_init */

/*
 * snic_dr_req_init : Inits device reset req
 */
struct snic_host_req *
snic_dr_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "dr:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	SNIC_BUG_ON(rqi->dr_req != NULL);
	rqi->dr_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_dr_req_init */

/* frees snic_req_info and snic_host_req */
void
snic_req_free(struct snic *snic, struct snic_req_info *rqi)
{
	SNIC_BUG_ON(rqi->req == rqi->abort_req);
	SNIC_BUG_ON(rqi->req == rqi->dr_req);
	SNIC_BUG_ON(rqi->sge_va != 0);

	SNIC_SCSI_DBG(snic->shost,
		      "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n",
		      rqi, rqi->req, rqi->abort_req, rqi->dr_req);

	if (rqi->abort_req)
		mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);

	if (rqi->dr_req)
		mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);

	mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
}

void
snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_sg_desc *sgd;

	sgd = req_to_sgl(rqi_to_req(rqi));
	SNIC_BUG_ON(sgd[0].addr == 0);
	pci_unmap_single(snic->pdev,
			 le64_to_cpu(sgd[0].addr),
			 le32_to_cpu(sgd[0].len),
			 PCI_DMA_FROMDEVICE);
}
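
/*
 * Note: sgd[0].addr holds the bus address of the response buffer, while
 * rqi->sge_va is assumed to hold its kernel virtual address; callers such
 * as snic_free_wq_buf() and snic_free_all_untagged_reqs() unmap via this
 * function and then kfree((void *)rqi->sge_va), which only works if the
 * two refer to the same buffer.
 */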

/*
 * snic_free_all_untagged_reqs: Walks through untagged reqs and frees them.
 */
void
snic_free_all_untagged_reqs(struct snic *snic)
{
	struct snic_req_info *rqi;
	struct list_head *cur, *nxt;
	unsigned long flags;

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
		rqi = list_entry(cur, struct snic_req_info, list);
		list_del_init(&rqi->list);
		if (rqi->sge_va) {
			snic_pci_unmap_rsp_buf(snic, rqi);
			kfree((void *)rqi->sge_va);
			rqi->sge_va = 0;
		}

		snic_req_free(snic, rqi);
	}
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}

/*
 * snic_release_untagged_req : Unlinks the untagged req and frees it.
 */
void
snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long flags;

	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->in_remove) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);

		return;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

		return;
	}
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
	snic_req_free(snic, rqi);
}

/* dump buf in hex fmt */
void
snic_hex_dump(char *pfx, char *data, int len)
{
	SNIC_INFO("%s Dumping Data of Len = %d\n", pfx, len);
	print_hex_dump_bytes(pfx, DUMP_PREFIX_NONE, data, len);
}

#define	LINE_BUFSZ	128	/* for snic_print_desc fn */
static void
snic_dump_desc(const char *fn, char *os_buf, int len)
{
	struct snic_host_req *req = (struct snic_host_req *) os_buf;
	struct snic_fw_req *fwreq = (struct snic_fw_req *) os_buf;
	struct snic_req_info *rqi = NULL;
	char line[LINE_BUFSZ] = { '\0' };
	char *cmd_str = NULL;

	if (req->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL)
		rqi = (struct snic_req_info *) fwreq->hdr.init_ctx;
	else
		rqi = (struct snic_req_info *) req->hdr.init_ctx;

	SNIC_BUG_ON(rqi == NULL || rqi->req == NULL);
	switch (req->hdr.type) {
	case SNIC_REQ_REPORT_TGTS:
		cmd_str = "report-tgt : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_REPORT_TGTS :");
		break;

	case SNIC_REQ_ICMND:
		cmd_str = "icmnd : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ICMND : 0x%x :",
			 req->u.icmnd.cdb[0]);
		break;

	case SNIC_REQ_ITMF:
		cmd_str = "itmf : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ITMF :");
		break;

	case SNIC_REQ_HBA_RESET:
		cmd_str = "hba reset :";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_HBA_RESET :");
		break;

	case SNIC_REQ_EXCH_VER:
		cmd_str = "exch ver : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_EXCH_VER :");
		break;

	case SNIC_REQ_TGT_INFO:
		cmd_str = "tgt info : ";
		break;

	case SNIC_RSP_REPORT_TGTS_CMPL:
		cmd_str = "report tgt cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_REPORT_TGTS_CMPL :");
		break;

	case SNIC_RSP_ICMND_CMPL:
		cmd_str = "icmnd_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ICMND_CMPL : 0x%x :",
			 rqi->req->u.icmnd.cdb[0]);
		break;

	case SNIC_RSP_ITMF_CMPL:
		cmd_str = "itmf_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ITMF_CMPL :");
		break;

	case SNIC_RSP_HBA_RESET_CMPL:
		cmd_str = "hba_reset_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_HBA_RESET_CMPL :");
		break;

	case SNIC_RSP_EXCH_VER_CMPL:
		cmd_str = "exch_ver_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_EXCH_VER_CMPL :");
		break;

	case SNIC_MSG_ACK:
		cmd_str = "msg ack : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ACK :");
		break;

	case SNIC_MSG_ASYNC_EVNOTIFY:
		cmd_str = "async notify : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ASYNC_EVNOTIFY :");
		break;

	default:
		cmd_str = "unknown : ";
		SNIC_BUG_ON(1);
		break;
	}

	SNIC_INFO("%s:%s >>cmndid=%x:sg_cnt = %x:status = %x:ctx = %lx.\n",
		  fn, line, req->hdr.cmnd_id, req->hdr.sg_cnt, req->hdr.status,
		  req->hdr.init_ctx);

	/* Enable it, to dump byte stream */
	if (snic_log_level & 0x20)
		snic_hex_dump(cmd_str, os_buf, len);
} /* end of snic_dump_desc */

void
snic_print_desc(const char *fn, char *os_buf, int len)
{
	if (snic_log_level & SNIC_DESC_LOGGING)
		snic_dump_desc(fn, os_buf, len);
}

void
snic_calc_io_process_time(struct snic *snic, struct snic_req_info *rqi)
{
	u64 duration;

	duration = jiffies - rqi->start_time;

	if (duration > atomic64_read(&snic->s_stats.io.max_time))
		atomic64_set(&snic->s_stats.io.max_time, duration);
}