/*
 * Copyright 2014 Cisco Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <scsi/scsi_tcq.h>

#include "snic_io.h"
#include "snic.h"
#include "cq_enet_desc.h"
#include "snic_fwint.h"

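/*
 * snic_wq_cmpl_frame_send : per-descriptor callback passed to
 * svnic_wq_service(); logs the firmware ack for a posted host request
 * and drops the driver's reference to the request buffer.
 */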
static void
snic_wq_cmpl_frame_send(struct vnic_wq *wq,
			struct cq_desc *cq_desc,
			struct vnic_wq_buf *buf,
			void *opaque)
{
	struct snic *snic = svnic_dev_priv(wq->vdev);

	SNIC_BUG_ON(buf->os_buf == NULL);

	if (snic_log_level & SNIC_DESC_LOGGING)
		SNIC_HOST_INFO(snic->shost,
			       "Ack received for snic_host_req %p.\n",
			       buf->os_buf);

	SNIC_TRC(snic->shost->host_no, 0, 0,
		 ((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0,
		 0);

	buf->os_buf = NULL;
}

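/*
 * snic_wq_cmpl_handler_cont : completion-queue service callback; drains
 * the completed work queue entry under the per-WQ lock.
 */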
static int
snic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
			  struct cq_desc *cq_desc,
			  u8 type,
			  u16 q_num,
			  u16 cmpl_idx,
			  void *opaque)
{
	struct snic *snic = svnic_dev_priv(vdev);
	unsigned long flags;

	SNIC_BUG_ON(q_num != 0);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	svnic_wq_service(&snic->wq[q_num],
			 cq_desc,
			 cmpl_idx,
			 snic_wq_cmpl_frame_send,
			 NULL);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	return 0;
} /* end of snic_wq_cmpl_handler_cont */

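/*
 * snic_wq_cmpl_handler : services up to work_to_do WQ completions across
 * all completion queues and records the last ack time. Returns the number
 * of completions processed.
 */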
int
snic_wq_cmpl_handler(struct snic *snic, int work_to_do)
{
	unsigned int work_done = 0;
	unsigned int i;

	snic->s_stats.misc.last_ack_time = jiffies;
	for (i = 0; i < snic->wq_count; i++) {
		work_done += svnic_cq_service(&snic->cq[i],
					      work_to_do,
					      snic_wq_cmpl_handler_cont,
					      NULL);
	}

	return work_done;
} /* end of snic_wq_cmpl_handler */

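/*
 * snic_free_wq_buf : releases a WQ buffer that is still outstanding,
 * e.g. during queue cleanup. Unmaps the request, unlinks it from
 * spl_cmd_list if present, and frees the associated snic_req_info.
 */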
void
snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct snic_host_req *req = buf->os_buf;
	struct snic *snic = svnic_dev_priv(wq->vdev);
	struct snic_req_info *rqi = NULL;
	unsigned long flags;

	dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);

	rqi = req_to_rqi(req);
	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

		goto end;
	}

	SNIC_BUG_ON(rqi->list.next == NULL); /* if not added to spl_cmd_list */
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

	if (rqi->sge_va) {
		snic_pci_unmap_rsp_buf(snic, rqi);
		kfree((void *)rqi->sge_va);
		rqi->sge_va = 0;
	}
	snic_req_free(snic, rqi);
	SNIC_HOST_INFO(snic->shost, "snic_free_wq_buf .. freed.\n");

end:
	return;
}

/* Criteria to select work queue in multi queue mode */
static int
snic_select_wq(struct snic *snic)
{
	/* No multi queue support for now */
	BUILD_BUG_ON(SNIC_WQ_MAX > 1);

	return 0;
}

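/*
 * snic_wqdesc_avail : returns the number of WQ descriptors still available
 * on q_num, keeping one slot in reserve so that an HBA reset request can
 * always be queued.
 */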
static int
snic_wqdesc_avail(struct snic *snic, int q_num, int req_type)
{
	int nr_wqdesc = snic->config.wq_enet_desc_count;

	if (q_num > 0) {
		/*
		 * Multi Queue case, additional care is required.
		 * Per WQ active requests need to be maintained.
		 */
		SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n");
		SNIC_BUG_ON(q_num > 0);

		return -1;
	}

	nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs);

	return ((req_type == SNIC_REQ_HBA_RESET) ? nr_wqdesc : nr_wqdesc - 1);
}

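/*
 * snic_queue_wq_desc : DMA-maps the host request, reserves a descriptor on
 * the selected WQ and posts it to the firmware. Returns 0 on success, or
 * -ENOMEM when the mapping fails or the WQ is full.
 */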
int
snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
{
	dma_addr_t pa = 0;
	unsigned long flags;
	struct snic_fw_stats *fwstats = &snic->s_stats.fw;
	struct snic_host_req *req = (struct snic_host_req *) os_buf;
	ulong act_reqs;
	int desc_avail = 0;
	int q_num = 0;

	snic_print_desc(__func__, os_buf, len);

	/* Map request buffer */
	pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&snic->pdev->dev, pa)) {
		SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");

		return -ENOMEM;
	}

	req->req_pa = (ulong)pa;

	q_num = snic_select_wq(snic);

	spin_lock_irqsave(&snic->wq_lock[q_num], flags);
	desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
	if (desc_avail <= 0) {
		dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
		req->req_pa = 0;
		spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
		atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
		SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);

		return -ENOMEM;
	}

	snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
	/*
	 * note: when multi queue enabled, fw actv_reqs should be per queue.
	 */
	act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
	spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);

	if (act_reqs > atomic64_read(&fwstats->max_actv_reqs))
		atomic64_set(&fwstats->max_actv_reqs, act_reqs);

	return 0;
} /* end of snic_queue_wq_desc() */

/*
 * snic_handle_untagged_req: Adds snic specific requests to spl_cmd_list.
 * Purpose : Used during driver unload to clean up the requests.
 */
void
snic_handle_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long flags;

	INIT_LIST_HEAD(&rqi->list);

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	list_add_tail(&rqi->list, &snic->spl_cmd_list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}

/*
 * snic_req_init:
 * Allocates snic_req_info + snic_host_req + sgl data, and initializes.
 */
struct snic_req_info *
snic_req_init(struct snic *snic, int sg_cnt)
{
	u8 typ;
	struct snic_req_info *rqi = NULL;

	typ = (sg_cnt <= SNIC_REQ_CACHE_DFLT_SGL) ?
		SNIC_REQ_CACHE_DFLT_SGL : SNIC_REQ_CACHE_MAX_SGL;

	rqi = mempool_alloc(snic->req_pool[typ], GFP_ATOMIC);
	if (!rqi) {
		atomic64_inc(&snic->s_stats.io.alloc_fail);
		SNIC_HOST_ERR(snic->shost,
			      "Failed to allocate memory from snic req pool id = %d\n",
			      typ);
		return rqi;
	}

	memset(rqi, 0, sizeof(*rqi));
	rqi->rq_pool_type = typ;
	rqi->start_time = jiffies;
	rqi->req = (struct snic_host_req *) (rqi + 1);
	rqi->req_len = sizeof(struct snic_host_req);
	rqi->snic = snic;

	rqi->req = (struct snic_host_req *)(rqi + 1);

	if (sg_cnt == 0)
		goto end;

	rqi->req_len += (sg_cnt * sizeof(struct snic_sg_desc));

	if (sg_cnt > atomic64_read(&snic->s_stats.io.max_sgl))
		atomic64_set(&snic->s_stats.io.max_sgl, sg_cnt);

	SNIC_BUG_ON(sg_cnt > SNIC_MAX_SG_DESC_CNT);
	atomic64_inc(&snic->s_stats.io.sgl_cnt[sg_cnt - 1]);

end:
	memset(rqi->req, 0, rqi->req_len);

	/* pre initialization of init_ctx to support req_to_rqi */
	rqi->req->hdr.init_ctx = (ulong) rqi;

	SNIC_SCSI_DBG(snic->shost, "Req_alloc:rqi = %p allocated.\n", rqi);

	return rqi;
} /* end of snic_req_init */

/*
 * snic_abort_req_init : Inits abort request.
 */
struct snic_host_req *
snic_abort_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	/* If abort to be issued second time, then reuse */
	if (rqi->abort_req)
		return rqi->abort_req;

	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "abts:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	rqi->abort_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_abort_req_init */

/*
 * snic_dr_req_init : Inits device reset req
 */
struct snic_host_req *
snic_dr_req_init(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_host_req *req = NULL;

	SNIC_BUG_ON(!rqi);

	req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC);
	if (!req) {
		SNIC_HOST_ERR(snic->shost, "dr:Failed to alloc tm req.\n");
		WARN_ON_ONCE(1);

		return NULL;
	}

	SNIC_BUG_ON(rqi->dr_req != NULL);
	rqi->dr_req = req;
	memset(req, 0, sizeof(struct snic_host_req));
	/* pre initialization of init_ctx to support req_to_rqi */
	req->hdr.init_ctx = (ulong) rqi;

	return req;
} /* end of snic_dr_req_init */

/* frees snic_req_info and snic_host_req */
void
snic_req_free(struct snic *snic, struct snic_req_info *rqi)
{
	SNIC_BUG_ON(rqi->req == rqi->abort_req);
	SNIC_BUG_ON(rqi->req == rqi->dr_req);
	SNIC_BUG_ON(rqi->sge_va != 0);

	SNIC_SCSI_DBG(snic->shost,
		      "Req_free:rqi %p:ioreq %p:abt %p:dr %p\n",
		      rqi, rqi->req, rqi->abort_req, rqi->dr_req);

	if (rqi->abort_req) {
		if (rqi->abort_req->req_pa)
			dma_unmap_single(&snic->pdev->dev,
					 rqi->abort_req->req_pa,
					 sizeof(struct snic_host_req),
					 DMA_TO_DEVICE);

		mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
	}

	if (rqi->dr_req) {
		if (rqi->dr_req->req_pa)
			dma_unmap_single(&snic->pdev->dev,
					 rqi->dr_req->req_pa,
					 sizeof(struct snic_host_req),
					 DMA_TO_DEVICE);

		mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
	}

	if (rqi->req->req_pa)
		dma_unmap_single(&snic->pdev->dev,
				 rqi->req->req_pa,
				 rqi->req_len,
				 DMA_TO_DEVICE);

	mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
}

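/*
 * snic_pci_unmap_rsp_buf : unmaps the DMA response buffer described by the
 * first SG entry of the request.
 */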
void
snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
{
	struct snic_sg_desc *sgd;

	sgd = req_to_sgl(rqi_to_req(rqi));
	SNIC_BUG_ON(sgd[0].addr == 0);
	dma_unmap_single(&snic->pdev->dev,
			 le64_to_cpu(sgd[0].addr),
			 le32_to_cpu(sgd[0].len),
			 DMA_FROM_DEVICE);
}

/*
 * snic_free_all_untagged_reqs: Walks through untagged reqs and frees them.
 */
void
snic_free_all_untagged_reqs(struct snic *snic)
{
	struct snic_req_info *rqi;
	struct list_head *cur, *nxt;
	unsigned long flags;

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
		rqi = list_entry(cur, struct snic_req_info, list);
		list_del_init(&rqi->list);
		if (rqi->sge_va) {
			snic_pci_unmap_rsp_buf(snic, rqi);
			kfree((void *)rqi->sge_va);
			rqi->sge_va = 0;
		}

		snic_req_free(snic, rqi);
	}
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
}

/*
 * snic_release_untagged_req : Unlinks the untagged req and frees it.
 */
void
snic_release_untagged_req(struct snic *snic, struct snic_req_info *rqi)
{
	unsigned long flags;

	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->in_remove) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);

		goto end;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	spin_lock_irqsave(&snic->spl_cmd_lock, flags);
	if (list_empty(&rqi->list)) {
		spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);

		goto end;
	}
	list_del_init(&rqi->list);
	spin_unlock_irqrestore(&snic->spl_cmd_lock, flags);
	snic_req_free(snic, rqi);

end:
	return;
}

/* dump buf in hex fmt */
void
snic_hex_dump(char *pfx, char *data, int len)
{
	SNIC_INFO("%s Dumping Data of Len = %d\n", pfx, len);
	print_hex_dump_bytes(pfx, DUMP_PREFIX_NONE, data, len);
}

#define LINE_BUFSZ	128	/* for snic_print_desc fn */
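/*
 * snic_dump_desc : decodes a host request / firmware response descriptor
 * into a readable one-line summary; optionally hex-dumps the raw bytes
 * when the corresponding log level bit is set.
 */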
static void
snic_dump_desc(const char *fn, char *os_buf, int len)
{
	struct snic_host_req *req = (struct snic_host_req *) os_buf;
	struct snic_fw_req *fwreq = (struct snic_fw_req *) os_buf;
	struct snic_req_info *rqi = NULL;
	char line[LINE_BUFSZ] = { '\0' };
	char *cmd_str = NULL;

	if (req->hdr.type >= SNIC_RSP_REPORT_TGTS_CMPL)
		rqi = (struct snic_req_info *) fwreq->hdr.init_ctx;
	else
		rqi = (struct snic_req_info *) req->hdr.init_ctx;

	SNIC_BUG_ON(rqi == NULL || rqi->req == NULL);
	switch (req->hdr.type) {
	case SNIC_REQ_REPORT_TGTS:
		cmd_str = "report-tgt : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_REPORT_TGTS :");
		break;

	case SNIC_REQ_ICMND:
		cmd_str = "icmnd : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ICMND : 0x%x :",
			 req->u.icmnd.cdb[0]);
		break;

	case SNIC_REQ_ITMF:
		cmd_str = "itmf : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_ITMF :");
		break;

	case SNIC_REQ_HBA_RESET:
		cmd_str = "hba reset :";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_HBA_RESET :");
		break;

	case SNIC_REQ_EXCH_VER:
		cmd_str = "exch ver : ";
		snprintf(line, LINE_BUFSZ, "SNIC_REQ_EXCH_VER :");
		break;

	case SNIC_REQ_TGT_INFO:
		cmd_str = "tgt info : ";
		break;

	case SNIC_RSP_REPORT_TGTS_CMPL:
		cmd_str = "report tgt cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_REPORT_TGTS_CMPL :");
		break;

	case SNIC_RSP_ICMND_CMPL:
		cmd_str = "icmnd_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ICMND_CMPL : 0x%x :",
			 rqi->req->u.icmnd.cdb[0]);
		break;

	case SNIC_RSP_ITMF_CMPL:
		cmd_str = "itmf_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_ITMF_CMPL :");
		break;

	case SNIC_RSP_HBA_RESET_CMPL:
		cmd_str = "hba_reset_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_HBA_RESET_CMPL :");
		break;

	case SNIC_RSP_EXCH_VER_CMPL:
		cmd_str = "exch_ver_cmpl : ";
		snprintf(line, LINE_BUFSZ, "SNIC_RSP_EXCH_VER_CMPL :");
		break;

	case SNIC_MSG_ACK:
		cmd_str = "msg ack : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ACK :");
		break;

	case SNIC_MSG_ASYNC_EVNOTIFY:
		cmd_str = "async notify : ";
		snprintf(line, LINE_BUFSZ, "SNIC_MSG_ASYNC_EVNOTIFY :");
		break;

	default:
		cmd_str = "unknown : ";
		break;
	}

	SNIC_INFO("%s:%s >>cmndid=%x:sg_cnt = %x:status = %x:ctx = %lx.\n",
		  fn, line, req->hdr.cmnd_id, req->hdr.sg_cnt, req->hdr.status,
		  req->hdr.init_ctx);

	/* Enable it, to dump byte stream */
	if (snic_log_level & 0x20)
		snic_hex_dump(cmd_str, os_buf, len);
} /* end of snic_dump_desc */

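/*
 * snic_print_desc : dumps the descriptor only when SNIC_DESC_LOGGING is
 * enabled in snic_log_level.
 */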
void
snic_print_desc(const char *fn, char *os_buf, int len)
{
	if (snic_log_level & SNIC_DESC_LOGGING)
		snic_dump_desc(fn, os_buf, len);
}

void
snic_calc_io_process_time(struct snic *snic, struct snic_req_info *rqi)
{
	u64 duration;

	duration = jiffies - rqi->start_time;

	if (duration > atomic64_read(&snic->s_stats.io.max_time))
		atomic64_set(&snic->s_stats.io.max_time, duration);
}