/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CSIO_WR_H__
#define __CSIO_WR_H__

#include <linux/cache.h>

#include "csio_defs.h"
#include "t4fw_api_stor.h"

/*
 * SGE register field values.
 */
#define X_INGPCIEBOUNDARY_32B		0
#define X_INGPCIEBOUNDARY_64B		1
#define X_INGPCIEBOUNDARY_128B		2
#define X_INGPCIEBOUNDARY_256B		3
#define X_INGPCIEBOUNDARY_512B		4
#define X_INGPCIEBOUNDARY_1024B		5
#define X_INGPCIEBOUNDARY_2048B		6
#define X_INGPCIEBOUNDARY_4096B		7

#define X_TIMERREG_COUNTER0		0
#define X_TIMERREG_COUNTER1		1
#define X_TIMERREG_COUNTER2		2
#define X_TIMERREG_COUNTER3		3
#define X_TIMERREG_COUNTER4		4
#define X_TIMERREG_COUNTER5		5
#define X_TIMERREG_RESTART_COUNTER	6
#define X_TIMERREG_UPDATE_CIDX		7

/*
 * Egress Context field values
 */
#define X_FETCHBURSTMIN_16B		0
#define X_FETCHBURSTMIN_32B		1
#define X_FETCHBURSTMIN_64B		2
#define X_FETCHBURSTMIN_128B		3

#define X_FETCHBURSTMAX_64B		0
#define X_FETCHBURSTMAX_128B		1
#define X_FETCHBURSTMAX_256B		2
#define X_FETCHBURSTMAX_512B		3

#define X_HOSTFCMODE_NONE		0
#define X_HOSTFCMODE_INGRESS_QUEUE	1
#define X_HOSTFCMODE_STATUS_PAGE	2
#define X_HOSTFCMODE_BOTH		3

/*
 * Ingress Context field values
 */
#define X_UPDATESCHEDULING_TIMER		0
#define X_UPDATESCHEDULING_COUNTER_OPTTIMER	1

#define X_UPDATEDELIVERY_NONE		0
#define X_UPDATEDELIVERY_INTERRUPT	1
#define X_UPDATEDELIVERY_STATUS_PAGE	2
#define X_UPDATEDELIVERY_BOTH		3

#define X_INTERRUPTDESTINATION_PCIE	0
#define X_INTERRUPTDESTINATION_IQ	1

#define X_RSPD_TYPE_FLBUF		0
#define X_RSPD_TYPE_CPL			1
#define X_RSPD_TYPE_INTR		2

/* WR status is at the same position as retval in a CMD header */
#define csio_wr_status(_wr)		\
		(FW_CMD_RETVAL_G(ntohl(((struct fw_cmd_hdr *)(_wr))->lo)))
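
/*
 * Usage sketch (illustrative only; 'fw_wr' is an assumed pointer to a
 * completed WR): the completion path pulls the firmware retval out of
 * the WR header and treats zero (FW_SUCCESS) as success.
 *
 *	uint16_t status = csio_wr_status(fw_wr);
 *
 *	if (status != FW_SUCCESS)
 *		pr_err("WR failed, fw retval %u\n", status);
 */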

extern int csio_intr_coalesce_cnt;
extern int csio_intr_coalesce_time;
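
/*
 * Note (descriptive, not from the original header): these module
 * parameters feed interrupt coalescing; the count is matched against
 * the SGE's programmed packet-count thresholds and the time against its
 * hold-off timers (see counter_val[] and timer_val[] in struct csio_sge
 * below).
 */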

/* Ingress queue params */
struct csio_iq_params {
	uint16_t	iqandstindex;
	uint8_t		iqintcntthresh;
	uint8_t		iqflintiqhsen;
	uint8_t		iqflintcongen;
	uint8_t		iqflintcngchmap;
	uint8_t		fl0hostfcmode;
	uint8_t		fl0cidxfthresho;
	uint8_t		fl0cidxfthresh;
	uint8_t		fl1hostfcmode;
	uint8_t		fl1cidxfthresho;
	uint8_t		fl1cidxfthresh;
};

/* Egress queue params */
struct csio_eq_params {
	uint8_t		hostfcmode:2;
	uint8_t		cidxfthresho:1;
	uint8_t		cidxfthresh:3;
};

struct csio_dma_buf {
	struct list_head	list;
	void			*vaddr;		/* Virtual address */
	dma_addr_t		paddr;		/* Physical address */
	uint32_t		len;		/* Buffer size */
};
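
/*
 * Allocation sketch (illustrative; 'pdev' is an assumed struct pci_dev
 * and error handling is abbreviated): vaddr and paddr are the two
 * halves of one coherent DMA mapping.
 *
 *	buf->len   = PAGE_SIZE;
 *	buf->vaddr = dma_alloc_coherent(&pdev->dev, buf->len,
 *					&buf->paddr, GFP_KERNEL);
 *	if (!buf->vaddr)
 *		return -ENOMEM;
 */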

/* Generic I/O request structure */
struct csio_ioreq {
	struct csio_sm		sm;		/* SM, List
						 * should be the first member
						 */
	int			iq_idx;		/* Ingress queue index */
	int			eq_idx;		/* Egress queue index */
	uint32_t		nsge;		/* Number of SG elements */
	uint32_t		tmo;		/* Driver timeout */
	uint32_t		datadir;	/* Data direction */
	struct csio_dma_buf	dma_buf;	/* Req/resp DMA buffers */
	uint16_t		wr_status;	/* WR completion status */
	int16_t			drv_status;	/* Driver internal status */
	struct csio_lnode	*lnode;		/* Owner lnode */
	struct csio_rnode	*rnode;		/* Src/destination rnode */
	void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *);
						/* completion callback */
	void			*scratch1;	/* Scratch area 1 */
	void			*scratch2;	/* Scratch area 2 */
	struct list_head	gen_list;	/* Any list associated with
						 * this ioreq
						 */
	uint64_t		fw_handle;	/* Unique handle passed
						 * to the firmware
						 */
	uint8_t			dcopy;		/* Data copy required */
	struct completion	cmplobj;	/* ioreq completion object */
} ____cacheline_aligned_in_smp;
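
/*
 * Synchronous-use sketch (illustrative; building and issuing the WR is
 * elided): a blocking caller waits on cmplobj, which the completion
 * path signals after recording wr_status.
 *
 *	init_completion(&ioreq->cmplobj);
 *	... build and issue the WR for this ioreq ...
 *	wait_for_completion(&ioreq->cmplobj);
 *	if (ioreq->wr_status != FW_SUCCESS)
 *		... handle the failed request ...
 */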

/*
 * Egress status page for egress cidx updates
 */
struct csio_qstatus_page {
	__be32 qid;
	__be16 cidx;
	__be16 pidx;
};
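
/*
 * Read sketch (illustrative; assumes the queue was created with
 * X_HOSTFCMODE_STATUS_PAGE so the SGE DMAs cidx updates into this page
 * at the end of the queue, i.e. at the queue's wrap address):
 *
 *	const struct csio_qstatus_page *stp = q->vwrap;
 *	uint16_t hw_cidx = ntohs(stp->cidx);
 */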

enum {
	CSIO_MAX_FLBUF_PER_IQWR	= 4,
	CSIO_QCREDIT_SZ		= 64,	/* pidx/cidx increments
					 * in bytes
					 */
	CSIO_MAX_QID		= 0xFFFF,
	CSIO_MAX_IQ		= 128,

	CSIO_SGE_NTIMERS	= 6,
	CSIO_SGE_NCOUNTERS	= 4,
	CSIO_SGE_FL_SIZE_REGS	= 16,
};
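
/*
 * Credit arithmetic (illustrative; the 64KB queue is an assumed
 * example): pidx/cidx advance in CSIO_QCREDIT_SZ units, so a queue's
 * depth in credits is simply its byte size divided by CSIO_QCREDIT_SZ.
 *
 *	uint32_t credits = (64 * 1024) / CSIO_QCREDIT_SZ;   -- 1024
 */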

/* Defines for type */
enum {
	CSIO_EGRESS	= 1,
	CSIO_INGRESS	= 2,
	CSIO_FREELIST	= 3,
};

/*
 * Structure for footer (last 2 flits) of Ingress Queue Entry.
 */
struct csio_iqwr_footer {
	__be32		hdrbuflen_pidx;
	__be32		pldbuflen_qid;
	union {
		u8	type_gen;
		__be64	last_flit;
	} u;
};

#define IQWRF_NEWBUF		(1 << 31)
#define IQWRF_LEN_GET(x)	(((x) >> 0) & 0x7fffffffU)
#define IQWRF_GEN_SHIFT		7
#define IQWRF_TYPE_GET(x)	(((x) >> 4) & 0x3U)
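
/*
 * Decode sketch (illustrative; 'ftr' is assumed to point at the footer
 * flits of a received ingress entry): the new-buffer flag and buffer
 * length are packed into pldbuflen_qid, while the response type and
 * generation bit live in the final flit.
 *
 *	uint32_t len_qid = ntohl(ftr->pldbuflen_qid);
 *	bool newbuf	 = !!(len_qid & IQWRF_NEWBUF);
 *	uint32_t buflen	 = IQWRF_LEN_GET(len_qid);
 *	uint8_t rsp_type = IQWRF_TYPE_GET(ftr->u.type_gen);
 */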

/*
 * A WR can start towards the end of a queue, and then continue at the
 * beginning, since the queue is considered to be circular. This will
 * require a pair of address/len to be passed back to the caller -
 * hence the Work request pair structure.
 */
struct csio_wr_pair {
	void		*addr1;
	uint32_t	size1;
	void		*addr2;
	uint32_t	size2;
};
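
/*
 * Copy sketch (illustrative, using the member names as reconstructed
 * above; the driver's csio_wr_copy_to_wrp() does this for real):
 * addr1/size1 describe the segment at the tail of the queue and
 * addr2/size2 the wrapped remainder at its start, so a split WR is
 * written with at most two copies.
 *
 *	memcpy(wrp.addr1, wr, wrp.size1);
 *	if (wrp.size2)
 *		memcpy(wrp.addr2, (uint8_t *)wr + wrp.size1, wrp.size2);
 */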

/*
 * The following structure is used by ingress processing to return the
 * free list buffers to consumers.
 */
struct csio_fl_dma_buf {
	struct csio_dma_buf	flbufs[CSIO_MAX_FLBUF_PER_IQWR];
						/* Freelist DMA buffers */
	int			offset;		/* Offset within the
						 * first FL buf
						 */
	uint32_t		totlen;		/* Total length */
	uint8_t			defer_free;	/* Free of buffer can
						 * be deferred
						 */
};

typedef void (*iq_handler_t)(struct csio_hw *, void *, uint32_t,
			     struct csio_fl_dma_buf *, void *);

struct csio_iq {
	uint16_t	iqid;			/* Queue ID */
	uint16_t	physiqid;		/* Physical Queue ID */
	uint16_t	genbit;			/* Generation bit,
						 * initially set to 1
						 */
	int		flq_idx;		/* Freelist queue index */
	iq_handler_t	iq_intx_handler;	/* IQ INTx handler routine */
};

struct csio_eq {
	uint16_t	eqid;			/* Qid */
	uint16_t	physeqid;		/* Physical Queue ID */
	uint8_t		wrap[512];		/* Temp area for q-wrap around */
};

struct csio_fl {
	uint16_t	flid;			/* Qid */
	uint16_t	packen;			/* Packing enabled? */
	int		offset;			/* Offset within FL buf */
	int		sreg;			/* Size register */
	struct csio_dma_buf	*bufs;		/* Free list buffer ptr array
						 * indexed using flq->cidx/pidx
						 */
};

struct csio_qstats {
	uint32_t	n_tot_reqs;		/* Total no. of Requests */
	uint32_t	n_tot_rsps;		/* Total no. of responses */
	uint32_t	n_qwrap;		/* Queue wraps */
	uint32_t	n_eq_wr_split;		/* Number of split EQ WRs */
	uint32_t	n_qentry;		/* Queue entry */
	uint32_t	n_qempty;		/* Queue empty */
	uint32_t	n_qfull;		/* Queue fulls */
	uint32_t	n_rsp_unknown;		/* Unknown response type */
	uint32_t	n_stray_comp;		/* Stray completion intr */
	uint32_t	n_flq_refill;		/* Number of FL refills */
};

struct csio_q {
	uint16_t		type;		/* Type: Ingress/Egress/FL */
	uint16_t		pidx;		/* producer index */
	uint16_t		cidx;		/* consumer index */
	uint16_t		inc_idx;	/* Incremental index */
	uint32_t		wr_sz;		/* Size of all WRs in this q
						 * if fixed
						 */
	void			*vstart;	/* Base virtual address
						 * of queue
						 */
	void			*vwrap;		/* Virtual end address to
						 * wrap around at
						 */
	uint32_t		credits;	/* Size of queue in credits */
	void			*owner;		/* Owner */
	union {					/* Queue contexts */
		struct csio_iq	iq;
		struct csio_eq	eq;
		struct csio_fl	fl;
	} un;

	dma_addr_t		pstart;		/* Base physical address of
						 * queue
						 */
	uint32_t		portid;		/* PCIE Channel */
	uint32_t		size;		/* Size of queue in bytes */
	struct csio_qstats	stats;		/* Statistics */
} ____cacheline_aligned_in_smp;

struct csio_sge {
	uint32_t	csio_fl_align;		/* Calculated and cached
						 * for fast access
						 */
	uint32_t	sge_control;		/* padding, boundaries,
						 * lengths, etc.
						 */
	uint32_t	sge_host_page_size;	/* Host page size */
	uint32_t	sge_fl_buf_size[CSIO_SGE_FL_SIZE_REGS];
						/* free list buffer sizes */
	uint16_t	timer_val[CSIO_SGE_NTIMERS];
	uint8_t		counter_val[CSIO_SGE_NCOUNTERS];
};
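
/*
 * Mapping sketch (illustrative; mirrors the usual closest-match scan
 * for turning a requested hold-off time into one of the programmed SGE
 * timer indices; 'usecs' is an assumed input, the helper name is
 * hypothetical):
 *
 *	static int closest_timer(const struct csio_sge *s, int usecs)
 *	{
 *		int i, delta, match = 0, min = INT_MAX;
 *
 *		for (i = 0; i < CSIO_SGE_NTIMERS; i++) {
 *			delta = usecs - s->timer_val[i];
 *			if (delta < 0)
 *				delta = -delta;
 *			if (delta < min) {
 *				min = delta;
 *				match = i;
 *			}
 *		}
 *		return match;
 *	}
 */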

/* Work request module */
struct csio_wrm {
	int			num_q;		/* Number of queues */
	struct csio_q		**q_arr;	/* Array of queue pointers
						 * allocated dynamically
						 * based on configured values
						 */
	uint32_t		fw_iq_start;	/* Start ID of IQ for this fn */
	uint32_t		fw_eq_start;	/* Start ID of EQ for this fn */
	struct csio_q		*intr_map[CSIO_MAX_IQ];
						/* IQ-id to IQ map table. */
	int			free_qidx;	/* queue idx of free queue */
	struct csio_sge		sge;		/* SGE params */
};

#define csio_get_q(__hw, __idx)		((__hw)->wrm.q_arr[__idx])
#define csio_q_type(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->type)
#define csio_q_pidx(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->pidx)
#define csio_q_cidx(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->cidx)
#define csio_q_inc_idx(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->inc_idx)
#define csio_q_vstart(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->vstart)
#define csio_q_pstart(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->pstart)
#define csio_q_size(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->size)
#define csio_q_credits(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->credits)
#define csio_q_portid(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->portid)
#define csio_q_wr_sz(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->wr_sz)
#define csio_q_iqid(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->un.iq.iqid)
#define csio_q_physiqid(__hw, __idx)	\
		((__hw)->wrm.q_arr[(__idx)]->un.iq.physiqid)
#define csio_q_iq_flq_idx(__hw, __idx)	\
		((__hw)->wrm.q_arr[(__idx)]->un.iq.flq_idx)
#define csio_q_eqid(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->un.eq.eqid)
#define csio_q_flid(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->un.fl.flid)

#define csio_q_physeqid(__hw, __idx)	\
		((__hw)->wrm.q_arr[(__idx)]->un.eq.physeqid)
#define csio_iq_has_fl(__iq)		((__iq)->un.iq.flq_idx != -1)

#define csio_q_iq_to_flid(__hw, __iq_idx)	\
	csio_q_flid((__hw), (__hw)->wrm.q_arr[(__iq_idx)]->un.iq.flq_idx)
#define csio_q_set_intr_map(__hw, __iq_idx, __rel_iq_id)	\
	(__hw)->wrm.intr_map[__rel_iq_id] = csio_get_q(__hw, __iq_idx)
#define csio_q_eq_wrap(__hw, __idx)	((__hw)->wrm.q_arr[(__idx)]->un.eq.wrap)
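
/*
 * Lookup sketch (illustrative; assumes '__rel_iq_id' is the queue's id
 * relative to fw_iq_start, matching how csio_q_set_intr_map() above is
 * fed): the interrupt path can then recover the struct csio_q for a
 * hardware IQ id in O(1).
 *
 *	struct csio_q *iq = hw->wrm.intr_map[iqid - hw->wrm.fw_iq_start];
 */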

int csio_wr_alloc_q(struct csio_hw *, uint32_t, uint32_t,
		    uint16_t, void *, uint32_t, int, iq_handler_t);
int csio_wr_iq_create(struct csio_hw *, void *, int,
		      uint32_t, uint8_t, bool,
		      void (*)(struct csio_hw *, struct csio_mb *));
int csio_wr_eq_create(struct csio_hw *, void *, int, int, uint8_t,
		      void (*)(struct csio_hw *, struct csio_mb *));
int csio_wr_destroy_queues(struct csio_hw *, bool cmd);

int csio_wr_get(struct csio_hw *, int, uint32_t,
		struct csio_wr_pair *);
void csio_wr_copy_to_wrp(void *, struct csio_wr_pair *, uint32_t, uint32_t);
int csio_wr_issue(struct csio_hw *, int, bool);
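
/*
 * Submission sketch (illustrative; 'hw', 'eq_idx', 'wr' and 'wr_size'
 * are assumed, and return codes are only spot-checked): reserve space
 * on the egress queue, copy the WR in (split-safe across a queue wrap),
 * then ring the doorbell.
 *
 *	struct csio_wr_pair wrp;
 *
 *	if (csio_wr_get(hw, eq_idx, wr_size, &wrp) == 0) {
 *		csio_wr_copy_to_wrp(wr, &wrp, 0, wr_size);
 *		csio_wr_issue(hw, eq_idx, false);
 *	}
 */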

int csio_wr_process_iq(struct csio_hw *, struct csio_q *,
		       void (*)(struct csio_hw *, void *,
				uint32_t, struct csio_fl_dma_buf *,
				void *),
		       void *);
int csio_wr_process_iq_idx(struct csio_hw *, int,
			   void (*)(struct csio_hw *, void *,
				    uint32_t, struct csio_fl_dma_buf *,
				    void *),
			   void *);

void csio_wr_sge_init(struct csio_hw *);
int csio_wrm_init(struct csio_wrm *, struct csio_hw *);
void csio_wrm_exit(struct csio_wrm *, struct csio_hw *);

#endif /* ifndef __CSIO_WR_H__ */