/*
 * cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager.
 *
 * Copyright (c) 2008 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 */

#include <linux/skbuff.h>
#include <linux/scatterlist.h>

/* from cxgb3 LLD */
#include "common.h"
#include "t3_cpl.h"
#include "t3cdev.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"

#include "cxgb3i_ddp.h"

#define ddp_log_error(fmt...) printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt)
#define ddp_log_warn(fmt...)  printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt)
#define ddp_log_info(fmt...)  printk(KERN_INFO "cxgb3i_ddp: " fmt)

#ifdef __DEBUG_CXGB3I_DDP__
#define ddp_log_debug(fmt, args...) \
	printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args)
#else
#define ddp_log_debug(fmt...)
#endif

/*
 * iSCSI Direct Data Placement
 *
 * T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into
 * pre-posted final destination host-memory buffers based on the Initiator
 * Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
 *
 * The host memory address is programmed into h/w in the format of pagepod
 * entries. The location of the pagepod entry is encoded into the ddp tag,
 * which is used as, or as the base for, the ITT/TTT.
 */

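/*
 * Illustrative sketch (not from the original source): the ddp tag keeps
 * the iscsi s/w tag in its upper bits and the pagepod index in the
 * h/w-reserved lower bits, roughly:
 *
 *	tag  = cxgb3i_ddp_tag_base(tformat, sw_tag);
 *	tag |= idx << PPOD_IDX_SHIFT;
 *
 * so the h/w can recover the pagepod index, and thus the pre-posted
 * buffers, from the ITT/TTT of an incoming PDU alone.
 */
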
#define DDP_PGIDX_MAX		4
#define DDP_THRESHOLD		2048
static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
static unsigned char page_idx = DDP_PGIDX_MAX;

/*
 * functions to program the pagepod in h/w
 */
static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
{
	struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;

	req->wr.wr_lo = 0;
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
	req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
				   V_ULPTX_CMD(ULP_MEM_WRITE));
	req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
			 V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
}

static int set_ddp_map(struct cxgb3i_ddp_info *ddp, struct pagepod_hdr *hdr,
		       unsigned int idx, unsigned int npods,
		       struct cxgb3i_gather_list *gl)
{
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
	int i;

	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
		struct sk_buff *skb = ddp->gl_skb[idx];
		struct pagepod *ppod;
		int j, pidx;

		/* hold on to the skb until we clear the ddp mapping */
		skb_get(skb);

		ulp_mem_io_set_hdr(skb, pm_addr);
		ppod = (struct pagepod *)
		       (skb->head + sizeof(struct ulp_mem_io));
		memcpy(&(ppod->hdr), hdr, sizeof(struct pagepod_hdr));
		for (pidx = 4 * i, j = 0; j < 5; ++j, ++pidx)
			ppod->addr[j] = pidx < gl->nelem ?
				cpu_to_be64(gl->phys_addr[pidx]) : 0UL;

		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(ddp->tdev, skb);
	}
	return 0;
}

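/*
 * Note (added for clarity, not from the original source): each pagepod
 * carries 5 page addresses, yet consecutive pods advance by only 4 pages
 * (pidx starts at 4 * i above), so adjacent pods overlap by one page,
 * presumably letting the h/w place payload that straddles a pod boundary
 * without reading two pods.
 */
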
static void clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int tag,
			  unsigned int idx, unsigned int npods)
{
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
	int i;

	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
		struct sk_buff *skb = ddp->gl_skb[idx];

		if (!skb) {
			ddp_log_error("ddp tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
				      tag, idx, i, npods);
			continue;
		}
		ddp->gl_skb[idx] = NULL;
		memset((skb->head + sizeof(struct ulp_mem_io)), 0, PPOD_SIZE);
		ulp_mem_io_set_hdr(skb, pm_addr);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(ddp->tdev, skb);
	}
}

static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp,
					  unsigned int start, unsigned int max,
					  unsigned int count,
					  struct cxgb3i_gather_list *gl)
{
	unsigned int i, j, k;

	/* not enough entries */
	if ((max - start) < count)
		return -EBUSY;

	max -= count;
	spin_lock(&ddp->map_lock);
	for (i = start; i < max;) {
		for (j = 0, k = i; j < count; j++, k++) {
			if (ddp->gl_map[k])
				break;
		}
		if (j == count) {
			for (j = 0, k = i; j < count; j++, k++)
				ddp->gl_map[k] = gl;
			spin_unlock(&ddp->map_lock);
			return i;
		}
		i = k + 1;
	}
	spin_unlock(&ddp->map_lock);
	return -EBUSY;
}

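/*
 * Note (added for clarity): the loop above is a first-fit scan for
 * "count" consecutive free pagepod slots. The slots are claimed while
 * map_lock is still held, so concurrent reservers cannot race for the
 * same range; on failure the lock is dropped and -EBUSY returned.
 */
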
static inline void ddp_unmark_entries(struct cxgb3i_ddp_info *ddp,
				      int start, int count)
{
	spin_lock(&ddp->map_lock);
	memset(&ddp->gl_map[start], 0,
	       count * sizeof(struct cxgb3i_gather_list *));
	spin_unlock(&ddp->map_lock);
}

static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info *ddp,
				   int idx, int count)
{
	int i;

	for (i = 0; i < count; i++, idx++)
		if (ddp->gl_skb[idx]) {
			kfree_skb(ddp->gl_skb[idx]);
			ddp->gl_skb[idx] = NULL;
		}
}

static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info *ddp, int idx,
				   int count, gfp_t gfp)
{
	int i;

	for (i = 0; i < count; i++) {
		struct sk_buff *skb = alloc_skb(sizeof(struct ulp_mem_io) +
						PPOD_SIZE, gfp);
		if (skb) {
			ddp->gl_skb[idx + i] = skb;
			skb_put(skb, sizeof(struct ulp_mem_io) + PPOD_SIZE);
		} else {
			ddp_free_gl_skb(ddp, idx, i);
			return -ENOMEM;
		}
	}
	return 0;
}

/**
 * cxgb3i_ddp_find_page_index - return ddp page index for a given page size
 * @pgsz: page size
 * return the ddp page index, if no match is found return DDP_PGIDX_MAX.
 */
int cxgb3i_ddp_find_page_index(unsigned long pgsz)
{
	int i;

	for (i = 0; i < DDP_PGIDX_MAX; i++) {
		if (pgsz == (1UL << ddp_page_shift[i]))
			return i;
	}
	ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz);
	return DDP_PGIDX_MAX;
}

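/*
 * Illustrative example (not from the original source): with
 * ddp_page_shift = {12, 13, 14, 16}, a 4KB host page maps to index 0,
 * 8KB to 1, 16KB to 2 and 64KB to 3; any other size (e.g. 32KB) returns
 * DDP_PGIDX_MAX, and callers then skip ddp for that page size.
 */
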
static inline void ddp_gl_unmap(struct pci_dev *pdev,
				struct cxgb3i_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++)
		pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
}

static inline int ddp_gl_map(struct pci_dev *pdev,
			     struct cxgb3i_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++) {
		gl->phys_addr[i] = pci_map_page(pdev, gl->pages[i], 0,
						PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(pdev, gl->phys_addr[i])))
			goto unmap;
	}
	return i;

unmap:
	if (i) {
		unsigned int nelem = gl->nelem;

		/* unmap only the pages mapped so far */
		gl->nelem = i;
		ddp_gl_unmap(pdev, gl);
		gl->nelem = nelem;
	}
	return -ENOMEM;
}

/**
 * cxgb3i_ddp_make_gl - build ddp page buffer list
 * @xferlen: total buffer length
 * @sgl: page buffer scatter-gather list
 * @sgcnt: # of page buffers
 * @pdev: pci_dev, used for pci map
 * @gfp: allocation mode
 *
 * construct a ddp page buffer list from the scsi scatter-gather list.
 * coalesce buffers as much as possible, and obtain dma addresses for
 * each page.
 *
 * Return the cxgb3i_gather_list constructed from the page buffers if the
 * memory can be used for ddp. Return NULL otherwise.
 */
struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
					      struct scatterlist *sgl,
					      unsigned int sgcnt,
					      struct pci_dev *pdev,
					      gfp_t gfp)
{
	struct cxgb3i_gather_list *gl;
	struct scatterlist *sg = sgl;
	struct page *sgpage = sg_page(sg);
	unsigned int sglen = sg->length;
	unsigned int sgoffset = sg->offset;
	unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
			      PAGE_SHIFT;
	int i = 1, j = 0;

	if (xferlen < DDP_THRESHOLD) {
		ddp_log_debug("xfer %u < threshold %u, no ddp.\n",
			      xferlen, DDP_THRESHOLD);
		return NULL;
	}

	gl = kzalloc(sizeof(struct cxgb3i_gather_list) +
		     npages * (sizeof(dma_addr_t) + sizeof(struct page *)),
		     gfp);
	if (!gl)
		return NULL;

	gl->pages = (struct page **)&gl->phys_addr[npages];
	gl->nelem = npages;	/* upper bound, trimmed after coalescing */
	gl->length = xferlen;
	gl->offset = sgoffset;
	gl->pages[0] = sgpage;

	sg = sg_next(sg);
	while (i < sgcnt) {
		struct page *page = sg_page(sg);

		if (sgpage == page && sg->offset == sgoffset + sglen)
			sglen += sg->length;
		else {
			/* make sure the sgl is fit for ddp:
			 * each has the same page size, and
			 * all of the middle pages are used completely
			 */
			if ((j && sgoffset) ||
			    ((i != sgcnt - 1) &&
			     ((sglen + sgoffset) & ~PAGE_MASK)))
				goto error_out;

			j++;
			if (j == gl->nelem || sg->offset)
				goto error_out;
			gl->pages[j] = page;
			sglen = sg->length;
			sgoffset = sg->offset;
			sgpage = page;
		}
		i++;
		sg = sg_next(sg);
	}
	gl->nelem = ++j;

	if (ddp_gl_map(pdev, gl) < 0)
		goto error_out;

	return gl;

error_out:
	kfree(gl);
	return NULL;
}

/**
 * cxgb3i_ddp_release_gl - release a page buffer list
 * @gl: a ddp page buffer list
 * @pdev: pci_dev used for pci_unmap
 * free a ddp page buffer list built by cxgb3i_ddp_make_gl().
 */
void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
			   struct pci_dev *pdev)
{
	ddp_gl_unmap(pdev, gl);
	kfree(gl);
}

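/*
 * Usage sketch (illustrative only, not from the original source;
 * "xferlen", "sgl", "sgcnt" and "pdev" stand for the caller's transfer
 * parameters):
 *
 *	struct cxgb3i_gather_list *gl;
 *
 *	gl = cxgb3i_ddp_make_gl(xferlen, sgl, sgcnt, pdev, GFP_ATOMIC);
 *	if (!gl)
 *		return;		// memory not usable for ddp, use s/w tag
 *	...
 *	cxgb3i_ddp_release_gl(gl, pdev);	// after ddp is torn down
 */
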
/**
 * cxgb3i_ddp_tag_reserve - set up ddp for a data transfer
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @tformat: tag format
 * @tagp: contains s/w tag initially, will be updated with ddp/hw tag
 * @gl: the page memory list
 * @gfp: allocation mode
 *
 * set up ddp for a given page buffer list and construct the ddp tag.
 * return 0 if successful, < 0 otherwise.
 */
int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid,
			   struct cxgb3i_tag_format *tformat, u32 *tagp,
			   struct cxgb3i_gather_list *gl, gfp_t gfp)
{
	struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
	struct pagepod_hdr hdr;
	unsigned int npods;
	int idx = -1;
	int err = -ENOMEM;
	u32 sw_tag = *tagp;
	u32 tag;

	if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem ||
	    gl->length < DDP_THRESHOLD) {
		ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n",
			      page_idx, gl->length, DDP_THRESHOLD);
		return -EINVAL;
	}

	npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;

	if (ddp->idx_last == ddp->nppods)
		idx = ddp_find_unused_entries(ddp, 0, ddp->nppods, npods, gl);
	else {
		idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
					      ddp->nppods, npods, gl);
		if (idx < 0 && ddp->idx_last >= npods) {
			idx = ddp_find_unused_entries(ddp, 0,
				      min(ddp->idx_last + npods, ddp->nppods),
				      npods, gl);
		}
	}
	if (idx < 0) {
		ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
			      gl->length, gl->nelem, npods);
		return idx;
	}

	err = ddp_alloc_gl_skb(ddp, idx, npods, gfp);
	if (err < 0)
		goto unmark_entries;

	tag = cxgb3i_ddp_tag_base(tformat, sw_tag);
	tag |= idx << PPOD_IDX_SHIFT;

	hdr.rsvd = 0;
	hdr.vld_tid = htonl(F_PPOD_VALID | V_PPOD_TID(tid));
	hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
	hdr.maxoffset = htonl(gl->length);
	hdr.pgoffset = htonl(gl->offset);

	err = set_ddp_map(ddp, &hdr, idx, npods, gl);
	if (err < 0)
		goto free_gl_skb;

	ddp->idx_last = idx;
	ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n",
		      gl->length, gl->nelem, gl->offset, tid, sw_tag, tag,
		      idx, npods);
	*tagp = tag;
	return 0;

free_gl_skb:
	ddp_free_gl_skb(ddp, idx, npods);
unmark_entries:
	ddp_unmark_entries(ddp, idx, npods);
	return err;
}

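/*
 * Usage sketch (illustrative only, not from the original source;
 * "sw_tag" is the hypothetical tag handed down by the iscsi stack):
 *
 *	u32 tag = sw_tag;
 *
 *	err = cxgb3i_ddp_tag_reserve(tdev, tid, tformat, &tag, gl,
 *				     GFP_ATOMIC);
 *	if (err < 0)
 *		tag = sw_tag;	// no ddp, fall back to the plain s/w tag
 *	...
 *	// once the transfer is done, if a ddp tag was reserved:
 *	cxgb3i_ddp_tag_release(tdev, tag);
 */
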
/**
 * cxgb3i_ddp_tag_release - release a ddp tag
 * @tdev: t3cdev adapter
 * @tag: ddp tag
 * ddp cleanup for a given ddp tag and release all the resources held
 */
void cxgb3i_ddp_tag_release(struct t3cdev *tdev, u32 tag)
{
	struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
	u32 idx;

	if (!ddp) {
		ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag);
		return;
	}

	idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
	if (idx < ddp->nppods) {
		struct cxgb3i_gather_list *gl = ddp->gl_map[idx];
		unsigned int npods;

		if (!gl || !gl->nelem) {
			ddp_log_error("release 0x%x, idx 0x%x, gl 0x%p, %u.\n",
				      tag, idx, gl, gl ? gl->nelem : 0);
			return;
		}
		npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
		ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
			      tag, idx, npods);
		clear_ddp_map(ddp, tag, idx, npods);
		ddp_unmark_entries(ddp, idx, npods);
		cxgb3i_ddp_release_gl(gl, ddp->pdev);
	} else
		ddp_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n",
			      tag, idx, ddp->nppods);
}

static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx,
			    int reply)
{
	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(reply ? 0 : 1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0xF0000000);
	req->val = cpu_to_be64(val << 28);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(tdev, skb);
	return 0;
}

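/*
 * Note (added for clarity): the request above writes TCB word 31 with
 * mask 0xF0000000, i.e. the ddp page-size index lands in bits 31:28 of
 * that word (val << 28). An out-of-range pg_idx is quietly mapped to 0,
 * the smallest ddp page size.
 */
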
/**
 * cxgb3i_setup_conn_host_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * set up the ddp page size based on the host PAGE_SIZE for a connection
 * identified by tid
 */
int cxgb3i_setup_conn_host_pagesize(struct t3cdev *tdev, unsigned int tid,
				    int reply)
{
	return setup_conn_pgidx(tdev, tid, page_idx, reply);
}

/**
 * cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * @pgsz: ddp page size
 * set up the ddp page size for a connection identified by tid
 */
int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid,
			       int reply, unsigned long pgsz)
{
	int pgidx = cxgb3i_ddp_find_page_index(pgsz);

	return setup_conn_pgidx(tdev, tid, pgidx, reply);
}

/**
 * cxgb3i_setup_conn_digest - setup conn. digest setting
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @hcrc: header digest enabled
 * @dcrc: data digest enabled
 * @reply: request reply from h/w
 * set up the iscsi digest settings for a connection identified by tid
 */
int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid,
			     int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

	if (!skb)
		return -ENOMEM;

	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(reply ? 0 : 1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0x0F000000);
	req->val = cpu_to_be64(val << 24);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(tdev, skb);
	return 0;
}

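/*
 * Note (added for clarity): the digest enables share TCB word 31 with
 * the page-size index set above; here mask 0x0F000000 selects bits
 * 27:24, with hcrc in bit 24 and dcrc in bit 25 (val << 24).
 */
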
/**
 * cxgb3i_adapter_ddp_info - read the adapter's ddp information
 * @tdev: t3cdev adapter
 * @tformat: tag format
 * @txsz: max tx pdu payload size, filled in by this func.
 * @rxsz: max rx pdu payload size, filled in by this func.
 * setup the tag format for a given iscsi entity
 */
int cxgb3i_adapter_ddp_info(struct t3cdev *tdev,
			    struct cxgb3i_tag_format *tformat,
			    unsigned int *txsz, unsigned int *rxsz)
{
	struct cxgb3i_ddp_info *ddp;
	unsigned char idx_bits;

	if (!tformat)
		return -EINVAL;

	if (!tdev->ulp_iscsi)
		return -EINVAL;

	ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;

	idx_bits = 32 - tformat->sw_bits;
	tformat->rsvd_bits = ddp->idx_bits;
	tformat->rsvd_shift = PPOD_IDX_SHIFT;
	tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1;

	ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
		     tformat->sw_bits, tformat->rsvd_bits,
		     tformat->rsvd_shift, tformat->rsvd_mask);

	*txsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
		      ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
	*rxsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
		      ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
	ddp_log_info("max payload size: %u/%u, %u/%u.\n",
		     *txsz, ddp->max_txsz, *rxsz, ddp->max_rxsz);
	return 0;
}

/**
 * cxgb3i_ddp_cleanup - release the cxgb3 adapter's ddp resource
 * @tdev: t3cdev adapter
 * release all the resources held by the ddp pagepod manager for a given
 * adapter
 */
void cxgb3i_ddp_cleanup(struct t3cdev *tdev)
{
	int i = 0;
	struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;

	ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp);

	if (!ddp)
		return;

	tdev->ulp_iscsi = NULL;
	while (i < ddp->nppods) {
		struct cxgb3i_gather_list *gl = ddp->gl_map[i];

		if (gl) {
			int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
				    >> PPOD_PAGES_SHIFT;

			ddp_log_info("t3dev 0x%p, ddp %d + %d.\n",
				     tdev, i, npods);
			kfree(gl);
			ddp_free_gl_skb(ddp, i, npods);
			i += npods;
		} else
			i++;
	}
	cxgb3i_free_big_mem(ddp);
}

/**
 * ddp_init - initialize the cxgb3 adapter's ddp resource
 * @tdev: t3cdev adapter
 * initialize the ddp pagepod manager for a given adapter
 */
static void ddp_init(struct t3cdev *tdev)
{
	struct cxgb3i_ddp_info *ddp;
	struct ulp_iscsi_info uinfo;
	unsigned int ppmax, bits;
	int i, err;

	if (tdev->ulp_iscsi) {
		ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
			     tdev, tdev->ulp_iscsi);
		return;
	}

	err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
	if (err < 0) {
		ddp_log_error("%s, failed to get iscsi param err=%d.\n",
			      tdev->name, err);
		return;
	}

	ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
	bits = __ilog2_u32(ppmax) + 1;
	if (bits > PPOD_IDX_MAX_SIZE)
		bits = PPOD_IDX_MAX_SIZE;
	ppmax = (1 << (bits - 1)) - 1;

	ddp = cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info) +
				   ppmax *
					(sizeof(struct cxgb3i_gather_list *) +
					 sizeof(struct sk_buff *)),
				   GFP_KERNEL);
	if (!ddp) {
		ddp_log_warn("%s unable to alloc ddp 0x%d, ddp disabled.\n",
			     tdev->name, ppmax);
		return;
	}
	ddp->gl_map = (struct cxgb3i_gather_list **)(ddp + 1);
	ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
					  ppmax *
					  sizeof(struct cxgb3i_gather_list *));
	spin_lock_init(&ddp->map_lock);

	ddp->tdev = tdev;
	ddp->pdev = uinfo.pdev;
	ddp->max_txsz = min_t(unsigned int, uinfo.max_txsz, ULP2_MAX_PKT_SIZE);
	ddp->max_rxsz = min_t(unsigned int, uinfo.max_rxsz, ULP2_MAX_PKT_SIZE);
	ddp->llimit = uinfo.llimit;
	ddp->ulimit = uinfo.ulimit;
	ddp->nppods = ppmax;
	ddp->idx_last = ppmax;
	ddp->idx_bits = bits;
	ddp->idx_mask = (1 << bits) - 1;
	ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

	uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	for (i = 0; i < DDP_PGIDX_MAX; i++)
		uinfo.pgsz_factor[i] = ddp_page_order[i];
	uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);

	err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
	if (err < 0) {
		ddp_log_warn("%s unable to set iscsi param err=%d, "
			     "ddp disabled.\n", tdev->name, err);
		goto free_ddp_map;
	}

	tdev->ulp_iscsi = ddp;

	ddp_log_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u,"
		     " %u/%u.\n",
		     tdev, ppmax, ddp->idx_bits, ddp->idx_mask,
		     ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz,
		     ddp->max_rxsz, uinfo.max_rxsz);
	return;

free_ddp_map:
	cxgb3i_free_big_mem(ddp);
}

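/*
 * Worked example (illustrative, not from the original source): with
 * bits = 12 the manager exposes ppmax = (1 << 11) - 1 pagepods, the tag
 * bits reserved for h/w are rsvd_tag_mask =
 * (1 << (12 + PPOD_IDX_SHIFT)) - 1, and everything above them remains
 * available to the iscsi stack as s/w tag bits (see
 * cxgb3i_adapter_ddp_info() above).
 */
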
/**
 * cxgb3i_ddp_init - initialize ddp functions
 * @tdev: t3cdev adapter
 */
void cxgb3i_ddp_init(struct t3cdev *tdev)
{
	if (page_idx == DDP_PGIDX_MAX) {
		page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
		ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
			     PAGE_SIZE, page_idx);
	}
	ddp_init(tdev);
}