/*
 * cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager.
 *
 * Copyright (c) 2008 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 */
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"

#include "cxgb3i_ddp.h"
#define ddp_log_error(fmt...) printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt)
#define ddp_log_warn(fmt...) printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt)
#define ddp_log_info(fmt...) printk(KERN_INFO "cxgb3i_ddp: " fmt)

#ifdef __DEBUG_CXGB3I_DDP__
#define ddp_log_debug(fmt, args...) \
	printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args)
#else
#define ddp_log_debug(fmt...)
#endif
/*
 * iSCSI Direct Data Placement
 *
 * T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into
 * pre-posted final destination host-memory buffers based on the Initiator
 * Task Tag (ITT) in Data-In or the Target Task Tag (TTT) in Data-Out PDUs.
 *
 * The host memory address is programmed into h/w in the format of pagepod
 * entries. The location of the pagepod entry is encoded into the ddp tag,
 * which is used as (or as the base of) the ITT/TTT.
 */
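/*
 * Illustrative sketch of the tag composition (the actual bit widths are
 * adapter-dependent and are reported through cxgb3i_adapter_ddp_info()):
 *
 *	u32 tag = cxgb3i_ddp_tag_base(tformat, sw_tag);
 *	tag |= idx << PPOD_IDX_SHIFT;
 *
 * i.e. the s/w tag occupies the bits above the reserved field, and the
 * pagepod index "idx" sits at PPOD_IDX_SHIFT inside the reserved field,
 * exactly as done in cxgb3i_ddp_tag_reserve() below.
 */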
#define DDP_PGIDX_MAX	4
#define DDP_THRESHOLD	2048
static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
static unsigned char page_idx = DDP_PGIDX_MAX;
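/*
 * The two tables above describe the ddp page sizes the h/w can be
 * programmed with: by default orders {0, 1, 2, 4} relative to 4KB, i.e.
 * 4KB, 8KB, 16KB and 64KB. page_idx caches the index matching the host
 * PAGE_SIZE and stays at the invalid value DDP_PGIDX_MAX until
 * cxgb3i_ddp_init() resolves it (adjusting the tables if needed).
 */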
/*
 * functions to program the pagepod in h/w
 */
static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
{
	struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;

	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
	req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
				   V_ULPTX_CMD(ULP_MEM_WRITE));
	req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
			 V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
}
static int set_ddp_map(struct cxgb3i_ddp_info *ddp, struct pagepod_hdr *hdr,
		       unsigned int idx, unsigned int npods,
		       struct cxgb3i_gather_list *gl)
{
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
	int i;

	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
		struct sk_buff *skb = ddp->gl_skb[idx];
		struct pagepod *ppod;
		int j, pidx;

		/* hold on to the skb until we clear the ddp mapping */
		skb_get(skb);

		ulp_mem_io_set_hdr(skb, pm_addr);
		ppod = (struct pagepod *)
		       (skb->head + sizeof(struct ulp_mem_io));
		memcpy(&(ppod->hdr), hdr, sizeof(struct pagepod_hdr));
		for (pidx = 4 * i, j = 0; j < 5; ++j, ++pidx)
			ppod->addr[j] = pidx < gl->nelem ?
					cpu_to_be64(gl->phys_addr[pidx]) : 0UL;

		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(ddp->tdev, skb);
	}
	return 0;
}
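/*
 * Note on the "4 * i" / "j < 5" loop above: each pagepod carries
 * PPOD_PAGES_MAX (4, matching the 4 * i stride) new page addresses plus
 * a 5th entry that duplicates the first page of the next pod, so
 * consecutive pods overlap by one page. That is why pod i starts at
 * page index 4 * i but writes five addresses, zero-filling anything
 * past gl->nelem.
 */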
static void clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int tag,
			  unsigned int idx, unsigned int npods)
{
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
	int i;

	for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
		struct sk_buff *skb = ddp->gl_skb[idx];

		if (!skb) {
			ddp_log_error("ddp tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
				      tag, idx, i, npods);
			continue;
		}
		ddp->gl_skb[idx] = NULL;
		memset((skb->head + sizeof(struct ulp_mem_io)), 0, PPOD_SIZE);
		ulp_mem_io_set_hdr(skb, pm_addr);
		skb->priority = CPL_PRIORITY_CONTROL;
		cxgb3_ofld_send(ddp->tdev, skb);
	}
}
static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp,
					  unsigned int start, unsigned int max,
					  unsigned int count,
					  struct cxgb3i_gather_list *gl)
{
	unsigned int i, j, k;

	/* not enough entries */
	if ((max - start) < count)
		return -EBUSY;

	max -= count;
	spin_lock(&ddp->map_lock);
	for (i = start; i < max;) {
		for (j = 0, k = i; j < count; j++, k++) {
			if (ddp->gl_map[k])
				break;
		}
		if (j == count) {
			for (j = 0, k = i; j < count; j++, k++)
				ddp->gl_map[k] = gl;
			spin_unlock(&ddp->map_lock);
			return i;
		}
		i = k + 1;
	}
	spin_unlock(&ddp->map_lock);
	return -EBUSY;
}
static inline void ddp_unmark_entries(struct cxgb3i_ddp_info *ddp,
				      int start, int count)
{
	spin_lock(&ddp->map_lock);
	memset(&ddp->gl_map[start], 0,
	       count * sizeof(struct cxgb3i_gather_list *));
	spin_unlock(&ddp->map_lock);
}
static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info *ddp,
				   int idx, int count)
{
	int i;

	for (i = 0; i < count; i++, idx++)
		if (ddp->gl_skb[idx]) {
			kfree_skb(ddp->gl_skb[idx]);
			ddp->gl_skb[idx] = NULL;
		}
}
static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info *ddp, int idx,
				   int count, gfp_t gfp)
{
	int i;

	for (i = 0; i < count; i++) {
		struct sk_buff *skb = alloc_skb(sizeof(struct ulp_mem_io) +
						PPOD_SIZE, gfp);
		if (skb) {
			ddp->gl_skb[idx + i] = skb;
			skb_put(skb, sizeof(struct ulp_mem_io) + PPOD_SIZE);
		} else {
			ddp_free_gl_skb(ddp, idx, i);
			return -ENOMEM;
		}
	}
	return 0;
}
/**
 * cxgb3i_ddp_find_page_index - return ddp page index for a given page size
 * @pgsz: page size
 * return the ddp page index, if no match is found return DDP_PGIDX_MAX.
 */
int cxgb3i_ddp_find_page_index(unsigned long pgsz)
{
	int i;

	for (i = 0; i < DDP_PGIDX_MAX; i++) {
		if (pgsz == (1UL << ddp_page_shift[i]))
			return i;
	}
	ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz);
	return DDP_PGIDX_MAX;
}
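/*
 * For example (illustrative): with the default tables,
 * cxgb3i_ddp_find_page_index(4096) returns 0 and
 * cxgb3i_ddp_find_page_index(65536) returns 3, while an unsupported
 * size such as 2048 returns DDP_PGIDX_MAX.
 */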
/**
 * cxgb3i_ddp_adjust_page_table - adjust the ddp page table with PAGE_SIZE
 * return 0 on success, < 0 if PAGE_SIZE is smaller than the minimum
 * ddp page size.
 */
int cxgb3i_ddp_adjust_page_table(void)
{
	int i;
	unsigned int base_order, order;

	if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
		ddp_log_info("PAGE_SIZE 0x%lx too small, min. 0x%lx.\n",
			     PAGE_SIZE, 1UL << ddp_page_shift[0]);
		return -EINVAL;
	}

	base_order = get_order(1UL << ddp_page_shift[0]);
	order = get_order(1 << PAGE_SHIFT);
	for (i = 0; i < DDP_PGIDX_MAX; i++) {
		/* first is the kernel page size, then just doubling the size */
		ddp_page_order[i] = order - base_order + i;
		ddp_page_shift[i] = PAGE_SHIFT + i;
	}
	return 0;
}
static inline void ddp_gl_unmap(struct pci_dev *pdev,
				struct cxgb3i_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++)
		pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
}
static inline int ddp_gl_map(struct pci_dev *pdev,
			     struct cxgb3i_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++) {
		gl->phys_addr[i] = pci_map_page(pdev, gl->pages[i], 0,
						PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(pdev, gl->phys_addr[i])))
			goto unmap;
	}
	return i;

unmap:
	if (i) {
		unsigned int nelem = gl->nelem;

		gl->nelem = i;
		ddp_gl_unmap(pdev, gl);
		gl->nelem = nelem;
	}
	return -ENOMEM;
}
/**
 * cxgb3i_ddp_make_gl - build ddp page buffer list
 * @xferlen: total buffer length
 * @sgl: page buffer scatter-gather list
 * @sgcnt: # of page buffers
 * @pdev: pci_dev, used for pci map
 * @gfp: allocation mode
 *
 * construct a ddp page buffer list from the scsi scatter-gather list.
 * coalesce buffers as much as possible, and obtain dma addresses for
 * each page.
 *
 * Return the cxgb3i_gather_list constructed from the page buffers if the
 * memory can be used for ddp. Return NULL otherwise.
 */
struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
					      struct scatterlist *sgl,
					      unsigned int sgcnt,
					      struct pci_dev *pdev,
					      gfp_t gfp)
{
	struct cxgb3i_gather_list *gl;
	struct scatterlist *sg = sgl;
	struct page *sgpage = sg_page(sg);
	unsigned int sglen = sg->length;
	unsigned int sgoffset = sg->offset;
	unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
			      PAGE_SHIFT;
	int i = 1, j = 0;

	if (xferlen < DDP_THRESHOLD) {
		ddp_log_debug("xfer %u < threshold %u, no ddp.\n",
			      xferlen, DDP_THRESHOLD);
		return NULL;
	}

	gl = kzalloc(sizeof(struct cxgb3i_gather_list) +
		     npages * (sizeof(dma_addr_t) + sizeof(struct page *)),
		     gfp);
	if (!gl)
		return NULL;

	gl->pages = (struct page **)&gl->phys_addr[npages];
	gl->nelem = npages;
	gl->length = xferlen;
	gl->offset = sgoffset;
	gl->pages[0] = sgpage;

	for (i = 1, sg = sg_next(sg); i < sgcnt; i++, sg = sg_next(sg)) {
		struct page *page = sg_page(sg);

		if (sgpage == page && sg->offset == sgoffset + sglen)
			sglen += sg->length;
		else {
			/* make sure the sgl is fit for ddp:
			 * each has the same page size, and
			 * all of the middle pages are used completely
			 */
			if ((j && sgoffset) ||
			    ((i != sgcnt - 1) &&
			     ((sglen + sgoffset) & ~PAGE_MASK)))
				goto error_out;

			j++;
			if (j == gl->nelem || sg->offset)
				goto error_out;
			gl->pages[j] = page;
			sglen = sg->length;
			sgoffset = sg->offset;
			sgpage = page;
		}
	}
	gl->nelem = ++j;

	if (ddp_gl_map(pdev, gl) < 0)
		goto error_out;

	return gl;

error_out:
	kfree(gl);
	return NULL;
}
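/*
 * Coalescing example (illustrative): two scatterlist entries covering
 * bytes [0, 2048) and [2048, 4096) of the same page are merged into a
 * single gather-list page entry, since the second entry's sg->offset
 * equals sgoffset + sglen of the first. A hole between entries, or a
 * middle entry that does not end on a page boundary, disqualifies the
 * buffer from ddp and makes the function return NULL.
 */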
/**
 * cxgb3i_ddp_release_gl - release a page buffer list
 * @gl: a ddp page buffer list
 * @pdev: pci_dev used for pci_unmap
 * free a ddp page buffer list resulting from cxgb3i_ddp_make_gl().
 */
void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
			   struct pci_dev *pdev)
{
	ddp_gl_unmap(pdev, gl);
	kfree(gl);
}
/**
 * cxgb3i_ddp_tag_reserve - set up ddp for a data transfer
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @tformat: tag format
 * @tagp: contains s/w tag initially, will be updated with ddp/hw tag
 * @gl: the page memory list
 * @gfp: allocation mode
 *
 * ddp setup for a given page buffer list and construct the ddp tag.
 * return 0 on success, < 0 otherwise.
 */
int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid,
			   struct cxgb3i_tag_format *tformat, u32 *tagp,
			   struct cxgb3i_gather_list *gl, gfp_t gfp)
{
	struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
	struct pagepod_hdr hdr;
	unsigned int npods;
	int idx = -1;
	int err = -ENOMEM;
	u32 sw_tag = *tagp;
	u32 tag;

	if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem ||
	    gl->length < DDP_THRESHOLD) {
		ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n",
			      page_idx, gl->length, DDP_THRESHOLD);
		return -EINVAL;
	}

	npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;

	if (ddp->idx_last == ddp->nppods)
		idx = ddp_find_unused_entries(ddp, 0, ddp->nppods, npods, gl);
	else {
		idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
					      ddp->nppods, npods, gl);
		if (idx < 0 && ddp->idx_last >= npods)
			idx = ddp_find_unused_entries(ddp, 0,
				      min(ddp->idx_last + npods, ddp->nppods),
					      npods, gl);
	}
	if (idx < 0) {
		ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
			      gl->length, gl->nelem, npods);
		return idx;
	}

	err = ddp_alloc_gl_skb(ddp, idx, npods, gfp);
	if (err < 0)
		goto unmark_entries;

	tag = cxgb3i_ddp_tag_base(tformat, sw_tag);
	tag |= idx << PPOD_IDX_SHIFT;

	hdr.rsvd = 0;
	hdr.vld_tid = htonl(F_PPOD_VALID | V_PPOD_TID(tid));
	hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
	hdr.maxoffset = htonl(gl->length);
	hdr.pgoffset = htonl(gl->offset);

	err = set_ddp_map(ddp, &hdr, idx, npods, gl);
	if (err < 0)
		goto free_gl_skb;

	ddp->idx_last = idx;
	ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n",
		      gl->length, gl->nelem, gl->offset, tid, sw_tag, tag,
		      idx, npods);
	*tagp = tag;
	return 0;

free_gl_skb:
	ddp_free_gl_skb(ddp, idx, npods);
unmark_entries:
	ddp_unmark_entries(ddp, idx, npods);
	return err;
}
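/*
 * Typical caller flow (illustrative sketch, not from this file; xferlen,
 * sgl, sgcnt and sw_tag are assumed to come from the iscsi task):
 *
 *	struct cxgb3i_gather_list *gl;
 *	u32 tag = sw_tag;
 *
 *	gl = cxgb3i_ddp_make_gl(xferlen, sgl, sgcnt, pdev, GFP_ATOMIC);
 *	if (gl && !cxgb3i_ddp_tag_reserve(tdev, tid, tformat, &tag, gl,
 *					  GFP_ATOMIC))
 *		use "tag" as the ITT base, the h/w places the payload;
 *	else
 *		fall back to the plain s/w tag, no ddp for this task;
 *
 * and cxgb3i_ddp_tag_release() undoes the reservation when the task
 * completes.
 */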
/**
 * cxgb3i_ddp_tag_release - release a ddp tag
 * @tdev: t3cdev adapter
 * @tag: ddp tag
 * ddp cleanup for a given ddp tag and release all the resources held
 */
void cxgb3i_ddp_tag_release(struct t3cdev *tdev, u32 tag)
{
	struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
	u32 idx;

	if (!ddp) {
		ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag);
		return;
	}

	idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
	if (idx < ddp->nppods) {
		struct cxgb3i_gather_list *gl = ddp->gl_map[idx];
		unsigned int npods;

		if (!gl || !gl->nelem) {
			ddp_log_error("release 0x%x, idx 0x%x, gl 0x%p, %u.\n",
				      tag, idx, gl, gl ? gl->nelem : 0);
			return;
		}
		npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
		ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
			      tag, idx, npods);
		clear_ddp_map(ddp, tag, idx, npods);
		ddp_unmark_entries(ddp, idx, npods);
		cxgb3i_ddp_release_gl(gl, ddp->pdev);
	} else
		ddp_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n",
			      tag, idx, ddp->nppods);
}
static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx,
			    int reply)
{
	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

	if (!skb)
		return -ENOMEM;

	/* set up ulp submode and page size */
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(reply ? 0 : 1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0xF0000000);
	req->val = cpu_to_be64(val << 28);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(tdev, skb);
	return 0;
}
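/*
 * The CPL_SET_TCB_FIELD above targets TCB word 31: the ddp page-size
 * index is written to bits 31:28 (mask 0xF0000000, val << 28), while the
 * iscsi digest bits live in bits 27:24 (see cxgb3i_setup_conn_digest()
 * below).
 */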
/**
 * cxgb3i_setup_conn_host_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * set up the ddp page size based on the host PAGE_SIZE for a connection
 * identified by tid
 */
int cxgb3i_setup_conn_host_pagesize(struct t3cdev *tdev, unsigned int tid,
				    int reply)
{
	return setup_conn_pgidx(tdev, tid, page_idx, reply);
}
/**
 * cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * @pgsz: ddp page size
 * set up the ddp page size for a connection identified by tid
 */
int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid,
			       int reply, unsigned long pgsz)
{
	int pgidx = cxgb3i_ddp_find_page_index(pgsz);

	return setup_conn_pgidx(tdev, tid, pgidx, reply);
}
/**
 * cxgb3i_setup_conn_digest - setup conn. digest setting
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @hcrc: header digest enabled
 * @dcrc: data digest enabled
 * @reply: request reply from h/w
 * set up the iscsi digest settings for a connection identified by tid
 */
int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid,
			     int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					GFP_KERNEL);
	struct cpl_set_tcb_field *req;
	u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

	if (!skb)
		return -ENOMEM;

	/* set up ulp submode: header and data digest settings */
	req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply = V_NO_REPLY(reply ? 0 : 1);
	req->cpu_idx = 0;
	req->word = htons(31);
	req->mask = cpu_to_be64(0x0F000000);
	req->val = cpu_to_be64(val << 24);
	skb->priority = CPL_PRIORITY_CONTROL;

	cxgb3_ofld_send(tdev, skb);
	return 0;
}
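/*
 * val above encodes the digest settings as a two-bit field: bit 0
 * enables the header CRC, bit 1 the data CRC, e.g. hcrc=1/dcrc=1 yields
 * val = 3 and both digests are offloaded for the connection.
 */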
/**
 * cxgb3i_adapter_ddp_info - read the adapter's ddp information
 * @tdev: t3cdev adapter
 * @tformat: tag format
 * @txsz: max tx pdu payload size, filled in by this func.
 * @rxsz: max rx pdu payload size, filled in by this func.
 * setup the tag format for a given iscsi entity
 */
int cxgb3i_adapter_ddp_info(struct t3cdev *tdev,
			    struct cxgb3i_tag_format *tformat,
			    unsigned int *txsz, unsigned int *rxsz)
{
	struct cxgb3i_ddp_info *ddp;
	unsigned char idx_bits;

	if (!tformat)
		return -EINVAL;

	if (!tdev->ulp_iscsi)
		return -EINVAL;

	ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;

	idx_bits = 32 - tformat->sw_bits;
	tformat->rsvd_bits = ddp->idx_bits;
	tformat->rsvd_shift = PPOD_IDX_SHIFT;
	tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1;

	ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
		     tformat->sw_bits, tformat->rsvd_bits,
		     tformat->rsvd_shift, tformat->rsvd_mask);

	*txsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
		      ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
	*rxsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
		      ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
	ddp_log_info("max payload size: %u/%u, %u/%u.\n",
		     *txsz, ddp->max_txsz, *rxsz, ddp->max_rxsz);
	return 0;
}
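/*
 * Example (illustrative numbers only): if the adapter reports
 * ddp->idx_bits = 10 and PPOD_IDX_SHIFT is 6, a caller with
 * tformat->sw_bits = 16 ends up with rsvd_bits = 10, rsvd_shift = 6 and
 * rsvd_mask = 0x3ff, i.e. the pagepod index occupies bits 15:6 of the
 * tag and the bits above it are free for the s/w tag.
 */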
/**
 * cxgb3i_ddp_cleanup - release the cxgb3 adapter's ddp resource
 * @tdev: t3cdev adapter
 * release all the resource held by the ddp pagepod manager for a given
 * t3cdev adapter
 */
static void ddp_cleanup(struct kref *kref)
{
	struct cxgb3i_ddp_info *ddp = container_of(kref,
						   struct cxgb3i_ddp_info,
						   refcnt);
	int i = 0;

	ddp_log_info("kref release ddp 0x%p, t3dev 0x%p.\n", ddp, ddp->tdev);

	ddp->tdev->ulp_iscsi = NULL;
	while (i < ddp->nppods) {
		struct cxgb3i_gather_list *gl = ddp->gl_map[i];

		if (gl) {
			int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
				    >> PPOD_PAGES_SHIFT;

			ddp_log_info("t3dev 0x%p, ddp %d + %d.\n",
				     ddp->tdev, i, npods);
			kfree(gl);
			ddp_free_gl_skb(ddp, i, npods);
			i += npods;
		} else
			i++;
	}
	cxgb3i_free_big_mem(ddp);
}

void cxgb3i_ddp_cleanup(struct t3cdev *tdev)
{
	struct cxgb3i_ddp_info *ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;

	ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev, ddp);
	if (ddp)
		kref_put(&ddp->refcnt, ddp_cleanup);
}
/**
 * ddp_init - initialize the cxgb3 adapter's ddp resource
 * @tdev: t3cdev adapter
 * initialize the ddp pagepod manager for a given adapter
 */
static void ddp_init(struct t3cdev *tdev)
{
	struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
	struct ulp_iscsi_info uinfo;
	unsigned int ppmax, bits;
	int i, err;

	if (ddp) {
		kref_get(&ddp->refcnt);
		ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
			     tdev, tdev->ulp_iscsi);
		return;
	}

	err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
	if (err < 0) {
		ddp_log_error("%s, failed to get iscsi param err=%d.\n",
			      tdev->name, err);
		return;
	}

	ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
	bits = __ilog2_u32(ppmax) + 1;
	if (bits > PPOD_IDX_MAX_SIZE)
		bits = PPOD_IDX_MAX_SIZE;
	ppmax = (1 << (bits - 1)) - 1;

	ddp = cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info) +
				   ppmax *
				   (sizeof(struct cxgb3i_gather_list *) +
				    sizeof(struct sk_buff *)),
				   GFP_KERNEL);
	if (!ddp) {
		ddp_log_warn("%s unable to alloc ddp 0x%x, ddp disabled.\n",
			     tdev->name, ppmax);
		return;
	}
	ddp->gl_map = (struct cxgb3i_gather_list **)(ddp + 1);
	ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
					  ppmax *
					  sizeof(struct cxgb3i_gather_list *));
	spin_lock_init(&ddp->map_lock);
	kref_init(&ddp->refcnt);

	ddp->tdev = tdev;
	ddp->pdev = uinfo.pdev;
	ddp->max_txsz = min_t(unsigned int, uinfo.max_txsz, ULP2_MAX_PKT_SIZE);
	ddp->max_rxsz = min_t(unsigned int, uinfo.max_rxsz, ULP2_MAX_PKT_SIZE);
	ddp->llimit = uinfo.llimit;
	ddp->ulimit = uinfo.ulimit;
	ddp->nppods = ppmax;
	ddp->idx_last = ppmax;
	ddp->idx_bits = bits;
	ddp->idx_mask = (1 << bits) - 1;
	ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

	uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	for (i = 0; i < DDP_PGIDX_MAX; i++)
		uinfo.pgsz_factor[i] = ddp_page_order[i];
	uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);

	err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
	if (err < 0) {
		ddp_log_warn("%s unable to set iscsi param err=%d, "
			     "ddp disabled.\n", tdev->name, err);
		goto free_ddp_map;
	}

	tdev->ulp_iscsi = ddp;

	ddp_log_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u,"
		     " %u/%u.\n",
		     tdev, ppmax, ddp->idx_bits, ddp->idx_mask,
		     ddp->rsvd_tag_mask, ddp->max_txsz, uinfo.max_txsz,
		     ddp->max_rxsz, uinfo.max_rxsz);
	return;

free_ddp_map:
	cxgb3i_free_big_mem(ddp);
}
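/*
 * Sizing sketch for the math above (illustrative numbers; PPOD_SIZE and
 * PPOD_IDX_MAX_SIZE come from cxgb3i_ddp.h): with a 256KB pagepod region
 * and 64-byte pagepods, ppmax = 256KB >> PPOD_SIZE_SHIFT = 4096 entries,
 * bits = ilog2(4096) + 1 = 13, and ppmax is then clamped to
 * (1 << (bits - 1)) - 1 = 4095 so that a pagepod index always fits in
 * the reserved tag field.
 */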
/**
 * cxgb3i_ddp_init - initialize ddp functions
 */
void cxgb3i_ddp_init(struct t3cdev *tdev)
{
	if (page_idx == DDP_PGIDX_MAX) {
		page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);

		if (page_idx == DDP_PGIDX_MAX) {
			ddp_log_info("system PAGE_SIZE %lu, update hw.\n",
				     PAGE_SIZE);
			if (cxgb3i_ddp_adjust_page_table() < 0) {
				ddp_log_info("PAGE_SIZE %lu, ddp disabled.\n",
					     PAGE_SIZE);
				return;
			}
			page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
		}
		ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
			     PAGE_SIZE, page_idx);
	}
	ddp_init(tdev);
}