2 * cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager.
4 * Copyright (c) 2008 Chelsio Communications, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
10 * Written by: Karen Xie (kxie@chelsio.com)
13 #include <linux/skbuff.h>
14 #include <linux/scatterlist.h>
20 #include "cxgb3_ctl_defs.h"
21 #include "cxgb3_offload.h"
22 #include "firmware_exports.h"
24 #include "cxgb3i_ddp.h"
/* Logging helpers: all messages are tagged with the driver prefix so they
 * can be grepped out of the kernel log easily.
 */
#define ddp_log_error(fmt...) printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt)
#define ddp_log_warn(fmt...)  printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt)
#define ddp_log_info(fmt...)  printk(KERN_INFO "cxgb3i_ddp: " fmt)

#ifdef __DEBUG_CXGB3I_DDP__
#define ddp_log_debug(fmt, args...) \
	printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args)
#else
/* debug logging compiles away entirely when the debug switch is off */
#define ddp_log_debug(fmt...)
#endif
38 * iSCSI Direct Data Placement
40 * T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into
41 * pre-posted final destination host-memory buffers based on the Initiator
42 * Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
44 * The host memory address is programmed into h/w in the format of pagepod
46 * The location of the pagepod entry is encoded into ddp tag which is used or
47 * is the base for ITT/TTT.
/* Number of distinct DDP page sizes the T3 hardware supports. */
#define DDP_PGIDX_MAX		4
/* Transfers below this byte count are not worth setting up DDP for. */
#define DDP_THRESHOLD		2048

/*
 * Per-index page parameters: defaults describe 4KB/8KB/16KB/64KB pages
 * (order relative to 4KB, and the corresponding page shift).  These tables
 * are rewritten by cxgb3i_ddp_adjust_page_table() when the host PAGE_SIZE
 * is not one of the default sizes.
 */
static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};

/* Index of the host PAGE_SIZE in the tables above; DDP_PGIDX_MAX means
 * "not yet resolved" (set up lazily at ddp init time).
 */
static unsigned char page_idx = DDP_PGIDX_MAX;
57 * functions to program the pagepod in h/w
59 static inline void ulp_mem_io_set_hdr(struct sk_buff
*skb
, unsigned int addr
)
61 struct ulp_mem_io
*req
= (struct ulp_mem_io
*)skb
->head
;
64 req
->wr
.wr_hi
= htonl(V_WR_OP(FW_WROPCODE_BYPASS
));
65 req
->cmd_lock_addr
= htonl(V_ULP_MEMIO_ADDR(addr
>> 5) |
66 V_ULPTX_CMD(ULP_MEM_WRITE
));
67 req
->len
= htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE
>> 5) |
68 V_ULPTX_NFLITS((PPOD_SIZE
>> 3) + 1));
71 static int set_ddp_map(struct cxgb3i_ddp_info
*ddp
, struct pagepod_hdr
*hdr
,
72 unsigned int idx
, unsigned int npods
,
73 struct cxgb3i_gather_list
*gl
)
75 unsigned int pm_addr
= (idx
<< PPOD_SIZE_SHIFT
) + ddp
->llimit
;
78 for (i
= 0; i
< npods
; i
++, idx
++, pm_addr
+= PPOD_SIZE
) {
79 struct sk_buff
*skb
= ddp
->gl_skb
[idx
];
83 /* hold on to the skb until we clear the ddp mapping */
86 ulp_mem_io_set_hdr(skb
, pm_addr
);
87 ppod
= (struct pagepod
*)
88 (skb
->head
+ sizeof(struct ulp_mem_io
));
89 memcpy(&(ppod
->hdr
), hdr
, sizeof(struct pagepod
));
90 for (pidx
= 4 * i
, j
= 0; j
< 5; ++j
, ++pidx
)
91 ppod
->addr
[j
] = pidx
< gl
->nelem
?
92 cpu_to_be64(gl
->phys_addr
[pidx
]) : 0UL;
94 skb
->priority
= CPL_PRIORITY_CONTROL
;
95 cxgb3_ofld_send(ddp
->tdev
, skb
);
100 static void clear_ddp_map(struct cxgb3i_ddp_info
*ddp
, unsigned int tag
,
101 unsigned int idx
, unsigned int npods
)
103 unsigned int pm_addr
= (idx
<< PPOD_SIZE_SHIFT
) + ddp
->llimit
;
106 for (i
= 0; i
< npods
; i
++, idx
++, pm_addr
+= PPOD_SIZE
) {
107 struct sk_buff
*skb
= ddp
->gl_skb
[idx
];
110 ddp_log_error("ddp tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
114 ddp
->gl_skb
[idx
] = NULL
;
115 memset((skb
->head
+ sizeof(struct ulp_mem_io
)), 0, PPOD_SIZE
);
116 ulp_mem_io_set_hdr(skb
, pm_addr
);
117 skb
->priority
= CPL_PRIORITY_CONTROL
;
118 cxgb3_ofld_send(ddp
->tdev
, skb
);
122 static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info
*ddp
,
123 unsigned int start
, unsigned int max
,
125 struct cxgb3i_gather_list
*gl
)
127 unsigned int i
, j
, k
;
129 /* not enough entries */
130 if ((max
- start
) < count
)
134 spin_lock(&ddp
->map_lock
);
135 for (i
= start
; i
< max
;) {
136 for (j
= 0, k
= i
; j
< count
; j
++, k
++) {
141 for (j
= 0, k
= i
; j
< count
; j
++, k
++)
143 spin_unlock(&ddp
->map_lock
);
148 spin_unlock(&ddp
->map_lock
);
152 static inline void ddp_unmark_entries(struct cxgb3i_ddp_info
*ddp
,
153 int start
, int count
)
155 spin_lock(&ddp
->map_lock
);
156 memset(&ddp
->gl_map
[start
], 0,
157 count
* sizeof(struct cxgb3i_gather_list
*));
158 spin_unlock(&ddp
->map_lock
);
161 static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info
*ddp
,
166 for (i
= 0; i
< count
; i
++, idx
++)
167 if (ddp
->gl_skb
[idx
]) {
168 kfree_skb(ddp
->gl_skb
[idx
]);
169 ddp
->gl_skb
[idx
] = NULL
;
173 static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info
*ddp
, int idx
,
174 int count
, gfp_t gfp
)
178 for (i
= 0; i
< count
; i
++) {
179 struct sk_buff
*skb
= alloc_skb(sizeof(struct ulp_mem_io
) +
182 ddp
->gl_skb
[idx
+ i
] = skb
;
183 skb_put(skb
, sizeof(struct ulp_mem_io
) + PPOD_SIZE
);
185 ddp_free_gl_skb(ddp
, idx
, i
);
193 * cxgb3i_ddp_find_page_index - return ddp page index for a given page size
195 * return the ddp page index, if no match is found return DDP_PGIDX_MAX.
197 int cxgb3i_ddp_find_page_index(unsigned long pgsz
)
201 for (i
= 0; i
< DDP_PGIDX_MAX
; i
++) {
202 if (pgsz
== (1UL << ddp_page_shift
[i
]))
205 ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz
);
206 return DDP_PGIDX_MAX
;
210 * cxgb3i_ddp_adjust_page_table - adjust page table with PAGE_SIZE
211 * return the ddp page index, if no match is found return DDP_PGIDX_MAX.
213 int cxgb3i_ddp_adjust_page_table(void)
216 unsigned int base_order
, order
;
218 if (PAGE_SIZE
< (1UL << ddp_page_shift
[0])) {
219 ddp_log_info("PAGE_SIZE 0x%lx too small, min. 0x%lx.\n",
220 PAGE_SIZE
, 1UL << ddp_page_shift
[0]);
224 base_order
= get_order(1UL << ddp_page_shift
[0]);
225 order
= get_order(1 << PAGE_SHIFT
);
226 for (i
= 0; i
< DDP_PGIDX_MAX
; i
++) {
227 /* first is the kernel page size, then just doubling the size */
228 ddp_page_order
[i
] = order
- base_order
+ i
;
229 ddp_page_shift
[i
] = PAGE_SHIFT
+ i
;
234 static inline void ddp_gl_unmap(struct pci_dev
*pdev
,
235 struct cxgb3i_gather_list
*gl
)
239 for (i
= 0; i
< gl
->nelem
; i
++)
240 pci_unmap_page(pdev
, gl
->phys_addr
[i
], PAGE_SIZE
,
244 static inline int ddp_gl_map(struct pci_dev
*pdev
,
245 struct cxgb3i_gather_list
*gl
)
249 for (i
= 0; i
< gl
->nelem
; i
++) {
250 gl
->phys_addr
[i
] = pci_map_page(pdev
, gl
->pages
[i
], 0,
253 if (unlikely(pci_dma_mapping_error(pdev
, gl
->phys_addr
[i
])))
261 unsigned int nelem
= gl
->nelem
;
264 ddp_gl_unmap(pdev
, gl
);
271 * cxgb3i_ddp_make_gl - build ddp page buffer list
272 * @xferlen: total buffer length
273 * @sgl: page buffer scatter-gather list
274 * @sgcnt: # of page buffers
275 * @pdev: pci_dev, used for pci map
276 * @gfp: allocation mode
278 * construct a ddp page buffer list from the scsi scattergather list.
279 * coalesce buffers as much as possible, and obtain dma addresses for
282 * Return the cxgb3i_gather_list constructed from the page buffers if the
283 * memory can be used for ddp. Return NULL otherwise.
285 struct cxgb3i_gather_list
*cxgb3i_ddp_make_gl(unsigned int xferlen
,
286 struct scatterlist
*sgl
,
288 struct pci_dev
*pdev
,
291 struct cxgb3i_gather_list
*gl
;
292 struct scatterlist
*sg
= sgl
;
293 struct page
*sgpage
= sg_page(sg
);
294 unsigned int sglen
= sg
->length
;
295 unsigned int sgoffset
= sg
->offset
;
296 unsigned int npages
= (xferlen
+ sgoffset
+ PAGE_SIZE
- 1) >>
300 if (xferlen
< DDP_THRESHOLD
) {
301 ddp_log_debug("xfer %u < threshold %u, no ddp.\n",
302 xferlen
, DDP_THRESHOLD
);
306 gl
= kzalloc(sizeof(struct cxgb3i_gather_list
) +
307 npages
* (sizeof(dma_addr_t
) + sizeof(struct page
*)),
312 gl
->pages
= (struct page
**)&gl
->phys_addr
[npages
];
313 gl
->length
= xferlen
;
314 gl
->offset
= sgoffset
;
315 gl
->pages
[0] = sgpage
;
319 struct page
*page
= sg_page(sg
);
321 if (sgpage
== page
&& sg
->offset
== sgoffset
+ sglen
)
324 /* make sure the sgl is fit for ddp:
325 * each has the same page size, and
326 * all of the middle pages are used completely
328 if ((j
&& sgoffset
) ||
330 ((sglen
+ sgoffset
) & ~PAGE_MASK
)))
334 if (j
== gl
->nelem
|| sg
->offset
)
338 sgoffset
= sg
->offset
;
346 if (ddp_gl_map(pdev
, gl
) < 0)
/**
 * cxgb3i_ddp_release_gl - release a page buffer list
 * @gl: a ddp page buffer list
 * @pdev: pci_dev used for pci_unmap
 *
 * free a ddp page buffer list resulted from cxgb3i_ddp_make_gl().
 */
void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
			   struct pci_dev *pdev)
{
	ddp_gl_unmap(pdev, gl);
	kfree(gl);
}
370 * cxgb3i_ddp_tag_reserve - set up ddp for a data transfer
371 * @tdev: t3cdev adapter
372 * @tid: connection id
373 * @tformat: tag format
374 * @tagp: contains s/w tag initially, will be updated with ddp/hw tag
375 * @gl: the page momory list
376 * @gfp: allocation mode
378 * ddp setup for a given page buffer list and construct the ddp tag.
379 * return 0 if success, < 0 otherwise.
381 int cxgb3i_ddp_tag_reserve(struct t3cdev
*tdev
, unsigned int tid
,
382 struct cxgb3i_tag_format
*tformat
, u32
*tagp
,
383 struct cxgb3i_gather_list
*gl
, gfp_t gfp
)
385 struct cxgb3i_ddp_info
*ddp
= tdev
->ulp_iscsi
;
386 struct pagepod_hdr hdr
;
393 if (page_idx
>= DDP_PGIDX_MAX
|| !ddp
|| !gl
|| !gl
->nelem
||
394 gl
->length
< DDP_THRESHOLD
) {
395 ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n",
396 page_idx
, gl
->length
, DDP_THRESHOLD
);
400 npods
= (gl
->nelem
+ PPOD_PAGES_MAX
- 1) >> PPOD_PAGES_SHIFT
;
402 if (ddp
->idx_last
== ddp
->nppods
)
403 idx
= ddp_find_unused_entries(ddp
, 0, ddp
->nppods
, npods
, gl
);
405 idx
= ddp_find_unused_entries(ddp
, ddp
->idx_last
+ 1,
406 ddp
->nppods
, npods
, gl
);
407 if (idx
< 0 && ddp
->idx_last
>= npods
) {
408 idx
= ddp_find_unused_entries(ddp
, 0,
409 min(ddp
->idx_last
+ npods
, ddp
->nppods
),
414 ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
415 gl
->length
, gl
->nelem
, npods
);
419 err
= ddp_alloc_gl_skb(ddp
, idx
, npods
, gfp
);
423 tag
= cxgb3i_ddp_tag_base(tformat
, sw_tag
);
424 tag
|= idx
<< PPOD_IDX_SHIFT
;
427 hdr
.vld_tid
= htonl(F_PPOD_VALID
| V_PPOD_TID(tid
));
428 hdr
.pgsz_tag_clr
= htonl(tag
& ddp
->rsvd_tag_mask
);
429 hdr
.maxoffset
= htonl(gl
->length
);
430 hdr
.pgoffset
= htonl(gl
->offset
);
432 err
= set_ddp_map(ddp
, &hdr
, idx
, npods
, gl
);
437 ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n",
438 gl
->length
, gl
->nelem
, gl
->offset
, tid
, sw_tag
, tag
,
444 ddp_free_gl_skb(ddp
, idx
, npods
);
446 ddp_unmark_entries(ddp
, idx
, npods
);
451 * cxgb3i_ddp_tag_release - release a ddp tag
452 * @tdev: t3cdev adapter
454 * ddp cleanup for a given ddp tag and release all the resources held
456 void cxgb3i_ddp_tag_release(struct t3cdev
*tdev
, u32 tag
)
458 struct cxgb3i_ddp_info
*ddp
= tdev
->ulp_iscsi
;
462 ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag
);
466 idx
= (tag
>> PPOD_IDX_SHIFT
) & ddp
->idx_mask
;
467 if (idx
< ddp
->nppods
) {
468 struct cxgb3i_gather_list
*gl
= ddp
->gl_map
[idx
];
471 if (!gl
|| !gl
->nelem
) {
472 ddp_log_error("release 0x%x, idx 0x%x, gl 0x%p, %u.\n",
473 tag
, idx
, gl
, gl
? gl
->nelem
: 0);
476 npods
= (gl
->nelem
+ PPOD_PAGES_MAX
- 1) >> PPOD_PAGES_SHIFT
;
477 ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
479 clear_ddp_map(ddp
, tag
, idx
, npods
);
480 ddp_unmark_entries(ddp
, idx
, npods
);
481 cxgb3i_ddp_release_gl(gl
, ddp
->pdev
);
483 ddp_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n",
484 tag
, idx
, ddp
->nppods
);
487 static int setup_conn_pgidx(struct t3cdev
*tdev
, unsigned int tid
, int pg_idx
,
490 struct sk_buff
*skb
= alloc_skb(sizeof(struct cpl_set_tcb_field
),
492 struct cpl_set_tcb_field
*req
;
493 u64 val
= pg_idx
< DDP_PGIDX_MAX
? pg_idx
: 0;
498 /* set up ulp submode and page size */
499 req
= (struct cpl_set_tcb_field
*)skb_put(skb
, sizeof(*req
));
500 req
->wr
.wr_hi
= htonl(V_WR_OP(FW_WROPCODE_FORWARD
));
501 OPCODE_TID(req
) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD
, tid
));
502 req
->reply
= V_NO_REPLY(reply
? 0 : 1);
504 req
->word
= htons(31);
505 req
->mask
= cpu_to_be64(0xF0000000);
506 req
->val
= cpu_to_be64(val
<< 28);
507 skb
->priority
= CPL_PRIORITY_CONTROL
;
509 cxgb3_ofld_send(tdev
, skb
);
514 * cxgb3i_setup_conn_host_pagesize - setup the conn.'s ddp page size
515 * @tdev: t3cdev adapter
516 * @tid: connection id
517 * @reply: request reply from h/w
518 * set up the ddp page size based on the host PAGE_SIZE for a connection
521 int cxgb3i_setup_conn_host_pagesize(struct t3cdev
*tdev
, unsigned int tid
,
524 return setup_conn_pgidx(tdev
, tid
, page_idx
, reply
);
/**
 * cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * @pgsz: ddp page size
 *
 * set up the ddp page size for a connection identified by tid.
 */
int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid,
			       int reply, unsigned long pgsz)
{
	int pgidx = cxgb3i_ddp_find_page_index(pgsz);

	return setup_conn_pgidx(tdev, tid, pgidx, reply);
}
544 * cxgb3i_setup_conn_digest - setup conn. digest setting
545 * @tdev: t3cdev adapter
546 * @tid: connection id
547 * @hcrc: header digest enabled
548 * @dcrc: data digest enabled
549 * @reply: request reply from h/w
550 * set up the iscsi digest settings for a connection identified by tid
552 int cxgb3i_setup_conn_digest(struct t3cdev
*tdev
, unsigned int tid
,
553 int hcrc
, int dcrc
, int reply
)
555 struct sk_buff
*skb
= alloc_skb(sizeof(struct cpl_set_tcb_field
),
557 struct cpl_set_tcb_field
*req
;
558 u64 val
= (hcrc
? 1 : 0) | (dcrc
? 2 : 0);
563 /* set up ulp submode and page size */
564 req
= (struct cpl_set_tcb_field
*)skb_put(skb
, sizeof(*req
));
565 req
->wr
.wr_hi
= htonl(V_WR_OP(FW_WROPCODE_FORWARD
));
566 OPCODE_TID(req
) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD
, tid
));
567 req
->reply
= V_NO_REPLY(reply
? 0 : 1);
569 req
->word
= htons(31);
570 req
->mask
= cpu_to_be64(0x0F000000);
571 req
->val
= cpu_to_be64(val
<< 24);
572 skb
->priority
= CPL_PRIORITY_CONTROL
;
574 cxgb3_ofld_send(tdev
, skb
);
580 * cxgb3i_adapter_ddp_info - read the adapter's ddp information
581 * @tdev: t3cdev adapter
582 * @tformat: tag format
583 * @txsz: max tx pdu payload size, filled in by this func.
584 * @rxsz: max rx pdu payload size, filled in by this func.
585 * setup the tag format for a given iscsi entity
587 int cxgb3i_adapter_ddp_info(struct t3cdev
*tdev
,
588 struct cxgb3i_tag_format
*tformat
,
589 unsigned int *txsz
, unsigned int *rxsz
)
591 struct cxgb3i_ddp_info
*ddp
;
592 unsigned char idx_bits
;
597 if (!tdev
->ulp_iscsi
)
600 ddp
= (struct cxgb3i_ddp_info
*)tdev
->ulp_iscsi
;
602 idx_bits
= 32 - tformat
->sw_bits
;
603 tformat
->rsvd_bits
= ddp
->idx_bits
;
604 tformat
->rsvd_shift
= PPOD_IDX_SHIFT
;
605 tformat
->rsvd_mask
= (1 << tformat
->rsvd_bits
) - 1;
607 ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
608 tformat
->sw_bits
, tformat
->rsvd_bits
,
609 tformat
->rsvd_shift
, tformat
->rsvd_mask
);
611 *txsz
= min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD
,
612 ddp
->max_txsz
- ISCSI_PDU_NONPAYLOAD_LEN
);
613 *rxsz
= min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD
,
614 ddp
->max_rxsz
- ISCSI_PDU_NONPAYLOAD_LEN
);
615 ddp_log_info("max payload size: %u/%u, %u/%u.\n",
616 *txsz
, ddp
->max_txsz
, *rxsz
, ddp
->max_rxsz
);
621 * cxgb3i_ddp_cleanup - release the cxgb3 adapter's ddp resource
622 * @tdev: t3cdev adapter
623 * release all the resource held by the ddp pagepod manager for a given
627 static void ddp_cleanup(struct kref
*kref
)
629 struct cxgb3i_ddp_info
*ddp
= container_of(kref
,
630 struct cxgb3i_ddp_info
,
634 ddp_log_info("kref release ddp 0x%p, t3dev 0x%p.\n", ddp
, ddp
->tdev
);
636 ddp
->tdev
->ulp_iscsi
= NULL
;
637 while (i
< ddp
->nppods
) {
638 struct cxgb3i_gather_list
*gl
= ddp
->gl_map
[i
];
640 int npods
= (gl
->nelem
+ PPOD_PAGES_MAX
- 1)
642 ddp_log_info("t3dev 0x%p, ddp %d + %d.\n",
643 ddp
->tdev
, i
, npods
);
645 ddp_free_gl_skb(ddp
, i
, npods
);
650 cxgb3i_free_big_mem(ddp
);
653 void cxgb3i_ddp_cleanup(struct t3cdev
*tdev
)
655 struct cxgb3i_ddp_info
*ddp
= (struct cxgb3i_ddp_info
*)tdev
->ulp_iscsi
;
657 ddp_log_info("t3dev 0x%p, release ddp 0x%p.\n", tdev
, ddp
);
659 kref_put(&ddp
->refcnt
, ddp_cleanup
);
663 * ddp_init - initialize the cxgb3 adapter's ddp resource
664 * @tdev: t3cdev adapter
665 * initialize the ddp pagepod manager for a given adapter
667 static void ddp_init(struct t3cdev
*tdev
)
669 struct cxgb3i_ddp_info
*ddp
= tdev
->ulp_iscsi
;
670 struct ulp_iscsi_info uinfo
;
671 unsigned int ppmax
, bits
;
675 kref_get(&ddp
->refcnt
);
676 ddp_log_warn("t3dev 0x%p, ddp 0x%p already set up.\n",
677 tdev
, tdev
->ulp_iscsi
);
681 err
= tdev
->ctl(tdev
, ULP_ISCSI_GET_PARAMS
, &uinfo
);
683 ddp_log_error("%s, failed to get iscsi param err=%d.\n",
688 ppmax
= (uinfo
.ulimit
- uinfo
.llimit
+ 1) >> PPOD_SIZE_SHIFT
;
689 bits
= __ilog2_u32(ppmax
) + 1;
690 if (bits
> PPOD_IDX_MAX_SIZE
)
691 bits
= PPOD_IDX_MAX_SIZE
;
692 ppmax
= (1 << (bits
- 1)) - 1;
694 ddp
= cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info
) +
696 (sizeof(struct cxgb3i_gather_list
*) +
697 sizeof(struct sk_buff
*)),
700 ddp_log_warn("%s unable to alloc ddp 0x%d, ddp disabled.\n",
704 ddp
->gl_map
= (struct cxgb3i_gather_list
**)(ddp
+ 1);
705 ddp
->gl_skb
= (struct sk_buff
**)(((char *)ddp
->gl_map
) +
707 sizeof(struct cxgb3i_gather_list
*));
708 spin_lock_init(&ddp
->map_lock
);
709 kref_init(&ddp
->refcnt
);
712 ddp
->pdev
= uinfo
.pdev
;
713 ddp
->max_txsz
= min_t(unsigned int, uinfo
.max_txsz
, ULP2_MAX_PKT_SIZE
);
714 ddp
->max_rxsz
= min_t(unsigned int, uinfo
.max_rxsz
, ULP2_MAX_PKT_SIZE
);
715 ddp
->llimit
= uinfo
.llimit
;
716 ddp
->ulimit
= uinfo
.ulimit
;
718 ddp
->idx_last
= ppmax
;
719 ddp
->idx_bits
= bits
;
720 ddp
->idx_mask
= (1 << bits
) - 1;
721 ddp
->rsvd_tag_mask
= (1 << (bits
+ PPOD_IDX_SHIFT
)) - 1;
723 uinfo
.tagmask
= ddp
->idx_mask
<< PPOD_IDX_SHIFT
;
724 for (i
= 0; i
< DDP_PGIDX_MAX
; i
++)
725 uinfo
.pgsz_factor
[i
] = ddp_page_order
[i
];
726 uinfo
.ulimit
= uinfo
.llimit
+ (ppmax
<< PPOD_SIZE_SHIFT
);
728 err
= tdev
->ctl(tdev
, ULP_ISCSI_SET_PARAMS
, &uinfo
);
730 ddp_log_warn("%s unable to set iscsi param err=%d, "
731 "ddp disabled.\n", tdev
->name
, err
);
735 tdev
->ulp_iscsi
= ddp
;
737 ddp_log_info("tdev 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u,"
739 tdev
, ppmax
, ddp
->idx_bits
, ddp
->idx_mask
,
740 ddp
->rsvd_tag_mask
, ddp
->max_txsz
, uinfo
.max_txsz
,
741 ddp
->max_rxsz
, uinfo
.max_rxsz
);
745 cxgb3i_free_big_mem(ddp
);
749 * cxgb3i_ddp_init - initialize ddp functions
751 void cxgb3i_ddp_init(struct t3cdev
*tdev
)
753 if (page_idx
== DDP_PGIDX_MAX
) {
754 page_idx
= cxgb3i_ddp_find_page_index(PAGE_SIZE
);
756 if (page_idx
== DDP_PGIDX_MAX
) {
757 ddp_log_info("system PAGE_SIZE %lu, update hw.\n",
759 if (cxgb3i_ddp_adjust_page_table() < 0) {
760 ddp_log_info("PAGE_SIZE %lu, ddp disabled.\n",
764 page_idx
= cxgb3i_ddp_find_page_index(PAGE_SIZE
);
766 ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
767 PAGE_SIZE
, page_idx
);