/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "cxgbit.h"
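
/*
 * Write one page-pod: copy the ppod header from ttinfo, then fill the
 * address slots with the DMA addresses of consecutive pages taken from
 * the scatterlist. *sg_pp/*sg_off track the current position so that
 * consecutive calls continue where the previous ppod left off.
 */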
static void
cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod,
		    struct cxgbi_task_tag_info *ttinfo,
		    struct scatterlist **sg_pp, unsigned int *sg_off)
{
	struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
	unsigned int offset = sg_off ? *sg_off : 0;
	dma_addr_t addr = 0UL;
	unsigned int len = 0;
	int i;

	memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));

	if (sg) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
	}

	for (i = 0; i < PPOD_PAGES_MAX; i++) {
		if (sg) {
			ppod->addr[i] = cpu_to_be64(addr + offset);
			offset += PAGE_SIZE;
			if (offset == (len + sg->offset)) {
				offset = 0;
				sg = sg_next(sg);
				if (sg) {
					addr = sg_dma_address(sg);
					len = sg_dma_len(sg);
				}
			}
		} else {
			ppod->addr[i] = 0ULL;
		}
	}

	/*
	 * the fifth address needs to be repeated in the next ppod, so do
	 * not move sg
	 */
	if (sg_pp) {
		*sg_pp = sg;
		*sg_off = offset;
	}

	if (offset == len) {
		offset = 0;
		if (sg) {
			sg = sg_next(sg);
			if (sg)
				addr = sg_dma_address(sg);
		}
	}
	ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
}
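
/*
 * Allocate an skb carrying a ULP_TX memory-write work request that
 * programs npods page-pods, as immediate data, into adapter memory at
 * the pagepod-region offset given by idx.
 */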
static struct sk_buff *
cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm,
		       unsigned int idx, unsigned int npods, unsigned int tid)
{
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
	unsigned int dlen = npods << PPOD_SIZE_SHIFT;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				      sizeof(struct ulptx_idata) + dlen, 16);

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
	INIT_ULPTX_WR(req, wr_len, 0, tid);

	req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
		FW_WR_ATOMIC_V(0));
	req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
		ULP_MEMIO_ORDER_V(0) |
		T5_ULP_MEMIO_IMM_V(1));
	req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
	req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
	req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

	idata = (struct ulptx_idata *)(req + 1);
	idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	idata->len = htonl(dlen);

	return skb;
}
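
/*
 * Build one immediate-data work request covering npods page-pods, fill
 * the pods via cxgbit_set_one_ppod() and queue the skb on csk->ppodq,
 * from where it is flushed to the adapter with the outgoing traffic.
 */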
static int
cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
			struct cxgbi_task_tag_info *ttinfo, unsigned int idx,
			unsigned int npods, struct scatterlist **sg_pp,
			unsigned int *sg_off)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	unsigned int i;

	skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid);
	if (!skb)
		return -ENOMEM;

	req = (struct ulp_mem_io *)skb->data;
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++)
		cxgbit_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);

	__skb_queue_tail(&csk->ppodq, skb);

	return 0;
}
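
/*
 * Program all npods page-pods for this tag, splitting the writes into
 * chunks of at most ULPMEM_IDATA_MAX_NPPODS pods per work request.
 */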
static int
cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
		   struct cxgbi_task_tag_info *ttinfo)
{
	unsigned int pidx = ttinfo->idx;
	unsigned int npods = ttinfo->npods;
	unsigned int i, cnt;
	struct scatterlist *sg = ttinfo->sgl;
	unsigned int offset = 0;
	int ret = 0;

	for (i = 0; i < npods; i += cnt, pidx += cnt) {
		cnt = npods - i;

		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;

		ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
					      &sg, &offset);
		if (ret < 0)
			break;
	}

	return ret;
}
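
/*
 * DDP needs the scatterlist to describe one contiguous page-aligned
 * buffer: every entry must be 4-byte aligned, only the first entry may
 * have a non-zero offset, and every entry except the last must end on
 * a page boundary.
 */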
static int cxgbit_ddp_sgl_check(struct scatterlist *sg,
				unsigned int nents)
{
	unsigned int last_sgidx = nents - 1;
	unsigned int i;

	for (i = 0; i < nents; i++, sg = sg_next(sg)) {
		unsigned int len = sg->length + sg->offset;

		if ((sg->offset & 0x3) || (i && sg->offset) ||
		    ((i != last_sgidx) && (len != PAGE_SIZE))) {
			return -EINVAL;
		}
	}

	return 0;
}
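
/*
 * Set up DDP for one command: validate the sgl, reserve page-pods and a
 * ddp tag, DMA-map the buffer and queue the page-pod writes. Returns 0
 * on success, a negative errno if DDP cannot be used for this command.
 */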
static int
cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
		   unsigned int xferlen)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbi_ppm *ppm = cdev2ppm(cdev);
	struct scatterlist *sgl = ttinfo->sgl;
	unsigned int sgcnt = ttinfo->nents;
	unsigned int sg_offset = sgl->offset;
	int ret;

	if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) {
		pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
			 ppm, ppm->tformat.pgsz_idx_dflt,
			 xferlen, ttinfo->nents);
		return -EINVAL;
	}

	if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0)
		return -EINVAL;

	ttinfo->nr_pages = (xferlen + sgl->offset +
			    (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

	/*
	 * the ddp tag will be used for the ttt in the outgoing r2t pdu
	 */
	ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
				      &ttinfo->tag, 0);
	if (ret < 0)
		return ret;
	ttinfo->npods = ret;

	/* map from a zero offset; restore the caller's offset afterwards */
	sgl->offset = 0;
	ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
	sgl->offset = sg_offset;
	if (!ret) {
		pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
			__func__, 0, xferlen, sgcnt);
		goto rel_ppods;
	}

	cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
				xferlen, &ttinfo->hdr);

	ret = cxgbit_ddp_set_map(ppm, csk, ttinfo);
	if (ret < 0) {
		__skb_queue_purge(&csk->ppodq);
		dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
		goto rel_ppods;
	}

	return 0;

rel_ppods:
	cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
	return -EINVAL;
}
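
/*
 * Called when building an outgoing R2T: if DDP is enabled on the
 * connection, reserve a ddp tag for the data-out transfer and use it
 * as the target transfer tag, so the adapter can place the incoming
 * data directly into the command's buffer.
 */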
void
cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		   struct iscsi_r2t *r2t)
{
	struct cxgbit_sock *csk = conn->context;
	struct cxgbit_device *cdev = csk->com.cdev;
	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
	struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
	int ret = -EINVAL;

	if ((!ccmd->setup_ddp) ||
	    (!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
		goto out;

	ccmd->setup_ddp = false;

	ttinfo->sgl = cmd->se_cmd.t_data_sg;
	ttinfo->nents = cmd->se_cmd.t_data_nents;

	ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
	if (ret < 0) {
		pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
			csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);

		ttinfo->sgl = NULL;
		ttinfo->nents = 0;
	} else {
		ccmd->release = true;
	}
out:
	pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag);
	r2t->targ_xfer_tag = ttinfo->tag;
}
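
/*
 * Per-command release hook: undo the DDP reservation made in
 * cxgbit_ddp_reserve(), or drop the page reference held via ccmd->sg
 * when no sgl was mapped for this command.
 */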
void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);

	if (ccmd->release) {
		struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;

		if (ttinfo->sgl) {
			struct cxgbit_sock *csk = conn->context;
			struct cxgbit_device *cdev = csk->com.cdev;
			struct cxgbi_ppm *ppm = cdev2ppm(cdev);

			cxgbi_ppm_ppod_release(ppm, ttinfo->idx);

			dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
				     ttinfo->nents, DMA_FROM_DEVICE);
		} else {
			put_page(sg_page(&ccmd->sg));
		}

		ccmd->release = false;
	}
}
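
/*
 * Probe-time setup: size the page-pod manager from the adapter's iscsi
 * memory region, derive the tag format from the LLD info, and enable
 * DDP on the device if a usable page-size index and enough pods exist.
 */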
int cxgbit_ddp_init(struct cxgbit_device *cdev)
{
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct net_device *ndev = cdev->lldi.ports[0];
	struct cxgbi_tag_format tformat;
	unsigned int ppmax;
	int ret, i;

	if (!lldi->vr->iscsi.size) {
		pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
		return -EACCES;
	}

	ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;

	memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
	/* each byte of iscsi_pgsz_order carries one page-size order */
	for (i = 0; i < 4; i++)
		tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
					 & 0xF;
	cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);

	ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0],
			     cdev->lldi.pdev, &cdev->lldi, &tformat,
			     ppmax, lldi->iscsi_llimit,
			     lldi->vr->iscsi.start, 2);
	if (ret >= 0) {
		struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm);

		if ((ppm->tformat.pgsz_idx_dflt < DDP_PGIDX_MAX) &&
		    (ppm->ppmax >= 1024))
			set_bit(CDEV_DDP_ENABLE, &cdev->flags);
		ret = 0;
	}

	return ret;
}