// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 */

#include "cxgbit.h"

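/*
 * Fill one page-pod (ppod) with the DMA addresses of up to PPOD_PAGES_MAX
 * pages from the command's scatterlist, advancing *sg_pp/*sg_off so the
 * caller can continue with the next ppod.
 */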
static void
cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod,
                    struct cxgbi_task_tag_info *ttinfo,
                    struct scatterlist **sg_pp, unsigned int *sg_off)
{
        struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
        unsigned int offset = sg_off ? *sg_off : 0;
        dma_addr_t addr = 0UL;
        unsigned int len = 0;
        int i;

        memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));

        if (sg) {
                addr = sg_dma_address(sg);
                len = sg_dma_len(sg);
        }

        for (i = 0; i < PPOD_PAGES_MAX; i++) {
                if (sg) {
                        ppod->addr[i] = cpu_to_be64(addr + offset);
                        offset += PAGE_SIZE;
                        if (offset == (len + sg->offset)) {
                                offset = 0;
                                sg = sg_next(sg);
                                if (sg) {
                                        addr = sg_dma_address(sg);
                                        len = sg_dma_len(sg);
                                }
                        }
                } else {
                        ppod->addr[i] = 0ULL;
                }
        }

        /*
         * the fifth address needs to be repeated in the next ppod, so do
         * not move sg
         */
        if (sg_pp) {
                *sg_pp = sg;
                *sg_off = offset;
        }

        if (offset == len) {
                offset = 0;
                if (sg) {
                        sg = sg_next(sg);
                        if (sg)
                                addr = sg_dma_address(sg);
                }
        }
        ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
}

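/*
 * Allocate an skb carrying a ULP_TX memory-write work request whose
 * immediate data area will hold 'npods' page-pods for the adapter's
 * ppod region starting at index 'idx'.
 */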
static struct sk_buff *
cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm,
                       unsigned int idx, unsigned int npods, unsigned int tid)
{
        struct ulp_mem_io *req;
        struct ulptx_idata *idata;
        unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
        unsigned int dlen = npods << PPOD_SIZE_SHIFT;
        unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
                                sizeof(struct ulptx_idata) + dlen, 16);
        struct sk_buff *skb;

        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb)
                return NULL;

        req = __skb_put(skb, wr_len);
        INIT_ULPTX_WR(req, wr_len, 0, tid);
        req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
                FW_WR_ATOMIC_V(0));
        req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
                ULP_MEMIO_ORDER_V(0) |
                T5_ULP_MEMIO_IMM_V(1));
        req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
        req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
        req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

        idata = (struct ulptx_idata *)(req + 1);
        idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
        idata->len = htonl(dlen);

        return skb;
}

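/*
 * Build one immediate-data ppod write for 'npods' page-pods and queue it
 * on the socket's ppodq for transmission.
 */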
static int
cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
                        struct cxgbi_task_tag_info *ttinfo, unsigned int idx,
                        unsigned int npods, struct scatterlist **sg_pp,
                        unsigned int *sg_off)
{
        struct cxgbit_device *cdev = csk->com.cdev;
        struct sk_buff *skb;
        struct ulp_mem_io *req;
        struct ulptx_idata *idata;
        struct cxgbi_pagepod *ppod;
        unsigned int i;

        skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid);
        if (!skb)
                return -ENOMEM;

        req = (struct ulp_mem_io *)skb->data;
        idata = (struct ulptx_idata *)(req + 1);
        ppod = (struct cxgbi_pagepod *)(idata + 1);

        for (i = 0; i < npods; i++, ppod++)
                cxgbit_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);

        __skb_queue_tail(&csk->ppodq, skb);

        return 0;
}

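/*
 * Program all page-pods for a task, splitting the writes into chunks of
 * at most ULPMEM_IDATA_MAX_NPPODS ppods per work request.
 */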
static int
cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
                   struct cxgbi_task_tag_info *ttinfo)
{
        unsigned int pidx = ttinfo->idx;
        unsigned int npods = ttinfo->npods;
        unsigned int i, cnt;
        struct scatterlist *sg = ttinfo->sgl;
        unsigned int offset = 0;
        int ret = 0;

        for (i = 0; i < npods; i += cnt, pidx += cnt) {
                cnt = npods - i;

                if (cnt > ULPMEM_IDATA_MAX_NPPODS)
                        cnt = ULPMEM_IDATA_MAX_NPPODS;

                ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
                                              &sg, &offset);
                if (ret < 0)
                        break;
        }

        return ret;
}

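/*
 * DDP needs a page-aligned scatterlist: only the first entry may carry a
 * (4-byte aligned) offset, and every entry except the last must end on a
 * page boundary.
 */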
static int cxgbit_ddp_sgl_check(struct scatterlist *sg,
                                unsigned int nents)
{
        unsigned int last_sgidx = nents - 1;
        unsigned int i;

        for (i = 0; i < nents; i++, sg = sg_next(sg)) {
                unsigned int len = sg->length + sg->offset;

                if ((sg->offset & 0x3) || (i && sg->offset) ||
                    ((i != last_sgidx) && (len != PAGE_SIZE))) {
                        return -EINVAL;
                }
        }

        return 0;
}

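/*
 * Reserve page-pods and DMA-map the command's scatterlist so the adapter
 * can place incoming data directly into the target buffers; on success
 * ttinfo->tag holds the DDP tag used as the R2T TTT.
 */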
static int
cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
                   unsigned int xferlen)
{
        struct cxgbit_device *cdev = csk->com.cdev;
        struct cxgbi_ppm *ppm = cdev2ppm(cdev);
        struct scatterlist *sgl = ttinfo->sgl;
        unsigned int sgcnt = ttinfo->nents;
        unsigned int sg_offset = sgl->offset;
        int ret;

        if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) {
                pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
                         ppm, ppm->tformat.pgsz_idx_dflt,
                         xferlen, ttinfo->nents);
                return -EINVAL;
        }

        if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0)
                return -EINVAL;

        ttinfo->nr_pages = (xferlen + sgl->offset +
                            (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

        /*
         * the ddp tag will be used for the ttt in the outgoing r2t pdu
         */
        ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
                                      &ttinfo->tag, 0);
        if (ret < 0)
                return ret;
        ttinfo->npods = ret;

        sgl->offset = 0;
        ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
        sgl->offset = sg_offset;
        if (!ret) {
                pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
                         __func__, 0, xferlen, sgcnt);
                goto rel_ppods;
        }

        cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
                                xferlen, &ttinfo->hdr);

        ret = cxgbit_ddp_set_map(ppm, csk, ttinfo);
        if (ret < 0) {
                __skb_queue_purge(&csk->ppodq);
                dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
                goto rel_ppods;
        }

        return 0;

rel_ppods:
        cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
        return -EINVAL;
}

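/*
 * Called while building an R2T: attempt to set up DDP for the WRITE
 * command and report the resulting tag as the target transfer tag.
 */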
void
cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                   struct iscsi_r2t *r2t)
{
        struct cxgbit_sock *csk = conn->context;
        struct cxgbit_device *cdev = csk->com.cdev;
        struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
        struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
        int ret;

        if ((!ccmd->setup_ddp) ||
            (!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
                goto out;

        ccmd->setup_ddp = false;

        ttinfo->sgl = cmd->se_cmd.t_data_sg;
        ttinfo->nents = cmd->se_cmd.t_data_nents;

        ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
        if (ret < 0) {
                pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
                         csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);

                ttinfo->sgl = NULL;
                ttinfo->nents = 0;
        } else {
                ccmd->release = true;
        }
out:
        pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag);
        r2t->targ_xfer_tag = ttinfo->tag;
}

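/*
 * Release a command's DDP state: drop the ppod reservation and DMA-unmap
 * the scatterlist, aborting the connection first if the write has not
 * fully completed; otherwise just put the single mapped page.
 */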
void cxgbit_unmap_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
        struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);

        if (ccmd->release) {
                struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;

                if (ttinfo->sgl) {
                        struct cxgbit_sock *csk = conn->context;
                        struct cxgbit_device *cdev = csk->com.cdev;
                        struct cxgbi_ppm *ppm = cdev2ppm(cdev);

                        /* Abort the TCP conn if DDP is not complete to
                         * avoid any possibility of DDP after freeing
                         * the cmd.
                         */
                        if (unlikely(cmd->write_data_done !=
                                     cmd->se_cmd.data_length))
                                cxgbit_abort_conn(csk);

                        cxgbi_ppm_ppod_release(ppm, ttinfo->idx);

                        dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
                                     ttinfo->nents, DMA_FROM_DEVICE);
                } else {
                        put_page(sg_page(&ccmd->sg));
                }

                ccmd->release = false;
        }
}

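/*
 * Per-adapter DDP setup: check the iSCSI memory region, derive the tag
 * format from the LLD info and initialise the page-pod manager; DDP is
 * enabled only if a usable page-size index and enough ppods are available.
 */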
int cxgbit_ddp_init(struct cxgbit_device *cdev)
{
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct net_device *ndev = cdev->lldi.ports[0];
        struct cxgbi_tag_format tformat;
        int ret, i;

        if (!lldi->vr->iscsi.size) {
                pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
                return -EACCES;
        }

        memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
        for (i = 0; i < 4; i++)
                tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
                                         & 0xF;
        cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);

        ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0],
                             cdev->lldi.pdev, &cdev->lldi, &tformat,
                             lldi->vr->iscsi.size, lldi->iscsi_llimit,
                             lldi->vr->iscsi.start, 2,
                             lldi->vr->ppod_edram.start,
                             lldi->vr->ppod_edram.size);

        if (ret >= 0) {
                struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm);

                if ((ppm->tformat.pgsz_idx_dflt < DDP_PGIDX_MAX) &&
                    (ppm->ppmax >= 1024))
                        set_bit(CDEV_DDP_ENABLE, &cdev->flags);
                ret = 0;
        }

        return ret;
}