/*
 * linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.11 $)
 *
 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
 *
 * Copyright 2004 IBM Corporation
 *
 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
 *
 * $Revision: 1.11 $	$Date: 2005/03/24 09:04:18 $
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>

#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_eddp.h"
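/*
 * Check whether enough empty buffers are available on the outbound queue
 * to hold all elements of the given EDDP context.  Returns the number of
 * buffers that would be consumed, or -EBUSY if a needed buffer is in use.
 */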
int
qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
				    struct qeth_eddp_context *ctx)
{
	int index = queue->next_buf_to_fill;
	int elements_needed = ctx->num_elements;
	int elements_in_buffer;
	int skbs_in_buffer;
	int buffers_needed = 0;

	QETH_DBF_TEXT(trace, 5, "eddpcbfc");
	while (elements_needed > 0) {
		buffers_needed++;
		if (atomic_read(&queue->bufs[index].state) !=
				QETH_QDIO_BUF_EMPTY)
			return -EBUSY;
		elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
				     queue->bufs[index].next_element_to_fill;
		skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
		elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
		index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
	}
	return buffers_needed;
}
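/* Free a context: its data pages, its element array and the struct itself. */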
static inline void
qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
	int i;

	QETH_DBF_TEXT(trace, 5, "eddpfctx");
	for (i = 0; i < ctx->num_pages; ++i)
		free_page((unsigned long)ctx->pages[i]);
	kfree(ctx->pages);
	if (ctx->elements != NULL)
		kfree(ctx->elements);
	kfree(ctx);
}
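/* Take an additional reference on a context. */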
static inline void
qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
	atomic_inc(&ctx->refcnt);
}
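/* Drop a reference; frees the context when the last reference goes away. */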
void
qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
	if (atomic_dec_return(&ctx->refcnt) == 0)
		qeth_eddp_free_context(ctx);
}
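/*
 * Drop the buffer's references on all EDDP contexts attached to it and
 * free the reference nodes.
 */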
void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(trace, 6, "eddprctx");
	while (!list_empty(&buf->ctx_list)){
		ref = list_entry(buf->ctx_list.next,
				 struct qeth_eddp_context_reference, list);
		qeth_eddp_put_context(ref->ctx);
		list_del(&ref->list);
		kfree(ref);
	}
}
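/*
 * Attach ctx to the buffer's ctx_list and take a reference on it, so the
 * context stays alive until the buffer has been processed.
 */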
static inline int
qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
			  struct qeth_eddp_context *ctx)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(trace, 6, "eddprfcx");
	ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
	if (ref == NULL)
		return -ENOMEM;
	qeth_eddp_get_context(ctx);
	ref->ctx = ctx;
	list_add_tail(&ref->list, &buf->ctx_list);
	return 0;
}
int
qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
		      struct qeth_eddp_context *ctx,
		      int index)
{
	struct qeth_qdio_out_buffer *buf = NULL;
	struct qdio_buffer *buffer;
	int elements = ctx->num_elements;
	int element = 0;
	int flush_cnt = 0;
	int must_refcnt = 1;
	int i;

	QETH_DBF_TEXT(trace, 5, "eddpfibu");
	while (elements > 0) {
		buf = &queue->bufs[index];
		if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY){
			/* normally this should not happen since we checked for
			 * available elements in qeth_check_elements_for_context
			 */
			if (element == 0)
				return -EBUSY;
			else {
				PRINT_WARN("could only partially fill eddp "
					   "buffer!\n");
				goto out;
			}
		}
		/* check if the whole next skb fits into current buffer */
		if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
					buf->next_element_to_fill)
				< ctx->elements_per_skb){
			/* no -> go to next buffer */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
			flush_cnt++;
			/* new buffer, so we have to add ctx to buffer's
			 * ctx_list and increment ctx's refcnt */
			must_refcnt = 1;
			continue;
		}
		if (must_refcnt){
			must_refcnt = 0;
			if (qeth_eddp_buf_ref_context(buf, ctx)){
				PRINT_WARN("no memory to create eddp context "
					   "reference\n");
				goto out_check;
			}
		}
		buffer = buf->buffer;
		/* fill one skb into buffer */
		for (i = 0; i < ctx->elements_per_skb; ++i){
			buffer->element[buf->next_element_to_fill].addr =
				ctx->elements[element].addr;
			buffer->element[buf->next_element_to_fill].length =
				ctx->elements[element].length;
			buffer->element[buf->next_element_to_fill].flags =
				ctx->elements[element].flags;
			buf->next_element_to_fill++;
			element++;
			elements--;
		}
	}
out_check:
	if (!queue->do_pack) {
		QETH_DBF_TEXT(trace, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		if (buf->next_element_to_fill > 0){
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	} else {
#ifdef CONFIG_QETH_PERF_STATS
		queue->card->perf_stats.skbs_sent_pack++;
#endif
		QETH_DBF_TEXT(trace, 6, "fillbfpa");
		if (buf->next_element_to_fill >=
				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/* packed buffer is full -> set state PRIMED
			 * -> will be flushed */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	}
out:
	return flush_cnt;
}
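/* Length of the linear part of the skb, i.e. skb->len minus all frags. */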
static inline int
qeth_get_skb_data_len(struct sk_buff *skb)
{
	int len = skb->len;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i)
		len -= skb_shinfo(skb)->frags[i].size;
	return len;
}
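/*
 * Write the qeth, MAC/VLAN (layer 2 only), network and transport headers
 * of the next segment into the context pages and start a new buffer
 * element pointing at them.
 */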
static inline void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
			      struct qeth_eddp_data *eddp)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	int hdr_len;
	struct qeth_eddp_element *element;

	QETH_DBF_TEXT(trace, 5, "eddpcrsh");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	hdr_len = eddp->nhl + eddp->thl;
	/* FIXME: layer2 and VLAN !!! */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
		hdr_len += ETH_HLEN;
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
		hdr_len += VLAN_HLEN;
	/* does the complete header fit into the current page? */
	page_remainder = PAGE_SIZE - page_offset;
	if (page_remainder < (sizeof(struct qeth_hdr) + hdr_len)){
		/* no -> go to start of next page */
		ctx->offset += page_remainder;
		page = ctx->pages[ctx->offset >> PAGE_SHIFT];
		page_offset = 0;
	}
	memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
	element->addr = page + page_offset;
	element->length = sizeof(struct qeth_hdr);
	ctx->offset += sizeof(struct qeth_hdr);
	page_offset += sizeof(struct qeth_hdr);
	/* add mac header (?) */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
		memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
		element->length += ETH_HLEN;
		ctx->offset += ETH_HLEN;
		page_offset += ETH_HLEN;
	}
	/* add VLAN tag */
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)){
		memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
		element->length += VLAN_HLEN;
		ctx->offset += VLAN_HLEN;
		page_offset += VLAN_HLEN;
	}
	/* add network header */
	memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
	element->length += eddp->nhl;
	eddp->nh_in_ctx = page + page_offset;
	ctx->offset += eddp->nhl;
	page_offset += eddp->nhl;
	/* add transport header */
	memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
	element->length += eddp->thl;
	eddp->th_in_ctx = page + page_offset;
	ctx->offset += eddp->thl;
}
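/*
 * Copy len bytes of TCP payload from the skb (linear part and page frags)
 * to dst, folding the copied bytes into the running checksum *hcsum.
 */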
static inline void
qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
			u32 *hcsum)
{
	struct skb_frag_struct *frag;
	int left_in_frag;
	int copy_len;
	u8 *src;

	QETH_DBF_TEXT(trace, 5, "eddpcdtc");
	if (skb_shinfo(eddp->skb)->nr_frags == 0) {
		memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
		*hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
				      *hcsum);
		eddp->skb_offset += len;
	} else {
		while (len > 0) {
			if (eddp->frag < 0) {
				/* we're in skb->data */
				left_in_frag = qeth_get_skb_data_len(eddp->skb)
						- eddp->skb_offset;
				src = eddp->skb->data + eddp->skb_offset;
			} else {
				frag = &skb_shinfo(eddp->skb)->
					frags[eddp->frag];
				left_in_frag = frag->size - eddp->frag_offset;
				src = (u8 *)(
					(page_to_pfn(frag->page) << PAGE_SHIFT)+
					frag->page_offset + eddp->frag_offset);
			}
			if (left_in_frag <= 0) {
				eddp->frag++;
				eddp->frag_offset = 0;
				continue;
			}
			copy_len = min(left_in_frag, len);
			memcpy(dst, src, copy_len);
			*hcsum = csum_partial(src, copy_len, *hcsum);
			dst += copy_len;
			eddp->frag_offset += copy_len;
			eddp->skb_offset += copy_len;
			len -= copy_len;
		}
	}
}
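/*
 * Copy one segment's payload into the context pages, creating FIRST/
 * MIDDLE/LAST fragment elements at page boundaries, and store the
 * finalized TCP checksum in the previously copied transport header.
 */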
static inline void
qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
				  struct qeth_eddp_data *eddp, int data_len,
				  u32 hcsum)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	struct qeth_eddp_element *element;
	int first_lap = 1;

	QETH_DBF_TEXT(trace, 5, "eddpcsdt");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	while (data_len){
		page_remainder = PAGE_SIZE - page_offset;
		if (page_remainder < data_len){
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						page_remainder, &hcsum);
			element->length += page_remainder;
			if (first_lap)
				element->flags = SBAL_FLAGS_FIRST_FRAG;
			else
				element->flags = SBAL_FLAGS_MIDDLE_FRAG;
			ctx->num_elements++;
			element++;
			data_len -= page_remainder;
			ctx->offset += page_remainder;
			page = ctx->pages[ctx->offset >> PAGE_SHIFT];
			page_offset = 0;
			element->addr = page + page_offset;
		} else {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						data_len, &hcsum);
			element->length += data_len;
			if (!first_lap)
				element->flags = SBAL_FLAGS_LAST_FRAG;
			ctx->num_elements++;
			ctx->offset += data_len;
			data_len = 0;
		}
		first_lap = 0;
	}
	((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}
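/*
 * Compute the IPv4 pseudo header checksum and fold in the TCP header;
 * the payload is added later while it is copied.
 */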
static inline u32
qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
{
	u32 phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt4");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
				    eddp->thl + data_len, IPPROTO_TCP, 0);
	/* compute checksum of tcp header */
	return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}
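/*
 * Start the checksum for an IPv6 segment: sum the source and destination
 * addresses and the protocol value of the pseudo header.
 */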
static inline u32
qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
{
	u32 proto;
	u32 phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt6");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
			      sizeof(struct in6_addr), 0);
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
			      sizeof(struct in6_addr), phcsum);
	proto = htonl(IPPROTO_TCP);
	phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
	return phcsum;
}
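/*
 * Allocate a qeth_eddp_data scratchpad and copy the original qeth,
 * network and transport headers into it; returns NULL on allocation
 * failure.
 */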
static inline struct qeth_eddp_data *
qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
{
	struct qeth_eddp_data *eddp;

	QETH_DBF_TEXT(trace, 5, "eddpcrda");
	eddp = kmalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
	if (eddp){
		memset(eddp, 0, sizeof(struct qeth_eddp_data));
		eddp->nhl = nhl;
		eddp->thl = thl;
		memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
		memcpy(&eddp->nh, nh, nhl);
		memcpy(&eddp->th, th, thl);
		eddp->frag = -1; /* initially we're in skb->data */
	}
	return eddp;
}
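/*
 * Core segmentation loop: split the skb into tso_size'd segments, patch
 * the length, checksum and sequence fields of the copied headers for each
 * round, and fill headers and payload into the context.
 */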
static inline void
__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
			     struct qeth_eddp_data *eddp)
{
	struct tcphdr *tcph;
	int data_len;
	u32 hcsum;

	QETH_DBF_TEXT(trace, 5, "eddpftcp");
	eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
	tcph = eddp->skb->h.th;
	while (eddp->skb_offset < eddp->skb->len) {
		data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
			       (int)(eddp->skb->len - eddp->skb_offset));
		/* prepare qdio hdr */
		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2){
			eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
						     eddp->nhl + eddp->thl -
						     sizeof(struct qeth_hdr);
#ifdef CONFIG_QETH_VLAN
			if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
				eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
		} else
			eddp->qh.hdr.l3.length = data_len + eddp->nhl +
						 eddp->thl;
		/* prepare ip hdr */
		if (eddp->skb->protocol == ETH_P_IP){
			eddp->nh.ip4.h.tot_len = data_len + eddp->nhl +
						 eddp->thl;
			eddp->nh.ip4.h.check = 0;
			eddp->nh.ip4.h.check =
				ip_fast_csum((u8 *)&eddp->nh.ip4.h,
					     eddp->nh.ip4.h.ihl);
		} else
			eddp->nh.ip6.h.payload_len = data_len + eddp->thl;
		/* prepare tcp hdr */
		if (data_len == (eddp->skb->len - eddp->skb_offset)){
			/* last segment -> set FIN and PSH flags */
			eddp->th.tcp.h.fin = tcph->fin;
			eddp->th.tcp.h.psh = tcph->psh;
		}
		if (eddp->skb->protocol == ETH_P_IP)
			hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
		else
			hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
		/* fill the next segment into the context */
		qeth_eddp_create_segment_hdrs(ctx, eddp);
		qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
		if (eddp->skb_offset >= eddp->skb->len)
			break;
		/* prepare headers for next round */
		if (eddp->skb->protocol == ETH_P_IP)
			eddp->nh.ip4.h.id++;
		eddp->th.tcp.h.seq += data_len;
	}
}
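/*
 * Set up the eddp scratchpad from the skb's real headers (including MAC
 * and VLAN data in layer 2 mode), then run the segmentation loop.
 */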
static inline int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
			   struct sk_buff *skb, struct qeth_hdr *qhdr)
{
	struct qeth_eddp_data *eddp = NULL;

	QETH_DBF_TEXT(trace, 5, "eddpficx");
	/* create our segmentation headers and copy original headers */
	if (skb->protocol == ETH_P_IP)
		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
				skb->nh.iph->ihl*4,
				(u8 *)skb->h.th, skb->h.th->doff*4);
	else
		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h,
				sizeof(struct ipv6hdr),
				(u8 *)skb->h.th, skb->h.th->doff*4);

	if (eddp == NULL) {
		QETH_DBF_TEXT(trace, 2, "eddpfcnm");
		return -ENOMEM;
	}
	if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
#ifdef CONFIG_QETH_VLAN
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
			eddp->vlan[0] = __constant_htons(skb->protocol);
			eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
		}
#endif /* CONFIG_QETH_VLAN */
	}
	/* the next flags will only be set on the last segment */
	eddp->th.tcp.h.fin = 0;
	eddp->th.tcp.h.psh = 0;
	eddp->skb = skb;
	/* begin segmentation and fill context */
	__qeth_eddp_fill_context_tcp(ctx, eddp);
	kfree(eddp);
	return 0;
}
static inline void
qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
			 int hdr_len)
{
	int skbs_per_page;

	QETH_DBF_TEXT(trace, 5, "eddpcanp");
	/* can we put multiple skbs in one page? */
	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
	if (skbs_per_page > 1){
		ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
				 skbs_per_page + 1;
		ctx->elements_per_skb = 1;
	} else {
		/* no -> how many elements per skb? */
		ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
					 PAGE_SIZE) >> PAGE_SHIFT;
		ctx->num_pages = ctx->elements_per_skb *
				 (skb_shinfo(skb)->tso_segs + 1);
	}
	ctx->num_elements = ctx->elements_per_skb *
			    (skb_shinfo(skb)->tso_segs + 1);
}
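/*
 * Allocate and initialize a context: the struct itself, the data pages
 * and the element array, sized by qeth_eddp_calc_num_pages().
 */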
static inline struct qeth_eddp_context *
qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
				 int hdr_len)
{
	struct qeth_eddp_context *ctx = NULL;
	u8 *addr;
	int i;

	QETH_DBF_TEXT(trace, 5, "creddpcg");
	/* create the context and allocate pages */
	ctx = kmalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
	if (ctx == NULL){
		QETH_DBF_TEXT(trace, 2, "ceddpcn1");
		return NULL;
	}
	memset(ctx, 0, sizeof(struct qeth_eddp_context));
	ctx->type = QETH_LARGE_SEND_EDDP;
	qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
	if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)){
		QETH_DBF_TEXT(trace, 2, "ceddpcis");
		kfree(ctx);
		return NULL;
	}
	ctx->pages = kmalloc(ctx->num_pages * sizeof(u8 *), GFP_ATOMIC);
	if (ctx->pages == NULL){
		QETH_DBF_TEXT(trace, 2, "ceddpcn2");
		kfree(ctx);
		return NULL;
	}
	memset(ctx->pages, 0, ctx->num_pages * sizeof(u8 *));
	for (i = 0; i < ctx->num_pages; ++i){
		addr = (u8 *)__get_free_page(GFP_ATOMIC);
		if (addr == NULL){
			QETH_DBF_TEXT(trace, 2, "ceddpcn3");
			ctx->num_pages = i;
			qeth_eddp_free_context(ctx);
			return NULL;
		}
		memset(addr, 0, PAGE_SIZE);
		ctx->pages[i] = addr;
	}
	ctx->elements = kmalloc(ctx->num_elements *
				sizeof(struct qeth_eddp_element), GFP_ATOMIC);
	if (ctx->elements == NULL){
		QETH_DBF_TEXT(trace, 2, "ceddpcn4");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	memset(ctx->elements, 0,
	       ctx->num_elements * sizeof(struct qeth_eddp_element));
	/* reset num_elements; will be incremented again in fill_buffer to
	 * reflect number of actually used elements */
	ctx->num_elements = 0;
	return ctx;
}
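/*
 * Create and fill a context for a TCP skb (IPv4 or IPv6); returns NULL
 * on invalid protocol, allocation failure or fill error.
 */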
static inline struct qeth_eddp_context *
qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
			     struct qeth_hdr *qhdr)
{
	struct qeth_eddp_context *ctx = NULL;

	QETH_DBF_TEXT(trace, 5, "creddpct");
	if (skb->protocol == ETH_P_IP)
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
			skb->h.th->doff*4);
	else if (skb->protocol == ETH_P_IPV6)
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
			skb->h.th->doff*4);
	else
		QETH_DBF_TEXT(trace, 2, "cetcpinv");

	if (ctx == NULL) {
		QETH_DBF_TEXT(trace, 2, "creddpnl");
		return NULL;
	}
	if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)){
		QETH_DBF_TEXT(trace, 2, "ceddptfe");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	atomic_set(&ctx->refcnt, 1);
	return ctx;
}
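/* Entry point from the qeth transmit path; only TCP is handled by EDDP. */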
struct qeth_eddp_context *
qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
			 struct qeth_hdr *qhdr)
{
	QETH_DBF_TEXT(trace, 5, "creddpc");
	switch (skb->sk->sk_protocol){
	case IPPROTO_TCP:
		return qeth_eddp_create_context_tcp(card, skb, qhdr);
	default:
		QETH_DBF_TEXT(trace, 2, "eddpinvp");
	}
	return NULL;
}