/*
 * linux/drivers/s390/net/qeth_eddp.c
 *
 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
 *
 * Copyright 2004 IBM Corporation
 *
 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
 */
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>

#include <net/ip.h>

#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_eddp.h"
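/*
 * Checks whether enough free QDIO output buffers are available to hold
 * all elements of the given context, starting at the queue's next buffer
 * to fill. Returns the number of buffers needed, or -EBUSY if a buffer
 * in the range is not empty.
 */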
int
qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
                                    struct qeth_eddp_context *ctx)
{
        int index = queue->next_buf_to_fill;
        int elements_needed = ctx->num_elements;
        int elements_in_buffer;
        int skbs_in_buffer;
        int buffers_needed = 0;

        QETH_DBF_TEXT(trace, 5, "eddpcbfc");
        while (elements_needed > 0) {
                buffers_needed++;
                if (atomic_read(&queue->bufs[index].state) !=
                                QETH_QDIO_BUF_EMPTY)
                        return -EBUSY;

                elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                                     queue->bufs[index].next_element_to_fill;
                skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
                elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
                index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
        }
        return buffers_needed;
}
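/*
 * Releases all pages owned by the context, then the page and element
 * arrays and the context itself. Called once the reference count has
 * dropped to zero.
 */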
static void
qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
        int i;

        QETH_DBF_TEXT(trace, 5, "eddpfctx");
        for (i = 0; i < ctx->num_pages; ++i)
                free_page((unsigned long)ctx->pages[i]);
        kfree(ctx->pages);
        kfree(ctx->elements);
        kfree(ctx);
}
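/*
 * Reference counting for EDDP contexts: a context stays alive as long as
 * at least one QDIO output buffer still points into its pages.
 */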
static void
qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
        atomic_inc(&ctx->refcnt);
}
void
qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
        if (atomic_dec_return(&ctx->refcnt) == 0)
                qeth_eddp_free_context(ctx);
}
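/*
 * Drops every context reference held by a QDIO output buffer; invoked
 * when the buffer has been flushed and its contents are no longer needed.
 */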
void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(trace, 6, "eddprctx");
        while (!list_empty(&buf->ctx_list)) {
                ref = list_entry(buf->ctx_list.next,
                                 struct qeth_eddp_context_reference, list);
                qeth_eddp_put_context(ref->ctx);
                list_del(&ref->list);
                kfree(ref);
        }
}
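/*
 * Attaches the context to the buffer's ctx_list and takes a reference,
 * so the context cannot be freed while the buffer still uses its pages.
 * Returns -ENOMEM if the reference node cannot be allocated.
 */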
static int
qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
                          struct qeth_eddp_context *ctx)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(trace, 6, "eddprfcx");
        ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
        if (ref == NULL)
                return -ENOMEM;
        qeth_eddp_get_context(ctx);
        ref->ctx = ctx;
        list_add_tail(&ref->list, &buf->ctx_list);
        return 0;
}
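/*
 * Copies the context's buffer elements into QDIO output buffers, starting
 * at the given index and advancing to the next buffer whenever the current
 * one cannot hold a whole segment. Returns the number of buffers primed
 * for flushing, or -EBUSY if no element could be placed at all.
 */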
int
qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
                      struct qeth_eddp_context *ctx,
                      int index)
{
        struct qeth_qdio_out_buffer *buf = NULL;
        struct qdio_buffer *buffer;
        int elements = ctx->num_elements;
        int element = 0;
        int flush_cnt = 0;
        int must_refcnt = 1;
        int i;

        QETH_DBF_TEXT(trace, 5, "eddpfibu");
        while (elements > 0) {
                buf = &queue->bufs[index];
                if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
                        /* normally this should not happen since we checked
                         * for available elements in
                         * qeth_check_elements_for_context */
                        if (element == 0)
                                return -EBUSY;
                        PRINT_WARN("could only partially fill eddp "
                                   "buffer!\n");
                        goto out;
                }
                /* check if the whole next skb fits into current buffer */
                if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                                        buf->next_element_to_fill)
                                < ctx->elements_per_skb) {
                        /* no -> go to next buffer */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
                        flush_cnt++;
                        /* new buffer, so we have to add ctx to the buffer's
                         * ctx_list and increment ctx's refcnt */
                        must_refcnt = 1;
                        continue;
                }
                if (must_refcnt) {
                        must_refcnt = 0;
                        if (qeth_eddp_buf_ref_context(buf, ctx)) {
                                PRINT_WARN("no memory to create eddp context "
                                           "reference\n");
                                goto out_check;
                        }
                }
                buffer = buf->buffer;
                /* fill one skb into buffer */
                for (i = 0; i < ctx->elements_per_skb; ++i) {
                        buffer->element[buf->next_element_to_fill].addr =
                                ctx->elements[element].addr;
                        buffer->element[buf->next_element_to_fill].length =
                                ctx->elements[element].length;
                        buffer->element[buf->next_element_to_fill].flags =
                                ctx->elements[element].flags;
                        buf->next_element_to_fill++;
                        element++;
                        elements--;
                }
        }
out_check:
        if (!queue->do_pack) {
                QETH_DBF_TEXT(trace, 6, "fillbfnp");
                /* set state to PRIMED -> will be flushed */
                if (buf->next_element_to_fill > 0) {
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        } else {
                if (queue->card->options.performance_stats)
                        queue->card->perf_stats.skbs_sent_pack++;
                QETH_DBF_TEXT(trace, 6, "fillbfpa");
                if (buf->next_element_to_fill >=
                                QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
                        /* packed buffer is full -> set state PRIMED
                         * -> will be flushed */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        }
out:
        return flush_cnt;
}
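/*
 * Writes the qeth header, optional MAC and VLAN headers, and the network
 * and transport headers of one segment into the context pages, starting a
 * new page when the complete packet would not fit. Pointers to the copied
 * network and transport headers are kept in the eddp data so checksums
 * can be patched in later.
 */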
static void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
                              struct qeth_eddp_data *eddp, int data_len)
{
        u8 *page;
        int page_remainder;
        int page_offset;
        int pkt_len;
        struct qeth_eddp_element *element;

        QETH_DBF_TEXT(trace, 5, "eddpcrsh");
        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
        page_offset = ctx->offset % PAGE_SIZE;
        element = &ctx->elements[ctx->num_elements];
        pkt_len = eddp->nhl + eddp->thl + data_len;
        /* FIXME: layer2 and VLAN !!! */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
                pkt_len += ETH_HLEN;
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                pkt_len += VLAN_HLEN;
        /* does the complete packet fit in the current page? */
        page_remainder = PAGE_SIZE - page_offset;
        if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
                /* no -> go to start of next page */
                ctx->offset += page_remainder;
                page = ctx->pages[ctx->offset >> PAGE_SHIFT];
                page_offset = 0;
        }
        memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
        element->addr = page + page_offset;
        element->length = sizeof(struct qeth_hdr);
        ctx->offset += sizeof(struct qeth_hdr);
        page_offset += sizeof(struct qeth_hdr);
        /* add mac header */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
                element->length += ETH_HLEN;
                ctx->offset += ETH_HLEN;
                page_offset += ETH_HLEN;
        }
        /* add VLAN tag */
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
                memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
                element->length += VLAN_HLEN;
                ctx->offset += VLAN_HLEN;
                page_offset += VLAN_HLEN;
        }
        /* add network header */
        memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
        element->length += eddp->nhl;
        eddp->nh_in_ctx = page + page_offset;
        ctx->offset += eddp->nhl;
        page_offset += eddp->nhl;
        /* add transport header */
        memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
        element->length += eddp->thl;
        eddp->th_in_ctx = page + page_offset;
        ctx->offset += eddp->thl;
}
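/*
 * Copies len bytes of TCP payload from the skb (linear data or page
 * fragments) to dst, updating the running checksum and the skb/fragment
 * offsets in the eddp data as it goes.
 */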
static void
qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
                        __wsum *hcsum)
{
        struct skb_frag_struct *frag;
        int left_in_frag;
        int copy_len;
        u8 *src;

        QETH_DBF_TEXT(trace, 5, "eddpcdtc");
        if (skb_shinfo(eddp->skb)->nr_frags == 0) {
                skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset,
                                                 dst, len);
                *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
                                      *hcsum);
                eddp->skb_offset += len;
        } else {
                while (len > 0) {
                        if (eddp->frag < 0) {
                                /* we're in skb->data */
                                left_in_frag = (eddp->skb->len -
                                                eddp->skb->data_len)
                                               - eddp->skb_offset;
                                src = eddp->skb->data + eddp->skb_offset;
                        } else {
                                frag = &skb_shinfo(eddp->skb)->
                                        frags[eddp->frag];
                                left_in_frag = frag->size - eddp->frag_offset;
                                src = (u8 *)(
                                        (page_to_pfn(frag->page) << PAGE_SHIFT)+
                                        frag->page_offset + eddp->frag_offset);
                        }
                        if (left_in_frag <= 0) {
                                eddp->frag++;
                                eddp->frag_offset = 0;
                                continue;
                        }
                        copy_len = min(left_in_frag, len);
                        memcpy(dst, src, copy_len);
                        *hcsum = csum_partial(src, copy_len, *hcsum);
                        dst += copy_len;
                        eddp->frag_offset += copy_len;
                        eddp->skb_offset += copy_len;
                        len -= copy_len;
                }
        }
}
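/*
 * Fills the payload of one segment into the context pages, splitting it
 * across pages where necessary and tagging the resulting buffer elements
 * with first/middle/last fragment flags. Finally folds the accumulated
 * checksum into the TCP header that was copied into the context earlier.
 */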
static void
qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
                                  struct qeth_eddp_data *eddp, int data_len,
                                  __wsum hcsum)
{
        u8 *page;
        int page_remainder;
        int page_offset;
        struct qeth_eddp_element *element;
        int first_lap = 1;

        QETH_DBF_TEXT(trace, 5, "eddpcsdt");
        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
        page_offset = ctx->offset % PAGE_SIZE;
        element = &ctx->elements[ctx->num_elements];
        while (data_len) {
                page_remainder = PAGE_SIZE - page_offset;
                if (page_remainder < data_len) {
                        qeth_eddp_copy_data_tcp(page + page_offset, eddp,
                                                page_remainder, &hcsum);
                        element->length += page_remainder;
                        if (first_lap)
                                element->flags = SBAL_FLAGS_FIRST_FRAG;
                        else
                                element->flags = SBAL_FLAGS_MIDDLE_FRAG;
                        ctx->num_elements++;
                        element++;
                        data_len -= page_remainder;
                        ctx->offset += page_remainder;
                        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
                        page_offset = 0;
                        element->addr = page + page_offset;
                } else {
                        qeth_eddp_copy_data_tcp(page + page_offset, eddp,
                                                data_len, &hcsum);
                        element->length += data_len;
                        if (!first_lap)
                                element->flags = SBAL_FLAGS_LAST_FRAG;
                        ctx->num_elements++;
                        ctx->offset += data_len;
                }
                first_lap = 0;
        }
        /* patch the final checksum into the TCP header in the context */
        ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}
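/*
 * Computes the checksum over the IPv4 pseudo header and the TCP header;
 * the segment payload is added to this running sum while it is copied
 * into the context pages.
 */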
static __wsum
qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
{
        __wsum phcsum; /* pseudo header checksum */

        QETH_DBF_TEXT(trace, 5, "eddpckt4");
        eddp->th.tcp.h.check = 0;
        /* compute pseudo header checksum */
        phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
                                    eddp->thl + data_len, IPPROTO_TCP, 0);
        /* compute checksum of tcp header */
        return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}
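/*
 * IPv6 counterpart: builds a partial checksum from the IPv6 source and
 * destination addresses and the TCP protocol number for the pseudo header.
 */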
static __wsum
qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
{
        __be32 proto;
        __wsum phcsum; /* pseudo header checksum */

        QETH_DBF_TEXT(trace, 5, "eddpckt6");
        eddp->th.tcp.h.check = 0;
        /* compute pseudo header checksum */
        phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
                              sizeof(struct in6_addr), 0);
        phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
                              sizeof(struct in6_addr), phcsum);
        proto = htonl(IPPROTO_TCP);
        phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
        return phcsum;
}
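/*
 * Allocates and initializes a qeth_eddp_data block with copies of the
 * qeth, network, and transport headers of the skb to be segmented.
 */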
static struct qeth_eddp_data *
qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
{
        struct qeth_eddp_data *eddp;

        QETH_DBF_TEXT(trace, 5, "eddpcrda");
        eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
        if (eddp) {
                eddp->nhl = nhl;
                eddp->thl = thl;
                memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
                memcpy(&eddp->nh, nh, nhl);
                memcpy(&eddp->th, th, thl);
                eddp->frag = -1; /* initially we're in skb->data */
        }
        return eddp;
}
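/*
 * Core TCP segmentation loop: cuts the skb payload into gso_size chunks,
 * adjusts the qdio, IP, and TCP headers for each segment (lengths, IP id,
 * sequence number, checksums, FIN/PSH on the last segment), and writes
 * headers and data into the context via the helpers above.
 */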
static void
__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                             struct qeth_eddp_data *eddp)
{
        struct tcphdr *tcph;
        int data_len;
        __wsum hcsum;

        QETH_DBF_TEXT(trace, 5, "eddpftcp");
        eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                eddp->skb_offset += sizeof(struct ethhdr);
#ifdef CONFIG_QETH_VLAN
                if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                        eddp->skb_offset += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
        }
        tcph = tcp_hdr(eddp->skb);
        while (eddp->skb_offset < eddp->skb->len) {
                data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
                               (int)(eddp->skb->len - eddp->skb_offset));
                /* prepare qdio hdr */
                if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                        eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
                                                     eddp->nhl + eddp->thl -
                                                     sizeof(struct qeth_hdr);
#ifdef CONFIG_QETH_VLAN
                        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                                eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
                } else
                        eddp->qh.hdr.l3.length = data_len + eddp->nhl +
                                                 eddp->thl;
                /* prepare ip hdr */
                if (eddp->skb->protocol == htons(ETH_P_IP)) {
                        eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl +
                                                       eddp->thl);
                        eddp->nh.ip4.h.check = 0;
                        eddp->nh.ip4.h.check =
                                ip_fast_csum((u8 *)&eddp->nh.ip4.h,
                                             eddp->nh.ip4.h.ihl);
                } else
                        eddp->nh.ip6.h.payload_len = htons(data_len +
                                                           eddp->thl);
                /* prepare tcp hdr */
                if (data_len == (eddp->skb->len - eddp->skb_offset)) {
                        /* last segment -> set FIN and PSH flags */
                        eddp->th.tcp.h.fin = tcph->fin;
                        eddp->th.tcp.h.psh = tcph->psh;
                }
                if (eddp->skb->protocol == htons(ETH_P_IP))
                        hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
                else
                        hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
                /* fill the next segment into the context */
                qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
                qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
                if (eddp->skb_offset >= eddp->skb->len)
                        break;
                /* prepare headers for next round */
                if (eddp->skb->protocol == htons(ETH_P_IP))
                        eddp->nh.ip4.h.id =
                                htons(ntohs(eddp->nh.ip4.h.id) + 1);
                eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) +
                                           data_len);
        }
}
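/*
 * Entry point for filling a context from a TCP skb: builds the eddp data
 * from the skb headers (including MAC/VLAN in layer-2 mode), clears FIN
 * and PSH for all but the last segment, and runs the segmentation loop.
 * Returns -ENOMEM if the eddp data cannot be allocated.
 */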
static int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                           struct sk_buff *skb, struct qeth_hdr *qhdr)
{
        struct qeth_eddp_data *eddp = NULL;

        QETH_DBF_TEXT(trace, 5, "eddpficx");
        /* create our segmentation headers and copy original headers */
        if (skb->protocol == htons(ETH_P_IP))
                eddp = qeth_eddp_create_eddp_data(qhdr,
                                                  skb_network_header(skb),
                                                  ip_hdrlen(skb),
                                                  skb_transport_header(skb),
                                                  tcp_hdrlen(skb));
        else
                eddp = qeth_eddp_create_eddp_data(qhdr,
                                                  skb_network_header(skb),
                                                  sizeof(struct ipv6hdr),
                                                  skb_transport_header(skb),
                                                  tcp_hdrlen(skb));

        if (eddp == NULL) {
                QETH_DBF_TEXT(trace, 2, "eddpfcnm");
                return -ENOMEM;
        }
        if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                skb_set_mac_header(skb, sizeof(struct qeth_hdr));
                memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
#ifdef CONFIG_QETH_VLAN
                if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
                        eddp->vlan[0] = skb->protocol;
                        eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
                }
#endif /* CONFIG_QETH_VLAN */
        }
        /* the next flags will only be set on the last segment */
        eddp->th.tcp.h.fin = 0;
        eddp->th.tcp.h.psh = 0;
        eddp->skb = skb;
        /* begin segmentation and fill context */
        __qeth_eddp_fill_context_tcp(ctx, eddp);
        kfree(eddp);
        return 0;
}
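/*
 * Estimates how many pages and buffer elements the context needs: either
 * several segments share one page, or one segment spans several elements.
 */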
static void
qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
                         int hdr_len)
{
        int skbs_per_page;

        QETH_DBF_TEXT(trace, 5, "eddpcanp");
        /* can we put multiple skbs in one page? */
        skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
        if (skbs_per_page > 1) {
                ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
                                 skbs_per_page + 1;
                ctx->elements_per_skb = 1;
        } else {
                /* no -> how many elements per skb? */
                ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
                                         PAGE_SIZE) >> PAGE_SHIFT;
                ctx->num_pages = ctx->elements_per_skb *
                                 (skb_shinfo(skb)->gso_segs + 1);
        }
        ctx->num_elements = ctx->elements_per_skb *
                            (skb_shinfo(skb)->gso_segs + 1);
}
static struct qeth_eddp_context *
qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
                                 int hdr_len)
{
        struct qeth_eddp_context *ctx = NULL;
        u8 *addr;
        int i;

        QETH_DBF_TEXT(trace, 5, "creddpcg");
        /* create the context and allocate pages */
        ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
        if (ctx == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn1");
                return NULL;
        }
        ctx->type = QETH_LARGE_SEND_EDDP;
        qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
        if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
                QETH_DBF_TEXT(trace, 2, "ceddpcis");
                kfree(ctx);
                return NULL;
        }
        ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
        if (ctx->pages == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn2");
                kfree(ctx);
                return NULL;
        }
        for (i = 0; i < ctx->num_pages; ++i) {
                addr = (u8 *)__get_free_page(GFP_ATOMIC);
                if (addr == NULL) {
                        QETH_DBF_TEXT(trace, 2, "ceddpcn3");
                        ctx->num_pages = i;
                        qeth_eddp_free_context(ctx);
                        return NULL;
                }
                memset(addr, 0, PAGE_SIZE);
                ctx->pages[i] = addr;
        }
        ctx->elements = kcalloc(ctx->num_elements,
                                sizeof(struct qeth_eddp_element), GFP_ATOMIC);
        if (ctx->elements == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn4");
                qeth_eddp_free_context(ctx);
                return NULL;
        }
        /* reset num_elements; will be incremented again in fill_buffer to
         * reflect the number of actually used elements */
        ctx->num_elements = 0;
        return ctx;
}
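/*
 * Builds a TCP segmentation context for an IPv4 or IPv6 skb and fills it;
 * the caller receives a context with an initial reference count of one.
 */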
static struct qeth_eddp_context *
qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
                             struct qeth_hdr *qhdr)
{
        struct qeth_eddp_context *ctx = NULL;

        QETH_DBF_TEXT(trace, 5, "creddpct");
        if (skb->protocol == htons(ETH_P_IP))
                ctx = qeth_eddp_create_context_generic(card, skb,
                        (sizeof(struct qeth_hdr) +
                         ip_hdrlen(skb) +
                         tcp_hdrlen(skb)));
        else if (skb->protocol == htons(ETH_P_IPV6))
                ctx = qeth_eddp_create_context_generic(card, skb,
                        sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
                        tcp_hdrlen(skb));
        else
                QETH_DBF_TEXT(trace, 2, "cetcpinv");

        if (ctx == NULL) {
                QETH_DBF_TEXT(trace, 2, "creddpnl");
                return NULL;
        }
        if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
                QETH_DBF_TEXT(trace, 2, "ceddptfe");
                qeth_eddp_free_context(ctx);
                return NULL;
        }
        atomic_set(&ctx->refcnt, 1);
        return ctx;
}
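/*
 * Public entry point: EDDP contexts are only supported for TCP; any
 * other socket protocol is rejected.
 */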
struct qeth_eddp_context *
qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
                         struct qeth_hdr *qhdr, unsigned char sk_protocol)
{
        QETH_DBF_TEXT(trace, 5, "creddpc");
        switch (sk_protocol) {
        case IPPROTO_TCP:
                return qeth_eddp_create_context_tcp(card, skb, qhdr);
        default:
                QETH_DBF_TEXT(trace, 2, "eddpinvp");
        }
        return NULL;
}