drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * RMNET Data MAP protocol
 */

#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"

#define RMNET_MAP_DEAGGR_SPACING  64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
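
/* Each packet split out by rmnet_map_deaggregate() below is copied into a
 * fresh skb allocated with RMNET_MAP_DEAGGR_SPACING bytes of slack; half of
 * that is reserved as headroom so later header pushes need no reallocation.
 */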

static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
					 const void *txporthdr)
{
	__sum16 *check = NULL;

	switch (protocol) {
	case IPPROTO_TCP:
		check = &(((struct tcphdr *)txporthdr)->check);
		break;

	case IPPROTO_UDP:
		check = &(((struct udphdr *)txporthdr)->check);
		break;

	default:
		check = NULL;
		break;
	}

	return check;
}

static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
	u16 csum_value, csum_value_final;
	struct iphdr *ip4h;
	void *txporthdr;
	__be16 addend;

	ip4h = (struct iphdr *)(skb->data);
	if ((ntohs(ip4h->frag_off) & IP_MF) ||
	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
		priv->stats.csum_fragmented_pkt++;
		return -EOPNOTSUPP;
	}

	txporthdr = skb->data + ip4h->ihl * 4;

	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);

	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) {
		priv->stats.csum_skipped++;
		return 0;
	}
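
	/* The trailer checksum covers the entire IP packet as a 16-bit
	 * one's-complement sum. Strip the IP header's contribution to get
	 * the payload sum, fold in the pseudo-header sum, then subtract the
	 * checksum stored in the packet and complement: the result is the
	 * checksum the sender should have written, ready for comparison.
	 */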
	csum_value = ~ntohs(csum_trailer->csum_value);
	hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
	ip_payload_csum = csum16_sub((__force __sum16)csum_value,
				     (__force __be16)hdr_csum);

	pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
					 ntohs(ip4h->tot_len) - ip4h->ihl * 4,
					 ip4h->protocol, 0);
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip_payload_csum, addend);

	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip4h->protocol) {
		case IPPROTO_UDP:
			/* RFC 768 - DL4 1's complement rule for UDP csum 0 */
			csum_value_final = ~csum_value_final;
			break;

		case IPPROTO_TCP:
			/* DL4 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
		priv->stats.csum_ok++;
		return 0;
	} else {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}
}

#if IS_ENABLED(CONFIG_IPV6)
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
	u16 csum_value, csum_value_final;
	__be16 ip6_hdr_csum, addend;
	struct ipv6hdr *ip6h;
	void *txporthdr;
	u32 length;

	ip6h = (struct ipv6hdr *)(skb->data);

	txporthdr = skb->data + sizeof(struct ipv6hdr);
	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);

	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	csum_value = ~ntohs(csum_trailer->csum_value);
	ip6_hdr_csum = (__force __be16)
			~ntohs((__force __be16)ip_compute_csum(ip6h,
			       (int)(txporthdr - (void *)(skb->data))));
	ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
				      ip6_hdr_csum);
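
	/* For UDP, take the pseudo-header length from the UDP header's own
	 * length field; for anything else fall back to the IPv6 payload
	 * length.
	 */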
	length = (ip6h->nexthdr == IPPROTO_UDP) ?
		 ntohs(((struct udphdr *)txporthdr)->len) :
		 ntohs(ip6h->payload_len);
	pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					length, ip6h->nexthdr, 0));
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip6_payload_csum, addend);

	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip6h->nexthdr) {
		case IPPROTO_UDP:
			/* RFC 2460 section 8.1
			 * DL6 One's complement rule for UDP checksum 0
			 */
			csum_value_final = ~csum_value_final;
			break;

		case IPPROTO_TCP:
			/* DL6 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
		priv->stats.csum_ok++;
		return 0;
	} else {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}
}
#endif

static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	void *txphdr;
	u16 *csum;

	txphdr = iphdr + ip4h->ihl * 4;

	if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
		csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
		*csum = ~(*csum);
	}
}
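
/* For CHECKSUM_PARTIAL packets the stack leaves the pseudo-header checksum
 * in the transport checksum field. The helper above (and its IPv6 twin
 * below) inverts that seed in place before the packet is handed to the MAP
 * checksum offload engine.
 */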

static void
rmnet_map_ipv4_ul_csum_header(void *iphdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	__be16 *hdr = (__be16 *)ul_header, offset;

	offset = htons((__force u16)(skb_transport_header(skb) -
				     (unsigned char *)iphdr));
	ul_header->csum_start_offset = offset;
	ul_header->csum_insert_offset = skb->csum_offset;
	ul_header->csum_enabled = 1;
	if (ip4h->protocol == IPPROTO_UDP)
		ul_header->udp_ind = 1;
	else
		ul_header->udp_ind = 0;

	/* Changing remaining fields to network order */
	hdr++;
	*hdr = htons((__force u16)*hdr);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
}

#if IS_ENABLED(CONFIG_IPV6)
static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
	void *txphdr;
	u16 *csum;

	txphdr = ip6hdr + sizeof(struct ipv6hdr);

	if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
		csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
		*csum = ~(*csum);
	}
}

static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
	__be16 *hdr = (__be16 *)ul_header, offset;

	offset = htons((__force u16)(skb_transport_header(skb) -
				     (unsigned char *)ip6hdr));
	ul_header->csum_start_offset = offset;
	ul_header->csum_insert_offset = skb->csum_offset;
	ul_header->csum_enabled = 1;

	if (ip6h->nexthdr == IPPROTO_UDP)
		ul_header->udp_ind = 1;
	else
		ul_header->udp_ind = 0;

	/* Changing remaining fields to network order */
	hdr++;
	*hdr = htons((__force u16)*hdr);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
}
#endif

/* Adds MAP header to front of skb->data
 * Padding is calculated and set appropriately in MAP header. Mux ID is
 * initialized to 0.
 */
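/* Example: a 61-byte MAP payload is padded with three trailing zero bytes
 * so the frame ends on a 4-byte boundary; pkt_len becomes htons(64) and
 * pad_len is set to 3.
 */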
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
						  int hdrlen, int pad)
{
	struct rmnet_map_header *map_header;
	u32 padding, map_datalen;
	u8 *padbytes;

	map_datalen = skb->len - hdrlen;
	map_header = (struct rmnet_map_header *)
			skb_push(skb, sizeof(struct rmnet_map_header));
	memset(map_header, 0, sizeof(struct rmnet_map_header));

	if (pad == RMNET_MAP_NO_PAD_BYTES) {
		map_header->pkt_len = htons(map_datalen);
		return map_header;
	}

	padding = ALIGN(map_datalen, 4) - map_datalen;

	if (padding == 0)
		goto done;

	if (skb_tailroom(skb) < padding)
		return NULL;

	padbytes = (u8 *)skb_put(skb, padding);
	memset(padbytes, 0, padding);

done:
	map_header->pkt_len = htons(map_datalen + padding);
	map_header->pad_len = padding & 0x3F;

	return map_header;
}

/* Deaggregates a single packet
 * A whole new buffer is allocated for each portion of an aggregated frame.
 * Caller should keep calling deaggregate() on the source skb until NULL is
 * returned, indicating that there are no more packets to deaggregate. Caller
 * is responsible for freeing the original skb.
 */
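/* A minimal caller sketch, mirroring rmnet's ingress path (the per-packet
 * handler name here is hypothetical):
 *
 *	while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
 *		handle_one_map_packet(skbn, port);
 *	consume_skb(skb);
 */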
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_port *port)
{
	struct rmnet_map_header *maph;
	struct sk_buff *skbn;
	u32 packet_len;

	if (skb->len == 0)
		return NULL;

	maph = (struct rmnet_map_header *)skb->data;
	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header);

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);

	if (((int)skb->len - (int)packet_len) < 0)
		return NULL;

	/* Some hardware can send us empty frames. Catch them */
	if (ntohs(maph->pkt_len) == 0)
		return NULL;

	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
	if (!skbn)
		return NULL;

	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
	skb_put(skbn, packet_len);
	memcpy(skbn->data, skb->data, packet_len);
	skb_pull(skb, packet_len);

	return skbn;
}

/* Validates packet checksums. Function takes a pointer to
 * the beginning of a buffer which contains the IP payload +
 * padding + checksum trailer.
 * Only IPv4 and IPv6 are supported along with TCP & UDP.
 * Fragmented or tunneled packets are not supported.
 */
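/* Buffer layout handed to this function (a sketch; len counts the bytes in
 * front of the trailer, i.e. the IP packet plus any MAP padding):
 *
 *	| IP packet + padding (len bytes) | rmnet_map_dl_csum_trailer |
 */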
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	struct rmnet_map_dl_csum_trailer *csum_trailer;

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
		priv->stats.csum_sw++;
		return -EOPNOTSUPP;
	}

	csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);

	if (!csum_trailer->valid) {
		priv->stats.csum_valid_unset++;
		return -EINVAL;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
#else
		priv->stats.csum_err_invalid_ip_version++;
		return -EPROTONOSUPPORT;
#endif
	} else {
		priv->stats.csum_err_invalid_ip_version++;
		return -EPROTONOSUPPORT;
	}

	return 0;
}

/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 */
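/* Resulting frame layout (a sketch): the meta info header sits in front of
 * the IP header, csum_start_offset points at the transport header relative
 * to the start of the IP header, and csum_insert_offset locates the
 * checksum field within it:
 *
 *	| rmnet_map_ul_csum_header | IP hdr | transport hdr | payload |
 */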
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_ul_csum_header *ul_header;
	void *iphdr;

	ul_header = (struct rmnet_map_ul_csum_header *)
		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));

	if (unlikely(!(orig_dev->features &
		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
		goto sw_csum;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		iphdr = (char *)ul_header +
			sizeof(struct rmnet_map_ul_csum_header);

		if (skb->protocol == htons(ETH_P_IP)) {
			rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
			return;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
			rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
			return;
#else
			priv->stats.csum_err_invalid_ip_version++;
			goto sw_csum;
#endif
		} else {
			priv->stats.csum_err_invalid_ip_version++;
		}
	}

sw_csum:
	ul_header->csum_start_offset = 0;
	ul_header->csum_insert_offset = 0;
	ul_header->csum_enabled = 0;
	ul_header->udp_ind = 0;

	priv->stats.csum_sw++;
}