// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021-2022 Intel Corporation
 */
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_arp.h>
#include <uapi/linux/icmp.h>

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/ieee80211.h>

#include <net/cfg80211.h>
#include <net/ip.h>

#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/udp.h>

#include "internal.h"
#include "sap.h"
#include "iwl-mei.h"
29 * Returns true if further filtering should be stopped. Only in that case
30 * pass_to_csme and rx_handler_res are set. Otherwise, next level of filters
33 static bool iwl_mei_rx_filter_eth(const struct ethhdr
*ethhdr
,
34 const struct iwl_sap_oob_filters
*filters
,
36 rx_handler_result_t
*rx_handler_res
)
38 const struct iwl_sap_eth_filter
*filt
;
40 /* This filter is not relevant for UCAST packet */
41 if (!is_multicast_ether_addr(ethhdr
->h_dest
) ||
42 is_broadcast_ether_addr(ethhdr
->h_dest
))
45 for (filt
= &filters
->eth_filters
[0];
46 filt
< &filters
->eth_filters
[0] + ARRAY_SIZE(filters
->eth_filters
);
48 /* Assume there are no enabled filter after a disabled one */
49 if (!(filt
->flags
& SAP_ETH_FILTER_ENABLED
))
52 if (compare_ether_header(filt
->mac_address
, ethhdr
->h_dest
))
55 /* Packet needs to reach the host's stack */
56 if (filt
->flags
& SAP_ETH_FILTER_COPY
)
57 *rx_handler_res
= RX_HANDLER_PASS
;
59 *rx_handler_res
= RX_HANDLER_CONSUMED
;
61 /* We have an authoritative answer, stop filtering */
62 if (filt
->flags
& SAP_ETH_FILTER_STOP
) {
70 /* MCAST frames that don't match layer 2 filters are not sent to ME */
71 *pass_to_csme
= false;
77 * Returns true iff the frame should be passed to CSME in which case
78 * rx_handler_res is set.
80 static bool iwl_mei_rx_filter_arp(struct sk_buff
*skb
,
81 const struct iwl_sap_oob_filters
*filters
,
82 rx_handler_result_t
*rx_handler_res
)
84 const struct iwl_sap_ipv4_filter
*filt
= &filters
->ipv4_filter
;
85 const struct arphdr
*arp
;
86 const __be32
*target_ip
;
87 u32 flags
= le32_to_cpu(filt
->flags
);
89 if (!pskb_may_pull(skb
, arp_hdr_len(skb
->dev
)))
94 /* Handle only IPv4 over ethernet ARP frames */
95 if (arp
->ar_hrd
!= htons(ARPHRD_ETHER
) ||
96 arp
->ar_pro
!= htons(ETH_P_IP
))
100 * After the ARP header, we have:
101 * src MAC address - 6 bytes
102 * src IP address - 4 bytes
103 * target MAC addess - 6 bytes
105 target_ip
= (const void *)((const u8
*)(arp
+ 1) +
106 ETH_ALEN
+ sizeof(__be32
) + ETH_ALEN
);
109 * ARP request is forwarded to ME only if IP address match in the
110 * ARP request's target ip field.
112 if (arp
->ar_op
== htons(ARPOP_REQUEST
) &&
113 (filt
->flags
& cpu_to_le32(SAP_IPV4_FILTER_ARP_REQ_PASS
)) &&
114 (filt
->ipv4_addr
== 0 || filt
->ipv4_addr
== *target_ip
)) {
115 if (flags
& SAP_IPV4_FILTER_ARP_REQ_COPY
)
116 *rx_handler_res
= RX_HANDLER_PASS
;
118 *rx_handler_res
= RX_HANDLER_CONSUMED
;
123 /* ARP reply is always forwarded to ME regardless of the IP */
124 if (flags
& SAP_IPV4_FILTER_ARP_RESP_PASS
&&
125 arp
->ar_op
== htons(ARPOP_REPLY
)) {
126 if (flags
& SAP_IPV4_FILTER_ARP_RESP_COPY
)
127 *rx_handler_res
= RX_HANDLER_PASS
;
129 *rx_handler_res
= RX_HANDLER_CONSUMED
;
138 iwl_mei_rx_filter_tcp_udp(struct sk_buff
*skb
, bool ip_match
,
139 const struct iwl_sap_oob_filters
*filters
,
140 rx_handler_result_t
*rx_handler_res
)
142 const struct iwl_sap_flex_filter
*filt
;
144 for (filt
= &filters
->flex_filters
[0];
145 filt
< &filters
->flex_filters
[0] + ARRAY_SIZE(filters
->flex_filters
);
147 if (!(filt
->flags
& SAP_FLEX_FILTER_ENABLED
))
151 * We are required to have a match on the IP level and we didn't
155 (SAP_FLEX_FILTER_IPV4
| SAP_FLEX_FILTER_IPV6
)) &&
159 if ((filt
->flags
& SAP_FLEX_FILTER_UDP
) &&
160 ip_hdr(skb
)->protocol
!= IPPROTO_UDP
)
163 if ((filt
->flags
& SAP_FLEX_FILTER_TCP
) &&
164 ip_hdr(skb
)->protocol
!= IPPROTO_TCP
)
168 * We must have either a TCP header or a UDP header, both
169 * starts with a source port and then a destination port.
170 * Both are big endian words.
171 * Use a UDP header and that will work for TCP as well.
173 if ((filt
->src_port
&& filt
->src_port
!= udp_hdr(skb
)->source
) ||
174 (filt
->dst_port
&& filt
->dst_port
!= udp_hdr(skb
)->dest
))
177 if (filt
->flags
& SAP_FLEX_FILTER_COPY
)
178 *rx_handler_res
= RX_HANDLER_PASS
;
180 *rx_handler_res
= RX_HANDLER_CONSUMED
;
188 static bool iwl_mei_rx_filter_ipv4(struct sk_buff
*skb
,
189 const struct iwl_sap_oob_filters
*filters
,
190 rx_handler_result_t
*rx_handler_res
)
192 const struct iwl_sap_ipv4_filter
*filt
= &filters
->ipv4_filter
;
193 const struct iphdr
*iphdr
;
194 unsigned int iphdrlen
;
197 if (!pskb_may_pull(skb
, skb_network_offset(skb
) + sizeof(*iphdr
)) ||
198 !pskb_may_pull(skb
, skb_network_offset(skb
) + ip_hdrlen(skb
)))
201 iphdrlen
= ip_hdrlen(skb
);
203 match
= !filters
->ipv4_filter
.ipv4_addr
||
204 filters
->ipv4_filter
.ipv4_addr
== iphdr
->daddr
;
206 skb_set_transport_header(skb
, skb_network_offset(skb
) + iphdrlen
);
208 switch (ip_hdr(skb
)->protocol
) {
212 * UDP header is shorter than TCP header and we look at the first bytes
213 * of the header anyway (see below).
214 * If we have a truncated TCP packet, let CSME handle this.
216 if (!pskb_may_pull(skb
, skb_transport_offset(skb
) +
217 sizeof(struct udphdr
)))
220 return iwl_mei_rx_filter_tcp_udp(skb
, match
,
221 filters
, rx_handler_res
);
224 struct icmphdr
*icmp
;
226 if (!pskb_may_pull(skb
, skb_transport_offset(skb
) + sizeof(*icmp
)))
229 icmp
= icmp_hdr(skb
);
232 * Don't pass echo requests to ME even if it wants it as we
233 * want the host to answer.
235 if ((filt
->flags
& cpu_to_le32(SAP_IPV4_FILTER_ICMP_PASS
)) &&
236 match
&& (icmp
->type
!= ICMP_ECHO
|| icmp
->code
!= 0)) {
237 if (filt
->flags
& cpu_to_le32(SAP_IPV4_FILTER_ICMP_COPY
))
238 *rx_handler_res
= RX_HANDLER_PASS
;
240 *rx_handler_res
= RX_HANDLER_CONSUMED
;
247 /* TODO: Should we have the same ICMP request logic here too? */
248 if ((filters
->icmpv6_flags
& cpu_to_le32(SAP_ICMPV6_FILTER_ENABLED
) &&
250 if (filters
->icmpv6_flags
&
251 cpu_to_le32(SAP_ICMPV6_FILTER_COPY
))
252 *rx_handler_res
= RX_HANDLER_PASS
;
254 *rx_handler_res
= RX_HANDLER_CONSUMED
;
266 static bool iwl_mei_rx_filter_ipv6(struct sk_buff
*skb
,
267 const struct iwl_sap_oob_filters
*filters
,
268 rx_handler_result_t
*rx_handler_res
)
270 *rx_handler_res
= RX_HANDLER_PASS
;
277 static rx_handler_result_t
278 iwl_mei_rx_pass_to_csme(struct sk_buff
*skb
,
279 const struct iwl_sap_oob_filters
*filters
,
282 const struct ethhdr
*ethhdr
= (void *)skb_mac_header(skb
);
283 rx_handler_result_t rx_handler_res
= RX_HANDLER_PASS
;
284 bool (*filt_handler
)(struct sk_buff
*skb
,
285 const struct iwl_sap_oob_filters
*filters
,
286 rx_handler_result_t
*rx_handler_res
);
289 * skb->data points the IP header / ARP header and the ETH header
290 * is in the headroom.
292 skb_reset_network_header(skb
);
295 * MCAST IP packets sent by us are received again here without
296 * an ETH header. Drop them here.
298 if (!skb_mac_offset(skb
))
299 return RX_HANDLER_PASS
;
301 if (skb_headroom(skb
) < sizeof(*ethhdr
))
302 return RX_HANDLER_PASS
;
304 if (iwl_mei_rx_filter_eth(ethhdr
, filters
,
305 pass_to_csme
, &rx_handler_res
))
306 return rx_handler_res
;
308 switch (skb
->protocol
) {
309 case htons(ETH_P_IP
):
310 filt_handler
= iwl_mei_rx_filter_ipv4
;
312 case htons(ETH_P_ARP
):
313 filt_handler
= iwl_mei_rx_filter_arp
;
315 case htons(ETH_P_IPV6
):
316 filt_handler
= iwl_mei_rx_filter_ipv6
;
319 *pass_to_csme
= false;
320 return rx_handler_res
;
323 *pass_to_csme
= filt_handler(skb
, filters
, &rx_handler_res
);
325 return rx_handler_res
;
328 rx_handler_result_t
iwl_mei_rx_filter(struct sk_buff
*orig_skb
,
329 const struct iwl_sap_oob_filters
*filters
,
332 rx_handler_result_t ret
;
335 ret
= iwl_mei_rx_pass_to_csme(orig_skb
, filters
, pass_to_csme
);
338 return RX_HANDLER_PASS
;
340 if (ret
== RX_HANDLER_PASS
) {
341 skb
= skb_copy(orig_skb
, GFP_ATOMIC
);
344 return RX_HANDLER_PASS
;
349 /* CSME wants the MAC header as well, push it back */
350 skb_push(skb
, skb
->data
- skb_mac_header(skb
));
353 * Add the packet that CSME wants to get to the ring. Don't send the
354 * Check Shared Area HECI message since this is not possible from the
355 * Rx context. The caller will schedule a worker to do just that.
357 iwl_mei_add_data_to_ring(skb
, false);
360 * In case we drop the packet, don't free it, the caller will do that
363 if (ret
== RX_HANDLER_PASS
)
369 #define DHCP_SERVER_PORT 67
370 #define DHCP_CLIENT_PORT 68
371 void iwl_mei_tx_copy_to_csme(struct sk_buff
*origskb
, unsigned int ivlen
)
373 struct ieee80211_hdr
*hdr
;
375 struct ethhdr ethhdr
;
378 /* Catch DHCP packets */
379 if (origskb
->protocol
!= htons(ETH_P_IP
) ||
380 ip_hdr(origskb
)->protocol
!= IPPROTO_UDP
||
381 udp_hdr(origskb
)->source
!= htons(DHCP_CLIENT_PORT
) ||
382 udp_hdr(origskb
)->dest
!= htons(DHCP_SERVER_PORT
))
386 * We could be a bit less aggressive here and not copy everything, but
387 * this is very rare anyway, do don't bother much.
389 skb
= skb_copy(origskb
, GFP_ATOMIC
);
393 skb
->protocol
= origskb
->protocol
;
395 hdr
= (void *)skb
->data
;
397 memcpy(ethhdr
.h_dest
, ieee80211_get_DA(hdr
), ETH_ALEN
);
398 memcpy(ethhdr
.h_source
, ieee80211_get_SA(hdr
), ETH_ALEN
);
401 * Remove the ieee80211 header + IV + SNAP but leave the ethertype
402 * We still have enough headroom for the sap header.
404 pskb_pull(skb
, ieee80211_hdrlen(hdr
->frame_control
) + ivlen
+ 6);
405 eth
= skb_push(skb
, sizeof(ethhdr
.h_dest
) + sizeof(ethhdr
.h_source
));
406 memcpy(eth
, ðhdr
, sizeof(ethhdr
.h_dest
) + sizeof(ethhdr
.h_source
));
408 iwl_mei_add_data_to_ring(skb
, true);
412 EXPORT_SYMBOL_GPL(iwl_mei_tx_copy_to_csme
);