// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* gw.c - CAN frame Gateway/Router/Bridge with netlink interface
 *
 * Copyright (c) 2019 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/gw.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#define CAN_GW_NAME "can-gw"

MODULE_DESCRIPTION("PF_CAN netlink gateway");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS(CAN_GW_NAME);

#define CGW_MIN_HOPS 1
#define CGW_MAX_HOPS 6
#define CGW_DEFAULT_HOPS 1

static unsigned int max_hops __read_mostly = CGW_DEFAULT_HOPS;
module_param(max_hops, uint, 0444);
MODULE_PARM_DESC(max_hops,
		 "maximum " CAN_GW_NAME " routing hops for CAN frames "
		 "(valid values: " __stringify(CGW_MIN_HOPS) "-"
		 __stringify(CGW_MAX_HOPS) " hops, "
		 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
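/* Example (illustrative): max_hops is read-only at runtime (mode 0444),
 * so it is typically given at module load time, e.g. via
 * "modprobe can-gw max_hops=2" (invocation shown as an assumption).
 * Out-of-range values are clamped to CGW_MIN_HOPS..CGW_MAX_HOPS in
 * cgw_module_init().
 */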
static struct notifier_block notifier;
static struct kmem_cache *cgw_cache __read_mostly;
/* structure that contains the (on-the-fly) CAN frame modifications */
struct cf_mod {
	struct {
		struct canfd_frame and;
		struct canfd_frame or;
		struct canfd_frame xor;
		struct canfd_frame set;
	} modframe;
	struct {
		u8 and;
		u8 or;
		u8 xor;
		u8 set;
	} modtype;
	void (*modfunc[MAX_MODFUNCTIONS])(struct canfd_frame *cf,
					  struct cf_mod *mod);

	/* CAN frame checksum calculation after CAN frame modifications */
	struct {
		struct cgw_csum_xor xor;
		struct cgw_csum_crc8 crc8;
	} csum;
	struct {
		void (*xor)(struct canfd_frame *cf,
			    struct cgw_csum_xor *xor);
		void (*crc8)(struct canfd_frame *cf,
			     struct cgw_csum_crc8 *crc8);
	} csumfunc;
	u32 uid;
};
/* So far we just support CAN -> CAN routing and frame modifications.
 *
 * The internal can_can_gw structure contains data and attributes for
 * a CAN -> CAN gateway job.
 */
struct can_can_gw {
	struct can_filter filter;
	int src_idx;
	int dst_idx;
};
/* list entry for CAN gateways jobs */
struct cgw_job {
	struct hlist_node list;
	struct rcu_head rcu;
	u32 handled_frames;
	u32 dropped_frames;
	u32 deleted_frames;
	struct cf_mod mod;
	union {
		/* CAN frame data source */
		struct net_device *dev;
	} src;
	union {
		/* CAN frame data destination */
		struct net_device *dev;
	} dst;
	union {
		struct can_can_gw ccgw;
	};
	u8 gwtype;
	u8 limit_hops;
	u16 flags;
};
/* modification functions that are invoked in the hot path in can_can_gw_rcv */

#define MODFUNC(func, op) static void func(struct canfd_frame *cf, \
					   struct cf_mod *mod) { op ; }
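/* Example (illustrative): MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id)
 * expands to
 *
 *   static void mod_and_id(struct canfd_frame *cf, struct cf_mod *mod)
 *   { cf->can_id &= mod->modframe.and.can_id ; }
 *
 * so each generated helper applies exactly one AND/OR/XOR/SET operation to
 * one frame element and matches the cf_mod modfunc[] function pointer type.
 */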
MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id)
MODFUNC(mod_and_len, cf->len &= mod->modframe.and.len)
MODFUNC(mod_and_flags, cf->flags &= mod->modframe.and.flags)
MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data)
MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id)
MODFUNC(mod_or_len, cf->len |= mod->modframe.or.len)
MODFUNC(mod_or_flags, cf->flags |= mod->modframe.or.flags)
MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data)
MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id)
MODFUNC(mod_xor_len, cf->len ^= mod->modframe.xor.len)
MODFUNC(mod_xor_flags, cf->flags ^= mod->modframe.xor.flags)
MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data)
MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id)
MODFUNC(mod_set_len, cf->len = mod->modframe.set.len)
MODFUNC(mod_set_flags, cf->flags = mod->modframe.set.flags)
MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data)
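/* Note (illustrative): Classic CAN payloads are at most CAN_MAX_DLEN (8)
 * bytes, so the data modifications above fit into a single u64 access.
 * CAN FD payloads are up to CANFD_MAX_DLEN (64) bytes, which is why the
 * *_fddata() helpers below walk the payload in 8-byte (u64) steps.
 */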
static void mod_and_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
	int i;

	for (i = 0; i < CANFD_MAX_DLEN; i += 8)
		*(u64 *)(cf->data + i) &= *(u64 *)(mod->modframe.and.data + i);
}

static void mod_or_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
	int i;

	for (i = 0; i < CANFD_MAX_DLEN; i += 8)
		*(u64 *)(cf->data + i) |= *(u64 *)(mod->modframe.or.data + i);
}

static void mod_xor_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
	int i;

	for (i = 0; i < CANFD_MAX_DLEN; i += 8)
		*(u64 *)(cf->data + i) ^= *(u64 *)(mod->modframe.xor.data + i);
}

static void mod_set_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
	memcpy(cf->data, mod->modframe.set.data, CANFD_MAX_DLEN);
}
/* retrieve valid CC DLC value and store it into 'len' */
static void mod_retrieve_ccdlc(struct canfd_frame *cf)
{
	struct can_frame *ccf = (struct can_frame *)cf;

	/* len8_dlc is only valid if len == CAN_MAX_DLEN */
	if (ccf->len != CAN_MAX_DLEN)
		return;

	/* do we have a valid len8_dlc value from 9 .. 15 ? */
	if (ccf->len8_dlc > CAN_MAX_DLEN && ccf->len8_dlc <= CAN_MAX_RAW_DLC)
		ccf->len = ccf->len8_dlc;
}
/* convert valid CC DLC value in 'len' into struct can_frame elements */
static void mod_store_ccdlc(struct canfd_frame *cf)
{
	struct can_frame *ccf = (struct can_frame *)cf;

	/* clear potential leftovers */
	ccf->len8_dlc = 0;

	/* plain data length 0 .. 8 - that was easy */
	if (ccf->len <= CAN_MAX_DLEN)
		return;

	/* potentially broken values are caught in can_can_gw_rcv() */
	if (ccf->len > CAN_MAX_RAW_DLC)
		return;

	/* we have a valid dlc value from 9 .. 15 in ccf->len */
	ccf->len8_dlc = ccf->len;
	ccf->len = CAN_MAX_DLEN;
}
static void mod_and_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
{
	mod_retrieve_ccdlc(cf);
	mod_and_len(cf, mod);
	mod_store_ccdlc(cf);
}

static void mod_or_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
{
	mod_retrieve_ccdlc(cf);
	mod_or_len(cf, mod);
	mod_store_ccdlc(cf);
}

static void mod_xor_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
{
	mod_retrieve_ccdlc(cf);
	mod_xor_len(cf, mod);
	mod_store_ccdlc(cf);
}

static void mod_set_ccdlc(struct canfd_frame *cf, struct cf_mod *mod)
{
	mod_set_len(cf, mod);
	mod_store_ccdlc(cf);
}
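/* Worked example (illustrative) of the CC DLC handling above: a received
 * classic CAN frame with len == 8 and len8_dlc == 12 is first widened by
 * mod_retrieve_ccdlc() to len = 12, then the AND/OR/XOR length modification
 * is applied, and mod_store_ccdlc() folds the result back into len (capped
 * at CAN_MAX_DLEN) plus len8_dlc for the 9 .. 15 range. mod_set_ccdlc()
 * skips the retrieve step because the length is overwritten anyway.
 */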
static void canframecpy(struct canfd_frame *dst, struct can_frame *src)
{
	/* Copy the struct members separately to ensure that no uninitialized
	 * data are copied in the 3 bytes hole of the struct. This is needed
	 * to make easy compares of the data in the struct cf_mod.
	 */

	dst->can_id = src->can_id;
	dst->len = src->len;
	*(u64 *)dst->data = *(u64 *)src->data;
}
static void canfdframecpy(struct canfd_frame *dst, struct canfd_frame *src)
{
	/* Copy the struct members separately to ensure that no uninitialized
	 * data are copied in the 2 bytes hole of the struct. This is needed
	 * to make easy compares of the data in the struct cf_mod.
	 */

	dst->can_id = src->can_id;
	dst->flags = src->flags;
	dst->len = src->len;
	memcpy(dst->data, src->data, CANFD_MAX_DLEN);
}
static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re, struct rtcanmsg *r)
{
	s8 dlen = CAN_MAX_DLEN;

	if (r->flags & CGW_FLAGS_CAN_FD)
		dlen = CANFD_MAX_DLEN;

	/* absolute dlc values 0 .. 7 => 0 .. 7, e.g. data [0]
	 * relative to received dlc -1 .. -8 :
	 * e.g. for received dlc = 8
	 * -1 => index = 7 (data[7])
	 * -3 => index = 5 (data[5])
	 * -8 => index = 0 (data[0])
	 */
	if (fr >= -dlen && fr < dlen &&
	    to >= -dlen && to < dlen &&
	    re >= -dlen && re < dlen)
		return 0;
	else
		return -EINVAL;
}

static inline int calc_idx(int idx, int rx_len)
{
	if (idx < 0)
		return rx_len + idx;
	else
		return idx;
}
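/* Index resolution example (illustrative): for a received frame with
 * cf->len == 8, calc_idx(3, 8) == 3 (absolute index), while
 * calc_idx(-1, 8) == 7 and calc_idx(-8, 8) == 0 (relative to the received
 * length), matching the table in cgw_chk_csum_parms() above.
 */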
static void cgw_csum_xor_rel(struct canfd_frame *cf, struct cgw_csum_xor *xor)
{
	int from = calc_idx(xor->from_idx, cf->len);
	int to = calc_idx(xor->to_idx, cf->len);
	int res = calc_idx(xor->result_idx, cf->len);
	u8 val = xor->init_xor_val;
	int i;

	if (from < 0 || to < 0 || res < 0)
		return;

	if (from <= to) {
		for (i = from; i <= to; i++)
			val ^= cf->data[i];
	} else {
		for (i = from; i >= to; i--)
			val ^= cf->data[i];
	}

	cf->data[res] = val;
}
static void cgw_csum_xor_pos(struct canfd_frame *cf, struct cgw_csum_xor *xor)
{
	u8 val = xor->init_xor_val;
	int i;

	for (i = xor->from_idx; i <= xor->to_idx; i++)
		val ^= cf->data[i];

	cf->data[xor->result_idx] = val;
}

static void cgw_csum_xor_neg(struct canfd_frame *cf, struct cgw_csum_xor *xor)
{
	u8 val = xor->init_xor_val;
	int i;

	for (i = xor->from_idx; i >= xor->to_idx; i--)
		val ^= cf->data[i];

	cf->data[xor->result_idx] = val;
}
static void cgw_csum_crc8_rel(struct canfd_frame *cf,
			      struct cgw_csum_crc8 *crc8)
{
	int from = calc_idx(crc8->from_idx, cf->len);
	int to = calc_idx(crc8->to_idx, cf->len);
	int res = calc_idx(crc8->result_idx, cf->len);
	u8 crc = crc8->init_crc_val;
	int i;

	if (from < 0 || to < 0 || res < 0)
		return;

	if (crc8->from_idx <= crc8->to_idx) {
		for (i = crc8->from_idx; i <= crc8->to_idx; i++)
			crc = crc8->crctab[crc ^ cf->data[i]];
	} else {
		for (i = crc8->from_idx; i >= crc8->to_idx; i--)
			crc = crc8->crctab[crc ^ cf->data[i]];
	}

	switch (crc8->profile) {
	case CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[0]];
		break;
	case CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
		break;
	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val;
}

static void cgw_csum_crc8_pos(struct canfd_frame *cf,
			      struct cgw_csum_crc8 *crc8)
{
	u8 crc = crc8->init_crc_val;
	int i;

	for (i = crc8->from_idx; i <= crc8->to_idx; i++)
		crc = crc8->crctab[crc ^ cf->data[i]];

	switch (crc8->profile) {
	case CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[0]];
		break;
	case CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
		break;
	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val;
}

static void cgw_csum_crc8_neg(struct canfd_frame *cf,
			      struct cgw_csum_crc8 *crc8)
{
	u8 crc = crc8->init_crc_val;
	int i;

	for (i = crc8->from_idx; i >= crc8->to_idx; i--)
		crc = crc8->crctab[crc ^ cf->data[i]];

	switch (crc8->profile) {
	case CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[0]];
		break;
	case CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
		break;
	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val;
}
/* the receive & process & send function */
static void can_can_gw_rcv(struct sk_buff *skb, void *data)
{
	struct cgw_job *gwj = (struct cgw_job *)data;
	struct canfd_frame *cf;
	struct sk_buff *nskb;
	int modidx = 0;

	/* process strictly Classic CAN or CAN FD frames */
	if (gwj->flags & CGW_FLAGS_CAN_FD) {
		if (skb->len != CANFD_MTU)
			return;
	} else {
		if (skb->len != CAN_MTU)
			return;
	}

	/* Do not handle CAN frames routed more than 'max_hops' times.
	 * In general we should never catch this delimiter which is intended
	 * to cover a misconfiguration protection (e.g. circular CAN routes).
	 *
	 * The Controller Area Network controllers only accept CAN frames with
	 * correct CRCs - which are not visible in the controller registers.
	 * According to skbuff.h documentation the csum_start element for IP
	 * checksums is undefined/unused when ip_summed == CHECKSUM_UNNECESSARY.
	 * Only CAN skbs can be processed here which already have this property.
	 */

#define cgw_hops(skb) ((skb)->csum_start)

	BUG_ON(skb->ip_summed != CHECKSUM_UNNECESSARY);

	if (cgw_hops(skb) >= max_hops) {
		/* indicate deleted frames due to misconfiguration */
		gwj->deleted_frames++;
		return;
	}

	if (!(gwj->dst.dev->flags & IFF_UP)) {
		gwj->dropped_frames++;
		return;
	}

	/* is sending the skb back to the incoming interface not allowed? */
	if (!(gwj->flags & CGW_FLAGS_CAN_IIF_TX_OK) &&
	    can_skb_prv(skb)->ifindex == gwj->dst.dev->ifindex)
		return;

	/* clone the given skb, which has not been done in can_rcv()
	 *
	 * When there is at least one modification function activated,
	 * we need to copy the skb as we want to modify skb->data.
	 */
	if (gwj->mod.modfunc[0])
		nskb = skb_copy(skb, GFP_ATOMIC);
	else
		nskb = skb_clone(skb, GFP_ATOMIC);

	if (!nskb) {
		gwj->dropped_frames++;
		return;
	}

	/* put the incremented hop counter in the cloned skb */
	cgw_hops(nskb) = cgw_hops(skb) + 1;

	/* first processing of this CAN frame -> adjust to private hop limit */
	if (gwj->limit_hops && cgw_hops(nskb) == 1)
		cgw_hops(nskb) = max_hops - gwj->limit_hops + 1;

	nskb->dev = gwj->dst.dev;

	/* pointer to modifiable CAN frame */
	cf = (struct canfd_frame *)nskb->data;

	/* perform preprocessed modification functions if there are any */
	while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
		(*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);

	/* Has the CAN frame been modified? */
	if (modidx) {
		/* get available space for the processed CAN frame type */
		int max_len = nskb->len - offsetof(struct canfd_frame, data);

		/* dlc may have changed, make sure it fits to the CAN frame */
		if (cf->len > max_len) {
			/* delete frame due to misconfiguration */
			gwj->deleted_frames++;
			kfree_skb(nskb);
			return;
		}

		/* check for checksum updates */
		if (gwj->mod.csumfunc.crc8)
			(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);

		if (gwj->mod.csumfunc.xor)
			(*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
	}

	/* clear the skb timestamp if not configured the other way */
	if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP))
		nskb->tstamp = 0;

	/* send to netdevice */
	if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))
		gwj->dropped_frames++;
	else
		gwj->handled_frames++;
}
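/* Hop accounting example (illustrative): with max_hops == 6 and a job
 * configured with limit_hops == 2, the first gateway rewrites the hop
 * counter of a fresh frame from 1 to max_hops - limit_hops + 1 == 5.
 * A second gateway still forwards it (5 < max_hops) and bumps it to 6,
 * and any further gateway deletes it (6 >= max_hops), so the frame
 * traverses at most two routing hops.
 */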
static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
{
	return can_rx_register(net, gwj->src.dev, gwj->ccgw.filter.can_id,
			       gwj->ccgw.filter.can_mask, can_can_gw_rcv,
			       gwj, "gw", NULL);
}

static inline void cgw_unregister_filter(struct net *net, struct cgw_job *gwj)
{
	can_rx_unregister(net, gwj->src.dev, gwj->ccgw.filter.can_id,
			  gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj);
}
static int cgw_notifier(struct notifier_block *nb,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	if (msg == NETDEV_UNREGISTER) {
		struct cgw_job *gwj = NULL;
		struct hlist_node *nx;

		ASSERT_RTNL();

		hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
			if (gwj->src.dev == dev || gwj->dst.dev == dev) {
				hlist_del(&gwj->list);
				cgw_unregister_filter(net, gwj);
				kmem_cache_free(cgw_cache, gwj);
			}
		}
	}

	return NOTIFY_DONE;
}
static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
		       u32 pid, u32 seq, int flags)
{
	struct rtcanmsg *rtcan;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtcan = nlmsg_data(nlh);
	rtcan->can_family = AF_CAN;
	rtcan->gwtype = gwj->gwtype;
	rtcan->flags = gwj->flags;

	/* add statistics if available */

	if (gwj->handled_frames) {
		if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
			goto cancel;
	}

	if (gwj->dropped_frames) {
		if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
			goto cancel;
	}

	if (gwj->deleted_frames) {
		if (nla_put_u32(skb, CGW_DELETED, gwj->deleted_frames) < 0)
			goto cancel;
	}

	/* check non default settings of attributes */

	if (gwj->limit_hops) {
		if (nla_put_u8(skb, CGW_LIM_HOPS, gwj->limit_hops) < 0)
			goto cancel;
	}

	if (gwj->flags & CGW_FLAGS_CAN_FD) {
		struct cgw_fdframe_mod mb;

		if (gwj->mod.modtype.and) {
			memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.and;
			if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.or) {
			memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.or;
			if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.xor) {
			memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.xor;
			if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.set) {
			memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.set;
			if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0)
				goto cancel;
		}
	} else {
		struct cgw_frame_mod mb;

		if (gwj->mod.modtype.and) {
			memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.and;
			if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.or) {
			memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.or;
			if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.xor) {
			memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.xor;
			if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.set) {
			memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.set;
			if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
				goto cancel;
		}
	}

	if (gwj->mod.uid) {
		if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0)
			goto cancel;
	}

	if (gwj->mod.csumfunc.crc8) {
		if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
			    &gwj->mod.csum.crc8) < 0)
			goto cancel;
	}

	if (gwj->mod.csumfunc.xor) {
		if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
			    &gwj->mod.csum.xor) < 0)
			goto cancel;
	}

	if (gwj->gwtype == CGW_TYPE_CAN_CAN) {
		if (gwj->ccgw.filter.can_id || gwj->ccgw.filter.can_mask) {
			if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
				    &gwj->ccgw.filter) < 0)
				goto cancel;
		}

		if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
			goto cancel;

		if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
			goto cancel;
	}

	nlmsg_end(skb, nlh);
	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
/* Dump information about all CAN gateway jobs, in response to RTM_GETROUTE */
static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct cgw_job *gwj = NULL;
	int idx = 0;
	int s_idx = cb->args[0];

	rcu_read_lock();
	hlist_for_each_entry_rcu(gwj, &net->can.cgw_list, list) {
		if (idx < s_idx)
			goto cont;

		if (cgw_put_job(skb, gwj, RTM_NEWROUTE,
				NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0)
			break;
cont:
		idx++;
	}
	rcu_read_unlock();

	cb->args[0] = idx;

	return skb->len;
}
static const struct nla_policy cgw_policy[CGW_MAX + 1] = {
	[CGW_MOD_AND]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_OR]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_XOR]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_SET]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_CS_XOR]	= { .len = sizeof(struct cgw_csum_xor) },
	[CGW_CS_CRC8]	= { .len = sizeof(struct cgw_csum_crc8) },
	[CGW_SRC_IF]	= { .type = NLA_U32 },
	[CGW_DST_IF]	= { .type = NLA_U32 },
	[CGW_FILTER]	= { .len = sizeof(struct can_filter) },
	[CGW_LIM_HOPS]	= { .type = NLA_U8 },
	[CGW_MOD_UID]	= { .type = NLA_U32 },
	[CGW_FDMOD_AND]	= { .len = sizeof(struct cgw_fdframe_mod) },
	[CGW_FDMOD_OR]	= { .len = sizeof(struct cgw_fdframe_mod) },
	[CGW_FDMOD_XOR]	= { .len = sizeof(struct cgw_fdframe_mod) },
	[CGW_FDMOD_SET]	= { .len = sizeof(struct cgw_fdframe_mod) },
};
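/* Userspace sketch (illustrative): a CAN -> CAN routing job is created by
 * sending an RTM_NEWROUTE message on an NETLINK_ROUTE socket, carrying a
 * struct rtcanmsg with can_family = AF_CAN and gwtype = CGW_TYPE_CAN_CAN,
 * followed by the mandatory CGW_SRC_IF/CGW_DST_IF u32 attributes and
 * optional CGW_FILTER, CGW_MOD_*/CGW_FDMOD_*, CGW_CS_* and CGW_LIM_HOPS
 * attributes as declared in cgw_policy[] above. The cangw tool from
 * can-utils wraps this, e.g. "cangw -A -s can0 -d can1" (command line
 * shown as an assumption; exact option syntax depends on the tool version).
 */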
/* check for common and gwtype specific attributes */
static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
			  u8 gwtype, void *gwtypeattr, u8 *limhops)
{
	struct nlattr *tb[CGW_MAX + 1];
	struct rtcanmsg *r = nlmsg_data(nlh);
	int modidx = 0;
	int err = 0;

	/* initialize modification & checksum data space */
	memset(mod, 0, sizeof(*mod));

	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtcanmsg), tb,
				     CGW_MAX, cgw_policy, NULL);
	if (err < 0)
		return err;

	if (tb[CGW_LIM_HOPS]) {
		*limhops = nla_get_u8(tb[CGW_LIM_HOPS]);

		if (*limhops < 1 || *limhops > max_hops)
			return -EINVAL;
	}

	/* check for AND/OR/XOR/SET modifications */
	if (r->flags & CGW_FLAGS_CAN_FD) {
		struct cgw_fdframe_mod mb;

		if (tb[CGW_FDMOD_AND]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_AND], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.and, &mb.cf);
			mod->modtype.and = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_and_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_and_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_and_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_and_fddata;
		}

		if (tb[CGW_FDMOD_OR]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_OR], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.or, &mb.cf);
			mod->modtype.or = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_or_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_or_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_or_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_or_fddata;
		}

		if (tb[CGW_FDMOD_XOR]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_XOR], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.xor, &mb.cf);
			mod->modtype.xor = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_xor_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_xor_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_xor_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_xor_fddata;
		}

		if (tb[CGW_FDMOD_SET]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_SET], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.set, &mb.cf);
			mod->modtype.set = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_set_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_set_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_set_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_set_fddata;
		}
	} else {
		struct cgw_frame_mod mb;

		if (tb[CGW_MOD_AND]) {
			nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.and, &mb.cf);
			mod->modtype.and = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_and_id;

			if (mb.modtype & CGW_MOD_DLC)
				mod->modfunc[modidx++] = mod_and_ccdlc;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_and_data;
		}

		if (tb[CGW_MOD_OR]) {
			nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.or, &mb.cf);
			mod->modtype.or = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_or_id;

			if (mb.modtype & CGW_MOD_DLC)
				mod->modfunc[modidx++] = mod_or_ccdlc;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_or_data;
		}

		if (tb[CGW_MOD_XOR]) {
			nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.xor, &mb.cf);
			mod->modtype.xor = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_xor_id;

			if (mb.modtype & CGW_MOD_DLC)
				mod->modfunc[modidx++] = mod_xor_ccdlc;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_xor_data;
		}

		if (tb[CGW_MOD_SET]) {
			nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.set, &mb.cf);
			mod->modtype.set = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_set_id;

			if (mb.modtype & CGW_MOD_DLC)
				mod->modfunc[modidx++] = mod_set_ccdlc;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_set_data;
		}
	}

	/* check for checksum operations after CAN frame modifications */
	if (modidx) {
		if (tb[CGW_CS_CRC8]) {
			struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]);

			err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
						 c->result_idx, r);
			if (err)
				return err;

			nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8],
				   CGW_CS_CRC8_LEN);

			/* select dedicated processing function to reduce
			 * runtime operations in receive hot path.
			 */
			if (c->from_idx < 0 || c->to_idx < 0 ||
			    c->result_idx < 0)
				mod->csumfunc.crc8 = cgw_csum_crc8_rel;
			else if (c->from_idx <= c->to_idx)
				mod->csumfunc.crc8 = cgw_csum_crc8_pos;
			else
				mod->csumfunc.crc8 = cgw_csum_crc8_neg;
		}

		if (tb[CGW_CS_XOR]) {
			struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]);

			err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
						 c->result_idx, r);
			if (err)
				return err;

			nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR],
				   CGW_CS_XOR_LEN);

			/* select dedicated processing function to reduce
			 * runtime operations in receive hot path.
			 */
			if (c->from_idx < 0 || c->to_idx < 0 ||
			    c->result_idx < 0)
				mod->csumfunc.xor = cgw_csum_xor_rel;
			else if (c->from_idx <= c->to_idx)
				mod->csumfunc.xor = cgw_csum_xor_pos;
			else
				mod->csumfunc.xor = cgw_csum_xor_neg;
		}

		if (tb[CGW_MOD_UID])
			nla_memcpy(&mod->uid, tb[CGW_MOD_UID], sizeof(u32));
	}

	if (gwtype == CGW_TYPE_CAN_CAN) {
		/* check CGW_TYPE_CAN_CAN specific attributes */
		struct can_can_gw *ccgw = (struct can_can_gw *)gwtypeattr;

		memset(ccgw, 0, sizeof(*ccgw));

		/* check for can_filter in attributes */
		if (tb[CGW_FILTER])
			nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
				   sizeof(struct can_filter));

		err = -ENODEV;

		/* specifying two interfaces is mandatory */
		if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
			return err;

		ccgw->src_idx = nla_get_u32(tb[CGW_SRC_IF]);
		ccgw->dst_idx = nla_get_u32(tb[CGW_DST_IF]);

		/* both indices set to 0 for flushing all routing entries */
		if (!ccgw->src_idx && !ccgw->dst_idx)
			return 0;

		/* only one index set to 0 is an error */
		if (!ccgw->src_idx || !ccgw->dst_idx)
			return err;
	}

	/* add the checks for other gwtypes here */

	return 0;
}
static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtcanmsg *r;
	struct cgw_job *gwj;
	struct cf_mod mod;
	struct can_can_gw ccgw;
	u8 limhops = 0;
	int err = 0;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (nlmsg_len(nlh) < sizeof(*r))
		return -EINVAL;

	r = nlmsg_data(nlh);
	if (r->can_family != AF_CAN)
		return -EPFNOSUPPORT;

	/* so far we only support CAN -> CAN routings */
	if (r->gwtype != CGW_TYPE_CAN_CAN)
		return -EINVAL;

	err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
	if (err < 0)
		return err;

	if (mod.uid) {
		ASSERT_RTNL();

		/* check for updating an existing job with identical uid */
		hlist_for_each_entry(gwj, &net->can.cgw_list, list) {
			if (gwj->mod.uid != mod.uid)
				continue;

			/* interfaces & filters must be identical */
			if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
				return -EINVAL;

			/* update modifications with disabled softirq & quit */
			local_bh_disable();
			memcpy(&gwj->mod, &mod, sizeof(mod));
			local_bh_enable();
			return 0;
		}
	}

	/* ifindex == 0 is not allowed for job creation */
	if (!ccgw.src_idx || !ccgw.dst_idx)
		return -ENODEV;

	gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
	if (!gwj)
		return -ENOMEM;

	gwj->handled_frames = 0;
	gwj->dropped_frames = 0;
	gwj->deleted_frames = 0;
	gwj->flags = r->flags;
	gwj->gwtype = r->gwtype;
	gwj->limit_hops = limhops;

	/* insert already parsed information */
	memcpy(&gwj->mod, &mod, sizeof(mod));
	memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw));

	err = -ENODEV;

	gwj->src.dev = __dev_get_by_index(net, gwj->ccgw.src_idx);
	if (!gwj->src.dev)
		goto out;

	if (gwj->src.dev->type != ARPHRD_CAN)
		goto out;

	gwj->dst.dev = __dev_get_by_index(net, gwj->ccgw.dst_idx);
	if (!gwj->dst.dev)
		goto out;

	if (gwj->dst.dev->type != ARPHRD_CAN)
		goto out;

	ASSERT_RTNL();

	err = cgw_register_filter(net, gwj);
	if (!err)
		hlist_add_head_rcu(&gwj->list, &net->can.cgw_list);
out:
	if (err)
		kmem_cache_free(cgw_cache, gwj);

	return err;
}
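/* Update-in-place example (illustrative): creating a second job with the
 * same CGW_MOD_UID, source/destination interfaces and filter as an existing
 * one does not add a new list entry; the code above only swaps the cf_mod
 * data under local_bh_disable(), so the receive hot path never sees a
 * half-updated modification set.
 */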
static void cgw_remove_all_jobs(struct net *net)
{
	struct cgw_job *gwj = NULL;
	struct hlist_node *nx;

	ASSERT_RTNL();

	hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
		hlist_del(&gwj->list);
		cgw_unregister_filter(net, gwj);
		kmem_cache_free(cgw_cache, gwj);
	}
}
static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct cgw_job *gwj = NULL;
	struct hlist_node *nx;
	struct rtcanmsg *r;
	struct cf_mod mod;
	struct can_can_gw ccgw;
	u8 limhops = 0;
	int err = 0;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (nlmsg_len(nlh) < sizeof(*r))
		return -EINVAL;

	r = nlmsg_data(nlh);
	if (r->can_family != AF_CAN)
		return -EPFNOSUPPORT;

	/* so far we only support CAN -> CAN routings */
	if (r->gwtype != CGW_TYPE_CAN_CAN)
		return -EINVAL;

	err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
	if (err < 0)
		return err;

	/* two interface indices both set to 0 => remove all entries */
	if (!ccgw.src_idx && !ccgw.dst_idx) {
		cgw_remove_all_jobs(net);
		return 0;
	}

	err = -EINVAL;

	ASSERT_RTNL();

	/* remove only the first matching entry */
	hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
		if (gwj->flags != r->flags)
			continue;

		if (gwj->limit_hops != limhops)
			continue;

		/* we have a match when uid is enabled and identical */
		if (gwj->mod.uid || mod.uid) {
			if (gwj->mod.uid != mod.uid)
				continue;
		} else {
			/* no uid => check for identical modifications */
			if (memcmp(&gwj->mod, &mod, sizeof(mod)))
				continue;
		}

		/* if (r->gwtype == CGW_TYPE_CAN_CAN) - is made sure here */
		if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
			continue;

		hlist_del(&gwj->list);
		cgw_unregister_filter(net, gwj);
		kmem_cache_free(cgw_cache, gwj);
		err = 0;
		break;
	}

	return err;
}
static int __net_init cangw_pernet_init(struct net *net)
{
	INIT_HLIST_HEAD(&net->can.cgw_list);
	return 0;
}

static void __net_exit cangw_pernet_exit(struct net *net)
{
	rtnl_lock();
	cgw_remove_all_jobs(net);
	rtnl_unlock();
}

static struct pernet_operations cangw_pernet_ops = {
	.init = cangw_pernet_init,
	.exit = cangw_pernet_exit,
};
static __init int cgw_module_init(void)
{
	int ret;

	/* sanitize given module parameter */
	max_hops = clamp_t(unsigned int, max_hops, CGW_MIN_HOPS, CGW_MAX_HOPS);

	pr_info("can: netlink gateway - max_hops=%d\n", max_hops);

	ret = register_pernet_subsys(&cangw_pernet_ops);
	if (ret)
		return ret;

	ret = -ENOMEM;
	cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
				      0, 0, NULL);
	if (!cgw_cache)
		goto out_cache_create;

	/* set notifier */
	notifier.notifier_call = cgw_notifier;
	ret = register_netdevice_notifier(&notifier);
	if (ret)
		goto out_register_notifier;

	ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
				   NULL, cgw_dump_jobs, 0);
	if (ret)
		goto out_rtnl_register1;

	ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
				   cgw_create_job, NULL, 0);
	if (ret)
		goto out_rtnl_register2;

	ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
				   cgw_remove_job, NULL, 0);
	if (ret)
		goto out_rtnl_register3;

	return 0;

out_rtnl_register3:
	rtnl_unregister(PF_CAN, RTM_NEWROUTE);
out_rtnl_register2:
	rtnl_unregister(PF_CAN, RTM_GETROUTE);
out_rtnl_register1:
	unregister_netdevice_notifier(&notifier);
out_register_notifier:
	kmem_cache_destroy(cgw_cache);
out_cache_create:
	unregister_pernet_subsys(&cangw_pernet_ops);

	return ret;
}
static __exit void cgw_module_exit(void)
{
	rtnl_unregister_all(PF_CAN);

	unregister_netdevice_notifier(&notifier);

	unregister_pernet_subsys(&cangw_pernet_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	kmem_cache_destroy(cgw_cache);
}

module_init(cgw_module_init);
module_exit(cgw_module_exit);