// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
/* gw.c - CAN frame Gateway/Router/Bridge with netlink interface
 *
 * Copyright (c) 2019 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/gw.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#define CAN_GW_VERSION "20190810"
#define CAN_GW_NAME "can-gw"

MODULE_DESCRIPTION("PF_CAN netlink gateway");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS(CAN_GW_NAME);

#define CGW_MIN_HOPS 1
#define CGW_MAX_HOPS 6
#define CGW_DEFAULT_HOPS 1

static unsigned int max_hops __read_mostly = CGW_DEFAULT_HOPS;
module_param(max_hops, uint, 0444);
MODULE_PARM_DESC(max_hops,
		 "maximum " CAN_GW_NAME " routing hops for CAN frames "
		 "(valid values: " __stringify(CGW_MIN_HOPS) "-"
		 __stringify(CGW_MAX_HOPS) " hops, "
		 "default: " __stringify(CGW_DEFAULT_HOPS) ")");

static struct notifier_block notifier;
static struct kmem_cache *cgw_cache __read_mostly;

/* structure that contains the (on-the-fly) CAN frame modifications */
struct cf_mod {
	struct {
		struct canfd_frame and;
		struct canfd_frame or;
		struct canfd_frame xor;
		struct canfd_frame set;
	} modframe;
	struct {
		u8 and;
		u8 or;
		u8 xor;
		u8 set;
	} modtype;
	void (*modfunc[MAX_MODFUNCTIONS])(struct canfd_frame *cf,
					  struct cf_mod *mod);

	/* CAN frame checksum calculation after CAN frame modifications */
	struct {
		struct cgw_csum_xor xor;
		struct cgw_csum_crc8 crc8;
	} csum;
	struct {
		void (*xor)(struct canfd_frame *cf,
			    struct cgw_csum_xor *xor);
		void (*crc8)(struct canfd_frame *cf,
			     struct cgw_csum_crc8 *crc8);
	} csumfunc;
	u32 uid;
};

/* So far we just support CAN -> CAN routing and frame modifications.
 *
 * The internal can_can_gw structure contains data and attributes for
 * a CAN -> CAN gateway job.
 */
struct can_can_gw {
	struct can_filter filter;
	int src_idx;
	int dst_idx;
};

/* list entry for CAN gateways jobs */
struct cgw_job {
	struct hlist_node list;
	struct rcu_head rcu;
	u32 handled_frames;
	u32 dropped_frames;
	u32 deleted_frames;
	struct cf_mod mod;
	union {
		/* CAN frame data source */
		struct net_device *dev;
	} src;
	union {
		/* CAN frame data destination */
		struct net_device *dev;
	} dst;
	union {
		struct can_can_gw ccgw;
		/* tbc */
	};
	u8 gwtype;
	u8 limit_hops;
	u16 flags;
};

/* modification functions that are invoked in the hot path in can_can_gw_rcv */

#define MODFUNC(func, op) static void func(struct canfd_frame *cf, \
					   struct cf_mod *mod) { op ; }

MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id)
MODFUNC(mod_and_len, cf->len &= mod->modframe.and.len)
MODFUNC(mod_and_flags, cf->flags &= mod->modframe.and.flags)
MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data)
MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id)
MODFUNC(mod_or_len, cf->len |= mod->modframe.or.len)
MODFUNC(mod_or_flags, cf->flags |= mod->modframe.or.flags)
MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data)
MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id)
MODFUNC(mod_xor_len, cf->len ^= mod->modframe.xor.len)
MODFUNC(mod_xor_flags, cf->flags ^= mod->modframe.xor.flags)
MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data)
MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id)
MODFUNC(mod_set_len, cf->len = mod->modframe.set.len)
MODFUNC(mod_set_flags, cf->flags = mod->modframe.set.flags)
MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data)
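
/* The CAN FD data[] array holds up to CANFD_MAX_DLEN (64) bytes, so the
 * FD variants below walk the payload in 64-bit chunks instead of the
 * single u64 access used for the Classic CAN data modifications above.
 */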
static void mod_and_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
	int i;

	for (i = 0; i < CANFD_MAX_DLEN; i += 8)
		*(u64 *)(cf->data + i) &= *(u64 *)(mod->modframe.and.data + i);
}

static void mod_or_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
	int i;

	for (i = 0; i < CANFD_MAX_DLEN; i += 8)
		*(u64 *)(cf->data + i) |= *(u64 *)(mod->modframe.or.data + i);
}

static void mod_xor_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
	int i;

	for (i = 0; i < CANFD_MAX_DLEN; i += 8)
		*(u64 *)(cf->data + i) ^= *(u64 *)(mod->modframe.xor.data + i);
}

static void mod_set_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
	memcpy(cf->data, mod->modframe.set.data, CANFD_MAX_DLEN);
}

static void canframecpy(struct canfd_frame *dst, struct can_frame *src)
{
	/* Copy the struct members separately to ensure that no uninitialized
	 * data are copied in the 3 bytes hole of the struct. This is needed
	 * to make easy compares of the data in the struct cf_mod.
	 */

	dst->can_id = src->can_id;
	dst->len = src->can_dlc;
	*(u64 *)dst->data = *(u64 *)src->data;
}

static void canfdframecpy(struct canfd_frame *dst, struct canfd_frame *src)
{
	/* Copy the struct members separately to ensure that no uninitialized
	 * data are copied in the 2 bytes hole of the struct. This is needed
	 * to make easy compares of the data in the struct cf_mod.
	 */

	dst->can_id = src->can_id;
	dst->flags = src->flags;
	dst->len = src->len;
	memcpy(dst->data, src->data, CANFD_MAX_DLEN);
}

static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re, struct rtcanmsg *r)
{
	s8 dlen = CAN_MAX_DLEN;

	if (r->flags & CGW_FLAGS_CAN_FD)
		dlen = CANFD_MAX_DLEN;

	/* absolute dlc values 0 .. 7 => 0 .. 7, e.g. data [0]
	 * relative to received dlc -1 .. -8 :
	 * e.g. for received dlc = 8
	 * -1 => index = 7 (data[7])
	 * -3 => index = 5 (data[5])
	 * -8 => index = 0 (data[0])
	 */

	if (fr >= -dlen && fr < dlen &&
	    to >= -dlen && to < dlen &&
	    re >= -dlen && re < dlen)
		return 0;
	else
		return -EINVAL;
}

static inline int calc_idx(int idx, int rx_len)
{
	/* negative indices are relative to the received data length */
	if (idx < 0)
		return rx_len + idx;
	else
		return idx;
}

static void cgw_csum_xor_rel(struct canfd_frame *cf, struct cgw_csum_xor *xor)
{
	int from = calc_idx(xor->from_idx, cf->len);
	int to = calc_idx(xor->to_idx, cf->len);
	int res = calc_idx(xor->result_idx, cf->len);
	u8 val = xor->init_xor_val;
	int i;

	if (from < 0 || to < 0 || res < 0)
		return;

	if (from <= to) {
		for (i = from; i <= to; i++)
			val ^= cf->data[i];
	} else {
		for (i = from; i >= to; i--)
			val ^= cf->data[i];
	}

	cf->data[res] = val;
}

static void cgw_csum_xor_pos(struct canfd_frame *cf, struct cgw_csum_xor *xor)
{
	u8 val = xor->init_xor_val;
	int i;

	for (i = xor->from_idx; i <= xor->to_idx; i++)
		val ^= cf->data[i];

	cf->data[xor->result_idx] = val;
}

static void cgw_csum_xor_neg(struct canfd_frame *cf, struct cgw_csum_xor *xor)
{
	u8 val = xor->init_xor_val;
	int i;

	for (i = xor->from_idx; i >= xor->to_idx; i--)
		val ^= cf->data[i];

	cf->data[xor->result_idx] = val;
}
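
/* Example: an XOR checksum configured with from_idx = 0, to_idx = 6,
 * result_idx = 7 and init_xor_val = 0 stores
 * data[0] ^ data[1] ^ ... ^ data[6] into data[7] of the routed frame.
 */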

static void cgw_csum_crc8_rel(struct canfd_frame *cf,
			      struct cgw_csum_crc8 *crc8)
{
	int from = calc_idx(crc8->from_idx, cf->len);
	int to = calc_idx(crc8->to_idx, cf->len);
	int res = calc_idx(crc8->result_idx, cf->len);
	u8 crc = crc8->init_crc_val;
	int i;

	if (from < 0 || to < 0 || res < 0)
		return;

	if (from <= to) {
		for (i = crc8->from_idx; i <= crc8->to_idx; i++)
			crc = crc8->crctab[crc ^ cf->data[i]];
	} else {
		for (i = crc8->from_idx; i >= crc8->to_idx; i--)
			crc = crc8->crctab[crc ^ cf->data[i]];
	}

	switch (crc8->profile) {
	case CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[0]];
		break;

	case CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
		break;

	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val;
}

static void cgw_csum_crc8_pos(struct canfd_frame *cf,
			      struct cgw_csum_crc8 *crc8)
{
	u8 crc = crc8->init_crc_val;
	int i;

	for (i = crc8->from_idx; i <= crc8->to_idx; i++)
		crc = crc8->crctab[crc ^ cf->data[i]];

	switch (crc8->profile) {
	case CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[0]];
		break;

	case CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
		break;

	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val;
}

static void cgw_csum_crc8_neg(struct canfd_frame *cf,
			      struct cgw_csum_crc8 *crc8)
{
	u8 crc = crc8->init_crc_val;
	int i;

	for (i = crc8->from_idx; i >= crc8->to_idx; i--)
		crc = crc8->crctab[crc ^ cf->data[i]];

	switch (crc8->profile) {
	case CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[0]];
		break;

	case CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
		break;

	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val;
}
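
/* The CRC8 profiles above extend the checksum with job specific data:
 * CGW_CRC8PRF_1U8 feeds one constant byte from profile_data[0] into the
 * CRC, CGW_CRC8PRF_16U8 selects one of 16 profile bytes via the low
 * nibble of data[1], and CGW_CRC8PRF_SFFID_XOR folds the low and high
 * bytes of the SFF CAN ID into the CRC before the final XOR value is
 * applied.
 */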

/* the receive & process & send function */
static void can_can_gw_rcv(struct sk_buff *skb, void *data)
{
	struct cgw_job *gwj = (struct cgw_job *)data;
	struct canfd_frame *cf;
	struct sk_buff *nskb;
	int modidx = 0;

	/* process strictly Classic CAN or CAN FD frames */
	if (gwj->flags & CGW_FLAGS_CAN_FD) {
		if (skb->len != CANFD_MTU)
			return;
	} else {
		if (skb->len != CAN_MTU)
			return;
	}

	/* Do not handle CAN frames routed more than 'max_hops' times.
	 * In general we should never catch this delimiter which is intended
	 * to cover a misconfiguration protection (e.g. circular CAN routes).
	 *
	 * The Controller Area Network controllers only accept CAN frames with
	 * correct CRCs - which are not visible in the controller registers.
	 * According to skbuff.h documentation the csum_start element for IP
	 * checksums is undefined/unused when ip_summed == CHECKSUM_UNNECESSARY.
	 * Only CAN skbs can be processed here which already have this property.
	 */

#define cgw_hops(skb) ((skb)->csum_start)

	BUG_ON(skb->ip_summed != CHECKSUM_UNNECESSARY);

	if (cgw_hops(skb) >= max_hops) {
		/* indicate deleted frames due to misconfiguration */
		gwj->deleted_frames++;
		return;
	}

	if (!(gwj->dst.dev->flags & IFF_UP)) {
		gwj->dropped_frames++;
		return;
	}

	/* is sending the skb back to the incoming interface not allowed? */
	if (!(gwj->flags & CGW_FLAGS_CAN_IIF_TX_OK) &&
	    can_skb_prv(skb)->ifindex == gwj->dst.dev->ifindex)
		return;

	/* clone the given skb, which has not been done in can_rcv()
	 *
	 * When there is at least one modification function activated,
	 * we need to copy the skb as we want to modify skb->data.
	 */
	if (gwj->mod.modfunc[0])
		nskb = skb_copy(skb, GFP_ATOMIC);
	else
		nskb = skb_clone(skb, GFP_ATOMIC);

	if (!nskb) {
		gwj->dropped_frames++;
		return;
	}

	/* put the incremented hop counter in the cloned skb */
	cgw_hops(nskb) = cgw_hops(skb) + 1;

	/* first processing of this CAN frame -> adjust to private hop limit */
	if (gwj->limit_hops && cgw_hops(nskb) == 1)
		cgw_hops(nskb) = max_hops - gwj->limit_hops + 1;

	nskb->dev = gwj->dst.dev;

	/* pointer to modifiable CAN frame */
	cf = (struct canfd_frame *)nskb->data;

	/* perform preprocessed modification functions if there are any */
	while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
		(*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);

	/* Has the CAN frame been modified? */
	if (modidx) {
		/* get available space for the processed CAN frame type */
		int max_len = nskb->len - offsetof(struct canfd_frame, data);

		/* dlc may have changed, make sure it fits to the CAN frame */
		if (cf->len > max_len) {
			/* delete frame due to misconfiguration */
			gwj->deleted_frames++;
			kfree_skb(nskb);
			return;
		}

		/* check for checksum updates */
		if (gwj->mod.csumfunc.crc8)
			(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);

		if (gwj->mod.csumfunc.xor)
			(*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
	}

	/* clear the skb timestamp if not configured the other way */
	if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP))
		nskb->tstamp = 0;

	/* send to netdevice */
	if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))
		gwj->dropped_frames++;
	else
		gwj->handled_frames++;
}
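
/* Every job subscribes a CAN receive filter on its source device, so any
 * frame matching can_id/can_mask on src.dev is handed to can_can_gw_rcv()
 * with the cgw_job as private context.
 */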
static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
{
	return can_rx_register(net, gwj->src.dev, gwj->ccgw.filter.can_id,
			       gwj->ccgw.filter.can_mask, can_can_gw_rcv,
			       gwj, "gw", NULL);
}

static inline void cgw_unregister_filter(struct net *net, struct cgw_job *gwj)
{
	can_rx_unregister(net, gwj->src.dev, gwj->ccgw.filter.can_id,
			  gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj);
}

static int cgw_notifier(struct notifier_block *nb,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	if (msg == NETDEV_UNREGISTER) {
		struct cgw_job *gwj = NULL;
		struct hlist_node *nx;

		ASSERT_RTNL();

		hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
			if (gwj->src.dev == dev || gwj->dst.dev == dev) {
				hlist_del(&gwj->list);
				cgw_unregister_filter(net, gwj);
				kmem_cache_free(cgw_cache, gwj);
			}
		}
	}

	return NOTIFY_DONE;
}
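
/* Jobs referencing an unregistering CAN device are torn down in the
 * notifier above so that no stale net_device pointers remain in the
 * per-namespace cgw_list.
 */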

static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
		       u32 pid, u32 seq, int flags)
{
	struct rtcanmsg *rtcan;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtcan = nlmsg_data(nlh);
	rtcan->can_family = AF_CAN;
	rtcan->gwtype = gwj->gwtype;
	rtcan->flags = gwj->flags;

	/* add statistics if available */

	if (gwj->handled_frames) {
		if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
			goto cancel;
	}

	if (gwj->dropped_frames) {
		if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
			goto cancel;
	}

	if (gwj->deleted_frames) {
		if (nla_put_u32(skb, CGW_DELETED, gwj->deleted_frames) < 0)
			goto cancel;
	}

	/* check non default settings of attributes */

	if (gwj->limit_hops) {
		if (nla_put_u8(skb, CGW_LIM_HOPS, gwj->limit_hops) < 0)
			goto cancel;
	}

	if (gwj->flags & CGW_FLAGS_CAN_FD) {
		struct cgw_fdframe_mod mb;

		if (gwj->mod.modtype.and) {
			memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.and;
			if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.or) {
			memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.or;
			if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.xor) {
			memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.xor;
			if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.set) {
			memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.set;
			if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0)
				goto cancel;
		}
	} else {
		struct cgw_frame_mod mb;

		if (gwj->mod.modtype.and) {
			memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.and;
			if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.or) {
			memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.or;
			if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.xor) {
			memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.xor;
			if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.set) {
			memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.set;
			if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
				goto cancel;
		}
	}

	if (gwj->mod.uid) {
		if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0)
			goto cancel;
	}

	if (gwj->mod.csumfunc.crc8) {
		if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
			    &gwj->mod.csum.crc8) < 0)
			goto cancel;
	}

	if (gwj->mod.csumfunc.xor) {
		if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
			    &gwj->mod.csum.xor) < 0)
			goto cancel;
	}

	if (gwj->gwtype == CGW_TYPE_CAN_CAN) {
		if (gwj->ccgw.filter.can_id || gwj->ccgw.filter.can_mask) {
			if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
				    &gwj->ccgw.filter) < 0)
				goto cancel;
		}

		if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
			goto cancel;

		if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
			goto cancel;
	}

	nlmsg_end(skb, nlh);
	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* Dump information about all CAN gateway jobs, in response to RTM_GETROUTE */
static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct cgw_job *gwj = NULL;
	int idx = 0;
	int s_idx = cb->args[0];

	rcu_read_lock();
	hlist_for_each_entry_rcu(gwj, &net->can.cgw_list, list) {
		if (idx < s_idx)
			goto cont;

		if (cgw_put_job(skb, gwj, RTM_NEWROUTE,
				NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0)
			break;
cont:
		idx++;
	}
	rcu_read_unlock();

	cb->args[0] = idx;

	return skb->len;
}
[CGW_MAX
+ 1] = {
714 [CGW_MOD_AND
] = { .len
= sizeof(struct cgw_frame_mod
) },
715 [CGW_MOD_OR
] = { .len
= sizeof(struct cgw_frame_mod
) },
716 [CGW_MOD_XOR
] = { .len
= sizeof(struct cgw_frame_mod
) },
717 [CGW_MOD_SET
] = { .len
= sizeof(struct cgw_frame_mod
) },
718 [CGW_CS_XOR
] = { .len
= sizeof(struct cgw_csum_xor
) },
719 [CGW_CS_CRC8
] = { .len
= sizeof(struct cgw_csum_crc8
) },
720 [CGW_SRC_IF
] = { .type
= NLA_U32
},
721 [CGW_DST_IF
] = { .type
= NLA_U32
},
722 [CGW_FILTER
] = { .len
= sizeof(struct can_filter
) },
723 [CGW_LIM_HOPS
] = { .type
= NLA_U8
},
724 [CGW_MOD_UID
] = { .type
= NLA_U32
},
725 [CGW_FDMOD_AND
] = { .len
= sizeof(struct cgw_fdframe_mod
) },
726 [CGW_FDMOD_OR
] = { .len
= sizeof(struct cgw_fdframe_mod
) },
727 [CGW_FDMOD_XOR
] = { .len
= sizeof(struct cgw_fdframe_mod
) },
728 [CGW_FDMOD_SET
] = { .len
= sizeof(struct cgw_fdframe_mod
) },

/* check for common and gwtype specific attributes */
static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
			  u8 gwtype, void *gwtypeattr, u8 *limhops)
{
	struct nlattr *tb[CGW_MAX + 1];
	struct rtcanmsg *r = nlmsg_data(nlh);
	int modidx = 0;
	int err = 0;

	/* initialize modification & checksum data space */
	memset(mod, 0, sizeof(*mod));

	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtcanmsg), tb,
				     CGW_MAX, cgw_policy, NULL);
	if (err < 0)
		return err;

	if (tb[CGW_LIM_HOPS]) {
		*limhops = nla_get_u8(tb[CGW_LIM_HOPS]);

		if (*limhops < 1 || *limhops > max_hops)
			return -EINVAL;
	}

	/* check for AND/OR/XOR/SET modifications */
	if (r->flags & CGW_FLAGS_CAN_FD) {
		struct cgw_fdframe_mod mb;

		if (tb[CGW_FDMOD_AND]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_AND], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.and, &mb.cf);
			mod->modtype.and = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_and_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_and_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_and_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_and_fddata;
		}

		if (tb[CGW_FDMOD_OR]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_OR], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.or, &mb.cf);
			mod->modtype.or = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_or_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_or_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_or_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_or_fddata;
		}

		if (tb[CGW_FDMOD_XOR]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_XOR], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.xor, &mb.cf);
			mod->modtype.xor = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_xor_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_xor_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_xor_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_xor_fddata;
		}

		if (tb[CGW_FDMOD_SET]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_SET], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.set, &mb.cf);
			mod->modtype.set = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_set_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_set_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_set_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_set_fddata;
		}
	} else {
		struct cgw_frame_mod mb;

		if (tb[CGW_MOD_AND]) {
			nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.and, &mb.cf);
			mod->modtype.and = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_and_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_and_len;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_and_data;
		}

		if (tb[CGW_MOD_OR]) {
			nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.or, &mb.cf);
			mod->modtype.or = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_or_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_or_len;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_or_data;
		}

		if (tb[CGW_MOD_XOR]) {
			nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.xor, &mb.cf);
			mod->modtype.xor = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_xor_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_xor_len;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_xor_data;
		}

		if (tb[CGW_MOD_SET]) {
			nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.set, &mb.cf);
			mod->modtype.set = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_set_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_set_len;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_set_data;
		}
	}

	/* check for checksum operations after CAN frame modifications */
	if (modidx) {
		if (tb[CGW_CS_CRC8]) {
			struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]);

			err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
						 c->result_idx, r);
			if (err)
				return err;

			nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8],
				   CGW_CS_CRC8_LEN);

			/* select dedicated processing function to reduce
			 * runtime operations in receive hot path.
			 */
			if (c->from_idx < 0 || c->to_idx < 0 ||
			    c->result_idx < 0)
				mod->csumfunc.crc8 = cgw_csum_crc8_rel;
			else if (c->from_idx <= c->to_idx)
				mod->csumfunc.crc8 = cgw_csum_crc8_pos;
			else
				mod->csumfunc.crc8 = cgw_csum_crc8_neg;
		}

		if (tb[CGW_CS_XOR]) {
			struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]);

			err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
						 c->result_idx, r);
			if (err)
				return err;

			nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR],
				   CGW_CS_XOR_LEN);

			/* select dedicated processing function to reduce
			 * runtime operations in receive hot path.
			 */
			if (c->from_idx < 0 || c->to_idx < 0 ||
			    c->result_idx < 0)
				mod->csumfunc.xor = cgw_csum_xor_rel;
			else if (c->from_idx <= c->to_idx)
				mod->csumfunc.xor = cgw_csum_xor_pos;
			else
				mod->csumfunc.xor = cgw_csum_xor_neg;
		}

		if (tb[CGW_MOD_UID])
			nla_memcpy(&mod->uid, tb[CGW_MOD_UID], sizeof(u32));
	}

	if (gwtype == CGW_TYPE_CAN_CAN) {
		/* check CGW_TYPE_CAN_CAN specific attributes */
		struct can_can_gw *ccgw = (struct can_can_gw *)gwtypeattr;

		memset(ccgw, 0, sizeof(*ccgw));

		/* check for can_filter in attributes */
		if (tb[CGW_FILTER])
			nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
				   sizeof(struct can_filter));

		/* specifying two interfaces is mandatory */
		if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
			return -ENODEV;

		ccgw->src_idx = nla_get_u32(tb[CGW_SRC_IF]);
		ccgw->dst_idx = nla_get_u32(tb[CGW_DST_IF]);

		/* both indices set to 0 for flushing all routing entries */
		if (!ccgw->src_idx && !ccgw->dst_idx)
			return 0;

		/* only one index set to 0 is an error */
		if (!ccgw->src_idx || !ccgw->dst_idx)
			return -ENODEV;
	}

	/* add the checks for other gwtypes here */

	return 0;
}

static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtcanmsg *r;
	struct cgw_job *gwj;
	struct cf_mod mod;
	struct can_can_gw ccgw;
	u8 limhops = 0;
	int err = 0;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (nlmsg_len(nlh) < sizeof(*r))
		return -EINVAL;

	r = nlmsg_data(nlh);
	if (r->can_family != AF_CAN)
		return -EPFNOSUPPORT;

	/* so far we only support CAN -> CAN routings */
	if (r->gwtype != CGW_TYPE_CAN_CAN)
		return -EINVAL;

	err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
	if (err < 0)
		return err;

	if (mod.uid) {
		ASSERT_RTNL();

		/* check for updating an existing job with identical uid */
		hlist_for_each_entry(gwj, &net->can.cgw_list, list) {
			if (gwj->mod.uid != mod.uid)
				continue;

			/* interfaces & filters must be identical */
			if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
				return -EINVAL;

			/* update modifications with disabled softirq & quit */
			local_bh_disable();
			memcpy(&gwj->mod, &mod, sizeof(mod));
			local_bh_enable();
			return 0;
		}
	}

	/* ifindex == 0 is not allowed for job creation */
	if (!ccgw.src_idx || !ccgw.dst_idx)
		return -ENODEV;

	gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
	if (!gwj)
		return -ENOMEM;

	gwj->handled_frames = 0;
	gwj->dropped_frames = 0;
	gwj->deleted_frames = 0;
	gwj->flags = r->flags;
	gwj->gwtype = r->gwtype;
	gwj->limit_hops = limhops;

	/* insert already parsed information */
	memcpy(&gwj->mod, &mod, sizeof(mod));
	memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw));

	err = -ENODEV;

	gwj->src.dev = __dev_get_by_index(net, gwj->ccgw.src_idx);
	if (!gwj->src.dev)
		goto out;

	if (gwj->src.dev->type != ARPHRD_CAN)
		goto out;

	gwj->dst.dev = __dev_get_by_index(net, gwj->ccgw.dst_idx);
	if (!gwj->dst.dev)
		goto out;

	if (gwj->dst.dev->type != ARPHRD_CAN)
		goto out;

	ASSERT_RTNL();

	err = cgw_register_filter(net, gwj);
	if (!err)
		hlist_add_head_rcu(&gwj->list, &net->can.cgw_list);
out:
	if (err)
		kmem_cache_free(cgw_cache, gwj);

	return err;
}
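
/* Note: creating a job with a CGW_MOD_UID attribute allows later
 * RTM_NEWROUTE requests with the same uid (and identical interfaces and
 * filter) to replace only the frame modifications in place, without
 * re-registering the receive filter.
 */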

static void cgw_remove_all_jobs(struct net *net)
{
	struct cgw_job *gwj = NULL;
	struct hlist_node *nx;

	ASSERT_RTNL();

	hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
		hlist_del(&gwj->list);
		cgw_unregister_filter(net, gwj);
		kmem_cache_free(cgw_cache, gwj);
	}
}

static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct cgw_job *gwj = NULL;
	struct hlist_node *nx;
	struct rtcanmsg *r;
	struct cf_mod mod;
	struct can_can_gw ccgw;
	u8 limhops = 0;
	int err = 0;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (nlmsg_len(nlh) < sizeof(*r))
		return -EINVAL;

	r = nlmsg_data(nlh);
	if (r->can_family != AF_CAN)
		return -EPFNOSUPPORT;

	/* so far we only support CAN -> CAN routings */
	if (r->gwtype != CGW_TYPE_CAN_CAN)
		return -EINVAL;

	err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
	if (err < 0)
		return err;

	/* two interface indices both set to 0 => remove all entries */
	if (!ccgw.src_idx && !ccgw.dst_idx) {
		cgw_remove_all_jobs(net);
		return 0;
	}

	err = -EINVAL;

	ASSERT_RTNL();

	/* remove only the first matching entry */
	hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
		if (gwj->flags != r->flags)
			continue;

		if (gwj->limit_hops != limhops)
			continue;

		/* we have a match when uid is enabled and identical */
		if (gwj->mod.uid || mod.uid) {
			if (gwj->mod.uid != mod.uid)
				continue;
		} else {
			/* no uid => check for identical modifications */
			if (memcmp(&gwj->mod, &mod, sizeof(mod)))
				continue;
		}

		/* if (r->gwtype == CGW_TYPE_CAN_CAN) - is made sure here */
		if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
			continue;

		hlist_del(&gwj->list);
		cgw_unregister_filter(net, gwj);
		kmem_cache_free(cgw_cache, gwj);
		err = 0;
		break;
	}

	return err;
}

static int __net_init cangw_pernet_init(struct net *net)
{
	INIT_HLIST_HEAD(&net->can.cgw_list);
	return 0;
}

static void __net_exit cangw_pernet_exit(struct net *net)
{
	rtnl_lock();
	cgw_remove_all_jobs(net);
	rtnl_unlock();
}

static struct pernet_operations cangw_pernet_ops = {
	.init = cangw_pernet_init,
	.exit = cangw_pernet_exit,
};

static __init int cgw_module_init(void)
{
	int ret;

	/* sanitize given module parameter */
	max_hops = clamp_t(unsigned int, max_hops, CGW_MIN_HOPS, CGW_MAX_HOPS);

	pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n",
		max_hops);

	ret = register_pernet_subsys(&cangw_pernet_ops);
	if (ret)
		return ret;

	ret = -ENOMEM;
	cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
				      0, 0, NULL);
	if (!cgw_cache)
		goto out_cache_create;

	/* set notifier */
	notifier.notifier_call = cgw_notifier;
	ret = register_netdevice_notifier(&notifier);
	if (ret)
		goto out_register_notifier;

	ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
				   NULL, cgw_dump_jobs, 0);
	if (ret)
		goto out_rtnl_register1;

	ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
				   cgw_create_job, NULL, 0);
	if (ret)
		goto out_rtnl_register2;

	ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
				   cgw_remove_job, NULL, 0);
	if (ret)
		goto out_rtnl_register3;

	return 0;

out_rtnl_register3:
	rtnl_unregister(PF_CAN, RTM_NEWROUTE);
out_rtnl_register2:
	rtnl_unregister(PF_CAN, RTM_GETROUTE);
out_rtnl_register1:
	unregister_netdevice_notifier(&notifier);
out_register_notifier:
	kmem_cache_destroy(cgw_cache);
out_cache_create:
	unregister_pernet_subsys(&cangw_pernet_ops);

	return ret;
}

static __exit void cgw_module_exit(void)
{
	rtnl_unregister_all(PF_CAN);

	unregister_netdevice_notifier(&notifier);

	unregister_pernet_subsys(&cangw_pernet_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	kmem_cache_destroy(cgw_cache);
}

module_init(cgw_module_init);
module_exit(cgw_module_exit);