/*
 * gw.c - CAN frame Gateway/Router/Bridge with netlink interface
 *
 * Copyright (c) 2011 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/gw.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#define CAN_GW_VERSION "20130117"
#define CAN_GW_NAME "can-gw"

MODULE_DESCRIPTION("PF_CAN netlink gateway");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS(CAN_GW_NAME);
#define CGW_MIN_HOPS 1
#define CGW_MAX_HOPS 6
#define CGW_DEFAULT_HOPS 1

static unsigned int max_hops __read_mostly = CGW_DEFAULT_HOPS;
module_param(max_hops, uint, S_IRUGO);
MODULE_PARM_DESC(max_hops,
		 "maximum " CAN_GW_NAME " routing hops for CAN frames "
		 "(valid values: " __stringify(CGW_MIN_HOPS) "-"
		 __stringify(CGW_MAX_HOPS) " hops, "
		 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
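/*
 * Illustrative usage (assumption, not taken from this file): the hop limit
 * can be raised at module load time, e.g. "modprobe can-gw max_hops=2".
 * Whatever value is given is sanitized to the CGW_MIN_HOPS..CGW_MAX_HOPS
 * range in cgw_module_init() below.
 */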
static HLIST_HEAD(cgw_list);
static struct notifier_block notifier;

static struct kmem_cache *cgw_cache __read_mostly;
/* structure that contains the (on-the-fly) CAN frame modifications */
struct cf_mod {
	struct {
		struct can_frame and;
		struct can_frame or;
		struct can_frame xor;
		struct can_frame set;
	} modframe;
	struct {
		u8 and;
		u8 or;
		u8 xor;
		u8 set;
	} modtype;
	void (*modfunc[MAX_MODFUNCTIONS])(struct can_frame *cf,
					  struct cf_mod *mod);

	/* CAN frame checksum calculation after CAN frame modifications */
	struct {
		struct cgw_csum_xor xor;
		struct cgw_csum_crc8 crc8;
	} csum;
	struct {
		void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
		void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
	} csumfunc;
	u32 uid;
};
/*
 * So far we just support CAN -> CAN routing and frame modifications.
 *
 * The internal can_can_gw structure contains data and attributes for
 * a CAN -> CAN gateway job.
 */
struct can_can_gw {
	struct can_filter filter;
	int src_idx;
	int dst_idx;
};

/* list entry for CAN gateways jobs */
struct cgw_job {
	struct hlist_node list;
	struct rcu_head rcu;
	u32 handled_frames;
	u32 dropped_frames;
	u32 deleted_frames;
	struct cf_mod mod;
	union {
		/* CAN frame data source */
		struct net_device *dev;
	} src;
	union {
		/* CAN frame data destination */
		struct net_device *dev;
	} dst;
	union {
		struct can_can_gw ccgw;
		/* tbc */
	};
	u8 gwtype;
	u8 limit_hops;
	u16 flags;
};
/* modification functions that are invoked in the hot path in can_can_gw_rcv */

#define MODFUNC(func, op) static void func(struct can_frame *cf, \
					   struct cf_mod *mod) { op ; }

MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id)
MODFUNC(mod_and_dlc, cf->can_dlc &= mod->modframe.and.can_dlc)
MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data)
MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id)
MODFUNC(mod_or_dlc, cf->can_dlc |= mod->modframe.or.can_dlc)
MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data)
MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id)
MODFUNC(mod_xor_dlc, cf->can_dlc ^= mod->modframe.xor.can_dlc)
MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data)
MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id)
MODFUNC(mod_set_dlc, cf->can_dlc = mod->modframe.set.can_dlc)
MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data)
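/*
 * For illustration only: the first invocation above expands to
 *
 *   static void mod_and_id(struct can_frame *cf, struct cf_mod *mod)
 *   { cf->can_id &= mod->modframe.and.can_id; }
 *
 * i.e. each MODFUNC() line defines one small modification function that
 * cgw_parse_attr() can place into the modfunc[] array of struct cf_mod.
 */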
static inline void canframecpy(struct can_frame *dst, struct can_frame *src)
{
	/*
	 * Copy the struct members separately to ensure that no uninitialized
	 * data are copied in the 3 bytes hole of the struct. This is needed
	 * to make easy compares of the data in the struct cf_mod.
	 */

	dst->can_id = src->can_id;
	dst->can_dlc = src->can_dlc;
	*(u64 *)dst->data = *(u64 *)src->data;
}
static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re)
{
	/*
	 * absolute dlc values 0 .. 7 => 0 .. 7, e.g. data [0]
	 * relative to received dlc -1 .. -8 :
	 * e.g. for received dlc = 8
	 * -1 => index = 7 (data[7])
	 * -3 => index = 5 (data[5])
	 * -8 => index = 0 (data[0])
	 */

	if (fr > -9 && fr < 8 &&
	    to > -9 && to < 8 &&
	    re > -9 && re < 8)
		return 0;
	else
		return -EINVAL;
}
static inline int calc_idx(int idx, int rx_dlc)
{
	if (idx < 0)
		return rx_dlc + idx;
	else
		return idx;
}
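/*
 * Worked example (illustration only): for a received frame with can_dlc = 8,
 * calc_idx(0, 8) == 0 addresses data[0] while calc_idx(-1, 8) == 7 addresses
 * data[7], i.e. negative indices count backwards from the end of the
 * received data as described in cgw_chk_csum_parms() above.
 */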
static void cgw_csum_xor_rel(struct can_frame *cf, struct cgw_csum_xor *xor)
{
	int from = calc_idx(xor->from_idx, cf->can_dlc);
	int to = calc_idx(xor->to_idx, cf->can_dlc);
	int res = calc_idx(xor->result_idx, cf->can_dlc);
	u8 val = xor->init_xor_val;
	int i;

	if (from < 0 || to < 0 || res < 0)
		return;

	if (from <= to) {
		for (i = from; i <= to; i++)
			val ^= cf->data[i];
	} else {
		for (i = from; i >= to; i--)
			val ^= cf->data[i];
	}

	cf->data[res] = val;
}
static void cgw_csum_xor_pos(struct can_frame *cf, struct cgw_csum_xor *xor)
{
	u8 val = xor->init_xor_val;
	int i;

	for (i = xor->from_idx; i <= xor->to_idx; i++)
		val ^= cf->data[i];

	cf->data[xor->result_idx] = val;
}
static void cgw_csum_xor_neg(struct can_frame *cf, struct cgw_csum_xor *xor)
{
	u8 val = xor->init_xor_val;
	int i;

	for (i = xor->from_idx; i >= xor->to_idx; i--)
		val ^= cf->data[i];

	cf->data[xor->result_idx] = val;
}
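/*
 * Note (summary of the parsing code below): cgw_parse_attr() picks exactly
 * one of the *_rel/*_pos/*_neg variants per checksum type at configuration
 * time - *_rel when any index is given relative to the received can_dlc
 * (negative), *_pos when from_idx <= to_idx and *_neg otherwise - so no
 * index direction checks are needed in the per-frame hot path.
 */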
static void cgw_csum_crc8_rel(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
{
	int from = calc_idx(crc8->from_idx, cf->can_dlc);
	int to = calc_idx(crc8->to_idx, cf->can_dlc);
	int res = calc_idx(crc8->result_idx, cf->can_dlc);
	u8 crc = crc8->init_crc_val;
	int i;

	if (from < 0 || to < 0 || res < 0)
		return;

	if (from <= to) {
		for (i = crc8->from_idx; i <= crc8->to_idx; i++)
			crc = crc8->crctab[crc^cf->data[i]];
	} else {
		for (i = crc8->from_idx; i >= crc8->to_idx; i--)
			crc = crc8->crctab[crc^cf->data[i]];
	}

	switch (crc8->profile) {

	case CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc^crc8->profile_data[0]];
		break;

	case CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
		break;

	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[crc8->result_idx] = crc^crc8->final_xor_val;
}
static void cgw_csum_crc8_pos(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
{
	u8 crc = crc8->init_crc_val;
	int i;

	for (i = crc8->from_idx; i <= crc8->to_idx; i++)
		crc = crc8->crctab[crc^cf->data[i]];

	switch (crc8->profile) {

	case CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc^crc8->profile_data[0]];
		break;

	case CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
		break;

	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[crc8->result_idx] = crc^crc8->final_xor_val;
}
static void cgw_csum_crc8_neg(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
{
	u8 crc = crc8->init_crc_val;
	int i;

	for (i = crc8->from_idx; i >= crc8->to_idx; i--)
		crc = crc8->crctab[crc^cf->data[i]];

	switch (crc8->profile) {

	case CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc^crc8->profile_data[0]];
		break;

	case CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
		break;

	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[crc8->result_idx] = crc^crc8->final_xor_val;
}
/* the receive & process & send function */
static void can_can_gw_rcv(struct sk_buff *skb, void *data)
{
	struct cgw_job *gwj = (struct cgw_job *)data;
	struct can_frame *cf;
	struct sk_buff *nskb;
	int modidx = 0;

	/*
	 * Do not handle CAN frames routed more than 'max_hops' times.
	 * In general we should never catch this delimiter which is intended
	 * to cover a misconfiguration protection (e.g. circular CAN routes).
	 *
	 * The Controller Area Network controllers only accept CAN frames with
	 * correct CRCs - which are not visible in the controller registers.
	 * According to skbuff.h documentation the csum_start element for IP
	 * checksums is undefined/unused when ip_summed == CHECKSUM_UNNECESSARY.
	 * Only CAN skbs can be processed here which already have this property.
	 */

#define cgw_hops(skb) ((skb)->csum_start)
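	/*
	 * Illustration (derived from the logic below): a frame freshly
	 * received on a CAN interface normally carries cgw_hops(skb) == 0;
	 * every routing step clones or copies the skb and stores the
	 * incremented counter in the new skb, so with the default
	 * max_hops = 1 a frame that has already been routed once is not
	 * routed again.
	 */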
	BUG_ON(skb->ip_summed != CHECKSUM_UNNECESSARY);

	if (cgw_hops(skb) >= max_hops) {
		/* indicate deleted frames due to misconfiguration */
		gwj->deleted_frames++;
		return;
	}

	if (!(gwj->dst.dev->flags & IFF_UP)) {
		gwj->dropped_frames++;
		return;
	}

	/* is sending the skb back to the incoming interface not allowed? */
	if (!(gwj->flags & CGW_FLAGS_CAN_IIF_TX_OK) &&
	    can_skb_prv(skb)->ifindex == gwj->dst.dev->ifindex)
		return;

	/*
	 * clone the given skb, which has not been done in can_rcv()
	 *
	 * When there is at least one modification function activated,
	 * we need to copy the skb as we want to modify skb->data.
	 */
	if (gwj->mod.modfunc[0])
		nskb = skb_copy(skb, GFP_ATOMIC);
	else
		nskb = skb_clone(skb, GFP_ATOMIC);

	if (!nskb) {
		gwj->dropped_frames++;
		return;
	}

	/* put the incremented hop counter in the cloned skb */
	cgw_hops(nskb) = cgw_hops(skb) + 1;

	/* first processing of this CAN frame -> adjust to private hop limit */
	if (gwj->limit_hops && cgw_hops(nskb) == 1)
		cgw_hops(nskb) = max_hops - gwj->limit_hops + 1;

	nskb->dev = gwj->dst.dev;

	/* pointer to modifiable CAN frame */
	cf = (struct can_frame *)nskb->data;

	/* perform preprocessed modification functions if there are any */
	while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
		(*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);

	/* check for checksum updates when the CAN frame has been modified */
	if (modidx) {
		if (gwj->mod.csumfunc.crc8)
			(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);

		if (gwj->mod.csumfunc.xor)
			(*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
	}

	/* clear the skb timestamp if not configured the other way */
	if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP))
		nskb->tstamp.tv64 = 0;

	/* send to netdevice */
	if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))
		gwj->dropped_frames++;
	else
		gwj->handled_frames++;
}
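/*
 * Note (assumption about can_send() semantics): the second argument of
 * can_send() enables local loopback of the sent frame, so CGW_FLAGS_CAN_ECHO
 * makes routed frames visible to local sockets listening on the destination
 * interface as well.
 */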
static inline int cgw_register_filter(struct cgw_job *gwj)
{
	return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
			       gwj->ccgw.filter.can_mask, can_can_gw_rcv,
			       gwj, "gw");
}
static inline void cgw_unregister_filter(struct cgw_job *gwj)
{
	can_rx_unregister(gwj->src.dev, gwj->ccgw.filter.can_id,
			  gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj);
}
static int cgw_notifier(struct notifier_block *nb,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;
	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	if (msg == NETDEV_UNREGISTER) {

		struct cgw_job *gwj = NULL;
		struct hlist_node *nx;

		ASSERT_RTNL();

		hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {

			if (gwj->src.dev == dev || gwj->dst.dev == dev) {
				hlist_del(&gwj->list);
				cgw_unregister_filter(gwj);
				kmem_cache_free(cgw_cache, gwj);
			}
		}
	}

	return NOTIFY_DONE;
}
static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
		       u32 pid, u32 seq, int flags)
{
	struct cgw_frame_mod mb;
	struct rtcanmsg *rtcan;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtcan = nlmsg_data(nlh);
	rtcan->can_family = AF_CAN;
	rtcan->gwtype = gwj->gwtype;
	rtcan->flags = gwj->flags;

	/* add statistics if available */

	if (gwj->handled_frames) {
		if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
			goto cancel;
	}

	if (gwj->dropped_frames) {
		if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
			goto cancel;
	}

	if (gwj->deleted_frames) {
		if (nla_put_u32(skb, CGW_DELETED, gwj->deleted_frames) < 0)
			goto cancel;
	}

	/* check non default settings of attributes */

	if (gwj->limit_hops) {
		if (nla_put_u8(skb, CGW_LIM_HOPS, gwj->limit_hops) < 0)
			goto cancel;
	}

	if (gwj->mod.modtype.and) {
		memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
		mb.modtype = gwj->mod.modtype.and;
		if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
			goto cancel;
	}

	if (gwj->mod.modtype.or) {
		memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
		mb.modtype = gwj->mod.modtype.or;
		if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
			goto cancel;
	}

	if (gwj->mod.modtype.xor) {
		memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
		mb.modtype = gwj->mod.modtype.xor;
		if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
			goto cancel;
	}

	if (gwj->mod.modtype.set) {
		memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
		mb.modtype = gwj->mod.modtype.set;
		if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
			goto cancel;
	}

	if (gwj->mod.uid) {
		if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0)
			goto cancel;
	}

	if (gwj->mod.csumfunc.crc8) {
		if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
			    &gwj->mod.csum.crc8) < 0)
			goto cancel;
	}

	if (gwj->mod.csumfunc.xor) {
		if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
			    &gwj->mod.csum.xor) < 0)
			goto cancel;
	}

	if (gwj->gwtype == CGW_TYPE_CAN_CAN) {

		if (gwj->ccgw.filter.can_id || gwj->ccgw.filter.can_mask) {
			if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
				    &gwj->ccgw.filter) < 0)
				goto cancel;
		}

		if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
			goto cancel;

		if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
			goto cancel;
	}

	nlmsg_end(skb, nlh);
	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
/* Dump information about all CAN gateway jobs, in response to RTM_GETROUTE */
static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct cgw_job *gwj = NULL;
	int idx = 0;
	int s_idx = cb->args[0];

	rcu_read_lock();
	hlist_for_each_entry_rcu(gwj, &cgw_list, list) {
		if (idx < s_idx)
			goto cont;

		if (cgw_put_job(skb, gwj, RTM_NEWROUTE, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0)
			break;
cont:
		idx++;
	}
	rcu_read_unlock();

	cb->args[0] = idx;

	return skb->len;
}
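/*
 * Illustrative usage (assumption, not part of this file): the configured
 * gateway jobs and their statistics can be listed from userspace with the
 * can-utils "cangw" tool, e.g. "cangw -L", which issues an RTM_GETROUTE dump
 * request on PF_CAN that is answered by cgw_dump_jobs() above.
 */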
static const struct nla_policy cgw_policy[CGW_MAX+1] = {
	[CGW_MOD_AND]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_OR]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_XOR]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_SET]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_CS_XOR]	= { .len = sizeof(struct cgw_csum_xor) },
	[CGW_CS_CRC8]	= { .len = sizeof(struct cgw_csum_crc8) },
	[CGW_SRC_IF]	= { .type = NLA_U32 },
	[CGW_DST_IF]	= { .type = NLA_U32 },
	[CGW_FILTER]	= { .len = sizeof(struct can_filter) },
	[CGW_LIM_HOPS]	= { .type = NLA_U8 },
	[CGW_MOD_UID]	= { .type = NLA_U32 },
};
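/*
 * The policy gives nlmsg_parse() in cgw_parse_attr() the expected minimum
 * length of each binary attribute (frame modifications, checksum setups,
 * CAN filter) and the scalar type of the remaining ones, so malformed
 * attributes are rejected before any gateway job is touched.
 */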
/* check for common and gwtype specific attributes */
static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
			  u8 gwtype, void *gwtypeattr, u8 *limhops)
{
	struct nlattr *tb[CGW_MAX+1];
	struct cgw_frame_mod mb;
	int modidx = 0;
	int err = 0;

	/* initialize modification & checksum data space */
	memset(mod, 0, sizeof(*mod));

	err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX,
			  cgw_policy);
	if (err < 0)
		return err;

	if (tb[CGW_LIM_HOPS]) {
		*limhops = nla_get_u8(tb[CGW_LIM_HOPS]);

		if (*limhops < 1 || *limhops > max_hops)
			return -EINVAL;
	}

	/* check for AND/OR/XOR/SET modifications */

	if (tb[CGW_MOD_AND]) {
		nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);

		canframecpy(&mod->modframe.and, &mb.cf);
		mod->modtype.and = mb.modtype;

		if (mb.modtype & CGW_MOD_ID)
			mod->modfunc[modidx++] = mod_and_id;

		if (mb.modtype & CGW_MOD_DLC)
			mod->modfunc[modidx++] = mod_and_dlc;

		if (mb.modtype & CGW_MOD_DATA)
			mod->modfunc[modidx++] = mod_and_data;
	}

	if (tb[CGW_MOD_OR]) {
		nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);

		canframecpy(&mod->modframe.or, &mb.cf);
		mod->modtype.or = mb.modtype;

		if (mb.modtype & CGW_MOD_ID)
			mod->modfunc[modidx++] = mod_or_id;

		if (mb.modtype & CGW_MOD_DLC)
			mod->modfunc[modidx++] = mod_or_dlc;

		if (mb.modtype & CGW_MOD_DATA)
			mod->modfunc[modidx++] = mod_or_data;
	}

	if (tb[CGW_MOD_XOR]) {
		nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);

		canframecpy(&mod->modframe.xor, &mb.cf);
		mod->modtype.xor = mb.modtype;

		if (mb.modtype & CGW_MOD_ID)
			mod->modfunc[modidx++] = mod_xor_id;

		if (mb.modtype & CGW_MOD_DLC)
			mod->modfunc[modidx++] = mod_xor_dlc;

		if (mb.modtype & CGW_MOD_DATA)
			mod->modfunc[modidx++] = mod_xor_data;
	}

	if (tb[CGW_MOD_SET]) {
		nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);

		canframecpy(&mod->modframe.set, &mb.cf);
		mod->modtype.set = mb.modtype;

		if (mb.modtype & CGW_MOD_ID)
			mod->modfunc[modidx++] = mod_set_id;

		if (mb.modtype & CGW_MOD_DLC)
			mod->modfunc[modidx++] = mod_set_dlc;

		if (mb.modtype & CGW_MOD_DATA)
			mod->modfunc[modidx++] = mod_set_data;
	}

	/* check for checksum operations after CAN frame modifications */
	if (modidx) {

		if (tb[CGW_CS_CRC8]) {
			struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]);

			err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
						 c->result_idx);
			if (err)
				return err;

			nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8],
				   CGW_CS_CRC8_LEN);

			/*
			 * select dedicated processing function to reduce
			 * runtime operations in receive hot path.
			 */
			if (c->from_idx < 0 || c->to_idx < 0 ||
			    c->result_idx < 0)
				mod->csumfunc.crc8 = cgw_csum_crc8_rel;
			else if (c->from_idx <= c->to_idx)
				mod->csumfunc.crc8 = cgw_csum_crc8_pos;
			else
				mod->csumfunc.crc8 = cgw_csum_crc8_neg;
		}

		if (tb[CGW_CS_XOR]) {
			struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]);

			err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
						 c->result_idx);
			if (err)
				return err;

			nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR],
				   CGW_CS_XOR_LEN);

			/*
			 * select dedicated processing function to reduce
			 * runtime operations in receive hot path.
			 */
			if (c->from_idx < 0 || c->to_idx < 0 ||
			    c->result_idx < 0)
				mod->csumfunc.xor = cgw_csum_xor_rel;
			else if (c->from_idx <= c->to_idx)
				mod->csumfunc.xor = cgw_csum_xor_pos;
			else
				mod->csumfunc.xor = cgw_csum_xor_neg;
		}

		if (tb[CGW_MOD_UID]) {
			nla_memcpy(&mod->uid, tb[CGW_MOD_UID], sizeof(u32));
		}
	}

	if (gwtype == CGW_TYPE_CAN_CAN) {

		/* check CGW_TYPE_CAN_CAN specific attributes */

		struct can_can_gw *ccgw = (struct can_can_gw *)gwtypeattr;
		memset(ccgw, 0, sizeof(*ccgw));

		/* check for can_filter in attributes */
		if (tb[CGW_FILTER])
			nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
				   sizeof(struct can_filter));

		err = -ENODEV;

		/* specifying two interfaces is mandatory */
		if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
			return err;

		ccgw->src_idx = nla_get_u32(tb[CGW_SRC_IF]);
		ccgw->dst_idx = nla_get_u32(tb[CGW_DST_IF]);

		/* both indices set to 0 for flushing all routing entries */
		if (!ccgw->src_idx && !ccgw->dst_idx)
			return 0;

		/* only one index set to 0 is an error */
		if (!ccgw->src_idx || !ccgw->dst_idx)
			return err;
	}

	/* add the checks for other gwtypes here */

	return 0;
}
static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct rtcanmsg *r;
	struct cgw_job *gwj;
	struct cf_mod mod;
	struct can_can_gw ccgw;
	u8 limhops = 0;
	int err = 0;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (nlmsg_len(nlh) < sizeof(*r))
		return -EINVAL;

	r = nlmsg_data(nlh);
	if (r->can_family != AF_CAN)
		return -EPFNOSUPPORT;

	/* so far we only support CAN -> CAN routings */
	if (r->gwtype != CGW_TYPE_CAN_CAN)
		return -EINVAL;

	err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
	if (err < 0)
		return err;

	if (mod.uid) {

		ASSERT_RTNL();

		/* check for updating an existing job with identical uid */
		hlist_for_each_entry(gwj, &cgw_list, list) {

			if (gwj->mod.uid != mod.uid)
				continue;

			/* interfaces & filters must be identical */
			if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
				return -EINVAL;

			/* update modifications with disabled softirq & quit */
			local_bh_disable();
			memcpy(&gwj->mod, &mod, sizeof(mod));
			local_bh_enable();
			return 0;
		}
	}

	/* ifindex == 0 is not allowed for job creation */
	if (!ccgw.src_idx || !ccgw.dst_idx)
		return -ENODEV;

	gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
	if (!gwj)
		return -ENOMEM;

	gwj->handled_frames = 0;
	gwj->dropped_frames = 0;
	gwj->deleted_frames = 0;
	gwj->flags = r->flags;
	gwj->gwtype = r->gwtype;
	gwj->limit_hops = limhops;

	/* insert already parsed information */
	memcpy(&gwj->mod, &mod, sizeof(mod));
	memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw));

	err = -ENODEV;

	gwj->src.dev = __dev_get_by_index(&init_net, gwj->ccgw.src_idx);

	if (!gwj->src.dev)
		goto out;

	if (gwj->src.dev->type != ARPHRD_CAN)
		goto out;

	gwj->dst.dev = __dev_get_by_index(&init_net, gwj->ccgw.dst_idx);

	if (!gwj->dst.dev)
		goto out;

	if (gwj->dst.dev->type != ARPHRD_CAN)
		goto out;

	ASSERT_RTNL();

	err = cgw_register_filter(gwj);
	if (!err)
		hlist_add_head_rcu(&gwj->list, &cgw_list);
out:
	if (err)
		kmem_cache_free(cgw_cache, gwj);

	return err;
}
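/*
 * Illustrative usage (assumption, not part of this file): a CAN -> CAN
 * routing job is typically created with the can-utils "cangw" tool, e.g.
 * "cangw -A -s can0 -d can1" to route all frames from can0 to can1; the
 * tool builds the RTM_NEWROUTE message with the CGW_* attributes that
 * cgw_parse_attr() evaluates above.
 */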
static void cgw_remove_all_jobs(void)
{
	struct cgw_job *gwj = NULL;
	struct hlist_node *nx;

	ASSERT_RTNL();

	hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {
		hlist_del(&gwj->list);
		cgw_unregister_filter(gwj);
		kmem_cache_free(cgw_cache, gwj);
	}
}
static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct cgw_job *gwj = NULL;
	struct hlist_node *nx;
	struct rtcanmsg *r;
	struct cf_mod mod;
	struct can_can_gw ccgw;
	u8 limhops = 0;
	int err = 0;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (nlmsg_len(nlh) < sizeof(*r))
		return -EINVAL;

	r = nlmsg_data(nlh);
	if (r->can_family != AF_CAN)
		return -EPFNOSUPPORT;

	/* so far we only support CAN -> CAN routings */
	if (r->gwtype != CGW_TYPE_CAN_CAN)
		return -EINVAL;

	err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
	if (err < 0)
		return err;

	/* two interface indices both set to 0 => remove all entries */
	if (!ccgw.src_idx && !ccgw.dst_idx) {
		cgw_remove_all_jobs();
		return 0;
	}

	ASSERT_RTNL();

	err = -EINVAL;

	/* remove only the first matching entry */
	hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {

		if (gwj->flags != r->flags)
			continue;

		if (gwj->limit_hops != limhops)
			continue;

		/* we have a match when uid is enabled and identical */
		if (gwj->mod.uid || mod.uid) {
			if (gwj->mod.uid != mod.uid)
				continue;
		} else {
			/* no uid => check for identical modifications */
			if (memcmp(&gwj->mod, &mod, sizeof(mod)))
				continue;
		}

		/* if (r->gwtype == CGW_TYPE_CAN_CAN) - is made sure here */
		if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
			continue;

		hlist_del(&gwj->list);
		cgw_unregister_filter(gwj);
		kmem_cache_free(cgw_cache, gwj);
		err = 0;
		break;
	}

	return err;
}
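/*
 * Illustrative usage (assumption, not part of this file): with can-utils,
 * "cangw -D -s can0 -d can1" removes the first job matching the given
 * parameters, while a request with src_idx == dst_idx == 0 (e.g. "cangw -F")
 * triggers cgw_remove_all_jobs() above and flushes every routing entry.
 */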
static __init int cgw_module_init(void)
{
	/* sanitize given module parameter */
	max_hops = clamp_t(unsigned int, max_hops, CGW_MIN_HOPS, CGW_MAX_HOPS);

	pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n",
		max_hops);

	cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
				      0, 0, NULL);

	if (!cgw_cache)
		return -ENOMEM;

	/* set notifier */
	notifier.notifier_call = cgw_notifier;
	register_netdevice_notifier(&notifier);

	if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
		unregister_netdevice_notifier(&notifier);
		kmem_cache_destroy(cgw_cache);
		return -ENOBUFS;
	}

	/* Only the first call to __rtnl_register can fail */
	__rtnl_register(PF_CAN, RTM_NEWROUTE, cgw_create_job, NULL, NULL);
	__rtnl_register(PF_CAN, RTM_DELROUTE, cgw_remove_job, NULL, NULL);

	return 0;
}
static __exit void cgw_module_exit(void)
{
	rtnl_unregister_all(PF_CAN);

	unregister_netdevice_notifier(&notifier);

	rtnl_lock();
	cgw_remove_all_jobs();
	rtnl_unlock();

	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	kmem_cache_destroy(cgw_cache);
}
module_init(cgw_module_init);
module_exit(cgw_module_exit);