/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/qrtr.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */

#include <net/sock.h>

#include "qrtr.h"
#define QRTR_PROTO_VER 1

/* auto-bind range */
#define QRTR_MIN_EPH_SOCKET 0x4000
#define QRTR_MAX_EPH_SOCKET 0x7fff
enum qrtr_pkt_type {
	QRTR_TYPE_DATA		= 1,
	QRTR_TYPE_HELLO		= 2,
	QRTR_TYPE_BYE		= 3,
	QRTR_TYPE_NEW_SERVER	= 4,
	QRTR_TYPE_DEL_SERVER	= 5,
	QRTR_TYPE_DEL_CLIENT	= 6,
	QRTR_TYPE_RESUME_TX	= 7,
	QRTR_TYPE_EXIT		= 8,
	QRTR_TYPE_PING		= 9,
};
/**
 * struct qrtr_hdr - (I|R)PCrouter packet header
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @src_node_id: source node
 * @src_port_id: source port
 * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
 * @size: length of packet, excluding this header
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 */
struct qrtr_hdr {
	__le32 version;
	__le32 type;
	__le32 src_node_id;
	__le32 src_port_id;
	__le32 confirm_rx;
	__le32 size;
	__le32 dst_node_id;
	__le32 dst_port_id;
} __packed;

#define QRTR_HDR_SIZE sizeof(struct qrtr_hdr)
#define QRTR_NODE_BCAST ((unsigned int)-1)
#define QRTR_PORT_CTRL ((unsigned int)-2)
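/* Illustrative sketch, not part of the original source: how one DATA
 * packet is framed on the wire by the senders below.  All header fields
 * are little-endian, hdr.size counts only the payload (which is padded
 * to a multiple of four bytes), and the node/port numbers here are made
 * up for the example.
 *
 *	struct qrtr_hdr hdr = {
 *		.version     = cpu_to_le32(QRTR_PROTO_VER),
 *		.type        = cpu_to_le32(QRTR_TYPE_DATA),
 *		.src_node_id = cpu_to_le32(1),
 *		.src_port_id = cpu_to_le32(QRTR_MIN_EPH_SOCKET),
 *		.confirm_rx  = cpu_to_le32(0),
 *		.size        = cpu_to_le32(4),	// 4-byte payload follows
 *		.dst_node_id = cpu_to_le32(0),
 *		.dst_port_id = cpu_to_le32(0x4001),
 *	};
 */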
struct qrtr_sock {
	/* WARNING: sk must be the first member */
	struct sock sk;
	struct sockaddr_qrtr us;
	struct sockaddr_qrtr peer;
};
static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
	return container_of(sk, struct qrtr_sock, sk);
}
static unsigned int qrtr_local_nid = -1;
/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
/* broadcast list */
static LIST_HEAD(qrtr_all_nodes);
/* lock for qrtr_nodes, qrtr_all_nodes and node reference */
static DEFINE_MUTEX(qrtr_node_lock);

/* local port allocation management */
static DEFINE_IDR(qrtr_ports);
static DEFINE_MUTEX(qrtr_port_lock);
/**
 * struct qrtr_node - endpoint node
 * @ep_lock: lock for endpoint management and callbacks
 * @ep: endpoint
 * @ref: reference count for node
 * @nid: node id of node
 * @rx_queue: receive queue
 * @work: scheduled work struct for recv work
 * @item: list item for broadcast list
 */
struct qrtr_node {
	struct mutex ep_lock;
	struct qrtr_endpoint *ep;
	struct kref ref;
	unsigned int nid;

	struct sk_buff_head rx_queue;
	struct work_struct work;
	struct list_head item;
};
/* Release node resources and free the node.
 *
 * Do not call directly, use qrtr_node_release. To be used with
 * kref_put_mutex. As such, the node mutex is expected to be locked on call.
 */
static void __qrtr_node_release(struct kref *kref)
{
	struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);

	if (node->nid != QRTR_EP_NID_AUTO)
		radix_tree_delete(&qrtr_nodes, node->nid);

	list_del(&node->item);
	mutex_unlock(&qrtr_node_lock);

	skb_queue_purge(&node->rx_queue);
	kfree(node);
}
/* Increment reference to node. */
static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
{
	if (node)
		kref_get(&node->ref);
	return node;
}
/* Decrement reference to node and release as necessary. */
static void qrtr_node_release(struct qrtr_node *node)
{
	if (!node)
		return;
	kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
}
/* Pass an outgoing packet socket buffer to the endpoint driver. */
static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb)
{
	int rc = -ENODEV;

	mutex_lock(&node->ep_lock);
	if (node->ep)
		rc = node->ep->xmit(node->ep, skb);
	else
		kfree_skb(skb);
	mutex_unlock(&node->ep_lock);

	return rc;
}
/* Lookup node by id.
 *
 * callers must release with qrtr_node_release()
 */
static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
{
	struct qrtr_node *node;

	mutex_lock(&qrtr_node_lock);
	node = radix_tree_lookup(&qrtr_nodes, nid);
	node = qrtr_node_acquire(node);
	mutex_unlock(&qrtr_node_lock);

	return node;
}
/* Assign node id to node.
 *
 * This is mostly useful for automatic node id assignment, based on
 * the source id in the incoming packet.
 */
static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
{
	if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
		return;

	mutex_lock(&qrtr_node_lock);
	radix_tree_insert(&qrtr_nodes, nid, node);
	node->nid = nid;
	mutex_unlock(&qrtr_node_lock);
}
/**
 * qrtr_endpoint_post() - post incoming data
 * @ep: endpoint handle
 * @data: data pointer
 * @len: size of data in bytes
 *
 * Return: 0 on success; negative error code on failure
 */
int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
{
	struct qrtr_node *node = ep->node;
	const struct qrtr_hdr *phdr = data;
	struct sk_buff *skb;
	unsigned int psize;
	unsigned int size;
	unsigned int type;
	unsigned int ver;
	unsigned int dst;

	if (len < QRTR_HDR_SIZE || len & 3)
		return -EINVAL;

	ver = le32_to_cpu(phdr->version);
	size = le32_to_cpu(phdr->size);
	type = le32_to_cpu(phdr->type);
	dst = le32_to_cpu(phdr->dst_port_id);

	psize = (size + 3) & ~3;

	if (ver != QRTR_PROTO_VER)
		return -EINVAL;

	if (len != psize + QRTR_HDR_SIZE)
		return -EINVAL;

	if (dst != QRTR_PORT_CTRL && type != QRTR_TYPE_DATA)
		return -EINVAL;

	skb = netdev_alloc_skb(NULL, len);
	if (!skb)
		return -ENOMEM;

	skb_reset_transport_header(skb);
	memcpy(skb_put(skb, len), data, len);

	skb_queue_tail(&node->rx_queue, skb);
	schedule_work(&node->work);

	return 0;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
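/* Illustrative sketch, not from the original source: a hypothetical
 * transport driver ("foo") would hand each fully assembled router
 * packet, header included, to qrtr_endpoint_post() from its receive
 * path.  All foo_* names below are assumptions for the example.
 *
 *	static void foo_rx(struct foo_transport *xprt,
 *			   const void *data, size_t len)
 *	{
 *		int rc;
 *
 *		rc = qrtr_endpoint_post(&xprt->ep, data, len);
 *		if (rc)
 *			dev_err(xprt->dev, "dropped malformed qrtr packet\n");
 *	}
 */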
/* Allocate and construct a resume-tx packet. */
static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
					     u32 dst_node, u32 port)
{
	const int pkt_len = 20;
	struct qrtr_hdr *hdr;
	struct sk_buff *skb;
	__le32 *buf;

	skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
	if (!skb)
		return NULL;
	skb_reset_transport_header(skb);

	hdr = (struct qrtr_hdr *)skb_put(skb, QRTR_HDR_SIZE);
	hdr->version = cpu_to_le32(QRTR_PROTO_VER);
	hdr->type = cpu_to_le32(QRTR_TYPE_RESUME_TX);
	hdr->src_node_id = cpu_to_le32(src_node);
	hdr->src_port_id = cpu_to_le32(QRTR_PORT_CTRL);
	hdr->confirm_rx = cpu_to_le32(0);
	hdr->size = cpu_to_le32(pkt_len);
	hdr->dst_node_id = cpu_to_le32(dst_node);
	hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);

	buf = (__le32 *)skb_put(skb, pkt_len);
	memset(buf, 0, pkt_len);
	buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
	buf[1] = cpu_to_le32(src_node);
	buf[2] = cpu_to_le32(port);

	return skb;
}
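/* Note added for clarity, not in the original source: the 20-byte
 * payload built above is five little-endian words, of which only the
 * first three carry information:
 *
 *	word 0: QRTR_TYPE_RESUME_TX
 *	word 1: the src_node argument (the node that consumed the packet)
 *	word 2: the port argument (the port that consumed the packet)
 *	words 3-4: zeroed by the memset() above, not otherwise used here
 */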
static struct qrtr_sock *qrtr_port_lookup(int port);
static void qrtr_port_put(struct qrtr_sock *ipc);
/* Handle and route a received packet.
 *
 * This will auto-reply with resume-tx packet as necessary.
 */
static void qrtr_node_rx_work(struct work_struct *work)
{
	struct qrtr_node *node = container_of(work, struct qrtr_node, work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
		const struct qrtr_hdr *phdr;
		u32 dst_node, dst_port;
		struct qrtr_sock *ipc;
		u32 src_node;
		int confirm;

		phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
		src_node = le32_to_cpu(phdr->src_node_id);
		dst_node = le32_to_cpu(phdr->dst_node_id);
		dst_port = le32_to_cpu(phdr->dst_port_id);
		confirm = !!phdr->confirm_rx;

		qrtr_node_assign(node, src_node);

		ipc = qrtr_port_lookup(dst_port);
		if (!ipc) {
			kfree_skb(skb);
		} else {
			if (sock_queue_rcv_skb(&ipc->sk, skb))
				kfree_skb(skb);

			qrtr_port_put(ipc);
		}

		if (confirm) {
			skb = qrtr_alloc_resume_tx(dst_node, node->nid, dst_port);
			if (!skb)
				break;
			if (qrtr_node_enqueue(node, skb))
				break;
		}
	}
}
/**
 * qrtr_endpoint_register() - register a new endpoint
 * @ep: endpoint to register
 * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
 * Return: 0 on success; negative error code on failure
 *
 * The specified endpoint must have the xmit function pointer set on call.
 */
int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
{
	struct qrtr_node *node;

	if (!ep || !ep->xmit)
		return -EINVAL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	INIT_WORK(&node->work, qrtr_node_rx_work);
	kref_init(&node->ref);
	mutex_init(&node->ep_lock);
	skb_queue_head_init(&node->rx_queue);
	node->nid = QRTR_EP_NID_AUTO;
	node->ep = ep;

	qrtr_node_assign(node, nid);

	mutex_lock(&qrtr_node_lock);
	list_add(&node->item, &qrtr_all_nodes);
	mutex_unlock(&qrtr_node_lock);

	ep->node = node;

	return 0;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
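/* Illustrative sketch, not from the original source: a hypothetical
 * transport embeds struct qrtr_endpoint, fills in .xmit and registers
 * with QRTR_EP_NID_AUTO so the node id is learned from incoming
 * traffic; teardown mirrors this with qrtr_endpoint_unregister().
 * All foo_* names below are assumptions for the example.
 *
 *	struct foo_transport {
 *		struct qrtr_endpoint ep;
 *		struct device *dev;
 *	};
 *
 *	static int foo_probe(struct foo_transport *xprt)
 *	{
 *		xprt->ep.xmit = foo_xmit;
 *		return qrtr_endpoint_register(&xprt->ep, QRTR_EP_NID_AUTO);
 *	}
 *
 *	static void foo_remove(struct foo_transport *xprt)
 *	{
 *		qrtr_endpoint_unregister(&xprt->ep);
 *	}
 */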
/**
 * qrtr_endpoint_unregister - unregister endpoint
 * @ep: endpoint to unregister
 */
void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
{
	struct qrtr_node *node = ep->node;

	mutex_lock(&node->ep_lock);
	node->ep = NULL;
	mutex_unlock(&node->ep_lock);

	qrtr_node_release(node);
	ep->node = NULL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);
/* Lookup socket by port.
 *
 * Callers must release with qrtr_port_put()
 */
static struct qrtr_sock *qrtr_port_lookup(int port)
{
	struct qrtr_sock *ipc;

	if (port == QRTR_PORT_CTRL)
		port = 0;

	mutex_lock(&qrtr_port_lock);
	ipc = idr_find(&qrtr_ports, port);
	if (ipc)
		sock_hold(&ipc->sk);
	mutex_unlock(&qrtr_port_lock);

	return ipc;
}
/* Release acquired socket. */
static void qrtr_port_put(struct qrtr_sock *ipc)
{
	sock_put(&ipc->sk);
}
/* Remove port assignment. */
static void qrtr_port_remove(struct qrtr_sock *ipc)
{
	int port = ipc->us.sq_port;

	if (port == QRTR_PORT_CTRL)
		port = 0;

	__sock_put(&ipc->sk);

	mutex_lock(&qrtr_port_lock);
	idr_remove(&qrtr_ports, port);
	mutex_unlock(&qrtr_port_lock);
}
/* Assign port number to socket.
 *
 * Specify port in the integer pointed to by port, and it will be adjusted
 * on return as necessary.
 *
 * Port may be:
 *   0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
 *   <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
 *   >QRTR_MIN_EPH_SOCKET: Specified; available to all
 */
static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
	int rc;

	mutex_lock(&qrtr_port_lock);
	if (!*port) {
		rc = idr_alloc(&qrtr_ports, ipc,
			       QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
			       GFP_ATOMIC);
		if (rc >= 0)
			*port = rc;
	} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
		rc = -EACCES;
	} else if (*port == QRTR_PORT_CTRL) {
		rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
	} else {
		rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
		if (rc >= 0)
			*port = rc;
	}
	mutex_unlock(&qrtr_port_lock);

	if (rc == -ENOSPC)
		return -EADDRINUSE;
	else if (rc < 0)
		return rc;

	sock_hold(&ipc->sk);

	return 0;
}
/* Bind socket to address.
 *
 * Socket should be locked upon call.
 */
static int __qrtr_bind(struct socket *sock,
		       const struct sockaddr_qrtr *addr, int zapped)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int port;
	int rc;

	/* rebinding ok */
	if (!zapped && addr->sq_port == ipc->us.sq_port)
		return 0;

	port = addr->sq_port;
	rc = qrtr_port_assign(ipc, &port);
	if (rc)
		return rc;

	/* unbind previous, if any */
	if (!zapped)
		qrtr_port_remove(ipc);
	ipc->us.sq_port = port;

	sock_reset_flag(sk, SOCK_ZAPPED);

	return 0;
}
/* Auto bind to an ephemeral port. */
static int qrtr_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr addr;

	if (!sock_flag(sk, SOCK_ZAPPED))
		return 0;

	addr.sq_family = AF_QIPCRTR;
	addr.sq_node = qrtr_local_nid;
	addr.sq_port = 0;

	return __qrtr_bind(sock, &addr, 1);
}
/* Bind socket to specified sockaddr. */
static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	if (addr->sq_node != ipc->us.sq_node)
		return -EINVAL;

	lock_sock(sk);
	rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
	release_sock(sk);

	return rc;
}
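/* Illustrative sketch, not from the original source: binding an
 * AF_QIPCRTR socket from userspace.  sq_node must match the local node
 * id, sq_port == 0 asks for an ephemeral port, and ports below
 * QRTR_MIN_EPH_SOCKET (0x4000) additionally require CAP_NET_ADMIN.
 * The numeric values are made up for the example.
 *
 *	int fd = socket(AF_QIPCRTR, SOCK_DGRAM, 0);
 *	struct sockaddr_qrtr sq = {
 *		.sq_family = AF_QIPCRTR,
 *		.sq_node   = 1,
 *		.sq_port   = 0x4242,
 *	};
 *
 *	if (bind(fd, (struct sockaddr *)&sq, sizeof(sq)) < 0)
 *		perror("bind");
 */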
/* Queue packet to local peer socket. */
static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb)
{
	const struct qrtr_hdr *phdr;
	struct qrtr_sock *ipc;

	phdr = (const struct qrtr_hdr *)skb_transport_header(skb);

	ipc = qrtr_port_lookup(le32_to_cpu(phdr->dst_port_id));
	if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
		kfree_skb(skb);
		return -ENODEV;
	}

	if (sock_queue_rcv_skb(&ipc->sk, skb)) {
		qrtr_port_put(ipc);
		kfree_skb(skb);
		return -ENOSPC;
	}

	qrtr_port_put(ipc);

	return 0;
}
/* Queue packet for broadcast. */
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb)
{
	struct sk_buff *skbn;

	mutex_lock(&qrtr_node_lock);
	list_for_each_entry(node, &qrtr_all_nodes, item) {
		skbn = skb_clone(skb, GFP_KERNEL);
		if (!skbn)
			break;
		skb_set_owner_w(skbn, skb->sk);
		qrtr_node_enqueue(node, skbn);
	}
	mutex_unlock(&qrtr_node_lock);

	qrtr_local_enqueue(node, skb);

	return 0;
}
static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct qrtr_node *node;
	struct qrtr_hdr *hdr;
	struct sk_buff *skb;
	size_t plen;
	int rc;

	if (msg->msg_flags & ~(MSG_DONTWAIT))
		return -EINVAL;

	if (len > 65535)
		return -EMSGSIZE;

	lock_sock(sk);

	if (addr) {
		if (msg->msg_namelen < sizeof(*addr)) {
			release_sock(sk);
			return -EINVAL;
		}

		if (addr->sq_family != AF_QIPCRTR) {
			release_sock(sk);
			return -EINVAL;
		}

		rc = qrtr_autobind(sock);
		if (rc) {
			release_sock(sk);
			return rc;
		}
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		addr = &ipc->peer;
	} else {
		release_sock(sk);
		return -ENOTCONN;
	}

	node = NULL;
	if (addr->sq_node == QRTR_NODE_BCAST) {
		enqueue_fn = qrtr_bcast_enqueue;
	} else if (addr->sq_node == ipc->us.sq_node) {
		enqueue_fn = qrtr_local_enqueue;
	} else {
		enqueue_fn = qrtr_node_enqueue;
		node = qrtr_node_lookup(addr->sq_node);
		if (!node) {
			rc = -ECONNRESET;
			goto out_node;
		}
	}

	plen = (len + 3) & ~3;
	skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_SIZE,
				  msg->msg_flags & MSG_DONTWAIT, &rc);
	if (!skb)
		goto out_node;

	skb_reset_transport_header(skb);
	skb_put(skb, len + QRTR_HDR_SIZE);

	hdr = (struct qrtr_hdr *)skb_transport_header(skb);
	hdr->version = cpu_to_le32(QRTR_PROTO_VER);
	hdr->src_node_id = cpu_to_le32(ipc->us.sq_node);
	hdr->src_port_id = cpu_to_le32(ipc->us.sq_port);
	hdr->confirm_rx = cpu_to_le32(0);
	hdr->size = cpu_to_le32(len);
	hdr->dst_node_id = cpu_to_le32(addr->sq_node);
	hdr->dst_port_id = cpu_to_le32(addr->sq_port);

	rc = skb_copy_datagram_from_iter(skb, QRTR_HDR_SIZE,
					 &msg->msg_iter, len);
	if (rc) {
		kfree_skb(skb);
		goto out_node;
	}

	if (plen != len) {
		rc = skb_pad(skb, plen - len);
		if (rc)
			goto out_node;
		skb_put(skb, plen - len);
	}

	if (ipc->us.sq_port == QRTR_PORT_CTRL) {
		if (len < 4) {
			rc = -EINVAL;
			kfree_skb(skb);
			goto out_node;
		}

		/* control messages already require the type as 'command' */
		skb_copy_bits(skb, QRTR_HDR_SIZE, &hdr->type, 4);
	} else {
		hdr->type = cpu_to_le32(QRTR_TYPE_DATA);
	}

	rc = enqueue_fn(node, skb);
	if (rc >= 0)
		rc = len;

out_node:
	qrtr_node_release(node);
	release_sock(sk);

	return rc;
}
static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t size, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	const struct qrtr_hdr *phdr;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, rc;

	lock_sock(sk);

	if (sock_flag(sk, SOCK_ZAPPED)) {
		release_sock(sk);
		return -EADDRNOTAVAIL;
	}

	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
				flags & MSG_DONTWAIT, &rc);
	if (!skb) {
		release_sock(sk);
		return rc;
	}

	phdr = (const struct qrtr_hdr *)skb_transport_header(skb);
	copied = le32_to_cpu(phdr->size);
	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	rc = skb_copy_datagram_msg(skb, QRTR_HDR_SIZE, msg, copied);
	if (rc < 0)
		goto out;
	rc = copied;

	if (addr) {
		addr->sq_family = AF_QIPCRTR;
		addr->sq_node = le32_to_cpu(phdr->src_node_id);
		addr->sq_port = le32_to_cpu(phdr->src_port_id);
		msg->msg_namelen = sizeof(*addr);
	}

out:
	skb_free_datagram(sk, skb);
	release_sock(sk);

	return rc;
}
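/* Illustrative sketch, not from the original source: a datagram
 * exchange over AF_QIPCRTR as seen from userspace.  The kernel adds and
 * strips the qrtr header, so only the payload crosses the socket; the
 * node and port numbers are made up for the example.
 *
 *	struct sockaddr_qrtr dst = {
 *		.sq_family = AF_QIPCRTR,
 *		.sq_node   = 3,
 *		.sq_port   = 0x4001,
 *	};
 *	struct sockaddr_qrtr src;
 *	socklen_t srclen = sizeof(src);
 *	char buf[256];
 *
 *	sendto(fd, "ping", 4, 0, (struct sockaddr *)&dst, sizeof(dst));
 *	recvfrom(fd, buf, sizeof(buf), 0, (struct sockaddr *)&src, &srclen);
 */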
static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
			int len, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	lock_sock(sk);

	sk->sk_state = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	rc = qrtr_autobind(sock);
	if (rc) {
		release_sock(sk);
		return rc;
	}

	ipc->peer = *addr;
	sock->state = SS_CONNECTED;
	sk->sk_state = TCP_ESTABLISHED;

	release_sock(sk);

	return 0;
}
static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
			int *len, int peer)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sockaddr_qrtr qaddr;
	struct sock *sk = sock->sk;

	lock_sock(sk);
	if (peer) {
		if (sk->sk_state != TCP_ESTABLISHED) {
			release_sock(sk);
			return -ENOTCONN;
		}

		qaddr = ipc->peer;
	} else {
		qaddr = ipc->us;
	}
	release_sock(sk);

	*len = sizeof(qaddr);
	qaddr.sq_family = AF_QIPCRTR;

	memcpy(saddr, &qaddr, sizeof(qaddr));

	return 0;
}
static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr *sq;
	struct sk_buff *skb;
	struct ifreq ifr;
	long len = 0;
	int rc = 0;

	lock_sock(sk);

	switch (cmd) {
	case TIOCOUTQ:
		len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (len < 0)
			len = 0;
		rc = put_user(len, (int __user *)argp);
		break;
	case TIOCINQ:
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			len = skb->len - QRTR_HDR_SIZE;
		rc = put_user(len, (int __user *)argp);
		break;
	case SIOCGIFADDR:
		if (copy_from_user(&ifr, argp, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}

		sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
		*sq = ipc->us;
		if (copy_to_user(argp, &ifr, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}
		break;
	case SIOCGSTAMP:
		rc = sock_get_timestamp(sk, argp);
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}

	release_sock(sk);

	return rc;
}
static int qrtr_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct qrtr_sock *ipc;

	if (!sk)
		return 0;

	lock_sock(sk);

	ipc = qrtr_sk(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);

	sock_set_flag(sk, SOCK_DEAD);
	sock->sk = NULL;

	if (!sock_flag(sk, SOCK_ZAPPED))
		qrtr_port_remove(ipc);

	skb_queue_purge(&sk->sk_receive_queue);

	release_sock(sk);
	sock_put(sk);

	return 0;
}
static const struct proto_ops qrtr_proto_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_QIPCRTR,
	.bind		= qrtr_bind,
	.connect	= qrtr_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.sendmsg	= qrtr_sendmsg,
	.recvmsg	= qrtr_recvmsg,
	.getname	= qrtr_getname,
	.ioctl		= qrtr_ioctl,
	.poll		= datagram_poll,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.release	= qrtr_release,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};
static struct proto qrtr_proto = {
	.name		= "QIPCRTR",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct qrtr_sock),
};
static int qrtr_create(struct net *net, struct socket *sock,
		       int protocol, int kern)
{
	struct qrtr_sock *ipc;
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -EPROTOTYPE;

	sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_set_flag(sk, SOCK_ZAPPED);

	sock_init_data(sock, sk);
	sock->ops = &qrtr_proto_ops;

	ipc = qrtr_sk(sk);
	ipc->us.sq_family = AF_QIPCRTR;
	ipc->us.sq_node = qrtr_local_nid;
	ipc->us.sq_port = 0;

	return 0;
}
static const struct nla_policy qrtr_policy[IFA_MAX + 1] = {
	[IFA_LOCAL] = { .type = NLA_U32 },
};
static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFA_MAX + 1];
	struct ifaddrmsg *ifm;
	int rc;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	ASSERT_RTNL();

	rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, qrtr_policy, extack);
	if (rc < 0)
		return rc;

	ifm = nlmsg_data(nlh);
	if (!tb[IFA_LOCAL])
		return -EINVAL;

	qrtr_local_nid = nla_get_u32(tb[IFA_LOCAL]);

	return 0;
}
static const struct net_proto_family qrtr_family = {
	.owner	= THIS_MODULE,
	.family	= AF_QIPCRTR,
	.create	= qrtr_create,
};
static int __init qrtr_proto_init(void)
{
	int rc;

	rc = proto_register(&qrtr_proto, 1);
	if (rc)
		return rc;

	rc = sock_register(&qrtr_family);
	if (rc) {
		proto_unregister(&qrtr_proto);
		return rc;
	}

	rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, NULL);

	return 0;
}
module_init(qrtr_proto_init);
static void __exit qrtr_proto_fini(void)
{
	rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR);
	sock_unregister(qrtr_family.family);
	proto_unregister(&qrtr_proto);
}
module_exit(qrtr_proto_fini);
MODULE_DESCRIPTION("Qualcomm IPC-router driver");
MODULE_LICENSE("GPL v2");