/* RxRPC packet reception
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/net_namespace.h>
#include "ar-internal.h"

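/* human-readable names of the RxRPC packet types, indexed by packet type */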
const char *rxrpc_pkts[] = {
	"?00",
	"DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
	"?09", "?10", "?11", "?12", "?13", "?14", "?15"
};

/*
 * queue a packet for recvmsg to pass to userspace
 * - the caller must hold a lock on call->lock
 * - must not be called with interrupts disabled (sk_filter() disables BH's)
 * - eats the packet whether successful or not
 * - there must be just one reference to the packet, which the caller passes to
 *   this function
 */
int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
			bool force, bool terminal)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_sock *rx = call->socket;
	struct sock *sk;
	int ret;

	_enter(",,%d,%d", force, terminal);

	ASSERT(!irqs_disabled());

	sp = rxrpc_skb(skb);
	ASSERTCMP(sp->call, ==, call);

	/* if we've already posted the terminal message for a call, then we
	 * don't post any more */
	if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
		_debug("already terminated");
		ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE);
		skb->destructor = NULL;
		sp->call = NULL;
		rxrpc_put_call(call);
		rxrpc_free_skb(skb);
		return 0;
	}

	sk = &rx->sk;

	if (!force) {
		/* cast skb->rcvbuf to unsigned... It's pointless, but
		 * reduces number of warnings when compiling with -W
		 * --ANK */
//		ret = -ENOBUFS;
//		if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
//		    (unsigned int) sk->sk_rcvbuf)
//			goto out;
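		/* give any filter attached to the socket a chance to inspect
		 * and possibly discard the packet */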
		ret = sk_filter(sk, skb);
		if (ret < 0)
			goto out;
	}

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags) &&
	    !test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
	    call->socket->sk.sk_state != RXRPC_CLOSE) {
		skb->destructor = rxrpc_packet_destructor;
		skb->dev = NULL;
		skb->sk = sk;
		atomic_add(skb->truesize, &sk->sk_rmem_alloc);

		if (terminal) {
			_debug("<<<< TERMINAL MESSAGE >>>>");
			set_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags);
		}

		/* allow interception by a kernel service */
		if (rx->interceptor) {
			rx->interceptor(sk, call->user_call_ID, skb);
			spin_unlock_bh(&sk->sk_receive_queue.lock);
		} else {

			/* Cache the SKB length before we tack it onto the
			 * receive queue.  Once it is added it no longer
			 * belongs to us and may be freed by other threads of
			 * control pulling packets from the queue */
			_net("post skb %p", skb);
			__skb_queue_tail(&sk->sk_receive_queue, skb);
			spin_unlock_bh(&sk->sk_receive_queue.lock);

			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_data_ready(sk);
		}
		skb = NULL;
	} else {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	ret = 0;

out:
	/* release the socket buffer */
	if (skb) {
		skb->destructor = NULL;
		sp->call = NULL;
		rxrpc_put_call(call);
		rxrpc_free_skb(skb);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * process a DATA packet, posting the packet to the appropriate queue
 * - eats the packet if successful
 */
static int rxrpc_fast_process_data(struct rxrpc_call *call,
				   struct sk_buff *skb, u32 seq)
{
	struct rxrpc_skb_priv *sp;
	bool terminal;
	int ret, ackbit, ack;

	_enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq);

	sp = rxrpc_skb(skb);
	ASSERTCMP(sp->call, ==, NULL);

	spin_lock(&call->lock);

	if (call->state > RXRPC_CALL_COMPLETE)
		goto discard;

	ASSERTCMP(call->rx_data_expect, >=, call->rx_data_post);
	ASSERTCMP(call->rx_data_post, >=, call->rx_data_recv);
	ASSERTCMP(call->rx_data_recv, >=, call->rx_data_eaten);

	if (seq < call->rx_data_post) {
		_debug("dup #%u [-%u]", seq, call->rx_data_post);
		ack = RXRPC_ACK_DUPLICATE;
		goto discard_and_ack;
	}

	/* we may already have the packet in the out of sequence queue */
	ackbit = seq - (call->rx_data_eaten + 1);
	ASSERTCMP(ackbit, >=, 0);
	if (__test_and_set_bit(ackbit, call->ackr_window)) {
		_debug("dup oos #%u [%u,%u]",
		       seq, call->rx_data_eaten, call->rx_data_post);
		ack = RXRPC_ACK_DUPLICATE;
		goto discard_and_ack;
	}

	if (seq >= call->ackr_win_top) {
		_debug("exceed #%u [%u]", seq, call->ackr_win_top);
		__clear_bit(ackbit, call->ackr_window);
		ack = RXRPC_ACK_EXCEEDS_WINDOW;
		goto discard_and_ack;
	}

	if (seq == call->rx_data_expect) {
		clear_bit(RXRPC_CALL_EXPECT_OOS, &call->flags);
		call->rx_data_expect++;
	} else if (seq > call->rx_data_expect) {
		_debug("oos #%u [%u]", seq, call->rx_data_expect);
		call->rx_data_expect = seq + 1;
		if (test_and_set_bit(RXRPC_CALL_EXPECT_OOS, &call->flags)) {
			ack = RXRPC_ACK_OUT_OF_SEQUENCE;
			goto enqueue_and_ack;
		}
		goto enqueue_packet;
	}

	if (seq != call->rx_data_post) {
		_debug("ahead #%u [%u]", seq, call->rx_data_post);
		goto enqueue_packet;
	}

	if (test_bit(RXRPC_CALL_RCVD_LAST, &call->flags))
		goto protocol_error;

	/* if the packet needs security things doing to it, then it goes down
	 * the slow path */
	if (call->conn->security)
		goto enqueue_packet;

	sp->call = call;
	rxrpc_get_call(call);
	terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
		    !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
	ret = rxrpc_queue_rcv_skb(call, skb, false, terminal);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOBUFS) {
			__clear_bit(ackbit, call->ackr_window);
			ack = RXRPC_ACK_NOSPACE;
			goto discard_and_ack;
		}
		goto out;
	}

	_debug("post #%u", seq);
	ASSERTCMP(call->rx_data_post, ==, seq);
	call->rx_data_post++;

	if (sp->hdr.flags & RXRPC_LAST_PACKET)
		set_bit(RXRPC_CALL_RCVD_LAST, &call->flags);

	/* if we've reached an out of sequence packet then we need to drain
	 * that queue into the socket Rx queue now */
	if (call->rx_data_post == call->rx_first_oos) {
		_debug("drain rx oos now");
		read_lock(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE &&
		    !test_and_set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events))
			rxrpc_queue_call(call);
		read_unlock(&call->state_lock);
	}

	spin_unlock(&call->lock);
	atomic_inc(&call->ackr_not_idle);
	rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false);
	_leave(" = 0 [posted]");
	return 0;

protocol_error:
	ret = -EBADMSG;
out:
	spin_unlock(&call->lock);
	_leave(" = %d", ret);
	return ret;

discard_and_ack:
	_debug("discard and ACK packet %p", skb);
	__rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);

discard:
	spin_unlock(&call->lock);
	rxrpc_free_skb(skb);
	_leave(" = 0 [discarded]");
	return 0;

enqueue_and_ack:
	__rxrpc_propose_ACK(call, ack, sp->hdr.serial, true);

enqueue_packet:
	_net("defer skb %p", skb);
	spin_unlock(&call->lock);
	skb_queue_tail(&call->rx_queue, skb);
	atomic_inc(&call->ackr_not_idle);
	read_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD)
		rxrpc_queue_call(call);
	read_unlock(&call->state_lock);
	_leave(" = 0 [queued]");
	return 0;
}

/*
 * assume an implicit ACKALL of the transmission phase of a client socket upon
 * reception of the first reply packet
 */
static void rxrpc_assume_implicit_ackall(struct rxrpc_call *call, u32 serial)
{
	write_lock_bh(&call->state_lock);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
		call->acks_latest = serial;

		_debug("implicit ACKALL %%%u", call->acks_latest);
		set_bit(RXRPC_CALL_RCVD_ACKALL, &call->events);
		write_unlock_bh(&call->state_lock);

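		/* the whole transmission phase has now been acknowledged, so
		 * there is no longer any point leaving the resend timer
		 * running */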
		if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
			clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
			clear_bit(RXRPC_CALL_RESEND, &call->events);
			clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		}
		break;

	default:
		write_unlock_bh(&call->state_lock);
		break;
	}
}

/*
 * post an incoming packet to the nominated call to deal with
 * - must get rid of the sk_buff, either by freeing it or by queuing it
 */
void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	__be32 _abort_code;
	u32 serial, hi_serial, seq, abort_code;

	_enter("%p,%p", call, skb);

	ASSERT(!irqs_disabled());

#if 0 // INJECT RX ERROR
	if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
		static int skip = 0;
		if (++skip == 3) {
			printk("DROPPED 3RD PACKET!!!!!!!!!!!!!\n");
			skip = 0;
			goto free_packet;
		}
	}
#endif

	/* track the latest serial number on this connection for ACK packet
	 * information */
	serial = ntohl(sp->hdr.serial);
	hi_serial = atomic_read(&call->conn->hi_serial);
	while (serial > hi_serial)
		hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
					   serial);

	/* request ACK generation for any ACK or DATA packet that requests
	 * an ACK */
	if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
		_proto("ACK Requested on %%%u", serial);
		rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED, sp->hdr.serial, false);
	}

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_ABORT:
		if (skb_copy_bits(skb, 0, &_abort_code,
				  sizeof(_abort_code)) < 0)
			goto protocol_error;

		abort_code = ntohl(_abort_code);
		_proto("Rx ABORT %%%u { %x }", serial, abort_code);

		write_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_COMPLETE) {
			call->state = RXRPC_CALL_REMOTELY_ABORTED;
			call->abort_code = abort_code;
			set_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
			rxrpc_queue_call(call);
		}
		goto free_packet_unlock;

	case RXRPC_PACKET_TYPE_BUSY:
		_proto("Rx BUSY %%%u", serial);

		if (call->conn->out_clientflag)
			goto protocol_error;

		write_lock_bh(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_SEND_REQUEST:
			call->state = RXRPC_CALL_SERVER_BUSY;
			set_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
			rxrpc_queue_call(call);
		case RXRPC_CALL_SERVER_BUSY:
			goto free_packet_unlock;
		default:
			goto protocol_error_locked;
		}

	default:
		_proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], serial);
		goto protocol_error;

	case RXRPC_PACKET_TYPE_DATA:
		seq = ntohl(sp->hdr.seq);

		_proto("Rx DATA %%%u { #%u }", serial, seq);

		if (seq == 0)
			goto protocol_error;

		call->ackr_prev_seq = sp->hdr.seq;

		/* received data implicitly ACKs all of the request packets we
		 * sent when we're acting as a client */
		if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
			rxrpc_assume_implicit_ackall(call, serial);

		switch (rxrpc_fast_process_data(call, skb, seq)) {
		case 0:
			skb = NULL;
			goto done;

		default:
			BUG();

			/* data packet received beyond the last packet */
		case -EBADMSG:
			goto protocol_error;
		}

	case RXRPC_PACKET_TYPE_ACKALL:
	case RXRPC_PACKET_TYPE_ACK:
		/* ACK processing is done in process context */
		read_lock_bh(&call->state_lock);
		if (call->state < RXRPC_CALL_DEAD) {
			skb_queue_tail(&call->rx_queue, skb);
			rxrpc_queue_call(call);
			skb = NULL;
		}
		read_unlock_bh(&call->state_lock);
		goto free_packet;
	}

protocol_error:
	_debug("protocol error");
	write_lock_bh(&call->state_lock);
protocol_error_locked:
	if (call->state <= RXRPC_CALL_COMPLETE) {
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->abort_code = RX_PROTOCOL_ERROR;
		set_bit(RXRPC_CALL_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
free_packet_unlock:
	write_unlock_bh(&call->state_lock);
free_packet:
	rxrpc_free_skb(skb);
done:
	_leave("");
}

/*
 * split up a jumbo data packet
 */
static void rxrpc_process_jumbo_packet(struct rxrpc_call *call,
				       struct sk_buff *jumbo)
{
	struct rxrpc_jumbo_header jhdr;
	struct rxrpc_skb_priv *sp;
	struct sk_buff *part;

	_enter(",{%u,%u}", jumbo->data_len, jumbo->len);

	sp = rxrpc_skb(jumbo);

	do {
		sp->hdr.flags &= ~RXRPC_JUMBO_PACKET;

		/* make a clone to represent the first subpacket in what's left
		 * of the jumbo packet */
		part = skb_clone(jumbo, GFP_ATOMIC);
		if (!part) {
			/* simply ditch the tail in the event of ENOMEM */
			pskb_trim(jumbo, RXRPC_JUMBO_DATALEN);
			break;
		}

		pskb_trim(part, RXRPC_JUMBO_DATALEN);

		if (!pskb_pull(jumbo, RXRPC_JUMBO_DATALEN))
			goto protocol_error;

		if (skb_copy_bits(jumbo, 0, &jhdr, sizeof(jhdr)) < 0)
			goto protocol_error;
		if (!pskb_pull(jumbo, sizeof(jhdr)))
			BUG();

		sp->hdr.seq	= htonl(ntohl(sp->hdr.seq) + 1);
		sp->hdr.serial	= htonl(ntohl(sp->hdr.serial) + 1);
		sp->hdr.flags	= jhdr.flags;
		sp->hdr._rsvd	= jhdr._rsvd;

		_proto("Rx DATA Jumbo %%%u", ntohl(sp->hdr.serial) - 1);

		rxrpc_fast_process_packet(call, part);
		part = NULL;

	} while (sp->hdr.flags & RXRPC_JUMBO_PACKET);

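	/* whatever remains of the original buffer now holds the final
	 * subpacket, so hand it on as an ordinary data packet */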
	rxrpc_fast_process_packet(call, jumbo);
	_leave("");
	return;

protocol_error:
	_debug("protocol error");
	rxrpc_free_skb(part);
	rxrpc_free_skb(jumbo);
	write_lock_bh(&call->state_lock);
	if (call->state <= RXRPC_CALL_COMPLETE) {
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->abort_code = RX_PROTOCOL_ERROR;
		set_bit(RXRPC_CALL_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
	write_unlock_bh(&call->state_lock);
	_leave("");
}

/*
 * post an incoming packet to the appropriate call/socket to deal with
 * - must get rid of the sk_buff, either by freeing it or by queuing it
 */
static void rxrpc_post_packet_to_call(struct rxrpc_call *call,
				      struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp;

	_enter("%p,%p", call, skb);

	sp = rxrpc_skb(skb);

	_debug("extant call [%d]", call->state);

	read_lock(&call->state_lock);
	switch (call->state) {
	case RXRPC_CALL_LOCALLY_ABORTED:
		if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) {
			rxrpc_queue_call(call);
			goto free_unlock;
		}
	case RXRPC_CALL_REMOTELY_ABORTED:
	case RXRPC_CALL_NETWORK_ERROR:
	case RXRPC_CALL_DEAD:
		goto dead_call;
	case RXRPC_CALL_COMPLETE:
	case RXRPC_CALL_CLIENT_FINAL_ACK:
		/* complete server call */
		if (call->conn->in_clientflag)
			goto dead_call;
		/* resend last packet of a completed call */
		_debug("final ack again");
		rxrpc_get_call(call);
		set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
		rxrpc_queue_call(call);
		goto free_unlock;
	default:
		break;
	}

	read_unlock(&call->state_lock);
	rxrpc_get_call(call);

	if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
	    sp->hdr.flags & RXRPC_JUMBO_PACKET)
		rxrpc_process_jumbo_packet(call, skb);
	else
		rxrpc_fast_process_packet(call, skb);

	rxrpc_put_call(call);
	goto done;

dead_call:
	if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
		skb->priority = RX_CALL_DEAD;
		rxrpc_reject_packet(call->conn->trans->local, skb);
		goto unlock;
	}
free_unlock:
	rxrpc_free_skb(skb);
unlock:
	read_unlock(&call->state_lock);
done:
	_leave("");
}

/*
 * post connection-level events to the connection
 * - this includes challenges, responses and some aborts
 */
static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
				      struct sk_buff *skb)
{
	_enter("%p,%p", conn, skb);

	atomic_inc(&conn->usage);
	skb_queue_tail(&conn->rx_queue, skb);
	rxrpc_queue_conn(conn);
}

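/*
 * look up the connection that a connection-level packet belongs to, going via
 * the peer and transport records for the packet's source address
 */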
static struct rxrpc_connection *rxrpc_conn_from_local(struct rxrpc_local *local,
						      struct sk_buff *skb,
						      struct rxrpc_skb_priv *sp)
{
	struct rxrpc_peer *peer;
	struct rxrpc_transport *trans;
	struct rxrpc_connection *conn;

	peer = rxrpc_find_peer(local, ip_hdr(skb)->saddr,
			       udp_hdr(skb)->source);
	if (IS_ERR(peer))
		goto cant_find_conn;

	trans = rxrpc_find_transport(local, peer);
	rxrpc_put_peer(peer);
	if (!trans)
		goto cant_find_conn;

	conn = rxrpc_find_connection(trans, &sp->hdr);
	rxrpc_put_transport(trans);
	if (!conn)
		goto cant_find_conn;

	return conn;

cant_find_conn:
	return NULL;
}

/*
 * handle data received on the local endpoint
 * - may be called in interrupt context
 */
void rxrpc_data_ready(struct sock *sk)
{
	struct rxrpc_skb_priv *sp;
	struct rxrpc_local *local;
	struct sk_buff *skb;
	int ret;

	_enter("%p", sk);

	ASSERT(!irqs_disabled());

	read_lock_bh(&rxrpc_local_lock);
	local = sk->sk_user_data;
	if (local && atomic_read(&local->usage) > 0)
		rxrpc_get_local(local);
	else
		local = NULL;
	read_unlock_bh(&rxrpc_local_lock);
	if (!local) {
		_leave(" [local dead]");
		return;
	}

	skb = skb_recv_datagram(sk, 0, 1, &ret);
	if (!skb) {
		rxrpc_put_local(local);
		if (ret == -EAGAIN)
			return;
		_debug("UDP socket error %d", ret);
		return;
	}

	_net("recv skb %p", skb);

	/* we'll probably need to checksum it (didn't call sock_recvmsg) */
	if (skb_checksum_complete(skb)) {
		rxrpc_free_skb(skb);
		rxrpc_put_local(local);
		UDP_INC_STATS_BH(&init_net, UDP_MIB_INERRORS, 0);
		_leave(" [CSUM failed]");
		return;
	}

	UDP_INC_STATS_BH(&init_net, UDP_MIB_INDATAGRAMS, 0);

	/* the socket buffer we have is owned by UDP, with UDP's data all over
	 * it, but we really want our own */
	skb_orphan(skb);
	sp = rxrpc_skb(skb);
	memset(sp, 0, sizeof(*sp));

	_net("Rx UDP packet from %08x:%04hu",
	     ntohl(ip_hdr(skb)->saddr), ntohs(udp_hdr(skb)->source));

	/* dig out the RxRPC connection details */
	if (skb_copy_bits(skb, sizeof(struct udphdr), &sp->hdr,
			  sizeof(sp->hdr)) < 0)
		goto bad_message;
	if (!pskb_pull(skb, sizeof(struct udphdr) + sizeof(sp->hdr)))
		BUG();

	_net("Rx RxRPC %s ep=%x call=%x:%x",
	     sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
	     ntohl(sp->hdr.epoch),
	     ntohl(sp->hdr.cid),
	     ntohl(sp->hdr.callNumber));

	if (sp->hdr.type == 0 || sp->hdr.type >= RXRPC_N_PACKET_TYPES) {
		_proto("Rx Bad Packet Type %u", sp->hdr.type);
		goto bad_message;
	}

	if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
	    (sp->hdr.callNumber == 0 || sp->hdr.seq == 0))
		goto bad_message;

	if (sp->hdr.callNumber == 0) {
		/* This is a connection-level packet. These should be
		 * fairly rare, so the extra overhead of looking them up the
		 * old-fashioned way doesn't really hurt */
		struct rxrpc_connection *conn;

		conn = rxrpc_conn_from_local(local, skb, sp);
		if (!conn)
			goto cant_route_call;

		_debug("CONN %p {%d}", conn, conn->debug_id);
		rxrpc_post_packet_to_conn(conn, skb);
		rxrpc_put_connection(conn);
	} else {
		struct rxrpc_call *call;
		u8 in_clientflag = 0;

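		/* look the call up in the hash table keyed by the connection
		 * parameters and the sender's address */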
		if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
			in_clientflag = RXRPC_CLIENT_INITIATED;
		call = rxrpc_find_call_hash(in_clientflag, sp->hdr.cid,
					    sp->hdr.callNumber, sp->hdr.epoch,
					    sp->hdr.serviceId, local, AF_INET,
					    (u8 *)&ip_hdr(skb)->saddr);
		if (call)
			rxrpc_post_packet_to_call(call, skb);
		else
			goto cant_route_call;
	}
	rxrpc_put_local(local);
	_leave("");
	return;

cant_route_call:
	_debug("can't route call");
	if (sp->hdr.flags & RXRPC_CLIENT_INITIATED &&
	    sp->hdr.type == RXRPC_PACKET_TYPE_DATA) {
		if (sp->hdr.seq == cpu_to_be32(1)) {
			_debug("first packet");
			skb_queue_tail(&local->accept_queue, skb);
			rxrpc_queue_work(&local->acceptor);
			rxrpc_put_local(local);
			_leave(" [incoming]");
			return;
		}
		skb->priority = RX_INVALID_OPERATION;
	} else {
		skb->priority = RX_CALL_DEAD;
	}

	if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) {
		_debug("reject type %d", sp->hdr.type);
		rxrpc_reject_packet(local, skb);
	}
	rxrpc_put_local(local);
	_leave(" [no call]");
	return;

bad_message:
	skb->priority = RX_PROTOCOL_ERROR;
	rxrpc_reject_packet(local, skb);
	rxrpc_put_local(local);
	_leave(" [badmsg]");
}