1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* SCTP kernel implementation
3 * Copyright (c) 1999-2000 Cisco, Inc.
4 * Copyright (c) 1999-2001 Motorola, Inc.
5 * Copyright (c) 2002 International Business Machines, Corp.
7 * This file is part of the SCTP kernel implementation
9 * These functions are the methods for accessing the SCTP inqueue.
11 * An SCTP inqueue is a queue into which you push SCTP packets
12 * (which might be bundles or fragments of chunks) and out of which you
13 * pop SCTP whole chunks.
15 * Please send any bug reports or fixes you make to the
17 * lksctp developers <linux-sctp@vger.kernel.org>
19 * Written or modified by:
20 * La Monte H.P. Yarroll <piggy@acm.org>
21 * Karl Knutson <karl@athena.chicago.il.us>
24 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26 #include <net/sctp/sctp.h>
27 #include <net/sctp/sm.h>
28 #include <linux/interrupt.h>
29 #include <linux/slab.h>
31 /* Initialize an SCTP inqueue. */
32 void sctp_inq_init(struct sctp_inq
*queue
)
34 INIT_LIST_HEAD(&queue
->in_chunk_list
);
35 queue
->in_progress
= NULL
;
37 /* Create a task for delivering data. */
38 INIT_WORK(&queue
->immediate
, NULL
);
41 /* Properly release the chunk which is being worked on. */
42 static inline void sctp_inq_chunk_free(struct sctp_chunk
*chunk
)
45 chunk
->skb
= chunk
->head_skb
;
46 sctp_chunk_free(chunk
);
49 /* Release the memory associated with an SCTP inqueue. */
50 void sctp_inq_free(struct sctp_inq
*queue
)
52 struct sctp_chunk
*chunk
, *tmp
;
54 /* Empty the queue. */
55 list_for_each_entry_safe(chunk
, tmp
, &queue
->in_chunk_list
, list
) {
56 list_del_init(&chunk
->list
);
57 sctp_chunk_free(chunk
);
60 /* If there is a packet which is currently being worked on,
63 if (queue
->in_progress
) {
64 sctp_inq_chunk_free(queue
->in_progress
);
65 queue
->in_progress
= NULL
;
69 /* Put a new packet in an SCTP inqueue.
70 * We assume that packet->sctp_hdr is set and in host byte order.
72 void sctp_inq_push(struct sctp_inq
*q
, struct sctp_chunk
*chunk
)
74 /* Directly call the packet handling routine. */
75 if (chunk
->rcvr
->dead
) {
76 sctp_chunk_free(chunk
);
80 /* We are now calling this either from the soft interrupt
81 * or from the backlog processing.
82 * Eventually, we should clean up inqueue to not rely
83 * on the BH related data structures.
85 list_add_tail(&chunk
->list
, &q
->in_chunk_list
);
87 chunk
->asoc
->stats
.ipackets
++;
88 q
->immediate
.func(&q
->immediate
);
91 /* Peek at the next chunk on the inqeue. */
92 struct sctp_chunkhdr
*sctp_inq_peek(struct sctp_inq
*queue
)
94 struct sctp_chunk
*chunk
;
95 struct sctp_chunkhdr
*ch
= NULL
;
97 chunk
= queue
->in_progress
;
98 /* If there is no more chunks in this packet, say so */
99 if (chunk
->singleton
||
100 chunk
->end_of_packet
||
104 ch
= (struct sctp_chunkhdr
*)chunk
->chunk_end
;
110 /* Extract a chunk from an SCTP inqueue.
112 * WARNING: If you need to put the chunk on another queue, you need to
113 * make a shallow copy (clone) of it.
115 struct sctp_chunk
*sctp_inq_pop(struct sctp_inq
*queue
)
117 struct sctp_chunk
*chunk
;
118 struct sctp_chunkhdr
*ch
= NULL
;
120 /* The assumption is that we are safe to process the chunks
124 chunk
= queue
->in_progress
;
126 /* There is a packet that we have been working on.
127 * Any post processing work to do before we move on?
129 if (chunk
->singleton
||
130 chunk
->end_of_packet
||
132 if (chunk
->head_skb
== chunk
->skb
) {
133 chunk
->skb
= skb_shinfo(chunk
->skb
)->frag_list
;
136 if (chunk
->skb
->next
) {
137 chunk
->skb
= chunk
->skb
->next
;
141 sctp_inq_chunk_free(chunk
);
142 chunk
= queue
->in_progress
= NULL
;
144 /* Nothing to do. Next chunk in the packet, please. */
145 ch
= (struct sctp_chunkhdr
*)chunk
->chunk_end
;
146 /* Force chunk->skb->data to chunk->chunk_end. */
147 skb_pull(chunk
->skb
, chunk
->chunk_end
- chunk
->skb
->data
);
148 /* We are guaranteed to pull a SCTP header. */
152 /* Do we need to take the next packet out of the queue to process? */
154 struct list_head
*entry
;
157 /* Is the queue empty? */
158 entry
= sctp_list_dequeue(&queue
->in_chunk_list
);
162 chunk
= list_entry(entry
, struct sctp_chunk
, list
);
164 if (skb_is_gso(chunk
->skb
) && skb_is_gso_sctp(chunk
->skb
)) {
165 /* GSO-marked skbs but without frags, handle
168 if (skb_shinfo(chunk
->skb
)->frag_list
)
169 chunk
->head_skb
= chunk
->skb
;
171 /* skbs with "cover letter" */
172 if (chunk
->head_skb
&& chunk
->skb
->data_len
== chunk
->skb
->len
)
173 chunk
->skb
= skb_shinfo(chunk
->skb
)->frag_list
;
175 if (WARN_ON(!chunk
->skb
)) {
176 __SCTP_INC_STATS(dev_net(chunk
->skb
->dev
), SCTP_MIB_IN_PKT_DISCARDS
);
177 sctp_chunk_free(chunk
);
183 sock_rps_save_rxhash(chunk
->asoc
->base
.sk
, chunk
->skb
);
185 queue
->in_progress
= chunk
;
188 /* This is the first chunk in the packet. */
189 ch
= (struct sctp_chunkhdr
*)chunk
->skb
->data
;
190 chunk
->singleton
= 1;
191 chunk
->data_accepted
= 0;
194 chunk
->has_asconf
= 0;
195 chunk
->end_of_packet
= 0;
196 if (chunk
->head_skb
) {
198 *cb
= SCTP_INPUT_CB(chunk
->skb
),
199 *head_cb
= SCTP_INPUT_CB(chunk
->head_skb
);
201 cb
->chunk
= head_cb
->chunk
;
202 cb
->af
= head_cb
->af
;
206 chunk
->chunk_hdr
= ch
;
207 chunk
->chunk_end
= ((__u8
*)ch
) + SCTP_PAD4(ntohs(ch
->length
));
208 skb_pull(chunk
->skb
, sizeof(*ch
));
209 chunk
->subh
.v
= NULL
; /* Subheader is no longer valid. */
211 if (chunk
->chunk_end
+ sizeof(*ch
) <= skb_tail_pointer(chunk
->skb
)) {
212 /* This is not a singleton */
213 chunk
->singleton
= 0;
214 } else if (chunk
->chunk_end
> skb_tail_pointer(chunk
->skb
)) {
215 /* Discard inside state machine. */
217 chunk
->chunk_end
= skb_tail_pointer(chunk
->skb
);
219 /* We are at the end of the packet, so mark the chunk
220 * in case we need to send a SACK.
222 chunk
->end_of_packet
= 1;
225 pr_debug("+++sctp_inq_pop+++ chunk:%p[%s], length:%d, skb->len:%d\n",
226 chunk
, sctp_cname(SCTP_ST_CHUNK(chunk
->chunk_hdr
->type
)),
227 ntohs(chunk
->chunk_hdr
->length
), chunk
->skb
->len
);
/* Set a top-half handler.
 *
 * Originally, the top-half handler was scheduled as a BH.  We now
 * call the handler directly in sctp_inq_push() at a time that
 * we know we are lock safe.
 * The intent is that this routine will pull stuff out of the
 * inqueue and process it.
 */
void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
{
	INIT_WORK(&q->immediate, callback);
}