/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	This module is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	History
 *	X.25 001	Jonathan Naylor	  Started coding.
 *	X.25 002	Jonathan Naylor	  Centralised disconnection processing.
 *	mar/20/00	Daniela Squassoni Disabling/enabling of facilities
 *					  negotiation.
 *	jun/24/01	Arnaldo C. Melo	  use skb_queue_purge, cleanups
 *	apr/04/15	Shaun Pereira	  Fast select with no
 *					  restriction on response.
 */
#define pr_fmt(fmt) "X25: " fmt
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/x25.h>
/*
 *	This routine purges all of the queues of frames.
 */
void x25_clear_queues(struct sock *sk)
{
	struct x25_sock *x25 = x25_sk(sk);

	skb_queue_purge(&sk->sk_write_queue);
	skb_queue_purge(&x25->ack_queue);
	skb_queue_purge(&x25->interrupt_in_queue);
	skb_queue_purge(&x25->interrupt_out_queue);
	skb_queue_purge(&x25->fragment_queue);
}
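
/*
 * Roughly: sk_write_queue holds frames not yet handed to the link layer,
 * ack_queue holds frames sent but not yet acknowledged, the interrupt
 * queues carry X.25 interrupt (expedited) data in each direction, and
 * fragment_queue collects partially reassembled M-bit sequences.
 */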

/*
 * This routine purges the input queue of those frames that have been
 * acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the
 * SDL diagram.
 */
void x25_frames_acked(struct sock *sk, unsigned short nr)
{
	struct sk_buff *skb;
	struct x25_sock *x25 = x25_sk(sk);
	int modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;

	/*
	 * Remove all the ack-ed frames from the ack queue and free them.
	 */
	while (skb_peek(&x25->ack_queue) && x25->va != nr) {
		skb = skb_dequeue(&x25->ack_queue);
		kfree_skb(skb);
		x25->va = (x25->va + 1) % modulus;
	}
}
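
/*
 * For illustration (standard modulus 8): if V(A) is 5 and an N(R) of 7
 * arrives, the frames sent with N(S) 5 and 6 are dequeued and freed,
 * and V(A) ends up at 7.
 */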

void x25_requeue_frames(struct sock *sk)
{
	struct sk_buff *skb, *skb_prev = NULL;

	/*
	 * Requeue all the un-ack-ed frames on the output queue to be picked
	 * up by x25_kick. This arrangement handles the possibility of an
	 * empty output queue.
	 */
	while ((skb = skb_dequeue(&x25_sk(sk)->ack_queue)) != NULL) {
		if (!skb_prev)
			skb_queue_head(&sk->sk_write_queue, skb);
		else
			skb_append(skb_prev, skb, &sk->sk_write_queue);
		skb_prev = skb;
	}
}
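
/*
 * Note on x25_requeue_frames(): only the first un-acked frame is pushed
 * to the head of the write queue; each subsequent one is appended right
 * behind the previous one, so retransmissions keep their original order
 * ahead of any frames that were still waiting to be sent.
 */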

/*
 *	Validate that the value of nr is between va and vs. Return true or
 *	false for testing.
 */
int x25_validate_nr(struct sock *sk, unsigned short nr)
{
	struct x25_sock *x25 = x25_sk(sk);
	unsigned short vc = x25->va;
	int modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;

	while (vc != x25->vs) {
		if (nr == vc)
			return 1;
		vc = (vc + 1) % modulus;
	}

	return nr == x25->vs ? 1 : 0;
}
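
/*
 * Worked example (modulus 8): with va = 6 and vs = 2 the in-flight
 * sequence space wraps around, so the accepted values for N(R) are
 * 6, 7, 0, 1 and 2; anything else is rejected as out of range.
 */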

/*
 *	This routine is called when the packet layer internally generates a
 *	control frame.
 */
void x25_write_internal(struct sock *sk, int frametype)
{
	struct x25_sock *x25 = x25_sk(sk);
	struct sk_buff *skb;
	unsigned char *dptr;
	unsigned char facilities[X25_MAX_FAC_LEN];
	unsigned char addresses[1 + X25_ADDR_LEN];
	unsigned char lci1, lci2;
	/*
	 *	Default safe frame size.
	 */
	int len = X25_MAX_L2_LEN + X25_EXT_MIN_LEN;

	/*
	 *	Adjust frame size.
	 */
	switch (frametype) {
	case X25_CALL_REQUEST:
		len += 1 + X25_ADDR_LEN + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
		break;
	case X25_CALL_ACCEPTED: /* fast sel with no restr on resp */
		if (x25->facilities.reverse & 0x80)
			len += 1 + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
		else
			len += 1 + X25_MAX_FAC_LEN;
		break;
	case X25_CLEAR_REQUEST:
	case X25_RESET_REQUEST:
		len += 2;
		break;
	case X25_RR:
	case X25_RNR:
	case X25_REJ:
	case X25_CLEAR_CONFIRMATION:
	case X25_INTERRUPT_CONFIRMATION:
	case X25_RESET_CONFIRMATION:
		break;
	default:
		pr_err("invalid frame type %02X\n", frametype);
		return;
	}

	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
		return;

	/*
	 *	Space for Ethernet and 802.2 LLC headers.
	 */
	skb_reserve(skb, X25_MAX_L2_LEN);

	/*
	 *	Make space for the GFI and LCI, and fill them in.
	 */
	dptr = skb_put(skb, 2);

	lci1 = (x25->lci >> 8) & 0x0F;
	lci2 = (x25->lci >> 0) & 0xFF;

	if (x25->neighbour->extended) {
		*dptr++ = lci1 | X25_GFI_EXTSEQ;
		*dptr++ = lci2;
	} else {
		*dptr++ = lci1 | X25_GFI_STDSEQ;
		*dptr++ = lci2;
	}
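
	/*
	 * At this point octets 0 and 1 hold the packet header: the GFI
	 * (which also encodes standard vs extended sequence numbering) sits
	 * in the upper nibble of octet 0, and the 12-bit logical channel
	 * identifier spans the lower nibble of octet 0 plus all of octet 1.
	 */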

	/*
	 *	Now fill in the frame type specific information.
	 */
	switch (frametype) {

	case X25_CALL_REQUEST:
		dptr    = skb_put(skb, 1);
		*dptr++ = X25_CALL_REQUEST;
		len     = x25_addr_aton(addresses, &x25->dest_addr,
					&x25->source_addr);
		dptr    = skb_put(skb, len);
		memcpy(dptr, addresses, len);
		len     = x25_create_facilities(facilities,
						&x25->facilities,
						&x25->dte_facilities,
						x25->neighbour->global_facil_mask);
		dptr    = skb_put(skb, len);
		memcpy(dptr, facilities, len);
		dptr    = skb_put(skb, x25->calluserdata.cudlength);
		memcpy(dptr, x25->calluserdata.cuddata,
		       x25->calluserdata.cudlength);
		x25->calluserdata.cudlength = 0;
		break;
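
	/*
	 * The Call Request built above is: one packet type octet, the
	 * BCD-packed called/calling address block produced by
	 * x25_addr_aton(), the negotiated facilities block, and finally
	 * any pending call user data.
	 */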

	case X25_CALL_ACCEPTED:
		dptr    = skb_put(skb, 2);
		*dptr++ = X25_CALL_ACCEPTED;
		*dptr++ = 0x00;		/* Address lengths */
		len     = x25_create_facilities(facilities,
						&x25->facilities,
						&x25->dte_facilities,
						x25->vc_facil_mask);
		dptr    = skb_put(skb, len);
		memcpy(dptr, facilities, len);

		/* fast select with no restriction on response
		   allows call user data. Userland must
		   ensure it is ours and not theirs */
		if (x25->facilities.reverse & 0x80) {
			dptr = skb_put(skb, x25->calluserdata.cudlength);
			memcpy(dptr, x25->calluserdata.cuddata,
			       x25->calluserdata.cudlength);
		}
		x25->calluserdata.cudlength = 0;
		break;
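
	/*
	 * Bit 0x80 of facilities.reverse is how this implementation records
	 * that fast select was negotiated; only then may a Call Accepted
	 * carry call user data back to the caller.
	 */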

	case X25_CLEAR_REQUEST:
		dptr    = skb_put(skb, 3);
		*dptr++ = frametype;
		*dptr++ = x25->causediag.cause;
		*dptr++ = x25->causediag.diagnostic;
		break;

	case X25_RESET_REQUEST:
		dptr    = skb_put(skb, 3);
		*dptr++ = frametype;
		*dptr++ = 0x00;		/* XXX */
		*dptr++ = 0x00;		/* XXX */
		break;

	case X25_RR:
	case X25_RNR:
	case X25_REJ:
		if (x25->neighbour->extended) {
			dptr     = skb_put(skb, 2);
			*dptr++  = frametype;
			*dptr++  = (x25->vr << 1) & 0xFE;
		} else {
			dptr     = skb_put(skb, 1);
			*dptr    = frametype;
			*dptr++ |= (x25->vr << 5) & 0xE0;
		}
		break;
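
	/*
	 * With extended sequence numbering the receive sequence number V(R)
	 * gets an octet of its own (bits 1-7); with standard numbering it is
	 * packed into bits 5-7 of the type octet, which is why it is OR-ed
	 * into the byte that already holds the frame type.
	 */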

	case X25_CLEAR_CONFIRMATION:
	case X25_INTERRUPT_CONFIRMATION:
	case X25_RESET_CONFIRMATION:
		dptr  = skb_put(skb, 1);
		*dptr = frametype;
		break;
	}

	x25_transmit_link(skb, x25->neighbour);
}

/*
 *	Unpick the contents of the passed X.25 Packet Layer frame.
 */
int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
	       int *d, int *m)
{
	struct x25_sock *x25 = x25_sk(sk);
	unsigned char *frame;

	if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
		return X25_ILLEGAL;
	frame = skb->data;

	*ns = *nr = *q = *d = *m = 0;

	switch (frame[2]) {
	case X25_CALL_REQUEST:
	case X25_CALL_ACCEPTED:
	case X25_CLEAR_REQUEST:
	case X25_CLEAR_CONFIRMATION:
	case X25_INTERRUPT:
	case X25_INTERRUPT_CONFIRMATION:
	case X25_RESET_REQUEST:
	case X25_RESET_CONFIRMATION:
	case X25_RESTART_REQUEST:
	case X25_RESTART_CONFIRMATION:
	case X25_REGISTRATION_REQUEST:
	case X25_REGISTRATION_CONFIRMATION:
		/* No sequence numbers to extract for these types. */
		return frame[2];
	}

	if (x25->neighbour->extended) {
		if (frame[2] == X25_RR  ||
		    frame[2] == X25_RNR ||
		    frame[2] == X25_REJ) {
			if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
				return X25_ILLEGAL;
			frame = skb->data;

			*nr = (frame[3] >> 1) & 0x7F;
			return frame[2];
		}
	} else {
		if ((frame[2] & 0x1F) == X25_RR  ||
		    (frame[2] & 0x1F) == X25_RNR ||
		    (frame[2] & 0x1F) == X25_REJ) {
			*nr = (frame[2] >> 5) & 0x07;
			return frame[2] & 0x1F;
		}
	}
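
	/*
	 * In standard (modulo 8) mode the supervisory type shares its octet
	 * with N(R), so only the low five bits identify RR/RNR/REJ and the
	 * top three bits carry the sequence number; hence the 0x1F masking
	 * above.
	 */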

	if (x25->neighbour->extended) {
		if ((frame[2] & 0x01) == X25_DATA) {
			if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
				return X25_ILLEGAL;
			frame = skb->data;

			*q  = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
			*d  = (frame[0] & X25_D_BIT) == X25_D_BIT;
			*m  = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT;
			*nr = (frame[3] >> 1) & 0x7F;
			*ns = (frame[2] >> 1) & 0x7F;
			return X25_DATA;
		}
	} else {
		if ((frame[2] & 0x01) == X25_DATA) {
			*q  = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
			*d  = (frame[0] & X25_D_BIT) == X25_D_BIT;
			*m  = (frame[2] & X25_STD_M_BIT) == X25_STD_M_BIT;
			*nr = (frame[2] >> 5) & 0x07;
			*ns = (frame[2] >> 1) & 0x07;
			return X25_DATA;
		}
	}
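
	/*
	 * Data packets keep the Q and D qualifier bits in the GFI octet.
	 * With extended numbering, N(S) and N(R) each get seven bits of
	 * their own octet and the M (more data) bit is bit 0 of the N(R)
	 * octet; with standard numbering, N(S), the M bit and N(R) are all
	 * squeezed into the type octet.
	 */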
343 pr_debug("invalid PLP frame %02X %02X %02X\n",
344 frame
[0], frame
[1], frame
[2]);

void x25_disconnect(struct sock *sk, int reason, unsigned char cause,
		    unsigned char diagnostic)
{
	struct x25_sock *x25 = x25_sk(sk);

	x25_clear_queues(sk);

	x25->state = X25_STATE_0;

	x25->causediag.cause      = cause;
	x25->causediag.diagnostic = diagnostic;

	sk->sk_state     = TCP_CLOSE;
	sk->sk_err       = reason;
	sk->sk_shutdown |= SEND_SHUTDOWN;

	if (!sock_flag(sk, SOCK_DEAD)) {
		sk->sk_state_change(sk);
		sock_set_flag(sk, SOCK_DEAD);
	}
}

/*
 * Clear an own-rx-busy condition and tell the peer about this, provided
 * that there is a significant amount of free receive buffer space available.
 */
void x25_check_rbuf(struct sock *sk)
{
	struct x25_sock *x25 = x25_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf >> 1) &&
	    (x25->condition & X25_COND_OWN_RX_BUSY)) {
		x25->condition &= ~X25_COND_OWN_RX_BUSY;
		x25->condition &= ~X25_COND_ACK_PENDING;
		x25_write_internal(sk, X25_RR);
	}
}