/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	This module is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	History
 *	X.25 001	Jonathan Naylor	  Started coding.
 *	X.25 002	Jonathan Naylor	  New timer architecture.
 *	mar/20/00	Daniela Squassoni Disabling/enabling of facilities
 *					  negotiation.
 *	2000-09-04	Henner Eisen	  dev_hold() / dev_put() for x25_neigh.
 */
#define pr_fmt(fmt) "X25: " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <net/sock.h>
#include <net/x25.h>
LIST_HEAD(x25_neigh_list);
DEFINE_RWLOCK(x25_neigh_list_lock);
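/*
 *	Every X.25-capable device known to the packet layer has one
 *	x25_neigh entry on x25_neigh_list; all traversals of the list are
 *	protected by x25_neigh_list_lock.
 */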
static void x25_t20timer_expiry(struct timer_list *);

static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
static void x25_transmit_restart_request(struct x25_neigh *nb);
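/*
 *	T20 is the Restart Request retransmission timer: it is armed each
 *	time a Restart Request goes out and, if it expires before the peer
 *	answers, the request is simply sent again.  Receiving a Restart
 *	Request or Restart Confirmation stops it and moves the link to
 *	state 3 (up).
 */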
/*
 *	Linux set/reset timer routines
 */
static inline void x25_start_t20timer(struct x25_neigh *nb)
{
	mod_timer(&nb->t20timer, jiffies + nb->t20);
}
static void x25_t20timer_expiry(struct timer_list *t)
{
	struct x25_neigh *nb = from_timer(nb, t, t20timer);

	x25_transmit_restart_request(nb);

	x25_start_t20timer(nb);
}
static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
	del_timer(&nb->t20timer);
}
static inline int x25_t20timer_pending(struct x25_neigh *nb)
{
	return timer_pending(&nb->t20timer);
}
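/*
 *	Rough meaning of the link states used below (the values themselves
 *	come from <net/x25.h>): state 0 is down, state 1 is waiting for the
 *	layer-2 link to come up, state 2 is waiting for the restart
 *	handshake to complete, and state 3 is up and carrying packets.
 */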
/*
 *	This handles all restart and diagnostic frames.
 */
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
		      unsigned short frametype)
{
	struct sk_buff *skbn;
	int confirm;

	switch (frametype) {
	case X25_RESTART_REQUEST:
		confirm = !x25_t20timer_pending(nb);
		x25_stop_t20timer(nb);
		nb->state = X25_LINK_STATE_3;
		if (confirm)
			x25_transmit_restart_confirmation(nb);
		break;

	case X25_RESTART_CONFIRMATION:
		x25_stop_t20timer(nb);
		nb->state = X25_LINK_STATE_3;
		break;

	case X25_DIAGNOSTIC:
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
			break;

		pr_warn("diagnostic #%d - %02X %02X %02X\n",
			skb->data[3], skb->data[4],
			skb->data[5], skb->data[6]);
		break;

	default:
		pr_warn("received unknown %02X with LCI 000\n",
			frametype);
		break;
	}

	/* Once the link is ready, flush anything queued while it came up. */
	if (nb->state == X25_LINK_STATE_3)
		while ((skbn = skb_dequeue(&nb->queue)) != NULL)
			x25_send_frame(skbn, nb);
}
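/*
 *	The restart and clear packets below are built by hand: a GFI octet
 *	(standard or extended sequence numbering), the logical channel
 *	identifier (always 0 for restart packets), the packet type
 *	identifier and, where the allocated length allows, cause and
 *	diagnostic octets.
 */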
/*
 *	This routine is called when a Restart Request is needed
 */
static void x25_transmit_restart_request(struct x25_neigh *nb)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
	*dptr++ = 0x00;			/* LCI 0 for restart packets */
	*dptr++ = X25_RESTART_REQUEST;
	*dptr++ = 0x00;			/* cause */
	*dptr++ = 0x00;			/* diagnostic */

	x25_send_frame(skb, nb);
}
/*
 *	This routine is called when a Restart Confirmation is needed
 */
static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN);

	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
	*dptr++ = 0x00;			/* LCI 0 for restart packets */
	*dptr++ = X25_RESTART_CONFIRMATION;

	x25_send_frame(skb, nb);
}
/*
 *	This routine is called when a Clear Request is needed outside of the context
 *	of a connected socket.
 */
void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
				unsigned char cause)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

	*dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
					 X25_GFI_EXTSEQ :
					 X25_GFI_STDSEQ);
	*dptr++ = (lci >> 0) & 0xFF;
	*dptr++ = X25_CLEAR_REQUEST;
	*dptr++ = cause;
	*dptr++ = 0x00;			/* diagnostic */

	x25_send_frame(skb, nb);
}
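/*
 *	Queue or send a packet depending on the link state: bring the link
 *	up first if it is down, queue while establishment or restart is in
 *	progress, and hand the frame straight to the device once the link
 *	is up.
 */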
void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
		skb_queue_tail(&nb->queue, skb);
		nb->state = X25_LINK_STATE_1;
		x25_establish_link(nb);
		break;
	case X25_LINK_STATE_1:
	case X25_LINK_STATE_2:
		skb_queue_tail(&nb->queue, skb);
		break;
	case X25_LINK_STATE_3:
		x25_send_frame(skb, nb);
		break;
	}
}
/*
 *	Called when the link layer has become established.
 */
void x25_link_established(struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
		nb->state = X25_LINK_STATE_2;
		break;
	case X25_LINK_STATE_1:
		x25_transmit_restart_request(nb);
		nb->state = X25_LINK_STATE_2;
		x25_start_t20timer(nb);
		break;
	}
}
/*
 *	Called when the link layer has terminated, or an establishment
 *	request has failed.
 */
void x25_link_terminated(struct x25_neigh *nb)
{
	nb->state = X25_LINK_STATE_0;
	/* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
	x25_kill_by_neigh(nb);
}
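/*
 *	Add a new device: allocate a neighbour entry for it, initialise the
 *	default facilities and timers, and put it on x25_neigh_list.
 */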
void x25_link_device_up(struct net_device *dev)
{
	struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);

	if (!nb)
		return;

	skb_queue_head_init(&nb->queue);
	timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);

	dev_hold(dev);
	nb->dev      = dev;
	nb->state    = X25_LINK_STATE_0;
	nb->extended = 0;
	/*
	 * Enables negotiation
	 */
	nb->global_facil_mask = X25_MASK_REVERSE |
				X25_MASK_THROUGHPUT |
				X25_MASK_PACKET_SIZE |
				X25_MASK_WINDOW_SIZE;
	nb->t20      = sysctl_x25_restart_request_timeout;
	refcount_set(&nb->refcnt, 1);

	write_lock_bh(&x25_neigh_list_lock);
	list_add(&nb->node, &x25_neigh_list);
	write_unlock_bh(&x25_neigh_list_lock);
}
/**
 *	__x25_remove_neigh - remove neighbour from x25_neigh_list
 *	@nb: neighbour to remove
 *
 *	Remove the neighbour from x25_neigh_list, if it is there.
 *	Caller must hold x25_neigh_list_lock.
 */
static void __x25_remove_neigh(struct x25_neigh *nb)
{
	skb_queue_purge(&nb->queue);
	x25_stop_t20timer(nb);

	if (nb->node.next) {
		list_del(&nb->node);
		x25_neigh_put(nb);
	}
}
/*
 *	A device has been removed, remove its links.
 */
void x25_link_device_down(struct net_device *dev)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		nb = list_entry(entry, struct x25_neigh, node);

		if (nb->dev == dev) {
			__x25_remove_neigh(nb);
			dev_put(dev);	/* drop the reference taken at device-up time */
		}
	}

	write_unlock_bh(&x25_neigh_list_lock);
}
/*
 *	Given a device, return the neighbour address.
 */
struct x25_neigh *x25_get_neigh(struct net_device *dev)
{
	struct x25_neigh *nb, *use = NULL;
	struct list_head *entry;

	read_lock_bh(&x25_neigh_list_lock);
	list_for_each(entry, &x25_neigh_list) {
		nb = list_entry(entry, struct x25_neigh, node);

		if (nb->dev == dev) {
			use = nb;
			break;
		}
	}

	if (use)
		x25_neigh_hold(use);
	read_unlock_bh(&x25_neigh_list_lock);
	return use;
}
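/*
 *	Note that x25_get_neigh() returns the entry with its reference
 *	count raised; callers drop it again with x25_neigh_put() once they
 *	are finished with it.
 */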
/*
 *	Handle the ioctls that control the subscription functions.
 */
int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
{
	struct x25_subscrip_struct x25_subscr;
	struct x25_neigh *nb;
	struct net_device *dev;
	int rc = -EINVAL;

	if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
		goto out;

	rc = -EFAULT;
	if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
		goto out;

	rc = -EINVAL;
	if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
		goto out;

	if ((nb = x25_get_neigh(dev)) == NULL)
		goto out_dev_put;

	dev_put(dev);

	if (cmd == SIOCX25GSUBSCRIP) {
		read_lock_bh(&x25_neigh_list_lock);
		x25_subscr.extended	     = nb->extended;
		x25_subscr.global_facil_mask = nb->global_facil_mask;
		read_unlock_bh(&x25_neigh_list_lock);
		rc = copy_to_user(arg, &x25_subscr,
				  sizeof(x25_subscr)) ? -EFAULT : 0;
	} else {
		rc = -EINVAL;
		if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
			rc = 0;
			write_lock_bh(&x25_neigh_list_lock);
			nb->extended	     = x25_subscr.extended;
			nb->global_facil_mask = x25_subscr.global_facil_mask;
			write_unlock_bh(&x25_neigh_list_lock);
		}
	}
	x25_neigh_put(nb);
out:
	return rc;
out_dev_put:
	dev_put(dev);
	goto out;
}
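/*
 *	Hypothetical sketch of a user space caller (the AF_X25 socket fd "s"
 *	and the interface name "x25tap0" are made up for illustration):
 *	fetch the current subscription, turn extended sequence numbering
 *	on, and write it back.
 *
 *		struct x25_subscrip_struct subscr = { .device = "x25tap0" };
 *
 *		ioctl(s, SIOCX25GSUBSCRIP, &subscr);
 *		subscr.extended = 1;
 *		ioctl(s, SIOCX25SSUBSCRIP, &subscr);
 */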
/*
 *	Release all memory associated with X.25 neighbour structures.
 */
void __exit x25_link_free(void)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		struct net_device *dev;

		nb = list_entry(entry, struct x25_neigh, node);
		dev = nb->dev;

		__x25_remove_neigh(nb);
		dev_put(dev);
	}
	write_unlock_bh(&x25_neigh_list_lock);
}