net/x25/x25_out.c (linux/fpc-iii.git)
/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	This module:
 *		This module is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	History
 *	X.25 001	Jonathan Naylor	  Started coding.
 *	X.25 002	Jonathan Naylor	  New timer architecture.
 *	2000-09-04	Henner Eisen	  Prevented x25_output() skb leakage.
 *	2000-10-27	Henner Eisen	  MSG_DONTWAIT for fragment allocation.
 *	2000-11-10	Henner Eisen	  x25_send_iframe(): re-queued frames
 *					  needed cleaned seq-number fields.
 */

#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/x25.h>
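
/*
 *	Convert the negotiated X.25 packet size facility value (the base-2
 *	logarithm of the packet size in bytes) into a byte count, e.g. a
 *	value of 7 yields 2^7 = 128 bytes and 10 yields 1024 bytes; a value
 *	of zero selects the default of 128 bytes.
 */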
static int x25_pacsize_to_bytes(unsigned int pacsize)
{
	int bytes = 1;

	if (!pacsize)
		return 128;

	while (pacsize-- > 0)
		bytes *= 2;

	return bytes;
}

/*
 *	This is where all X.25 information frames pass.
 *
 *	Returns the number of user data bytes sent on success
 *	or a negative error code on failure.
 */
int x25_output(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char header[X25_EXT_MIN_LEN];
	int err, frontlen, len;
	int sent = 0, noblock = X25_SKB_CB(skb)->flags & MSG_DONTWAIT;
	struct x25_sock *x25 = x25_sk(sk);
	int header_len = x25->neighbour->extended ? X25_EXT_MIN_LEN :
						    X25_STD_MIN_LEN;
	int max_len = x25_pacsize_to_bytes(x25->facilities.pacsize_out);
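
	/*
	 * If the user data does not fit into a single packet of the
	 * negotiated size, split it into a chain of fragments.  Each
	 * fragment carries a copy of the original header, and every
	 * fragment except the last has the M (More data) bit set so the
	 * peer can reassemble the message.
	 */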
	if (skb->len - header_len > max_len) {
		/* Save a copy of the Header */
		skb_copy_from_linear_data(skb, header, header_len);
		skb_pull(skb, header_len);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
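			/*
			 * sock_alloc_send_skb() may block waiting for send
			 * buffer space unless noblock is set, so the socket
			 * lock is dropped around the allocation and taken
			 * again afterwards.
			 */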
			release_sock(sk);
			skbn = sock_alloc_send_skb(sk, frontlen + max_len,
						   noblock, &err);
			lock_sock(sk);
			if (!skbn) {
				if (err == -EWOULDBLOCK && noblock) {
					kfree_skb(skb);
					return sent;
				}
				SOCK_DEBUG(sk, "x25_output: fragment alloc"
					       " failed, err=%d, %d bytes "
					       "sent\n", err, sent);
				return err;
			}

			skb_reserve(skbn, frontlen);

			len = max_len > skb->len ? skb->len : max_len;

			/* Copy the user data */
			skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
			skb_pull(skb, len);

			/* Duplicate the Header */
			skb_push(skbn, header_len);
			skb_copy_to_linear_data(skbn, header, header_len);

			if (skb->len > 0) {
				if (x25->neighbour->extended)
					skbn->data[3] |= X25_EXT_M_BIT;
				else
					skbn->data[2] |= X25_STD_M_BIT;
			}

			skb_queue_tail(&sk->sk_write_queue, skbn);
			sent += len;
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb);
		sent = skb->len - header_len;
	}

	return sent;
}

/*
 *	This procedure is passed a buffer descriptor for an iframe. It builds
 *	the rest of the control part of the frame and then writes it out.
 */
static void x25_send_iframe(struct sock *sk, struct sk_buff *skb)
{
	struct x25_sock *x25 = x25_sk(sk);

	if (!skb)
		return;
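
	/*
	 * Stamp the current send and receive sequence numbers, V(S) and
	 * V(R), into the I-frame's control field.  Modulo-128 (extended)
	 * operation uses two control octets; modulo-8 packs both counters
	 * and the M bit into a single octet.
	 */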
	if (x25->neighbour->extended) {
		skb->data[2]  = (x25->vs << 1) & 0xFE;
		skb->data[3] &= X25_EXT_M_BIT;
		skb->data[3] |= (x25->vr << 1) & 0xFE;
	} else {
		skb->data[2] &= X25_STD_M_BIT;
		skb->data[2] |= (x25->vs << 1) & 0x0E;
		skb->data[2] |= (x25->vr << 5) & 0xE0;
	}

	x25_transmit_link(skb, x25->neighbour);
}

void x25_kick(struct sock *sk)
{
	struct sk_buff *skb, *skbn;
	unsigned short start, end;
	int modulus;
	struct x25_sock *x25 = x25_sk(sk);

	if (x25->state != X25_STATE_3)
		return;

	/*
	 *	Transmit interrupt data.
	 */
	if (skb_peek(&x25->interrupt_out_queue) != NULL &&
	    !test_and_set_bit(X25_INTERRUPT_FLAG, &x25->flags)) {

		skb = skb_dequeue(&x25->interrupt_out_queue);
		x25_transmit_link(skb, x25->neighbour);
	}

	if (x25->condition & X25_COND_PEER_RX_BUSY)
		return;

	if (!skb_peek(&sk->sk_write_queue))
		return;
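
	/*
	 * Work out the available transmit window: start from V(S) if
	 * frames are already awaiting acknowledgement, otherwise from
	 * V(A); the window closes at V(A) plus the negotiated outgoing
	 * window size.  If the window is already full, nothing can be
	 * sent yet.
	 */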
	modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;

	start   = skb_peek(&x25->ack_queue) ? x25->vs : x25->va;
	end     = (x25->va + x25->facilities.winsize_out) % modulus;

	if (start == end)
		return;

	x25->vs = start;

	/*
	 * Transmit data until either we're out of data to send or
	 * the window is full.
	 */

	skb = skb_dequeue(&sk->sk_write_queue);
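
	/*
	 * Each frame is cloned before transmission: the clone is sent on
	 * the link while the original stays on the ack_queue until the
	 * peer acknowledges it.
	 */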
	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			skb_queue_head(&sk->sk_write_queue, skb);
			break;
		}

		skb_set_owner_w(skbn, sk);

		/*
		 * Transmit the frame copy.
		 */
		x25_send_iframe(sk, skbn);

		x25->vs = (x25->vs + 1) % modulus;

		/*
		 * Requeue the original data frame.
		 */
		skb_queue_tail(&x25->ack_queue, skb);

	} while (x25->vs != end &&
		 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);

	x25->vl         = x25->vr;
	x25->condition &= ~X25_COND_ACK_PENDING;

	x25_stop_timer(sk);
}

/*
 *	The following routines are taken from page 170 of the 7th ARRL Computer
 *	Networking Conference paper, as is the whole state machine.
 */
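
/*
 *	Answer a peer enquiry: reply with RNR if our own receiver is busy,
 *	otherwise with RR, then note that the acknowledgement has been sent
 *	(V(L) = V(R)) and clear the pending-ACK condition.
 */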
void x25_enquiry_response(struct sock *sk)
{
	struct x25_sock *x25 = x25_sk(sk);

	if (x25->condition & X25_COND_OWN_RX_BUSY)
		x25_write_internal(sk, X25_RNR);
	else
		x25_write_internal(sk, X25_RR);

	x25->vl         = x25->vr;
	x25->condition &= ~X25_COND_ACK_PENDING;

	x25_stop_timer(sk);
}