/*
 *  net/dccp/ackvec.c
 *
 *  An implementation of Ack Vectors for the DCCP protocol
 *  Copyright (c) 2007 University of Aberdeen, Scotland, UK
 *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 *      This program is free software; you can redistribute it and/or modify it
 *      under the terms of the GNU General Public License as published by the
 *      Free Software Foundation; version 2 of the License;
 */
#include "dccp.h"
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>

static struct kmem_cache *dccp_ackvec_slab;
static struct kmem_cache *dccp_ackvec_record_slab;

struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
{
	struct dccp_ackvec *av = kmem_cache_zalloc(dccp_ackvec_slab, priority);

	if (av != NULL) {
		av->av_buf_head = av->av_buf_tail = DCCPAV_MAX_ACKVEC_LEN - 1;
		INIT_LIST_HEAD(&av->av_records);
	}
	return av;
}

static void dccp_ackvec_purge_records(struct dccp_ackvec *av)
{
	struct dccp_ackvec_record *cur, *next;

	list_for_each_entry_safe(cur, next, &av->av_records, avr_node)
		kmem_cache_free(dccp_ackvec_record_slab, cur);
	INIT_LIST_HEAD(&av->av_records);
}

void dccp_ackvec_free(struct dccp_ackvec *av)
{
	if (likely(av != NULL)) {
		dccp_ackvec_purge_records(av);
		kmem_cache_free(dccp_ackvec_slab, av);
	}
}

/**
 * dccp_ackvec_update_records  -  Record information about sent Ack Vectors
 * @av:		Ack Vector records to update
 * @seqno:	Sequence number of the packet carrying the Ack Vector just sent
 * @nonce_sum:	The sum of all buffer nonces contained in the Ack Vector
 */
int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seqno, u8 nonce_sum)
{
	struct dccp_ackvec_record *avr;

	avr = kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
	if (avr == NULL)
		return -ENOBUFS;

	avr->avr_ack_seqno  = seqno;
	avr->avr_ack_ptr    = av->av_buf_head;
	avr->avr_ack_ackno  = av->av_buf_ackno;
	avr->avr_ack_nonce  = nonce_sum;
	avr->avr_ack_runlen = dccp_ackvec_runlen(av->av_buf + av->av_buf_head);
	/*
	 * When the buffer overflows, we keep no more than one record. This is
	 * the simplest way of disambiguating sender-Acks dating from before
	 * the overflow from sender-Acks which refer to after the overflow;
	 * a simple solution is preferable here since we are handling an
	 * exception.
	 */
	if (av->av_overflow)
		dccp_ackvec_purge_records(av);
	/*
	 * Since GSS is incremented for each packet, the list is automatically
	 * arranged in descending order of @ack_seqno.
	 */
	list_add(&avr->avr_node, &av->av_records);

	dccp_pr_debug("Added Vector, ack_seqno=%llu, ack_ackno=%llu (rl=%u)\n",
		      (unsigned long long)avr->avr_ack_seqno,
		      (unsigned long long)avr->avr_ack_ackno,
		      avr->avr_ack_runlen);
	return 0;
}

static struct dccp_ackvec_record *dccp_ackvec_lookup(struct list_head *av_list,
						      const u64 ackno)
{
	struct dccp_ackvec_record *avr;
	/*
	 * Exploit that records are inserted in descending order of sequence
	 * number, start with the oldest record first. If @ackno is `before'
	 * the earliest ack_ackno, the packet is too old to be considered.
	 */
	list_for_each_entry_reverse(avr, av_list, avr_node) {
		if (avr->avr_ack_seqno == ackno)
			return avr;
		if (before48(ackno, avr->avr_ack_seqno))
			break;
	}
	return NULL;
}

/*
 * Buffer index and length computation using modulo-buffersize arithmetic.
 * Note that, as pointers move from right to left, head is `before' tail.
 */
static inline u16 __ackvec_idx_add(const u16 a, const u16 b)
{
	return (a + b) % DCCPAV_MAX_ACKVEC_LEN;
}

static inline u16 __ackvec_idx_sub(const u16 a, const u16 b)
{
	return __ackvec_idx_add(a, DCCPAV_MAX_ACKVEC_LEN - b);
}

u16 dccp_ackvec_buflen(const struct dccp_ackvec *av)
{
	if (unlikely(av->av_overflow))
		return DCCPAV_MAX_ACKVEC_LEN;
	return __ackvec_idx_sub(av->av_buf_tail, av->av_buf_head);
}
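
/*
 * Worked example of the arithmetic above (a buffer size of 8 is assumed
 * purely for illustration; the real size is DCCPAV_MAX_ACKVEC_LEN): with
 * tail = 7 and two cells added, head = __ackvec_idx_sub(7, 2) = 5, and
 * dccp_ackvec_buflen() yields __ackvec_idx_sub(7, 5) = (7 + 8 - 5) % 8 = 2.
 */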

/**
 * dccp_ackvec_update_old  -  Update previous state as per RFC 4340, 11.4.1
 * @av:		non-empty buffer to update
 * @distance:	negative or zero distance of @seqno from buf_ackno downward
 * @seqno:	the (old) sequence number whose record is to be updated
 * @state:	state in which packet carrying @seqno was received
 */
static void dccp_ackvec_update_old(struct dccp_ackvec *av, s64 distance,
				   u64 seqno, enum dccp_ackvec_states state)
{
	u16 ptr = av->av_buf_head;

	BUG_ON(distance > 0);
	if (unlikely(dccp_ackvec_is_empty(av)))
		return;

	do {
		u8 runlen = dccp_ackvec_runlen(av->av_buf + ptr);

		if (distance + runlen >= 0) {
			/*
			 * Only update the state if packet has not been
			 * received yet. This is OK as per the second table in
			 * RFC 4340, 11.4.1; i.e. here we are using the
			 * following table:
			 *                     RECEIVED
			 *                      0   1   3
			 *              S     +---+---+---+
			 *              T   0 | 0 | 0 | 0 |
			 *              O     +---+---+---+
			 *              R   1 | 1 | 1 | 1 |
			 *              E     +---+---+---+
			 *              D   3 | 0 | 1 | 3 |
			 *                    +---+---+---+
			 * The "Not Received" state was set by reserve_seats().
			 */
			if (av->av_buf[ptr] == DCCPAV_NOT_RECEIVED)
				av->av_buf[ptr] = state;
			else
				dccp_pr_debug("Not changing %llu state to %u\n",
					      (unsigned long long)seqno, state);
			break;
		}

		distance += runlen + 1;
		ptr	  = __ackvec_idx_add(ptr, 1);

	} while (ptr != av->av_buf_tail);
}
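
/*
 * Worked example for the distance test above: if the head cell has run
 * length 3, it covers buf_ackno and the three sequence numbers below it.
 * A packet at distance -2 from buf_ackno then satisfies
 * distance + runlen = 1 >= 0 and is updated within the head cell; a packet
 * at distance -5 skips past it (distance becomes -1) and is looked up in
 * the next-oldest cell.
 */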

/* Mark @num entries after buf_head as "Not yet received". */
static void dccp_ackvec_reserve_seats(struct dccp_ackvec *av, u16 num)
{
	u16 start = __ackvec_idx_add(av->av_buf_head, 1),
	    len	  = DCCPAV_MAX_ACKVEC_LEN - start;

	/* check for buffer wrap-around */
	if (num > len) {
		memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, len);
		start = 0;
		num  -= len;
	}
	if (num)
		memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, num);
}

/**
 * dccp_ackvec_add_new  -  Record one or more new entries in Ack Vector buffer
 * @av:		 container of buffer to update (can be empty or non-empty)
 * @num_packets: number of packets to register (must be >= 1)
 * @seqno:	 sequence number of the first packet in @num_packets
 * @state:	 state in which packet carrying @seqno was received
 */
static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets,
				u64 seqno, enum dccp_ackvec_states state)
{
	u32 num_cells = num_packets;

	if (num_packets > DCCPAV_BURST_THRESH) {
		u32 lost_packets = num_packets - 1;

		DCCP_WARN("Warning: large burst loss (%u)\n", lost_packets);
		/*
		 * We received 1 packet and have a loss of size "num_packets-1"
		 * which we squeeze into num_cells-1 rather than reserving an
		 * entire byte for each lost packet.
		 * The reason is that the vector grows in O(burst_length); when
		 * it grows too large there will be no room left for the
		 * payload. This is a trade-off: if a few packets out of the
		 * burst show up later, their state will not be changed; it is
		 * simply too costly to reshuffle/reallocate/copy the buffer
		 * each time. Should such problems persist, we will need to
		 * switch to a different underlying data structure.
		 */
		for (num_packets = num_cells = 1; lost_packets; ++num_cells) {
			u8 len = min(lost_packets, (u32)DCCPAV_MAX_RUNLEN);

			av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, 1);
			av->av_buf[av->av_buf_head] = DCCPAV_NOT_RECEIVED | len;

			lost_packets -= len;
		}
	}

	if (num_cells + dccp_ackvec_buflen(av) >= DCCPAV_MAX_ACKVEC_LEN) {
		DCCP_CRIT("Ack Vector buffer overflow: dropping old entries\n");
		av->av_overflow = true;
	}

	av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, num_packets);
	if (av->av_overflow)
		av->av_buf_tail = av->av_buf_head;

	av->av_buf[av->av_buf_head] = state;
	av->av_buf_ackno	    = seqno;

	if (num_packets > 1)
		dccp_ackvec_reserve_seats(av, num_packets - 1);
}
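
/*
 * Note on the cell format used above: per RFC 4340, 11.4, each Ack Vector
 * cell is one byte holding a 2-bit state in the upper bits and a 6-bit run
 * length in the lower bits, so a single cell covers up to
 * DCCPAV_MAX_RUNLEN + 1 consecutive packets in the same state. This is why a
 * long loss burst can be compressed into a handful of "Not Received" cells.
 */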

/**
 * dccp_ackvec_input  -  Register incoming packet in the buffer
 */
void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb)
{
	u64 seqno = DCCP_SKB_CB(skb)->dccpd_seq;
	enum dccp_ackvec_states state = DCCPAV_RECEIVED;

	if (dccp_ackvec_is_empty(av)) {
		dccp_ackvec_add_new(av, 1, seqno, state);
		av->av_tail_ackno = seqno;

	} else {
		s64 num_packets = dccp_delta_seqno(av->av_buf_ackno, seqno);
		u8 *current_head = av->av_buf + av->av_buf_head;

		if (num_packets == 1 &&
		    dccp_ackvec_state(current_head) == state &&
		    dccp_ackvec_runlen(current_head) < DCCPAV_MAX_RUNLEN) {

			*current_head  += 1;
			av->av_buf_ackno = seqno;

		} else if (num_packets > 0) {
			dccp_ackvec_add_new(av, num_packets, seqno, state);
		} else {
			dccp_ackvec_update_old(av, num_packets, seqno, state);
		}
	}
}
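
/*
 * To summarise the non-empty case above: a delta of exactly 1 with matching
 * state simply extends the run length of the head cell; a larger positive
 * delta registers new cells (reserving "Not Received" seats for any gap);
 * and a delta of zero or less means the packet is not newer than buf_ackno,
 * so at most an existing cell is updated via dccp_ackvec_update_old().
 */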

/**
 * dccp_ackvec_clear_state  -  Perform house-keeping / garbage-collection
 * This routine is called when the peer acknowledges the receipt of Ack Vectors
 * up to and including @ackno. While based on section A.3 of RFC 4340, here
 * are additional precautions to prevent corrupted buffer state. In particular,
 * we use tail_ackno to identify outdated records; it always marks the earliest
 * packet of group (2) in 11.4.2.
 */
void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno)
{
	struct dccp_ackvec_record *avr, *next;
	u8 runlen_now, eff_runlen;
	s64 delta;

	avr = dccp_ackvec_lookup(&av->av_records, ackno);
	if (avr == NULL)
		return;
	/*
	 * Deal with outdated acknowledgments: this arises when e.g. there are
	 * several old records and the acks from the peer come in slowly. In
	 * that case we may still have records that pre-date tail_ackno.
	 */
	delta = dccp_delta_seqno(av->av_tail_ackno, avr->avr_ack_ackno);
	if (delta < 0)
		goto free_records;
	/*
	 * Deal with overlapping Ack Vectors: don't subtract more than the
	 * number of packets between tail_ackno and ack_ackno.
	 */
	eff_runlen = delta < avr->avr_ack_runlen ? delta : avr->avr_ack_runlen;

	runlen_now = dccp_ackvec_runlen(av->av_buf + avr->avr_ack_ptr);
	/*
	 * The run length of Ack Vector cells does not decrease over time. If
	 * the run length is the same as at the time the Ack Vector was sent,
	 * we free the ack_ptr cell. That cell can however not be freed if the
	 * run length has increased: in this case we need to move the tail
	 * pointer backwards (towards higher indices), to its next-oldest
	 * neighbour.
	 */
	if (runlen_now > eff_runlen) {

		av->av_buf[avr->avr_ack_ptr] -= eff_runlen + 1;
		av->av_buf_tail = __ackvec_idx_add(avr->avr_ack_ptr, 1);

		/* This move may not have cleared the overflow flag. */
		if (av->av_overflow)
			av->av_overflow = (av->av_buf_head == av->av_buf_tail);
	} else {
		av->av_buf_tail = avr->avr_ack_ptr;
		/*
		 * We have made sure that avr points to a valid cell within the
		 * buffer. This cell is either older than head, or equals head
		 * (empty buffer): in both cases we no longer have any
		 * overflow.
		 */
		av->av_overflow = 0;
	}

	/*
	 * The peer has acknowledged up to and including ack_ackno. Hence the
	 * first packet in group (2) of 11.4.2 is the successor of ack_ackno.
	 */
	av->av_tail_ackno = ADD48(avr->avr_ack_ackno, 1);

free_records:
	list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) {
		list_del(&avr->avr_node);
		kmem_cache_free(dccp_ackvec_record_slab, avr);
	}
}

/*
 *	Routines to keep track of Ack Vectors received in an skb
 */
int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce)
{
	struct dccp_ackvec_parsed *new = kmalloc(sizeof(*new), GFP_ATOMIC);

	if (new == NULL)
		return -ENOBUFS;
	new->vec   = vec;
	new->len   = len;
	new->nonce = nonce;

	list_add_tail(&new->node, head);
	return 0;
}
EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_add);

void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks)
{
	struct dccp_ackvec_parsed *cur, *next;

	list_for_each_entry_safe(cur, next, parsed_chunks, node)
		kfree(cur);
	INIT_LIST_HEAD(parsed_chunks);
}
EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_cleanup);
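
/*
 * Typical usage (a sketch of how callers elsewhere in DCCP are expected to
 * use the two helpers above, not something defined in this file): the option
 * parser collects every Ack Vector found in an skb via
 * dccp_ackvec_parsed_add() onto a caller-owned list head, the congestion
 * control code then walks that list, and dccp_ackvec_parsed_cleanup() frees
 * it once the skb has been fully processed.
 */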

int __init dccp_ackvec_init(void)
{
	dccp_ackvec_slab = kmem_cache_create("dccp_ackvec",
					     sizeof(struct dccp_ackvec), 0,
					     SLAB_HWCACHE_ALIGN, NULL);
	if (dccp_ackvec_slab == NULL)
		goto out_err;

	dccp_ackvec_record_slab = kmem_cache_create("dccp_ackvec_record",
					     sizeof(struct dccp_ackvec_record),
					     0, SLAB_HWCACHE_ALIGN, NULL);
	if (dccp_ackvec_record_slab == NULL)
		goto out_destroy_slab;

	return 0;

out_destroy_slab:
	kmem_cache_destroy(dccp_ackvec_slab);
	dccp_ackvec_slab = NULL;
out_err:
	DCCP_CRIT("Unable to create Ack Vector slab cache");
	return -ENOBUFS;
}

void dccp_ackvec_exit(void)
{
	if (dccp_ackvec_slab != NULL) {
		kmem_cache_destroy(dccp_ackvec_slab);
		dccp_ackvec_slab = NULL;
	}
	if (dccp_ackvec_record_slab != NULL) {
		kmem_cache_destroy(dccp_ackvec_record_slab);
		dccp_ackvec_record_slab = NULL;
	}
}