/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/atomic.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>

#include <linux/socket.h>

#include <linux/math64.h>
#define ETH_P_COR 0x1022

#define PROTO_COR_RAW 0
#define PROTO_COR_RDEAMON 1

#define COR_PASS_ON_CLOSE 1

#define COR_PUBLISH_SERVICE 2

#define COR_TOS_DEFAULT 0
#define COR_TOS_LOW_LATENCY 1
#define COR_TOS_HIGH_LATENCY 2

#define COR_PRIORITY 4

#define MAX_CONN_CMD_LEN 64

#define PACKET_TYPE_NONE 0
#define PACKET_TYPE_ANNOUNCE 1
#define PACKET_TYPE_CMSG_NOACK 2
#define PACKET_TYPE_CMSG_ACKSLOW 3
#define PACKET_TYPE_CMSG_ACKFAST 4
#define PACKET_TYPE_CONNDATA 64

#define PACKET_TYPE_CONNDATA_FLAGS 63
#define PACKET_TYPE_CONNDATA_FLAGS_FLUSH 32
#define PACKET_TYPE_CONNDATA_FLAGS_WINDOWUSED 31
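
/*
 * Illustrative sketch, not from the original source: composing a conndata
 * packet type byte from the flags above, with windowused in the low 5 bits
 * (0..31) and the flush flag at bit 5.
 */
static inline __u8 cor_example_conndata_packet_type(__u8 windowused, __u8 flush)
{
        return PACKET_TYPE_CONNDATA |
                        (flush ? PACKET_TYPE_CONNDATA_FLAGS_FLUSH : 0) |
                        (windowused & PACKET_TYPE_CONNDATA_FLAGS_WINDOWUSED);
}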
/**
 * Announce data format:
 * version [2]
 *      is 0, may be increased if the protocol changes
 * minversion [2]
 *      is 0, must be increased if a future version of the protocol is
 *      incompatible with the current version
 *
 * Data format of the announce packet "data" field:
 * {command [2] commandlength [2] commanddata [commandlength]}[...]
 */
/* ANNCMD_VERSION: version[2] minversion[2] */
#define ANNCMD_VERSION 1

/* ANNCMD_ADDR: addr[8] */
#define ANNCMD_ADDR 2

/* ANNCMD_NOADDR: (no params) */
#define ANNCMD_NOADDR 3
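
/*
 * Illustrative sketch, not part of the original header: walking the announce
 * "data" field in the {command [2] commandlength [2] commanddata
 * [commandlength]} format described above, assuming big-endian fields like
 * the cor_parse_* helpers further below. Unknown commands are skipped.
 */
static inline void cor_example_walk_anncmds(unsigned char *data, __u32 datalen)
{
        while (datalen >= 4) {
                __u16 cmd = (data[0] << 8) | data[1];
                __u16 cmdlen = (data[2] << 8) | data[3];

                data += 4;
                datalen -= 4;

                if (cmdlen > datalen)
                        break; /* truncated command */

                /* ... dispatch on cmd (ANNCMD_VERSION/ANNCMD_ADDR/...) ... */
                (void) cmd;

                data += cmdlen;
                datalen -= cmdlen;
        }
}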
/**
 * Kernel packet data - these commands are sent by the neighbor
 * The end nodes may cause these commands to be sent, but they do not see them
 * beyond the first hop.
 */
#define KP_MISC 0
#define KP_ACK_CONN 1
#define KP_CONN_DATA 2

#define KP_MISC_PADDING 0
/**
 * KP_INIT_SESSION[1] sessionid[4]
 *
 * finishes neighbor discovery and starts a session
 *
 * Before this is received all other commands are ignored. The sessionid is
 * used to prevent usage of old neighbor discovery data (e.g. addresses).
 */
#define KP_MISC_INIT_SESSION 1

#define KP_MISC_INIT_SESSION_CMDLEN 5
/**
 * KP_PING[1] cookie[4]
 * KP_PONG[1] cookie[4] respdelay_full[4] respdelay_netonly[4]
 *
 * This is needed to find out whether the other node is reachable. After a new
 * neighbor is seen, ping requests are sent and the neighbor is only reachable
 * after a few pongs are received. These requests are also used to find out
 * whether a neighbor is gone.
 *
 * The receiver of a ping may delay the sending of the pong e.g. to create
 * bigger packets. The respdelay is the time in microseconds the packet was
 * delayed.
 */
#define KP_MISC_PING 2

#define KP_MISC_PING_CMDLEN 5

#define KP_MISC_PONG 3
/* KP_ACK[1] seqno[4] */
#define KP_MISC_ACK 4
/**
 * NOTE on connection ids:
 * connection ids we receive with most significant bit 0 have been generated
 * by us
 * connection ids we receive with most significant bit 1 have been generated
 * by the other side
 *
 * ATTENTION: the priority seqnos are reversed:
 * priority seqnos we send are used when we send updates
 * priority seqnos we receive are used when we receive updates
 */
/**
 * incoming connection
 * seqno1... used to ack data sent from the side which initiated the connection
 * seqno2... used to ack data sent to the side which initiated the connection
 * KP_CONNECT[1] conn_id[4] seqno1[4] seqno2[4] window[1] priority_seqno[0.5]
 *      priority[1.5] is_highlatency[1]
 */
#define KP_MISC_CONNECT 5
/**
 * incoming connection successful,
 * KP_CONNECT_SUCCESS[1] conn_id[4] window[1]
 */
#define KP_MISC_CONNECT_SUCCESS 6
/**
 * KP_RESET_CONN[1] conn_id[4]
 * We send this if there is an established connection we want to close.
 */
#define KP_MISC_RESET_CONN 7
/**
 * KP_SET_MAX_CMSG_DELAY[1] cpacket_ack_fast_delay[4] cpacket_ack_slow_delay[4]
 *      data_ack_delay[4] cmsg_delay[4]
 * Sent after connecting and at any change.
 * Delays are specified in microseconds.
 */
#define KP_MISC_SET_MAX_CMSG_DELAY 8

#define KP_MISC_SET_MAX_CMSG_DELAY_CMDLEN 17
/**
 * KP_MISC_SET_RECEIVE_MTU[1] receive_mtu[4]
 * Sent after connecting and at any change.
 */
#define KP_MISC_SET_RECEIVE_MTU 9

#define KP_MISC_SET_RECEIVE_MTU_CMDLEN 5
/**
 * KP_ACK_CONN[1] conn_id[4] delay_remaining[1] seqno[4] window[2]
 *      bufsize_changerate[1] seqno_ooo[4]
 *      length[1-4] priority_seqno[0.5] priority[1.5] is_highlatency[1]
 *
 * conn_id is the conn_id we use if we sent something through this conn and
 * *not* the conn_id that the neighbor used to send us the data
 *
 * delay_remaining = time the ack_conn could have remained in the queue
 *      255 means the ack_conn has been sent immediately
 *      0 means it has been delayed by as much as the delay set by
 *      SET_MAX_CMSG_DELAY
 *
 * seqno = the seqno which is expected in the next non-out-of-order packet
 *
 * window = amount of data which can be sent without receiving the next ack
 *      packets with lower seqno do not overwrite the last window size
 *      The window may also be reduced. However, this only indicates a wish.
 *      Packets must be accepted if they exceed the new window, but not the
 *      old one.
 *
 * window value encoding:
 *      0 = 0
 *      1...255 = 64*2^((value-1)/7); the end result is rounded down to an
 *      integer
 *
 * bufsize_changerate = whether the next router(s) is increasing or decreasing
 *      its buffer size
 *      0 = for every byte we can send, the end host will receive 2 bytes
 *      64 = for every byte we can send, the end host will receive 1 byte
 *      128 = for every 2 bytes we can send, the end host will receive 1 byte
 *
 * seqno_ooo, length = This packet was received out of order. Maybe a previous
 *      packet has been lost. Out of order data should not be retransmitted.
 *      Multiple ooo packets may be merged into a single ack. Ooo packets may
 *      be partially accepted, so that the length does not cover the full
 *      packet and/or the seqno starts in the middle of a packet.
 */
#define KP_ACK_CONN_FLAGS_SEQNO 1
#define KP_ACK_CONN_FLAGS_WINDOW 2
#define KP_ACK_CONN_FLAGS_OOO 12 /* 4+8 */
#define KP_ACK_CONN_FLAGS_PRIORITY 16
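
/*
 * Illustrative sketch of the window decoding described above
 * (64 * 2^((value - 1) / 7), rounded down). The seven fractional steps per
 * doubling are approximated here with an 8.8 fixed-point table; this is an
 * approximation for illustration, not the original decoder.
 */
static inline __u64 cor_example_dec_window(__u8 value)
{
        /* 256 * 2^(k/7) for k = 0..6 */
        static const __u16 frac[7] = {256, 282, 312, 344, 380, 420, 463};

        if (value == 0)
                return 0;
        return (((__u64) 64 << ((value - 1) / 7)) *
                        frac[(value - 1) % 7]) >> 8;
}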
static inline __u8 cor_ooolen_to_flags(__u32 len)
{
        /* width of the ooo "length" field, encoded in flag bits 2+3
         * (body reconstructed; length[1-4], value 3 means 4 bytes) */
        if (len == 0)
                return 0;
        if (len < 256)
                return 4;
        if (len < 65536)
                return 8;
        return 12;
}

static inline int cor_ooolen(__u8 flags)
{
        int len = ((flags & KP_ACK_CONN_FLAGS_OOO) >> 2);

        if (unlikely(len == 3))
                return 4;
        return len;
}
static inline int cor_ack_conn_len(__u8 flags)
{
        /* field sizes follow the KP_ACK_CONN format comment above */
        int len = 0;

        if ((flags & KP_ACK_CONN_FLAGS_SEQNO) != 0) {
                len += 4; /* seqno[4] */
                if ((flags & KP_ACK_CONN_FLAGS_WINDOW) != 0)
                        len += 3; /* window[2] bufsize_changerate[1] */
        }

        if (cor_ooolen(flags) != 0) {
                len += 4; /* seqno_ooo[4] */
                len += cor_ooolen(flags);
        }

        /* delay_remaining */
        if ((flags & KP_ACK_CONN_FLAGS_SEQNO) != 0 ||
                        cor_ooolen(flags) != 0)
                len += 1;

        if (flags & KP_ACK_CONN_FLAGS_PRIORITY)
                len += 2; /* priority_seqno[0.5] priority[1.5] */

        return len;
}
/* KP_CONN_DATA[1] conn_id[4] seqno[4] length[1-2] data[length] */
#define KP_CONN_DATA_FLAGS_WINDOWUSED 31
#define KP_CONN_DATA_FLAGS_FLUSH 32

#define KP_CONN_DATA_MAXLEN (128 + 32767)

static inline __u32 get_kp_conn_data_length(__u32 datalen)
{
        /* body reconstructed: code[1] + conn_id[4] + seqno[4] + length[1-2]
         * + data; the length field is assumed to be 1 byte below 128
         * (see KP_CONN_DATA_MAXLEN) */
        if (datalen < 128)
                return 10 + datalen;
        else
                return 11 + datalen;
}
static inline __u8 get_kp_code(__u8 maj, __u8 min)
{
        BUILD_BUG_ON(maj > 3);
        return (maj << 6) + min;
}

static inline __u8 kp_maj(__u8 code)
{
        return code >> 6;
}

static inline __u8 kp_min(__u8 code)
{
        return code & 63;
}
/**
 * Connection data which is interpreted when the connection has no target yet
 * These commands are sent by the end node.
 *
 * cmd[2] length[1-4] parameter[length]
 * unrecognized commands are ignored
 * parameters which are longer than expected are ignored as well
 */
#define CD_CONTINUE_ON_ERROR_FLAG 32768
#define CD_NOPARAM_FLAG 16384
/* outgoing connection: CD_CONNECT_NB[2] length[1-4] addr[8] */
#define CD_CONNECT_NB 1

/* connection to local open part: CD_CONNECT_PORT[2] length[1-4] port[4] */
#define CD_CONNECT_PORT 2

/**
 * list connected neighbors: CD_LIST_NEIGH[2] length[1-4]
 * responds with CDR_BINDATA if successful
 *
 * The response data is formatted as follows:
 * numfields[1-4] (field[2] fieldlen[1-4])[numfields]
 * rows[responserows]:
 *      fieldlen[1-4], only if fieldlen in the header was "0"
 *      fielddata[fieldlen]
 *
 * Future versions may append data to the field definition. Clients must
 * silently discard fields they do not expect.
 */
#define CD_LIST_NEIGH 3
#define LIST_NEIGH_FIELD_ADDR 1

/**
 * latency_in_microsecs[1] (64_11 encoding)
 * Only raw network latency is measured. Delays caused by the priority queues
 * are *not* included.
 */
#define LIST_NEIGH_FIELD_LATENCY 2
/**
 * list services: CD_LIST_SERVICES[2] length[1-4]
 * responds with CDR_BINDATA if successful
 */
#define CD_LIST_SERVICES 4

/**
 * list l4 protocols: CD_LIST_L4PROTOCOLS[2] length[1-4]
 * responds with CDR_BINDATA if successful
 */
#define CD_LIST_L4PROTOCOLS 5
/**
 * Connection data response
 * Format is the same as with connection data
 */

/* CDR_EXECOK[1] */
#define CDR_EXECOK 1

/**
 * CDR_EXECOK_BINDATA[1] bindatalen[1-4] bindata[bindatalen]
 * CDR_EXECOK_BINDATA_NORESP[1]
 */
#define CDR_EXECOK_BINDATA 2
#define CDR_EXECOK_BINDATA_NORESP 3

/**
 * CDR_EXECFAILED[1] reasoncode[2]
 */
#define CDR_EXECFAILED 4
#define CDR_EXECFAILED_INVALID_COMMAND 1
#define CDR_EXECFAILED_COMMAND_PARSE_ERROR 2
#define CDR_EXECFAILED_TEMPORARILY_OUT_OF_RESOURCES 3
#define CDR_EXECFAILED_NB_DOESNTEXIST 4
#define CDR_EXECFAILED_UNKNOWN_L4PROTOCOL 5
#define CDR_EXECFAILED_PORTCLOSED 6

#define L4PROTO_STREAM 42399
/**
 * routing daemon sock
 *
 * cmdcode[4] length[4] cmddata[length]
 */

#define CRD_KTU_SUPPORTEDVERSIONS 1
/**
 * CRD_KTU_SUPPORTEDVERSIONS[4] length[4] min[4] max[4]
 */

#define CRD_KTU_CONNECT 2
/**
 * CRD_KTU_KTOU_CONNECT[4] length[4] cookie[8] targetlen[4] target[targetlen]
 */

#define CRD_UTK_VERSION 1
/**
 * CRD_UTK_VERSION[4] length[4] version[4]
 */

#define CRD_UTK_UP 2
#define CRD_UTK_UP_FLAGS_ADDR 1
#define CRD_UTK_UP_FLAGS_INTERFACES 2
/**
 * CRD_UTK_UP[4] length[4] flags[8]
 * if CRD_UTK_UP_FLAGS_ADDR:
 *      addr[8]
 * if CRD_UTK_UP_FLAGS_INTERFACES:
 *      num_interfaces[4] (length[4] interface[length])[num_interfaces]
 */

#define CRD_UTK_CONNECTERROR 3
/**
 * CRD_UTK_CONNECTERROR[4] length[4] cookie[8] error[4]
 */

#define CRD_UTK_CONNECTERROR_ACCES 1
#define CRD_UTK_CONNECTERROR_NETUNREACH 2
#define CRD_UTK_CONNECTERROR_TIMEDOUT 3
#define CRD_UTK_CONNECTERROR_REFUSED 4
#define CONN_MNGD_HEADERLEN 2
#define CONN_MNGD_MAX_CTRL_DATALEN 8
#define CONN_MNGD_CHECKSUMLEN 4

#define CONN_MNGD_HASDATA (1 << 15)
#define CONN_MNGD_EOF (1 << 0)
#define CONN_MNGD_RCVEND (1 << 1)
#define CONN_MNGD_KEEPALIVE_REQ (1 << 2)
#define CONN_MNGD_KEEPALIVE_RESP (1 << 3)
#define CONN_MNGD_DATALEN 4095

#define CONN_MNGD_MAX_SEGMENT_SIZE (CONN_MNGD_DATALEN + 1)
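
/*
 * Illustrative sketch, an assumption based on the bit definitions above: in
 * the 2-byte managed connection header, the low 12 bits carry the segment
 * length when CONN_MNGD_HASDATA is set, and ctrl flags like CONN_MNGD_EOF
 * otherwise.
 */
static inline int cor_example_mngd_hdr_datalen(__u16 hdr)
{
        if ((hdr & CONN_MNGD_HASDATA) == 0)
                return -1; /* ctrl-only header, no payload */
        return hdr & CONN_MNGD_DATALEN;
}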
struct cor_interface_config {
        char *name;
        __u32 name_len;
};
#define CONGSTATUS_NONE 0
#define CONGSTATUS_CONNDATA 1
#define CONGSTATUS_ANNOUNCE 2
#define CONGSTATUS_RETRANS 3
#define CONGSTATUS_KPACKETS 4
struct cor_dev {
        struct list_head dev_list;

        struct net_device *dev; /* may not change while queue is in list */

        struct kref ref;

        /* nesting of this sub-struct is an assumption; it is referenced
         * elsewhere as cd->send_queue and "send_queue->qlock" */
        struct {
                spinlock_t qlock;

                struct task_struct *qos_resume_thread;
                wait_queue_head_t qos_resume_wq;
                atomic_t qos_resume_scheduled;
                unsigned long jiffies_lastprogress;

                struct list_head kpackets_waiting;
                struct list_head conn_retrans_waiting;
                struct list_head announce_waiting;
                struct list_head neighbors_waiting;
                struct list_head neighbors_waiting_nextpass;

                unsigned long jiffies_nb_pass_start;
                unsigned long jiffies_nb_lastduration;

                unsigned long jiffies_lastdrop;

                atomic_t cong_status;
        } send_queue;
};
/**
 * switching to and from RB_INQUEUE_NBCONGWIN is only done with
 * nbcongwin.lock held
 */
#define RB_INQUEUE_FALSE 0
#define RB_INQUEUE_TRUE 1
#define RB_INQUEUE_NBCONGWIN 2 /* only for nb->rb */
#define RB_INQUEUE_NBNOTACTIVE 3 /* only for nb->rb */
struct cor_resume_block {
        struct list_head lh;
        __u8 in_queue;
};
#define ANNOUNCE_TYPE_BROADCAST 1
#define ANNOUNCE_TYPE_UNICAST 2

struct cor_announce_data {
        struct kref ref;

        struct list_head lh;
        __u8 type;

        struct net_device *dev;
        char mac[MAX_ADDR_LEN];
        struct delayed_work announce_work;
        struct cor_resume_block rb;
};
struct cor_neighbor_discdata {
        struct list_head lh;
        unsigned long jiffies_created;

        struct net_device *dev;
        char mac[MAX_ADDR_LEN];
};
struct cor_ping_cookie {
        ktime_t time_created;
        ktime_t time_sent;
        unsigned long jiffies_sent;

        __u32 cookie;
        __u8 pongs; /* count of pongs for pings sent after this one */
};
#define NEIGHBOR_STATE_INITIAL 0
#define NEIGHBOR_STATE_ACTIVE 1
#define NEIGHBOR_STATE_STALLED 2
#define NEIGHBOR_STATE_KILLED 3

#define NBCONGWIN_SHIFT 16
#define NBCONGWIN_MUL (1 << NBCONGWIN_SHIFT)
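
/*
 * Note (illustration): NBCONGWIN values are kept in 16.16 fixed point, e.g.
 * a congestion window of 3000 bytes would be stored as 3000 * NBCONGWIN_MUL
 * and read back with ">> NBCONGWIN_SHIFT".
 */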
struct cor_neighbor {
        struct list_head nb_list;

        struct kref ref;
        struct cor_dev *cd;

        struct net_device *dev;
        char mac[MAX_ADDR_LEN];

        atomic_t sessionid_rcv_needed;
        atomic_t sessionid_snd_needed;

        atomic64_t cmsg_timer_timeout;
        struct timer_list cmsg_timer;
        spinlock_t cmsg_lock;
        struct list_head cmsg_queue_pong;
        struct list_head cmsg_queue_ack_fast;
        struct list_head cmsg_queue_ack_slow;
        struct list_head cmsg_queue_ackconn_urgent;
        struct list_head cmsg_queue_ackconn;
        struct list_head cmsg_queue_conndata_lowlat;
        struct list_head cmsg_queue_conndata_highlat;
        struct list_head cmsg_queue_other;
        __u8 add_retrans_needed;
        __u32 kpacket_seqno; /* not locked, only accessed by single tasklet */

        struct rb_root pending_conn_resets_rb;

        __u32 cmsg_pongslength;
        __u32 cmsg_otherlength;

        __u32 cmsg_pongscnt; /* size of queue only, protected by cmsg_lock */
        atomic_t cmsg_pongs_retrans_cnt; /* number of retransmits only */
        atomic_t cmsg_othercnt; /* size of queue + retransmits */

        atomic_t cmsg_bulk_readds;

        atomic_t cmsg_delay_conndata;

        /* not locked, only accessed by single thread */
        __u8 max_cmsg_delay_sent;

        atomic_t rcvmtu_sendneeded;

        /* protected by cor_dev send_queue->qlock */
        struct cor_resume_block rb_kp;
        struct cor_resume_block rb_cr;
        struct cor_resume_block rb;
        unsigned long cmsg_send_start_j;
        ktime_t cmsg_send_start_kt;

        struct list_head lh_nextpass;

        /* nesting of this sub-struct is an assumption; it is referenced
         * above as "nbcongwin.lock" */
        struct {
                spinlock_t lock;

                /**
                 * acked: avoid send bursts when data_intransit is shrunk too
                 * fast
                 */
                struct hrtimer acked_timer;
                ktime_t acked_timeout_time;
                ktime_t acked_refresh_time;

                atomic64_t data_intransit;
        } nbcongwin;

        spinlock_t state_lock;
        unsigned long last_ping_time;
        struct cor_ping_cookie cookies[PING_COOKIES_PER_NEIGH];
        __u32 ping_intransit;

        __u64 latency_variance_retrans_us; /* microsecs */
        atomic_t latency_retrans_us; /* microsecs */
        atomic_t latency_stddev_retrans_us; /* microsecs */
        atomic_t latency_advertised_us; /* microsecs */
        __u8 rcvmtu_delayed_send_needed:1,
                rcvmtu_allowed_countdown:2;

        atomic_t max_remote_ack_fast_delay_us; /* microsecs */
        atomic_t max_remote_ack_slow_delay_us; /* microsecs */
        atomic_t max_remote_ackconn_delay_us; /* microsecs */
        atomic_t max_remote_pong_delay_us; /* microsecs */

        atomic_t remote_rcvmtu;

        unsigned long initial_state_since; /* initial state */

        /**
         * time of the last sent packet which has been acked or
         * otherwise responded to (e.g. pong)
         */
        unsigned long last_roundtrip; /* active/stalled state */

        ktime_t last_roundtrip_end;

        __u8 str_timer_pending;
        struct delayed_work stalltimeout_timer;

        spinlock_t connid_lock;
        struct rb_root connid_rb;

        spinlock_t connid_reuse_lock;
        struct rb_root connid_reuse_rb;
        struct list_head connid_reuse_list;
        __u16 connid_reuse_pingcnt;
        __u8 connid_reuse_oom_countdown;

        atomic64_t priority_sum;

        /**
         * connections which receive data from/send data to this node
         * used when terminating all connections of a neighbor and
         * terminating inactive connections
         */
        spinlock_t conn_list_lock;
        struct list_head snd_conn_idle_list;
        struct list_head snd_conn_busy_list;

        /**
         * the timer has to be inited when adding the neighbor:
         * add_timer(struct timer_list *timer);
         */
        spinlock_t retrans_lock;
        struct timer_list retrans_timer;
        struct list_head retrans_fast_list;
        struct list_head retrans_slow_list;
        struct rb_root kp_retransmits_rb;

        spinlock_t retrans_conn_lock;
        struct timer_list retrans_conn_timer;
        struct list_head retrans_conn_lowlatency_list;
        struct list_head retrans_conn_highlatency_list;

        struct work_struct reset_neigh_work;
};
static inline void cor_nb_kref_get(struct cor_neighbor *nb, char *reason)
{
        /* printk(KERN_ERR "cor_nb_kref_get %p %s\n", nb, reason); */
        kref_get(&nb->ref);
}

void cor_neighbor_free(struct kref *ref); /* neigh.c */

static inline void cor_nb_kref_put(struct cor_neighbor *nb, char *reason)
{
        /* printk(KERN_ERR "cor_nb_kref_put %p %s\n", nb, reason); */
        kref_put(&nb->ref, cor_neighbor_free);
}

void cor_kreffree_bug(struct kref *ref); /* util.c */

static inline void cor_nb_kref_put_bug(struct cor_neighbor *nb, char *reason)
{
        /* printk(KERN_ERR "cor_nb_kref_put_bug %p %s\n", nb, reason); */
        kref_put(&nb->ref, cor_kreffree_bug);
}
#define DATABUF_BUF 0
#define DATABUF_SKB 1

struct cor_data_buf_item {
        struct list_head buf_list;

        char *buf;
        __u16 datalen;
        __u16 buflen;

        __u8 type;
};

struct cor_connid_reuse_item {
        struct rb_node rb;
        struct list_head lh;

        __u32 conn_id;
        __u16 pingcnt;
};
#define SNDSPEED_INIT 0
#define SNDSPEED_ACTIVE 1
struct cor_snd_speed {
        __u8 state;
        unsigned long jiffies_last_refresh;

        /* bytes per second */
        __u32 speed;
};
/* This struct helps keep struct cor_conn small. */
struct cor_conn_src_sock_extradata {
        struct cor_conn *src_sock;
        struct cor_sock *cs;

        struct cor_snd_speed snd_speed;

        /**
         * keepalive_lh and in_keepalive_list are protected by
         * cor_keepalive_req_lock
         */
        struct timer_list keepalive_timer;
        struct list_head keepalive_lh;
        __u8 in_keepalive_list;

        __be32 keepalive_req_cookie;
        __be32 keepalive_resp_cookie;

        /**
         * keepalive_intransit == 0... last resp received
         * keepalive_intransit == 1... req sent
         */
        unsigned long jiffies_keepalive_lastact;

        /* send buffers for ctrl messages and for the data header;
         * the sub-struct names here are assumptions */
        struct {
                char snd_hdr[CONN_MNGD_HEADERLEN];
                char snd_data[CONN_MNGD_MAX_CTRL_DATALEN];
                char snd_chksum[CONN_MNGD_CHECKSUMLEN];
        } buf_ctrl;

        struct {
                char snd_hdr[CONN_MNGD_HEADERLEN];
                char snd_chksum[CONN_MNGD_CHECKSUMLEN];
        } buf_data;
};
/**
 * There are 2 conn objects per bi-directional connection. They refer to each
 * other via the reversedir field.
 */

/**
 * Naming:
 *
 * cn: conn we do not know what is inside
 *
 * src_in, trgt_unconn, trgt_out, ...: A conn with the specified source or
 * targettype. In the unlocked case the types are only a guess, because they
 * might have changed since the last access. After locking the
 * source/destination parameters have to be checked whether they still are
 * what we expect. This includes source/targettype, neighbor, conn_id.
 *
 * Suffixes:
 *
 * no suffix: unlocked
 *
 * _l: this direction is locked
 *
 * _ll: both directions are locked
 *
 * _lx: this direction is locked, the other direction may be locked
 *
 * _o: unlocked, but source or target is known for sure, because an outside
 * lock is taken; for variables on the heap this means that an outside lock
 * must be taken before accessing the struct which points to the conn.
 */

/**
 * Most fields are protected by rcv_lock. Fields which control
 * source and destination of the data flow require both directions
 * to be locked and external references to be cleared before the change can
 * happen. This includes fields like sourcetype, targettype, connid,
 * list_heads, ???. In this case the side with is_client == 1 needs to be
 * locked first.
 *
 * Some other fields are locked outside (e.g. at struct neighbor).
 */
#define SOURCE_UNCONNECTED 0
#define SOURCE_IN 1
#define SOURCE_SOCK 2

#define TARGET_UNCONNECTED 0
#define TARGET_OUT 1
#define TARGET_SOCK 2
#define TARGET_DISCARD 3

#define BUFSIZE_NOACTION 0
#define BUFSIZE_DECR 1
#define BUFSIZE_DECR_FAST 2
#define BUFSIZE_INCR 3
#define BUFSIZE_INCR_FAST 4

#define JIFFIES_LAST_IDLE_SHIFT 8
#define BUFSIZE_SHIFT 3

#define SOCKTYPE_RAW 0
#define SOCKTYPE_MANAGED 1

#define RCV_BUF_STATE_OK 0
#define RCV_BUF_STATE_INCOMPLETE 1
#define RCV_BUF_STATE_RESET 2

#define SND_BUF_STATE_INCOMPLETE 0
#define SND_BUF_STATE_FILLED 1
struct cor_conn {
        __u8 is_client; /* immutable after allocated */

        /**
         * isreset values:
         * 0... connection active
         * 1... connection is about to be reset, target does not need to be
         *      notified
         * 2... connection is reset
         */
        __u8 isreset;

        __u8 sourcetype;
        __u8 targettype;

        __u8 is_highlatency:1,
                is_highlatency_send_needed:1;

        spinlock_t rcv_lock;

        union {
                struct {
                        struct cor_neighbor *nb;

                        __u32 conn_id;

                        struct list_head reorder_queue;
                        __u32 reorder_memused;

                        __u16 small_ooo_packets:14,
                                inorder_ack_needed:1;

                        __u32 window_seqnolimit;
                        __u32 window_seqnolimit_remote;

                        /* protected by nb->cmsg_lock */
                        struct list_head acks_pending;
                } in;

                struct {
                        struct cor_conn_src_sock_extradata *ed;

                        /**
                         * cl_list and in_cl_list are protected by
                         * cor_bindnodes
                         */
                        struct list_head cl_list;
                        __u8 in_cl_list;

                        /* protected by flushtoconn_oom_lock */
                        struct list_head flushtoconn_oom_lh;
                        /* protected by conn->rcv_lock */
                        __u8 in_flushtoconn_oom_list;

                        __u8 keepalive_intransit:1,
                                send_keepalive_req_needed:1,
                                send_keepalive_resp_needed:1,
                                send_rcvend_needed:1;

                        __u8 last_windowused;

                        char paramlen_buf[4];
                } sock;
        } src;

        union {
                struct {
                        struct cor_neighbor *nb;

                        /* list of all connections to this neighbor */
                        struct list_head nb_list;
                        unsigned long jiffies_last_act;
                        __u32 nblist_busy_remaining;

                        __u32 conn_id;

                        __u32 seqno_nextsend;

                        __u32 seqno_windowlimit;

                        /* protected by nb->retrans_conn_lock, sorted by
                         * seqno */
                        struct list_head retrans_list;

                        struct cor_resume_block rb;

                        unsigned long jiffies_idle_since;

                        __u16 maxsend_extra;

                        __u8 lastsend_windowused;

                        __u8 remote_bufsize_changerate;

                        __u8 priority_send_allowed:1;

                        __u16 priority_last:12;

                        /* protected by nb->retrans_conn_lock */
                        __u16 retrans_lowwindow;
                } out;

                struct {
                        __u8 waiting_for_userspace;
                        unsigned long waiting_for_userspace_since;

                        struct cor_sock *cs;

                        char rcv_hdr[CONN_MNGD_HEADERLEN];
                        char rcv_chksum[CONN_MNGD_CHECKSUMLEN];

                        __u16 rcv_hdr_flags;
                } sock;
        } trgt;

        struct {
                struct list_head items;
                struct cor_data_buf_item *nextread;

                __u32 read_remaining;

                __u16 next_read_offset;

                __u32 bufspace_accounted;
        } data_buf;

        struct {
                __u32 bufsize; /* 8 ==> 1 byte, see BUFSIZE_SHIFT */
                __u32 ignore_rcv_lowbuf;

                __u32 bytes_rcvd;
                __u8 state;
        } bufsize;
};
static inline __u32 cor_get_connid_reverse(__u32 conn_id)
{
        return conn_id ^ (1U << 31);
}
struct cor_conn_bidir {
        struct cor_conn cli;
        struct cor_conn srv;

        struct kref ref;
};
static inline struct cor_conn_bidir *cor_get_conn_bidir(struct cor_conn *cn)
{
        if (cn->is_client)
                return container_of(cn, struct cor_conn_bidir, cli);
        else
                return container_of(cn, struct cor_conn_bidir, srv);
}
static inline struct cor_conn *cor_get_conn_reversedir(struct cor_conn *cn)
{
        if (cn->is_client) {
                struct cor_conn_bidir *cnb = container_of(cn,
                                struct cor_conn_bidir, cli);
                return &cnb->srv;
        } else {
                struct cor_conn_bidir *cnb = container_of(cn,
                                struct cor_conn_bidir, srv);
                return &cnb->cli;
        }
}
static inline void cor_conn_kref_get(struct cor_conn *cn, char *reason)
{
        /* printk(KERN_ERR "cor_conn_kref_get %p %s\n", cn, reason); */
        kref_get(&cor_get_conn_bidir(cn)->ref);
}

void cor_free_conn(struct kref *ref); /* conn.c */

static inline void cor_conn_kref_put(struct cor_conn *cn, char *reason)
{
        /* printk(KERN_ERR "cor_conn_kref_put %p %s\n", cn, reason); */
        kref_put(&cor_get_conn_bidir(cn)->ref, cor_free_conn);
}

static inline void cor_conn_kref_put_bug(struct cor_conn *cn, char *reason)
{
        /* printk(KERN_ERR "cor_conn_kref_put_bug %p %s\n", cn, reason); */
        kref_put(&cor_get_conn_bidir(cn)->ref, cor_kreffree_bug);
}
#define CONN_RETRANS_INITIAL 0
#define CONN_RETRANS_SCHEDULED 1
#define CONN_RETRANS_LOWWINDOW 2
#define CONN_RETRANS_SENDING 3
#define CONN_RETRANS_ACKED 4
struct cor_conn_retrans {
        /* timeout_list and conn_list share a single ref */
        struct kref ref;
        /* only in timeout_list if state == CONN_RETRANS_SCHEDULED */
        struct list_head timeout_list;
        struct list_head conn_list;
        struct cor_conn *trgt_out_o;

        __u32 seqno;
        __u32 length;

        __u8 state;
        unsigned long timeout;
};
#define RCVOOO_BUF 0
#define RCVOOO_SKB 1
struct cor_rcvooo {
        struct list_head lh;

        __u32 seqno;

        __u8 type;
        __u8 windowused;
        __u8 flush;
};

struct cor_rcvooo_buf {
        struct cor_rcvooo r;

        char *data;
        __u32 len;
};

/* inside skb->cb */
struct cor_skb_procstate {
        union {
                struct {
                        struct work_struct work;
                } announce;

                struct {
                        __u32 skb_memused;
                        struct cor_rcvooo r;
                } rcv_ooo;

                struct {
                        struct cor_data_buf_item dbi;
                } rcv;
        } funcstate;
};
#define CS_TYPE_UNCONNECTED 0
#define CS_TYPE_LISTENER 1
#define CS_TYPE_CONN_RAW 2
#define CS_TYPE_CONN_MANAGED 3

#define CS_CONNECTSTATE_UNCONNECTED 0
#define CS_CONNECTSTATE_CONNECTING 1
#define CS_CONNECTSTATE_CONNECTED 2
#define CS_CONNECTSTATE_ERROR 3
struct cor_sock {
        struct sock sk; /* must be first */

        struct kref ref;

        /* type may not change once it is set to != CS_TYPE_UNCONNECTED */
        __u8 type;

        __u8 publish_service;

        __u8 is_highlatency;

        /* union member names below are assumptions */
        union {
                struct {
                        /* listener is protected by cor_bindnodes */
                        struct list_head lh;

                        __u8 publish_service;

                        struct list_head conn_queue;
                } listener;

                struct {
                        struct cor_conn *src_sock;
                        struct cor_conn *trgt_sock;

                        struct cor_data_buf_item *rcvitem;

                        struct cor_sock *pass_on_close;
                } conn_raw;

                struct {
                        struct cor_sockaddr remoteaddr;

                        struct list_head rd_msgs;
                        struct list_head crd_lh;

                        struct cor_conn *src_sock;
                        struct cor_conn *trgt_sock;

                        __u16 snd_segment_size;

                        __u8 send_in_progress;

                        __u16 rcvbuf_consumed;
                } conn_managed;
        } data;

        struct work_struct readfromconn_work;
        atomic_t readfromconn_work_scheduled;

        atomic_t ready_to_read;
        atomic_t ready_to_write;
        atomic_t ready_to_accept;
};
#define ACK_NEEDED_NO 0
#define ACK_NEEDED_SLOW 1
#define ACK_NEEDED_FAST 2

extern spinlock_t cor_local_addr_lock;
extern __u8 cor_local_has_addr;
extern __be64 cor_local_addr;
extern __be32 cor_local_addr_sessionid;
int cor_is_device_configurated(struct net_device *dev);

void cor_set_interface_config(struct cor_interface_config *new_config,
                __u32 new_num_interfaces, int new_all_interfaces);

void cor_config_down(void);

int cor_config_up(__u8 has_addr, __be64 addr);

int cor_is_clientmode(void);
#ifdef DEBUG_QOS_SLOWSEND
int _cor_dev_queue_xmit(struct sk_buff *skb, int caller);
#else
static inline int _cor_dev_queue_xmit(struct sk_buff *skb, int caller)
{
        return dev_queue_xmit(skb);
}
#endif
struct sk_buff *cor_create_packet_cmsg(struct cor_neighbor *nb, int size,
                gfp_t alloc_flags, __u32 seqno);

struct sk_buff *cor_create_packet(struct cor_neighbor *nb, int size,
                gfp_t alloc_flags);

struct sk_buff *cor_create_packet_conndata(struct cor_neighbor *nb, int size,
                gfp_t alloc_flags, __u32 conn_id, __u32 seqno, __u8 windowused,
                __u8 flush);

void cor_dev_free(struct kref *ref);

struct cor_dev *cor_dev_get(struct net_device *dev);

void cor_dev_destroy(struct net_device *dev);

void cor_dev_down(void);

int cor_dev_up(void);
int __init cor_dev_init(void);

void __exit cor_dev_exit1(void);

void cor_dev_queue_destroy(struct cor_dev *cd);

int cor_dev_queue_init(struct cor_dev *cd);

void cor_dev_queue_set_congstatus(struct cor_dev *cd_qlocked);

void cor_dev_queue_set_lastdrop(struct cor_dev *cd);
#define QOS_RESUME_DONE 0
#define QOS_RESUME_CONG 1
#define QOS_RESUME_NEXTNEIGHBOR 2 /* cor_resume_neighbors() internal */
#define QOS_RESUME_EXIT 3

#define QOS_CALLER_KPACKET 0
#define QOS_CALLER_CONN_RETRANS 1
#define QOS_CALLER_ANNOUNCE 2
#define QOS_CALLER_NEIGHBOR 3

void _cor_dev_queue_enqueue(struct cor_dev *cd, struct cor_resume_block *rb,
                unsigned long cmsg_send_start_j, ktime_t cmsg_send_start_kt,
                int caller, int from_nbcongwin_resume,
                int from_nbnotactive_resume);

void cor_dev_queue_enqueue(struct cor_dev *cd, struct cor_resume_block *rb,
                unsigned long cmsg_send_start_j, ktime_t cmsg_send_start_kt,
                int caller, int from_nbnotactive_resume);
static inline int cor_dev_queue_xmit(struct sk_buff *skb,
                struct cor_dev *cd, int caller)
{
        int rc = _cor_dev_queue_xmit(skb, caller);

        if (unlikely(rc != NET_XMIT_SUCCESS))
                cor_dev_queue_set_lastdrop(cd);

        return rc;
}
static inline __u16 cor_enc_priority(__u32 value)
{
        __u16 exponent = 0;
        __u16 mantissa;
        __u16 ret;

        while ((value >> exponent) > 255) {
                exponent++;
        }
        BUG_ON(exponent > 15);

        mantissa = (value >> exponent);
        ret = (mantissa << 4) | exponent;

        return ret;
}
static inline __u32 cor_dec_priority(__u16 priority)
{
        __u32 mantissa = (__u32) (priority >> 4);
        __u16 exponent = (priority & 15);

        BUG_ON(priority > 4095);
        return (mantissa << exponent);
}
static inline __u32 cor_priority_max(void)
{
        return cor_dec_priority(4095);
}
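
/*
 * Worked example (added for illustration): cor_enc_priority(1000) shifts
 * until the mantissa fits 8 bits: 1000 >> 2 == 250, so exponent == 2,
 * mantissa == 250 and the encoded value is (250 << 4) | 2 == 4002.
 * cor_dec_priority(4002) == 250 << 2 == 1000. The encoding is lossy for
 * values whose mantissa does not fit 8 bits exactly.
 */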
__u8 __attribute__((const)) cor_enc_log_64_11(__u32 value);

__u32 __attribute__((const)) cor_dec_log_64_11(__u8 value);
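
/*
 * The "64_11" name suggests the same logarithmic scheme as the window
 * encoding above, presumably 64 * 2^((value - 1) / 11) with eleven steps per
 * doubling; this is an assumption, not verified against the original source.
 */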
void cor_swap_list_items(struct list_head *lh1, struct list_head *lh2);

__u64 cor_update_atomic_sum(atomic64_t *atomic_sum, __u32 oldvalue,
                __u32 newvalue);

int __init cor_util_init(void);
extern atomic_t cor_num_neighs;

int cor_is_from_nb(struct sk_buff *skb, struct cor_neighbor *nb);

struct cor_neighbor *_cor_get_neigh_by_mac(struct net_device *dev,
                char *mac);

struct cor_neighbor *cor_get_neigh_by_mac(struct sk_buff *skb);

struct cor_neighbor *cor_find_neigh(__be64 addr);

void cor_resend_rcvmtu(struct net_device *dev);

__u32 cor_generate_neigh_list(char *buf, __u32 buflen);

void cor_reset_neighbors(struct net_device *dev);

int cor_get_neigh_state(struct cor_neighbor *nb);

void cor_ping_resp(struct cor_neighbor *nb, __u32 cookie, __u32 respdelay);

__u32 cor_add_ping_req(struct cor_neighbor *nb, unsigned long *last_ping_time);

void cor_ping_sent(struct cor_neighbor *nb, __u32 cookie);

void cor_unadd_ping_req(struct cor_neighbor *nb, __u32 cookie,
                unsigned long last_ping_time, int congested);

#define TIMETOSENDPING_NO 0
#define TIMETOSENDPING_YES 1
#define TIMETOSENDPING_FORCE 2
int cor_time_to_send_ping(struct cor_neighbor *nb);

unsigned long cor_get_next_ping_time(struct cor_neighbor *nb);

void cor_add_neighbor(struct cor_neighbor_discdata *nb_dd);

struct cor_conn *cor_get_conn(struct cor_neighbor *nb, __u32 conn_id);

int cor_insert_connid(struct cor_neighbor *nb, struct cor_conn *src_in_ll);

void cor_insert_connid_reuse(struct cor_neighbor *nb, __u32 conn_id);

int cor_connid_alloc(struct cor_neighbor *nb, struct cor_conn *src_in_ll);

int __init cor_neighbor_init(void);

void __exit cor_neighbor_exit2(void);
/* neigh_ann_rcv.c */
int cor_rcv_announce(struct sk_buff *skb);

int __init cor_neigh_ann_rcv_init(void);

void __exit cor_neigh_ann_rcv_exit2(void);
/* neigh_ann_snd.c */
int _cor_send_announce(struct cor_announce_data *ann, int fromqos, int *sent);

void cor_announce_data_free(struct kref *ref);

void cor_announce_send_start(struct net_device *dev, char *mac, int type);

void cor_announce_send_stop(struct net_device *dev, char *mac, int type);
void cor_kernel_packet(struct cor_neighbor *nb, struct sk_buff *skb,
                ktime_t ktime_rcv);

struct cor_control_msg_out;

#define ACM_PRIORITY_LOW 1 /* oom recovery easy */
#define ACM_PRIORITY_MED 2 /* oom may cause timeouts */
#define ACM_PRIORITY_HIGH 3 /* cm acks - needed for freeing old cms */
struct cor_control_msg_out *cor_alloc_control_msg(struct cor_neighbor *nb,
                int priority);

void cor_free_control_msg(struct cor_control_msg_out *cm);

void cor_retransmit_timerfunc(struct timer_list *retrans_timer);

void cor_kern_ack_rcvd(struct cor_neighbor *nb, __u32 seqno);

int cor_send_messages(struct cor_neighbor *nb, unsigned long cmsg_send_start_j,
                ktime_t cmsg_send_start_kt, int *sent);

void cor_controlmsg_timerfunc(struct timer_list *cmsg_timer);

void cor_schedule_controlmsg_timer(struct cor_neighbor *nb_cmsglocked);

void cor_send_rcvmtu(struct cor_neighbor *nb);

void cor_send_pong(struct cor_neighbor *nb, __u32 cookie, ktime_t ping_rcvtime);

int cor_send_reset_conn(struct cor_neighbor *nb, __u32 conn_id, int lowprio);

void cor_send_ack(struct cor_neighbor *nb, __u32 seqno, __u8 fast);

void cor_send_ack_conn_ifneeded(struct cor_conn *src_in_l, __u32 seqno_ooo,
                __u32 ooo_length);

void cor_send_priority(struct cor_conn *trgt_out_ll, __u16 priority);

void cor_free_ack_conns(struct cor_conn *src_in_lx);

void cor_send_connect_success(struct cor_control_msg_out *cm, __u32 conn_id,
                struct cor_conn *src_in);

void cor_send_connect_nb(struct cor_control_msg_out *cm, __u32 conn_id,
                __u32 seqno1, __u32 seqno2, struct cor_conn *src_in_ll);

void cor_send_conndata(struct cor_control_msg_out *cm, __u32 conn_id,
                __u32 seqno, char *data_orig, char *data, __u32 datalen,
                __u8 windowused, __u8 flush, __u8 highlatency,
                struct cor_conn_retrans *cr);

int __init cor_kgen_init(void);

void __exit cor_kgen_exit2(void);
/* neigh_congwin.c */
void cor_nbcongwin_data_retransmitted(struct cor_neighbor *nb,
                __u64 bytes_sent);

enum hrtimer_restart cor_nbcongwin_hrtimerfunc(struct hrtimer *nb_congwin_timer);

void cor_nbcongwin_data_acked(struct cor_neighbor *nb, __u64 bytes_acked);

void cor_nbcongwin_data_sent(struct cor_neighbor *nb, __u32 bytes_sent);

int cor_nbcongwin_send_allowed(struct cor_neighbor *nb);
/* neigh_waitingconns.c */
int cor_neigh_waitingsconns_resume(struct cor_dev *cd, struct cor_neighbor *nb,
                unsigned long jiffies_nb_lastduration, int *progress);

void cor_neigh_waitingconns_remove_conn(struct cor_conn *trgt_out_l);

void cor_neigh_waitingconns_enqueue_conn(struct cor_conn *trgt_out_lx);
extern struct kmem_cache *cor_connid_reuse_slab;

extern atomic_t cor_num_conns;

extern spinlock_t cor_bindnodes;

int cor_new_incoming_conn_allowed(struct cor_neighbor *nb);

__u32 _cor_conn_refresh_priority(struct cor_conn *cn_lx);

__u32 cor_conn_refresh_priority(struct cor_conn *cn, int locked);

void cor_set_conn_is_highlatency(struct cor_conn *cn, __u8 is_highlatency,
                int locked, int call_refresh_priority);

void cor_set_conn_in_priority(struct cor_neighbor *nb, __u32 conn_id,
                struct cor_conn *src_in, __u8 priority_seqno, __u16 priority,
                __u8 is_highlatency);

void cor_conn_set_last_act(struct cor_conn *trgt_out_lx);

int cor_conn_init_out(struct cor_conn *trgt_unconn_ll, struct cor_neighbor *nb,
                __u32 rcvd_connid, int use_rcvd_connid);

int cor_conn_init_sock_source(struct cor_conn *cn);

void cor_conn_init_sock_target(struct cor_conn *cn);

__u32 cor_list_services(char *buf, __u32 buflen);

void cor_set_publish_service(struct cor_sock *cs, __u8 value);

void cor_close_port(struct cor_sock *cs);

int cor_open_port(struct cor_sock *cs_l, __be32 port);

#define CONNECT_PORT_OK 0
#define CONNECT_PORT_PORTCLOSED 1
#define CONNECT_PORT_TEMPORARILY_OUT_OF_RESOURCES 2

int cor_connect_port(struct cor_conn *trgt_unconn_ll, __be32 port);

int cor_connect_neigh(struct cor_conn *trgt_unconn_ll, __be64 addr);

struct cor_conn_bidir *cor_alloc_conn(gfp_t allocflags, __u8 is_highlatency);

void cor_reset_conn_locked(struct cor_conn_bidir *cnb_ll);

void cor_reset_conn(struct cor_conn *cn);

void cor_reset_ooo_queue(struct cor_conn *src_in_lx);

void cor_drain_ooo_queue(struct cor_conn *src_in_l);

void cor_conn_rcv(struct cor_neighbor *nb, struct sk_buff *skb, char *data,
                __u32 len, __u32 conn_id, __u32 seqno, __u8 windowused,
                __u8 flush);

int __init cor_rcv_init(void);

void __exit cor_rcv_exit2(void);
/* conn_src_sock.c */
void cor_update_src_sock_sndspeed(struct cor_conn *src_sock_l,
                __u32 bytes_sent);

int cor_sock_sndbufavailable(struct cor_conn *src_sock_lx, int for_wakeup);

#define RC_FTC_OK 0
#define RC_FTC_OOM 1
#define RC_FTC_ERR 2
int _cor_mngdsocket_flushtoconn(struct cor_conn *src_sock_l);

int cor_mngdsocket_flushtoconn_ctrl(struct cor_sock *cs_m_l, __u8 send_eof,
                __u8 send_rcvend, __u8 send_keepalive_resp,
                __be32 keepalive_resp_cookie);

int cor_mngdsocket_flushtoconn_data(struct cor_sock *cs_m_l);

void cor_keepalive_req_timerfunc(struct timer_list *retrans_conn_timer);

void cor_keepalive_req_sched_timer(struct cor_conn *src_sock_lx);

void cor_keepalive_resp_rcvd(struct cor_sock *cs_m_l, __be32 cookie);

int __init cor_conn_src_sock_init1(void);

void __exit cor_conn_src_sock_exit1(void);
/* conn_trgt_unconn.c */
int cor_encode_len(char *buf, int buflen, __u32 len);

void cor_proc_cpacket(struct cor_conn *trgt_unconn);
/* conn_trgt_out.c */
void cor_free_connretrans(struct kref *ref);

void cor_reschedule_conn_retrans_timer(struct cor_neighbor *nb_retranslocked);

void cor_cancel_all_conn_retrans(struct cor_conn *trgt_out_l);

int cor_send_retrans(struct cor_neighbor *nb, int *sent);

void cor_retransmit_conn_timerfunc(struct timer_list *retrans_timer_conn);

void cor_conn_ack_ooo_rcvd(struct cor_neighbor *nb, __u32 conn_id,
                struct cor_conn *trgt_out, __u32 seqno_ooo, __u32 length,
                __u64 *bytes_acked);

void cor_conn_ack_rcvd(struct cor_neighbor *nb, __u32 conn_id,
                struct cor_conn *trgt_out, __u32 seqno, int setwindow,
                __u8 window, __u8 bufsize_changerate, __u64 *bytes_acked);

void cor_schedule_retransmit_conn(struct cor_conn_retrans *cr, int connlocked,
                int nbretrans_locked);

int cor_srcin_buflimit_reached(struct cor_conn *src_in_lx);
/* RC_FLUSH_CONN_OUT_SENT | RC_FLUSH_CONN_OUT_{^SENT} */
#define RC_FLUSH_CONN_OUT_OK 1
#define RC_FLUSH_CONN_OUT_SENT_CONG 2 /* cor_flush_out internal only */
#define RC_FLUSH_CONN_OUT_NBNOTACTIVE 3
#define RC_FLUSH_CONN_OUT_CONG 4
#define RC_FLUSH_CONN_OUT_MAXSENT 5
#define RC_FLUSH_CONN_OUT_OOM 6

int _cor_flush_out(struct cor_conn *trgt_out_lx, __u32 maxsend, __u32 *sent,
                int from_qos, int maxsend_forcedelay);

static inline int cor_flush_out(struct cor_conn *trgt_out_lx, __u32 *sent)
{
        int rc = _cor_flush_out(trgt_out_lx, 1 << 30, sent, 0, 0);

        if (rc == RC_FLUSH_CONN_OUT_CONG || rc == RC_FLUSH_CONN_OUT_MAXSENT ||
                        rc == RC_FLUSH_CONN_OUT_OOM ||
                        rc == RC_FLUSH_CONN_OUT_NBNOTACTIVE)
                cor_neigh_waitingconns_enqueue_conn(trgt_out_lx);

        return rc;
}
unsigned long cor_get_conn_idletime(struct cor_conn *trgt_out_lx);

int __init cor_snd_init(void);

void __exit cor_snd_exit2(void);
/* conn_trgt_sock.c */
void cor_flush_sock_managed(struct cor_conn *trgt_sock_lx, int from_recvmsg,
                __u8 *do_wake_sender);

void cor_flush_sock(struct cor_conn *trgt_sock_lx);
/* conn_databuf.c */
extern struct kmem_cache *cor_data_buf_item_slab;

void cor_databuf_init(struct cor_conn *cn_init);

void cor_bufsize_init(struct cor_conn *cn_l, __u32 bufsize);

int cor_account_bufspace(struct cor_conn *cn_lx);

int cor_conn_src_unconn_write_allowed(struct cor_conn *src_unconn_lx);

void cor_update_windowlimit(struct cor_conn *src_in_lx);

__u8 _cor_bufsize_update_get_changerate(struct cor_conn *cn_lx);
static inline int cor_bufsize_initial_phase(struct cor_conn *cn_lx)
{
        return unlikely(cn_lx->bufsize.bytes_rcvd != (1 << 24) - 1 &&
                        cn_lx->bufsize.bytes_rcvd < cn_lx->bufsize.bufsize);
}

static inline int cor_ackconn_urgent(struct cor_conn *cn_lx)
{
        return cor_bufsize_initial_phase(cn_lx) ||
                        cn_lx->bufsize.state == BUFSIZE_INCR_FAST;
}
*trgt_sock_lx
);
1774 void cor_databuf_ackdiscard(struct cor_conn
*cn_lx
);
1776 void cor_reset_seqno(struct cor_conn
*cn_l
, __u32 initseqno
);
1778 void cor_databuf_pull(struct cor_conn
*cn_lx
, char *dst
, __u32 len
);
static inline __u32 cor_databuf_trypull(struct cor_conn *cn_l, char *dst,
                __u32 len)
{
        if (len > cn_l->data_buf.read_remaining)
                len = cn_l->data_buf.read_remaining;

        cor_databuf_pull(cn_l, dst, len);

        return len;
}
void cor_databuf_unpull_dpi(struct cor_conn *trgt_sock, struct cor_sock *cs,
                struct cor_data_buf_item *item, __u16 next_read_offset);

void cor_databuf_pull_dbi(struct cor_sock *cs_rl, struct cor_conn *trgt_sock_l);

void cor_databuf_unpull(struct cor_conn *trgt_out_l, __u32 bytes);

void cor_databuf_pullold(struct cor_conn *trgt_out_l, __u32 startpos, char *dst,
                __u32 len);

void cor_databuf_ack(struct cor_conn *trgt_out_l, __u32 pos);

void cor_databuf_ackread(struct cor_conn *cn_lx);

__u32 _cor_receive_buf(struct cor_conn *cn_lx, char *buf, __u32 datalen,
                int from_sock, __u8 windowused, __u8 flush);
static inline __u32 cor_receive_buf(struct cor_conn *cn_lx, char *buf,
                __u32 datalen, __u8 windowused, __u8 flush)
{
        return _cor_receive_buf(cn_lx, buf, datalen, 0, windowused, flush);
}
static inline __u32 cor_receive_sock(struct cor_conn *src_sock_l, char *buf,
                __u32 datalen, __u8 flush)
{
        __u32 ret;

        BUG_ON(src_sock_l->sourcetype != SOURCE_SOCK);

        ret = _cor_receive_buf(src_sock_l, buf, datalen, 1,
                        src_sock_l->src.sock.last_windowused, flush);

        if (likely(ret > 0)) {
                __u32 bufsize = src_sock_l->bufsize.bufsize >> BUFSIZE_SHIFT;
                __u32 bufused = src_sock_l->data_buf.read_remaining;

                if (bufused >= bufsize)
                        src_sock_l->src.sock.last_windowused = 31;
                else if (unlikely(((__u64) bufused) * 31 > U32_MAX))
                        src_sock_l->src.sock.last_windowused =
                                        bufused / ((bufsize + 30) / 31);
                else
                        src_sock_l->src.sock.last_windowused =
                                        (bufused * 31) / bufsize;
        }

        return ret;
}
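
/*
 * Worked example (illustration): with bufsize == 4096 bytes and
 * bufused == 1024, last_windowused becomes (1024 * 31) / 4096 == 7; the
 * 0..31 scale matches the 5-bit windowused field of conndata packets.
 */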
__u32 cor_receive_skb(struct cor_conn *src_in_l, struct sk_buff *skb,
                __u8 windowused, __u8 flush);

void cor_wake_sender(struct cor_conn *cn);

int __init cor_forward_init(void);

void __exit cor_forward_exit2(void);
void cor_free_sock(struct kref *ref);

int cor_socket_setsockopt_tos(struct socket *sock, char __user *optval,
                unsigned int optlen);

int cor_socket_setsockopt_priority(struct socket *sock, char __user *optval,
                unsigned int optlen);

int cor_socket_socketpair(struct socket *sock1, struct socket *sock2);

int cor_socket_getname(struct socket *sock, struct sockaddr *addr, int peer);

int cor_socket_mmap(struct file *file, struct socket *sock,
                struct vm_area_struct *vma);

int _cor_createsock(struct net *net, struct socket *sock, int protocol,
                int kern, __u8 is_client);

int __init cor_sock_init1(void);

int __init cor_sock_init2(void);

void __exit cor_sock_exit1(void);
/* sock_rdaemon.c */
int cor_is_device_configurated(struct net_device *dev);

int cor_create_rdaemon_sock(struct net *net, struct socket *sock, int protocol,
                int kern);

int cor_rdreq_connect(struct cor_sock *cs);

void cor_usersock_release(struct cor_sock *cs);

int __init cor_rd_init1(void);

int __init cor_rd_init2(void);

void __exit cor_rd_exit1(void);

void __exit cor_rd_exit2(void);
int cor_create_raw_sock(struct net *net, struct socket *sock, int protocol,
                int kern);
/* sock_managed.c */
struct cor_sock *cor_get_sock_by_cookie(__be64 cookie);

void __cor_set_sock_connecterror(struct cor_sock *cs_m_l, int errorno);

void _cor_set_sock_connecterror(struct cor_sock *cs, int errorno);

void cor_mngdsocket_chksum(char *hdr, __u32 hdrlen, char *data, __u32 datalen,
                char *chksum, __u32 chksum_len);
static inline void cor_set_sock_connecterror(__be64 cookie, int errorno)
{
        struct cor_sock *cs = cor_get_sock_by_cookie(cookie);

        if (cs != 0) {
                _cor_set_sock_connecterror(cs, errorno);
                kref_put(&cs->ref, cor_free_sock);
        }
}
void cor_mngdsocket_readfromconn_fromatomic(struct cor_sock *cs);

void cor_mngdsocket_readfromconn_wq(struct work_struct *work);

int cor_create_managed_sock(struct net *net, struct socket *sock, int protocol,
                int kern);

int __init cor_sock_managed_init1(void);
static inline struct cor_skb_procstate *cor_skb_pstate(struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(struct cor_skb_procstate) > sizeof(skb->cb));
        return (struct cor_skb_procstate *) &skb->cb[0];
}

static inline struct sk_buff *cor_skb_from_pstate(struct cor_skb_procstate *ps)
{
        return (struct sk_buff *) (((char *)ps) - offsetof(struct sk_buff, cb));
}
static inline int cor_qos_fastsend_allowed_conn_retrans(struct cor_neighbor *nb)
{
        return atomic_read(&nb->cd->send_queue.cong_status) < CONGSTATUS_RETRANS;
}

static inline int cor_qos_fastsend_allowed_announce(struct net_device *dev)
{
        int rc;
        struct cor_dev *cd = cor_dev_get(dev);

        if (cd == 0)
                return 0;

        rc = atomic_read(&cd->send_queue.cong_status) < CONGSTATUS_ANNOUNCE;

        kref_put(&cd->ref, cor_dev_free);

        return rc;
}

static inline int cor_qos_fastsend_allowed_conn(struct cor_conn *trgt_out_lx)
{
        struct cor_dev *cd = trgt_out_lx->trgt.out.nb->cd;

        return atomic_read(&cd->send_queue.cong_status) < CONGSTATUS_CONNDATA;
}
static inline __u32 cor_rcv_mtu(struct cor_neighbor *nb)
{
        return nb->dev->mtu;
}

static inline __u32 cor_snd_mtu(struct cor_neighbor *nb)
{
        return min((__u32) nb->dev->mtu,
                        (__u32) atomic_read(&nb->remote_rcvmtu));
}

static inline __u32 cor_mss(struct cor_neighbor *nb, __u32 l3overhead)
{
        return cor_snd_mtu(nb) - LL_RESERVED_SPACE(nb->dev) - l3overhead;
}

static inline __u32 cor_mss_cmsg(struct cor_neighbor *nb)
{
        return cor_mss(nb, 5);
}
static inline __u32 cor_mss_conndata(struct cor_neighbor *nb, int highlatency)
{
        __u32 mss_tmp = cor_mss(nb, 9);
        __u32 i;

        if (mss_tmp < 256 || highlatency || LOWLATENCY_LOWERMTU == 0)
                return mss_tmp;

        for (i = 256; i < 4096; i *= 2) {
                if (i * 2 > mss_tmp)
                        return i;
        }

        return mss_tmp - mss_tmp % 4096;
}
static inline __u32 cor_send_conndata_as_skb(struct cor_neighbor *nb,
                __u32 size)
{
        return size >= cor_mss_conndata(nb, 0) / 2 ||
                        size > KP_CONN_DATA_MAXLEN;
}
static inline long cor_calc_timeout(__u32 latency_us, __u32 latency_stddev_us,
                __u32 max_remote_ack_delay_us)
{
        unsigned long addto;

        if (unlikely(unlikely(latency_us > 1000000000) ||
                        unlikely(latency_stddev_us > 500000000) ||
                        unlikely(max_remote_ack_delay_us > 1000000000))) {
                addto = msecs_to_jiffies(latency_us / 1000 + latency_us / 4000 +
                                latency_stddev_us / 333 +
                                max_remote_ack_delay_us / 1000);
        } else {
                addto = usecs_to_jiffies(latency_us + latency_us / 4 +
                                latency_stddev_us * 3 +
                                max_remote_ack_delay_us);
        }

        /*
         * 2 is added because:
         * 1) _to_jiffies rounds down, but should round up, so add 1 to
         *    compensate
         * 2) even if latency is 0, we never want to schedule the retransmit
         *    to run right now, so add 1 more
         */
        return jiffies + 2 + addto;
}
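
/*
 * Worked example (illustration): latency_us == 10000, latency_stddev_us ==
 * 2000 and max_remote_ack_delay_us == 1000 give
 * usecs_to_jiffies(10000 + 2500 + 6000 + 1000) == usecs_to_jiffies(19500),
 * i.e. about 19 jiffies at HZ=1000 (rounding down), plus the constant 2.
 */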
static inline void cor_put_be64(char *dst, __be64 value)
{
        char *p_value = (char *) &value;

        dst[0] = p_value[0];
        dst[1] = p_value[1];
        dst[2] = p_value[2];
        dst[3] = p_value[3];
        dst[4] = p_value[4];
        dst[5] = p_value[5];
        dst[6] = p_value[6];
        dst[7] = p_value[7];
}

static inline void cor_put_u64(char *dst, __u64 value)
{
        cor_put_be64(dst, cpu_to_be64(value));
}

static inline void cor_put_be32(char *dst, __be32 value)
{
        char *p_value = (char *) &value;

        dst[0] = p_value[0];
        dst[1] = p_value[1];
        dst[2] = p_value[2];
        dst[3] = p_value[3];
}

static inline void cor_put_u32(char *dst, __u32 value)
{
        cor_put_be32(dst, cpu_to_be32(value));
}

static inline void cor_put_be16(char *dst, __be16 value)
{
        char *p_value = (char *) &value;

        dst[0] = p_value[0];
        dst[1] = p_value[1];
}

static inline void cor_put_u16(char *dst, __u16 value)
{
        cor_put_be16(dst, cpu_to_be16(value));
}
static inline char *cor_pull_skb(struct sk_buff *skb, unsigned int len)
{
        char *ptr = skb_pull(skb, len);

        if (unlikely(ptr == 0))
                return 0;

        /* skb_pull() returns the new data start; the pulled bytes are
         * directly before it */
        return ptr - len;
}
static inline __be64 cor_parse_be64(char *buf)
{
        __be64 ret;

        BUG_ON(buf == 0);

        ((char *)&ret)[0] = buf[0];
        ((char *)&ret)[1] = buf[1];
        ((char *)&ret)[2] = buf[2];
        ((char *)&ret)[3] = buf[3];
        ((char *)&ret)[4] = buf[4];
        ((char *)&ret)[5] = buf[5];
        ((char *)&ret)[6] = buf[6];
        ((char *)&ret)[7] = buf[7];

        return ret;
}

static inline __u64 cor_parse_u64(char *buf)
{
        return be64_to_cpu(cor_parse_be64(buf));
}

static inline __be32 cor_parse_be32(char *ptr)
{
        __be32 ret;

        BUG_ON(ptr == 0);

        ((char *)&ret)[0] = ptr[0];
        ((char *)&ret)[1] = ptr[1];
        ((char *)&ret)[2] = ptr[2];
        ((char *)&ret)[3] = ptr[3];

        return ret;
}

static inline __u32 cor_parse_u32(char *ptr)
{
        return be32_to_cpu(cor_parse_be32(ptr));
}

static inline __be16 cor_parse_be16(char *ptr)
{
        __be16 ret;

        BUG_ON(ptr == 0);

        ((char *)&ret)[0] = ptr[0];
        ((char *)&ret)[1] = ptr[1];

        return ret;
}

static inline __u16 cor_parse_u16(char *ptr)
{
        return be16_to_cpu(cor_parse_be16(ptr));
}

static inline __u8 cor_parse_u8(char *ptr)
{
        BUG_ON(ptr == 0);
        return (__u8) ptr[0];
}
static inline __be32 cor_pull_be32(struct sk_buff *skb)
{
        return cor_parse_be32(cor_pull_skb(skb, 4));
}

static inline __u32 cor_pull_u32(struct sk_buff *skb)
{
        return cor_parse_u32(cor_pull_skb(skb, 4));
}

static inline __u16 cor_pull_u16(struct sk_buff *skb)
{
        return cor_parse_u16(cor_pull_skb(skb, 2));
}

static inline __u8 cor_pull_u8(struct sk_buff *skb)
{
        char *ptr = cor_pull_skb(skb, 1);

        return cor_parse_u8(ptr);
}
static inline int cor_is_conn_in(struct cor_conn *cn_l, struct cor_neighbor *nb,
                __u32 conn_id)
{
        if (unlikely(unlikely(cn_l->sourcetype != SOURCE_IN) ||
                        unlikely(cn_l->src.in.nb != nb) ||
                        unlikely(cn_l->src.in.conn_id != conn_id) ||
                        unlikely(cn_l->isreset != 0)))
                return 0;
        return 1;
}

static inline int cor_is_src_sock(struct cor_conn *cn_l, struct cor_sock *cs)
{
        if (unlikely(unlikely(cn_l->sourcetype != SOURCE_SOCK) ||
                        unlikely(cn_l->src.sock.ed->cs != cs)))
                return 0;
        return 1;
}

static inline int cor_is_trgt_sock(struct cor_conn *cn_l, struct cor_sock *cs)
{
        if (unlikely(unlikely(cn_l->targettype != TARGET_SOCK) ||
                        unlikely(cn_l->trgt.sock.cs != cs)))
                return 0;
        return 1;
}
#define BUFLEN_MIN 128
#define BUFLEN_MAX 4096
#define PAGESIZE (1 << PAGE_SHIFT)

static inline __u32 cor_buf_optlen(__u32 datalen, int from_sock)
{
        __u32 optlen = BUFLEN_MIN;

        /* the from_sock shortcut is reconstructed (assumption) */
        if (from_sock)
                return BUFLEN_MAX;

        while (optlen < datalen && optlen < PAGESIZE && optlen < BUFLEN_MAX)
                optlen = (optlen << 1);

        return optlen;
}
static inline void cor_databuf_item_free(struct cor_data_buf_item *item)
{
        if (item->type == DATABUF_BUF) {
                kfree(item->buf);
                kmem_cache_free(cor_data_buf_item_slab, item);
        } else if (item->type == DATABUF_SKB) {
                struct sk_buff *skb = cor_skb_from_pstate(container_of(item,
                                struct cor_skb_procstate, funcstate.rcv.dbi));
                kfree_skb(skb);
        } else {
                BUG();
        }
}
static inline int cor_seqno_eq(__u32 seqno1, __u32 seqno2)
{
        return seqno1 == seqno2;
}

static inline int cor_seqno_before(__u32 seqno1, __u32 seqno2)
{
        return (seqno1 - seqno2) >= (1LL << 31);
}

static inline int cor_seqno_before_eq(__u32 seqno1, __u32 seqno2)
{
        return cor_seqno_eq(seqno1, seqno2) || cor_seqno_before(seqno1, seqno2);
}

static inline int cor_seqno_after(__u32 seqno1, __u32 seqno2)
{
        return cor_seqno_before_eq(seqno1, seqno2) ? 0 : 1;
}

static inline int cor_seqno_after_eq(__u32 seqno1, __u32 seqno2)
{
        return cor_seqno_before(seqno1, seqno2) ? 0 : 1;
}
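
/*
 * Example (illustration): the subtraction wraps modulo 2^32, so
 * cor_seqno_before(0xffffffff, 1) != 0 (distance 0xfffffffe >= 2^31), while
 * cor_seqno_before(1, 0xffffffff) == 0; a seqno just past the wrap point is
 * therefore still ordered correctly.
 */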
static inline int ktime_before_eq(ktime_t time1, ktime_t time2)
{
        return ktime_after(time1, time2) ? 0 : 1;
}

static inline int ktime_after_eq(ktime_t time1, ktime_t time2)
{
        return ktime_before(time1, time2) ? 0 : 1;
}
static inline void cor_sk_write_space(struct cor_sock *cs)
{
        atomic_set(&cs->ready_to_write, 1);

        cs->sk.sk_write_space(&cs->sk);
}

static inline void cor_sk_data_ready(struct cor_sock *cs)
{
        atomic_set(&cs->ready_to_read, 1);

        cs->sk.sk_data_ready(&cs->sk);
}
/* the other direction may be locked only if called from cor_proc_cpacket */
static inline void cor_flush_buf(struct cor_conn *cn_lx)
{
        if (unlikely(cn_lx->targettype == TARGET_UNCONNECTED)) {
                cor_proc_cpacket(cn_lx);
        } else if (cn_lx->targettype == TARGET_SOCK) {
                cor_flush_sock(cn_lx);
        } else if (cn_lx->targettype == TARGET_OUT) {
                __u32 bytessent = 0;

                cor_flush_out(cn_lx, &bytessent);
        } else if (unlikely(cn_lx->targettype == TARGET_DISCARD)) {
                cor_databuf_ackdiscard(cn_lx);
        } else {
                BUG();
        }
}