From 93443565e45907c049efd63eef2cc8f21d3df32f Mon Sep 17 00:00:00 2001
From: Michael Blizek
Date: Sat, 16 Oct 2010 11:00:41 +0200
Subject: [PATCH] qos fixes, reset bufferusage on conn reset, conn refcnt fix

---
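Note for reviewers (illustration only, not part of the applied patch):
flush_out() no longer requeues a congested connection itself; it now returns
one of the RC_FLUSH_CONN_OUT_* codes and the caller decides whether to put
the conn back on the qos queue, as flush_buf() in forward.c does below. The
following stand-alone sketch shows that caller contract; struct conn,
flush_out() and qos_enqueue_conn() here are stubs invented for the
illustration, not the kernel definitions.

	#include <stdio.h>

	#define RC_FLUSH_CONN_OUT_OK      0
	#define RC_FLUSH_CONN_OUT_CONG    1
	#define RC_FLUSH_CONN_OUT_CREDITS 2
	#define RC_FLUSH_CONN_OUT_OOM     3

	struct conn { int pending; };               /* stub */

	static int flush_out(struct conn *rconn)    /* stub: pretend congestion */
	{
		return rconn->pending ? RC_FLUSH_CONN_OUT_CONG : RC_FLUSH_CONN_OUT_OK;
	}

	static void qos_enqueue_conn(struct conn *rconn, int oom)   /* stub */
	{
		printf("requeue conn %p, oom=%d\n", (void *) rconn, oom);
	}

	/* Mirrors the dispatch added to flush_buf(): congestion requeues without
	 * the oom flag, allocation failure requeues with it; OK and CREDITS do
	 * not requeue. */
	static void handle_flush_rc(struct conn *rconn, int rc)
	{
		if (rc == RC_FLUSH_CONN_OUT_CONG)
			qos_enqueue_conn(rconn, 0);
		else if (rc == RC_FLUSH_CONN_OUT_OOM)
			qos_enqueue_conn(rconn, 1);
	}

	int main(void)
	{
		struct conn c = { .pending = 1 };
		handle_flush_rc(&c, flush_out(&c));
		return 0;
	}
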
 net/cor/common.c        |   5 ++-
 net/cor/cor.h           |  20 +++++++--
 net/cor/cpacket_parse.c |   2 +
 net/cor/forward.c       |  24 +++++++----
 net/cor/kpacket_gen.c   |   4 ++
 net/cor/neighbor.c      |  15 ++++---
 net/cor/rcv.c           | 102 +++++++++++++++++++++++++++++++-------------
 net/cor/snd.c           | 109 +++++++++++++++++++++++-------------------------
 net/cor/sock.c          |   3 ++
 9 files changed, 180 insertions(+), 104 deletions(-)

diff --git a/net/cor/common.c b/net/cor/common.c
index 6a4e2628de4..ddbdf6926e6 100644
--- a/net/cor/common.c
+++ b/net/cor/common.c
@@ -756,8 +756,6 @@ static int _reset_conn(struct conn *conn)
 		list_del(&(conn->source.in.nb_list));
 		mutex_unlock(&(conn->source.in.nb->conn_list_lock));
 
-		#warning todo reset bufferlimits/space
-
 		krefput++;
 
 		if (conn->source.in.conn_id != 0) {
@@ -815,6 +813,9 @@ static int _reset_conn(struct conn *conn)
 
 	mutex_unlock(&(conn->rcv_lock));
 
+	qos_remove_conn(conn);
+	reset_bufferusage(conn);
+
 	return krefput;
 }
diff --git a/net/cor/cor.h b/net/cor/cor.h
index 1c5e55127ae..d21bf34131d 100644
--- a/net/cor/cor.h
+++ b/net/cor/cor.h
@@ -547,8 +547,10 @@ struct conn{
 	 * If one side is SOCK or NONE/UNCONNECTED and both directions
 	 * need to be locked, the direction with TARGET_UNCONNECTED or
	 * TARGET_SOCK has to be locked first, the direction with
-	 * SOURCE_NONE or SOURCE_SOCK afterwards. This is needed for changing
-	 * source/targettype and credit flow.
+	 * SOURCE_NONE or SOURCE_SOCK afterwards. If one side is TARGET_SOCK
+	 * and the other is TARGET_UNCONNECTED, TARGET_SOCK needs to be locked
+	 * first. This is needed for changing source/targettype, credit flow
+	 * and TARGET_UNCONNECTED generating responses.
 	 * If data is forwarded, (both sides are IN/OUT), only one direction
 	 * may be locked.
 	 */
@@ -777,6 +779,8 @@ extern int __init cor_neighbor_init(void);
 /* rcv.c */
 extern __u32 get_window(struct conn *rconn);
 
+extern void reset_bufferusage(struct conn *conn);
+
 extern void refresh_speedstat(struct conn *rconn, __u32 written);
 
 extern void drain_ooo_queue(struct conn *rconn);
@@ -860,6 +864,10 @@ extern void qos_enqueue(struct net_device *dev, struct resume_block *rb,
 
 extern void qos_enqueue_kpacket(struct neighbor *nb);
 
+extern void qos_remove_conn(struct conn *rconn);
+
+extern void qos_enqueue_conn(struct conn *rconn, int oom);
+
 extern struct sk_buff *create_packet(struct neighbor *nb, int size,
 		gfp_t alloc_flags, __u32 conn_id, __u32 seqno);
 
@@ -870,7 +878,11 @@ extern void retransmit_conn_timerfunc(struct work_struct *work);
 extern void conn_ack_rcvd(__u32 kpacket_seqno, struct conn *rconn,
 		__u32 seqno, __u8 window, __u32 seqno_ooo, __u32 length);
 
-extern void flush_out(struct conn *rconn);
+#define RC_FLUSH_CONN_OUT_OK 0
+#define RC_FLUSH_CONN_OUT_CONG 1
+#define RC_FLUSH_CONN_OUT_CREDITS 2
+#define RC_FLUSH_CONN_OUT_OOM 3
+extern int flush_out(struct conn *rconn);
 
 extern int __init cor_snd_init(void);
 
@@ -890,6 +902,8 @@ extern void databuf_ackread(struct conn *rconn);
 
 extern int databuf_maypush(struct data_buf *buf);
 
+extern void flush_buf(struct conn *rconn);
+
 extern void reset_seqno(struct data_buf *buf, __u32 initseqno);
 
 extern void databuf_free(struct data_buf *data);
diff --git a/net/cor/cpacket_parse.c b/net/cor/cpacket_parse.c
index 5b724fd194f..7036557d10b 100644
--- a/net/cor/cpacket_parse.c
+++ b/net/cor/cpacket_parse.c
@@ -71,6 +71,7 @@ static void send_resp(struct conn *rconn, __u16 respcode,
 	receive_buf(sconn, (char *) &reasonlen_be, 2);
 	receive_buf(sconn, reasontext, reasonlen);
 	mutex_unlock(&(sconn->rcv_lock));
+	flush_buf(sconn);
 }
 
 static void send_resp_bin(struct conn *rconn, char *buf, __u32 len)
@@ -86,6 +87,7 @@ static void send_resp_bin(struct conn *rconn, char *buf, __u32 len)
 	receive_buf(sconn, (char *) &len_be, 4);
 	receive_buf(sconn, buf, len);
 	mutex_unlock(&(sconn->rcv_lock));
+	flush_buf(sconn);
 }
 
 static void parse_set_timeout(struct conn *rconn, int backwards)
diff --git a/net/cor/forward.c b/net/cor/forward.c
index 2814e6ed868..1e302c3f41a 100644
--- a/net/cor/forward.c
+++ b/net/cor/forward.c
@@ -348,21 +348,35 @@ int databuf_maypush(struct data_buf *buf)
 
 void flush_buf(struct conn *rconn)
 {
+	int rc = RC_FLUSH_CONN_OUT_OK;
+	int flushagain = 0;
+
 	mutex_lock(&(rconn->rcv_lock));
 	switch (rconn->targettype) {
 	case TARGET_UNCONNECTED:
 		parse(rconn);
 		if (rconn->targettype != TARGET_UNCONNECTED)
-			flush_buf(rconn);
+			flushagain = 1;
 		break;
 	case TARGET_SOCK:
 		wake_up_interruptible(&(rconn->target.sock.wait));
 		break;
 	case TARGET_OUT:
-		flush_out(rconn);
+		rc = flush_out(rconn);
 		break;
 	default:
 		BUG();
 	}
+	mutex_unlock(&(rconn->rcv_lock));
+
+	if (unlikely(flushagain))
+		flush_buf(rconn);
+
+	if (rc == RC_FLUSH_CONN_OUT_CONG) {
+		qos_enqueue_conn(rconn, 0);
+	} else if (rc == RC_FLUSH_CONN_OUT_OOM) {
+		printk(KERN_DEBUG "oom");
+		qos_enqueue_conn(rconn, 1);
+	}
 }
 
 static int _receive_buf(struct conn *rconn, char *buf, int len, int userbuf)
@@ -491,9 +505,6 @@ int receive_userbuf(struct conn *rconn, struct msghdr *msg)
 		iovread += rc;
 	}
 
-	if (copied > 0)
-		flush_buf(rconn);;
-
 	return copied;
 }
 
@@ -501,7 +512,6 @@ void receive_buf(struct conn *rconn, char *buf, int len)
 {
 	BUG_ON(databuf_maypush(&(rconn->buf)) < len);
 	_receive_buf(rconn, buf, len, 0);
-	flush_buf(rconn);
 }
 
 int receive_skb(struct conn *rconn, struct sk_buff *skb)
@@ -522,8 +532,6 @@ int receive_skb(struct conn *rconn, struct sk_buff *skb)
 	rconn->buf.read_remaining += skb->len;
 	rconn->buf.last_buflen = 0;
 
-	flush_buf(rconn);
-
 	return 0;
 }
 
diff --git a/net/cor/kpacket_gen.c b/net/cor/kpacket_gen.c
index de1e5d763fa..f4da294c260 100644
--- a/net/cor/kpacket_gen.c
+++ b/net/cor/kpacket_gen.c
@@ -1388,6 +1388,7 @@ void send_ack_conn(struct control_msg_out *cm, struct conn *rconn,
 {
 	cm->type = MSGTYPE_ACK_CONN;
 	kref_get(&(rconn->ref));
+	BUG_ON(rconn->sourcetype != SOURCE_IN);
 	cm->msg.ack_conn.rconn = rconn;
 	cm->msg.ack_conn.conn_id = conn_id;
 	cm->msg.ack_conn.seqno = seqno;
@@ -1401,6 +1402,7 @@ void send_ack_conn_ooo(struct control_msg_out *cm, struct conn *rconn,
 {
 	cm->type = MSGTYPE_ACK_CONN_OOO;
 	kref_get(&(rconn->ref));
+	BUG_ON(rconn->sourcetype != SOURCE_IN);
 	cm->msg.ack_conn_ooo.rconn = rconn;
 	cm->msg.ack_conn_ooo.conn_id = conn_id;
 	cm->msg.ack_conn_ooo.seqno = seqno;
@@ -1418,6 +1420,7 @@ void send_connect_success(struct control_msg_out *cm, __u32 rcvd_conn_id,
 	cm->msg.connect_success.gen_conn_id = gen_conn_id;
 	cm->msg.connect_success.init_seqno = init_seqno;
 	kref_get(&(rconn->ref));
+	BUG_ON(rconn->sourcetype != SOURCE_IN);
 	cm->msg.connect_success.rconn = rconn;
 	cm->length = 14;
 	add_control_msg(cm, 0);
@@ -1430,6 +1433,7 @@ void send_connect_nb(struct control_msg_out *cm, __u32 conn_id,
 	cm->msg.connect.conn_id = conn_id;
 	cm->msg.connect.init_seqno = init_seqno;
 	kref_get(&(sconn->ref));
+	BUG_ON(sconn->sourcetype != SOURCE_IN);
 	cm->msg.connect.sconn = sconn;
 	cm->length = 10;
 	add_control_msg(cm, 0);
diff --git a/net/cor/neighbor.c b/net/cor/neighbor.c
index e92c2df1aeb..1a66fd50d40 100644
--- a/net/cor/neighbor.c
+++ b/net/cor/neighbor.c
@@ -1228,9 +1228,7 @@ static int send_announce_chunk(struct announce_data *ann)
 
 	rc = dev_queue_xmit(skb);
 
-	if (rc != 0) {
-		qos_enqueue(ann->dev, &(ann->rb), QOS_CALLER_ANNOUNCE);
-	} else {
+	if (rc == 0) {
 		ann->curr_announce_msg_offset += packet_size;
 
 		if (ann->curr_announce_msg_offset == ann->ann->announce_msg_len)
@@ -1276,6 +1274,7 @@ static void send_announce(struct work_struct *work)
 	struct announce_data *ann = container_of(to_delayed_work(work),
 			struct announce_data, announce_work);
 	int reschedule = 0;
+	int rc = 0;
 
 	mutex_lock(&(neighbor_operation_lock));
 
@@ -1293,12 +1292,17 @@ static void send_announce(struct work_struct *work)
 		kref_get(&(ann->ann->ref));
 	}
 
-	send_announce_chunk(ann);
+	rc = send_announce_chunk(ann);
 
out:
 	mutex_unlock(&(neighbor_operation_lock));
 
-	if (reschedule) {
+	if (rc != 0)
+		qos_enqueue(ann->dev, &(ann->rb), QOS_CALLER_ANNOUNCE);
+
+	if (unlikely(reschedule == 0)) {
+		kref_put(&(ann->ref), announce_data_free);
+	} else {
 		__u64 jiffies = get_jiffies_64();
 		int delay;
 
@@ -1372,7 +1376,6 @@ static void announce_send_rmdev(struct net_device *dev)
 	dev_put(ann->dev);
 	ann->dev = 0;
-	kref_put(&(ann->ref), announce_data_free);
 
out:
 	mutex_unlock(&(neighbor_operation_lock));
 }
diff --git a/net/cor/rcv.c b/net/cor/rcv.c
index 1c5516818d0..0ce4e5a83bb 100644
--- a/net/cor/rcv.c
+++ b/net/cor/rcv.c
@@ -81,7 +81,6 @@ DEFINE_MUTEX(bufferlimits_lock);
 static __u64 bufferassigned_init;
 static __u64 bufferassigned_speed;
 static __u64 bufferassigned_ata;
-static __u64 bufferassigned_reserve;
 
 static __u64 bufferusage_init;
 static __u64 bufferusage_speed;
@@ -235,15 +234,15 @@ static int compare_scores(__u32 usage1, __u64 speed1, __u32 usage2,
 	return 0;
 }
 
-#define OUTOFBUFFERSPACE_OFFENDERS 10
+#define OOBS_SIZE 10
 
 static void _outofbufferspace(void)
 {
 	int i;
 
 	struct list_head *curr;
 
-	struct conn *offendingconns[OUTOFBUFFERSPACE_OFFENDERS];
-	__u32 offendingusage[OUTOFBUFFERSPACE_OFFENDERS];
-	__u64 offendingspeed[OUTOFBUFFERSPACE_OFFENDERS];
+	struct conn *offendingconns[OOBS_SIZE];
+	__u32 offendingusage[OOBS_SIZE];
+	__u64 offendingspeed[OOBS_SIZE];
 
 	memset(&offendingconns, 0, sizeof(offendingconns));
 
@@ -261,6 +260,8 @@ static void _outofbufferspace(void)
 
 		int i;
 
+		curr = curr->next;
+
 		mutex_lock(&(conn->rcv_lock));
 
 		BUG_ON(conn->sourcetype != SOURCE_IN);
@@ -273,18 +274,23 @@ static void _outofbufferspace(void)
 
 		mutex_unlock(&(conn->rcv_lock));
 
-		if (offendingconns[OUTOFBUFFERSPACE_OFFENDERS-1] != 0 &&
+		if (offendingconns[OOBS_SIZE-1] != 0 &&
 				compare_scores(
-				offendingusage[OUTOFBUFFERSPACE_OFFENDERS-1],
-				offendingspeed[OUTOFBUFFERSPACE_OFFENDERS-1],
+				offendingusage[OOBS_SIZE-1],
+				offendingspeed[OOBS_SIZE-1],
 				usage, speed) >= 0)
 			continue;
 
-		offendingconns[OUTOFBUFFERSPACE_OFFENDERS-1] = conn;
-		offendingusage[OUTOFBUFFERSPACE_OFFENDERS-1] = usage;
-		offendingspeed[OUTOFBUFFERSPACE_OFFENDERS-1] = speed;
+		if (offendingconns[OOBS_SIZE-1] != 0)
+			kref_put(&(offendingconns[OOBS_SIZE-1]->ref),
+					free_conn);
 
-		for (i=OUTOFBUFFERSPACE_OFFENDERS-2;i>=0;i++) {
+		kref_get(&(conn->ref));
+		offendingconns[OOBS_SIZE-1] = conn;
+		offendingusage[OOBS_SIZE-1] = usage;
+		offendingspeed[OOBS_SIZE-1] = speed;
+
+		for (i=OOBS_SIZE-2;i>=0;i++) {
 			struct conn *tmpconn;
 			__u32 usage_tmp;
 			__u64 speed_tmp;
@@ -307,25 +313,26 @@ static void _outofbufferspace(void)
 			offendingusage[i+1] = usage_tmp;
 			offendingspeed[i+1] = speed_tmp;
 		}
-	}
-	for (i=0;iref));
 	}
 
 	mutex_unlock(&buffer_conn_list_lock);
 
-	for (i=0;i BUFFERSPACE_RESERVE);
 	mutex_unlock(&bufferlimits_lock);
 
-		if (resetneeded == 0)
-			break;
-
-		reset_conn(offendingconns[i]);
+		if (resetneeded)
+			reset_conn(offendingconns[i]);
 		kref_put(&(offendingconns[i]->ref), free_conn);
 	}
 
@@ -472,16 +479,17 @@ static __u32 _get_window(struct conn *rconn, int listlocked)
 	if (atomic_read(&(rconn->isreset)) != 0) {
 		if (listlocked && (rconn->source.in.buffer_list.next != 0 ||
-				rconn->source.in.buffer_list.prev != 0))
-			list_del(&(rconn->source.in.buffer_list));
-		rconn->source.in.buffer_list.next = 0;
-		rconn->source.in.buffer_list.prev = 0;
-		kref_put(&(rconn->ref), free_conn);
+				rconn->source.in.buffer_list.prev != 0)) {
+			list_del(&(rconn->source.in.buffer_list));
+			rconn->source.in.buffer_list.next = 0;
+			rconn->source.in.buffer_list.prev = 0;
+			kref_put(&(rconn->ref), free_conn);
+		}
 		goto out;
 	}
 
 	if (listlocked){
-		if (rconn->source.in.buffer_list.next != 0 &&
+		if (rconn->source.in.buffer_list.next != 0 ||
 				rconn->source.in.buffer_list.prev != 0) {
 			list_del(&(rconn->source.in.buffer_list));
 		} else {
@@ -489,8 +497,8 @@ static __u32 _get_window(struct conn *rconn, int listlocked)
 		}
 		list_add_tail(&(rconn->source.in.buffer_list),
 				&buffer_conn_list);
-	} else if (rconn->source.in.buffer_list.next != 0 ||
-			rconn->source.in.buffer_list.prev != 0) {
+	} else if (rconn->source.in.buffer_list.next == 0 &&
+			rconn->source.in.buffer_list.prev == 0) {
 		kref_get(&(rconn->ref));
 		list_add_tail(&(rconn->source.in.buffer_list),
 				&buffer_conn_tmp_list);
@@ -578,6 +586,41 @@ __u32 get_window(struct conn *rconn)
 	return window;
 }
 
+void reset_bufferusage(struct conn *conn)
+{
+	int listlocked;
+
+	mutex_lock(&bufferlimits_lock);
+	listlocked = mutex_trylock(&buffer_conn_list_lock);
+	mutex_lock(&(conn->rcv_lock));
+
+	if (conn->sourcetype != SOURCE_IN)
+		goto out;
+
+	bufferusage_init -= conn->source.in.usage_init;
+	bufferusage_speed -= conn->source.in.usage_speed;
+	bufferusage_ata -= conn->source.in.usage_ata;
+	bufferusage_reserve -= conn->source.in.usage_reserve;
+
+	bufferassigned_init -= conn->source.in.buffer_init;
+	bufferassigned_speed -= conn->source.in.buffer_speed;
+	bufferassigned_ata -= conn->source.in.buffer_ata;
+
+	if (listlocked && (conn->source.in.buffer_list.next != 0 ||
+			conn->source.in.buffer_list.prev != 0)) {
+		list_del(&(conn->source.in.buffer_list));
+		conn->source.in.buffer_list.next = 0;
+		conn->source.in.buffer_list.prev = 0;
+		kref_put(&(conn->ref), free_conn);
+	}
+
+out:
+	mutex_unlock(&(conn->rcv_lock));
+	if (listlocked)
+		mutex_unlock(&buffer_conn_list_lock);
+	mutex_unlock(&bufferlimits_lock);
+}
+
 void refresh_speedstat(struct conn *rconn, __u32 written)
 {
 	unsigned long iflags;
@@ -689,6 +732,7 @@ static void _conn_rcv(struct conn *rconn, struct sk_buff *skb)
 
 	int in_order;
 	int drop = 1;
+	int flush = 0;
 
 	__u32 len = skb->len;
 
@@ -718,6 +762,7 @@ static void _conn_rcv(struct conn *rconn, struct sk_buff *skb)
 		#warning todo balance credits
 
 		if (in_order == 0) {
+			flush = 1;
 			send_ack_conn_ooo(cm, rconn,
 					rconn->reversedir->target.out.conn_id,
 					rconn->source.in.next_seqno,
@@ -730,6 +775,8 @@ static void _conn_rcv(struct conn *rconn, struct sk_buff *skb)
 
out:
 	mutex_unlock(&(rconn->rcv_lock));
+
+	flush_buf(rconn);
 }
 
 static void conn_rcv(struct sk_buff *skb, __u32 conn_id, __u32 seqno)
@@ -878,7 +925,6 @@ int __init cor_rcv_init(void)
 	bufferassigned_init = 0;
 	bufferassigned_speed = 0;
 	bufferassigned_ata = 0;
-	bufferassigned_reserve = 0;
 
 	bufferusage_init = 0;
 	bufferusage_speed = 0;
diff --git a/net/cor/snd.c b/net/cor/snd.c
index e00019293aa..3850164f24d 100644
--- a/net/cor/snd.c
+++ b/net/cor/snd.c
@@ -62,11 +62,6 @@ struct qos_queue {
 	struct list_head conns_waiting;
 };
 
-#define RC_FLUSH_CONN_OK 0
-#define RC_FLUSH_CONN_CONG 1
-#define RC_FLUSH_CONN_CREDITS 2
-#define RC_FLUSH_CONN_OOM 3
-
 static int _flush_out(struct conn *rconn, int fromqos, __u32 creditsperbyte);
 
 /* Higherst bidder "pays" the credits the second has bid */
@@ -124,18 +119,18 @@ static int _resume_conns(struct qos_queue *q)
 	}
 
 	if (best == 0)
-		return RC_FLUSH_CONN_OK;
+		return RC_FLUSH_CONN_OUT_OK;
 
 	mutex_lock(&(best->rcv_lock));
 	rc = _flush_out(best, 1, (__u32) (secondcredit >> 32));
 
-	if (rc == RC_FLUSH_CONN_OK) {
+	if (rc == RC_FLUSH_CONN_OUT_OK) {
 		best->target.out.rb.in_queue = 0;
 		list_del(&(best->target.out.rb.lh));
 	}
 	mutex_unlock(&(best->rcv_lock));
 
-	if (rc == RC_FLUSH_CONN_OK)
+	if (rc == RC_FLUSH_CONN_OUT_OK)
 		kref_put(&(best->ref), free_conn);
 
 	return rc;
@@ -151,7 +146,7 @@ static int resume_conns(struct qos_queue *q)
 	return 0;
 }
 
-static int send_retrans(struct neighbor *nb);
+static int send_retrans(struct neighbor *nb, int fromqos);
 
 static int _qos_resume(struct qos_queue *q, int caller)
 {
@@ -175,8 +170,6 @@ static int _qos_resume(struct qos_queue *q, int caller)
 		rb->in_queue = 0;
 		list_del(curr);
 
-		mutex_unlock(&(queues_lock));
-
 		if (caller == QOS_CALLER_KPACKET) {
 			struct neighbor *nb = container_of(rb, struct neighbor,
 					rb_kp);
@@ -185,7 +178,7 @@ static int _qos_resume(struct qos_queue *q, int caller)
 			struct neighbor *nb = container_of(rb, struct neighbor,
 					rb_cr);
 			#warning todo do not send if neighbor is stalled
-			rc = send_retrans(nb);
+			rc = send_retrans(nb, 1);
 		} else if (caller == QOS_CALLER_ANNOUNCE) {
 			struct announce_data *ann = container_of(rb,
 					struct announce_data, rb);
@@ -194,8 +187,6 @@ static int _qos_resume(struct qos_queue *q, int caller)
 			BUG();
 		}
 
-		mutex_lock(&(queues_lock));
-
 		if (rc != 0 && rb->in_queue == 0) {
 			rb->in_queue = 1;
 			list_add(curr , lh);
@@ -347,8 +338,13 @@ void qos_enqueue(struct net_device *dev, struct resume_block *rb, int caller)
 		list_add(&(rb->lh), &(q->announce_waiting));
 		kref_get(&(container_of(rb, struct announce_data, rb)->ref));
 	} else if (caller == QOS_CALLER_CONN) {
+		struct conn *rconn = container_of(rb, struct conn,
+				target.out.rb);
+		mutex_lock(&(rconn->rcv_lock));
+		BUG_ON(rconn->targettype != TARGET_OUT);
 		list_add(&(rb->lh), &(q->conns_waiting));
-		kref_get(&(container_of(rb, struct conn, target.out.rb)->ref));
+		kref_get(&(rconn->ref));
+		mutex_lock(&(rconn->rcv_lock));
 	} else {
 		BUG();
 	}
@@ -372,7 +368,20 @@ static void qos_enqueue_conn_retrans(struct neighbor *nb)
 	qos_enqueue(nb->dev, &(nb->rb_cr), QOS_CALLER_CONN_RETRANS);
 }
 
-static void qos_enqueue_conn(struct conn *rconn)
+void qos_remove_conn(struct conn *rconn)
+{
+	mutex_lock(&(rconn->rcv_lock));
+	if (rconn->targettype != TARGET_OUT)
+		goto out;
+
+	#warning todo
+
+out:
+	mutex_unlock(&(rconn->rcv_lock));
+}
+
+#warning todo oom
+void qos_enqueue_conn(struct conn *rconn, int oom)
 {
 	BUG_ON(rconn->targettype != TARGET_OUT);
 	qos_enqueue(rconn->target.out.nb->dev, &(rconn->target.out.rb),
@@ -632,7 +641,6 @@ static int _send_retrans(struct neighbor *nb, struct conn_retrans *cr)
 
 	if (0) {
qos_enqueue:
-		qos_enqueue_conn_retrans(nb);
 		queuefull = 1;
 	}
out:
@@ -643,14 +651,14 @@ out:
 	return queuefull;
 }
 
-static int send_retrans(struct neighbor *nb)
+static int send_retrans(struct neighbor *nb, int fromqos)
 {
 	unsigned long iflags;
 
 	struct conn_retrans *cr = 0;
 
 	int nbstate;
-	int nbput = 1;
+	int rescheduled = 0;
 	int queuefull = 0;
 
 	spin_lock_irqsave( &(nb->state_lock), iflags );
@@ -662,6 +670,7 @@ static int send_retrans(struct neighbor *nb)
 
 		if (list_empty(&(nb->retrans_list_conn))) {
 			nb->retrans_timer_conn_running = 0;
+			spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
 			break;
 		}
 
@@ -686,7 +695,8 @@ static int send_retrans(struct neighbor *nb)
 		if (time_after(cr->timeout, jiffies)) {
 			schedule_delayed_work(&(nb->retrans_timer_conn),
 					cr->timeout - jiffies);
-			nbput = 0;
+			rescheduled = 1;
+			spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
 			break;
 		}
 
@@ -694,14 +704,15 @@ static int send_retrans(struct neighbor *nb)
 		spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
 		queuefull = _send_retrans(nb, cr);
 		kref_put(&(cr->ref), free_connretrans);
-		if (queuefull)
-			goto out;
+		if (queuefull) {
+			rescheduled = 1;
+			if (fromqos == 0)
+				qos_enqueue_conn_retrans(nb);
+			break;
+		}
 	}
 
-	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
-
-out:
-	if (nbput)
+	if (rescheduled == 0)
 		kref_put(&(nb->ref), neighbor_free);
 
 	return queuefull;
@@ -712,7 +723,7 @@ void retransmit_conn_timerfunc(struct work_struct *work)
 	struct neighbor *nb = container_of(to_delayed_work(work),
 			struct neighbor, retrans_timer_conn);
 
-	send_retrans(nb);
+	send_retrans(nb, 0);
 }
 
 static struct conn_retrans *search_seqno(struct conn *rconn, __u32 seqno)
@@ -876,13 +887,13 @@ static int _flush_out(struct conn *rconn, int fromqos, __u32 creditsperbyte)
 	BUG_ON(rconn->targettype != TARGET_OUT);
 
 	if (unlikely(rconn->target.out.conn_id == 0))
-		return RC_FLUSH_CONN_OK;
+		return RC_FLUSH_CONN_OUT_OK;
 
 	if (unlikely(atomic_read(&(rconn->isreset)) != 0))
-		return RC_FLUSH_CONN_OK;
+		return RC_FLUSH_CONN_OUT_OK;
 
 	if (fromqos == 0 && may_send_conn(rconn) == 0)
-		goto qos;
+		return RC_FLUSH_CONN_OUT_CONG;
 
 	while (rconn->buf.read_remaining >= targetmss) {
 		struct conn_retrans *cr;
@@ -892,18 +903,18 @@ static int _flush_out(struct conn *rconn, int fromqos, __u32 creditsperbyte)
 		if (unlikely(creditsperbyte * targetmss > rconn->credits))
-			return RC_FLUSH_CONN_CREDITS;
+			return RC_FLUSH_CONN_OUT_CREDITS;
 
 		seqno = rconn->target.out.seqno_nextsend;
 		skb = create_packet(rconn->target.out.nb, targetmss, GFP_ATOMIC,
 				rconn->target.out.conn_id, seqno);
 		if (unlikely(skb == 0))
-			goto oom;
+			return RC_FLUSH_CONN_OUT_OOM;
 
 		cr = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
 		if (unlikely(cr == 0)) {
 			kfree_skb(skb);
-			goto oom;
+			return RC_FLUSH_CONN_OUT_OOM;
 		}
 
 		dst = skb_put(skb, targetmss);
@@ -914,7 +925,7 @@ static int _flush_out(struct conn *rconn, int fromqos, __u32 creditsperbyte)
 		if (rc != 0) {
 			databuf_unpull(&(rconn->buf), targetmss);
 			kmem_cache_free(connretrans_slab, cr);
-			goto qos;
+			return RC_FLUSH_CONN_OUT_CONG;
 		}
 
 		rconn->credits -= creditsperbyte * targetmss;
@@ -929,22 +940,22 @@ static int _flush_out(struct conn *rconn, int fromqos, __u32 creditsperbyte)
 		char *buf = kmalloc(len, GFP_KERNEL);
 
 		if (unlikely(creditsperbyte * len > rconn->credits))
-			return RC_FLUSH_CONN_CREDITS;
+			return RC_FLUSH_CONN_OUT_CREDITS;
 
 		if (unlikely(buf == 0))
-			goto oom;
+			return RC_FLUSH_CONN_OUT_OOM;
 
 		cm = alloc_control_msg(rconn->target.out.nb, ACM_PRIORITY_MED);
 
 		if (unlikely(cm == 0)) {
 			kfree(buf);
-			goto oom;
+			return RC_FLUSH_CONN_OUT_OOM;
 		}
 
 		cr = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
 		if (unlikely(cr == 0)) {
 			kfree(buf);
 			free_control_msg(cm);
-			goto oom;
+			return RC_FLUSH_CONN_OUT_CONG;
 		}
 
 		databuf_pull(&(rconn->buf), buf, len);
@@ -961,28 +972,12 @@ static int _flush_out(struct conn *rconn, int fromqos, __u32 creditsperbyte)
 
 	wake_sender(rconn);
 
-	if (0) {
-qos:
-		printk(KERN_ERR "qos");
-		if (fromqos == 0)
-			qos_enqueue_conn(rconn);
-		return RC_FLUSH_CONN_CONG;
-	}
-
-	if (0) {
-oom:
-		printk(KERN_ERR "oom");
-		if (fromqos == 0)
-			qos_enqueue_conn(rconn);
-		return RC_FLUSH_CONN_OOM;
-	}
-
-	return RC_FLUSH_CONN_OK;
+	return RC_FLUSH_CONN_OUT_OK;
 }
 
-void flush_out(struct conn *rconn)
+int flush_out(struct conn *rconn)
 {
-	_flush_out(rconn, 0, 0);
+	return _flush_out(rconn, 0, 0);
 }
 
 int __init cor_snd_init(void)
diff --git a/net/cor/sock.c b/net/cor/sock.c
index cb8d30c8996..b3404ab6506 100644
--- a/net/cor/sock.c
+++ b/net/cor/sock.c
@@ -231,6 +231,9 @@ recv:
out:
 	mutex_unlock(&(rconn->rcv_lock));
 
+	if (copied > 0)
+		flush_buf(rconn);
+
 	if (copied == -EAGAIN && blocking) {
 		if (wait_event_interruptible(rconn->source.sock.wait,
 				sendmsg_maypush(rconn) == 0))
-- 
2.11.4.GIT