From dc6d3f87ba2fc5365a98706d79b4c68c408c94ba Mon Sep 17 00:00:00 2001
From: Michael Blizek
Date: Sun, 26 Jun 2011 17:52:15 +0200
Subject: [PATCH] qos_queue locking+content free, conn_ack_rcvd reset check,
 neighbor_operations lock split

---
 net/cor/common.c      |   3 +-
 net/cor/cor.h         |  24 +++-
 net/cor/forward.c     |  16 +--
 net/cor/kpacket_gen.c |   5 +-
 net/cor/neighbor.c    |  58 +++++----
 net/cor/snd.c         | 350 +++++++++++++++++++++++++++++++-------------------
 6 files changed, 284 insertions(+), 172 deletions(-)

diff --git a/net/cor/common.c b/net/cor/common.c
index 74469a293c5..84490798f5e 100644
--- a/net/cor/common.c
+++ b/net/cor/common.c
@@ -1342,6 +1342,8 @@ static int _reset_conn(struct conn *cn, int trgt_out_resetneeded)
 			cn->target.out.conn_id = 0;
 
 		cancel_retrans(cn);
+
+		qos_remove_conn(cn);
 	} else if (cn->targettype == TARGET_SOCK) {
 		wake_up_interruptible(&(cn->target.sock.wait));
 	}
@@ -1350,7 +1352,6 @@ static int _reset_conn(struct conn *cn, int trgt_out_resetneeded)
 
 	mutex_unlock(&(cn->rcv_lock));
 
-	qos_remove_conn(cn); /* target out only */
 	reset_bufferusage(cn); /* source in only */
 	connreset_credits(cn);
 	connreset_sbt(cn); /* source sock only */
diff --git a/net/cor/cor.h b/net/cor/cor.h
index bde834b5979..87b7ae0637d 100644
--- a/net/cor/cor.h
+++ b/net/cor/cor.h
@@ -344,6 +344,21 @@ struct htable{
 	__u32 kref_offset;
 };
 
+struct qos_queue {
+	spinlock_t qlock;
+
+	struct kref ref;
+
+	struct list_head queue_list;
+
+	struct net_device *dev;
+
+	struct list_head kpackets_waiting;
+	struct list_head conn_retrans_waiting;
+	struct list_head announce_waiting;
+	struct list_head conns_waiting;
+};
+
 struct resume_block{
 	struct list_head lh;
 	int in_queue;
@@ -380,6 +395,7 @@ struct neighbor{
 	struct net_device *dev;
 	char mac[MAX_ADDR_LEN];
+	struct qos_queue *queue;
 
 	char *addr;
 	__u16 addrlen;
@@ -1025,6 +1041,10 @@ extern void parse(struct conn *trtg_unconn, int fromresume);
 extern int __init cor_cpacket_init(void);
 
 /* snd.c */
+extern void free_qos(struct kref *ref);
+
+extern struct qos_queue *get_queue(struct net_device *dev);
+
 extern int destroy_queue(struct net_device *dev);
 
 extern int create_queue(struct net_device *dev);
@@ -1034,10 +1054,10 @@ extern int create_queue(struct net_device *dev);
 #define QOS_CALLER_ANNOUNCE 2
 #define QOS_CALLER_CONN 3
 
-extern void qos_enqueue(struct net_device *dev, struct resume_block *rb,
+extern void qos_enqueue(struct qos_queue *q, struct resume_block *rb,
 		int caller);
 
-extern void qos_remove_conn(struct conn *cn);
+void qos_remove_conn(struct conn *trgt_out_l);
 
 extern struct sk_buff *create_packet(struct neighbor *nb, int size,
 		gfp_t alloc_flags, __u32 conn_id, __u32 seqno);
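The new struct qos_queue pairs the per-device waiting lists with their own spinlock (qlock) and a kref, so the queue's lifetime is decoupled from the global queues list: a neighbor keeps its own counted pointer and the struct is only freed once the last holder lets go. What follows is a minimal out-of-tree sketch of that lifetime rule; the dummy_* names are illustrative, not cor identifiers.

/* Lifetime sketch: one reference per external holder, plus one owned by
 * whatever global list the queue sits on. All dummy_* names are made up. */
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct dummy_queue {
	spinlock_t lock;	/* protects the embedded waiting lists */
	struct kref ref;	/* lifetime, decoupled from any lock */
};

static void dummy_free(struct kref *ref)
{
	kfree(container_of(ref, struct dummy_queue, ref));
}

static struct dummy_queue *dummy_create(void)
{
	struct dummy_queue *q = kmalloc(sizeof(*q), GFP_KERNEL);
	if (q == 0)
		return 0;
	spin_lock_init(&(q->lock));
	kref_init(&(q->ref));	/* this reference is owned by the global list */
	return q;
}

/* every holder (e.g. a neighbor) takes its own reference ... */
static struct dummy_queue *dummy_get(struct dummy_queue *q)
{
	kref_get(&(q->ref));
	return q;
}

/* ... and drops it when done; the final put frees the struct */
static void dummy_put(struct dummy_queue *q)
{
	kref_put(&(q->ref), dummy_free);
}

With this split, destroy_queue() only has to empty the lists and drop the list's reference; neighbors that still point at the queue keep it alive until neighbor_free() runs.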
diff --git a/net/cor/forward.c b/net/cor/forward.c
index 904517d7ef5..bbee4b4fcf6 100644
--- a/net/cor/forward.c
+++ b/net/cor/forward.c
@@ -567,7 +567,8 @@ void wake_sender(struct conn *cn)
 
 void flush_buf(struct conn *cn)
 {
-	int rc = RC_FLUSH_CONN_OUT_OK;
+	int rc;
+	int sent = 0;
 	mutex_lock(&(cn->rcv_lock));
 
 	switch (cn->targettype) {
@@ -588,10 +589,12 @@ void flush_buf(struct conn *cn)
 	case TARGET_OUT:
 		rc = flush_out(cn, 0, 0);
 		mutex_unlock(&(cn->rcv_lock));
+		sent = (rc == RC_FLUSH_CONN_OUT_OK_SENT);
 		break;
 	case TARGET_DISCARD:
 		databuf_ackdiscard(cn);
 		mutex_unlock(&(cn->rcv_lock));
+		sent = 1;
 		break;
 	default:
 		BUG();
@@ -599,16 +602,7 @@ void flush_buf(struct conn *cn)
 
 	refresh_conn_credits(cn, 0, 0);
 
-	if (rc == RC_FLUSH_CONN_OUT_CONG) {
-		#warning todo locking
-		qos_enqueue(cn->target.out.nb->dev, &(cn->target.out.rb),
-				QOS_CALLER_CONN);
-	} else if (rc == RC_FLUSH_CONN_OUT_OOM) {
-		printk(KERN_DEBUG "oom");
-		#warning todo locking
-		qos_enqueue(cn->target.out.nb->dev, &(cn->target.out.rb),
-				QOS_CALLER_CONN);
-	} else if (rc == RC_FLUSH_CONN_OUT_OK_SENT) {
+	if (sent) {
 		wake_sender(cn);
 	}
 }
diff --git a/net/cor/kpacket_gen.c b/net/cor/kpacket_gen.c
index 6d77eb4d88d..3892b729bed 100644
--- a/net/cor/kpacket_gen.c
+++ b/net/cor/kpacket_gen.c
@@ -1287,7 +1287,8 @@ oom:
 
 	if (rc != 0) {
 		if (resume == 0)
-			qos_enqueue(nb->dev, &(nb->rb_kp), QOS_CALLER_KPACKET);
+			qos_enqueue(nb->queue, &(nb->rb_kp),
+					QOS_CALLER_KPACKET);
 	} else {
 		atomic_set(&(nb->cmsg_work_scheduled), 0);
 		schedule_controlmsg_timer(nb);
@@ -1296,7 +1297,7 @@ oom:
 	mutex_unlock(&(nb->cmsg_lock));
 	mutex_unlock(&(nb->send_cmsg_lock));
 
-	if (rc == 0)
+	if (resume == 0)
 		kref_put(&(nb->ref), neighbor_free);
 
 	return rc;
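The last hunk above is the important one: whether the neighbor reference gets dropped must depend on who owns it, i.e. on the call path (resume flag), not on whether the send happened to succeed. With the old rc == 0 test, a failed direct call leaked a reference and a successful resume call dropped one it did not own. A reduced, self-contained sketch of that ownership rule; worker, do_send and send_work are illustrative names, not cor API:

#include <linux/kref.h>
#include <linux/slab.h>

struct worker {
	struct kref ref;
};

static void worker_free(struct kref *ref)
{
	kfree(container_of(ref, struct worker, ref));
}

/* stand-in for the real send: 0 = sent, nonzero = queue full */
static int do_send(struct worker *w)
{
	return 0;
}

static int send_work(struct worker *w, int resume)
{
	int rc = do_send(w);

	/* Drop the reference if and only if we were called directly: the
	 * resume path's reference belongs to the queue entry and is dropped
	 * when the entry leaves the queue - regardless of rc. */
	if (resume == 0)
		kref_put(&(w->ref), worker_free);
	return rc;
}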
diff --git a/net/cor/neighbor.c b/net/cor/neighbor.c
index 6994b326037..38d3a70ad0b 100644
--- a/net/cor/neighbor.c
+++ b/net/cor/neighbor.c
@@ -62,7 +62,8 @@
  */
 
-DEFINE_MUTEX(neighbor_operation_lock);
+DEFINE_MUTEX(announce_rcv_lock);
+DEFINE_SPINLOCK(announce_snd_lock);
 DEFINE_MUTEX(neighbor_list_lock);
 
 char *addrtype = "id";
@@ -103,6 +104,9 @@ void neighbor_free(struct kref *ref)
 	if (nb->dev != 0)
 		dev_put(nb->dev);
 	nb->dev = 0;
+	if (nb->queue != 0)
+		kref_put(&(nb->queue->ref), free_qos);
+	nb->queue = 0;
 	kmem_cache_free(nb_slab, nb);
 }
 
@@ -162,10 +166,8 @@ int is_from_nb(struct sk_buff *skb, struct neighbor *nb)
 			skb->dev->header_ops->parse != 0)
 		skb->dev->header_ops->parse(skb, source_hw);
 
-	mutex_lock(&(neighbor_operation_lock));
 	rc = (skb->dev == nb->dev &&
 			memcmp(nb->mac, source_hw, MAX_ADDR_LEN) == 0);
-	mutex_unlock(&(neighbor_operation_lock));
 	return rc;
 }
 
@@ -1108,6 +1110,17 @@ static void apply_announce_cmds(char *msg, __u32 len, struct net_device *dev,
 	if (unlikely(nb == 0))
 		return;
 
+	nb->queue = get_queue(dev);
+	if (nb->queue == 0) {
+		kmem_cache_free(nb_slab, nb);
+		return;
+	}
+
+	dev_hold(dev);
+	nb->dev = dev;
+
+	memcpy(nb->mac, source_hw, MAX_ADDR_LEN);
+
 	while (len >= 8) {
 		__u32 cmd;
 		__u32 cmdlen;
@@ -1129,10 +1142,6 @@ static void apply_announce_cmds(char *msg, __u32 len, struct net_device *dev,
 
 	BUG_ON(len != 0);
 
-	memcpy(nb->mac, source_hw, MAX_ADDR_LEN);
-
-	dev_hold(dev);
-	nb->dev = dev;
 	add_neighbor(nb);
 }
 
@@ -1365,7 +1374,7 @@ void rcv_announce(struct sk_buff *skb)
 	if (total_size > 8192)
 		goto discard;
 
-	mutex_lock(&(neighbor_operation_lock));
+	mutex_lock(&(announce_rcv_lock));
 
 	if (announce_proto_version != 0)
 		goto discard;
@@ -1431,7 +1440,7 @@ void rcv_announce(struct sk_buff *skb)
 		kfree_skb(skb);
 	}
 
-	mutex_unlock(&(neighbor_operation_lock));
+	mutex_unlock(&(announce_rcv_lock));
 }
 
 struct announce{
@@ -1459,7 +1468,7 @@ static int send_announce_chunk(struct announce_data *ann)
 	if (remainingdata < packet_size)
 		packet_size = remainingdata;
 
-	skb = alloc_skb(packet_size + overhead, GFP_KERNEL);
+	skb = alloc_skb(packet_size + overhead, GFP_ATOMIC);
 	if (unlikely(skb == 0))
 		return 0;
 
@@ -1512,9 +1521,9 @@ out_err:
 int send_announce_qos(struct announce_data *ann)
 {
 	int rc;
-	mutex_lock(&(neighbor_operation_lock));
+	spin_lock_bh(&(announce_snd_lock));
 	rc = send_announce_chunk(ann);
-	mutex_unlock(&(neighbor_operation_lock));
+	spin_unlock_bh(&(announce_snd_lock));
 	return rc;
 }
 
@@ -1541,7 +1550,7 @@ static void send_announce(struct work_struct *work)
 	int reschedule = 0;
 	int rc = 0;
 
-	mutex_lock(&(neighbor_operation_lock));
+	spin_lock_bh(&(announce_snd_lock));
 
 	if (unlikely(ann->dev == 0))
 		goto out;
@@ -1560,10 +1569,15 @@ static void send_announce(struct work_struct *work)
 		rc = send_announce_chunk(ann);
 
 out:
-	mutex_unlock(&(neighbor_operation_lock));
+	spin_unlock_bh(&(announce_snd_lock));
 
-	if (rc != 0)
-		qos_enqueue(ann->dev, &(ann->rb), QOS_CALLER_ANNOUNCE);
+	if (rc != 0) {
+		struct qos_queue *q = get_queue(ann->dev);
+		if (q != 0) {
+			qos_enqueue(q, &(ann->rb), QOS_CALLER_ANNOUNCE);
+			kref_put(&(q->ref), free_qos);
+		}
+	}
 
 	if (unlikely(reschedule == 0)) {
 		kref_put(&(ann->ref), announce_data_free);
@@ -1618,9 +1632,9 @@ static void announce_send_adddev(struct net_device *dev)
 	dev_hold(dev);
 	ann->dev = dev;
 
-	mutex_lock(&(neighbor_operation_lock));
+	spin_lock_bh(&(announce_snd_lock));
 	list_add_tail(&(ann->lh), &announce_out_list);
-	mutex_unlock(&(neighbor_operation_lock));
+	spin_unlock_bh(&(announce_snd_lock));
 
 	ann->scheduled_announce_timer = get_jiffies_64();
 	INIT_DELAYED_WORK(&(ann->announce_work), send_announce);
@@ -1631,7 +1645,7 @@ static void announce_send_rmdev(struct net_device *dev)
 {
 	struct announce_data *ann;
 
-	mutex_lock(&(neighbor_operation_lock));
+	spin_lock_bh(&(announce_snd_lock));
 
 	ann = get_announce_by_netdev(dev);
 
@@ -1642,7 +1656,7 @@ static void announce_send_rmdev(struct net_device *dev)
 	ann->dev = 0;
 
 out:
-	mutex_unlock(&(neighbor_operation_lock));
+	spin_unlock_bh(&(announce_snd_lock));
 }
 
 int netdev_notify_func(struct notifier_block *not, unsigned long event,
@@ -1697,7 +1711,7 @@ static int set_announce(char *msg, __u32 len)
 
 	kref_init(&(ann->ref));
 
-	mutex_lock(&(neighbor_operation_lock));
+	spin_lock_bh(&(announce_snd_lock));
 
 	if (last_announce != 0) {
 		ann->packet_version = last_announce->packet_version + 1;
@@ -1706,7 +1720,7 @@ static int set_announce(char *msg, __u32 len)
 
 	last_announce = ann;
 
-	mutex_unlock(&(neighbor_operation_lock));
+	spin_unlock_bh(&(announce_snd_lock));
 
 	return 0;
 }
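The lock split above also changes what the send path may do: rcv_announce() keeps a sleeping-capable mutex (announce_rcv_lock), but the send side now runs under announce_snd_lock, a BH spinlock that is also taken from the qos resume path. Code under a spinlock must not sleep, which is why send_announce_chunk() switches alloc_skb() from GFP_KERNEL to GFP_ATOMIC. A minimal sketch of that rule; snd_lock and build_packet are illustrative names, not cor identifiers:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(snd_lock);

static struct sk_buff *build_packet(unsigned int size)
{
	struct sk_buff *skb;

	spin_lock_bh(&snd_lock);
	/* GFP_KERNEL may sleep, which is forbidden in atomic context;
	 * GFP_ATOMIC may fail instead, so the caller must handle skb == 0 */
	skb = alloc_skb(size, GFP_ATOMIC);
	spin_unlock_bh(&snd_lock);
	return skb;
}

The price is a higher allocation failure rate under memory pressure, which the announce path absorbs by dropping the chunk and relying on the periodic reschedule.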
diff --git a/net/cor/snd.c b/net/cor/snd.c
index 57b58b04ec2..da16bd01682 100644
--- a/net/cor/snd.c
+++ b/net/cor/snd.c
@@ -51,16 +51,11 @@ LIST_HEAD(queues);
 struct delayed_work qos_resume_work;
 int qos_resume_scheduled;
 
-struct qos_queue {
-	struct list_head queue_list;
-
-	struct net_device *dev;
-
-	struct list_head kpackets_waiting;
-	struct list_head conn_retrans_waiting;
-	struct list_head announce_waiting;
-	struct list_head conns_waiting;
-};
+void free_qos(struct kref *ref)
+{
+	struct qos_queue *q = container_of(ref, struct qos_queue, ref);
+	kfree(q);
+}
 
 /* Highest bidder "pays" the credits the second has bid */
 static int _resume_conns(struct qos_queue *q)
@@ -153,12 +148,14 @@ static int resume_conns(struct qos_queue *q)
 
 static int send_retrans(struct neighbor *nb, int fromqos);
 
-static int _qos_resume(struct qos_queue *q, int caller)
+static int __qos_resume(struct qos_queue *q, int caller)
 {
+	unsigned long iflags;
 	int rc = 0;
-
 	struct list_head *lh;
 
+	spin_lock_irqsave(&(q->qlock), iflags);
+
 	if (caller == QOS_CALLER_KPACKET)
 		lh = &(q->conn_retrans_waiting);
 	else if (caller == QOS_CALLER_CONN_RETRANS)
@@ -178,43 +175,97 @@ static int _qos_resume(struct qos_queue *q, int caller)
 		if (caller == QOS_CALLER_KPACKET) {
 			struct neighbor *nb = container_of(rb, struct neighbor,
 					rb_kp);
+			kref_get(&(nb->ref));
+			spin_unlock_irqrestore(&(q->qlock), iflags);
 			rc = send_messages(nb, 1);
+			kref_put(&(nb->ref), neighbor_free);
+			spin_lock_irqsave(&(q->qlock), iflags);
 		} else if (caller == QOS_CALLER_CONN_RETRANS) {
 			struct neighbor *nb = container_of(rb, struct neighbor,
 					rb_cr);
 			#warning todo do not send if neighbor is stalled
+			kref_get(&(nb->ref));
+			spin_unlock_irqrestore(&(q->qlock), iflags);
 			rc = send_retrans(nb, 1);
+			kref_put(&(nb->ref), neighbor_free);
+			spin_lock_irqsave(&(q->qlock), iflags);
 		} else if (caller == QOS_CALLER_ANNOUNCE) {
 			struct announce_data *ann = container_of(rb,
 					struct announce_data, rb);
+			kref_get(&(ann->ref));
+			spin_unlock_irqrestore(&(q->qlock), iflags);
 			rc = send_announce_qos(ann);
+			kref_put(&(ann->ref), announce_data_free);
+			spin_lock_irqsave(&(q->qlock), iflags);
 		} else {
 			BUG();
 		}
 
-		if (rc != 0 && rb->in_queue == 0) {
-			rb->in_queue = 1;
-			list_add(curr, lh);
-		} else {
-			if (caller == QOS_CALLER_KPACKET) {
-				kref_put(&(container_of(rb, struct neighbor,
-						rb_kp)->ref), neighbor_free);
-			} else if (caller == QOS_CALLER_CONN_RETRANS) {
-				kref_put(&(container_of(rb, struct neighbor,
-						rb_cr)->ref), neighbor_free);
-			} else if (caller == QOS_CALLER_ANNOUNCE) {
-				kref_put(&(container_of(rb,
-						struct announce_data, rb)->ref),
-						announce_data_free);
-			} else {
-				BUG();
+		if (rc != 0) {
+			if (rb->in_queue == 0) {
+				rb->in_queue = 1;
+				list_add(curr, lh);
 			}
+			break;
+		}
+
+		if (caller == QOS_CALLER_KPACKET) {
+			kref_put(&(container_of(rb, struct neighbor,
+					rb_kp)->ref), neighbor_free);
+		} else if (caller == QOS_CALLER_CONN_RETRANS) {
+			kref_put(&(container_of(rb, struct neighbor,
+					rb_cr)->ref), neighbor_free);
+		} else if (caller == QOS_CALLER_ANNOUNCE) {
+			kref_put(&(container_of(rb,
+					struct announce_data, rb)->ref),
+					announce_data_free);
+		} else {
+			BUG();
 		}
+	}
 
-		if (rc != 0)
-			break;
-	}
+	spin_unlock_irqrestore(&(q->qlock), iflags);
+
 	return rc;
 }
+
+static int _qos_resume(struct qos_queue *q)
+{
+	int rc = 0;
+	unsigned long iflags;
+	int i;
+
+	spin_lock_irqsave(&(q->qlock), iflags);
+
+	for (i=0;i<4 && rc == 0;i++) {
+		struct list_head *lh;
+
+		if (i == QOS_CALLER_KPACKET)
+			lh = &(q->conn_retrans_waiting);
+		else if (i == QOS_CALLER_CONN_RETRANS)
+			lh = &(q->kpackets_waiting);
+		else if (i == QOS_CALLER_ANNOUNCE)
+			lh = &(q->announce_waiting);
+		else if (i == QOS_CALLER_CONN)
+			lh = &(q->conns_waiting);
+		else
+			BUG();
+
+		if (list_empty(lh))
+			continue;
+
+		spin_unlock_irqrestore(&(q->qlock), iflags);
+		if (i == QOS_CALLER_CONN)
+			rc = resume_conns(q);
+		else
+			rc = __qos_resume(q, i);
+		spin_lock_irqsave(&(q->qlock), iflags);
+
+		i = 0;
+	}
+
+	spin_unlock_irqrestore(&(q->qlock), iflags);
+
+	return rc;
+}
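__qos_resume() above is the core of the locking change: qlock is dropped around every actual send (sending may sleep or take other locks), and a temporary kref keeps the neighbor or announce_data alive while the queue is unlocked. The same pattern reduced to a self-contained sketch; item, process and drain are illustrative names, not cor API:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head lh;
	struct kref ref;	/* the list owns one reference per queued item */
};

static void item_free(struct kref *ref)
{
	kfree(container_of(ref, struct item, ref));
}

/* stand-in for send_messages()/send_retrans(): 0 = done, nonzero = congested */
static int process(struct item *it)
{
	return 0;
}

static int drain(spinlock_t *lock, struct list_head *head)
{
	unsigned long iflags;
	int rc = 0;

	spin_lock_irqsave(lock, iflags);
	while (list_empty(head) == 0 && rc == 0) {
		struct item *it = container_of(head->next, struct item, lh);

		list_del(&(it->lh));

		kref_get(&(it->ref));	/* keep it alive while unlocked */
		spin_unlock_irqrestore(lock, iflags);
		rc = process(it);
		kref_put(&(it->ref), item_free);
		spin_lock_irqsave(lock, iflags);

		if (rc != 0)
			list_add(&(it->lh), head);	/* congested: put it back */
		else
			kref_put(&(it->ref), item_free);	/* drop the list's ref */
	}
	spin_unlock_irqrestore(lock, iflags);
	return rc;
}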
@@ -222,74 +274,111 @@ static void qos_resume(struct work_struct *work)
 {
 	struct list_head *curr;
 
+	int congested = 0;
+
 	mutex_lock(&(queues_lock));
 
 	curr = queues.next;
 	while (curr != (&queues)) {
 		struct qos_queue *q = container_of(curr,
 				struct qos_queue, queue_list);
-		int i;
 
-		for (i=0;i<4;i++) {
-			int rc;
-			if (i == 3)
-				rc = resume_conns(q);
-			else
-				rc = _qos_resume(q, i);
-
-			if (rc != 0)
-				goto congested;
-		}
+		if (_qos_resume(q))
+			congested = 1;
 
 		curr = curr->next;
-
-		if (i == 4 && unlikely(q->dev == 0)) {
-			list_del(&(q->queue_list));
-			kfree(q);
-		}
 	}
 
-	qos_resume_scheduled = 0;
-
-	if (0) {
-congested:
+	if (congested) {
 		schedule_delayed_work(&(qos_resume_work), 1);
+	} else {
+		qos_resume_scheduled = 0;
 	}
 
 	mutex_unlock(&(queues_lock));
 }
 
-static struct qos_queue *get_queue(struct net_device *dev)
+struct qos_queue *get_queue(struct net_device *dev)
 {
-	struct list_head *curr = queues.next;
+	struct qos_queue *ret = 0;
+	struct list_head *curr;
+
+	mutex_lock(&(queues_lock));
+	curr = queues.next;
 	while (curr != (&queues)) {
 		struct qos_queue *q = container_of(curr,
 				struct qos_queue, queue_list);
-		if (q->dev == dev)
-			return q;
+		if (q->dev == dev) {
+			ret = q;
+			kref_get(&(ret->ref));
+			break;
+		}
+		curr = curr->next;
 	}
-	return 0;
+	mutex_unlock(&(queues_lock));
+	return ret;
 }
 
-#warning todo content of the queue?
-int destroy_queue(struct net_device *dev)
+static void _destroy_queue(struct qos_queue *q, int caller)
 {
-	struct qos_queue *q;
+	struct list_head *lh;
 
-	mutex_lock(&(queues_lock));
+	if (caller == QOS_CALLER_KPACKET)
+		lh = &(q->conn_retrans_waiting);
+	else if (caller == QOS_CALLER_CONN_RETRANS)
+		lh = &(q->kpackets_waiting);
+	else if (caller == QOS_CALLER_ANNOUNCE)
+		lh = &(q->announce_waiting);
+	else
+		BUG();
+
+	while (list_empty(lh) == 0) {
+		struct list_head *curr = lh->next;
+		struct resume_block *rb = container_of(curr,
+				struct resume_block, lh);
+		rb->in_queue = 0;
+		list_del(curr);
 
-	q = get_queue(dev);
+		if (caller == QOS_CALLER_KPACKET) {
+			kref_put(&(container_of(rb, struct neighbor,
+					rb_kp)->ref), neighbor_free);
+		} else if (caller == QOS_CALLER_CONN_RETRANS) {
+			kref_put(&(container_of(rb, struct neighbor,
+					rb_cr)->ref), neighbor_free);
+		} else if (caller == QOS_CALLER_ANNOUNCE) {
+			kref_put(&(container_of(rb,
+					struct announce_data, rb)->ref),
+					announce_data_free);
+		} else {
+			BUG();
+		}
+	}
+}
 
-	if (q == 0) {
-		mutex_unlock(&(queues_lock));
+int destroy_queue(struct net_device *dev)
+{
+	int unlink;
+	unsigned long iflags;
+	struct qos_queue *q = get_queue(dev);
+	if (q == 0)
 		return 1;
-	}
 
+	spin_lock_irqsave(&(q->qlock), iflags);
+	unlink = (q->dev != 0);
 	q->dev = 0;
-
-	dev_put(dev);
-
-	mutex_unlock(&(queues_lock));
+	_destroy_queue(q, QOS_CALLER_KPACKET);
+	_destroy_queue(q, QOS_CALLER_CONN_RETRANS);
+	_destroy_queue(q, QOS_CALLER_ANNOUNCE);
+	spin_unlock_irqrestore(&(q->qlock), iflags);
+
+	if (unlink) {
+		dev_put(dev);
+		mutex_lock(&(queues_lock));
+		list_del(&(q->queue_list));
+		mutex_unlock(&(queues_lock));
+		kref_put(&(q->ref), free_qos);
+	}
+	kref_put(&(q->ref), free_qos);
 
 	return 0;
 }
@@ -304,6 +391,9 @@ int create_queue(struct net_device *dev)
 		return 1;
 	}
 
+	spin_lock_init(&(q->qlock));
+	kref_init(&(q->ref));
+
 	q->dev = dev;
 	dev_hold(dev);
@@ -319,19 +408,15 @@ int create_queue(struct net_device *dev)
 	return 0;
 }
 
-void qos_enqueue(struct net_device *dev, struct resume_block *rb, int caller)
+void qos_enqueue(struct qos_queue *q, struct resume_block *rb, int caller)
 {
-	struct qos_queue *q;
+	unsigned long iflags;
 
-	mutex_lock(&(queues_lock));
+	spin_lock_irqsave(&(q->qlock), iflags);
 
 	if (rb->in_queue)
 		goto out;
 
-	q = get_queue(dev);
-	if (unlikely(q == 0))
-		goto out;
-
 	rb->in_queue = 1;
 
 	if (caller == QOS_CALLER_KPACKET) {
@@ -344,14 +429,8 @@ void qos_enqueue(struct net_device *dev, struct resume_block *rb, int caller)
 		list_add(&(rb->lh), &(q->announce_waiting));
 		kref_get(&(container_of(rb, struct announce_data, rb)->ref));
 	} else if (caller == QOS_CALLER_CONN) {
-		struct conn *trgt_out = container_of(rb, struct conn,
-				target.out.rb);
-		mutex_lock(&(trgt_out->rcv_lock));
-		#warning todo targettype might have changed
-		BUG_ON(trgt_out->targettype != TARGET_OUT);
 		list_add(&(rb->lh), &(q->conns_waiting));
-		kref_get(&(trgt_out->ref));
-		mutex_unlock(&(trgt_out->rcv_lock));
+		kref_get(&(container_of(rb, struct conn, target.out.rb)->ref));
 	} else {
 		BUG();
 	}
@@ -362,69 +441,64 @@ void qos_enqueue(struct net_device *dev, struct resume_block *rb, int caller)
 	}
 
 out:
-	mutex_unlock(&(queues_lock));
+	spin_unlock_irqrestore(&(q->qlock), iflags);
 }
 
-void qos_remove_conn(struct conn *cn)
+void qos_remove_conn(struct conn *trgt_out_l)
 {
-	int kref = 0;
-	mutex_lock(&(queues_lock));
-	mutex_lock(&(cn->rcv_lock));
-	if (cn->targettype != TARGET_OUT)
-		goto out;
-	if (cn->target.out.rb.in_queue == 0)
-		goto out;
+	unsigned long iflags;
+	struct qos_queue *q;
 
-	cn->target.out.rb.in_queue = 0;
-	list_del(&(cn->target.out.rb.lh));
-	kref = 1;
+	BUG_ON(trgt_out_l->targettype != TARGET_OUT);
 
-out:
-	mutex_unlock(&(cn->rcv_lock));
-	mutex_unlock(&(queues_lock));
+	if (trgt_out_l->target.out.rb.in_queue == 0)
+		return;
+
+	q = trgt_out_l->target.out.nb->queue;
+	BUG_ON(q == 0);
 
-	if (kref)
-		kref_put(&(cn->ref), free_conn);
+	trgt_out_l->target.out.rb.in_queue = 0;
+	spin_lock_irqsave(&(q->qlock), iflags);
+	list_del(&(trgt_out_l->target.out.rb.lh));
+	spin_unlock_irqrestore(&(q->qlock), iflags);
+
+	kref_put(&(trgt_out_l->ref), free_conn);
 }
 
-static int may_send_conn_retrans(struct neighbor *nb)
+static void qos_enqueue_conn(struct conn *trgt_out_l)
 {
-	struct qos_queue *q;
-	int rc = 0;
-
-	mutex_lock(&(queues_lock));
+	qos_enqueue(trgt_out_l->target.out.nb->queue,
+			&(trgt_out_l->target.out.rb), QOS_CALLER_CONN);
+}
 
-	q = get_queue(nb->dev);
-	if (unlikely(q == 0))
-		goto out;
+static int may_send_conn_retrans(struct neighbor *nb)
+{
+	unsigned long iflags;
+	int rc;
 
-	rc = (list_empty(&(q->kpackets_waiting)));
+	BUG_ON(nb->queue == 0);
 
-out:
-	mutex_unlock(&(queues_lock));
+	spin_lock_irqsave(&(nb->queue->qlock), iflags);
+	rc = (list_empty(&(nb->queue->kpackets_waiting)));
+	spin_unlock_irqrestore(&(nb->queue->qlock), iflags);
 
 	return rc;
 }
 
 static int may_send_conn(struct conn *trgt_out_l)
 {
-	struct qos_queue *q;
-	int rc = 0;
+	unsigned long iflags;
+	struct qos_queue *q = trgt_out_l->target.out.nb->queue;
+	int rc;
 
-	#warning todo this may deadlock, use atomic_ops instead, modify get_queue (move pointer to neighbor?)
-	mutex_lock(&(queues_lock));
-
-	q = get_queue(trgt_out_l->target.out.nb->dev);
-	if (unlikely(q == 0))
-		goto out;
+	BUG_ON(q == 0);
 
+	spin_lock_irqsave(&(q->qlock), iflags);
 	rc = (list_empty(&(q->kpackets_waiting)) &&
 			list_empty(&(q->conn_retrans_waiting)) &&
 			list_empty(&(q->announce_waiting)) &&
 			list_empty(&(q->conns_waiting)));
-
-out:
-	mutex_unlock(&(queues_lock));
+	spin_unlock_irqrestore(&(q->qlock), iflags);
 
 	return rc;
 }
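All of the queue operations above share one invariant: an entry on a waiting list holds exactly one reference on its owner, taken in qos_enqueue(), and whoever clears in_queue and unlinks the entry (qos_remove_conn(), _destroy_queue(), or the resume path) drops exactly that one reference. A sketch of the destroy-side drain under that invariant; entry, entry_free and drain_list are illustrative names, not cor API:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>

struct entry {
	struct list_head lh;
	struct kref ref;
	int in_queue;
};

static void entry_free(struct kref *ref)
{
	kfree(container_of(ref, struct entry, ref));
}

/* called with the queue lock held; mirrors _destroy_queue() */
static void drain_list(struct list_head *lh)
{
	while (list_empty(lh) == 0) {
		struct entry *e = container_of(lh->next, struct entry, lh);

		e->in_queue = 0;
		list_del(&(e->lh));
		kref_put(&(e->ref), entry_free);	/* the list's reference */
	}
}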
@@ -677,9 +751,6 @@ static int send_retrans(struct neighbor *nb, int fromqos)
 		cr = container_of(nb->retrans_list_conn.next,
 				struct conn_retrans, timeout_list);
 
-		#warning todo lock
-		BUG_ON(cr->trgt_out_o->targettype != TARGET_OUT);
-
 		if (unlikely(nbstate == NEIGHBOR_STATE_KILLED)) {
 			list_del(&(cr->timeout_list));
 			list_del(&(cr->conn_list));
@@ -689,13 +760,13 @@ static int send_retrans(struct neighbor *nb, int fromqos)
 			continue;
 		}
 
-		BUG_ON(nb != cr->trgt_out_o->target.out.nb);
-
 		#warning todo check window limit
 		if (time_after(cr->timeout, jiffies)) {
 			schedule_delayed_work(&(nb->retrans_timer_conn),
 					cr->timeout - jiffies);
+			if (fromqos)
+				kref_get(&(nb->ref));
 			rescheduled = 1;
 			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
 			break;
@@ -706,15 +777,14 @@ static int send_retrans(struct neighbor *nb, int fromqos)
 		queuefull = _send_retrans(nb, cr);
 		kref_put(&(cr->ref), free_connretrans);
 		if (queuefull) {
-			rescheduled = 1;
 			if (fromqos == 0)
-				qos_enqueue(nb->dev, &(nb->rb_cr),
+				qos_enqueue(nb->queue, &(nb->rb_cr),
 						QOS_CALLER_CONN_RETRANS);
 			break;
 		}
 	}
 
-	if (rescheduled == 0)
+	if (rescheduled == 0 && fromqos == 0)
 		kref_put(&(nb->ref), neighbor_free);
 
 	return queuefull;
@@ -808,7 +878,8 @@ void conn_ack_rcvd(struct neighbor *nb, __u32 conn_id, struct conn *trgt_out,
 
 	mutex_lock(&(trgt_out->rcv_lock));
 
-	#warning todo reset check?
+	if (unlikely(trgt_out->isreset != 0))
+		goto out;
 	if (unlikely(trgt_out->targettype != TARGET_OUT))
 		goto out;
 	if (unlikely(trgt_out->target.out.nb != nb))
 		goto out;
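The conn_ack_rcvd() hunk above resolves the old "#warning todo reset check?": an ack can race with _reset_conn(), so once rcv_lock is taken, every assumption about the conn (not reset, still TARGET_OUT, still the same neighbor) has to be re-validated before any target.out state is touched. The shape of that validate-after-lock idiom as a standalone sketch; conn_state, handle_ack and the constant are illustrative, not cor definitions:

#include <linux/mutex.h>

#define TARGET_OUT 2	/* assumed value, for the sketch only */

struct conn_state {
	struct mutex lock;	/* initialised elsewhere with mutex_init() */
	int isreset;
	int targettype;
};

static void handle_ack(struct conn_state *cn)
{
	mutex_lock(&(cn->lock));
	if (cn->isreset != 0)
		goto out;	/* reset raced with us; all out-state is stale */
	if (cn->targettype != TARGET_OUT)
		goto out;	/* target changed while we were unlocked */

	/* ... only now is it safe to touch target.out state ... */
out:
	mutex_unlock(&(cn->lock));
}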
@@ -936,8 +1007,10 @@ int flush_out(struct conn *trgt_out_l, int fromqos, __u32 creditsperbyte)
 			trgt_out_l->source.sock.delay_flush != 0))
 		return RC_FLUSH_CONN_OUT_OK;
 
-	if (fromqos == 0 && may_send_conn(trgt_out_l) == 0)
+	if (fromqos == 0 && may_send_conn(trgt_out_l) == 0) {
+		qos_enqueue_conn(trgt_out_l);
 		return RC_FLUSH_CONN_OUT_CONG;
+	}
 
 	while (trgt_out_l->data_buf.read_remaining >= targetmss &&
 			get_windowlimit(trgt_out_l) >= targetmss) {
@@ -951,14 +1024,18 @@ int flush_out(struct conn *trgt_out_l, int fromqos, __u32 creditsperbyte)
 			return RC_FLUSH_CONN_OUT_CREDITS;
 
 		seqno = trgt_out_l->target.out.seqno_nextsend;
-		skb = create_packet(trgt_out_l->target.out.nb, targetmss, GFP_ATOMIC,
-				trgt_out_l->target.out.conn_id, seqno);
-		if (unlikely(skb == 0))
+		skb = create_packet(trgt_out_l->target.out.nb, targetmss,
+				GFP_ATOMIC, trgt_out_l->target.out.conn_id,
+				seqno);
+		if (unlikely(skb == 0)) {
+			qos_enqueue_conn(trgt_out_l);
 			return RC_FLUSH_CONN_OUT_OOM;
+		}
 
 		cr = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
 		if (unlikely(cr == 0)) {
 			kfree_skb(skb);
+			qos_enqueue_conn(trgt_out_l);
 			return RC_FLUSH_CONN_OUT_OOM;
 		}
 
@@ -970,6 +1047,7 @@ int flush_out(struct conn *trgt_out_l, int fromqos, __u32 creditsperbyte)
 		if (rc != 0) {
 			databuf_unpull(trgt_out_l, targetmss);
 			kmem_cache_free(connretrans_slab, cr);
+			qos_enqueue_conn(trgt_out_l);
 			return RC_FLUSH_CONN_OUT_CONG;
 		}
 
@@ -1004,13 +1082,16 @@ int flush_out(struct conn *trgt_out_l, int fromqos, __u32 creditsperbyte)
 		if (unlikely(creditsperbyte * len > trgt_out_l->credits))
 			return RC_FLUSH_CONN_OUT_CREDITS;
 
-		if (unlikely(buf == 0))
+		if (unlikely(buf == 0)) {
+			qos_enqueue_conn(trgt_out_l);
 			return RC_FLUSH_CONN_OUT_OOM;
+		}
 
 		cm = alloc_control_msg(trgt_out_l->target.out.nb,
 				ACM_PRIORITY_LOW);
 		if (unlikely(cm == 0)) {
 			kfree(buf);
+			qos_enqueue_conn(trgt_out_l);
 			return RC_FLUSH_CONN_OUT_OOM;
 		}
 
@@ -1018,6 +1099,7 @@ int flush_out(struct conn *trgt_out_l, int fromqos, __u32 creditsperbyte)
 		if (unlikely(cr == 0)) {
 			kfree(buf);
 			free_control_msg(cm);
+			qos_enqueue_conn(trgt_out_l);
 			return RC_FLUSH_CONN_OUT_CONG;
 		}
 
-- 
2.11.4.GIT