/*
 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/suspend.h>
#include <linux/connector.h>
#include <linux/delay.h>
34 static void cn_queue_wrapper(void *data
)
36 struct cn_callback_entry
*cbq
= data
;
38 cbq
->cb
->callback(cbq
->cb
->priv
);
39 cbq
->destruct_data(cbq
->ddata
);
43 static struct cn_callback_entry
*cn_queue_alloc_callback_entry(struct cn_callback
*cb
)
45 struct cn_callback_entry
*cbq
;
47 cbq
= kzalloc(sizeof(*cbq
), GFP_KERNEL
);
49 printk(KERN_ERR
"Failed to create new callback queue.\n");
54 INIT_WORK(&cbq
->work
, &cn_queue_wrapper
, cbq
);
58 static void cn_queue_free_callback(struct cn_callback_entry
*cbq
)
60 cancel_delayed_work(&cbq
->work
);
61 flush_workqueue(cbq
->pdev
->cn_queue
);
66 int cn_cb_equal(struct cb_id
*i1
, struct cb_id
*i2
)
68 return ((i1
->idx
== i2
->idx
) && (i1
->val
== i2
->val
));
71 int cn_queue_add_callback(struct cn_queue_dev
*dev
, struct cn_callback
*cb
)
73 struct cn_callback_entry
*cbq
, *__cbq
;
76 cbq
= cn_queue_alloc_callback_entry(cb
);
80 atomic_inc(&dev
->refcnt
);
83 spin_lock_bh(&dev
->queue_lock
);
84 list_for_each_entry(__cbq
, &dev
->queue_list
, callback_entry
) {
85 if (cn_cb_equal(&__cbq
->cb
->id
, &cb
->id
)) {
91 list_add_tail(&cbq
->callback_entry
, &dev
->queue_list
);
92 spin_unlock_bh(&dev
->queue_lock
);
95 atomic_dec(&dev
->refcnt
);
96 cn_queue_free_callback(cbq
);
102 cbq
->group
= cbq
->cb
->id
.idx
;
107 void cn_queue_del_callback(struct cn_queue_dev
*dev
, struct cb_id
*id
)
109 struct cn_callback_entry
*cbq
, *n
;
112 spin_lock_bh(&dev
->queue_lock
);
113 list_for_each_entry_safe(cbq
, n
, &dev
->queue_list
, callback_entry
) {
114 if (cn_cb_equal(&cbq
->cb
->id
, id
)) {
115 list_del(&cbq
->callback_entry
);
120 spin_unlock_bh(&dev
->queue_lock
);
123 cn_queue_free_callback(cbq
);
124 atomic_dec_and_test(&dev
->refcnt
);
128 struct cn_queue_dev
*cn_queue_alloc_dev(char *name
, struct sock
*nls
)
130 struct cn_queue_dev
*dev
;
132 dev
= kzalloc(sizeof(*dev
), GFP_KERNEL
);
136 snprintf(dev
->name
, sizeof(dev
->name
), "%s", name
);
137 atomic_set(&dev
->refcnt
, 0);
138 INIT_LIST_HEAD(&dev
->queue_list
);
139 spin_lock_init(&dev
->queue_lock
);
142 dev
->netlink_groups
= 0;
144 dev
->cn_queue
= create_workqueue(dev
->name
);
145 if (!dev
->cn_queue
) {
153 void cn_queue_free_dev(struct cn_queue_dev
*dev
)
155 struct cn_callback_entry
*cbq
, *n
;
157 flush_workqueue(dev
->cn_queue
);
158 destroy_workqueue(dev
->cn_queue
);
160 spin_lock_bh(&dev
->queue_lock
);
161 list_for_each_entry_safe(cbq
, n
, &dev
->queue_list
, callback_entry
)
162 list_del(&cbq
->callback_entry
);
163 spin_unlock_bh(&dev
->queue_lock
);
165 while (atomic_read(&dev
->refcnt
)) {
166 printk(KERN_INFO
"Waiting for %s to become free: refcnt=%d.\n",
167 dev
->name
, atomic_read(&dev
->refcnt
));