/*
 * klist.c - Routines for manipulating klists.
 *
 * This klist interface provides a couple of structures that wrap around
 * struct list_head to provide an explicit list "head" (struct klist) and
 * list "node" (struct klist_node) object.  For struct klist, a spinlock
 * is included that protects access to the actual list itself.  struct
 * klist_node provides a pointer to the klist that owns it and a kref
 * reference count that indicates the number of current users of that
 * node in the list.
 *
 * The entire point is to provide an interface for iterating over a list
 * that is safe and allows for modification of the list during the
 * iteration (e.g. insertion and removal), including modification of the
 * current node on the list.
 *
 * It works using a third object type - struct klist_iter - that is
 * declared and initialized before an iteration.  klist_next() is used to
 * acquire the next element in the list.  It returns NULL if there are no
 * more items.  Internally, that routine takes the klist's lock, drops the
 * reference on the previous klist_node and grabs a reference on the next
 * one.  It then drops the lock and returns.
 *
 * There are primitives for adding and removing nodes to/from a klist.
 * When deleting, klist_del() will simply drop the reference count.  Only
 * when the count goes to 0 is the node removed from the list.
 * klist_remove() will try to delete the node from the list and block
 * until it is actually removed.  This is useful for objects (like
 * devices) that have been removed from the system and must be freed (but
 * must wait until all accessors have finished).
 */

#include <linux/klist.h>
#include <linux/export.h>
#include <linux/sched.h>

/*
 * Use the lowest bit of n_klist to mark deleted nodes and exclude
 * dead ones from iteration.
 */
#define KNODE_DEAD		1LU
#define KNODE_KLIST_MASK	~KNODE_DEAD

static struct klist *knode_klist(struct klist_node *knode)
{
	return (struct klist *)
		((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
}

static bool knode_dead(struct klist_node *knode)
{
	return (unsigned long)knode->n_klist & KNODE_DEAD;
}

static void knode_set_klist(struct klist_node *knode, struct klist *klist)
{
	knode->n_klist = klist;
	/* a node must never start out (or be re-bound) already dead */
	WARN_ON(knode_dead(knode));
}

static void knode_kill(struct klist_node *knode)
{
	/* a node must never be killed twice */
	WARN_ON(knode_dead(knode));
	*(unsigned long *)&knode->n_klist |= KNODE_DEAD;
}

/**
 * klist_init - Initialize a klist structure.
 * @k: The klist we're initializing.
 * @get: The get function for the embedding object (NULL if none)
 * @put: The put function for the embedding object (NULL if none)
 *
 * Initialises the klist structure.  If the klist_node structures are
 * going to be embedded in refcounted objects (necessary for safe
 * deletion) then the get/put arguments are used to initialise
 * functions that take and release references on the embedding
 * objects.
 */
void klist_init(struct klist *k, void (*get)(struct klist_node *),
		void (*put)(struct klist_node *))
{
	INIT_LIST_HEAD(&k->k_list);
	spin_lock_init(&k->k_lock);
	k->get = get;
	k->put = put;
}
EXPORT_SYMBOL_GPL(klist_init);
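
/*
 * Example (illustrative sketch only): a caller that embeds klist_node in a
 * refcounted object would typically wire the get/put callbacks to that
 * object's own reference counting.  struct my_obj, my_obj_get_node(),
 * my_obj_put_node() and my_obj_release() below are hypothetical names,
 * not part of this file or of any kernel API:
 *
 *	struct my_obj {
 *		struct kref		ref;
 *		struct klist_node	node;
 *	};
 *
 *	static void my_obj_get_node(struct klist_node *n)
 *	{
 *		kref_get(&container_of(n, struct my_obj, node)->ref);
 *	}
 *
 *	static void my_obj_put_node(struct klist_node *n)
 *	{
 *		kref_put(&container_of(n, struct my_obj, node)->ref,
 *			 my_obj_release);
 *	}
 *
 *	static struct klist my_list;
 *
 *	klist_init(&my_list, my_obj_get_node, my_obj_put_node);
 */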

static void add_head(struct klist *k, struct klist_node *n)
{
	spin_lock(&k->k_lock);
	list_add(&n->n_node, &k->k_list);
	spin_unlock(&k->k_lock);
}

static void add_tail(struct klist *k, struct klist_node *n)
{
	spin_lock(&k->k_lock);
	list_add_tail(&n->n_node, &k->k_list);
	spin_unlock(&k->k_lock);
}

static void klist_node_init(struct klist *k, struct klist_node *n)
{
	INIT_LIST_HEAD(&n->n_node);
	kref_init(&n->n_ref);
	knode_set_klist(n, k);
	if (k->get)
		k->get(n);
}

/**
 * klist_add_head - Initialize a klist_node and add it to the front.
 * @n: node we're adding.
 * @k: klist it's going on.
 */
void klist_add_head(struct klist_node *n, struct klist *k)
{
	klist_node_init(k, n);
	add_head(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_head);

/**
 * klist_add_tail - Initialize a klist_node and add it to the back.
 * @n: node we're adding.
 * @k: klist it's going on.
 */
void klist_add_tail(struct klist_node *n, struct klist *k)
{
	klist_node_init(k, n);
	add_tail(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_tail);

/**
 * klist_add_behind - Init a klist_node and add it after an existing node.
 * @n: node we're adding.
 * @pos: node to put @n after.
 */
void klist_add_behind(struct klist_node *n, struct klist_node *pos)
{
	struct klist *k = knode_klist(pos);

	klist_node_init(k, n);
	spin_lock(&k->k_lock);
	list_add(&n->n_node, &pos->n_node);
	spin_unlock(&k->k_lock);
}
EXPORT_SYMBOL_GPL(klist_add_behind);

/**
 * klist_add_before - Init a klist_node and add it before an existing node.
 * @n: node we're adding.
 * @pos: node to put @n before.
 */
void klist_add_before(struct klist_node *n, struct klist_node *pos)
{
	struct klist *k = knode_klist(pos);

	klist_node_init(k, n);
	spin_lock(&k->k_lock);
	list_add_tail(&n->n_node, &pos->n_node);
	spin_unlock(&k->k_lock);
}
EXPORT_SYMBOL_GPL(klist_add_before);

/* Used by klist_remove() to wait until a dying node is actually gone. */
struct klist_waiter {
	struct list_head list;
	struct klist_node *node;
	struct task_struct *process;
	int woken;
};

static DEFINE_SPINLOCK(klist_remove_lock);
static LIST_HEAD(klist_remove_waiters);

static void klist_release(struct kref *kref)
{
	struct klist_waiter *waiter, *tmp;
	struct klist_node *n = container_of(kref, struct klist_node, n_ref);

	WARN_ON(!knode_dead(n));
	list_del(&n->n_node);
	spin_lock(&klist_remove_lock);
	list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) {
		if (waiter->node != n)
			continue;

		list_del(&waiter->list);
		waiter->woken = 1;
		mb();
		wake_up_process(waiter->process);
	}
	spin_unlock(&klist_remove_lock);
	knode_set_klist(n, NULL);
}

static int klist_dec_and_del(struct klist_node *n)
{
	return kref_put(&n->n_ref, klist_release);
}

static void klist_put(struct klist_node *n, bool kill)
{
	struct klist *k = knode_klist(n);
	void (*put)(struct klist_node *) = k->put;

	spin_lock(&k->k_lock);
	if (kill)
		knode_kill(n);
	if (!klist_dec_and_del(n))
		put = NULL;
	spin_unlock(&k->k_lock);
	if (put)
		put(n);
}

/**
 * klist_del - Decrement the reference count of a node and try to remove it.
 * @n: node we're deleting.
 */
void klist_del(struct klist_node *n)
{
	klist_put(n, true);
}
EXPORT_SYMBOL_GPL(klist_del);

/**
 * klist_remove - Decrement the refcount of a node and wait for it to go away.
 * @n: node we're removing.
 */
void klist_remove(struct klist_node *n)
{
	struct klist_waiter waiter;

	waiter.node = n;
	waiter.process = current;
	waiter.woken = 0;
	spin_lock(&klist_remove_lock);
	list_add(&waiter.list, &klist_remove_waiters);
	spin_unlock(&klist_remove_lock);

	klist_del(n);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (waiter.woken)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL_GPL(klist_remove);
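
/*
 * klist_del() vs. klist_remove(), as an illustrative sketch only (the
 * embedding object "obj" and its fields are hypothetical):
 *
 *	klist_del(&obj->node);		// drop our reference; the node leaves
 *					// the list once all users let go
 *
 *	klist_remove(&obj->node);	// same, but sleeps until the node is
 *	kfree(obj);			// really off the list, so freeing the
 *					// embedding object is now safe
 *
 * klist_remove() sleeps, so it must not be called from atomic context.
 */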

/**
 * klist_node_attached - Say whether a node is bound to a list or not.
 * @n: Node that was initialized.
 */
int klist_node_attached(struct klist_node *n)
{
	return (n->n_klist != NULL);
}
EXPORT_SYMBOL_GPL(klist_node_attached);

/**
 * klist_iter_init_node - Initialize a klist_iter structure.
 * @k: klist we're iterating.
 * @i: klist_iter we're filling.
 * @n: node to start with.
 *
 * Similar to klist_iter_init(), but the iteration starts at @n instead
 * of at the list head.  A reference is taken on @n only if it is still
 * alive (its refcount has not already dropped to zero).
 */
void klist_iter_init_node(struct klist *k, struct klist_iter *i,
			  struct klist_node *n)
{
	i->i_klist = k;
	i->i_cur = NULL;
	if (n && kref_get_unless_zero(&n->n_ref))
		i->i_cur = n;
}
EXPORT_SYMBOL_GPL(klist_iter_init_node);

/**
 * klist_iter_init - Initialize a klist_iter structure.
 * @k: klist we're iterating.
 * @i: klist_iter structure we're filling.
 *
 * Similar to klist_iter_init_node(), but starts at the list head.
 */
void klist_iter_init(struct klist *k, struct klist_iter *i)
{
	klist_iter_init_node(k, i, NULL);
}
EXPORT_SYMBOL_GPL(klist_iter_init);

/**
 * klist_iter_exit - Finish a list iteration.
 * @i: Iterator structure.
 *
 * Must be called when done iterating over the list, as it drops the
 * reference on the current node.  Necessary in case iteration exited
 * before the end of the list was reached, and always good form.
 */
void klist_iter_exit(struct klist_iter *i)
{
	if (i->i_cur) {
		klist_put(i->i_cur, false);
		i->i_cur = NULL;
	}
}
EXPORT_SYMBOL_GPL(klist_iter_exit);
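
/*
 * A typical iteration, shown only as a sketch (struct my_obj and my_list
 * are the hypothetical names from the example above):
 *
 *	struct klist_iter iter;
 *	struct klist_node *n;
 *
 *	klist_iter_init(&my_list, &iter);
 *	while ((n = klist_next(&iter))) {
 *		struct my_obj *obj = container_of(n, struct my_obj, node);
 *
 *		// use obj; nodes may be added or klist_del()'d concurrently
 *	}
 *	klist_iter_exit(&iter);
 */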

static struct klist_node *to_klist_node(struct list_head *n)
{
	return container_of(n, struct klist_node, n_node);
}

/**
 * klist_prev - Ante up the previous node in the list.
 * @i: Iterator structure.
 *
 * First grab the list lock.  Drop the reference on the previous node, if
 * there was one.  Grab the previous node, take a reference on it, drop
 * the lock, and return that node.  Dead nodes are skipped; NULL is
 * returned once the list head is reached.
 */
struct klist_node *klist_prev(struct klist_iter *i)
{
	void (*put)(struct klist_node *) = i->i_klist->put;
	struct klist_node *last = i->i_cur;
	struct klist_node *prev;
	unsigned long flags;

	spin_lock_irqsave(&i->i_klist->k_lock, flags);

	if (last) {
		prev = to_klist_node(last->n_node.prev);
		if (!klist_dec_and_del(last))
			put = NULL;
	} else
		prev = to_klist_node(i->i_klist->k_list.prev);

	i->i_cur = NULL;
	while (prev != to_klist_node(&i->i_klist->k_list)) {
		if (likely(!knode_dead(prev))) {
			kref_get(&prev->n_ref);
			i->i_cur = prev;
			break;
		}
		prev = to_klist_node(prev->n_node.prev);
	}

	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);

	if (put && last)
		put(last);
	return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_prev);

/**
 * klist_next - Ante up the next node in the list.
 * @i: Iterator structure.
 *
 * First grab the list lock.  Drop the reference on the previous node, if
 * there was one.  Grab the next node, take a reference on it, drop the
 * lock, and return that node.  Dead nodes are skipped; NULL is returned
 * once the end of the list is reached.
 */
struct klist_node *klist_next(struct klist_iter *i)
{
	void (*put)(struct klist_node *) = i->i_klist->put;
	struct klist_node *last = i->i_cur;
	struct klist_node *next;
	unsigned long flags;

	spin_lock_irqsave(&i->i_klist->k_lock, flags);

	if (last) {
		next = to_klist_node(last->n_node.next);
		if (!klist_dec_and_del(last))
			put = NULL;
	} else
		next = to_klist_node(i->i_klist->k_list.next);

	i->i_cur = NULL;
	while (next != to_klist_node(&i->i_klist->k_list)) {
		if (likely(!knode_dead(next))) {
			kref_get(&next->n_ref);
			i->i_cur = next;
			break;
		}
		next = to_klist_node(next->n_node.next);
	}

	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);

	if (put && last)
		put(last);
	return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_next);