0001
0002
0003
0004
0005
0006
0007
0008 #include <linux/init.h>
0009 #include <linux/fs.h>
0010 #include <linux/kdev_t.h>
0011 #include <linux/slab.h>
0012 #include <linux/string.h>
0013
0014 #include <linux/major.h>
0015 #include <linux/errno.h>
0016 #include <linux/module.h>
0017 #include <linux/seq_file.h>
0018
0019 #include <linux/kobject.h>
0020 #include <linux/kobj_map.h>
0021 #include <linux/cdev.h>
0022 #include <linux/mutex.h>
0023 #include <linux/backing-dev.h>
0024 #include <linux/tty.h>
0025
0026 #include "internal.h"
0027
/* Maps dev_t ranges to the cdev servicing them; populated by cdev_add(). */
static struct kobj_map *cdev_map;

/* Protects the chrdevs[] hash table below. */
static DEFINE_MUTEX(chrdevs_lock);

#define CHRDEV_MAJOR_HASH_SIZE 255

/*
 * One registered major:baseminor+minorct range.  Entries hash by major
 * into chrdevs[] and are chained through ->next, kept sorted by major
 * then baseminor (see the insertion walk in __register_chrdev_region()).
 */
static struct char_device_struct {
	struct char_device_struct *next;
	unsigned int major;
	unsigned int baseminor;
	int minorct;
	char name[64];
	struct cdev *cdev;	/* set only by __register_chrdev() */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
0042
0043
/* Hash a major number into a chrdevs[] bucket index. */
static inline int major_to_index(unsigned major)
{
	return major % CHRDEV_MAJOR_HASH_SIZE;
}
0048
#ifdef CONFIG_PROC_FS

/*
 * Emit every chrdev registration whose major equals @offset, one line
 * per entry (for /proc/devices).  Distinct majors can share a hash
 * bucket, hence the equality check while walking the chain.
 */
void chrdev_show(struct seq_file *f, off_t offset)
{
	struct char_device_struct *cur;

	mutex_lock(&chrdevs_lock);
	for (cur = chrdevs[major_to_index(offset)]; cur; cur = cur->next) {
		if (cur->major != offset)
			continue;
		seq_printf(f, "%3d %s\n", cur->major, cur->name);
	}
	mutex_unlock(&chrdevs_lock);
}

#endif /* CONFIG_PROC_FS */
0064
0065 static int find_dynamic_major(void)
0066 {
0067 int i;
0068 struct char_device_struct *cd;
0069
0070 for (i = ARRAY_SIZE(chrdevs)-1; i >= CHRDEV_MAJOR_DYN_END; i--) {
0071 if (chrdevs[i] == NULL)
0072 return i;
0073 }
0074
0075 for (i = CHRDEV_MAJOR_DYN_EXT_START;
0076 i >= CHRDEV_MAJOR_DYN_EXT_END; i--) {
0077 for (cd = chrdevs[major_to_index(i)]; cd; cd = cd->next)
0078 if (cd->major == i)
0079 break;
0080
0081 if (cd == NULL)
0082 return i;
0083 }
0084
0085 return -EBUSY;
0086 }
0087
0088
0089
0090
0091
0092
0093
0094
0095
/*
 * Register a single major:minor range in chrdevs[], picking a free
 * dynamic major when @major == 0.  Returns the new entry on success or
 * an ERR_PTR() on failure; the caller releases it via
 * __unregister_chrdev_region().
 */
static struct char_device_struct *
__register_chrdev_region(unsigned int major, unsigned int baseminor,
			 int minorct, const char *name)
{
	struct char_device_struct *cd, *curr, *prev = NULL;
	int ret;
	int i;

	if (major >= CHRDEV_MAJOR_MAX) {
		pr_err("CHRDEV \"%s\" major requested (%u) is greater than the maximum (%u)\n",
		       name, major, CHRDEV_MAJOR_MAX-1);
		return ERR_PTR(-EINVAL);
	}

	/* The requested minor range must fit below MINORMASK + 1. */
	if (minorct > MINORMASK + 1 - baseminor) {
		pr_err("CHRDEV \"%s\" minor range requested (%u-%u) is out of range of maximum range (%u-%u) for a single major\n",
		       name, baseminor, baseminor + minorct - 1, 0, MINORMASK);
		return ERR_PTR(-EINVAL);
	}

	/* Allocate before taking the lock; keeps the locked region simple. */
	cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&chrdevs_lock);

	if (major == 0) {
		ret = find_dynamic_major();
		if (ret < 0) {
			pr_err("CHRDEV \"%s\" dynamic allocation region is full\n",
			       name);
			goto out;
		}
		major = ret;
	}

	ret = -EBUSY;
	i = major_to_index(major);
	/*
	 * The chain is kept sorted by major, then baseminor.  Walk to the
	 * insertion point; any minor overlap within the same major fails
	 * with -EBUSY.
	 */
	for (curr = chrdevs[i]; curr; prev = curr, curr = curr->next) {
		if (curr->major < major)
			continue;

		if (curr->major > major)
			break;

		/* curr entirely below the new range: keep walking. */
		if (curr->baseminor + curr->minorct <= baseminor)
			continue;

		/* curr entirely above the new range: insert before it. */
		if (curr->baseminor >= baseminor + minorct)
			break;

		goto out;	/* overlap: ret is still -EBUSY */
	}

	cd->major = major;
	cd->baseminor = baseminor;
	cd->minorct = minorct;
	strlcpy(cd->name, name, sizeof(cd->name));

	/* Link the new entry at the position found above. */
	if (!prev) {
		cd->next = curr;
		chrdevs[i] = cd;
	} else {
		cd->next = prev->next;
		prev->next = cd;
	}

	mutex_unlock(&chrdevs_lock);
	return cd;
out:
	mutex_unlock(&chrdevs_lock);
	kfree(cd);
	return ERR_PTR(ret);
}
0170
0171 static struct char_device_struct *
0172 __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
0173 {
0174 struct char_device_struct *cd = NULL, **cp;
0175 int i = major_to_index(major);
0176
0177 mutex_lock(&chrdevs_lock);
0178 for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
0179 if ((*cp)->major == major &&
0180 (*cp)->baseminor == baseminor &&
0181 (*cp)->minorct == minorct)
0182 break;
0183 if (*cp) {
0184 cd = *cp;
0185 *cp = cd->next;
0186 }
0187 mutex_unlock(&chrdevs_lock);
0188 return cd;
0189 }
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
/**
 * register_chrdev_region() - register a range of device numbers
 * @from: the first in the desired range of device numbers; must include
 *        the major number.
 * @count: the number of consecutive device numbers required
 * @name: the name of the device or driver.
 *
 * Return value is zero on success, a negative error code on failure.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	/*
	 * __register_chrdev_region() handles a single major at a time, so
	 * split [from, to) at major boundaries.
	 */
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
			       next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	/*
	 * Roll back every chunk registered before the failure point n.
	 * No clamping of next is needed here: a chunk is only clamped on
	 * the final iteration, and that iteration is the one that failed.
	 */
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}
0224
0225
0226
0227
0228
0229
0230
0231
0232
0233
0234
0235
0236 int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
0237 const char *name)
0238 {
0239 struct char_device_struct *cd;
0240 cd = __register_chrdev_region(0, baseminor, count, name);
0241 if (IS_ERR(cd))
0242 return PTR_ERR(cd);
0243 *dev = MKDEV(cd->major, cd->baseminor);
0244 return 0;
0245 }
0246
0247
0248
0249
0250
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260
0261
0262
0263
0264
0265
0266
0267
/**
 * __register_chrdev() - register a major number plus a matching cdev
 * @major: major device number, or 0 for dynamic allocation
 * @baseminor: first of the requested range of minor numbers
 * @count: the number of minor numbers required
 * @name: name of this range of devices
 * @fops: file operations for these devices
 *
 * Reserves the device-number region and creates and adds a cdev for it
 * in one call.  Returns 0 (or, for @major == 0, the dynamically chosen
 * major) on success, a negative error code on failure.
 */
int __register_chrdev(unsigned int major, unsigned int baseminor,
		      unsigned int count, const char *name,
		      const struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	int err = -ENOMEM;

	cd = __register_chrdev_region(major, baseminor, count, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);

	err = cdev_add(cdev, MKDEV(cd->major, baseminor), count);
	if (err)
		goto out;

	/* Remember the cdev so __unregister_chrdev() can cdev_del() it. */
	cd->cdev = cdev;

	return major ? 0 : cd->major;
out:
	/* cdev_add() failed: drop the reference from cdev_alloc(). */
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, baseminor, count));
	return err;
}
0301
0302
0303
0304
0305
0306
0307
0308
0309
0310
0311 void unregister_chrdev_region(dev_t from, unsigned count)
0312 {
0313 dev_t to = from + count;
0314 dev_t n, next;
0315
0316 for (n = from; n < to; n = next) {
0317 next = MKDEV(MAJOR(n)+1, 0);
0318 if (next > to)
0319 next = to;
0320 kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
0321 }
0322 }
0323
0324
0325
0326
0327
0328
0329
0330
0331
0332
0333
0334
0335 void __unregister_chrdev(unsigned int major, unsigned int baseminor,
0336 unsigned int count, const char *name)
0337 {
0338 struct char_device_struct *cd;
0339
0340 cd = __unregister_chrdev_region(major, baseminor, count);
0341 if (cd && cd->cdev)
0342 cdev_del(cd->cdev);
0343 kfree(cd);
0344 }
0345
/* Protects inode->i_cdev caching and each cdev's list of inodes. */
static DEFINE_SPINLOCK(cdev_lock);

/*
 * Take a reference on @p: pin the owning module first, then the
 * kobject.  Returns the kobject on success, or NULL if the module is
 * going away or the kobject's refcount has already reached zero.
 */
static struct kobject *cdev_get(struct cdev *p)
{
	struct module *owner = p->owner;
	struct kobject *kobj;

	if (owner && !try_module_get(owner))
		return NULL;
	kobj = kobject_get_unless_zero(&p->kobj);
	if (!kobj)
		/* cdev is being torn down: release the module ref again. */
		module_put(owner);
	return kobj;
}
0360
0361 void cdev_put(struct cdev *p)
0362 {
0363 if (p) {
0364 struct module *owner = p->owner;
0365 kobject_put(&p->kobj);
0366 module_put(owner);
0367 }
0368 }
0369
0370
0371
0372
/*
 * Open handler for character special inodes (installed via
 * def_chr_fops): resolve inode->i_rdev to its cdev through cdev_map,
 * cache it in inode->i_cdev, then swap in the driver's fops and forward
 * the open to them.
 */
static int chrdev_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *fops;
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		/*
		 * Nothing cached yet.  Drop the spinlock for the lookup:
		 * kobj_lookup() can reach the probe function, which may
		 * block (base_probe() calls request_module()).
		 */
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);

		/*
		 * Re-check under the lock: a concurrent open may have
		 * cached a cdev while we slept.
		 */
		p = inode->i_cdev;
		if (!p) {
			/* We won the race: the inode keeps our reference. */
			inode->i_cdev = p = new;
			list_add(&inode->i_devices, &p->list);
			new = NULL;
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	/* Drops the lookup reference if we lost the race (NULL-safe). */
	cdev_put(new);
	if (ret)
		return ret;

	ret = -ENXIO;
	fops = fops_get(p->ops);
	if (!fops)
		goto out_cdev_put;

	/* Hand the file over to the driver and forward the open. */
	replace_fops(filp, fops);
	if (filp->f_op->open) {
		ret = filp->f_op->open(inode, filp);
		if (ret)
			goto out_cdev_put;
	}

	return 0;

out_cdev_put:
	cdev_put(p);
	return ret;
}
0425
0426 void cd_forget(struct inode *inode)
0427 {
0428 spin_lock(&cdev_lock);
0429 list_del_init(&inode->i_devices);
0430 inode->i_cdev = NULL;
0431 inode->i_mapping = &inode->i_data;
0432 spin_unlock(&cdev_lock);
0433 }
0434
0435 static void cdev_purge(struct cdev *cdev)
0436 {
0437 spin_lock(&cdev_lock);
0438 while (!list_empty(&cdev->list)) {
0439 struct inode *inode;
0440 inode = container_of(cdev->list.next, struct inode, i_devices);
0441 list_del_init(&inode->i_devices);
0442 inode->i_cdev = NULL;
0443 }
0444 spin_unlock(&cdev_lock);
0445 }
0446
0447
0448
0449
0450
0451
/*
 * Default file operations installed on character device inodes.  The
 * real driver fops replace these in chrdev_open() via replace_fops().
 */
const struct file_operations def_chr_fops = {
	.open = chrdev_open,
	.llseek = noop_llseek,
};
0456
0457 static struct kobject *exact_match(dev_t dev, int *part, void *data)
0458 {
0459 struct cdev *p = data;
0460 return &p->kobj;
0461 }
0462
0463 static int exact_lock(dev_t dev, void *data)
0464 {
0465 struct cdev *p = data;
0466 return cdev_get(p) ? 0 : -1;
0467 }
0468
0469
0470
0471
0472
0473
0474
0475
0476
0477
0478
/**
 * cdev_add() - add a char device to the system
 * @p: the cdev structure for the device
 * @dev: the first device number for which this device is responsible
 * @count: the number of consecutive minor numbers for this device
 *
 * Makes the device live immediately: opens on the mapped numbers can
 * reach it as soon as kobj_map() succeeds.  Returns 0 on success, a
 * negative error code on failure.
 */
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	int error;

	p->dev = dev;
	p->count = count;

	if (WARN_ON(dev == WHITEOUT_DEV))
		return -EBUSY;

	error = kobj_map(cdev_map, dev, count, NULL,
			 exact_match, exact_lock, p);
	if (error)
		return error;

	/*
	 * Pin the parent set by cdev_set_parent() (NULL-safe); dropped in
	 * the kobject release callbacks.
	 */
	kobject_get(p->kobj.parent);

	return 0;
}
0498
0499
0500
0501
0502
0503
0504
0505
0506
0507
/**
 * cdev_set_parent() - set the parent kobject for a cdev
 * @p: the cdev structure
 * @kobj: the kobject to record as parent
 *
 * Records @kobj as the parent of @p's embedded kobject; cdev_add() then
 * takes a reference on it so the parent outlives the cdev.  @kobj must
 * already be initialized (warns otherwise).
 */
void cdev_set_parent(struct cdev *p, struct kobject *kobj)
{
	WARN_ON(!kobj->state_initialized);
	p->kobj.parent = kobj;
}
0513
0514
0515
0516
0517
0518
0519
0520
0521
0522
0523
0524
0525
0526
0527
0528
0529
0530
0531
0532
0533
0534
0535
0536
0537 int cdev_device_add(struct cdev *cdev, struct device *dev)
0538 {
0539 int rc = 0;
0540
0541 if (dev->devt) {
0542 cdev_set_parent(cdev, &dev->kobj);
0543
0544 rc = cdev_add(cdev, dev->devt, 1);
0545 if (rc)
0546 return rc;
0547 }
0548
0549 rc = device_add(dev);
0550 if (rc)
0551 cdev_del(cdev);
0552
0553 return rc;
0554 }
0555
0556
0557
0558
0559
0560
0561
0562
0563
0564
0565
0566
0567
0568
0569
0570
/**
 * cdev_device_del() - inverse of cdev_device_add()
 * @cdev: the cdev structure
 * @dev: the device structure
 *
 * Tears down in reverse order: the device first, then the cdev — and
 * the cdev only when a devt was set, matching the conditional
 * cdev_add() in cdev_device_add().
 */
void cdev_device_del(struct cdev *cdev, struct device *dev)
{
	device_del(dev);
	if (dev->devt)
		cdev_del(cdev);
}
0577
/* Remove a device-number range from cdev_map (undoes the kobj_map()
 * done in cdev_add()). */
static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}
0582
0583
0584
0585
0586
0587
0588
0589
0590
0591
0592
0593
/**
 * cdev_del() - remove a cdev from the system
 * @p: the cdev structure to be removed
 *
 * Unmaps the device numbers so new opens can no longer find @p, then
 * drops a kobject reference.  The cdev may outlive this call while
 * still open; cdev_alloc()'d cdevs are freed only from the kobject
 * release callback when the last reference goes away.
 */
void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	kobject_put(&p->kobj);
}
0599
0600
/*
 * kobject release for cdevs set up with cdev_init(): the cdev storage
 * is owned by the caller, so only detach cached inodes and drop the
 * parent reference taken in cdev_add().
 */
static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	struct kobject *parent = kobj->parent;

	cdev_purge(p);
	kobject_put(parent);
}
0609
/*
 * kobject release for cdevs from cdev_alloc(): like the default
 * release, but also frees the cdev.  The parent pointer is saved up
 * front because the kobject is embedded in the memory kfree()d here.
 */
static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	struct kobject *parent = kobj->parent;

	cdev_purge(p);
	kfree(p);
	kobject_put(parent);
}
0619
/* kobj_type for caller-owned cdevs (cdev_init()). */
static struct kobj_type ktype_cdev_default = {
	.release = cdev_default_release,
};

/* kobj_type for kmalloc'ed cdevs (cdev_alloc()); release frees them. */
static struct kobj_type ktype_cdev_dynamic = {
	.release = cdev_dynamic_release,
};
0627
0628
0629
0630
0631
0632
0633 struct cdev *cdev_alloc(void)
0634 {
0635 struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
0636 if (p) {
0637 INIT_LIST_HEAD(&p->list);
0638 kobject_init(&p->kobj, &ktype_cdev_dynamic);
0639 }
0640 return p;
0641 }
0642
0643
0644
0645
0646
0647
0648
0649
0650
0651 void cdev_init(struct cdev *cdev, const struct file_operations *fops)
0652 {
0653 memset(cdev, 0, sizeof *cdev);
0654 INIT_LIST_HEAD(&cdev->list);
0655 kobject_init(&cdev->kobj, &ktype_cdev_default);
0656 cdev->ops = fops;
0657 }
0658
/*
 * Fallback probe for cdev_map: no driver has registered this dev_t, so
 * try to load one by module alias.  The specific "char-major-M-N"
 * alias is tried first; only if that request fails (positive return)
 * is the major-only "char-major-M" alias requested.  Always returns
 * NULL — a successful module load registers the cdev for a later
 * lookup to find.
 */
static struct kobject *base_probe(dev_t dev, int *part, void *data)
{
	if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("char-major-%d", MAJOR(dev));
	return NULL;
}
0666
/* Boot-time setup: create the dev_t -> cdev map, sharing chrdevs_lock. */
void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}
0671
0672
0673
/* Interfaces exported to drivers and the rest of the kernel. */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(cdev_set_parent);
EXPORT_SYMBOL(cdev_device_add);
EXPORT_SYMBOL(cdev_device_del);
EXPORT_SYMBOL(__register_chrdev);
EXPORT_SYMBOL(__unregister_chrdev);