/*
 *  linux/fs/char_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
7 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/string.h>
12 #include <linux/major.h>
13 #include <linux/errno.h>
14 #include <linux/module.h>
15 #include <linux/smp_lock.h>
16 #include <linux/seq_file.h>
18 #include <linux/kobject.h>
19 #include <linux/kobj_map.h>
20 #include <linux/cdev.h>
21 #include <linux/mutex.h>
22 #include <linux/backing-dev.h>
25 #include <linux/kmod.h>
29 * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
31 * - permits shared-mmap for read, write and/or exec
32 * - does not permit private mmap in NOMMU mode (can't do COW)
33 * - no readahead or I/O queue unplugging required
35 struct backing_dev_info directly_mappable_cdev_bdi = {
38 /* permit private copies of the data to be taken */
41 /* permit direct mmap, for read, write or exec */
43 BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
/* Map of device numbers to cdev objects; populated lazily at open() time. */
static struct kobj_map *cdev_map;

/* Protects the chrdevs[] hash table below. */
static DEFINE_MUTEX(chrdevs_lock);

/*
 * One entry per registered major/minor range; chained into a small hash
 * table keyed by major (see major_to_index()).
 */
static struct char_device_struct {
	struct char_device_struct *next;
	unsigned int major;
	unsigned int baseminor;
	int minorct;
	char name[64];
	struct file_operations *fops;
	struct cdev *cdev;		/* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
60 /* index in the above */
61 static inline int major_to_index(int major)
63 return major % CHRDEV_MAJOR_HASH_SIZE;
#ifdef CONFIG_PROC_FS

/*
 * Emit "major name" for every entry in one hash bucket of chrdevs[];
 * used by the /proc/devices seq_file iterator (offset == bucket index).
 */
void chrdev_show(struct seq_file *f, off_t offset)
{
	struct char_device_struct *cd;

	if (offset < CHRDEV_MAJOR_HASH_SIZE) {
		mutex_lock(&chrdevs_lock);
		for (cd = chrdevs[offset]; cd; cd = cd->next)
			seq_printf(f, "%3d %s\n", cd->major, cd->name);
		mutex_unlock(&chrdevs_lock);
	}
}

#endif /* CONFIG_PROC_FS */
83 * Register a single major with a specified minor range.
85 * If major == 0 this functions will dynamically allocate a major and return
88 * If major > 0 this function will attempt to reserve the passed range of
89 * minors and will return zero on success.
91 * Returns a -ve errno on failure.
93 static struct char_device_struct *
94 __register_chrdev_region(unsigned int major, unsigned int baseminor,
95 int minorct, const char *name)
97 struct char_device_struct *cd, **cp;
101 cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
103 return ERR_PTR(-ENOMEM);
105 mutex_lock(&chrdevs_lock);
109 for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
110 if (chrdevs[i] == NULL)
123 cd->baseminor = baseminor;
124 cd->minorct = minorct;
125 strncpy(cd->name,name, 64);
127 i = major_to_index(major);
129 for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
130 if ((*cp)->major > major ||
131 ((*cp)->major == major &&
132 (((*cp)->baseminor >= baseminor) ||
133 ((*cp)->baseminor + (*cp)->minorct > baseminor))))
136 /* Check for overlapping minor ranges. */
137 if (*cp && (*cp)->major == major) {
138 int old_min = (*cp)->baseminor;
139 int old_max = (*cp)->baseminor + (*cp)->minorct - 1;
140 int new_min = baseminor;
141 int new_max = baseminor + minorct - 1;
143 /* New driver overlaps from the left. */
144 if (new_max >= old_min && new_max <= old_max) {
149 /* New driver overlaps from the right. */
150 if (new_min <= old_max && new_min >= old_min) {
158 mutex_unlock(&chrdevs_lock);
161 mutex_unlock(&chrdevs_lock);
166 static struct char_device_struct *
167 __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
169 struct char_device_struct *cd = NULL, **cp;
170 int i = major_to_index(major);
172 mutex_lock(&chrdevs_lock);
173 for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
174 if ((*cp)->major == major &&
175 (*cp)->baseminor == baseminor &&
176 (*cp)->minorct == minorct)
182 mutex_unlock(&chrdevs_lock);
186 int register_chrdev_region(dev_t from, unsigned count, const char *name)
188 struct char_device_struct *cd;
189 dev_t to = from + count;
192 for (n = from; n < to; n = next) {
193 next = MKDEV(MAJOR(n)+1, 0);
196 cd = __register_chrdev_region(MAJOR(n), MINOR(n),
204 for (n = from; n < to; n = next) {
205 next = MKDEV(MAJOR(n)+1, 0);
206 kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
211 int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
214 struct char_device_struct *cd;
215 cd = __register_chrdev_region(0, baseminor, count, name);
218 *dev = MKDEV(cd->major, cd->baseminor);
223 * register_chrdev() - Register a major number for character devices.
224 * @major: major device number or 0 for dynamic allocation
225 * @name: name of this range of devices
226 * @fops: file operations associated with this devices
228 * If @major == 0 this functions will dynamically allocate a major and return
231 * If @major > 0 this function will attempt to reserve a device with the given
232 * major number and will return zero on success.
234 * Returns a -ve errno on failure.
236 * The name of this device has nothing to do with the name of the device in
237 * /dev. It only helps to keep track of the different owners of devices. If
238 * your module name has only one type of devices it's ok to use e.g. the name
239 * of the module here.
241 * This function registers a range of 256 minor numbers. The first minor number
244 int register_chrdev(unsigned int major, const char *name,
245 const struct file_operations *fops)
247 struct char_device_struct *cd;
252 cd = __register_chrdev_region(major, 0, 256, name);
260 cdev->owner = fops->owner;
262 kobject_set_name(&cdev->kobj, "%s", name);
263 for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/'))
266 err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
272 return major ? 0 : cd->major;
274 kobject_put(&cdev->kobj);
276 kfree(__unregister_chrdev_region(cd->major, 0, 256));
280 void unregister_chrdev_region(dev_t from, unsigned count)
282 dev_t to = from + count;
285 for (n = from; n < to; n = next) {
286 next = MKDEV(MAJOR(n)+1, 0);
289 kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
293 int unregister_chrdev(unsigned int major, const char *name)
295 struct char_device_struct *cd;
296 cd = __unregister_chrdev_region(major, 0, 256);
303 static DEFINE_SPINLOCK(cdev_lock);
305 static struct kobject *cdev_get(struct cdev *p)
307 struct module *owner = p->owner;
308 struct kobject *kobj;
310 if (owner && !try_module_get(owner))
312 kobj = kobject_get(&p->kobj);
318 void cdev_put(struct cdev *p)
321 struct module *owner = p->owner;
322 kobject_put(&p->kobj);
328 * Called every time a character special file is opened
330 int chrdev_open(struct inode * inode, struct file * filp)
333 struct cdev *new = NULL;
336 spin_lock(&cdev_lock);
339 struct kobject *kobj;
341 spin_unlock(&cdev_lock);
342 kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
345 new = container_of(kobj, struct cdev, kobj);
346 spin_lock(&cdev_lock);
349 inode->i_cdev = p = new;
350 inode->i_cindex = idx;
351 list_add(&inode->i_devices, &p->list);
353 } else if (!cdev_get(p))
355 } else if (!cdev_get(p))
357 spin_unlock(&cdev_lock);
361 filp->f_op = fops_get(p->ops);
366 if (filp->f_op->open) {
368 ret = filp->f_op->open(inode,filp);
376 void cd_forget(struct inode *inode)
378 spin_lock(&cdev_lock);
379 list_del_init(&inode->i_devices);
380 inode->i_cdev = NULL;
381 spin_unlock(&cdev_lock);
384 static void cdev_purge(struct cdev *cdev)
386 spin_lock(&cdev_lock);
387 while (!list_empty(&cdev->list)) {
389 inode = container_of(cdev->list.next, struct inode, i_devices);
390 list_del_init(&inode->i_devices);
391 inode->i_cdev = NULL;
393 spin_unlock(&cdev_lock);
397 * Dummy default file-operations: the only thing this does
398 * is contain the open that then fills in the correct operations
399 * depending on the special file...
401 const struct file_operations def_chr_fops = {
405 static struct kobject *exact_match(dev_t dev, int *part, void *data)
407 struct cdev *p = data;
411 static int exact_lock(dev_t dev, void *data)
413 struct cdev *p = data;
414 return cdev_get(p) ? 0 : -1;
417 int cdev_add(struct cdev *p, dev_t dev, unsigned count)
421 return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
424 static void cdev_unmap(dev_t dev, unsigned count)
426 kobj_unmap(cdev_map, dev, count);
429 void cdev_del(struct cdev *p)
431 cdev_unmap(p->dev, p->count);
432 kobject_put(&p->kobj);
436 static void cdev_default_release(struct kobject *kobj)
438 struct cdev *p = container_of(kobj, struct cdev, kobj);
442 static void cdev_dynamic_release(struct kobject *kobj)
444 struct cdev *p = container_of(kobj, struct cdev, kobj);
449 static struct kobj_type ktype_cdev_default = {
450 .release = cdev_default_release,
453 static struct kobj_type ktype_cdev_dynamic = {
454 .release = cdev_dynamic_release,
457 struct cdev *cdev_alloc(void)
459 struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
461 p->kobj.ktype = &ktype_cdev_dynamic;
462 INIT_LIST_HEAD(&p->list);
463 kobject_init(&p->kobj);
468 void cdev_init(struct cdev *cdev, const struct file_operations *fops)
470 memset(cdev, 0, sizeof *cdev);
471 INIT_LIST_HEAD(&cdev->list);
472 cdev->kobj.ktype = &ktype_cdev_default;
473 kobject_init(&cdev->kobj);
477 static struct kobject *base_probe(dev_t dev, int *part, void *data)
479 if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
480 /* Make old-style 2.4 aliases work */
481 request_module("char-major-%d", MAJOR(dev));
485 void __init chrdev_init(void)
487 cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(register_chrdev);
EXPORT_SYMBOL(unregister_chrdev);
EXPORT_SYMBOL(directly_mappable_cdev_bdi);