Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  *  linux/drivers/char/mem.c
0004  *
0005  *  Copyright (C) 1991, 1992  Linus Torvalds
0006  *
0007  *  Added devfs support.
0008  *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
0009  *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
0010  */
0011 
0012 #include <linux/mm.h>
0013 #include <linux/miscdevice.h>
0014 #include <linux/slab.h>
0015 #include <linux/vmalloc.h>
0016 #include <linux/mman.h>
0017 #include <linux/random.h>
0018 #include <linux/init.h>
0019 #include <linux/tty.h>
0020 #include <linux/capability.h>
0021 #include <linux/ptrace.h>
0022 #include <linux/device.h>
0023 #include <linux/highmem.h>
0024 #include <linux/backing-dev.h>
0025 #include <linux/shmem_fs.h>
0026 #include <linux/splice.h>
0027 #include <linux/pfn.h>
0028 #include <linux/export.h>
0029 #include <linux/io.h>
0030 #include <linux/uio.h>
0031 #include <linux/uaccess.h>
0032 #include <linux/security.h>
0033 
0034 #ifdef CONFIG_IA64
0035 # include <linux/efi.h>
0036 #endif
0037 
0038 #define DEVMEM_MINOR    1
0039 #define DEVPORT_MINOR   4
0040 
0041 static inline unsigned long size_inside_page(unsigned long start,
0042                          unsigned long size)
0043 {
0044     unsigned long sz;
0045 
0046     sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
0047 
0048     return min(sz, size);
0049 }
0050 
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
/*
 * Default policy: a physical range is accessible iff it lies entirely
 * below the top of the kernel's linear mapping.  Architectures with
 * memory holes or special regions provide their own version.
 *
 * NOTE(review): addr + count could wrap for extreme values; callers
 * appear to bound count via the VFS — confirm before relying on this.
 */
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
    return addr + count <= __pa(high_memory);
}

/* Default: any pfn range may be mmap()ed; arch code may restrict this. */
static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
    return 1;
}
#endif
0062 
#ifdef CONFIG_STRICT_DEVMEM
/*
 * Propagate the architecture's per-pfn /dev/mem policy.  The return value
 * is passed through unchanged; read_mem()/write_mem() treat a value of 2
 * as "allowed, but reads show zeroes" (see the allowed == 2 checks there).
 */
static inline int page_is_allowed(unsigned long pfn)
{
    return devmem_is_allowed(pfn);
}
/*
 * Check every page of [pfn, pfn + size) against devmem_is_allowed();
 * the whole range is rejected if any single page is denied.
 */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
    u64 from = ((u64)pfn) << PAGE_SHIFT;
    u64 to = from + size;
    u64 cursor = from;

    while (cursor < to) {
        if (!devmem_is_allowed(pfn))
            return 0;
        cursor += PAGE_SIZE;
        pfn++;
    }
    return 1;
}
#else
/* Without CONFIG_STRICT_DEVMEM every page is accessible. */
static inline int page_is_allowed(unsigned long pfn)
{
    return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
    return 1;
}
#endif
0092 
#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
/*
 * Default no-op: undo xlate_dev_mem_ptr().  Architectures that create a
 * temporary mapping in xlate_dev_mem_ptr() override this to tear it down.
 */
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif
0099 
/*
 * Called once per page inside the long read/write loops: yield the CPU if
 * a reschedule is due, and tell the caller to stop early when a signal is
 * pending so the copy can be interrupted.
 */
static inline bool should_stop_iteration(void)
{
    if (need_resched())
        cond_resched();
    return signal_pending(current);
}
0106 
/*
 * This function reads the *physical* memory. The f_pos points directly to
 * the memory location.
 *
 * Data is moved one page at a time through a kernel bounce buffer so that
 * a fault on the physical address (probed with copy_from_kernel_nofault())
 * is caught before anything reaches user space.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
            size_t count, loff_t *ppos)
{
    phys_addr_t p = *ppos;
    ssize_t read, sz;
    void *ptr;
    char *bounce;
    int err;

    /* Offset was truncated when stored into phys_addr_t: nothing to read. */
    if (p != *ppos)
        return 0;

    if (!valid_phys_addr_range(p, count))
        return -EFAULT;
    read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
    /* we don't have page 0 mapped on sparc and m68k.. */
    if (p < PAGE_SIZE) {
        sz = size_inside_page(p, count);
        if (sz > 0) {
            /* Fake a read of the unmapped page as zeroes. */
            if (clear_user(buf, sz))
                return -EFAULT;
            buf += sz;
            p += sz;
            count -= sz;
            read += sz;
        }
    }
#endif

    /* One page doubles as the probe target and the copy_to_user() source. */
    bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
    if (!bounce)
        return -ENOMEM;

    while (count > 0) {
        unsigned long remaining;
        int allowed, probe;

        /* Never cross a page boundary in a single step. */
        sz = size_inside_page(p, count);

        err = -EPERM;
        allowed = page_is_allowed(p >> PAGE_SHIFT);
        if (!allowed)
            goto failed;

        err = -EFAULT;
        if (allowed == 2) {
            /* Show zeros for restricted memory. */
            remaining = clear_user(buf, sz);
        } else {
            /*
             * On ia64 if a page has been mapped somewhere as
             * uncached, then it must also be accessed uncached
             * by the kernel or data corruption may occur.
             */
            ptr = xlate_dev_mem_ptr(p);
            if (!ptr)
                goto failed;

            /* Probe through the bounce buffer; a fault here is fatal
             * for the request but cannot crash the kernel. */
            probe = copy_from_kernel_nofault(bounce, ptr, sz);
            unxlate_dev_mem_ptr(p, ptr);
            if (probe)
                goto failed;

            remaining = copy_to_user(buf, bounce, sz);
        }

        if (remaining)
            goto failed;

        buf += sz;
        p += sz;
        count -= sz;
        read += sz;
        /* Yield if needed and stop early on a pending signal. */
        if (should_stop_iteration())
            break;
    }
    kfree(bounce);

    *ppos += read;
    return read;

failed:
    kfree(bounce);
    return err;
}
0197 
/*
 * Write to *physical* memory; f_pos is the physical address.  Returns the
 * number of bytes written, a short count when stopped after partial
 * progress, or a negative errno.
 */
static ssize_t write_mem(struct file *file, const char __user *buf,
             size_t count, loff_t *ppos)
{
    phys_addr_t p = *ppos;
    ssize_t written, sz;
    unsigned long copied;
    void *ptr;

    /* Offset does not fit in phys_addr_t. */
    if (p != *ppos)
        return -EFBIG;

    if (!valid_phys_addr_range(p, count))
        return -EFAULT;

    written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
    /* we don't have page 0 mapped on sparc and m68k.. */
    if (p < PAGE_SIZE) {
        sz = size_inside_page(p, count);
        /* Hmm. Do something? */
        buf += sz;
        p += sz;
        count -= sz;
        written += sz;
    }
#endif

    while (count > 0) {
        int allowed;

        /* Never cross a page boundary in a single step. */
        sz = size_inside_page(p, count);

        allowed = page_is_allowed(p >> PAGE_SHIFT);
        if (!allowed)
            return -EPERM;

        /* Skip actual writing when a page is marked as restricted. */
        if (allowed == 1) {
            /*
             * On ia64 if a page has been mapped somewhere as
             * uncached, then it must also be accessed uncached
             * by the kernel or data corruption may occur.
             */
            ptr = xlate_dev_mem_ptr(p);
            if (!ptr) {
                /* Report partial progress if we made any. */
                if (written)
                    break;
                return -EFAULT;
            }

            copied = copy_from_user(ptr, buf, sz);
            unxlate_dev_mem_ptr(p, ptr);
            if (copied) {
                /* Partial user copy: account for it, then stop. */
                written += sz - copied;
                if (written)
                    break;
                return -EFAULT;
            }
        }

        buf += sz;
        p += sz;
        count -= sz;
        written += sz;
        if (should_stop_iteration())
            break;
    }

    *ppos += written;
    return written;
}
0270 
/*
 * Weak hook for architectures to veto or adjust the protection of a
 * physical mapping; the default permits everything unchanged.
 */
int __weak phys_mem_access_prot_allowed(struct file *file,
    unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
    return 1;
}
0276 
#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 *
 */
#ifdef pgprot_noncached
/*
 * Decide whether an access to physical address @addr through this file
 * must bypass the CPU cache.
 */
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
    /*
     * On ia64, we ignore O_DSYNC because we cannot tolerate memory
     * attribute aliases.
     */
    return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
    /*
     * Accessing memory above the top the kernel knows about or through a
     * file pointer
     * that was marked O_DSYNC will be done non-cached.
     */
    if (file->f_flags & O_DSYNC)
        return 1;
    return addr >= __pa(high_memory);
#endif
}
#endif

/*
 * Generic fallback: choose the page protection for a physical mapping,
 * switching to non-cached when uncached_access() says so.
 */
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
    phys_addr_t offset = pfn << PAGE_SHIFT;

    if (uncached_access(file, offset))
        return pgprot_noncached(vma_prot);
#endif
    return vma_prot;
}
#endif
0318 
#ifndef CONFIG_MMU
/*
 * No-MMU: /dev/mem mappings are direct, so the "unmapped area" is simply
 * the physical address encoded by the page offset.
 */
static unsigned long get_unmapped_area_mem(struct file *file,
                       unsigned long addr,
                       unsigned long len,
                       unsigned long pgoff,
                       unsigned long flags)
{
    if (!valid_mmap_phys_addr_range(pgoff, len))
        return (unsigned long) -EINVAL;
    return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
    return NOMMU_MAP_DIRECT |
        NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

/* /dev/zero mappings can only be copied, never shared, without an MMU. */
static unsigned zero_mmap_capabilities(struct file *file)
{
    return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
    return vma->vm_flags & VM_MAYSHARE;
}
#else

/* With an MMU any private mapping is fine. */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
    return 1;
}
#endif
0355 
/* VMA ops for /dev/mem mappings; .access enables ptrace through them. */
static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
    .access = generic_access_phys
#endif
};
0361 
/*
 * mmap() handler for /dev/mem: validate the requested physical range
 * (fit, wrap-around, arch validity, STRICT_DEVMEM policy), pick the page
 * protection, then map it with remap_pfn_range().
 */
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
    size_t size = vma->vm_end - vma->vm_start;
    phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

    /* Does it even fit in phys_addr_t? */
    if (offset >> PAGE_SHIFT != vma->vm_pgoff)
        return -EINVAL;

    /* It's illegal to wrap around the end of the physical address space. */
    if (offset + (phys_addr_t)size - 1 < offset)
        return -EINVAL;

    if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
        return -EINVAL;

    if (!private_mapping_ok(vma))
        return -ENOSYS;

    if (!range_is_allowed(vma->vm_pgoff, size))
        return -EPERM;

    /* Give the architecture a chance to veto or tweak the protection. */
    if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                        &vma->vm_page_prot))
        return -EINVAL;

    vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                         size,
                         vma->vm_page_prot);

    vma->vm_ops = &mmap_mem_ops;

    /* Remap-pfn-range will mark the range VM_IO */
    if (remap_pfn_range(vma,
                vma->vm_start,
                vma->vm_pgoff,
                size,
                vma->vm_page_prot)) {
        return -EAGAIN;
    }
    return 0;
}
0404 
/*
 * /dev/port read: fill the user buffer one byte at a time with inb() from
 * successive I/O ports, starting at port *ppos and stopping at the end of
 * the 16-bit port space.
 */
static ssize_t read_port(struct file *file, char __user *buf,
             size_t count, loff_t *ppos)
{
    unsigned long i = *ppos;
    char __user *tmp = buf;

    if (!access_ok(buf, count))
        return -EFAULT;
    while (count-- > 0 && i < 65536) {
        /* access_ok() above makes __put_user() safe here. */
        if (__put_user(inb(i), tmp) < 0)
            return -EFAULT;
        i++;
        tmp++;
    }
    *ppos = i;
    return tmp-buf;
}
0422 
/*
 * /dev/port write: emit each user byte to successive I/O ports with
 * outb(), starting at port *ppos.  A fault mid-way reports the partial
 * count; a fault on the very first byte returns -EFAULT.
 */
static ssize_t write_port(struct file *file, const char __user *buf,
              size_t count, loff_t *ppos)
{
    unsigned long i = *ppos;
    const char __user *tmp = buf;

    if (!access_ok(buf, count))
        return -EFAULT;
    while (count-- > 0 && i < 65536) {
        char c;

        if (__get_user(c, tmp)) {
            /* Partial progress made: report it instead of failing. */
            if (tmp > buf)
                break;
            return -EFAULT;
        }
        outb(c, i);
        i++;
        tmp++;
    }
    *ppos = i;
    return tmp-buf;
}
0446 
/* /dev/null read: always EOF. */
static ssize_t read_null(struct file *file, char __user *buf,
             size_t count, loff_t *ppos)
{
    return 0;
}
0452 
/* /dev/null write: discard everything, claim full success. */
static ssize_t write_null(struct file *file, const char __user *buf,
              size_t count, loff_t *ppos)
{
    return count;
}
0458 
/* iov_iter flavour of the /dev/null read: always EOF. */
static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
    return 0;
}
0463 
/*
 * iov_iter flavour of the /dev/null write: consume the whole iterator
 * without copying and report it all as written.
 */
static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
    size_t count = iov_iter_count(from);
    iov_iter_advance(from, count);
    return count;
}
0470 
/* splice actor: swallow a pipe buffer, claiming its full length. */
static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
            struct splice_desc *sd)
{
    return sd->len;
}
0476 
/* splice into /dev/null: drain the pipe through pipe_to_null(). */
static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                 loff_t *ppos, size_t len, unsigned int flags)
{
    return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
0482 
/* io_uring command handler for /dev/null: accept and do nothing. */
static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
    return 0;
}
0487 
/*
 * iov_iter read from /dev/zero: fill the iterator with zeroes in
 * page-sized chunks, honouring pending signals, IOCB_NOWAIT and the
 * scheduler between chunks.  Returns bytes produced or a negative errno
 * if nothing was produced.
 */
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
    size_t written = 0;

    while (iov_iter_count(iter)) {
        size_t chunk = iov_iter_count(iter), n;

        if (chunk > PAGE_SIZE)
            chunk = PAGE_SIZE;  /* Just for latency reasons */
        n = iov_iter_zero(chunk, iter);
        /* Nothing zeroed but space remains: the user page faulted. */
        if (!n && iov_iter_count(iter))
            return written ? written : -EFAULT;
        written += n;
        if (signal_pending(current))
            return written ? written : -ERESTARTSYS;
        if (!need_resched())
            continue;
        /* A reschedule is due but we may not sleep in NOWAIT mode. */
        if (iocb->ki_flags & IOCB_NOWAIT)
            return written ? written : -EAGAIN;
        cond_resched();
    }
    return written;
}
0511 
/*
 * Classic read() path for /dev/zero: clear_user() the buffer in
 * page-sized chunks.  On a partial fault the bytes cleared so far are
 * reported; a fault with zero progress yields -EFAULT.
 */
static ssize_t read_zero(struct file *file, char __user *buf,
             size_t count, loff_t *ppos)
{
    size_t cleared = 0;

    while (count) {
        size_t chunk = min_t(size_t, count, PAGE_SIZE);
        size_t left;

        left = clear_user(buf + cleared, chunk);
        if (unlikely(left)) {
            /* Partial chunk cleared; stop and report progress. */
            cleared += (chunk - left);
            if (!cleared)
                return -EFAULT;
            break;
        }
        cleared += chunk;
        count -= chunk;

        /* Let a pending signal end the read early with a short count. */
        if (signal_pending(current))
            break;
        cond_resched();
    }

    return cleared;
}
0538 
/*
 * mmap() of /dev/zero: shared mappings are backed by a shmem object,
 * private ones become plain anonymous memory.  Not supported without
 * an MMU.
 */
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
    return -ENOSYS;
#endif
    if (vma->vm_flags & VM_SHARED)
        return shmem_zero_setup(vma);
    vma_set_anonymous(vma);
    return 0;
}
0549 
/*
 * Pick an address range for a /dev/zero mapping, deferring to shmem for
 * MAP_SHARED so huge pages can be used when available.
 */
static unsigned long get_unmapped_area_zero(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
    if (flags & MAP_SHARED) {
        /*
         * mmap_zero() will call shmem_zero_setup() to create a file,
         * so use shmem's get_unmapped_area in case it can be huge;
         * and pass NULL for file as in mmap.c's get_unmapped_area(),
         * so as not to confuse shmem with our handle on "/dev/zero".
         */
        return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
    }

    /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
    return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
    return -ENOSYS;
#endif
}
0571 
/* /dev/full write: always fails as if the device were out of space. */
static ssize_t write_full(struct file *file, const char __user *buf,
              size_t count, loff_t *ppos)
{
    return -ENOSPC;
}
0577 
/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 *
 * Any seek pins the position back to 0 and returns 0.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
    return file->f_pos = 0;
}
0587 
/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
    loff_t ret;

    /* Serialize against concurrent seekers on the same inode. */
    inode_lock(file_inode(file));
    switch (orig) {
    case SEEK_CUR:
        offset += file->f_pos;
        fallthrough;
    case SEEK_SET:
        /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
        if ((unsigned long long)offset >= -MAX_ERRNO) {
            ret = -EOVERFLOW;
            break;
        }
        file->f_pos = offset;
        ret = file->f_pos;
        force_successful_syscall_return();
        break;
    default:
        ret = -EINVAL;
    }
    inode_unlock(file_inode(file));
    return ret;
}
0621 
/*
 * open() for /dev/mem and /dev/port (aliased as open_mem below): requires
 * CAP_SYS_RAWIO and passes the kernel-lockdown check.  For /dev/mem only,
 * the file's mapping is switched to the shared iomem mapping.
 */
static int open_port(struct inode *inode, struct file *filp)
{
    int rc;

    if (!capable(CAP_SYS_RAWIO))
        return -EPERM;

    rc = security_locked_down(LOCKDOWN_DEV_MEM);
    if (rc)
        return rc;

    /* Only /dev/mem needs the unified mapping below. */
    if (iminor(inode) != DEVMEM_MINOR)
        return 0;

    /*
     * Use a unified address space to have a single point to manage
     * revocations when drivers want to take over a /dev/mem mapped
     * range.
     */
    filp->f_mapping = iomem_get_mapping();

    return 0;
}
0645 
0646 #define zero_lseek  null_lseek
0647 #define full_lseek      null_lseek
0648 #define write_zero  write_null
0649 #define write_iter_zero write_iter_null
0650 #define open_mem    open_port
0651 
/* /dev/mem (may be compiled out, hence __maybe_unused). */
static const struct file_operations __maybe_unused mem_fops = {
    .llseek     = memory_lseek,
    .read       = read_mem,
    .write      = write_mem,
    .mmap       = mmap_mem,
    .open       = open_mem,
#ifndef CONFIG_MMU
    .get_unmapped_area = get_unmapped_area_mem,
    .mmap_capabilities = memory_mmap_capabilities,
#endif
};
0663 
/* /dev/null. */
static const struct file_operations null_fops = {
    .llseek     = null_lseek,
    .read       = read_null,
    .write      = write_null,
    .read_iter  = read_iter_null,
    .write_iter = write_iter_null,
    .splice_write   = splice_write_null,
    .uring_cmd  = uring_cmd_null,
};
0673 
/* /dev/port (only built with CONFIG_DEVPORT, hence __maybe_unused). */
static const struct file_operations __maybe_unused port_fops = {
    .llseek     = memory_lseek,
    .read       = read_port,
    .write      = write_port,
    .open       = open_port,
};
0680 
/* /dev/zero. */
static const struct file_operations zero_fops = {
    .llseek     = zero_lseek,
    .write      = write_zero,
    .read_iter  = read_iter_zero,
    .read       = read_zero,
    .write_iter = write_iter_zero,
    .mmap       = mmap_zero,
    .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
    .mmap_capabilities = zero_mmap_capabilities,
#endif
};
0693 
/* /dev/full: reads like /dev/zero, writes always fail with ENOSPC. */
static const struct file_operations full_fops = {
    .llseek     = full_lseek,
    .read_iter  = read_iter_zero,
    .write      = write_full,
};
0699 
/*
 * Table of memory devices, indexed by minor number.  An entry with a
 * NULL ->fops means the minor is unused (see memory_open()); ->mode is
 * the default device-node mode and ->fmode extra f_mode bits to set at
 * open time.
 */
static const struct memdev {
    const char *name;
    umode_t mode;
    const struct file_operations *fops;
    fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
     [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
     [3] = { "null", 0666, &null_fops, FMODE_NOWAIT },
#ifdef CONFIG_DEVPORT
     [4] = { "port", 0, &port_fops, 0 },
#endif
     [5] = { "zero", 0666, &zero_fops, FMODE_NOWAIT },
     [7] = { "full", 0666, &full_fops, 0 },
     [8] = { "random", 0666, &random_fops, 0 },
     [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
    [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};
0721 
/*
 * Top-level open() for the MEM_MAJOR region: dispatch to the per-minor
 * file_operations from devlist[] and chain to its own open() if any.
 */
static int memory_open(struct inode *inode, struct file *filp)
{
    int minor;
    const struct memdev *dev;

    minor = iminor(inode);
    if (minor >= ARRAY_SIZE(devlist))
        return -ENXIO;

    dev = &devlist[minor];
    /* Gaps in devlist[] have a NULL ->fops. */
    if (!dev->fops)
        return -ENXIO;

    filp->f_op = dev->fops;
    filp->f_mode |= dev->fmode;

    if (dev->fops->open)
        return dev->fops->open(inode, filp);

    return 0;
}
0743 
/* fops registered for the whole major; memory_open() swaps in the real ones. */
static const struct file_operations memory_fops = {
    .open = memory_open,
    .llseek = noop_llseek,
};
0748 
/* Supply the /dev node mode from devlist[]; keep the default node name. */
static char *mem_devnode(struct device *dev, umode_t *mode)
{
    if (mode && devlist[MINOR(dev->devt)].mode)
        *mode = devlist[MINOR(dev->devt)].mode;
    return NULL;
}
0755 
0756 static struct class *mem_class;
0757 
0758 static int __init chr_dev_init(void)
0759 {
0760     int minor;
0761 
0762     if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
0763         printk("unable to get major %d for memory devs\n", MEM_MAJOR);
0764 
0765     mem_class = class_create(THIS_MODULE, "mem");
0766     if (IS_ERR(mem_class))
0767         return PTR_ERR(mem_class);
0768 
0769     mem_class->devnode = mem_devnode;
0770     for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
0771         if (!devlist[minor].name)
0772             continue;
0773 
0774         /*
0775          * Create /dev/port?
0776          */
0777         if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
0778             continue;
0779 
0780         device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
0781                   NULL, devlist[minor].name);
0782     }
0783 
0784     return tty_init();
0785 }
0786 
0787 fs_initcall(chr_dev_init);