// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
    struct vm_area_struct *vma;
    struct vm_region *region;
    struct rb_node *p;
    unsigned long bytes = 0, sbytes = 0, slack = 0, size;

    mmap_read_lock(mm);
    for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
        vma = rb_entry(p, struct vm_area_struct, vm_rb);

        bytes += kobjsize(vma);

        /* charge the size of the backing region where there is one,
         * otherwise the extent of the VMA itself */
        region = vma->vm_region;
        if (region) {
            size = kobjsize(region);
            size += region->vm_end - region->vm_start;
        } else {
            size = vma->vm_end - vma->vm_start;
        }

        if (atomic_read(&mm->mm_count) > 1 ||
            vma->vm_flags & VM_MAYSHARE) {
            sbytes += size;
        } else {
            bytes += size;
            if (region)
                slack = region->vm_end - vma->vm_end;
        }
    }

    if (atomic_read(&mm->mm_count) > 1)
        sbytes += kobjsize(mm);
    else
        bytes += kobjsize(mm);

    if (current->fs && current->fs->users > 1)
        sbytes += kobjsize(current->fs);
    else
        bytes += kobjsize(current->fs);

    if (current->files && atomic_read(&current->files->count) > 1)
        sbytes += kobjsize(current->files);
    else
        bytes += kobjsize(current->files);

    if (current->sighand && refcount_read(&current->sighand->count) > 1)
        sbytes += kobjsize(current->sighand);
    else
        bytes += kobjsize(current->sighand);

    bytes += kobjsize(current); /* includes kernel stack */

    seq_printf(m,
        "Mem:\t%8lu bytes\n"
        "Slack:\t%8lu bytes\n"
        "Shared:\t%8lu bytes\n",
        bytes, slack, sbytes);

    mmap_read_unlock(mm);
}

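/*
 * Report the total size of the address space mapped by an mm, summed
 * over the extents of its VMAs.
 */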
unsigned long task_vsize(struct mm_struct *mm)
{
    struct vm_area_struct *vma;
    struct rb_node *p;
    unsigned long vsize = 0;

    mmap_read_lock(mm);
    for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
        vma = rb_entry(p, struct vm_area_struct, vm_rb);
        vsize += vma->vm_end - vma->vm_start;
    }
    mmap_read_unlock(mm);
    return vsize;
}

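/*
 * Compute statm-style figures for an mm: kernel object overhead plus the
 * sizes of the backing regions, with text and data extents derived from
 * the code/data/stack markers.  All values are returned in pages.
 */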
unsigned long task_statm(struct mm_struct *mm,
             unsigned long *shared, unsigned long *text,
             unsigned long *data, unsigned long *resident)
{
    struct vm_area_struct *vma;
    struct vm_region *region;
    struct rb_node *p;
    unsigned long size = kobjsize(mm);

    mmap_read_lock(mm);
    for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
        vma = rb_entry(p, struct vm_area_struct, vm_rb);
        size += kobjsize(vma);
        region = vma->vm_region;
        if (region) {
            size += kobjsize(region);
            size += region->vm_end - region->vm_start;
        }
    }

    *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
        >> PAGE_SHIFT;
    *data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
        >> PAGE_SHIFT;
    mmap_read_unlock(mm);
    size >>= PAGE_SHIFT; /* size now in pages */
    size += *text + *data;
    *resident = size;
    return size;
}

static int is_stack(struct vm_area_struct *vma)
{
    struct mm_struct *mm = vma->vm_mm;

    /*
     * We make no effort to guess what a given thread considers to be
     * its "stack".  It's not even well-defined for programs written
     * in languages like Go.
     */
    return vma->vm_start <= mm->start_stack &&
        vma->vm_end >= mm->start_stack;
}

/*
 * display a single VMA to a sequenced file
 */
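/*
 * The line emitted follows the usual /proc/<pid>/maps format, e.g.
 * (illustrative values):
 *
 *   c0a8e000-c0a96000 r-xs 00000000 00:0c 13626      /bin/busybox
 */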
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
{
    struct mm_struct *mm = vma->vm_mm;
    unsigned long ino = 0;
    struct file *file;
    dev_t dev = 0;
    int flags;
    unsigned long long pgoff = 0;

    flags = vma->vm_flags;
    file = vma->vm_file;

    if (file) {
        struct inode *inode = file_inode(vma->vm_file);
        dev = inode->i_sb->s_dev;
        ino = inode->i_ino;
        pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
    }

    seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
    seq_printf(m,
           "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
           vma->vm_start,
           vma->vm_end,
           flags & VM_READ ? 'r' : '-',
           flags & VM_WRITE ? 'w' : '-',
           flags & VM_EXEC ? 'x' : '-',
           flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
           pgoff,
           MAJOR(dev), MINOR(dev), ino);

    if (file) {
        seq_pad(m, ' ');
        seq_file_path(m, file, "");
    } else if (mm && is_stack(vma)) {
        seq_pad(m, ' ');
        seq_puts(m, "[stack]");
    }

    seq_putc(m, '\n');
    return 0;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p)
{
    struct rb_node *p = _p;

    return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb));
}

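/*
 * seq_file start: pin the task and mm, take the mmap read lock and
 * return the rb_node for the *pos'th VMA, or NULL when we run out.
 */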
static void *m_start(struct seq_file *m, loff_t *pos)
{
    struct proc_maps_private *priv = m->private;
    struct mm_struct *mm;
    struct rb_node *p;
    loff_t n = *pos;

    /* pin the task and mm whilst we play with them */
    priv->task = get_proc_task(priv->inode);
    if (!priv->task)
        return ERR_PTR(-ESRCH);

    mm = priv->mm;
    if (!mm || !mmget_not_zero(mm))
        return NULL;

    if (mmap_read_lock_killable(mm)) {
        mmput(mm);
        return ERR_PTR(-EINTR);
    }

    /* start from the Nth VMA */
    for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
        if (n-- == 0)
            return p;

    mmap_read_unlock(mm);
    mmput(mm);
    return NULL;
}

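/*
 * seq_file stop: drop the mmap lock and the mm reference if iteration
 * handed us a live position, then release the pinned task.
 */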
static void m_stop(struct seq_file *m, void *_vml)
{
    struct proc_maps_private *priv = m->private;

    if (!IS_ERR_OR_NULL(_vml)) {
        mmap_read_unlock(priv->mm);
        mmput(priv->mm);
    }
    if (priv->task) {
        put_task_struct(priv->task);
        priv->task = NULL;
    }
}

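/*
 * seq_file next: advance to the next VMA in the rbtree.
 */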
static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
    struct rb_node *p = _p;

    (*pos)++;
    return p ? rb_next(p) : NULL;
}

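/* iteration ops for /proc/<pid>/maps on nommu */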
static const struct seq_operations proc_pid_maps_ops = {
    .start  = m_start,
    .next   = m_next,
    .stop   = m_stop,
    .show   = show_map
};

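/*
 * Open a maps file: allocate the seq_file private state and take a
 * reference on the target mm, subject to a PTRACE_MODE_READ access check.
 */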
static int maps_open(struct inode *inode, struct file *file,
             const struct seq_operations *ops)
{
    struct proc_maps_private *priv;

    priv = __seq_open_private(file, ops, sizeof(*priv));
    if (!priv)
        return -ENOMEM;

    priv->inode = inode;
    priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
    if (IS_ERR(priv->mm)) {
        int err = PTR_ERR(priv->mm);

        seq_release_private(inode, file);
        return err;
    }

    return 0;
}

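/*
 * Release a maps file: drop the mm reference taken at open time and free
 * the seq_file private state.
 */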
static int map_release(struct inode *inode, struct file *file)
{
    struct seq_file *seq = file->private_data;
    struct proc_maps_private *priv = seq->private;

    if (priv->mm)
        mmdrop(priv->mm);

    return seq_release_private(inode, file);
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
    return maps_open(inode, file, &proc_pid_maps_ops);
}

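/* hooked up from the /proc/<pid> directory by fs/proc/base.c */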
const struct file_operations proc_pid_maps_operations = {
    .open       = pid_maps_open,
    .read       = seq_read,
    .llseek     = seq_lseek,
    .release    = map_release,
};