Date: Wed, 8 Sep 2004
From: William Lee Irwin III
Subject: [2/2] handle CONFIG_MMU=n and use new vm stats for CONFIG_MMU=y
On Wed, Sep 08, 2004 at 06:15:49PM -0700, William Lee Irwin III wrote:
>> This is a straight rediff of nproc vs. 2.6.9-rc1-mm4. No changes
>> whatsoever to the underlying code were made; rather, this merely
>> resolves offsets so it applies cleanly.
>> Compiletested on ia64.

On Wed, Sep 08, 2004 at 06:17:08PM -0700, William Lee Irwin III wrote:
> Repost with appropriate Subject: line.

Make __task_mem() and __task_mem_cheap() use the appropriate methods
for CONFIG_MMU=y and add an attempt at correct code for CONFIG_MMU=n.
The new method of /proc/ accounting reads counters kept up to date in
the mm instead of iterating over vmas, so for CONFIG_MMU=y none of the
per-mm statistics require acquiring mm->mmap_sem any longer. The
CONFIG_MMU=n case still has to iterate over the tblocks to calculate them.
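
For reference, the << (PAGE_SHIFT - 10) shifts in the patch below are
just the usual pages-to-KB conversion; a minimal sketch (this
pages_to_kb() helper is mine, for illustration only, not part of the
patch):

	/*
	 * A page is (1 << PAGE_SHIFT) bytes and 1KB is (1 << 10) bytes,
	 * so a page count converts to KB by shifting by the difference.
	 * With 4KB pages (PAGE_SHIFT == 12) that is << 2,
	 * e.g. 3 pages -> 3 << 2 == 12KB.
	 */
	static inline unsigned long pages_to_kb(unsigned long pages)
	{
		return pages << (PAGE_SHIFT - 10);
	}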


-- wli

Index: mm4-2.6.9-rc1/kernel/nproc.c
===================================================================
--- mm4-2.6.9-rc1.orig/kernel/nproc.c 2004-09-08 17:45:27.503587983 -0700
+++ mm4-2.6.9-rc1/kernel/nproc.c 2004-09-08 18:11:24.826811093 -0700
@@ -44,44 +44,20 @@
  * __task_mem/__task_mem_cheap basically duplicate the MMU version of
  * task_mem, but they are split by cost and work on structs.
  */
-
+#ifdef CONFIG_MMU
 static void __task_mem(struct task_struct *tsk, struct task_mem *res)
 {
 	struct mm_struct *mm = get_task_mm(tsk);
-	if (mm) {
-		unsigned long data = 0, stack = 0, exec = 0, lib = 0;
-		struct vm_area_struct *vma;
-
-		down_read(&mm->mmap_sem);
-		for (vma = mm->mmap; vma; vma = vma->vm_next) {
-			unsigned long len = (vma->vm_end - vma->vm_start) >> 10;
-			if (!vma->vm_file) {
-				data += len;
-				if (vma->vm_flags & VM_GROWSDOWN)
-					stack += len;
-				continue;
-			}
-			if (vma->vm_flags & VM_WRITE)
-				continue;
-			if (vma->vm_flags & VM_EXEC) {
-				exec += len;
-				if (vma->vm_flags & VM_EXECUTABLE)
-					continue;
-				lib += len;
-			}
-		}
-		res->vmdata = data - stack;
-		res->vmstack = stack;
-		res->vmexe = exec - lib;
-		res->vmlib = lib;
-		up_read(&mm->mmap_sem);
 
+	if (!mm)
+		memset(res, 0, sizeof(struct task_mem));
+	else {
+		res->vmdata = (mm->total_vm - mm->shared_vm - mm->stack_vm)
+				<< (PAGE_SHIFT - 10);
+		res->vmstack = mm->stack_vm << (PAGE_SHIFT - 10);
+		res->vmexe = PAGE_ALIGN(mm->end_code - mm->start_code) >> 10;
+		res->vmlib = (mm->exec_vm << (PAGE_SHIFT - 10)) - res->vmexe;
 		mmput(mm);
-	} else {
-		res->vmdata = 0;
-		res->vmstack = 0;
-		res->vmexe = 0;
-		res->vmlib = 0;
 	}
 }

@@ -99,6 +75,84 @@
 		res->vmrss = 0;
 	}
 }
+#else /* !CONFIG_MMU */
+static void __task_mem(task_t *task, struct task_mem *stats)
+{
+	struct mm_struct *mm = get_task_mm(task);
+
+	if (!mm)
+		memset(stats, 0, sizeof(struct task_mem));
+	else {
+		unsigned long bytes = 0, sbytes = 0, slack = 0;
+		struct mm_tblock_struct *tblk;
+
+		down_read(&mm->mmap_sem);
+		/* walk the tblock list, splitting shared from private */
+		for (tblk = &mm->context.tblock; tblk; tblk = tblk->next) {
+			if (!tblk->rblock)
+				continue;
+			bytes += kobjsize(tblk);
+			if (atomic_read(&mm->mm_count) > 1 ||
+					tblk->rblock->refcount > 1) {
+				sbytes += kobjsize(tblk->rblock->kblock);
+				sbytes += kobjsize(tblk->rblock);
+			} else {
+				bytes += kobjsize(tblk->rblock->kblock);
+				bytes += kobjsize(tblk->rblock);
+				slack += kobjsize(tblk->rblock->kblock);
+			}
+		}
+		if (atomic_read(&mm->mm_count) > 1)
+			sbytes += kobjsize(mm);
+		else
+			bytes += kobjsize(mm);
+		up_read(&mm->mmap_sem);
+		mmput(mm);
+		if (task->files && atomic_read(&task->files->count) > 1)
+			sbytes += kobjsize(task->files);
+		else
+			bytes += kobjsize(task->files);
+		if (task->sighand && atomic_read(&task->sighand->count) > 1)
+			sbytes += kobjsize(task->sighand);
+		else
+			bytes += kobjsize(task->sighand);
+		bytes += kobjsize(task);
+		/* some interpretation is needed */
+		stats->vmdata = bytes;
+		stats->vmstack = sbytes;
+		stats->vmexe = stats->vmlib = 0;
+	}
+}
+
+static void __task_mem_cheap(task_t *task, struct task_mem_cheap *stats)
+{
+	struct mm_struct *mm = get_task_mm(task);
+	struct mm_tblock_struct *tblk;
+
+	memset(stats, 0, sizeof(struct task_mem_cheap));
+	/* mm is NULL for kernel threads */
+	if (!mm)
+		return;
+	stats->vmrss += kobjsize(mm);
+	down_read(&mm->mmap_sem);
+	for (tblk = &mm->context.tblock; tblk; tblk = tblk->next) {
+		if (tblk->next)
+			stats->vmrss += kobjsize(tblk->next);
+		if (tblk->rblock) {
+			stats->vmsize += kobjsize(tblk->rblock);
+			stats->vmrss += kobjsize(tblk->rblock);
+			stats->vmrss += kobjsize(tblk->rblock->kblock);
+		}
+	}
+	stats->vmrss += mm->end_code - mm->start_code;
+	stats->vmrss += mm->start_stack - mm->start_data;
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+	/* kobjsize() returns bytes; the stats are reported in KB */
+	stats->vmrss >>= 10;
+	stats->vmsize >>= 10;
+}
+#endif /* !CONFIG_MMU */

 /*
  * page_alloc.c already has an extra function broken out to fill a