patch-2.1.80 linux/mm/mmap.c
Next file: linux/mm/page_alloc.c
Previous file: linux/mm/filemap.c
Back to the patch index
Back to the overall index
- Lines: 109
- Date: Wed Jan 14 14:52:44 1998
- Orig file: v2.1.79/linux/mm/mmap.c
- Orig date: Sat Nov 1 11:04:27 1997
diff -u --recursive --new-file v2.1.79/linux/mm/mmap.c linux/mm/mmap.c
@@ -173,6 +173,10 @@
if (off + len < off)
return -EINVAL;
+ /* Too many mappings? */
+ if (mm->map_count > MAX_MAP_COUNT)
+ return -ENOMEM;
+
/* mlock MCL_FUTURE? */
if (mm->def_flags & VM_LOCKED) {
unsigned long locked = mm->locked_vm << PAGE_SHIFT;
@@ -452,6 +456,7 @@
*/
int do_munmap(unsigned long addr, size_t len)
{
+ struct mm_struct * mm;
struct vm_area_struct *mpnt, *next, *free, *extra;
int freed;
@@ -466,7 +471,8 @@
* every area affected in some way (by any overlap) is put
* on the list. If nothing is put on, nothing is affected.
*/
- mpnt = current->mm->mmap;
+ mm = current->mm;
+ mpnt = mm->mmap;
while(mpnt && mpnt->vm_end <= addr)
mpnt = mpnt->vm_next;
if (!mpnt)
@@ -496,6 +502,13 @@
mpnt = next;
}
+ if (free && (free->vm_start < addr) && (free->vm_end > addr+len)) {
+ if (mm->map_count > MAX_MAP_COUNT) {
+ kmem_cache_free(vm_area_cachep, extra);
+ return -ENOMEM;
+ }
+ }
+
/* Ok - we have the memory areas we should free on the 'free' list,
* so release them, and unmap the page range..
* If the one of the segments is only being partially unmapped,
@@ -508,6 +521,7 @@
free = free->vm_next;
freed = 1;
+ mm->map_count--;
remove_shared_vm_struct(mpnt);
st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
@@ -518,9 +532,9 @@
if (mpnt->vm_ops && mpnt->vm_ops->unmap)
mpnt->vm_ops->unmap(mpnt, st, size);
- flush_cache_range(current->mm, st, end);
- zap_page_range(current->mm, st, size);
- flush_tlb_range(current->mm, st, end);
+ flush_cache_range(mm, st, end);
+ zap_page_range(mm, st, size);
+ flush_tlb_range(mm, st, end);
/*
* Fix the mapping, and free the old area if it wasn't reused.
@@ -534,7 +548,7 @@
kmem_cache_free(vm_area_cachep, extra);
if (freed)
- current->mm->mmap_cache = NULL; /* Kill the cache. */
+ mm->mmap_cache = NULL; /* Kill the cache. */
return 0;
}
@@ -560,6 +574,7 @@
if (mpnt->vm_ops->close)
mpnt->vm_ops->close(mpnt);
}
+ mm->map_count--;
remove_shared_vm_struct(mpnt);
zap_page_range(mm, start, size);
if (mpnt->vm_dentry)
@@ -567,6 +582,10 @@
kmem_cache_free(vm_area_cachep, mpnt);
mpnt = next;
}
+
+ /* This is just debugging */
+ if (mm->map_count)
+ printk("exit_mmap: map count is %d\n", mm->map_count);
}
/* Insert vm structure into process list sorted by address
@@ -577,6 +596,8 @@
struct vm_area_struct **pprev = &mm->mmap;
struct dentry * dentry;
+ mm->map_count++;
+
/* Find where to link it in. */
while(*pprev && (*pprev)->vm_start <= vmp->vm_start)
pprev = &(*pprev)->vm_next;
@@ -668,6 +689,7 @@
mpnt->vm_start = mpnt->vm_end;
mpnt->vm_ops->close(mpnt);
}
+ mm->map_count--;
remove_shared_vm_struct(mpnt);
if (mpnt->vm_dentry)
dput(mpnt->vm_dentry);
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen, slshen@lbl.gov