patch-2.3.25 linux/arch/arm/mm/init.c

diff -u --recursive --new-file v2.3.24/linux/arch/arm/mm/init.c linux/arch/arm/mm/init.c
@@ -1,7 +1,7 @@
 /*
  *  linux/arch/arm/mm/init.c
  *
- *  Copyright (C) 1995-1999  Russell King
+ *  Copyright (C) 1995-1999 Russell King
  */
 
 #include <linux/config.h>
@@ -18,6 +18,7 @@
 #include <linux/swapctl.h>
 #include <linux/smp.h>
 #include <linux/init.h>
+#include <linux/bootmem.h>
 #ifdef CONFIG_BLK_DEV_INITRD
 #include <linux/blk.h>
 #endif
@@ -27,74 +28,95 @@
 #include <asm/pgtable.h>
 #include <asm/dma.h>
 #include <asm/hardware.h>
+#include <asm/setup.h>
 
 #include "map.h"
 
+static unsigned long totalram_pages;
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
-#ifndef CONFIG_NO_PGT_CACHE
-struct pgtable_cache_struct quicklists;
-#endif
 
-extern unsigned long free_area_init(unsigned long, unsigned long);
 extern void show_net_buffers(void);
 
-extern char _etext, _text, _edata, __bss_start, _end;
-extern char __init_begin, __init_end;
-
-int do_check_pgt_cache(int low, int high)
-{
-	int freed = 0;
-#ifndef CONFIG_NO_PGT_CACHE
-	if(pgtable_cache_size > high) {
-		do {
-			if(pgd_quicklist)
-				free_pgd_slow(get_pgd_fast()), freed++;
-			if(pmd_quicklist)
-				free_pmd_slow(get_pmd_fast()), freed++;
-			if(pte_quicklist)
-				free_pte_slow(get_pte_fast()), freed++;
-		} while(pgtable_cache_size > low);
-	}
-#endif
-	return freed;
-}
-
 /*
- * BAD_PAGE is the page that is used for page faults when linux
- * is out-of-memory. Older versions of linux just did a
+ * empty_bad_page is the page that is used for page faults when
+ * linux is out-of-memory. Older versions of linux just did a
  * do_exit(), but using this instead means there is less risk
  * for a process dying in kernel mode, possibly leaving an inode
  * unused etc.
  *
- * BAD_PAGETABLE is the accompanying page-table: it is initialized
- * to point to BAD_PAGE entries.
+ * empty_bad_pte_table is the accompanying page-table: it is
+ * initialized to point to empty_bad_page entries.
  *
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
+ * empty_zero_page is a special page that is used for
+ * zero-initialized data and COW.
  */
-pte_t *empty_bad_page_table;
+struct page *empty_zero_page;
+struct page *empty_bad_page;
+pte_t *empty_bad_pte_table;
 
-pte_t *__bad_pagetable(void)
+pte_t *get_bad_pte_table(void)
 {
-	pte_t bad_page;
+	pte_t v;
 	int i;
 
-	bad_page = BAD_PAGE;
+	v = pte_mkdirty(mk_pte(empty_bad_page, PAGE_SHARED));
+
 	for (i = 0; i < PTRS_PER_PTE; i++)
-		set_pte(empty_bad_page_table + i, bad_page);
+		set_pte(empty_bad_pte_table + i, v);
 
-	return empty_bad_page_table;
+	return empty_bad_pte_table;
 }
 
-unsigned long *empty_zero_page;
-unsigned long *empty_bad_page;
+void __handle_bad_pmd(pmd_t *pmd)
+{
+	pmd_ERROR(*pmd);
+#ifdef CONFIG_DEBUG_ERRORS
+	__backtrace();
+#endif
+	set_pmd(pmd, mk_user_pmd(get_bad_pte_table()));
+}
 
-pte_t __bad_page(void)
+void __handle_bad_pmd_kernel(pmd_t *pmd)
 {
-	memzero (empty_bad_page, PAGE_SIZE);
-	return pte_nocache(pte_mkdirty(mk_pte((unsigned long) empty_bad_page, PAGE_SHARED)));
+	pmd_ERROR(*pmd);
+#ifdef CONFIG_DEBUG_ERRORS
+	__backtrace();
+#endif
+	set_pmd(pmd, mk_kernel_pmd(get_bad_pte_table()));
 }
 
+#ifndef CONFIG_NO_PGT_CACHE
+struct pgtable_cache_struct quicklists;
+
+int do_check_pgt_cache(int low, int high)
+{
+	int freed = 0;
+
+	if(pgtable_cache_size > high) {
+		do {
+			if(pgd_quicklist) {
+				free_pgd_slow(get_pgd_fast());
+				freed++;
+			}
+			if(pmd_quicklist) {
+				free_pmd_slow(get_pmd_fast());
+				freed++;
+			}
+			if(pte_quicklist) {
+				free_pte_slow(get_pte_fast());
+				freed++;
+			}
+		} while(pgtable_cache_size > low);
+	}
+	return freed;
+}
+#else
+int do_check_pgt_cache(int low, int high)
+{
+	return 0;
+}
+#endif
+
 void show_mem(void)
 {
 	int free = 0, total = 0, reserved = 0;
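
The page-table quicklist handling moves as well: when CONFIG_NO_PGT_CACHE is set, the quicklists structure disappears and do_check_pgt_cache() collapses to a stub returning 0, instead of hiding #ifdefs inside a single function body, and the old comma expressions become braced statements. The quicklists themselves are a freelist cache of recently freed page-table pages, trimmed back to a low watermark once they grow past a high one. A minimal userspace sketch of that pattern (quicklist_push/quicklist_pop/quicklist_trim are illustrative names, not the kernel's API):

	#include <stdlib.h>

	/*
	 * Freed page-table pages are pushed onto a singly linked
	 * freelist and reused by the fast-path allocator; the first
	 * word of each cached page holds the link to the next one.
	 */
	struct quicklist {
		void **head;		/* first cached page */
		unsigned int size;	/* number of cached pages */
	};

	static void *quicklist_pop(struct quicklist *ql)
	{
		void **page = ql->head;
		if (page) {
			ql->head = (void **)*page;	/* unlink head */
			ql->size--;
		}
		return page;
	}

	static void quicklist_push(struct quicklist *ql, void *page)
	{
		*(void **)page = ql->head;		/* link in at head */
		ql->head = page;
		ql->size++;
	}

	/*
	 * The do_check_pgt_cache() shape: once the cache exceeds
	 * 'high' entries, release pages until it is back down to 'low'.
	 */
	static int quicklist_trim(struct quicklist *ql,
				  unsigned int low, unsigned int high)
	{
		int freed = 0;

		if (ql->size > high) {
			do {
				free(quicklist_pop(ql));
				freed++;
			} while (ql->size > low);
		}
		return freed;
	}
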
@@ -104,23 +126,28 @@
 	printk("Mem-info:\n");
 	show_free_areas();
 	printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
-	for (page = mem_map, end = mem_map + max_mapnr;
-	     page < end; page++) {
+
+	page = mem_map;
+	end  = mem_map + max_mapnr;
+
+	do {
 		if (PageSkip(page)) {
-			if (page->next_hash < page)
-				break;
 			page = page->next_hash;
+			if (page == NULL)
+				break;
 		}
 		total++;
 		if (PageReserved(page))
 			reserved++;
 		else if (PageSwapCache(page))
 			cached++;
-		else if (!atomic_read(&page->count))
+		else if (!page_count(page))
 			free++;
 		else
 			shared += atomic_read(&page->count) - 1;
-	}
+		page++;
+	} while (page < end);
+
 	printk("%d pages of RAM\n", total);
 	printk("%d free pages\n", free);
 	printk("%d reserved pages\n", reserved);
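
The old for loop bailed out of the scan as soon as a PageSkip link pointed backwards; the new do/while follows the next_hash link across each hole in mem_map and treats a NULL link as the end of the map, and the free counter now goes through the page_count() accessor. A hedged sketch of the traversal, with a simplified stand-in for struct page:

	#include <stddef.h>

	/*
	 * Stand-in for struct page: 'skip' marks the first page of a
	 * hole, and 'next' (next_hash in the kernel) points at the
	 * first valid page after it, or is NULL at the end of the map.
	 */
	struct fake_page {
		int skip;
		struct fake_page *next;
	};

	/* Count pages, hopping over holes the way show_mem() now does. */
	static int count_pages(struct fake_page *map, struct fake_page *end)
	{
		struct fake_page *page = map;
		int total = 0;

		do {
			if (page->skip) {
				page = page->next;	/* jump the hole */
				if (page == NULL)
					break;		/* end of map */
			}
			total++;
			page++;
		} while (page < end);

		return total;
	}
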
@@ -138,31 +165,42 @@
 /*
  * paging_init() sets up the page tables...
  */
-unsigned long __init paging_init(unsigned long start_mem, unsigned long end_mem)
+void __init paging_init(void)
 {
-	start_mem = PAGE_ALIGN(start_mem);
-
-	empty_zero_page = (unsigned long *)start_mem;
-	memzero(empty_zero_page, PAGE_SIZE);
-	start_mem += PAGE_SIZE;
-
-	empty_bad_page = (unsigned long *)start_mem;
-	start_mem += PAGE_SIZE;
+	void *zero_page, *bad_page, *bad_table;
 
 #ifdef CONFIG_CPU_32
-	start_mem += PTRS_PER_PTE * BYTES_PER_PTR;
+#define TABLE_OFFSET	(PTRS_PER_PTE)
+#else
+#define TABLE_OFFSET	0
 #endif
-	empty_bad_page_table = (pte_t *)start_mem;
-	start_mem += PTRS_PER_PTE * BYTES_PER_PTR;
-
-	start_mem = setup_page_tables(start_mem, end_mem);
+#define TABLE_SIZE	((TABLE_OFFSET + PTRS_PER_PTE) * sizeof(void *))
 
+	/*
+	 * allocate what we need for the bad pages
+	 */
+	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
+	bad_page  = alloc_bootmem_low_pages(PAGE_SIZE);
+	bad_table = alloc_bootmem_low_pages(TABLE_SIZE);
+
+	/*
+	 * initialise the page tables
+	 */
+	pagetable_init();
 	flush_tlb_all();
 
-	end_mem &= PAGE_MASK;
-	high_memory = (void *)end_mem;
+	free_area_init(max_low_pfn);
 
-	return free_area_init(start_mem, end_mem);
+	/*
+	 * finish off the bad pages once
+	 * the mem_map is initialised
+	 */
+	memzero(zero_page, PAGE_SIZE);
+	memzero(bad_page, PAGE_SIZE);
+
+	empty_zero_page = mem_map + MAP_NR(zero_page);
+	empty_bad_page  = mem_map + MAP_NR(bad_page);
+	empty_bad_pte_table = ((pte_t *)bad_table) + TABLE_OFFSET;
 }
 
 static inline void free_unused_mem_map(void)
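
paging_init() no longer carves its pages out of a hand-threaded start_mem pointer: the zero page, bad page and bad page-table now come from the generic boot-time allocator (hence the new linux/bootmem.h include), and free_area_init() is sized from max_low_pfn instead of a start/end address pair. Under the hood the bootmem allocator is a first-fit scan over a one-bit-per-page bitmap; a toy sketch of that scheme (toy_alloc_bootmem_pages and all sizes here are illustrative, not the kernel's interface):

	#define PAGE_SIZE	4096UL
	#define MAX_PFN		32768UL		/* toy map covering 128MB */

	static unsigned char bootmap[MAX_PFN / 8];	/* 1 bit per page */

	static int page_reserved(unsigned long pfn)
	{
		return bootmap[pfn >> 3] & (1 << (pfn & 7));
	}

	static void reserve_page(unsigned long pfn)
	{
		bootmap[pfn >> 3] |= 1 << (pfn & 7);
	}

	/*
	 * First-fit scan for a run of 'pages' free pages; returns the
	 * physical address of the run, or ~0UL when nothing fits.  The
	 * real allocator also handles alignment, preferred goals and
	 * merging with a partially used previous allocation.
	 */
	static unsigned long toy_alloc_bootmem_pages(unsigned long pages)
	{
		unsigned long pfn, run = 0, start = 0;

		for (pfn = 0; pfn < MAX_PFN; pfn++) {
			if (page_reserved(pfn)) {
				run = 0;		/* restart the run */
				continue;
			}
			if (run++ == 0)
				start = pfn;
			if (run == pages) {
				unsigned long i;

				for (i = start; i < start + pages; i++)
					reserve_page(i);
				return start * PAGE_SIZE;
			}
		}
		return ~0UL;
	}
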
@@ -184,7 +222,7 @@
 			high = ((unsigned long)page->next_hash) & PAGE_MASK;
 
 		while (low < high) {
-			clear_bit(PG_reserved, &mem_map[MAP_NR(low)].flags);
+			ClearPageReserved(mem_map + MAP_NR(low));
 			low += PAGE_SIZE;
 		}
 	}
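
The clear_bit() call becomes the ClearPageReserved() page-flag accessor; in kernels of this vintage the accessor is expected to expand to essentially the line it replaces (hedged, paraphrasing include/linux/mm.h):

	#define ClearPageReserved(page)	clear_bit(PG_reserved, &(page)->flags)
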
@@ -195,67 +233,35 @@
  * memory is free.  This is done after various parts of the system have
  * claimed their memory after the kernel image.
  */
-void __init mem_init(unsigned long start_mem, unsigned long end_mem)
+void __init mem_init(void)
 {
 	int codepages = 0;
 	int reservedpages = 0;
 	int datapages = 0;
 	int initpages = 0, i, min_nr;
-	unsigned long tmp;
 
-	end_mem      &= PAGE_MASK;
-	high_memory   = (void *)end_mem;
-	max_mapnr     = MAP_NR(end_mem);
-	num_physpages = 0;
+	max_mapnr     = max_low_pfn;
+	high_memory   = (void *)__va(max_low_pfn * PAGE_SIZE);
 
-	/* setup address validity bitmap */
-	start_mem = create_mem_holes(start_mem, end_mem);
-
-	start_mem = PAGE_ALIGN(start_mem);
-
-	/* mark usable pages in the mem_map[] */
-	mark_usable_memory_areas(start_mem, end_mem);
-
-	/* free unused mem_map[] entries */
-	free_unused_mem_map();
-
-#define BETWEEN(w,min,max) ((w) >= (unsigned long)(min) && \
-			    (w) < (unsigned long)(max))
-
-	for (tmp = PAGE_OFFSET; tmp < end_mem ; tmp += PAGE_SIZE) {
-		if (PageSkip(mem_map+MAP_NR(tmp))) {
-			unsigned long next;
-
-			next = mem_map[MAP_NR(tmp)].next_hash - mem_map;
-
-			next = (next << PAGE_SHIFT) + PAGE_OFFSET;
-
-			if (next < tmp || next >= end_mem)
-				break;
-			tmp = next;
-		}
-		num_physpages++;
-		if (PageReserved(mem_map+MAP_NR(tmp))) {
-			if (BETWEEN(tmp, &__init_begin, &__init_end))
-				initpages++;
-			else if (BETWEEN(tmp, &_text, &_etext))
-				codepages++;
-			else if (BETWEEN(tmp, &_etext, &_edata))
-				datapages++;
-			else if (BETWEEN(tmp, &__bss_start, start_mem))
-				datapages++;
-			else
-				reservedpages++;
-			continue;
-		}
-		atomic_set(&mem_map[MAP_NR(tmp)].count, 1);
-#ifdef CONFIG_BLK_DEV_INITRD
-		if (!initrd_start || !BETWEEN(tmp, initrd_start, initrd_end))
+#ifdef CONFIG_CPU_32
+	/*
+	 * We may have non-contiguous memory.  Setup the PageSkip stuff,
+	 * and mark the areas of mem_map which can be freed
+	 */
+	if (meminfo.nr_banks != 1)
+		create_memmap_holes();
 #endif
-			free_page(tmp);
-	}
 
-#undef BETWEEN
+	/* this will put all unused low memory onto the freelists */
+	totalram_pages += free_all_bootmem();
+
+	/*
+	 * Since our memory may not be contiguous, calculate the
+	 * real number of pages we have in this system
+	 */
+	num_physpages = 0;
+	for (i = 0; i < meminfo.nr_banks; i++)
+		num_physpages += meminfo.bank[i].size >> PAGE_SHIFT;
 
 	printk ("Memory: %luk/%luM available (%dk code, %dk reserved, %dk data, %dk init)\n",
 		 (unsigned long) nr_free_pages << (PAGE_SHIFT-10),
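
Because ARM memory may be split across several discontiguous banks, the page count can no longer be derived from a single end address; it is summed over the bank table the boot loader hands the kernel. A hedged sketch of that computation with an illustrative bank layout (the real meminfo definition lives in asm/setup.h; the field names here are assumed):

	#define PAGE_SHIFT	12
	#define NR_BANKS	4

	struct toy_meminfo {
		int nr_banks;
		struct {
			unsigned long start;	/* physical base */
			unsigned long size;	/* bytes in bank */
		} bank[NR_BANKS];
	};

	/* Two discontiguous 16MB banks give 8192 4k pages in total. */
	static unsigned long count_physpages(const struct toy_meminfo *mi)
	{
		unsigned long pages = 0;
		int i;

		for (i = 0; i < mi->nr_banks; i++)
			pages += mi->bank[i].size >> PAGE_SHIFT;
		return pages;
	}
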
@@ -265,6 +271,9 @@
 		 datapages     << (PAGE_SHIFT-10),
 		 initpages     << (PAGE_SHIFT-10));
 
+	/*
+	 * Correct freepages watermarks
+	 */
 	i = nr_free_pages >> 7;
 	if (PAGE_SIZE < 32768)
 		min_nr = 10;
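
As a worked example of the figure being corrected here: on a machine with 32MB free in 4k pages, nr_free_pages is 8192, so nr_free_pages >> 7 yields a base watermark of 64 pages, while min_nr stays at the floor of 10 since PAGE_SIZE is below 32768.
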
@@ -288,22 +297,26 @@
 #endif
 }
 
-static void free_area(unsigned long addr, unsigned long end, char *s)
+static inline void free_area(unsigned long addr, unsigned long end, char *s)
 {
 	unsigned int size = (end - addr) >> 10;
+	struct page *page = mem_map + MAP_NR(addr);
 
-	for (; addr < end; addr += PAGE_SIZE) {
-		mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
-		atomic_set(&mem_map[MAP_NR(addr)].count, 1);
+	for (; addr < end; addr += PAGE_SIZE, page++) {
+		ClearPageReserved(page);
+		set_page_count(page, 1);
 		free_page(addr);
+		totalram_pages++;
 	}
 
 	if (size)
 		printk(" %dk %s", size, s);
 }
 
-void free_initmem (void)
+void free_initmem(void)
 {
+	extern char __init_begin, __init_end;
+
 	printk("Freeing unused kernel memory:");
 
 	free_area((unsigned long)(&__init_begin),
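
The __init_begin/__init_end externs move inside free_initmem(), their only remaining user in this file. The mechanism itself is unchanged: code and data marked __init are collected by the linker into one bounded section, and free_initmem() releases that range page by page once boot is complete. A hedged sketch of the pattern (section name per linux/init.h of this era; probe_hardware is illustrative):

	#define __init	__attribute__((__section__(".text.init")))

	extern char __init_begin, __init_end;	/* linker-provided bounds */

	/* Runs once during boot; its pages are reclaimed afterwards. */
	static void __init probe_hardware(void)
	{
	}

	static unsigned long init_section_bytes(void)
	{
		return (unsigned long)&__init_end -
		       (unsigned long)&__init_begin;
	}
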
@@ -333,28 +346,11 @@
 
 void si_meminfo(struct sysinfo *val)
 {
-	struct page *page, *end;
-
-	val->totalram = 0;
+	val->totalram  = totalram_pages;
 	val->sharedram = 0;
-	val->freeram = nr_free_pages << PAGE_SHIFT;
-	val->bufferram = atomic_read(&buffermem);
-	for (page = mem_map, end = mem_map + max_mapnr;
-	     page < end; page++) {
-		if (PageSkip(page)) {
-			if (page->next_hash < page)
-				break;
-			page = page->next_hash;
-		}
-		if (PageReserved(page))
-			continue;
-		val->totalram++;
-		if (!atomic_read(&page->count))
-			continue;
-		val->sharedram += atomic_read(&page->count) - 1;
-	}
-	val->totalram <<= PAGE_SHIFT;
-	val->sharedram <<= PAGE_SHIFT;
-	val->totalbig = 0;
-	val->freebig = 0;
+	val->freeram   = nr_free_pages;
+	val->bufferram = atomic_read(&buffermem_pages);
+	val->totalhigh = 0;
+	val->freehigh  = 0;
+	val->mem_unit  = PAGE_SIZE;
 }
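
The fields si_meminfo() fills are now page counts rather than byte counts, with the new mem_unit field carrying the scale; userspace recovers byte totals by multiplying the two. A minimal consumer via sysinfo(2), assuming a kernel recent enough to fill in mem_unit:

	#include <stdio.h>
	#include <sys/sysinfo.h>

	int main(void)
	{
		struct sysinfo si;

		/* totalram is a page count scaled by mem_unit */
		if (sysinfo(&si) == 0)
			printf("total ram: %lu bytes\n",
			       (unsigned long)si.totalram * si.mem_unit);
		return 0;
	}
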
