patch-2.3.13 linux/include/asm-arm/proc-armv/pgtable.h

diff -u --recursive --new-file v2.3.12/linux/include/asm-arm/proc-armv/pgtable.h linux/include/asm-arm/proc-armv/pgtable.h
@@ -220,25 +220,6 @@
 } while (0)
 
 
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-#define pgd_none(pgd)		(0)
-#define pgd_bad(pgd)		(0)
-#define pgd_present(pgd)	(1)
-#define pgd_clear(pgdp)
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-extern __inline__ pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
-{
-	return mm->pgd + (address >> PGDIR_SHIFT);
-}
-
 extern unsigned long get_page_2k(int priority);
 extern void free_page_2k(unsigned long page);
 
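The pgd helpers deleted here are not dropped from the header: they reappear at the end of this patch, below pte_offset(), where the new free_pgd_slow() can reach the pmd/pte helpers. For illustration only, the index arithmetic behind pgd_offset() reduces to a single shift; a minimal standalone sketch, assuming the ARM value PGDIR_SHIFT == 20 (one 1MB section per first-level entry):

	#include <stdio.h>

	#define PGDIR_SHIFT	20	/* assumed; defined elsewhere in pgtable.h */

	/* pgd_offset(mm, addr) is just mm->pgd + this index */
	static unsigned long pgd_index(unsigned long address)
	{
		return address >> PGDIR_SHIFT;
	}

	int main(void)
	{
		/* one megabyte above 0xC0000000 lands in slot 0xc01 */
		printf("index = %#lx\n", pgd_index(0xC0100000UL));
		return 0;
	}
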
@@ -263,55 +244,6 @@
 #error Pgtable caches have to be per-CPU, so that no locking is needed.
 #endif
 
-extern pgd_t *get_pgd_slow(void);
-
-extern __inline__ pgd_t *get_pgd_fast(void)
-{
-	unsigned long *ret;
-
-	if((ret = pgd_quicklist) != NULL) {
-		pgd_quicklist = (unsigned long *)(*ret);
-		ret[0] = ret[1];
-		clean_cache_area(ret, 4);
-		pgtable_cache_size--;
-	} else
-		ret = (unsigned long *)get_pgd_slow();
-	return (pgd_t *)ret;
-}
-
-extern __inline__ void free_pgd_fast(pgd_t *pgd)
-{
-	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
-	pgd_quicklist = (unsigned long *) pgd;
-	pgtable_cache_size++;
-}
-
-extern __inline__ void free_pgd_slow(pgd_t *pgd)
-{
-	free_pages((unsigned long) pgd, 2);
-}
-
-#define pgd_free(pgd)		free_pgd_fast(pgd)
-#define pgd_alloc()		get_pgd_fast()
-
-extern __inline__ void set_pgdir(unsigned long address, pgd_t entry)
-{
-	struct task_struct * p;
-	pgd_t *pgd;
-
-	read_lock(&tasklist_lock);
-	for_each_task(p) {
-		if (!p->mm)
-			continue;
-		*pgd_offset(p->mm,address) = entry;
-	}
-	read_unlock(&tasklist_lock);
-	for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
-		pgd[address >> PGDIR_SHIFT] = entry;
-}
-
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
 /****************
 * PMD functions *
 ****************/
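
The quicklist allocator removed above is re-added at the end of this patch with its free-list link moved from word [0] to word [1] (the __pgd_next() macro below). A plausible reason, offered as an assumption rather than anything the patch states, is that the first entry of an ARM page directory now holds a live mapping and must survive caching, so the link can no longer live there. A user-space sketch of that kind of intrusive LIFO list, with hypothetical names:

	#include <stddef.h>

	/* Thread the "next" pointer through word [1] of each cached
	 * block, leaving word [0] untouched; modelled on the new
	 * get_pgd_fast()/free_pgd_fast() further down. Illustrative
	 * code only, not kernel API.
	 */
	#define NEXT(block)	(((unsigned long *)(block))[1])

	static unsigned long *quicklist;

	static void cache_push(unsigned long *block)
	{
		NEXT(block) = (unsigned long)quicklist;
		quicklist = block;
	}

	static unsigned long *cache_pop(void)
	{
		unsigned long *ret = quicklist;
		if (ret != NULL)
			quicklist = (unsigned long *)NEXT(ret);
		/* the kernel version also repairs word [1] from word [2]
		 * and cleans the dirtied cache line before reuse */
		return ret;
	}

	int main(void)
	{
		unsigned long block[4] = { 0 };
		cache_push(block);
		return cache_pop() == block ? 0 : 1;
	}
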
@@ -406,7 +338,6 @@
 	return __phys_to_virt(ptr);
 }
 
-
 /****************
 * PTE functions *
 ****************/
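
The context above turns the physical address stored in a directory entry back into a kernel virtual address with __phys_to_virt(). On ARM this is conventionally a fixed linear offset; a sketch under that assumption, with illustrative, platform-dependent values:

	/* Assumed classic ARM linear mapping; the real PAGE_OFFSET and
	 * PHYS_OFFSET come from the platform headers, not this patch.
	 */
	#define PAGE_OFFSET	0xC0000000UL
	#define PHYS_OFFSET	0x00000000UL

	#define __phys_to_virt(phys)	((phys) - PHYS_OFFSET + PAGE_OFFSET)
	#define __virt_to_phys(virt)	((virt) - PAGE_OFFSET + PHYS_OFFSET)
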
@@ -552,8 +483,6 @@
 #define __S110  PAGE_SHARED
 #define __S111  PAGE_SHARED
 
-
-
 #define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
 
 /*
@@ -569,12 +498,12 @@
 #define PTE_BIT_FUNC(fn,op)			\
 extern inline pte_t fn##(pte_t pte) { pte_val(pte) op##; return pte; }
 
-//PTE_BIT_FUNC(pte_rdprotect, &= ~L_PTE_USER);
+/*PTE_BIT_FUNC(pte_rdprotect, &= ~L_PTE_USER);*/
 PTE_BIT_FUNC(pte_wrprotect, &= ~L_PTE_WRITE);
 PTE_BIT_FUNC(pte_exprotect, &= ~L_PTE_EXEC);
 PTE_BIT_FUNC(pte_mkclean,   &= ~L_PTE_DIRTY);
 PTE_BIT_FUNC(pte_mkold,     &= ~L_PTE_YOUNG);
-//PTE_BIT_FUNC(pte_mkread,    |= L_PTE_USER);
+/*PTE_BIT_FUNC(pte_mkread,    |= L_PTE_USER);*/
 PTE_BIT_FUNC(pte_mkwrite,   |= L_PTE_WRITE);
 PTE_BIT_FUNC(pte_mkexec,    |= L_PTE_EXEC);
 PTE_BIT_FUNC(pte_mkdirty,   |= L_PTE_DIRTY);
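
For reference, one expansion of the PTE_BIT_FUNC() macro above (the trailing ## before a parenthesis or semicolon is an old gcc-ism; the pasted result is what matters): PTE_BIT_FUNC(pte_wrprotect, &= ~L_PTE_WRITE) yields, reformatted onto several lines:

	extern inline pte_t pte_wrprotect(pte_t pte)
	{
		pte_val(pte) &= ~L_PTE_WRITE;	/* clear the write-permission bit */
		return pte;
	}
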
@@ -629,6 +558,100 @@
 	}
 	return (pte_t *) pmd_page(*pmd) + address;
 }
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pgd is never bad, and a pmd always exists (as it's folded
+ * into the pgd entry)
+ */
+#define pgd_none(pgd)		(0)
+#define pgd_bad(pgd)		(0)
+#define pgd_present(pgd)	(1)
+#define pgd_clear(pgdp)
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* used for quicklists */
+#define __pgd_next(pgd) (((unsigned long *)pgd)[1])
+
+/* to find an entry in a page-table-directory */
+extern __inline__ pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
+{
+	return mm->pgd + (address >> PGDIR_SHIFT);
+}
+
+extern pgd_t *get_pgd_slow(void);
+
+extern __inline__ pgd_t *get_pgd_fast(void)
+{
+	unsigned long *ret;
+
+	if((ret = pgd_quicklist) != NULL) {
+		pgd_quicklist = (unsigned long *)__pgd_next(ret);
+		ret[1] = ret[2];
+		clean_cache_area(ret + 1, 4);
+		pgtable_cache_size--;
+	} else
+		ret = (unsigned long *)get_pgd_slow();
+	return (pgd_t *)ret;
+}
+
+extern __inline__ void free_pgd_fast(pgd_t *pgd)
+{
+	__pgd_next(pgd) = (unsigned long) pgd_quicklist;
+	pgd_quicklist = (unsigned long *) pgd;
+	pgtable_cache_size++;
+}
+
+extern __inline__ void free_pgd_slow(pgd_t *pgd)
+{
+	do {
+		if (pgd) { /* can pgd be NULL? */
+			pmd_t *pmd;
+			pte_t *pte;
+
+			/* pgd is never none and bad - it is
+			 * detected in the pmd macros.
+			 */
+			pmd = pmd_offset(pgd, 0);
+			if (pmd_none(*pmd))
+				break;
+			if (pmd_bad(*pmd)) {
+				printk("free_pgd_slow: bad directory entry %08lx\n", pmd_val(*pmd));
+				pmd_clear(pmd);
+				break;
+			}
+
+			pte = pte_offset(pmd, 0);
+			pmd_clear(pmd);
+			pte_free(pte);
+			pmd_free(pmd);
+		}
+	} while (0);
+	free_pages((unsigned long) pgd, 2);
+}
+
+#define pgd_free(pgd)		free_pgd_fast(pgd)
+#define pgd_alloc()		get_pgd_fast()
+
+extern __inline__ void set_pgdir(unsigned long address, pgd_t entry)
+{
+	struct task_struct * p;
+	pgd_t *pgd;
+
+	read_lock(&tasklist_lock);
+	for_each_task(p) {
+		if (!p->mm)
+			continue;
+		*pgd_offset(p->mm,address) = entry;
+	}
+	read_unlock(&tasklist_lock);
+	for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)__pgd_next(pgd))
+		pgd[address >> PGDIR_SHIFT] = entry;
+}
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 #define SWP_TYPE(entry) (((entry) >> 2) & 0x7f)
 #define SWP_OFFSET(entry) ((entry) >> 9)

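From the two macros above, a swap entry carries its type in bits 2..8 (seven bits) and its offset from bit 9 upward. A small worked example; SWP_ENTRY() is the conventional inverse, and its exact form in this tree is an assumption:

	#include <stdio.h>

	#define SWP_TYPE(entry)		(((entry) >> 2) & 0x7f)
	#define SWP_OFFSET(entry)	((entry) >> 9)
	/* assumed inverse, for illustration only */
	#define SWP_ENTRY(type, offset)	(((type) << 2) | ((offset) << 9))

	int main(void)
	{
		unsigned long e = SWP_ENTRY(3UL, 1234UL);
		/* prints: type=3 offset=1234 */
		printf("type=%lu offset=%lu\n", SWP_TYPE(e), SWP_OFFSET(e));
		return 0;
	}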