patch-1.3.48 linux/arch/mips/mm/init.c

diff -u --recursive --new-file v1.3.47/linux/arch/mips/mm/init.c linux/arch/mips/mm/init.c
@@ -4,7 +4,6 @@
  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  *  Ported to MIPS by Ralf Baechle
  */
-
 #include <linux/config.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
@@ -17,11 +16,12 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 
+#include <asm/cachectl.h>
+#include <asm/jazzdma.h>
+#include <asm/vector.h>
 #include <asm/system.h>
 #include <asm/segment.h>
-#include <asm/mipsconfig.h>
-
-extern unsigned long pg0[1024];		/* page table for 0-4MB for everybody */
+#include <asm/pgtable.h>
 
 extern void deskstation_tyne_dma_init(void);
 extern void scsi_mem_init(unsigned long);
@@ -29,6 +29,8 @@
 extern void die_if_kernel(char *,struct pt_regs *,long);
 extern void show_net_buffers(void);
 
+extern char empty_zero_page[PAGE_SIZE];
+
 /*
  * BAD_PAGE is the page that is used for page faults when linux
  * is out-of-memory. Older versions of linux just did a
@@ -45,86 +47,177 @@
 pte_t * __bad_pagetable(void)
 {
 	extern char empty_bad_page_table[PAGE_SIZE];
-	unsigned long dummy;
+	unsigned long page;
+	unsigned long dummy1, dummy2;
 
+	page = ((unsigned long)empty_bad_page_table) + (PT_OFFSET - PAGE_OFFSET);
+#if __mips__ >= 3
+        /*
+         * Use 64bit code even for Linux/MIPS 32bit on R4000
+         */
 	__asm__ __volatile__(
-		".set\tnoreorder\n\t"
+		".set\tnoreorder\n"
+		".set\tnoat\n\t"
+		".set\tmips3\n\t"
+		"dsll32\t$1,%2,0\n\t"
+		"dsrl32\t%2,$1,0\n\t"
+		"or\t%2,$1\n"
+		"1:\tsd\t%2,(%0)\n\t"
+		"subu\t%1,1\n\t"
+		"bnez\t%1,1b\n\t"
+		"addiu\t%0,8\n\t"
+		".set\tmips0\n\t"
+		".set\tat\n"
+		".set\treorder"
+		:"=r" (dummy1),
+		 "=r" (dummy2)
+		:"r" (pte_val(BAD_PAGE)),
+		 "0" (page),
+		 "1" (PAGE_SIZE/8));
+#else
+	__asm__ __volatile__(
+		".set\tnoreorder\n"
 		"1:\tsw\t%2,(%0)\n\t"
-		"subu\t%1,%1,1\n\t"
-		"bne\t$0,%1,1b\n\t"
-		"addiu\t%0,%0,1\n\t"
+		"subu\t%1,1\n\t"
+		"bnez\t%1,1b\n\t"
+		"addiu\t%0,4\n\t"
 		".set\treorder"
-		:"=r" (dummy),
-		 "=r" (dummy)
+		:"=r" (dummy1),
+		 "=r" (dummy2)
 		:"r" (pte_val(BAD_PAGE)),
-		 "0" ((long) empty_bad_page_table),
-		 "1" (PTRS_PER_PAGE));
+		 "0" (page),
+		 "1" (PAGE_SIZE/4));
+#endif
 
-	return (pte_t *) empty_bad_page_table;
+	return (pte_t *)page;
 }
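
The interesting part of the rewritten fill loop is the MIPS3 path: the dsll32/dsrl32/or sequence replicates the 32-bit PTE value into both halves of a 64-bit register, so each sd instruction stores two page table entries per iteration, and the stores go through the PT_OFFSET alias of empty_bad_page_table so they hit memory directly. In C the two loops amount to roughly the following sketch (fill_page_with_pte is an illustrative name, not a function from the patch):

/*
 * Illustrative C equivalent of the fill loops above, not part of
 * the patch: replicate a 32-bit PTE value across one page, two
 * entries per 64-bit store on MIPS3-class CPUs, one entry per
 * 32-bit store otherwise.
 */
static void fill_page_with_pte(unsigned long page, unsigned long pteval)
{
#if __mips__ >= 3
	unsigned long long v;
	unsigned long long *p = (unsigned long long *) page;
	int count = PAGE_SIZE / 8;

	/* dsll32/dsrl32/or: duplicate pteval into both 32-bit halves */
	v = ((unsigned long long) pteval << 32) | pteval;
	while (count--)
		*p++ = v;		/* sd: two PTEs per store */
#else
	unsigned long *p = (unsigned long *) page;
	int count = PAGE_SIZE / 4;

	while (count--)
		*p++ = pteval;		/* sw: one PTE per store */
#endif
}
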
 
-pte_t __bad_page(void)
+static inline void
+__zeropage(unsigned long page)
 {
-	extern char empty_bad_page[PAGE_SIZE];
-	unsigned long dummy;
+	unsigned long dummy1, dummy2;
 
+#ifdef __R4000__
+        /*
+         * Use 64bit code even for Linux/MIPS 32bit on R4000
+         */
+	__asm__ __volatile__(
+		".set\tnoreorder\n"
+		".set\tnoat\n\t"
+		".set\tmips3\n"
+		"1:\tsd\t$0,(%0)\n\t"
+		"subu\t%1,1\n\t"
+		"bnez\t%1,1b\n\t"
+		"addiu\t%0,8\n\t"
+		".set\tmips0\n\t"
+		".set\tat\n"
+		".set\treorder"
+		:"=r" (dummy1),
+		 "=r" (dummy2)
+		:"0" (page),
+		 "1" (PAGE_SIZE/8));
+#else
 	__asm__ __volatile__(
-		".set\tnoreorder\n\t"
+		".set\tnoreorder\n"
 		"1:\tsw\t$0,(%0)\n\t"
-		"subu\t%1,%1,1\n\t"
-		"bne\t$0,%1,1b\n\t"
-		"addiu\t%0,%0,1\n\t"
+		"subu\t%1,1\n\t"
+		"bnez\t%1,1b\n\t"
+		"addiu\t%0,4\n\t"
 		".set\treorder"
-		:"=r" (dummy),
-		 "=r" (dummy)
-		:"0" ((long) empty_bad_page),
-		 "1" (PTRS_PER_PAGE));
+		:"=r" (dummy1),
+		 "=r" (dummy2)
+		:"0" (page),
+		 "1" (PAGE_SIZE/4));
+#endif
+}
 
-	return pte_mkdirty(mk_pte((unsigned long) empty_bad_page, PAGE_SHARED));
+static inline void
+zeropage(unsigned long page)
+{
+	sys_cacheflush((void *)page, PAGE_SIZE, BCACHE);
+	sync_mem();
+	__zeropage(page + (PT_OFFSET - PAGE_OFFSET));
+}
+
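
The ordering inside zeropage() is the point of the helper: write back any cached lines for the page, wait for the writebacks to reach memory, then clear the page through its PT_OFFSET alias so the zeroes land in RAM instead of the data cache. Annotated, the same three steps read:

/*
 * zeropage(), restated with comments; the steps must happen in
 * this order, or stale cache lines could later be written back
 * over the freshly cleared memory.
 */
static inline void zeropage_annotated(unsigned long page)
{
	/* 1: push dirty lines for this page out of both caches */
	sys_cacheflush((void *)page, PAGE_SIZE, BCACHE);
	/* 2: wait until those writebacks have completed */
	sync_mem();
	/* 3: zero through the alias, bypassing the cached mapping */
	__zeropage(page + (PT_OFFSET - PAGE_OFFSET));
}
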
+pte_t __bad_page(void)
+{
+	extern char empty_bad_page[PAGE_SIZE];
+	unsigned long page = (unsigned long)empty_bad_page;
+
+	zeropage(page);
+	return pte_mkdirty(mk_pte(page, PAGE_SHARED));
 }
 
 unsigned long __zero_page(void)
 {
-	extern char empty_zero_page[PAGE_SIZE];
-	unsigned long dummy;
+	unsigned long page = (unsigned long) empty_zero_page;
 
-	__asm__ __volatile__(
-		".set\tnoreorder\n\t"
-		"1:\tsw\t$0,(%0)\n\t"
-		"subu\t%1,%1,1\n\t"
-		"bne\t$0,%1,1b\n\t"
-		"addiu\t%0,%0,1\n\t"
-		".set\treorder"
-		:"=r" (dummy),
-		 "=r" (dummy)
-		:"0" ((long) empty_zero_page),
-		 "1" (PTRS_PER_PAGE));
+	zeropage(page);
+	return page;
+}
 
-	return (unsigned long) empty_zero_page;
+/*
+ * This is horribly inefficient ...
+ */
+void __copy_page(unsigned long from, unsigned long to)
+{
+	/*
+	 * Now copy page from uncached KSEG1 to KSEG0.  The copy destination
+	 * is in KSEG0 so that we keep stupid L2 caches happy.
+	 */
+	if(from == (unsigned long) empty_zero_page)
+	{
+		/*
+		 * The page copied most often is the COW empty_zero_page.
+		 * Since we know its contents we can avoid the writeback
+		 * read of the page.  Speeds up the standard case a lot.
+		 */
+		__zeropage(to);
+	}
+	else
+	{
+		/*
+		 * Force writeback of old page to memory.  We don't know the
+		 * virtual address, so we have to flush the entire cache ...
+		 */
+		sys_cacheflush(0, ~0, DCACHE);
+		sync_mem();
+		memcpy((void *) to,
+		       (void *) (from + (PT_OFFSET - PAGE_OFFSET)), PAGE_SIZE);
+	}
+	/*
+	 * Now writeback the page again if colour has changed.
+	 * Actually this does a Hit_Writeback, but due to an artifact in
+	 * the R4xx0 implementation this should be slightly faster.
+	 * Then sweep chipset controlled secondary caches and the ICACHE.
+	 */
+	if (page_colour(from) != page_colour(to))
+		sys_cacheflush(0, ~0, DCACHE);
+	sys_cacheflush(0, ~0, ICACHE);
 }
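
__copy_page() is built for the copy-on-write path: the most common source is empty_zero_page, whose contents are known, so the read side of the copy can be skipped entirely; the final page_colour() comparison guards against virtual aliasing in the R4x00 primary caches, pushing the just-written data out when source and destination live in different cache colours. A hypothetical caller would look like this (cow_duplicate and its locals are illustrative names, not code from the patch):

/*
 * Hypothetical COW helper showing the intended use of
 * __copy_page().  Illustration only.
 */
static unsigned long cow_duplicate(unsigned long old_page)
{
	unsigned long new_page = __get_free_page(GFP_KERNEL);

	if (new_page)
		__copy_page(old_page, new_page);
	return new_page;
}
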
 
 void show_mem(void)
 {
-	int i,free = 0,total = 0,reserved = 0;
+	int i, free = 0, total = 0, reserved = 0;
 	int shared = 0;
 
 	printk("Mem-info:\n");
 	show_free_areas();
-	printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
-	i = high_memory >> PAGE_SHIFT;
+	printk("Free swap:       %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+	i = (high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
 	while (i-- > 0) {
 		total++;
-		if (mem_map[i] & MAP_PAGE_RESERVED)
+		if (mem_map[i].reserved)
 			reserved++;
-		else if (!mem_map[i])
+		else if (!mem_map[i].count)
 			free++;
 		else
-			shared += mem_map[i]-1;
+			shared += mem_map[i].count-1;
 	}
-	printk("%d pages of RAM\n",total);
-	printk("%d free pages\n",free);
-	printk("%d reserved pages\n",reserved);
-	printk("%d pages shared\n",shared);
+	printk("%d pages of RAM\n", total);
+	printk("%d free pages\n", free);
+	printk("%d reserved pages\n", reserved);
+	printk("%d pages shared\n", shared);
 	show_buffers();
 #ifdef CONFIG_NET
 	show_net_buffers();
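
The mem_map[] accesses change shape here because the 1.3.x kernels turned mem_map from an array of packed counters (with flags like MAP_PAGE_RESERVED or'ed in) into an array of structures. As a minimal sketch, these are the two fields show_mem() now reads; the real mem_map_t in <linux/mm.h> carries more members:

/*
 * Sketch only: the mem_map fields used above.  The actual
 * structure in <linux/mm.h> has additional members.
 */
struct page_sketch {
	unsigned count;		/* users of the frame; 0 means free */
	unsigned reserved;	/* frame excluded from allocation */
};
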
@@ -133,56 +226,22 @@
 
 extern unsigned long free_area_init(unsigned long, unsigned long);
 
-/*
- * paging_init() sets up the page tables - note that the first 4MB are
- * already mapped by head.S.
- *
- * This routines also unmaps the page at virtual kernel address 0, so
- * that we can trap those pesky NULL-reference errors in the kernel.
- */
 unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
 {
-	pgd_t * pg_dir;
-	pte_t * pg_table;
-	unsigned long tmp;
-	unsigned long address;
-
-	start_mem = PAGE_ALIGN(start_mem);
-	address = 0;
-	pg_dir = swapper_pg_dir;
-	while (address < end_mem) {
-		if (pgd_none(pg_dir[0])) {
-			pgd_set(pg_dir, (pte_t *) start_mem);
-			start_mem += PAGE_SIZE;
-		}
-		/*
-		 * also map it in at 0x00000000 for init
-		 */
-		pg_table = (pte_t *) pgd_page(pg_dir[0]);
-		pgd_set(pg_dir, pg_table);
-		pg_dir++;
-		for (tmp = 0 ; tmp < PTRS_PER_PAGE ; tmp++,pg_table++) {
-			if (address < end_mem)
-				set_pte(pg_table, mk_pte(address, PAGE_SHARED));
-			else
-				pte_clear(pg_table);
-			address += PAGE_SIZE;
-		}
-	}
-#if KERNELBASE == KSEG0
-	cacheflush();
-#endif
-	invalidate();
+	pgd_init((unsigned long)swapper_pg_dir - (PT_OFFSET - PAGE_OFFSET));
 	return free_area_init(start_mem, end_mem);
 }
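
paging_init() shrinks to almost nothing because on MIPS the kernel runs from the fixed KSEG0 mapping rather than through its own page tables; all that is left is initialising the swapper page directory (written through the uncached PT_OFFSET alias, like the other early page-table stores) and handing the memory range to free_area_init(). A minimal sketch of what pgd_init() is assumed to do, namely mark every directory slot empty so that any user reference through it faults; the real implementation lives in the MIPS page-table code:

/*
 * Assumed behaviour of pgd_init(), sketched for illustration
 * only: leave every page directory slot empty.
 */
static void pgd_init_sketch(unsigned long page)
{
	unsigned long *dir = (unsigned long *) page;
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(*dir); i++)
		dir[i] = 0;	/* no second-level table mapped */
}
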
 
 void mem_init(unsigned long start_mem, unsigned long end_mem)
 {
 	int codepages = 0;
-	int reservedpages = 0;
 	int datapages = 0;
 	unsigned long tmp;
-	extern int etext;
+	extern int _etext;
+
+#ifdef CONFIG_MIPS_JAZZ
+	start_mem = vdma_init(start_mem, end_mem);
+#endif
 
 	end_mem &= PAGE_MASK;
 	high_memory = end_mem;
@@ -190,10 +249,12 @@
 	/* mark usable pages in the mem_map[] */
 	start_mem = PAGE_ALIGN(start_mem);
 
-	while (start_mem < high_memory) {
-		mem_map[MAP_NR(start_mem)] = 0;
-		start_mem += PAGE_SIZE;
+	tmp = start_mem;
+	while (tmp < high_memory) {
+		mem_map[MAP_NR(tmp)].reserved = 0;
+		tmp += PAGE_SIZE;
 	}
+
 #ifdef CONFIG_DESKSTATION_TYNE
 	deskstation_tyne_dma_init();
 #endif
@@ -203,33 +264,24 @@
 #ifdef CONFIG_SOUND
 	sound_mem_init();
 #endif
-	for (tmp = 0 ; tmp < high_memory ; tmp += PAGE_SIZE) {
-		if (mem_map[MAP_NR(tmp)]) {
-			/*
-			 * We don't have any reserved pages on the
-			 * MIPS systems supported until now
-			 */
-			if (0)
-				reservedpages++;
-			else if (tmp < ((unsigned long) &etext - KERNELBASE))
+	for (tmp = PAGE_OFFSET ; tmp < high_memory ; tmp += PAGE_SIZE) {
+		if (mem_map[MAP_NR(tmp)].reserved) {
+			if (tmp < (unsigned long) &_etext)
 				codepages++;
-			else
+			else if (tmp < start_mem)
 				datapages++;
 			continue;
 		}
-		mem_map[MAP_NR(tmp)] = 1;
+		mem_map[MAP_NR(tmp)].count = 1;
 		free_page(tmp);
 	}
 	tmp = nr_free_pages << PAGE_SHIFT;
-	printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
+	printk("Memory: %luk/%luk available (%dk kernel code, %dk data)\n",
 		tmp >> 10,
-		high_memory >> 10,
+		(high_memory - PAGE_OFFSET) >> 10,
 		codepages << (PAGE_SHIFT-10),
-		reservedpages << (PAGE_SHIFT-10),
 		datapages << (PAGE_SHIFT-10));
-	pg0[0] = pte_val(mk_pte(0, PAGE_READONLY));
 
-	invalidate();
 	return;
 }
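
All accounting in mem_init() is now relative to PAGE_OFFSET instead of physical address 0, which is why both the loop bounds and the printk subtract it. As a worked example of the final report, assuming a 16 MB machine with 4 KB pages (PAGE_SHIFT == 12):

/*
 * Worked example, assumed 16 MB of RAM and PAGE_SHIFT == 12:
 *
 *   (high_memory - PAGE_OFFSET) >> PAGE_SHIFT = 4096 page frames
 *   (high_memory - PAGE_OFFSET) >> 10         = 16384 kB total
 *   codepages << (PAGE_SHIFT - 10)            = codepages * 4 kB
 *
 * i.e. each page frame accounts for 4 kB and the report is given
 * in kilobytes throughout.
 */
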
 
@@ -243,12 +295,12 @@
 	val->freeram = nr_free_pages << PAGE_SHIFT;
 	val->bufferram = buffermem;
 	while (i-- > 0)  {
-		if (mem_map[i] & MAP_PAGE_RESERVED)
+		if (mem_map[i].reserved)
 			continue;
 		val->totalram++;
-		if (!mem_map[i])
+		if (!mem_map[i].count)
 			continue;
-		val->sharedram += mem_map[i]-1;
+		val->sharedram += mem_map[i].count-1;
 	}
 	val->totalram <<= PAGE_SHIFT;
 	val->sharedram <<= PAGE_SHIFT;
