patch-2.1.9 linux/arch/sparc/mm/srmmu.c
- Lines: 2879
- Date: Sat Nov 9 10:12:20 1996
- Orig file: v2.1.8/linux/arch/sparc/mm/srmmu.c
- Orig date: Mon May 6 12:26:03 1996
diff -u --recursive --new-file v2.1.8/linux/arch/sparc/mm/srmmu.c linux/arch/sparc/mm/srmmu.c
@@ -1,14 +1,15 @@
-/* $Id: srmmu.c,v 1.62 1996/04/25 09:11:47 davem Exp $
+/* $Id: srmmu.c,v 1.103 1996/10/31 06:28:35 davem Exp $
* srmmu.c: SRMMU specific routines for memory management.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru)
- * Copyright (C) 1996 Eddie C. Dost (ecd@pool.informatik.rwth-aachen.de)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/malloc.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -34,8 +35,11 @@
enum mbus_module srmmu_modtype;
unsigned int hwbug_bitmask;
-int hyper_cache_size;
-int hyper_line_size;
+int vac_cache_size;
+int vac_line_size;
+int vac_badbits;
+
+extern unsigned long sparc_iobase_vaddr;
#ifdef __SMP__
extern void smp_capture(void);
@@ -45,6 +49,8 @@
#define smp_release()
#endif /* !(__SMP__) */
+/* #define USE_CHUNK_ALLOC 1 */
+
static void (*ctxd_set)(ctxd_t *ctxp, pgd_t *pgdp);
static void (*pmd_set)(pmd_t *pmdp, pte_t *ptep);
@@ -72,12 +78,37 @@
static struct srmmu_trans {
unsigned long vbase;
unsigned long pbase;
- int size;
+ unsigned long size;
} srmmu_map[SPARC_PHYS_BANKS];
-static int can_cache_ptables = 0;
static int viking_mxcc_present = 0;
+void srmmu_frob_mem_map(unsigned long start_mem)
+{
+ unsigned long bank_start, bank_end;
+ unsigned long addr;
+ int i;
+
+ /* First, mark all pages as invalid. */
+ for(addr = PAGE_OFFSET; MAP_NR(addr) < max_mapnr; addr += PAGE_SIZE)
+ mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);
+
+ start_mem = PAGE_ALIGN(start_mem);
+ for(i = 0; srmmu_map[i].size; i++) {
+ bank_start = srmmu_map[i].vbase;
+ bank_end = bank_start + srmmu_map[i].size;
+ while(bank_start < bank_end) {
+ if((bank_start >= KERNBASE) &&
+ (bank_start < start_mem)) {
+ bank_start += PAGE_SIZE;
+ continue;
+ }
+ mem_map[MAP_NR(bank_start)].flags &= ~(1<<PG_reserved);
+ bank_start += PAGE_SIZE;
+ }
+ }
+}
+
/* Physical memory can be _very_ non-contiguous on the sun4m, especially
* the SS10/20 class machines and with the latest openprom revisions.
* So we have to crunch the free page pool.
@@ -88,8 +119,9 @@
for(i=0; srmmu_map[i].size != 0; i++) {
if(srmmu_map[i].vbase <= vaddr &&
- (srmmu_map[i].vbase + srmmu_map[i].size > vaddr))
+ (srmmu_map[i].vbase + srmmu_map[i].size > vaddr)) {
return (vaddr - srmmu_map[i].vbase) + srmmu_map[i].pbase;
+ }
}
return 0xffffffffUL;
}
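
[Annotation: the hunk above touches srmmu_v2p(), which resolves a kernel virtual address by walking the srmmu_map[] bank table and rebasing it into the matching physical bank. A stand-alone sketch of that lookup, using a made-up two-bank table purely for illustration:]

    #include <stdio.h>

    struct bank { unsigned long vbase, pbase, size; };

    /* Hypothetical banks; addresses and sizes are illustrative only. */
    static struct bank map[] = {
        { 0xf0000000UL, 0x00000000UL, 0x01000000UL },   /* 16MB at KERNBASE */
        { 0xf1000000UL, 0x08000000UL, 0x00400000UL },   /* 4MB second bank  */
        { 0, 0, 0 }
    };

    static unsigned long v2p(unsigned long vaddr)
    {
        int i;

        for (i = 0; map[i].size; i++)
            if (map[i].vbase <= vaddr && vaddr < map[i].vbase + map[i].size)
                return (vaddr - map[i].vbase) + map[i].pbase;
        return 0xffffffffUL;            /* same "no translation" sentinel as above */
    }

    int main(void)
    {
        printf("%08lx\n", v2p(0xf1000400UL));   /* prints 08000400 */
        return 0;
    }
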
@@ -112,11 +144,11 @@
*/
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
-#if CONFIG_AP1000
+#if MEM_BUS_SPACE
/* the AP1000 has its memory on bus 8, not 0 like suns do */
- if (!(value&0xf0000000))
- value |= 0x80000000;
- if (value == 0x80000000) value = 0;
+ if (!(value&KERNBASE))
+ value |= MEM_BUS_SPACE<<28;
+ if (value == MEM_BUS_SPACE<<28) value = 0;
#endif
__asm__ __volatile__("swap [%2], %0\n\t" :
"=&r" (value) :
@@ -144,8 +176,13 @@
static unsigned long srmmu_pmd_page(pmd_t pmd)
{ return srmmu_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
+static inline int srmmu_device_memory(pte_t pte)
+{
+ return (pte_val(pte)>>28) != MEM_BUS_SPACE;
+}
+
static unsigned long srmmu_pte_page(pte_t pte)
-{ return srmmu_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
+{ return srmmu_device_memory(pte)?~0:srmmu_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
static int srmmu_pte_none(pte_t pte) { return !pte_val(pte); }
static int srmmu_pte_present(pte_t pte)
@@ -189,6 +226,9 @@
static pte_t srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = ((srmmu_v2p(page)) >> 4) | pgprot_val(pgprot); return pte; }
+static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
+{ pte_t pte; pte_val(pte) = ((page) >> 4) | pgprot_val(pgprot); return pte; }
+
static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
pte_t pte;
@@ -198,21 +238,24 @@
static void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
- srmmu_set_entry(ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
+ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
}
static void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
- srmmu_set_entry(pgdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pmdp) >> 4)));
+ set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pmdp) >> 4)));
}
static void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
{
- srmmu_set_entry(pmdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) ptep) >> 4)));
+ set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) ptep) >> 4)));
}
static pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
-{ pte_val(pte) = (pte_val(pte) & ~0xff) | pgprot_val(newprot); return pte; }
+{
+ pte_val(pte) = (pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot);
+ return pte;
+}
/* to find an entry in a top-level page table... */
static pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
@@ -235,15 +278,29 @@
/* This must update the context table entry for this process. */
static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
- if(tsk->mm->context != NO_CONTEXT)
+ if(tsk->mm->context != NO_CONTEXT) {
+ flush_cache_mm(current->mm);
ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
+ flush_tlb_mm(current->mm);
+ }
}
static inline void srmmu_uncache_page(unsigned long addr)
{
pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, addr);
- pmd_t *pmdp = srmmu_pmd_offset(pgdp, addr);
- pte_t *ptep = srmmu_pte_offset(pmdp, addr);
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ if((pgd_val(*pgdp) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
+ ptep = (pte_t *) pgdp;
+ } else {
+ pmdp = srmmu_pmd_offset(pgdp, addr);
+ if((pmd_val(*pmdp) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
+ ptep = (pte_t *) pmdp;
+ } else {
+ ptep = srmmu_pte_offset(pmdp, addr);
+ }
+ }
flush_cache_page_to_uncache(addr);
set_pte(ptep, __pte((pte_val(*ptep) & ~SRMMU_CACHE)));
@@ -253,9 +310,19 @@
static inline void srmmu_recache_page(unsigned long addr)
{
pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, addr);
- pmd_t *pmdp = srmmu_pmd_offset(pgdp, addr);
- pte_t *ptep = srmmu_pte_offset(pmdp, addr);
+ pmd_t *pmdp;
+ pte_t *ptep;
+ if((pgd_val(*pgdp) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
+ ptep = (pte_t *) pgdp;
+ } else {
+ pmdp = srmmu_pmd_offset(pgdp, addr);
+ if((pmd_val(*pmdp) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
+ ptep = (pte_t *) pmdp;
+ } else {
+ ptep = srmmu_pte_offset(pmdp, addr);
+ }
+ }
set_pte(ptep, __pte((pte_val(*ptep) | SRMMU_CACHE)));
flush_tlb_page_for_cbit(addr);
}
@@ -264,21 +331,259 @@
{
unsigned long page = get_free_page(GFP_KERNEL);
- if (can_cache_ptables)
- return page;
-
- if(page)
- srmmu_uncache_page(page);
return page;
}
static inline void srmmu_putpage(unsigned long page)
{
- if (!can_cache_ptables)
- srmmu_recache_page(page);
free_page(page);
}
+#ifdef USE_CHUNK_ALLOC
+
+#define LC_HIGH_WATER 128
+#define BC_HIGH_WATER 32
+
+static unsigned long *lcnks = 0;
+static unsigned long *bcnks = 0;
+static int lcwater = 0;
+static int bcwater = 0;
+static int chunk_pages = 0;
+static int clct_pages = 0;
+
+#define RELAX_JIFFIES 16
+
+static int lcjiffies;
+static int bcjiffies;
+
+struct chunk {
+ struct chunk *next;
+ struct chunk *prev;
+ struct chunk *npage;
+ struct chunk *ppage;
+ int count;
+};
+
+static int garbage_calls = 0;
+
+#define OTHER_PAGE(p,q) (((unsigned long)(p) ^ (unsigned long)(q)) & PAGE_MASK)
+
+static inline int garbage_collect(unsigned long **cnks, int n, int cpp)
+{
+ struct chunk *root = (struct chunk *)*cnks;
+ struct chunk *p, *q, *curr, *next;
+ int water = n;
+
+ next = root->next;
+ curr = root->prev = root->next = root->npage = root->ppage = root;
+ root->count = 1;
+
+ garbage_calls++;
+
+ while (--n) {
+ p = next;
+ next = next->next;
+
+ if (OTHER_PAGE(p, curr)) {
+
+ q = curr->npage;
+ while (q != curr) {
+ if (!OTHER_PAGE(p, q))
+ break;
+ q = q->npage;
+ }
+
+ if (q == curr) {
+
+ (p->npage = curr->npage)->ppage = p;
+ curr->npage = p;
+ p->ppage = curr;
+
+ p->next = p->prev = p;
+ p->count = 1;
+
+ curr = p;
+
+ continue;
+ }
+ curr = q;
+ }
+
+ (p->next = curr->next)->prev = p;
+ curr->next = p;
+ p->prev = curr;
+
+ if (++curr->count == cpp) {
+
+ q = curr->npage;
+ if (curr == q) {
+
+ srmmu_putpage((unsigned long)curr & PAGE_MASK);
+ water -= cpp;
+
+ clct_pages++;
+ chunk_pages--;
+
+ if (--n) {
+ p = next;
+ next = next->next;
+
+ curr = root->prev =
+ root->next = root->npage =
+ root->ppage = root = p;
+ root->count = 1;
+
+ continue;
+ }
+ return 0;
+ }
+
+ if (curr == root)
+ root = q;
+
+ curr->ppage->npage = q;
+ q->ppage = curr->ppage;
+
+ srmmu_putpage((unsigned long)curr & PAGE_MASK);
+ water -= cpp;
+
+ clct_pages++;
+ chunk_pages--;
+
+ curr = q;
+ }
+ }
+
+ p = root;
+ while (p->npage != root) {
+ p->prev->next = p->npage;
+ p = p->npage;
+ }
+
+ *cnks = (unsigned long *)root;
+ return water;
+}
+
+
+static inline unsigned long *get_small_chunk(void)
+{
+ unsigned long *rval;
+ unsigned long flags;
+
+ save_and_cli(flags);
+ if(lcwater) {
+ lcwater--;
+ rval = lcnks;
+ lcnks = (unsigned long *) *rval;
+ } else {
+ rval = (unsigned long *) __get_free_page(GFP_KERNEL);
+
+ if(!rval) {
+ restore_flags(flags);
+ return 0;
+ }
+ chunk_pages++;
+
+ lcnks = (rval + 64);
+
+ /* Cache stomping, I know... */
+ *(rval + 64) = (unsigned long) (rval + 128);
+ *(rval + 128) = (unsigned long) (rval + 192);
+ *(rval + 192) = (unsigned long) (rval + 256);
+ *(rval + 256) = (unsigned long) (rval + 320);
+ *(rval + 320) = (unsigned long) (rval + 384);
+ *(rval + 384) = (unsigned long) (rval + 448);
+ *(rval + 448) = (unsigned long) (rval + 512);
+ *(rval + 512) = (unsigned long) (rval + 576);
+ *(rval + 576) = (unsigned long) (rval + 640);
+ *(rval + 640) = (unsigned long) (rval + 704);
+ *(rval + 704) = (unsigned long) (rval + 768);
+ *(rval + 768) = (unsigned long) (rval + 832);
+ *(rval + 832) = (unsigned long) (rval + 896);
+ *(rval + 896) = (unsigned long) (rval + 960);
+ *(rval + 960) = 0;
+ lcwater = 15;
+ }
+ lcjiffies = jiffies;
+ restore_flags(flags);
+ memset(rval, 0, 256);
+ return rval;
+}
+
+static inline void free_small_chunk(unsigned long *it)
+{
+ unsigned long flags;
+
+ save_and_cli(flags);
+ *it = (unsigned long) lcnks;
+ lcnks = it;
+ lcwater++;
+
+ if ((lcwater > LC_HIGH_WATER) &&
+ (jiffies > lcjiffies + RELAX_JIFFIES))
+ lcwater = garbage_collect(&lcnks, lcwater, 16);
+
+ restore_flags(flags);
+}
+
+static inline unsigned long *get_big_chunk(void)
+{
+ unsigned long *rval;
+ unsigned long flags;
+
+ save_and_cli(flags);
+ if(bcwater) {
+ bcwater--;
+ rval = bcnks;
+ bcnks = (unsigned long *) *rval;
+ } else {
+ rval = (unsigned long *) __get_free_page(GFP_KERNEL);
+
+ if(!rval) {
+ restore_flags(flags);
+ return 0;
+ }
+ chunk_pages++;
+
+ bcnks = (rval + 256);
+
+ /* Cache stomping, I know... */
+ *(rval + 256) = (unsigned long) (rval + 512);
+ *(rval + 512) = (unsigned long) (rval + 768);
+ *(rval + 768) = 0;
+ bcwater = 3;
+ }
+ bcjiffies = jiffies;
+ restore_flags(flags);
+ memset(rval, 0, 1024);
+ return rval;
+}
+
+static inline void free_big_chunk(unsigned long *it)
+{
+ unsigned long flags;
+
+ save_and_cli(flags);
+ *it = (unsigned long) bcnks;
+ bcnks = it;
+ bcwater++;
+
+ if ((bcwater > BC_HIGH_WATER) &&
+ (jiffies > bcjiffies + RELAX_JIFFIES))
+ bcwater = garbage_collect(&bcnks, bcwater, 4);
+
+ restore_flags(flags);
+}
+
+#define NEW_PGD() (pgd_t *) get_big_chunk()
+#define NEW_PMD() (pmd_t *) get_small_chunk()
+#define NEW_PTE() (pte_t *) get_small_chunk()
+#define FREE_PGD(chunk) free_big_chunk((unsigned long *)(chunk))
+#define FREE_PMD(chunk) free_small_chunk((unsigned long *)(chunk))
+#define FREE_PTE(chunk) free_small_chunk((unsigned long *)(chunk))
+
+#else
+
/* The easy versions. */
#define NEW_PGD() (pgd_t *) srmmu_getpage()
#define NEW_PMD() (pmd_t *) srmmu_getpage()
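
[Annotation: the USE_CHUNK_ALLOC code above hands out page-table pieces as 256-byte "small" chunks (pmd/pte tables) and 1KB "big" chunks (pgds), threading a free list through each freshly allocated page: the first word of every free chunk holds the address of the next one. A rough user-space model of the small-chunk path, leaving out the interrupt masking and the high-water garbage collection that the kernel version also does:]

    #include <stdlib.h>
    #include <string.h>

    #define CHUNK_BYTES  256                  /* 16 chunks per 4KB page */
    #define CHUNKS       16

    static unsigned long *free_list;          /* first word of a free chunk = next chunk */

    static unsigned long *get_small_chunk(void)
    {
        unsigned long *p;
        int i;

        if (free_list) {
            p = free_list;
            free_list = (unsigned long *)*p;
        } else {
            p = malloc(CHUNKS * CHUNK_BYTES); /* stand-in for __get_free_page() */
            if (!p)
                return NULL;
            /* Keep chunk 0 for the caller, chain chunks 1..15 onto the free list. */
            for (i = 1; i < CHUNKS - 1; i++)
                *(unsigned long *)((char *)p + i * CHUNK_BYTES) =
                        (unsigned long)((char *)p + (i + 1) * CHUNK_BYTES);
            *(unsigned long *)((char *)p + (CHUNKS - 1) * CHUNK_BYTES) = 0;
            free_list = (unsigned long *)((char *)p + CHUNK_BYTES);
        }
        memset(p, 0, CHUNK_BYTES);
        return p;
    }

    static void free_small_chunk(unsigned long *p)
    {
        *p = (unsigned long)free_list;        /* push back onto the list */
        free_list = p;
    }
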
@@ -287,6 +592,8 @@
#define FREE_PMD(chunk) srmmu_putpage((unsigned long)(chunk))
#define FREE_PTE(chunk) srmmu_putpage((unsigned long)(chunk))
+#endif
+
/*
* Allocate and free page tables. The xxx_kernel() versions are
* used to allocate a kernel page table - this turns on ASN bits
@@ -329,7 +636,8 @@
{
address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
if(srmmu_pgd_none(*pgd)) {
- pmd_t *page = NEW_PMD();
+ pmd_t *page;
+ page = NEW_PMD();
if(srmmu_pgd_none(*pgd)) {
if(page) {
pgd_set(pgd, page);
@@ -415,21 +723,90 @@
return NEW_PGD();
}
-static void srmmu_set_pte(pte_t *ptep, pte_t pteval)
+static void srmmu_set_pte_cacheable(pte_t *ptep, pte_t pteval)
+{
+ srmmu_set_entry(ptep, pte_val(pteval));
+}
+
+static void srmmu_set_pte_nocache_hyper(pte_t *ptep, pte_t pteval)
+{
+ unsigned long flags;
+
+ save_and_cli(flags);
+ srmmu_set_entry(ptep, pte_val(pteval));
+ hyper_flush_cache_page(((unsigned long)ptep) & PAGE_MASK);
+ restore_flags(flags);
+}
+
+static void srmmu_set_pte_nocache_cypress(pte_t *ptep, pte_t pteval)
{
+ register unsigned long a, b, c, d, e, f, g;
+ unsigned long line, page;
+
srmmu_set_entry(ptep, pte_val(pteval));
+ page = ((unsigned long)ptep) & PAGE_MASK;
+ line = (page + PAGE_SIZE) - 0x100;
+ a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
+ goto inside;
+ do {
+ line -= 0x100;
+ inside:
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+ "sta %%g0, [%0 + %2] %1\n\t"
+ "sta %%g0, [%0 + %3] %1\n\t"
+ "sta %%g0, [%0 + %4] %1\n\t"
+ "sta %%g0, [%0 + %5] %1\n\t"
+ "sta %%g0, [%0 + %6] %1\n\t"
+ "sta %%g0, [%0 + %7] %1\n\t"
+ "sta %%g0, [%0 + %8] %1\n\t" : :
+ "r" (line),
+ "i" (ASI_M_FLUSH_PAGE),
+ "r" (a), "r" (b), "r" (c), "r" (d),
+ "r" (e), "r" (f), "r" (g));
+ } while(line != page);
+}
+
+static void srmmu_set_pte_nocache_nomxccvik(pte_t *ptep, pte_t pteval)
+{
+ unsigned long paddr = srmmu_v2p(((unsigned long)ptep));
+ unsigned long vaddr;
+ int set;
+ int i;
+
+ set = (paddr >> 5) & 0x7f;
+ vaddr = (KERNBASE + PAGE_SIZE) | (set << 5);
+ srmmu_set_entry(ptep, pteval);
+ for (i = 0; i < 8; i++) {
+ __asm__ __volatile__ ("ld [%0], %%g0" : : "r" (vaddr));
+ vaddr += PAGE_SIZE;
+ }
}
static void srmmu_quick_kernel_fault(unsigned long address)
{
- printk("Penguin faults at address %08lx\n", address);
- panic("Srmmu bolixed...");
+#ifdef __SMP__
+ printk("CPU[%d]: Kernel faults at addr=0x%08lx\n",
+ smp_processor_id(), address);
+ while (1) ;
+#else
+ printk("Kernel faults at addr=0x%08lx\n", address);
+ printk("PTE=%08lx\n", srmmu_hwprobe((address & PAGE_MASK)));
+ die_if_kernel("SRMMU bolixed...", current->tss.kregs);
+#endif
}
-static inline void alloc_context(struct mm_struct *mm)
+static inline void alloc_context(struct task_struct *tsk)
{
+ struct mm_struct *mm = tsk->mm;
struct ctx_list *ctxp;
+#if CONFIG_AP1000
+ if (tsk->taskid >= MPP_TASK_BASE) {
+ mm->context = MPP_CONTEXT_BASE + (tsk->taskid - MPP_TASK_BASE);
+ return;
+ }
+#endif
+
ctxp = ctx_free.next;
if(ctxp != &ctx_free) {
remove_from_ctx_list(ctxp);
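
[Annotation: a little above, srmmu_set_pte_nocache_nomxccvik() pushes the freshly written pte out of the on-chip cache of an MXCC-less Viking by displacement: it derives the cache set from the pte's physical address (the code implies 128 sets of 32-byte lines) and then loads from eight kernel addresses one page apart that all index that same set. A tiny, purely illustrative calculation of the alias addresses, with a made-up pte address:]

    #include <stdio.h>

    int main(void)
    {
        unsigned long paddr = 0x12345678UL;       /* hypothetical pte physical address */
        int set = (paddr >> 5) & 0x7f;            /* 32-byte lines, 128 sets -> 0x33 */
        /* KERNBASE + PAGE_SIZE, as in the routine above */
        unsigned long alias = (0xf0000000UL + 4096) | ((unsigned long)set << 5);
        int i;

        for (i = 0; i < 8; i++)                   /* the eight displacing loads */
            printf("load from %08lx\n", alias + i * 4096UL);
        return 0;
    }
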
@@ -452,19 +829,28 @@
mm->context = ctxp->ctx_number;
}
+static inline void free_context(int context)
+{
+ struct ctx_list *ctx_old;
+
+#if CONFIG_AP1000
+ if (context >= MPP_CONTEXT_BASE)
+ return; /* nothing to do! */
+#endif
+
+ ctx_old = ctx_list_pool + context;
+ remove_from_ctx_list(ctx_old);
+ add_to_free_ctxlist(ctx_old);
+}
+
+
static void srmmu_switch_to_context(struct task_struct *tsk)
{
- /* Kernel threads can execute in any context and so can tasks
- * sleeping in the middle of exiting. If this task has already
- * been allocated a piece of the mmu realestate, just jump to
- * it.
- */
- if((tsk->tss.flags & SPARC_FLAG_KTHREAD) ||
- (tsk->flags & PF_EXITING))
- return;
if(tsk->mm->context == NO_CONTEXT) {
- alloc_context(tsk->mm);
+ alloc_context(tsk);
+ flush_cache_mm(current->mm);
ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
+ flush_tlb_mm(current->mm);
}
srmmu_set_context(tsk->mm->context);
}
@@ -493,7 +879,22 @@
else
tmp |= SRMMU_PRIV;
flush_page_to_ram(virt_addr);
- srmmu_set_entry(ptep, tmp);
+ set_pte(ptep, tmp);
+ flush_tlb_all();
+}
+
+void srmmu_unmapioaddr(unsigned long virt_addr)
+{
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ pgdp = srmmu_pgd_offset(init_task.mm, virt_addr);
+ pmdp = srmmu_pmd_offset(pgdp, virt_addr);
+ ptep = srmmu_pte_offset(pmdp, virt_addr);
+
+ /* No need to flush uncacheable page. */
+ set_pte(ptep, pte_val(srmmu_mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED)));
flush_tlb_all();
}
@@ -516,33 +917,22 @@
*/
struct task_struct *srmmu_alloc_task_struct(void)
{
- unsigned long page;
-
- page = get_free_page(GFP_KERNEL);
- if(!page)
- return (struct task_struct *) 0;
- return (struct task_struct *) page;
+ return (struct task_struct *) kmalloc(sizeof(struct task_struct), GFP_KERNEL);
}
unsigned long srmmu_alloc_kernel_stack(struct task_struct *tsk)
{
- unsigned long pages;
-
- pages = __get_free_pages(GFP_KERNEL, 2, 0);
- if(!pages)
- return 0;
- memset((void *) pages, 0, (PAGE_SIZE << 2));
- return pages;
+ return __get_free_pages(GFP_KERNEL, 1, 0);
}
static void srmmu_free_task_struct(struct task_struct *tsk)
{
- free_page((unsigned long) tsk);
+ kfree(tsk);
}
static void srmmu_free_kernel_stack(unsigned long stack)
{
- free_pages(stack, 2);
+ free_pages(stack, 1);
}
/* Tsunami flushes. It's page level tlb invalidation is not very
@@ -604,8 +994,6 @@
/* Tsunami does not have a Copy-back style virtual cache. */
static void tsunami_flush_page_to_ram(unsigned long page)
{
- tsunami_flush_icache();
- tsunami_flush_dcache();
}
/* However, Tsunami is not IO coherent. */
@@ -615,22 +1003,10 @@
tsunami_flush_dcache();
}
-/* TLB flushes seem to upset the tsunami sometimes, I can't figure out
- * what the hell is going on. All I see is a tlb flush (page or whole,
- * there is no consistent pattern) and then total local variable corruption
- * in the procedure who called us after return. Usually triggerable
- * by "cool" programs like crashme and bonnie. I played around a bit
- * and adding a bunch of forced nops seems to make the problems all
- * go away. (missed instruction fetches possibly? ugh...)
- */
-#define TSUNAMI_SUCKS do { nop(); nop(); nop(); nop(); nop(); \
- nop(); nop(); nop(); nop(); nop(); } while(0)
-
static void tsunami_flush_tlb_all(void)
{
module_stats.invall++;
srmmu_flush_whole_tlb();
- TSUNAMI_SUCKS;
}
static void tsunami_flush_tlb_mm(struct mm_struct *mm)
@@ -640,7 +1016,6 @@
if(mm->context != NO_CONTEXT) {
#endif
srmmu_flush_whole_tlb();
- TSUNAMI_SUCKS;
#ifndef __SMP__
}
#endif
@@ -653,7 +1028,6 @@
if(mm->context != NO_CONTEXT) {
#endif
srmmu_flush_whole_tlb();
- TSUNAMI_SUCKS;
#ifndef __SMP__
}
#endif
@@ -667,12 +1041,15 @@
#ifndef __SMP__
if(mm->context != NO_CONTEXT) {
#endif
+ unsigned long flags;
+
+ save_and_cli(flags);
octx = srmmu_get_context();
srmmu_set_context(mm->context);
srmmu_flush_tlb_page(page);
- TSUNAMI_SUCKS;
srmmu_set_context(octx);
+ restore_flags(flags);
#ifndef __SMP__
}
#endif
@@ -812,36 +1189,14 @@
static void viking_flush_cache_mm(struct mm_struct *mm)
{
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- flush_user_windows();
-#ifndef __SMP__
- }
-#endif
}
static void viking_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- flush_user_windows();
-#ifndef __SMP__
- }
-#endif
}
static void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
-#ifndef __SMP__
- struct mm_struct *mm = vma->vm_mm;
- if(mm->context != NO_CONTEXT) {
-#endif
- flush_user_windows();
-#ifndef __SMP__
- }
-#endif
}
/* Non-mxcc vikings are copy-back but are pure-physical so no flushing. */
@@ -849,21 +1204,16 @@
{
}
-/* Viking is IO cache coherent. */
-static void viking_flush_page_for_dma(unsigned long page)
-{
-}
-
static void viking_mxcc_flush_page(unsigned long page)
{
- unsigned long ppage = srmmu_hwprobe(page);
+ unsigned long ppage = srmmu_v2p(page & PAGE_MASK);
unsigned long paddr0, paddr1;
- if (!ppage)
+ if (ppage == 0xffffffffUL)
return;
- paddr0 = (ppage >> 28) | 0x10; /* Set cacheable bit. */
- paddr1 = (ppage << 4) & PAGE_MASK;
+ paddr0 = 0x10; /* Set cacheable bit. */
+ paddr1 = ppage;
/* Read the page's data through the stream registers,
* and write it back to memory. This will issue
@@ -897,14 +1247,15 @@
static void viking_no_mxcc_flush_page(unsigned long page)
{
- unsigned long ppage = srmmu_hwprobe(page) >> 8;
+ unsigned long ppage = srmmu_v2p(page & PAGE_MASK);
int set, block;
unsigned long ptag[2];
unsigned long vaddr;
int i;
- if (!ppage)
+ if (ppage == 0xffffffffUL)
return;
+ ppage >>= 12;
for (set = 0; set < 128; set++) {
for (block = 0; block < 4; block++) {
@@ -937,9 +1288,15 @@
}
}
+/* Viking is IO cache coherent, but really only on MXCC. */
+static void viking_flush_page_for_dma(unsigned long page)
+{
+}
+
static void viking_flush_tlb_all(void)
{
module_stats.invall++;
+ flush_user_windows();
srmmu_flush_whole_tlb();
}
@@ -951,6 +1308,7 @@
#ifndef __SMP__
if(mm->context != NO_CONTEXT) {
#endif
+ flush_user_windows();
octx = srmmu_get_context();
srmmu_set_context(mm->context);
srmmu_flush_tlb_ctx();
@@ -968,12 +1326,27 @@
#ifndef __SMP__
if(mm->context != NO_CONTEXT) {
#endif
+ flush_user_windows();
octx = srmmu_get_context();
srmmu_set_context(mm->context);
- start &= SRMMU_PMD_MASK;
- while(start < end) {
- srmmu_flush_tlb_segment(start);
- start += SRMMU_PMD_SIZE;
+ if((start - end) < SRMMU_PMD_SIZE) {
+ start &= PAGE_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_page(start);
+ start += PAGE_SIZE;
+ }
+ } else if((start - end) < SRMMU_PGDIR_SIZE) {
+ start &= SRMMU_PMD_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_segment(start);
+ start += SRMMU_PMD_SIZE;
+ }
+ } else {
+ start &= SRMMU_PGDIR_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_region(start);
+ start += SRMMU_PGDIR_SIZE;
+ }
}
srmmu_set_context(octx);
#ifndef __SMP__
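
[Annotation: the viking_flush_tlb_range() rewrite above, and the same pattern in the Cypress and Hypersparc variants below, picks the TLB flush granularity from the size of the range: 4KB page flushes for ranges under one pmd (256KB), 256KB segment flushes under one pgdir (16MB), and 16MB region flushes otherwise. Note that the comparisons as written compute (start - end), which for start < end wraps to a large unsigned value, so in practice the region branch is what runs. A condensed sketch of the intended selection:]

    /* Granularity choice, with the sparc32 SRMMU sizes spelled out. */
    enum flush_kind { FLUSH_PAGES, FLUSH_SEGMENTS, FLUSH_REGIONS };

    static enum flush_kind pick_flush(unsigned long start, unsigned long end)
    {
        unsigned long len = end - start;

        if (len < (256UL << 10))
            return FLUSH_PAGES;        /* step by 4KB   */
        if (len < (16UL << 20))
            return FLUSH_SEGMENTS;     /* step by 256KB */
        return FLUSH_REGIONS;          /* step by 16MB  */
    }
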
@@ -990,6 +1363,7 @@
#ifndef __SMP__
if(mm->context != NO_CONTEXT) {
#endif
+ flush_user_windows();
octx = srmmu_get_context();
srmmu_set_context(mm->context);
srmmu_flush_tlb_page(page);
@@ -1005,97 +1379,330 @@
}
/* Cypress flushes. */
-
-static void cypress_flush_tlb_all(void)
+static void cypress_flush_cache_all(void)
{
- module_stats.invall++;
- srmmu_flush_whole_tlb();
+ volatile unsigned long cypress_sucks;
+ unsigned long faddr, tagval;
+
+ flush_user_windows();
+ for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
+ __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
+ "=r" (tagval) :
+ "r" (faddr), "r" (0x40000),
+ "i" (ASI_M_DATAC_TAG));
+
+ /* If modified and valid, kick it. */
+ if((tagval & 0x60) == 0x60)
+ cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
+ }
}
-static void cypress_flush_tlb_mm(struct mm_struct *mm)
+static void cypress_flush_cache_mm(struct mm_struct *mm)
{
+ unsigned long flags, faddr;
int octx;
- module_stats.invmm++;
#ifndef __SMP__
if(mm->context != NO_CONTEXT) {
#endif
+ register unsigned long a, b, c, d, e, f, g;
+ flush_user_windows();
+ save_and_cli(flags);
octx = srmmu_get_context();
srmmu_set_context(mm->context);
- srmmu_flush_tlb_ctx();
+ a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
+ faddr = (0x10000 - 0x100);
+ goto inside;
+ do {
+ faddr -= 0x100;
+ inside:
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+ "sta %%g0, [%0 + %2] %1\n\t"
+ "sta %%g0, [%0 + %3] %1\n\t"
+ "sta %%g0, [%0 + %4] %1\n\t"
+ "sta %%g0, [%0 + %5] %1\n\t"
+ "sta %%g0, [%0 + %6] %1\n\t"
+ "sta %%g0, [%0 + %7] %1\n\t"
+ "sta %%g0, [%0 + %8] %1\n\t" : :
+ "r" (faddr), "i" (ASI_M_FLUSH_CTX),
+ "r" (a), "r" (b), "r" (c), "r" (d),
+ "r" (e), "r" (f), "r" (g));
+ } while(faddr);
srmmu_set_context(octx);
+ restore_flags(flags);
#ifndef __SMP__
}
#endif
}
-static void cypress_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+static void cypress_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
+ unsigned long flags, faddr;
int octx;
- module_stats.invrnge++;
#ifndef __SMP__
if(mm->context != NO_CONTEXT) {
#endif
+ register unsigned long a, b, c, d, e, f, g;
+ flush_user_windows();
+ save_and_cli(flags);
octx = srmmu_get_context();
srmmu_set_context(mm->context);
+ a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
start &= SRMMU_PMD_MASK;
while(start < end) {
- srmmu_flush_tlb_segment(start);
+ faddr = (start + (0x10000 - 0x100));
+ goto inside;
+ do {
+ faddr -= 0x100;
+ inside:
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+ "sta %%g0, [%0 + %2] %1\n\t"
+ "sta %%g0, [%0 + %3] %1\n\t"
+ "sta %%g0, [%0 + %4] %1\n\t"
+ "sta %%g0, [%0 + %5] %1\n\t"
+ "sta %%g0, [%0 + %6] %1\n\t"
+ "sta %%g0, [%0 + %7] %1\n\t"
+ "sta %%g0, [%0 + %8] %1\n\t" : :
+ "r" (faddr),
+ "i" (ASI_M_FLUSH_SEG),
+ "r" (a), "r" (b), "r" (c), "r" (d),
+ "r" (e), "r" (f), "r" (g));
+ } while (faddr != start);
start += SRMMU_PMD_SIZE;
}
srmmu_set_context(octx);
+ restore_flags(flags);
#ifndef __SMP__
}
#endif
}
-static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
- int octx;
struct mm_struct *mm = vma->vm_mm;
+ unsigned long flags, line;
+ int octx;
- module_stats.invpg++;
#ifndef __SMP__
if(mm->context != NO_CONTEXT) {
#endif
+ register unsigned long a, b, c, d, e, f, g;
+ flush_user_windows();
+ save_and_cli(flags);
octx = srmmu_get_context();
srmmu_set_context(mm->context);
- srmmu_flush_tlb_page(page);
+ a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
+ page &= PAGE_MASK;
+ line = (page + PAGE_SIZE) - 0x100;
+ goto inside;
+ do {
+ line -= 0x100;
+ inside:
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+ "sta %%g0, [%0 + %2] %1\n\t"
+ "sta %%g0, [%0 + %3] %1\n\t"
+ "sta %%g0, [%0 + %4] %1\n\t"
+ "sta %%g0, [%0 + %5] %1\n\t"
+ "sta %%g0, [%0 + %6] %1\n\t"
+ "sta %%g0, [%0 + %7] %1\n\t"
+ "sta %%g0, [%0 + %8] %1\n\t" : :
+ "r" (line),
+ "i" (ASI_M_FLUSH_PAGE),
+ "r" (a), "r" (b), "r" (c), "r" (d),
+ "r" (e), "r" (f), "r" (g));
+ } while(line != page);
srmmu_set_context(octx);
+ restore_flags(flags);
#ifndef __SMP__
}
#endif
}
-/* Hypersparc flushes. Very nice chip... */
-static void hypersparc_flush_cache_all(void)
+/* Cypress is copy-back, at least that is how we configure it. */
+static void cypress_flush_page_to_ram(unsigned long page)
+{
+ register unsigned long a, b, c, d, e, f, g;
+ unsigned long line;
+
+ a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
+ page &= PAGE_MASK;
+ line = (page + PAGE_SIZE) - 0x100;
+ goto inside;
+ do {
+ line -= 0x100;
+ inside:
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+ "sta %%g0, [%0 + %2] %1\n\t"
+ "sta %%g0, [%0 + %3] %1\n\t"
+ "sta %%g0, [%0 + %4] %1\n\t"
+ "sta %%g0, [%0 + %5] %1\n\t"
+ "sta %%g0, [%0 + %6] %1\n\t"
+ "sta %%g0, [%0 + %7] %1\n\t"
+ "sta %%g0, [%0 + %8] %1\n\t" : :
+ "r" (line),
+ "i" (ASI_M_FLUSH_PAGE),
+ "r" (a), "r" (b), "r" (c), "r" (d),
+ "r" (e), "r" (f), "r" (g));
+ } while(line != page);
+}
+
+/* Cypress is also IO cache coherent. */
+static void cypress_flush_page_for_dma(unsigned long page)
{
- flush_user_windows();
- hyper_flush_unconditional_combined();
- hyper_flush_whole_icache();
}
-static void hypersparc_flush_cache_mm(struct mm_struct *mm)
+static void cypress_flush_page_to_uncache(unsigned long page)
+{
+ register unsigned long a, b, c, d, e, f, g;
+ unsigned long line;
+
+ a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
+ page &= PAGE_MASK;
+ line = (page + PAGE_SIZE) - 0x100;
+ goto inside;
+ do {
+ line -= 0x100;
+ inside:
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+ "sta %%g0, [%0 + %2] %1\n\t"
+ "sta %%g0, [%0 + %3] %1\n\t"
+ "sta %%g0, [%0 + %4] %1\n\t"
+ "sta %%g0, [%0 + %5] %1\n\t"
+ "sta %%g0, [%0 + %6] %1\n\t"
+ "sta %%g0, [%0 + %7] %1\n\t"
+ "sta %%g0, [%0 + %8] %1\n\t" : :
+ "r" (line),
+ "i" (ASI_M_FLUSH_PAGE),
+ "r" (a), "r" (b), "r" (c), "r" (d),
+ "r" (e), "r" (f), "r" (g));
+ } while(line != page);
+}
+
+static void cypress_flush_tlb_all(void)
+{
+ module_stats.invall++;
+ srmmu_flush_whole_tlb();
+}
+
+static void cypress_flush_tlb_mm(struct mm_struct *mm)
{
+ int octx;
+
+ module_stats.invmm++;
#ifndef __SMP__
if(mm->context != NO_CONTEXT) {
#endif
- flush_user_windows();
- hyper_flush_unconditional_combined();
- hyper_flush_whole_icache();
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ srmmu_flush_tlb_ctx();
+ srmmu_set_context(octx);
#ifndef __SMP__
}
#endif
}
-static void hypersparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+static void cypress_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ int octx;
+ module_stats.invrnge++;
+
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ flush_user_windows();
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ if((start - end) < SRMMU_PMD_SIZE) {
+ start &= PAGE_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_page(start);
+ start += PAGE_SIZE;
+ }
+ } else if((start - end) < SRMMU_PGDIR_SIZE) {
+ start &= SRMMU_PMD_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_segment(start);
+ start += SRMMU_PMD_SIZE;
+ }
+ } else {
+ start &= SRMMU_PGDIR_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_region(start);
+ start += SRMMU_PGDIR_SIZE;
+ }
+ }
+ srmmu_set_context(octx);
+#ifndef __SMP__
+ }
+#endif
+}
+
+static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ int octx;
+ struct mm_struct *mm = vma->vm_mm;
+
+ module_stats.invpg++;
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ flush_user_windows();
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ srmmu_flush_tlb_page(page);
+ srmmu_set_context(octx);
+#ifndef __SMP__
+ }
+#endif
+}
+
+static void cypress_flush_tlb_page_for_cbit(unsigned long page)
+{
+ srmmu_flush_tlb_page(page);
+}
+
+/* Hypersparc flushes. Very nice chip... */
+static void hypersparc_flush_cache_all(void)
+{
+ flush_user_windows();
+ hyper_flush_unconditional_combined();
+ hyper_flush_whole_icache();
+}
+
+static void hypersparc_flush_cache_mm(struct mm_struct *mm)
{
#ifndef __SMP__
if(mm->context != NO_CONTEXT) {
#endif
flush_user_windows();
- hyper_flush_unconditional_combined();
+ hyper_flush_cache_user();
+ hyper_flush_whole_icache();
+#ifndef __SMP__
+ }
+#endif
+}
+
+/* Boy was my older implementation inefficient... */
+static void hypersparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ volatile unsigned long clear;
+ int octx;
+
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ flush_user_windows();
+ octx = srmmu_get_context();
+ start &= PAGE_MASK;
+ srmmu_set_context(mm->context);
+ while(start < end) {
+ if(srmmu_hwprobe(start))
+ hyper_flush_cache_page(start);
+ start += PAGE_SIZE;
+ }
+ clear = srmmu_get_fstatus();
+ srmmu_set_context(octx);
hyper_flush_whole_icache();
#ifndef __SMP__
}
@@ -1142,11 +1749,6 @@
/* HyperSparc is IO cache coherent. */
static void hypersparc_flush_page_for_dma(unsigned long page)
{
- volatile unsigned long clear;
-
- if(srmmu_hwprobe(page))
- hyper_flush_cache_page(page);
- clear = srmmu_get_fstatus();
}
static void hypersparc_flush_cache_page_to_uncache(unsigned long page)
@@ -1194,10 +1796,24 @@
octx = srmmu_get_context();
srmmu_set_context(mm->context);
- start &= SRMMU_PMD_MASK;
- while(start < end) {
- srmmu_flush_tlb_segment(start);
- start += SRMMU_PMD_SIZE;
+ if((start - end) < SRMMU_PMD_SIZE) {
+ start &= PAGE_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_page(start);
+ start += PAGE_SIZE;
+ }
+ } else if((start - end) < SRMMU_PGDIR_SIZE) {
+ start &= SRMMU_PMD_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_segment(start);
+ start += SRMMU_PMD_SIZE;
+ }
+ } else {
+ start &= SRMMU_PGDIR_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_region(start);
+ start += SRMMU_PGDIR_SIZE;
+ }
}
srmmu_set_context(octx);
@@ -1234,45 +1850,26 @@
static void hypersparc_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
hyper_flush_whole_icache();
- srmmu_set_entry(ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
+ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
}
static void hypersparc_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
if(tsk->mm->context != NO_CONTEXT) {
- hyper_flush_whole_icache();
+ flush_cache_mm(current->mm);
ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
+ flush_tlb_mm(current->mm);
}
}
-static void hypersparc_set_pte(pte_t *ptep, pte_t pteval)
-{
- /* xor is your friend */
- __asm__ __volatile__("rd %%psr, %%g1\n\t"
- "wr %%g1, %4, %%psr\n\t"
- "nop; nop; nop;\n\t"
- "swap [%0], %1\n\t"
- "wr %%g1, 0x0, %%psr\n\t"
- "nop; nop; nop;\n\t" :
- "=r" (ptep), "=r" (pteval) :
- "0" (ptep), "1" (pteval), "i" (PSR_ET) :
- "g1");
-}
-
static void hypersparc_switch_to_context(struct task_struct *tsk)
{
- /* Kernel threads can execute in any context and so can tasks
- * sleeping in the middle of exiting. If this task has already
- * been allocated a piece of the mmu realestate, just jump to
- * it.
- */
hyper_flush_whole_icache();
- if((tsk->tss.flags & SPARC_FLAG_KTHREAD) ||
- (tsk->flags & PF_EXITING))
- return;
if(tsk->mm->context == NO_CONTEXT) {
- alloc_context(tsk->mm);
+ alloc_context(tsk);
+ flush_cache_mm(current->mm);
ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
+ flush_tlb_mm(current->mm);
}
srmmu_set_context(tsk->mm->context);
}
@@ -1280,45 +1877,28 @@
/* IOMMU things go here. */
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
-static unsigned long first_dvma_page, last_dvma_page;
#define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(phys) (((((phys)>>4) & IOPTE_PAGE) | IOPERM) & ~IOPTE_WAZ)
-static inline void srmmu_map_dvma_pages_for_iommu(struct iommu_struct *iommu)
+static inline void srmmu_map_dvma_pages_for_iommu(struct iommu_struct *iommu,
+ unsigned long kern_end)
{
- unsigned long first = first_dvma_page;
- unsigned long last = last_dvma_page;
- iopte_t *iopte;
+ unsigned long first = page_offset;
+ unsigned long last = kern_end;
+ iopte_t *iopte = iommu->page_table;
- iopte = iommu->page_table;
- iopte += ((DVMA_VADDR - iommu->start) >> PAGE_SHIFT);
+ iopte += ((first - iommu->start) >> PAGE_SHIFT);
while(first <= last) {
iopte_val(*iopte++) = MKIOPTE(srmmu_v2p(first));
first += PAGE_SIZE;
}
}
-void srmmu_uncache_iommu_page_table(unsigned long start, int size)
-{
- pgd_t *pgdp;
- pmd_t *pmdp;
- pte_t *ptep;
- unsigned long end = start + size;
-
- while(start < end) {
- pgdp = srmmu_pgd_offset(init_task.mm, start);
- pmdp = srmmu_pmd_offset(pgdp, start);
- ptep = srmmu_pte_offset(pmdp, start);
- pte_val(*ptep) &= ~SRMMU_CACHE;
- start += PAGE_SIZE;
- }
-}
-
unsigned long iommu_init(int iommund, unsigned long memory_start,
unsigned long memory_end, struct linux_sbus *sbus)
{
- int impl, vers, ptsize;
+ unsigned int impl, vers, ptsize;
unsigned long tmp;
struct iommu_struct *iommu;
struct linux_prom_registers iommu_promregs[PROMREG_MAX];
@@ -1326,7 +1906,8 @@
memory_start = LONG_ALIGN(memory_start);
iommu = (struct iommu_struct *) memory_start;
memory_start += sizeof(struct iommu_struct);
- prom_getproperty(iommund, "reg", (void *) iommu_promregs, sizeof(iommu_promregs));
+ prom_getproperty(iommund, "reg", (void *) iommu_promregs,
+ sizeof(iommu_promregs));
iommu->regs = (struct iommu_regs *)
sparc_alloc_io(iommu_promregs[0].phys_addr, 0, (PAGE_SIZE * 3),
"IOMMU registers", iommu_promregs[0].which_io, 0x0);
@@ -1336,10 +1917,30 @@
vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
tmp = iommu->regs->control;
tmp &= ~(IOMMU_CTRL_RNGE);
- tmp |= (IOMMU_RNGE_64MB | IOMMU_CTRL_ENAB);
+ switch(page_offset & 0xf0000000) {
+ case 0xf0000000:
+ tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
+ iommu->plow = iommu->start = 0xf0000000;
+ break;
+ case 0xe0000000:
+ tmp |= (IOMMU_RNGE_512MB | IOMMU_CTRL_ENAB);
+ iommu->plow = iommu->start = 0xe0000000;
+ break;
+ case 0xd0000000:
+ case 0xc0000000:
+ tmp |= (IOMMU_RNGE_1GB | IOMMU_CTRL_ENAB);
+ iommu->plow = iommu->start = 0xc0000000;
+ break;
+ case 0xb0000000:
+ case 0xa0000000:
+ case 0x90000000:
+ case 0x80000000:
+ tmp |= (IOMMU_RNGE_2GB | IOMMU_CTRL_ENAB);
+ iommu->plow = iommu->start = 0x80000000;
+ break;
+ }
iommu->regs->control = tmp;
iommu_invalidate(iommu->regs);
- iommu->plow = iommu->start = 0xfc000000;
iommu->end = 0xffffffff;
/* Allocate IOMMU page table */
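
[Annotation: the switch on page_offset added above sizes the IOMMU DVMA window so it reaches from the chosen base up to the top of the 32-bit address space, replacing the old fixed 64MB window at 0xfc000000. The window size is simply 4GB minus the base; a quick check of the four cases:]

    #include <stdio.h>

    int main(void)
    {
        unsigned long bases[] = { 0xf0000000UL, 0xe0000000UL, 0xc0000000UL, 0x80000000UL };
        int i;

        for (i = 0; i < 4; i++)
            printf("base %08lx -> %4llu MB window\n",
                   bases[i], (0x100000000ULL - bases[i]) >> 20);
        /* 256, 512, 1024 and 2048 MB: the IOMMU_RNGE_* values chosen above. */
        return 0;
    }
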
@@ -1354,10 +1955,24 @@
/* Initialize new table. */
flush_cache_all();
- srmmu_uncache_iommu_page_table((unsigned long) iommu->page_table, ptsize);
- flush_tlb_all();
+ if(viking_mxcc_present) {
+ unsigned long start = (unsigned long) iommu->page_table;
+ unsigned long end = (start + ptsize);
+ while(start < end) {
+ viking_mxcc_flush_page(start);
+ start += PAGE_SIZE;
+ }
+ } else if(flush_page_for_dma == viking_no_mxcc_flush_page) {
+ unsigned long start = (unsigned long) iommu->page_table;
+ unsigned long end = (start + ptsize);
+ while(start < end) {
+ viking_no_mxcc_flush_page(start);
+ start += PAGE_SIZE;
+ }
+ }
memset(iommu->page_table, 0, ptsize);
- srmmu_map_dvma_pages_for_iommu(iommu);
+ srmmu_map_dvma_pages_for_iommu(iommu, memory_end);
+ flush_tlb_all();
iommu->regs->base = srmmu_v2p((unsigned long) iommu->page_table) >> 4;
iommu_invalidate(iommu->regs);
@@ -1367,103 +1982,74 @@
return memory_start;
}
-static char *srmmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
+void iommu_sun4d_init(int sbi_node, struct linux_sbus *sbus)
{
- struct iommu_struct *iommu = sbus->iommu;
- unsigned long page = (unsigned long) vaddr;
- unsigned long start, end, offset;
- iopte_t *iopte;
+ u32 *iommu;
+ struct linux_prom_registers iommu_promregs[PROMREG_MAX];
- offset = page & ~PAGE_MASK;
- page &= PAGE_MASK;
+ prom_getproperty(sbi_node, "reg", (void *) iommu_promregs,
+ sizeof(iommu_promregs));
+ iommu = (u32 *)
+ sparc_alloc_io(iommu_promregs[2].phys_addr, 0, (PAGE_SIZE * 16),
+ "XPT", iommu_promregs[2].which_io, 0x0);
+ if(!iommu)
+ panic("Cannot map External Page Table.");
- start = iommu->plow;
- end = KADB_DEBUGGER_BEGVM; /* Don't step on kadb/prom. */
- iopte = iommu->lowest;
- while(start < end) {
- if(!(iopte_val(*iopte) & IOPTE_VALID))
- break;
- iopte++;
- start += PAGE_SIZE;
+ /* Initialize new table. */
+ flush_cache_all();
+ if(viking_mxcc_present) {
+ unsigned long start = (unsigned long) iommu;
+ unsigned long end = (start + 16 * PAGE_SIZE);
+ while(start < end) {
+ viking_mxcc_flush_page(start);
+ start += PAGE_SIZE;
+ }
+ } else if(flush_page_for_dma == viking_no_mxcc_flush_page) {
+ unsigned long start = (unsigned long) iommu;
+ unsigned long end = (start + 16 * PAGE_SIZE);
+ while(start < end) {
+ viking_no_mxcc_flush_page(start);
+ start += PAGE_SIZE;
+ }
}
+ memset(iommu, 0, 16 * PAGE_SIZE);
+ flush_tlb_all();
+
+ sbus->iommu = (struct iommu_struct *)iommu;
+}
- flush_page_for_dma(page);
- vaddr = (char *) (start | offset);
- iopte_val(*iopte) = MKIOPTE(srmmu_v2p(page));
- iommu_invalidate_page(iommu->regs, start);
- iommu->lowest = iopte + 1;
- iommu->plow = start + PAGE_SIZE;
+static char *srmmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
+{
+ unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
+ while(page < ((unsigned long)(vaddr + len))) {
+ flush_page_for_dma(page);
+ page += PAGE_SIZE;
+ }
return vaddr;
}
static void srmmu_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
{
- struct iommu_struct *iommu = sbus->iommu;
- unsigned long page, start, end, offset;
- iopte_t *iopte = iommu->lowest;
+ unsigned long page;
- start = iommu->plow;
- end = KADB_DEBUGGER_BEGVM;
while(sz >= 0) {
page = ((unsigned long) sg[sz].addr) & PAGE_MASK;
- offset = ((unsigned long) sg[sz].addr) & ~PAGE_MASK;
- while(start < end) {
- if(!(iopte_val(*iopte) & IOPTE_VALID))
- break;
- iopte++;
- start += PAGE_SIZE;
+ while(page < (unsigned long)(sg[sz].addr + sg[sz].len)) {
+ flush_page_for_dma(page);
+ page += PAGE_SIZE;
}
- if(start == KADB_DEBUGGER_BEGVM)
- panic("Wheee, iomapping overflow.");
- flush_page_for_dma(page);
- sg[sz].alt_addr = (char *) (start | offset);
- iopte_val(*iopte) = MKIOPTE(srmmu_v2p(page));
- iommu_invalidate_page(iommu->regs, start);
- iopte++;
- start += PAGE_SIZE;
+ sg[sz].dvma_addr = (char *) (sg[sz].addr);
sz--;
}
- iommu->lowest = iopte;
- iommu->plow = start;
}
static void srmmu_release_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
{
- struct iommu_struct *iommu = sbus->iommu;
- unsigned long page = (unsigned long) vaddr;
- iopte_t *iopte;
-
- if(len > PAGE_SIZE)
- panic("Can only handle page sized IOMMU mappings.");
- page &= PAGE_MASK;
- iopte = iommu->page_table + ((page - iommu->start) >> PAGE_SHIFT);
- iopte_val(*iopte) = 0;
- iommu_invalidate_page(iommu->regs, page);
- if(iopte < iommu->lowest) {
- iommu->lowest = iopte;
- iommu->plow = page;
- }
}
static void srmmu_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
{
- struct iommu_struct *iommu = sbus->iommu;
- unsigned long page;
- iopte_t *iopte;
-
- while(sz >= 0) {
- page = ((unsigned long)sg[sz].alt_addr) & PAGE_MASK;
- iopte = iommu->page_table + ((page - iommu->start) >> PAGE_SHIFT);
- iopte_val(*iopte) = 0;
- iommu_invalidate_page(iommu->regs, page);
- if(iopte < iommu->lowest) {
- iommu->lowest = iopte;
- iommu->plow = page;
- }
- sg[sz].alt_addr = 0;
- sz--;
- }
}
static unsigned long mempool;
@@ -1479,27 +2065,27 @@
/* Some dirty hacks to abstract away the painful boot up init. */
static inline unsigned long srmmu_early_paddr(unsigned long vaddr)
{
- return ((vaddr - PAGE_OFFSET) + kbpage);
+ return ((vaddr - KERNBASE) + kbpage);
}
static inline void srmmu_early_pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{
- srmmu_set_entry(pgdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) pmdp) >> 4)));
+ set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) pmdp) >> 4)));
}
static inline void srmmu_early_pmd_set(pmd_t *pmdp, pte_t *ptep)
{
- srmmu_set_entry(pmdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) ptep) >> 4)));
+ set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) ptep) >> 4)));
}
static inline unsigned long srmmu_early_pgd_page(pgd_t pgd)
{
- return (((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4) - kbpage) + PAGE_OFFSET;
+ return (((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4) - kbpage) + KERNBASE;
}
static inline unsigned long srmmu_early_pmd_page(pmd_t pmd)
{
- return (((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4) - kbpage) + PAGE_OFFSET;
+ return (((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4) - kbpage) + KERNBASE;
}
static inline pmd_t *srmmu_early_pmd_offset(pgd_t *dir, unsigned long address)
@@ -1512,26 +2098,6 @@
return (pte_t *) srmmu_early_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
}
-/* Allocate a block of RAM which is aligned to its size.
- * This procedure can be used until the call to mem_init().
- */
-static void *srmmu_init_alloc(unsigned long *kbrk, unsigned long size)
-{
- unsigned long mask = size - 1;
- unsigned long ret;
-
- if(!size)
- return 0x0;
- if(size & mask) {
- prom_printf("panic: srmmu_init_alloc botch\n");
- prom_halt();
- }
- ret = (*kbrk + mask) & ~mask;
- *kbrk = ret + size;
- memset((void*) ret, 0, size);
- return (void*) ret;
-}
-
static inline void srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
{
pgd_t *pgdp;
@@ -1541,12 +2107,12 @@
while(start < end) {
pgdp = srmmu_pgd_offset(init_task.mm, start);
if(srmmu_pgd_none(*pgdp)) {
- pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
+ pmdp = sparc_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
srmmu_early_pgd_set(pgdp, pmdp);
}
pmdp = srmmu_early_pmd_offset(pgdp, start);
if(srmmu_pmd_none(*pmdp)) {
- ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
+ ptep = sparc_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
srmmu_early_pmd_set(pmdp, ptep);
}
start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
@@ -1596,7 +2162,7 @@
continue;
}
if(srmmu_pgd_none(*pgdp)) {
- pmdp = srmmu_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
+ pmdp = sparc_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
srmmu_early_pgd_set(pgdp, pmdp);
}
pmdp = srmmu_early_pmd_offset(pgdp, start);
@@ -1606,7 +2172,7 @@
continue;
}
if(srmmu_pmd_none(*pmdp)) {
- ptep = srmmu_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
+ ptep = sparc_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
srmmu_early_pmd_set(pmdp, ptep);
}
ptep = srmmu_early_pte_offset(pmdp, start);
@@ -1615,187 +2181,408 @@
}
}
-static inline void srmmu_map_dvma_pages_for_cpu(unsigned long first, unsigned long last)
+static void srmmu_map_dma_area(unsigned long addr, int len)
{
- unsigned long start;
+ unsigned long page, end;
pgprot_t dvma_prot;
- pgd_t *pgdp;
- pmd_t *pmdp;
- pte_t *ptep;
+ struct iommu_struct *iommu = SBus_chain->iommu;
+ iopte_t *iopte = iommu->page_table;
+ iopte_t *iopte_first = iopte;
- start = DVMA_VADDR;
- if (viking_mxcc_present)
+ if(viking_mxcc_present)
dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
else
dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
- while(first <= last) {
- pgdp = srmmu_pgd_offset(init_task.mm, start);
- pmdp = srmmu_pmd_offset(pgdp, start);
- ptep = srmmu_pte_offset(pmdp, start);
- srmmu_set_entry(ptep, pte_val(srmmu_mk_pte(first, dvma_prot)));
+ iopte += ((addr - iommu->start) >> PAGE_SHIFT);
+ end = PAGE_ALIGN((addr + len));
+ while(addr < end) {
+ page = get_free_page(GFP_KERNEL);
+ if(!page) {
+ prom_printf("alloc_dvma: Cannot get a dvma page\n");
+ prom_halt();
+ } else {
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
- first += PAGE_SIZE;
- start += PAGE_SIZE;
- }
+ pgdp = srmmu_pgd_offset(init_task.mm, addr);
+ pmdp = srmmu_pmd_offset(pgdp, addr);
+ ptep = srmmu_pte_offset(pmdp, addr);
+
+ set_pte(ptep, pte_val(srmmu_mk_pte(page, dvma_prot)));
- /* Uncache DVMA pages. */
- if (!viking_mxcc_present) {
- first = first_dvma_page;
- last = last_dvma_page;
- while(first <= last) {
- pgdp = srmmu_pgd_offset(init_task.mm, first);
- pmdp = srmmu_pmd_offset(pgdp, first);
- ptep = srmmu_pte_offset(pmdp, first);
- pte_val(*ptep) &= ~SRMMU_CACHE;
- first += PAGE_SIZE;
+ iopte_val(*iopte++) = MKIOPTE(srmmu_v2p(page));
}
+ addr += PAGE_SIZE;
}
+ flush_cache_all();
+ if(viking_mxcc_present) {
+ unsigned long start = ((unsigned long) iopte_first) & PAGE_MASK;
+ unsigned long end = PAGE_ALIGN(((unsigned long) iopte));
+ while(start < end) {
+ viking_mxcc_flush_page(start);
+ start += PAGE_SIZE;
+ }
+ } else if(flush_page_for_dma == viking_no_mxcc_flush_page) {
+ unsigned long start = ((unsigned long) iopte_first) & PAGE_MASK;
+ unsigned long end = PAGE_ALIGN(((unsigned long) iopte));
+ while(start < end) {
+ viking_no_mxcc_flush_page(start);
+ start += PAGE_SIZE;
+ }
+ }
+ flush_tlb_all();
+ iommu_invalidate(iommu->regs);
}
-static void srmmu_map_kernel(unsigned long start, unsigned long end)
+/* #define DEBUG_MAP_KERNEL */
+
+#ifdef DEBUG_MAP_KERNEL
+#define MKTRACE(foo) prom_printf foo
+#else
+#define MKTRACE(foo)
+#endif
+
+static int lots_of_ram = 0;
+static int large_pte_optimize = 1;
+
+#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)
+
+/* Create a third-level SRMMU 16MB page mapping. */
+static inline void do_large_mapping(unsigned long vaddr, unsigned long phys_base)
+{
+ pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
+ unsigned long big_pte;
+
+ MKTRACE(("dlm[v<%08lx>-->p<%08lx>]", vaddr, phys_base));
+ big_pte = KERNEL_PTE(phys_base >> 4);
+ pgd_val(*pgdp) = big_pte;
+}
+
+/* Create second-level SRMMU 256K medium sized page mappings. */
+static inline void do_medium_mapping(unsigned long vaddr, unsigned long vend,
+ unsigned long phys_base)
{
- unsigned long last_page;
- int srmmu_bank, phys_bank, i;
pgd_t *pgdp;
pmd_t *pmdp;
- pte_t *ptep;
+ unsigned long medium_pte;
- end = PAGE_ALIGN(end);
+ MKTRACE(("dmm[v<%08lx,%08lx>-->p<%08lx>]", vaddr, vend, phys_base));
+ while(vaddr < vend) {
+ pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
+ pmdp = srmmu_early_pmd_offset(pgdp, vaddr);
+ medium_pte = KERNEL_PTE(phys_base >> 4);
+ pmd_val(*pmdp) = medium_pte;
+ phys_base += SRMMU_PMD_SIZE;
+ vaddr += SRMMU_PMD_SIZE;
+ }
+}
- if(start == (KERNBASE + PAGE_SIZE)) {
- unsigned long pte;
- unsigned long tmp;
-
- pgdp = srmmu_pgd_offset(init_task.mm, KERNBASE);
- pmdp = srmmu_early_pmd_offset(pgdp, KERNBASE);
- ptep = srmmu_early_pte_offset(pmdp, KERNBASE);
-
- /* Put a real mapping in for the KERNBASE page. */
- tmp = kbpage;
- pte = (tmp) >> 4;
- pte |= (SRMMU_CACHE | SRMMU_PRIV | SRMMU_VALID);
- pte_val(*ptep) = pte;
- }
-
- /* Copy over mappings prom already gave us. */
- last_page = (srmmu_hwprobe(start) & SRMMU_PTE_PMASK) << 4;
- while((srmmu_hwprobe(start) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
- unsigned long tmp;
+/* Create a normal set of SRMMU page mappings for the virtual range
+ * START to END, using physical pages beginning at PHYS_BASE.
+ */
+static inline void do_small_mapping(unsigned long start, unsigned long end,
+ unsigned long phys_base)
+{
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ MKTRACE(("dsm[v<%08lx,%08lx>-->p<%08lx>]", start, end, phys_base));
+ while(start < end) {
pgdp = srmmu_pgd_offset(init_task.mm, start);
pmdp = srmmu_early_pmd_offset(pgdp, start);
ptep = srmmu_early_pte_offset(pmdp, start);
- tmp = srmmu_hwprobe(start);
- tmp &= ~(0xff);
- tmp |= (SRMMU_CACHE | SRMMU_PRIV | SRMMU_VALID);
- pte_val(*ptep) = tmp;
+
+ pte_val(*ptep) = KERNEL_PTE(phys_base >> 4);
+ phys_base += PAGE_SIZE;
start += PAGE_SIZE;
- tmp = (srmmu_hwprobe(start) & SRMMU_PTE_PMASK) << 4;
+ }
+}
- /* Never a cross bank boundary, thank you. */
- if(tmp != last_page + PAGE_SIZE)
- break;
- last_page = tmp;
+/* Look in the sp_bank for the given physical page, return the
+ * index number the entry was found in, or -1 for not found.
+ */
+static inline int find_in_spbanks(unsigned long phys_page)
+{
+ int entry;
+
+ for(entry = 0; sp_banks[entry].num_bytes; entry++) {
+ unsigned long start = sp_banks[entry].base_addr;
+ unsigned long end = start + sp_banks[entry].num_bytes;
+
+ if((start <= phys_page) && (phys_page < end))
+ return entry;
}
+ return -1;
+}
+
+/* Find an spbank entry not mapped as of yet, TAKEN_VECTOR is an
+ * array of char's, each member indicating if that spbank is mapped
+ * yet or not.
+ */
+static inline int find_free_spbank(char *taken_vector)
+{
+ int entry;
+
+ for(entry = 0; sp_banks[entry].num_bytes; entry++)
+ if(!taken_vector[entry])
+ break;
+ return entry;
+}
- /* Ok, that was assumed to be one full bank, begin
- * construction of srmmu_map[].
+/* Same as above, but with a given bank size limit BLIMIT. */
+static inline int find_free_spbank_limited(char *taken_vector, unsigned long limit)
+{
+ int entry;
+
+ for(entry = 0; sp_banks[entry].num_bytes; entry++)
+ if(!taken_vector[entry] &&
+ (sp_banks[entry].num_bytes < limit))
+ break;
+ return entry;
+}
+
+/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE.
+ * This routine is expected to update the srmmu_map and try as
+ * hard as possible to use 16MB level-one SRMMU pte's when at all
+ * possible to get short termination and faster translations.
+ */
+static inline unsigned long map_spbank(unsigned long vbase, int sp_entry)
+{
+ unsigned long pstart = sp_banks[sp_entry].base_addr;
+ unsigned long vstart = vbase;
+ unsigned long vend = vbase + sp_banks[sp_entry].num_bytes;
+ static int srmmu_bank = 0;
+
+ /* If physically not aligned on 16MB boundry, just shortcut
+ * right here by mapping them with 4k normal pages, and bumping
+ * the next virtual address to the next 16MB boundry. You can
+ * get this with various RAM configurations due to the way in
+ * which the PROM carves out it's own chunks of memory.
*/
- for(phys_bank = 0; sp_banks[phys_bank].num_bytes != 0; phys_bank++) {
- if(kbpage >= sp_banks[phys_bank].base_addr &&
- (kbpage <
- (sp_banks[phys_bank].base_addr + sp_banks[phys_bank].num_bytes)))
- break; /* found it */
- }
- srmmu_bank = 0;
- srmmu_map[srmmu_bank].vbase = KERNBASE;
- srmmu_map[srmmu_bank].pbase = sp_banks[phys_bank].base_addr;
- srmmu_map[srmmu_bank].size = sp_banks[phys_bank].num_bytes;
- if(kbpage != sp_banks[phys_bank].base_addr) {
- prom_printf("Detected PenguinPages, getting out of here.\n");
- prom_halt();
-#if 0
- srmmu_map[srmmu_bank].pbase = kbpage;
- srmmu_map[srmmu_bank].size -=
- (kbpage - sp_banks[phys_bank].base_addr);
-#endif
+ if(pstart & ~SRMMU_PGDIR_MASK) {
+ do_small_mapping(vstart, vend, pstart);
+ vstart = SRMMU_PGDIR_ALIGN(vend);
+ goto finish_up;
+ }
+ while(vstart < vend) {
+ unsigned long coverage, next_aligned;
+ if(vstart & ~SRMMU_PMD_MASK) {
+ next_aligned = SRMMU_PMD_ALIGN(vstart);
+ if(next_aligned <= vend) {
+ coverage = (next_aligned - vstart);
+ do_small_mapping(vstart, next_aligned, pstart);
+ } else {
+ coverage = (vend - vstart);
+ do_small_mapping(vstart, vend, pstart);
+ }
+ } else if(vstart & ~SRMMU_PGDIR_MASK) {
+ next_aligned = SRMMU_PGDIR_ALIGN(vstart);
+ if(next_aligned <= vend) {
+ coverage = (next_aligned - vstart);
+ do_medium_mapping(vstart, next_aligned, pstart);
+ } else {
+ coverage = (vend - vstart);
+ do_small_mapping(vstart, vend, pstart);
+ }
+ } else {
+ coverage = SRMMU_PGDIR_SIZE;
+ if(large_pte_optimize || ((vstart+coverage)<=vend)) {
+ do_large_mapping(vstart, pstart);
+ } else {
+ coverage = (vend - vstart);
+ do_small_mapping(vstart, vend, pstart);
+ }
+ }
+ vstart += coverage; pstart += coverage;
}
- /* Prom didn't map all of this first bank, fill
- * in the rest by hand.
- */
- while(start < (srmmu_map[srmmu_bank].vbase + srmmu_map[srmmu_bank].size)) {
- unsigned long pteval;
+finish_up:
+ srmmu_map[srmmu_bank].vbase = vbase;
+ srmmu_map[srmmu_bank].pbase = sp_banks[sp_entry].base_addr;
+ srmmu_map[srmmu_bank].size = sp_banks[sp_entry].num_bytes;
+ MKTRACE(("SRMMUBANK[v<%08lx>p<%08lx>s<%08lx>]", vbase, sp_banks[sp_entry].base_addr, sp_banks[sp_entry].num_bytes));
+ srmmu_bank++;
+ return vstart;
+}
- pgdp = srmmu_pgd_offset(init_task.mm, start);
- pmdp = srmmu_early_pmd_offset(pgdp, start);
- ptep = srmmu_early_pte_offset(pmdp, start);
+static inline void memprobe_error(char *msg)
+{
+ prom_printf(msg);
+ prom_printf("Halting now...\n");
+ prom_halt();
+}
- pteval = (start - KERNBASE + srmmu_map[srmmu_bank].pbase) >> 4;
- pteval |= (SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV);
- pte_val(*ptep) = pteval;
- start += PAGE_SIZE;
- }
+/* Assumptions: The bank given to the kernel from the prom/bootloader
+ * is part of a full bank which is at least 4MB in size and begins at
+ * 0xf0000000 (ie. KERNBASE).
+ */
+static void map_kernel(void)
+{
+ unsigned long raw_pte, physpage;
+ unsigned long vaddr, tally, low_base;
+ char etaken[SPARC_PHYS_BANKS];
+ int entry;
+
+ /* Step 1: Clear out sp_banks taken map. */
+ MKTRACE(("map_kernel: clearing etaken vector... "));
+ for(entry = 0; entry < SPARC_PHYS_BANKS; entry++)
+ etaken[entry] = 0;
+
+ low_base = KERNBASE;
+
+ /* Step 2: Calculate 'lots_of_ram'. */
+ tally = 0;
+ for(entry = 0; sp_banks[entry].num_bytes; entry++)
+ tally += sp_banks[entry].num_bytes;
+ if(tally >= (0xfd000000 - KERNBASE))
+ lots_of_ram = 1;
+ else
+ lots_of_ram = 0;
+ MKTRACE(("tally=%08lx lots_of_ram<%d>\n", tally, lots_of_ram));
- /* Mark this sp_bank invalid... */
- sp_banks[phys_bank].base_addr |= 1;
- srmmu_bank++;
+ /* Step 3: Fill in KERNBASE base pgd. Lots of sanity checking here. */
+ raw_pte = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
+ if((raw_pte & SRMMU_ET_MASK) != SRMMU_ET_PTE)
+ memprobe_error("Wheee, kernel not mapped at all by boot loader.\n");
+ physpage = (raw_pte & SRMMU_PTE_PMASK) << 4;
+ physpage -= PAGE_SIZE;
+ if(physpage & ~(SRMMU_PGDIR_MASK))
+ memprobe_error("Wheee, kernel not mapped on 16MB physical boundry.\n");
+ entry = find_in_spbanks(physpage);
+ if(entry == -1 || (sp_banks[entry].base_addr != physpage))
+ memprobe_error("Kernel mapped in non-existant memory.\n");
+ MKTRACE(("map_kernel: map_spbank(vbase=%08x, entry<%d>)[%08lx,%08lx]\n", KERNBASE, entry, sp_banks[entry].base_addr, sp_banks[entry].num_bytes));
+ if(((KERNBASE + (sp_banks[entry].num_bytes)) > 0xfd000000) ||
+ ((KERNBASE + (sp_banks[entry].num_bytes)) < KERNBASE)) {
+ unsigned long orig_base = sp_banks[entry].base_addr;
+ unsigned long orig_len = sp_banks[entry].num_bytes;
+ unsigned long can_map = (0xfd000000 - KERNBASE);
+
+ /* Map a partial bank in this case, adjust the base
+ * and the length, but don't mark it used.
+ */
+ sp_banks[entry].num_bytes = can_map;
+ MKTRACE(("wheee really big mapping [%08lx,%08lx]", orig_base, can_map));
+ vaddr = map_spbank(KERNBASE, entry);
+ MKTRACE(("vaddr now %08lx ", vaddr));
+ sp_banks[entry].base_addr = orig_base + can_map;
+ sp_banks[entry].num_bytes = orig_len - can_map;
+ MKTRACE(("adjust[%08lx,%08lx]\n", (orig_base + can_map), (orig_len - can_map)));
+ MKTRACE(("map_kernel: skipping first loop\n"));
+ goto loop_skip;
+ }
+ vaddr = map_spbank(KERNBASE, entry);
+ etaken[entry] = 1;
+
+ /* Step 4: Map what we can above KERNBASE. */
+ MKTRACE(("map_kernel: vaddr=%08lx, entering first loop\n", vaddr));
+ for(;;) {
+ unsigned long bank_size;
+
+ MKTRACE(("map_kernel: ffsp()"));
+ entry = find_free_spbank(&etaken[0]);
+ bank_size = sp_banks[entry].num_bytes;
+ MKTRACE(("<%d> base=%08lx bs=%08lx ", entry, sp_banks[entry].base_addr, bank_size));
+ if(!bank_size)
+ break;
+ if(((vaddr + bank_size) >= 0xfd000000) ||
+ ((vaddr + bank_size) < KERNBASE)) {
+ unsigned long orig_base = sp_banks[entry].base_addr;
+ unsigned long orig_len = sp_banks[entry].num_bytes;
+ unsigned long can_map = (0xfd000000 - vaddr);
- /* Now, deal with what is left. */
- while(start < end) {
- unsigned long baddr;
- int btg;
+ /* Map a partial bank in this case, adjust the base
+ * and the length, but don't mark it used.
+ */
+ sp_banks[entry].num_bytes = can_map;
+ MKTRACE(("wheee really big mapping [%08lx,%08lx]", orig_base, can_map));
+ vaddr = map_spbank(vaddr, entry);
+ MKTRACE(("vaddr now %08lx ", vaddr));
+ sp_banks[entry].base_addr = orig_base + can_map;
+ sp_banks[entry].num_bytes = orig_len - can_map;
+ MKTRACE(("adjust[%08lx,%08lx]\n", (orig_base + can_map), (orig_len - can_map)));
+ break;
+ }
+ if(!bank_size)
+ break;
- /* Find a usable cluster of physical ram. */
- for(i=0; sp_banks[i].num_bytes != 0; i++)
- if(!(sp_banks[i].base_addr & 1))
- break;
- if(sp_banks[i].num_bytes == 0)
+ /* Ok, we can map this one, do it. */
+ MKTRACE(("map_spbank(%08lx,entry<%d>) ", vaddr, entry));
+ vaddr = map_spbank(vaddr, entry);
+ etaken[entry] = 1;
+ MKTRACE(("vaddr now %08lx\n", vaddr));
+ }
+ MKTRACE(("\n"));
+ /* If not lots_of_ram, assume we did indeed map it all above. */
+loop_skip:
+ if(!lots_of_ram)
+ goto check_and_return;
+
+ /* Step 5: Map the rest (if any) right below KERNBASE. */
+ MKTRACE(("map_kernel: doing low mappings... "));
+ tally = 0;
+ for(entry = 0; sp_banks[entry].num_bytes; entry++) {
+ if(!etaken[entry])
+ tally += SRMMU_PGDIR_ALIGN(sp_banks[entry].num_bytes);
+ }
+ if(!tally)
+ memprobe_error("Whee, lots_of_ram yet no low pages to map.\n");
+ low_base = (KERNBASE - tally);
+ MKTRACE(("tally=%08lx low_base=%08lx\n", tally, low_base));
+
+ /* Ok, now map 'em. */
+ MKTRACE(("map_kernel: Allocate pt skeleton (%08lx, %08x)\n",low_base,KERNBASE));
+ srmmu_allocate_ptable_skeleton(low_base, KERNBASE);
+ vaddr = low_base;
+ MKTRACE(("map_kernel: vaddr=%08lx Entering second loop for low maps.\n", vaddr));
+ for(;;) {
+ unsigned long bank_size;
+
+ entry = find_free_spbank(&etaken[0]);
+ bank_size = sp_banks[entry].num_bytes;
+ MKTRACE(("map_kernel: e<%d> base=%08lx bs=%08lx ", entry, sp_banks[entry].base_addr, bank_size));
+ if(!bank_size)
break;
+ if((vaddr + bank_size) > KERNBASE)
+ memprobe_error("Wheee, kernel low mapping overflow.\n");
+ MKTRACE(("map_spbank(%08lx, %d) ", vaddr, entry));
+ vaddr = map_spbank(vaddr, entry);
+ etaken[entry] = 1;
+ tally -= SRMMU_PGDIR_ALIGN(bank_size);
+ MKTRACE(("Now, vaddr=%08lx tally=%08lx\n", vaddr, tally));
+ }
+ MKTRACE(("\n"));
+ if(tally)
+ memprobe_error("Wheee, did not map all of low mappings.\n");
+check_and_return:
+ /* Step 6: Sanity check, make sure we did it all. */
+ MKTRACE(("check_and_return: "));
+ for(entry = 0; sp_banks[entry].num_bytes; entry++) {
+ MKTRACE(("e[%d]=%d ", entry, etaken[entry]));
+ if(!etaken[entry]) {
+ MKTRACE(("oops\n"));
+ memprobe_error("Some bank did not get mapped.\n");
+ }
+ }
+ MKTRACE(("success\n"));
+ init_task.mm->mmap->vm_start = page_offset = low_base;
+ return; /* SUCCESS! */
+}
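
To see what steps 2 through 5 work out to numerically: RAM that does not fit between KERNBASE and the 0xfd000000 IO boundary is mapped in pgdir-aligned chunks that end exactly at KERNBASE, and that lower address becomes the new page_offset. A rough standalone sketch with an assumed single 256MB bank (the real code walks sp_banks[] and aligns each leftover bank separately):

#include <stdio.h>

#define KERNBASE             0xf0000000UL
#define HIGH_LIMIT           0xfd000000UL      /* sun4m IO space starts here */
#define SRMMU_PGDIR_SIZE     0x1000000UL
#define SRMMU_PGDIR_ALIGN(a) (((a) + SRMMU_PGDIR_SIZE - 1) & ~(SRMMU_PGDIR_SIZE - 1))

int main(void)
{
        unsigned long ram = 0x10000000UL;                 /* assumed 256MB of RAM */
        unsigned long high_room = HIGH_LIMIT - KERNBASE;  /* 208MB above KERNBASE */
        int lots_of_ram = (ram >= high_room);
        unsigned long leftover = lots_of_ram ? ram - high_room : 0;
        unsigned long low_base = KERNBASE - SRMMU_PGDIR_ALIGN(leftover);

        /* Prints: lots_of_ram=1 leftover=03000000 low_base=ed000000 */
        printf("lots_of_ram=%d leftover=%08lx low_base=%08lx\n",
               lots_of_ram, leftover, low_base);
        return 0;
}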
- /* Add it to srmmu_map */
- srmmu_map[srmmu_bank].vbase = start;
- srmmu_map[srmmu_bank].pbase = sp_banks[i].base_addr;
- srmmu_map[srmmu_bank].size = sp_banks[i].num_bytes;
- srmmu_bank++;
-
- btg = sp_banks[i].num_bytes;
- baddr = sp_banks[i].base_addr;
- while(btg) {
- pgdp = srmmu_pgd_offset(init_task.mm, start);
- pmdp = srmmu_early_pmd_offset(pgdp, start);
- ptep = srmmu_early_pte_offset(pmdp, start);
- pte_val(*ptep) = (SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV);
- pte_val(*ptep) |= (baddr >> 4);
+unsigned long srmmu_endmem_fixup(unsigned long mem_end_now)
+{
+ unsigned long tally = 0;
+ int i;
- baddr += PAGE_SIZE;
- start += PAGE_SIZE;
- btg -= PAGE_SIZE;
- }
- sp_banks[i].base_addr |= 1;
- }
- if(start < end) {
- prom_printf("weird, didn't use all of physical memory... ");
- prom_halt();
- }
- for(phys_bank = 0; sp_banks[phys_bank].num_bytes != 0; phys_bank++)
- sp_banks[phys_bank].base_addr &= ~1;
-#if 0
- for(i = 0; srmmu_map[i].size != 0; i++) {
- prom_printf("srmmu_map[%d]: vbase=%08lx pbase=%08lx size=%d\n",
- i, srmmu_map[i].vbase,
- srmmu_map[i].pbase, srmmu_map[i].size);
- }
- prom_getchar();
- for(i = 0; sp_banks[i].num_bytes != 0; i++) {
- prom_printf("sp_banks[%d]: base_addr=%08lx num_bytes=%d\n",
- i,
- sp_banks[i].base_addr,
- sp_banks[i].num_bytes);
+ for(i = 0; sp_banks[i].num_bytes; i++)
+ tally += SRMMU_PGDIR_ALIGN(sp_banks[i].num_bytes);
+ if(tally < (0x0d000000UL)) {
+ return KERNBASE + tally;
+ } else {
+ return 0xfd000000UL;
}
- prom_getchar();
- prom_halt();
-#endif
}
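
The fixup above caps the virtually contiguous end of memory at the 0xfd000000 IO boundary; for smaller machines it is simply KERNBASE plus the pgdir-aligned bank total. Two assumed data points as a quick standalone check (helper name is illustrative only):

#include <stdio.h>

#define KERNBASE   0xf0000000UL
#define HIGH_LIMIT 0xfd000000UL

static unsigned long endmem_sketch(unsigned long aligned_ram)
{
        /* Same cap as srmmu_endmem_fixup(): memory above KERNBASE stops
         * at the IO space boundary no matter how much RAM there is. */
        return (aligned_ram < HIGH_LIMIT - KERNBASE)
                ? KERNBASE + aligned_ram : HIGH_LIMIT;
}

int main(void)
{
        printf("%08lx\n", endmem_sketch(0x04000000UL));  /* 64MB  -> f4000000 */
        printf("%08lx\n", endmem_sketch(0x20000000UL));  /* 512MB -> fd000000 */
        return 0;
}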
/* Paging initialization on the Sparc Reference MMU. */
@@ -1809,18 +2596,15 @@
unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
{
- unsigned long ptables_start, first_mapped_page;
+ unsigned long ptables_start;
int i, cpunode;
char node_str[128];
- pgd_t *pgdp;
- pmd_t *pmdp;
- pte_t *ptep;
- physmem_mapped_contig = 0; /* for init.c:taint_real_pages() */
+ sparc_iobase_vaddr = 0xfd000000; /* 16MB of IOSPACE on all sun4m's. */
+ physmem_mapped_contig = 0; /* for init.c:taint_real_pages() */
#if CONFIG_AP1000
- printk("Forcing num_contexts to 1024\n");
- num_contexts = 1024;
+ num_contexts = AP_NUM_CONTEXTS;
#else
/* Find the number of contexts on the srmmu. */
cpunode = prom_getchild(prom_root_node);
@@ -1840,65 +2624,60 @@
ptables_start = mempool = PAGE_ALIGN(start_mem);
memset(swapper_pg_dir, 0, PAGE_SIZE);
- first_mapped_page = KERNBASE;
- kbpage = srmmu_hwprobe(KERNBASE);
- if((kbpage & SRMMU_ET_MASK) != SRMMU_ET_PTE) {
- kbpage = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
- kbpage = (kbpage & SRMMU_PTE_PMASK) << 4;
- kbpage -= PAGE_SIZE;
- first_mapped_page += PAGE_SIZE;
- } else
- kbpage = (kbpage & SRMMU_PTE_PMASK) << 4;
+ kbpage = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
+ kbpage = (kbpage & SRMMU_PTE_PMASK) << 4;
+ kbpage -= PAGE_SIZE;
srmmu_allocate_ptable_skeleton(KERNBASE, end_mem);
#if CONFIG_SUN_IO
- srmmu_allocate_ptable_skeleton(IOBASE_VADDR, IOBASE_END);
+ srmmu_allocate_ptable_skeleton(sparc_iobase_vaddr, IOBASE_END);
srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
#endif
- /* Steal DVMA pages now, I still don't like how we waste all this. */
mempool = PAGE_ALIGN(mempool);
- first_dvma_page = mempool;
- last_dvma_page = (mempool + (DVMA_LEN) - PAGE_SIZE);
- mempool = last_dvma_page + PAGE_SIZE;
-
#if CONFIG_AP1000
ap_inherit_mappings();
#else
srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
#endif
- srmmu_map_kernel(first_mapped_page, end_mem);
-#if CONFIG_SUN_IO
- srmmu_map_dvma_pages_for_cpu(first_dvma_page, last_dvma_page);
+ map_kernel();
+#if CONFIG_AP1000
+ /* the MSC wants this aligned on a 16k boundary */
+ srmmu_context_table =
+ sparc_init_alloc(&mempool,
+ num_contexts*sizeof(ctxd_t)<0x4000?
+ 0x4000:
+ num_contexts*sizeof(ctxd_t));
+#else
+ srmmu_context_table = sparc_init_alloc(&mempool, num_contexts*sizeof(ctxd_t));
#endif
- srmmu_context_table = srmmu_init_alloc(&mempool, num_contexts*sizeof(ctxd_t));
srmmu_ctx_table_phys = (ctxd_t *) srmmu_v2p((unsigned long) srmmu_context_table);
for(i = 0; i < num_contexts; i++)
ctxd_set(&srmmu_context_table[i], swapper_pg_dir);
start_mem = PAGE_ALIGN(mempool);
- /* Some SRMMU's are _very_ stupid indeed. */
- if(!can_cache_ptables) {
- for( ; ptables_start < start_mem; ptables_start += PAGE_SIZE) {
- pgdp = srmmu_pgd_offset(init_task.mm, ptables_start);
- pmdp = srmmu_early_pmd_offset(pgdp, ptables_start);
- ptep = srmmu_early_pte_offset(pmdp, ptables_start);
- pte_val(*ptep) &= ~SRMMU_CACHE;
- }
+ flush_cache_all();
+ if(flush_page_for_dma == viking_no_mxcc_flush_page) {
+ unsigned long start = ptables_start;
+ unsigned long end = start_mem;
- pgdp = srmmu_pgd_offset(init_task.mm, (unsigned long)swapper_pg_dir);
- pmdp = srmmu_early_pmd_offset(pgdp, (unsigned long)swapper_pg_dir);
- ptep = srmmu_early_pte_offset(pmdp, (unsigned long)swapper_pg_dir);
- pte_val(*ptep) &= ~SRMMU_CACHE;
+ while(start < end) {
+ viking_no_mxcc_flush_page(start);
+ start += PAGE_SIZE;
+ }
}
-
- flush_cache_all();
srmmu_set_ctable_ptr((unsigned long) srmmu_ctx_table_phys);
flush_tlb_all();
poke_srmmu();
+#if CONFIG_AP1000
+ /* on the AP we don't put the top few contexts into the free
+ context list as these are reserved for parallel tasks */
+ start_mem = sparc_context_init(start_mem, MPP_CONTEXT_BASE);
+#else
start_mem = sparc_context_init(start_mem, num_contexts);
+#endif
start_mem = free_area_init(start_mem, end_mem);
return PAGE_ALIGN(start_mem);
@@ -1914,19 +2693,24 @@
"invrnge\t\t: %d\n"
"invpg\t\t: %d\n"
"contexts\t: %d\n"
- "big_chunks\t: %d\n"
- "little_chunks\t: %d\n",
- srmmu_name,
+#ifdef USE_CHUNK_ALLOC
+ "big chunks\t: %d\n"
+ "little chunks\t: %d\n"
+ "chunk pages\t: %d\n"
+ "garbage\t\t: %d\n"
+ "garbage hits\t: %d\n"
+#endif
+ , srmmu_name,
module_stats.invall,
module_stats.invmm,
module_stats.invrnge,
module_stats.invpg,
- num_contexts,
-#if 0
- num_big_chunks,
- num_little_chunks
-#else
- 0, 0
+ num_contexts
+#ifdef USE_CHUNK_ALLOC
+ , bcwater, lcwater,
+ chunk_pages,
+ garbage_calls,
+ clct_pages
#endif
);
return srmmuinfo;
@@ -1938,16 +2722,13 @@
static void srmmu_exit_hook(void)
{
- struct ctx_list *ctx_old;
struct mm_struct *mm = current->mm;
- if(mm->context != NO_CONTEXT) {
+ if(mm->context != NO_CONTEXT && mm->count == 1) {
flush_cache_mm(mm);
ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
flush_tlb_mm(mm);
- ctx_old = ctx_list_pool + mm->context;
- remove_from_ctx_list(ctx_old);
- add_to_free_ctxlist(ctx_old);
+ free_context(mm->context);
mm->context = NO_CONTEXT;
}
}
@@ -1955,18 +2736,64 @@
static void srmmu_flush_hook(void)
{
if(current->tss.flags & SPARC_FLAG_KTHREAD) {
- alloc_context(current->mm);
+ alloc_context(current);
+ flush_cache_mm(current->mm);
ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
+ flush_tlb_mm(current->mm);
srmmu_set_context(current->mm->context);
}
}
+static void srmmu_vac_update_mmu_cache(struct vm_area_struct * vma,
+ unsigned long address, pte_t pte)
+{
+#if 0
+ struct inode *inode;
+ struct vm_area_struct *vmaring;
+ unsigned long offset, vaddr;
+ unsigned long start;
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ if (!(vma->vm_flags & VM_WRITE) ||
+ !(vma->vm_flags & VM_SHARED))
+ return;
+
+ inode = vma->vm_inode;
+ if (!inode)
+ return;
+
+ offset = (address & PAGE_MASK) - vma->vm_start;
+ vmaring = inode->i_mmap;
+ do {
+ vaddr = vmaring->vm_start + offset;
+
+ if ((vaddr ^ address) & vac_badbits) {
+ start = vma->vm_start;
+ while (start < vma->vm_end) {
+ pgdp = srmmu_pgd_offset(vma->vm_mm, start);
+ pmdp = srmmu_pmd_offset(pgdp, start);
+ ptep = srmmu_pte_offset(pmdp, start);
+
+ flush_cache_page_to_uncache(start);
+ set_pte(ptep, __pte((pte_val(*ptep) &
+ ~SRMMU_CACHE)));
+ flush_tlb_page_for_cbit(start);
+
+ start += PAGE_SIZE;
+ }
+ return;
+ }
+ } while ((vmaring = vmaring->vm_next_share) != inode->i_mmap);
+#endif
+}
+
static void hypersparc_exit_hook(void)
{
- struct ctx_list *ctx_old;
struct mm_struct *mm = current->mm;
- if(mm->context != NO_CONTEXT) {
+ if(mm->context != NO_CONTEXT && mm->count == 1) {
/* HyperSparc is copy-back, any data for this
* process in a modified cache line is stale
* and must be written back to main memory now
@@ -1975,9 +2802,7 @@
flush_cache_mm(mm);
ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
flush_tlb_mm(mm);
- ctx_old = ctx_list_pool + mm->context;
- remove_from_ctx_list(ctx_old);
- add_to_free_ctxlist(ctx_old);
+ free_context(mm->context);
mm->context = NO_CONTEXT;
}
}
@@ -1985,21 +2810,52 @@
static void hypersparc_flush_hook(void)
{
if(current->tss.flags & SPARC_FLAG_KTHREAD) {
- alloc_context(current->mm);
+ alloc_context(current);
flush_cache_mm(current->mm);
ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
+ flush_tlb_mm(current->mm);
srmmu_set_context(current->mm->context);
}
}
/* Init various srmmu chip types. */
-void srmmu_is_bad(void)
+static void srmmu_is_bad(void)
{
prom_printf("Could not determine SRMMU chip type.\n");
prom_halt();
}
-void poke_hypersparc(void)
+static void init_vac_layout(void)
+{
+ int nd, cache_lines;
+ char node_str[128];
+
+ nd = prom_getchild(prom_root_node);
+ while((nd = prom_getsibling(nd)) != 0) {
+ prom_getstring(nd, "device_type", node_str, sizeof(node_str));
+ if(!strcmp(node_str, "cpu"))
+ break;
+ }
+ if(nd == 0) {
+ prom_printf("No CPU nodes found, halting.\n");
+ prom_halt();
+ }
+
+ vac_line_size = prom_getint(nd, "cache-line-size");
+ if (vac_line_size == -1) {
+ prom_printf("can't determine cache-line-size, halting.\n");
+ prom_halt();
+ }
+ cache_lines = prom_getint(nd, "cache-nlines");
+ if (cache_lines == -1) {
+ prom_printf("can't determine cache-nlines, halting.\n");
+ prom_halt();
+ }
+ vac_cache_size = cache_lines * vac_line_size;
+ vac_badbits = (vac_cache_size - 1) & PAGE_MASK;
+}
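
init_vac_layout() reads the cache line size and line count from the PROM and derives vac_badbits, the virtual address bits that index the cache above the page offset; srmmu_vac_update_mmu_cache() above would use exactly (vaddr ^ address) & vac_badbits to spot illegal aliases between shared writable mappings. A small standalone sketch with assumed HyperSparc-like figures:

#include <stdio.h>

#define PAGE_SIZE 0x1000UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        /* Assumed figures: 64-byte lines, 4096 of them (a 256KB VAC). */
        unsigned long vac_line_size  = 64;
        unsigned long cache_lines    = 4096;
        unsigned long vac_cache_size = cache_lines * vac_line_size;      /* 0x40000 */
        unsigned long vac_badbits    = (vac_cache_size - 1) & PAGE_MASK; /* 0x3f000 */

        /* Two mappings of the same page alias in a virtually indexed cache
         * when they differ in the index bits above the page offset. */
        unsigned long va1 = 0x50000000UL, va2 = 0x50008000UL;
        printf("vac_cache_size=%08lx vac_badbits=%08lx alias=%d\n",
               vac_cache_size, vac_badbits,
               ((va1 ^ va2) & vac_badbits) != 0);
        return 0;
}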
+
+static void poke_hypersparc(void)
{
volatile unsigned long clear;
unsigned long mreg = srmmu_get_mmureg();
@@ -2019,20 +2875,13 @@
clear = srmmu_get_fstatus();
}
-void init_hypersparc(void)
+static void init_hypersparc(void)
{
- unsigned long mreg = srmmu_get_mmureg();
-
srmmu_name = "ROSS HyperSparc";
- can_cache_ptables = 0;
- if(mreg & HYPERSPARC_CSIZE) {
- hyper_cache_size = (256 * 1024);
- hyper_line_size = 64;
- } else {
- hyper_cache_size = (128 * 1024);
- hyper_line_size = 32;
- }
+ init_vac_layout();
+
+ set_pte = srmmu_set_pte_nocache_hyper;
flush_cache_all = hypersparc_flush_cache_all;
flush_cache_mm = hypersparc_flush_cache_mm;
flush_cache_range = hypersparc_flush_cache_range;
@@ -2052,41 +2901,67 @@
switch_to_context = hypersparc_switch_to_context;
mmu_exit_hook = hypersparc_exit_hook;
mmu_flush_hook = hypersparc_flush_hook;
+ update_mmu_cache = srmmu_vac_update_mmu_cache;
sparc_update_rootmmu_dir = hypersparc_update_rootmmu_dir;
- set_pte = hypersparc_set_pte;
poke_srmmu = poke_hypersparc;
}
-void poke_cypress(void)
+static void poke_cypress(void)
{
unsigned long mreg = srmmu_get_mmureg();
+ unsigned long faddr;
+ volatile unsigned long clear;
+
+ clear = srmmu_get_faddr();
+ clear = srmmu_get_fstatus();
+
+ for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
+ __asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
+ "sta %%g0, [%0] %2\n\t" : :
+ "r" (faddr), "r" (0x40000),
+ "i" (ASI_M_DATAC_TAG));
+ }
+
+ /* And one more, for our good neighbor, Mr. Broken Cypress. */
+ clear = srmmu_get_faddr();
+ clear = srmmu_get_fstatus();
- mreg &= ~CYPRESS_CMODE;
- mreg |= CYPRESS_CENABLE;
+ mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
srmmu_set_mmureg(mreg);
}
-void init_cypress_common(void)
+static void init_cypress_common(void)
{
- can_cache_ptables = 0;
+ init_vac_layout();
+
+ set_pte = srmmu_set_pte_nocache_cypress;
+ flush_cache_all = cypress_flush_cache_all;
+ flush_cache_mm = cypress_flush_cache_mm;
+ flush_cache_range = cypress_flush_cache_range;
+ flush_cache_page = cypress_flush_cache_page;
+
flush_tlb_all = cypress_flush_tlb_all;
flush_tlb_mm = cypress_flush_tlb_mm;
flush_tlb_page = cypress_flush_tlb_page;
flush_tlb_range = cypress_flush_tlb_range;
- poke_srmmu = poke_cypress;
- /* XXX Need to write cache flushes for this one... XXX */
+ flush_page_to_ram = cypress_flush_page_to_ram;
+ flush_page_for_dma = cypress_flush_page_for_dma;
+ flush_cache_page_to_uncache = cypress_flush_page_to_uncache;
+ flush_tlb_page_for_cbit = cypress_flush_tlb_page_for_cbit;
+ update_mmu_cache = srmmu_vac_update_mmu_cache;
+ poke_srmmu = poke_cypress;
}
-void init_cypress_604(void)
+static void init_cypress_604(void)
{
srmmu_name = "ROSS Cypress-604(UP)";
srmmu_modtype = Cypress;
init_cypress_common();
}
-void init_cypress_605(unsigned long mrev)
+static void init_cypress_605(unsigned long mrev)
{
srmmu_name = "ROSS Cypress-605(MP)";
if(mrev == 0xe) {
@@ -2103,7 +2978,7 @@
init_cypress_common();
}
-void poke_swift(void)
+static void poke_swift(void)
{
unsigned long mreg = srmmu_get_mmureg();
@@ -2124,7 +2999,7 @@
}
#define SWIFT_MASKID_ADDR 0x10003018
-void init_swift(void)
+static void init_swift(void)
{
unsigned long swift_rev;
@@ -2195,7 +3070,7 @@
poke_srmmu = poke_swift;
}
-void poke_tsunami(void)
+static void poke_tsunami(void)
{
unsigned long mreg = srmmu_get_mmureg();
@@ -2206,7 +3081,7 @@
srmmu_set_mmureg(mreg);
}
-void init_tsunami(void)
+static void init_tsunami(void)
{
/* Tsunami's pretty sane, Sun and TI actually got it
* somewhat right this time. Fujitsu should have
@@ -2215,7 +3090,6 @@
srmmu_name = "TI Tsunami";
srmmu_modtype = Tsunami;
- can_cache_ptables = 1;
flush_cache_all = tsunami_flush_cache_all;
flush_cache_mm = tsunami_flush_cache_mm;
@@ -2235,28 +3109,24 @@
poke_srmmu = poke_tsunami;
}
-void poke_viking(void)
+static void poke_viking(void)
{
unsigned long mreg = srmmu_get_mmureg();
static int smp_catch = 0;
if(viking_mxcc_present) {
- unsigned long mxcc_control;
+ unsigned long mxcc_control = mxcc_get_creg();
- __asm__ __volatile__("set -1, %%g2\n\t"
- "set -1, %%g3\n\t"
- "stda %%g2, [%1] %2\n\t"
- "lda [%3] %2, %0\n\t" :
- "=r" (mxcc_control) :
- "r" (MXCC_EREG), "i" (ASI_M_MXCC),
- "r" (MXCC_CREG) : "g2", "g3");
mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
- mxcc_control &= ~(MXCC_CTL_PARE | MXCC_CTL_RRC);
- mreg &= ~(VIKING_PCENABLE);
- __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
- "r" (mxcc_control), "r" (MXCC_CREG),
- "i" (ASI_M_MXCC));
- srmmu_set_mmureg(mreg);
+ mxcc_control &= ~(MXCC_CTL_RRC);
+ mxcc_set_creg(mxcc_control);
+
+ /* We don't need memory parity checks.
+ * XXX This is a mess, have to dig out later. ecd.
+ viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
+ */
+
+ /* We do cache ptables on MXCC. */
mreg |= VIKING_TCENABLE;
} else {
unsigned long bpreg;
@@ -2275,12 +3145,6 @@
}
}
- viking_unlock_icache();
- viking_flush_icache();
-#if 0
- viking_unlock_dcache();
- viking_flush_dcache();
-#endif
mreg |= VIKING_SPENABLE;
mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
mreg |= VIKING_SBENABLE;
@@ -2288,13 +3152,21 @@
#if CONFIG_AP1000
mreg &= ~(VIKING_SBENABLE);
#endif
+ srmmu_set_mmureg(mreg);
+
+
#ifdef __SMP__
- mreg &= ~(VIKING_SBENABLE);
+ /* Avoid unnecessary cross calls. */
+ flush_cache_all = local_flush_cache_all;
+ flush_page_to_ram = local_flush_page_to_ram;
+ flush_page_for_dma = local_flush_page_for_dma;
+ if (viking_mxcc_present) {
+ flush_cache_page_to_uncache = local_flush_cache_page_to_uncache;
+ }
#endif
- srmmu_set_mmureg(mreg);
}
-void init_viking(void)
+static void init_viking(void)
{
unsigned long mreg = srmmu_get_mmureg();
@@ -2305,7 +3177,7 @@
srmmu_name = "TI Viking";
viking_mxcc_present = 0;
- can_cache_ptables = 0;
+ set_pte = srmmu_set_pte_nocache_nomxccvik;
bpreg = viking_get_bpreg();
bpreg &= ~(VIKING_ACTION_MIX);
@@ -2314,11 +3186,21 @@
msi_set_sync();
flush_cache_page_to_uncache = viking_no_mxcc_flush_page;
+
+ /* We need this to make sure old viking takes no hits
+ * on it's cache for dma snoops to workaround the
+ * "load from non-cacheable memory" interrupt bug.
+ * This is only necessary because of the new way in
+ * which we use the IOMMU.
+ */
+ flush_page_for_dma = viking_no_mxcc_flush_page;
} else {
srmmu_name = "TI Viking/MXCC";
viking_mxcc_present = 1;
- can_cache_ptables = 1;
flush_cache_page_to_uncache = viking_mxcc_flush_page;
+
+ /* MXCC vikings lack the DMA snooping bug. */
+ flush_page_for_dma = viking_flush_page_for_dma;
}
flush_cache_all = viking_flush_cache_all;
@@ -2332,7 +3214,6 @@
flush_tlb_range = viking_flush_tlb_range;
flush_page_to_ram = viking_flush_page_to_ram;
- flush_page_for_dma = viking_flush_page_for_dma;
flush_tlb_page_for_cbit = viking_flush_tlb_page_for_cbit;
poke_srmmu = poke_viking;
@@ -2364,6 +3245,8 @@
/* Uniprocessor Cypress */
init_cypress_604();
break;
+ case 12:
+ /* _REALLY OLD_ Cypress MP chips... */
case 13:
case 14:
case 15:
@@ -2476,7 +3359,7 @@
pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
/* Functions */
- set_pte = srmmu_set_pte;
+ set_pte = srmmu_set_pte_cacheable;
switch_to_context = srmmu_switch_to_context;
pmd_align = srmmu_pmd_align;
pgdir_align = srmmu_pgdir_align;
@@ -2503,6 +3386,7 @@
pgd_clear = srmmu_pgd_clear;
mk_pte = srmmu_mk_pte;
+ mk_pte_phys = srmmu_mk_pte_phys;
pgd_set = srmmu_pgd_set;
mk_pte_io = srmmu_mk_pte_io;
pte_modify = srmmu_pte_modify;
@@ -2539,6 +3423,8 @@
mmu_get_scsi_sgl = srmmu_get_scsi_sgl;
mmu_release_scsi_one = srmmu_release_scsi_one;
mmu_release_scsi_sgl = srmmu_release_scsi_sgl;
+
+ mmu_map_dma_area = srmmu_map_dma_area;
mmu_info = srmmu_mmu_info;
mmu_v2p = srmmu_v2p;