patch-2.2.16 linux/include/asm-s390/pgtable.h
- Lines: 180
- Date: Wed Jun 7 14:26:44 2000
- Orig file: v2.2.15/linux/include/asm-s390/pgtable.h
- Orig date: Wed May 3 17:16:51 2000
diff -urN v2.2.15/linux/include/asm-s390/pgtable.h linux/include/asm-s390/pgtable.h
@@ -58,21 +58,6 @@
#define __flush_tlb() \
do { __asm__ __volatile__("ptlb": : :"memory"); } while (0)
-
-static inline void __flush_global_tlb(void)
-{
- int cs1=0,dum=0;
- int *adr;
- long long dummy=0;
- adr = (int*) (((int)(((int*) &dummy)+1) & 0xfffffffc)|1);
- __asm__ __volatile__("lr 2,%0\n\t"
- "lr 3,%1\n\t"
- "lr 4,%2\n\t"
- ".long 0xb2500024" :
- : "d" (cs1), "d" (dum), "d" (adr)
- : "2", "3", "4");
-}
-
static inline void __flush_tlb_one(struct mm_struct *mm,
unsigned long addr);
@@ -94,11 +79,15 @@
__flush_tlb();
}
+#if 0 /* Arggh, ipte doesn't work correctly !! */
static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long addr)
+ unsigned long va)
{
- __flush_tlb_one(vma->vm_mm,addr);
+ __flush_tlb_one(vma->vm_mm,va);
}
+#else
+#define flush_tlb_page(vma, va) flush_tlb_all()
+#endif
static inline void flush_tlb_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
@@ -115,6 +104,28 @@
#include <asm/smp.h>
+static inline void __flush_global_tlb_csp(void)
+{
+ int cs1=0,dum=0;
+ int *adr;
+ long long dummy=0;
+ adr = (int*) (((int)(((int*) &dummy)+1) & 0xfffffffc)|1);
+ __asm__ __volatile__("lr 2,%0\n\t"
+ "lr 3,%1\n\t"
+ "lr 4,%2\n\t"
+ "csp 2,4" :
+ : "d" (cs1), "d" (dum), "d" (adr)
+ : "2", "3", "4");
+}
+
+static inline void __flush_global_tlb(void)
+{
+ if (MACHINE_HAS_CSP)
+ __flush_global_tlb_csp();
+ else
+ smp_ext_call_sync_others(ec_ptlb, NULL);
+}
+
#define local_flush_tlb() \
__flush_tlb()
@@ -127,8 +138,9 @@
static inline void flush_tlb_current_task(void)
{
- if ((atomic_read(&current->mm->count) != 1) ||
- (current->mm->cpu_vm_mask != (1UL << smp_processor_id()))) {
+ if ((smp_num_cpus > 1) &&
+ ((atomic_read(&current->mm->count) != 1) ||
+ (current->mm->cpu_vm_mask != (1UL << smp_processor_id())))) {
current->mm->cpu_vm_mask = (1UL << smp_processor_id());
__flush_global_tlb();
} else {
@@ -142,8 +154,9 @@
static inline void flush_tlb_mm(struct mm_struct * mm)
{
- if ((atomic_read(&mm->count) != 1) ||
- (mm->cpu_vm_mask != (1UL << smp_processor_id()))) {
+ if ((smp_num_cpus > 1) &&
+ ((atomic_read(&mm->count) != 1) ||
+ (mm->cpu_vm_mask != (1UL << smp_processor_id())))) {
mm->cpu_vm_mask = (1UL << smp_processor_id());
__flush_global_tlb();
} else {
@@ -151,17 +164,22 @@
}
}
+#if 0 /* Arggh, ipte doesn't work correctly !! */
static inline void flush_tlb_page(struct vm_area_struct * vma,
unsigned long va)
{
__flush_tlb_one(vma->vm_mm,va);
}
+#else
+#define flush_tlb_page(vma, va) flush_tlb_all()
+#endif
static inline void flush_tlb_range(struct mm_struct * mm,
unsigned long start, unsigned long end)
{
- if ((atomic_read(&mm->count) != 1) ||
- (mm->cpu_vm_mask != (1UL << smp_processor_id()))) {
+ if ((smp_num_cpus > 1) &&
+ ((atomic_read(&mm->count) != 1) ||
+ (mm->cpu_vm_mask != (1UL << smp_processor_id())))) {
mm->cpu_vm_mask = (1UL << smp_processor_id());
__flush_global_tlb();
} else {
@@ -290,8 +308,7 @@
/*
* No mapping available
*/
-#define PAGE_NONE __pgprot(_PAGE_INVALID )
-
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_INVALID)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_RO)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_RO)
@@ -386,7 +403,8 @@
} while (0)
-extern inline int pte_none(pte_t pte) { return ((pte_val(pte) & (_PAGE_INVALID | _PAGE_RO)) ==
+extern inline int pte_none(pte_t pte) { return ((pte_val(pte) &
+ (_PAGE_INVALID | _PAGE_RO | _PAGE_PRESENT)) ==
_PAGE_INVALID); }
extern inline int pte_present(pte_t pte) { return pte_val(pte) & _PAGE_PRESENT; }
extern inline void pte_clear(pte_t *ptep) { pte_val(*ptep) = _PAGE_INVALID; }
@@ -612,22 +630,27 @@
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
- if (pmd_none(*pmd)) {
- pte_t * page = (pte_t *) get_pte_fast();
- if (!page)
- return get_pte_kernel_slow(pmd, address);
- pmd_val(pmd[0]) = _KERNPG_TABLE + __pa(page);
- pmd_val(pmd[1]) = _KERNPG_TABLE + __pa(page+1024);
- pmd_val(pmd[2]) = _KERNPG_TABLE + __pa(page+2048);
- pmd_val(pmd[3]) = _KERNPG_TABLE + __pa(page+3072);
- return page + address;
- }
- if (pmd_bad(*pmd)) {
- __bad_pte_kernel(pmd);
- return NULL;
- }
+ if (pmd_none(*pmd))
+ goto getnew;
+ if (pmd_bad(*pmd))
+ goto fix;
return (pte_t *) pmd_page(*pmd) + address;
+getnew:
+{
+ unsigned long page = (unsigned long) get_pte_fast();
+
+ if (!page)
+ return get_pte_kernel_slow(pmd, address);
+ pmd_val(pmd[0]) = _KERNPG_TABLE + __pa(page);
+ pmd_val(pmd[1]) = _KERNPG_TABLE + __pa(page+1024);
+ pmd_val(pmd[2]) = _KERNPG_TABLE + __pa(page+2048);
+ pmd_val(pmd[3]) = _KERNPG_TABLE + __pa(page+3072);
+ return (pte_t *) page + address;
+}
+fix:
+ __bad_pte_kernel(pmd);
+ return NULL;
}
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)