Subject: [PATCH] enable _PAGE_GLOBAL for module memory and vmalloc()
This patch enables _PAGE_GLOBAL for module memory and vmalloc().

The patch is tested and stable on a dual Pentium II. Could you test it
on other CPUs? The patch has no effect on Pentium and AMD-K6 CPUs:
they do not report the required CPU feature, so pgkern_mask stays zero
and the existing code paths are used.
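
If you want to check in advance whether a box will actually use the new
code path: the CPU advertises global-page support via CPUID leaf 1,
EDX bit 13 (PGE); PSE is bit 3. A throwaway user-space check (my own
illustration, not part of the patch) could look like this:

/*
 * pgecheck.c - report the CPUID feature bits relevant to this patch.
 * Illustrative only; assumes the CPU supports the CPUID instruction.
 * Build: gcc -o pgecheck pgecheck.c
 */
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 1: feature flags are returned in EDX.
	   Bit 13 = PGE (global pages), bit 3 = PSE (4MB pages). */
	__asm__ __volatile__("cpuid"
			     : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
			     : "a" (1));

	printf("PGE (global pages): %s\n", (edx & (1 << 13)) ? "yes" : "no");
	printf("PSE (4MB pages):    %s\n", (edx & (1 << 3))  ? "yes" : "no");
	return 0;
}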

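The interesting part is how global TLB entries get flushed: the
__flush_tlb_pge() macro added below briefly clears CR4.PGE around a CR3
reload, which invalidates both the global and the non-global entries.
Spelled out step by step as a standalone sketch (ring-0 only; the helper
name and constant below are mine, not the patch's code):

/* What __flush_tlb_pge() does, step by step. CR4.PGE is bit 7. */
#define SKETCH_CR4_PGE 0x0080UL

static inline void sketch_flush_tlb_global(void)
{
	unsigned long cr3, cr4;

	__asm__ __volatile__("mov %%cr4,%0" : "=r" (cr4));
	__asm__ __volatile__("mov %%cr3,%0" : "=r" (cr3));

	/* Clearing CR4.PGE drops the global TLB entries... */
	__asm__ __volatile__("mov %0,%%cr4" : : "r" (cr4 & ~SKETCH_CR4_PGE) : "memory");
	/* ...and reloading CR3 drops the non-global ones. */
	__asm__ __volatile__("mov %0,%%cr3" : : "r" (cr3) : "memory");
	/* Re-enable global pages and reload CR3 once more, as the macro does. */
	__asm__ __volatile__("mov %0,%%cr4" : : "r" (cr4 | SKETCH_CR4_PGE) : "memory");
	__asm__ __volatile__("mov %0,%%cr3" : : "r" (cr3) : "memory");
}
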
--
	Manfred

// $Header: /pub/cvs/ms/patches/patch-PAGE_GLOBAL,v 1.2 1999/09/27 14:43:34 manfreds Exp $
// Kernel Version:
// VERSION = 2
// PATCHLEVEL = 3
// SUBLEVEL = 18
// EXTRAVERSION =
diff -r -u -P -x CVS -x *,v 2.3/arch/i386/kernel/smp.c build-2.3/arch/i386/kernel/smp.c
--- 2.3/arch/i386/kernel/smp.c Tue Sep 7 23:35:20 1999
+++ build-2.3/arch/i386/kernel/smp.c Mon Sep 27 15:56:25 1999
@@ -1696,10 +1696,26 @@
flush_tlb_others(cpu_mask);
}

+static void flush_tlb_all_ipi(void * info)
+{
+ __flush_tlb_pge();
+}
+
void flush_tlb_all(void)
{
- flush_tlb_others(~(1 << current->processor));
- local_flush_tlb();
+ if(pgkern_mask) {
+ /* flush_tlb_all() is called very early,
+ do not call other CPUs unless
+ they are initialized. */
+ if(atomic_read(&smp_commenced))
+ smp_call_function (flush_tlb_all_ipi,0,1,1);
+
+ __flush_tlb_pge();
+ } else
+ {
+ flush_tlb_others(~(1 << current->processor));
+ local_flush_tlb();
+ }
}


diff -r -u -P -x CVS -x *,v 2.3/arch/i386/mm/init.c build-2.3/arch/i386/mm/init.c
--- 2.3/arch/i386/mm/init.c Tue Sep 7 23:35:21 1999
+++ build-2.3/arch/i386/mm/init.c Mon Sep 27 15:41:33 1999
@@ -222,8 +222,6 @@

pte = pte_offset(pmd_offset(pgd_offset_k(vaddr), vaddr), vaddr);
prot = PAGE_KERNEL;
- if (boot_cpu_data.x86_capability & X86_FEATURE_PGE)
- pgprot_val(prot) |= _PAGE_GLOBAL;
set_pte(pte, mk_pte_phys(phys, prot));

local_flush_tlb();
@@ -247,13 +245,19 @@
* This routines also unmaps the page at virtual kernel address 0, so
* that we can trap those pesky NULL-reference errors in the kernel.
*/
+
+/* additional bits for PAGE_KERNEL and PAGE_ROKERNEL */
+unsigned long pgkern_mask = 0;
+
unsigned long __init paging_init(unsigned long start_mem, unsigned long end_mem)
{
pgd_t * pg_dir;
pte_t * pg_table;
unsigned long tmp;
unsigned long address;
-
+ if (boot_cpu_data.x86_capability & X86_FEATURE_PGE) {
+ pgkern_mask |= _PAGE_GLOBAL;
+ }
/*
* Physical page 0 is special; it's not touched by Linux since BIOS
* and SMM (for laptops with [34]86/SL chips) may need it. It is read
diff -r -u -P -x CVS -x *,v 2.3/arch/i386/mm/ioremap.c build-2.3/arch/i386/mm/ioremap.c
--- 2.3/arch/i386/mm/ioremap.c Tue Jun 15 20:15:00 1999
+++ build-2.3/arch/i386/mm/ioremap.c Mon Sep 27 15:41:33 1999
@@ -23,8 +23,7 @@
do {
if (!pte_none(*pte))
printk("remap_area_pte: page already exists\n");
- set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
- _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
+ set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_KERNEL | flags)));
address += PAGE_SIZE;
phys_addr += PAGE_SIZE;
pte++;
diff -r -u -P -x CVS -x *,v 2.3/include/asm-i386/pgtable.h build-2.3/include/asm-i386/pgtable.h
--- 2.3/include/asm-i386/pgtable.h Thu Aug 26 17:42:12 1999
+++ build-2.3/include/asm-i386/pgtable.h Mon Sep 27 16:04:24 1999
@@ -49,13 +49,46 @@
#define __flush_tlb_one(addr) \
__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
#endif
-
+
+extern unsigned long pgkern_mask;
+
+#define __flush_tlb_pge() \
+ do { \
+ unsigned long tmp_cr3; \
+ unsigned long tmp_cr4; \
+ __asm__ __volatile__( \
+ "mov %%cr4,%0\n\t" \
+ "andl %2,%0\n\t" \
+ "mov %0,%%cr4\n\t" \
+ "mov %%cr3,%1\n\t" \
+ "mov %1,%%cr3\n\t" \
+ "orl %3,%0\n\t" \
+ "mov %0,%%cr4\n\t" \
+ "mov %1,%%cr3\n\t" \
+ : "=r" (tmp_cr4), "=r" (tmp_cr3) \
+ : "irg" (~X86_CR4_PGE), "irg" (X86_CR4_PGE) \
+ : "memory"); \
+ } while(0)
+
+
+#define __flush_tlb_all() \
+ do { \
+ if(pgkern_mask) { \
+ __flush_tlb_pge(); \
+ } else { \
+ __flush_tlb(); \
+ } \
+ } while(0)
+
+
#ifndef __SMP__

#define flush_tlb() __flush_tlb()
-#define flush_tlb_all() __flush_tlb()
+
#define local_flush_tlb() __flush_tlb()

+#define flush_tlb_all() __flush_tlb_all()
+
static inline void flush_tlb_mm(struct mm_struct *mm)
{
if (mm == current->active_mm)
@@ -173,14 +206,20 @@

#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define _PAGE_KERNEL (pgkern_mask | _PAGE_RW | \
+ _PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define _PAGE_KERNELRO (pgkern_mask | \
+ _PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
+
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_KERNELRO)

/*
* The i386 can't do page protection for execute, and considers that the same are read.