Subject: Re: [PATCH] add a clear_pages function to clear pages of higher order
On Thursday 17 March 2005 03:33, Christoph Lameter wrote:
> On Fri, 11 Mar 2005, Denis Vlasenko wrote:
>
> > Andi Kleen (iirc) says that non-temporal stores seem to be a
> > big win in microbenchmarks (and I second that), but they are
> > a net loss when we are going to use the zeroed page just after
> > zeroing. He recommends avoiding non-temporal stores.
> >
> > With this new page prezeroing infrastructure, that argument
> > most likely is not right anymore. Especially clearing of
> > high-order pages will definitely benefit from NT stores,
> > because they do not kill the L1 data cache in the process.
> >
> > I don't have a K8 and therefore cannot be 100% sure, but
> > I really doubt that the K8 optimizes "rep stosq" into _NT_ stores.
>
> Hmm. That would be interesting to know and may be necessary to justify
> the continued existence of this patch. I tried to get some numbers on
> the performance wins for zeroing larger pages with the patch as is (no
> NT stores) and came up with:
>
> Processor                           Performance Increase
> ----------------------------------------------------------------
> Itanium 2 1.3Ghz M1/R5              1.5%
> AMD Athlon 64 3200+ i386 mode       3%
> AMD Athlon 64 3200+ x86_64 mode     3.3%
>
> (this is if the zeroing engine is the cpu of course. Prezeroing
> may be done through some DMA gizmo independent of the cpu)
>
> Itanium has more extensive optimization capabilities and
> seems to be able to better cope with the loop logic for regular
> clear_page. Thus the improvement is even less on Itanium.
>
> Numbers obtained with the following patch, which allows one to get zeroing
> performance data from /proc/meminfo (just divide Cycles by Pages for
> clear_page and clear_pages):

Here is a patch which allows different page zeroing optimizations to be
selected and tested at runtime via sysctl.
It was run-tested back in the 2.6.8 timeframe and has been rediffed to 2.6.11.
Feel free to adapt it to your patch and test.
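
For reference, the patch registers a root-level sysctl named "pageop", so it
should show up as /proc/sys/pageop; change_ops() below maps 1=slow, 2=mmx,
3=3dnow, 4=sse. A throwaway user-space helper to flip the selector could look
like this (hypothetical, not part of the patch or the attached tarball):

#include <stdio.h>

/* Write a selector value (1=slow, 2=mmx, 3=3dnow, 4=sse) to the
 * "pageop" sysctl added by the patch below. */
int main(int argc, char **argv)
{
	FILE *f = fopen("/proc/sys/pageop", "w");

	if (!f) {
		perror("/proc/sys/pageop");
		return 1;
	}
	/* default to the 3DNow! (movntq) routines if no argument is given */
	fprintf(f, "%s\n", argc > 1 ? argv[1] : "3");
	fclose(f);
	return 0;
}

Note that the switch itself happens lazily: the new routines are installed on
the next clear_page()/copy_page() call made outside interrupt context.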

Also attached is a tarball with the microbenchmarking routines. There are two
result files. Duron:

normal_clear_page - took 8644 max, 8400 min cycles per page
repstosl_clear_page - took 8626 max, 8418 min cycles per page
movq_clear_page - took 8647 max, 8300 min cycles per page
movntq_clear_page - took 2777 max, 2720 min cycles per page

And amd64:
normal_clear_page - took 9427 max, 5781 min cycles per page
repstosl_clear_page - took 9305 max, 5680 min cycles per page
movq_clear_page - took 6167 max, 5576 min cycles per page
movntq_clear_page - took 5456 max, 2354 min cycles per page

NT stores are not about a 5% increase; they are a 200%-300% one, provided you
are OK with the fact that the zeroed page ends up evicted from the cache.
Luckily, that is exactly what you want with prezeroing.
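
The tarball itself is not reproduced here; per-page cycle numbers like the ones
above are the kind of result a simple rdtsc loop produces. A rough user-space
sketch along those lines (an illustration only, assuming a 4 KiB page and a CPU
with MMX plus movntq/sfence support, i.e. SSE or AMD extended MMX) would be:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PAGE_SIZE 4096
#define RUNS 1000

/* read the CPU timestamp counter */
static inline uint64_t rdtsc(void)
{
	uint32_t lo, hi;
	__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
	return ((uint64_t)hi << 32) | lo;
}

/* cached clear: plain memset() */
static void normal_clear_page(void *page)
{
	memset(page, 0, PAGE_SIZE);
}

/* non-temporal clear: 64 bytes of movntq per iteration, sfence at the end */
static void movntq_clear_page(void *page)
{
	char *p = page;
	int i;

	for (i = 0; i < PAGE_SIZE / 64; i++, p += 64)
		__asm__ __volatile__(
			"pxor   %%mm0, %%mm0\n\t"
			"movntq %%mm0,   (%0)\n\t"
			"movntq %%mm0,  8(%0)\n\t"
			"movntq %%mm0, 16(%0)\n\t"
			"movntq %%mm0, 24(%0)\n\t"
			"movntq %%mm0, 32(%0)\n\t"
			"movntq %%mm0, 40(%0)\n\t"
			"movntq %%mm0, 48(%0)\n\t"
			"movntq %%mm0, 56(%0)\n\t"
			: : "r" (p) : "memory");
	__asm__ __volatile__("sfence\n\temms" ::: "memory");
}

/* report min/max cycles per page over RUNS clears */
static void bench(const char *name, void (*clear)(void *), void *page)
{
	uint64_t t, min = (uint64_t)-1, max = 0;
	int i;

	for (i = 0; i < RUNS; i++) {
		t = rdtsc();
		clear(page);
		t = rdtsc() - t;
		if (t < min) min = t;
		if (t > max) max = t;
	}
	printf("%s - took %llu max, %llu min cycles per page\n",
		name, (unsigned long long)max, (unsigned long long)min);
}

int main(void)
{
	static char page[PAGE_SIZE] __attribute__((aligned(64)));

	bench("normal_clear_page", normal_clear_page, page);
	bench("movntq_clear_page", movntq_clear_page, page);
	return 0;
}

(In the kernel the MMX use has to be wrapped in kernel_fpu_begin()/
kernel_fpu_end(), as the routines in the patch do; in user space an emms after
the loop is enough.)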
--
vda
diff -urpN linux-2.6.11.src/arch/i386/lib/Makefile linux-2.6.11-nt.src/arch/i386/lib/Makefile
--- linux-2.6.11.src/arch/i386/lib/Makefile Tue Oct 19 00:53:10 2004
+++ linux-2.6.11-nt.src/arch/i386/lib/Makefile Fri Mar 18 11:30:51 2005
@@ -4,7 +4,7 @@


lib-y = checksum.o delay.o usercopy.o getuser.o memcpy.o strstr.o \
- bitops.o
+ bitops.o page_ops.o mmx_page.o sse_page.o

lib-$(CONFIG_X86_USE_3DNOW) += mmx.o
lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
diff -urpN linux-2.6.11.src/arch/i386/lib/mmx.c linux-2.6.11-nt.src/arch/i386/lib/mmx.c
--- linux-2.6.11.src/arch/i386/lib/mmx.c Tue Oct 19 00:54:23 2004
+++ linux-2.6.11-nt.src/arch/i386/lib/mmx.c Fri Mar 18 11:30:51 2005
@@ -120,280 +120,3 @@ void *_mmx_memcpy(void *to, const void *
kernel_fpu_end();
return p;
}
-
-#ifdef CONFIG_MK7
-
-/*
- * The K7 has streaming cache bypass load/store. The Cyrix III, K6 and
- * other MMX using processors do not.
- */
-
-static void fast_clear_page(void *page)
-{
- int i;
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
- " pxor %%mm0, %%mm0\n" : :
- );
-
- for(i=0;i<4096/64;i++)
- {
- __asm__ __volatile__ (
- " movntq %%mm0, (%0)\n"
- " movntq %%mm0, 8(%0)\n"
- " movntq %%mm0, 16(%0)\n"
- " movntq %%mm0, 24(%0)\n"
- " movntq %%mm0, 32(%0)\n"
- " movntq %%mm0, 40(%0)\n"
- " movntq %%mm0, 48(%0)\n"
- " movntq %%mm0, 56(%0)\n"
- : : "r" (page) : "memory");
- page+=64;
- }
- /* since movntq is weakly-ordered, a "sfence" is needed to become
- * ordered again.
- */
- __asm__ __volatile__ (
- " sfence \n" : :
- );
- kernel_fpu_end();
-}
-
-static void fast_copy_page(void *to, void *from)
-{
- int i;
-
- kernel_fpu_begin();
-
- /* maybe the prefetch stuff can go before the expensive fnsave...
- * but that is for later. -AV
- */
- __asm__ __volatile__ (
- "1: prefetch (%0)\n"
- " prefetch 64(%0)\n"
- " prefetch 128(%0)\n"
- " prefetch 192(%0)\n"
- " prefetch 256(%0)\n"
- "2: \n"
- ".section .fixup, \"ax\"\n"
- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
- " jmp 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b, 3b\n"
- ".previous"
- : : "r" (from) );
-
- for(i=0; i<(4096-320)/64; i++)
- {
- __asm__ __volatile__ (
- "1: prefetch 320(%0)\n"
- "2: movq (%0), %%mm0\n"
- " movntq %%mm0, (%1)\n"
- " movq 8(%0), %%mm1\n"
- " movntq %%mm1, 8(%1)\n"
- " movq 16(%0), %%mm2\n"
- " movntq %%mm2, 16(%1)\n"
- " movq 24(%0), %%mm3\n"
- " movntq %%mm3, 24(%1)\n"
- " movq 32(%0), %%mm4\n"
- " movntq %%mm4, 32(%1)\n"
- " movq 40(%0), %%mm5\n"
- " movntq %%mm5, 40(%1)\n"
- " movq 48(%0), %%mm6\n"
- " movntq %%mm6, 48(%1)\n"
- " movq 56(%0), %%mm7\n"
- " movntq %%mm7, 56(%1)\n"
- ".section .fixup, \"ax\"\n"
- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
- " jmp 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b, 3b\n"
- ".previous"
- : : "r" (from), "r" (to) : "memory");
- from+=64;
- to+=64;
- }
- for(i=(4096-320)/64; i<4096/64; i++)
- {
- __asm__ __volatile__ (
- "2: movq (%0), %%mm0\n"
- " movntq %%mm0, (%1)\n"
- " movq 8(%0), %%mm1\n"
- " movntq %%mm1, 8(%1)\n"
- " movq 16(%0), %%mm2\n"
- " movntq %%mm2, 16(%1)\n"
- " movq 24(%0), %%mm3\n"
- " movntq %%mm3, 24(%1)\n"
- " movq 32(%0), %%mm4\n"
- " movntq %%mm4, 32(%1)\n"
- " movq 40(%0), %%mm5\n"
- " movntq %%mm5, 40(%1)\n"
- " movq 48(%0), %%mm6\n"
- " movntq %%mm6, 48(%1)\n"
- " movq 56(%0), %%mm7\n"
- " movntq %%mm7, 56(%1)\n"
- : : "r" (from), "r" (to) : "memory");
- from+=64;
- to+=64;
- }
- /* since movntq is weakly-ordered, a "sfence" is needed to become
- * ordered again.
- */
- __asm__ __volatile__ (
- " sfence \n" : :
- );
- kernel_fpu_end();
-}
-
-#else
-
-/*
- * Generic MMX implementation without K7 specific streaming
- */
-
-static void fast_clear_page(void *page)
-{
- int i;
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
- " pxor %%mm0, %%mm0\n" : :
- );
-
- for(i=0;i<4096/128;i++)
- {
- __asm__ __volatile__ (
- " movq %%mm0, (%0)\n"
- " movq %%mm0, 8(%0)\n"
- " movq %%mm0, 16(%0)\n"
- " movq %%mm0, 24(%0)\n"
- " movq %%mm0, 32(%0)\n"
- " movq %%mm0, 40(%0)\n"
- " movq %%mm0, 48(%0)\n"
- " movq %%mm0, 56(%0)\n"
- " movq %%mm0, 64(%0)\n"
- " movq %%mm0, 72(%0)\n"
- " movq %%mm0, 80(%0)\n"
- " movq %%mm0, 88(%0)\n"
- " movq %%mm0, 96(%0)\n"
- " movq %%mm0, 104(%0)\n"
- " movq %%mm0, 112(%0)\n"
- " movq %%mm0, 120(%0)\n"
- : : "r" (page) : "memory");
- page+=128;
- }
-
- kernel_fpu_end();
-}
-
-static void fast_copy_page(void *to, void *from)
-{
- int i;
-
-
- kernel_fpu_begin();
-
- __asm__ __volatile__ (
- "1: prefetch (%0)\n"
- " prefetch 64(%0)\n"
- " prefetch 128(%0)\n"
- " prefetch 192(%0)\n"
- " prefetch 256(%0)\n"
- "2: \n"
- ".section .fixup, \"ax\"\n"
- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
- " jmp 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b, 3b\n"
- ".previous"
- : : "r" (from) );
-
- for(i=0; i<4096/64; i++)
- {
- __asm__ __volatile__ (
- "1: prefetch 320(%0)\n"
- "2: movq (%0), %%mm0\n"
- " movq 8(%0), %%mm1\n"
- " movq 16(%0), %%mm2\n"
- " movq 24(%0), %%mm3\n"
- " movq %%mm0, (%1)\n"
- " movq %%mm1, 8(%1)\n"
- " movq %%mm2, 16(%1)\n"
- " movq %%mm3, 24(%1)\n"
- " movq 32(%0), %%mm0\n"
- " movq 40(%0), %%mm1\n"
- " movq 48(%0), %%mm2\n"
- " movq 56(%0), %%mm3\n"
- " movq %%mm0, 32(%1)\n"
- " movq %%mm1, 40(%1)\n"
- " movq %%mm2, 48(%1)\n"
- " movq %%mm3, 56(%1)\n"
- ".section .fixup, \"ax\"\n"
- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
- " jmp 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 1b, 3b\n"
- ".previous"
- : : "r" (from), "r" (to) : "memory");
- from+=64;
- to+=64;
- }
- kernel_fpu_end();
-}
-
-
-#endif
-
-/*
- * Favour MMX for page clear and copy.
- */
-
-static void slow_zero_page(void * page)
-{
- int d0, d1;
- __asm__ __volatile__( \
- "cld\n\t" \
- "rep ; stosl" \
- : "=&c" (d0), "=&D" (d1)
- :"a" (0),"1" (page),"0" (1024)
- :"memory");
-}
-
-void mmx_clear_page(void * page)
-{
- if(unlikely(in_interrupt()))
- slow_zero_page(page);
- else
- fast_clear_page(page);
-}
-
-static void slow_copy_page(void *to, void *from)
-{
- int d0, d1, d2;
- __asm__ __volatile__( \
- "cld\n\t" \
- "rep ; movsl" \
- : "=&c" (d0), "=&D" (d1), "=&S" (d2) \
- : "0" (1024),"1" ((long) to),"2" ((long) from) \
- : "memory");
-}
-
-
-void mmx_copy_page(void *to, void *from)
-{
- if(unlikely(in_interrupt()))
- slow_copy_page(to, from);
- else
- fast_copy_page(to, from);
-}
diff -urpN linux-2.6.11.src/arch/i386/lib/mmx_page.c linux-2.6.11-nt.src/arch/i386/lib/mmx_page.c
--- linux-2.6.11.src/arch/i386/lib/mmx_page.c Thu Jan 1 03:00:00 1970
+++ linux-2.6.11-nt.src/arch/i386/lib/mmx_page.c Fri Mar 18 11:30:51 2005
@@ -0,0 +1,253 @@
+/*
+ * MMX/3DNow! library helper functions
+ *
+ * To do:
+ * We can use MMX just for prefetch in IRQ's. This may be a win.
+ * (reported so on K6-III)
+ * We should use a better code neutral filler for the short jump
+ * leal ebx. [ebx] is apparently best for K6-2, but Cyrix ??
+ * We also want to clobber the filler register so we don't get any
+ * register forwarding stalls on the filler.
+ *
+ * Add *user handling. Checksums are not a win with MMX on any CPU
+ * tested so far for any MMX solution figured.
+ *
+ * 22/09/2000 - Arjan van de Ven
+ * Improved for non-egineering-sample Athlons
+ *
+ */
+
+#include <asm/i387.h>
+
+/*
+ * The K7 has streaming cache bypass load/store. The Cyrix III, K6 and
+ * other MMX using processors do not.
+ */
+
+void zero_page_3dnow(void *page)
+{
+ int i;
+
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+ " pxor %%mm0, %%mm0\n" : :
+ );
+
+ for(i = 0; i < PAGE_SIZE/64; i++)
+ {
+ __asm__ __volatile__ (
+ " movntq %%mm0, (%0)\n"
+ " movntq %%mm0, 8(%0)\n"
+ " movntq %%mm0, 16(%0)\n"
+ " movntq %%mm0, 24(%0)\n"
+ " movntq %%mm0, 32(%0)\n"
+ " movntq %%mm0, 40(%0)\n"
+ " movntq %%mm0, 48(%0)\n"
+ " movntq %%mm0, 56(%0)\n"
+ : : "r" (page) : "memory"
+ );
+ page+=64;
+ }
+ /* since movntq is weakly-ordered, a "sfence" is needed to become
+ * ordered again.
+ */
+ __asm__ __volatile__ (
+ " sfence\n" : :
+ );
+ kernel_fpu_end();
+}
+
+void copy_page_3dnow(void *to, void *from)
+{
+ int i;
+
+ kernel_fpu_begin();
+
+ /* maybe the prefetch stuff can go before the expensive fnsave...
+ * but that is for later. -AV
+ */
+ __asm__ __volatile__ (
+ "1: prefetch (%0)\n"
+ " prefetch 64(%0)\n"
+ " prefetch 128(%0)\n"
+ " prefetch 192(%0)\n"
+ " prefetch 256(%0)\n"
+ "2:\n"
+ ".section .fixup, \"ax\"\n"
+ "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
+ " jmp 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b, 3b\n"
+ ".previous"
+ : : "r" (from)
+ );
+
+ for(i = 0; i < (PAGE_SIZE-320)/64; i++)
+ {
+ __asm__ __volatile__ (
+ "1: prefetch 320(%0)\n"
+ "2: movq (%0), %%mm0\n"
+ " movntq %%mm0, (%1)\n"
+ " movq 8(%0), %%mm1\n"
+ " movntq %%mm1, 8(%1)\n"
+ " movq 16(%0), %%mm2\n"
+ " movntq %%mm2, 16(%1)\n"
+ " movq 24(%0), %%mm3\n"
+ " movntq %%mm3, 24(%1)\n"
+ " movq 32(%0), %%mm4\n"
+ " movntq %%mm4, 32(%1)\n"
+ " movq 40(%0), %%mm5\n"
+ " movntq %%mm5, 40(%1)\n"
+ " movq 48(%0), %%mm6\n"
+ " movntq %%mm6, 48(%1)\n"
+ " movq 56(%0), %%mm7\n"
+ " movntq %%mm7, 56(%1)\n"
+ ".section .fixup, \"ax\"\n"
+ "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
+ " jmp 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b, 3b\n"
+ ".previous"
+ : : "r" (from), "r" (to) : "memory"
+ );
+ from+=64;
+ to+=64;
+ }
+ for(i = (PAGE_SIZE-320)/64; i < PAGE_SIZE/64; i++)
+ {
+ __asm__ __volatile__ (
+ "2: movq (%0), %%mm0\n"
+ " movntq %%mm0, (%1)\n"
+ " movq 8(%0), %%mm1\n"
+ " movntq %%mm1, 8(%1)\n"
+ " movq 16(%0), %%mm2\n"
+ " movntq %%mm2, 16(%1)\n"
+ " movq 24(%0), %%mm3\n"
+ " movntq %%mm3, 24(%1)\n"
+ " movq 32(%0), %%mm4\n"
+ " movntq %%mm4, 32(%1)\n"
+ " movq 40(%0), %%mm5\n"
+ " movntq %%mm5, 40(%1)\n"
+ " movq 48(%0), %%mm6\n"
+ " movntq %%mm6, 48(%1)\n"
+ " movq 56(%0), %%mm7\n"
+ " movntq %%mm7, 56(%1)\n"
+ : : "r" (from), "r" (to) : "memory"
+ );
+ from+=64;
+ to+=64;
+ }
+ /* since movntq is weakly-ordered, a "sfence" is needed to become
+ * ordered again.
+ */
+ __asm__ __volatile__ (
+ " sfence\n" : :
+ );
+ kernel_fpu_end();
+}
+
+/*
+ * Generic MMX implementation without K7 specific streaming
+ */
+void zero_page_mmx(void *page)
+{
+ int i;
+
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+ " pxor %%mm0, %%mm0\n" : :
+ );
+
+ for(i = 0; i < PAGE_SIZE/128; i++)
+ {
+ __asm__ __volatile__ (
+ " movq %%mm0, (%0)\n"
+ " movq %%mm0, 8(%0)\n"
+ " movq %%mm0, 16(%0)\n"
+ " movq %%mm0, 24(%0)\n"
+ " movq %%mm0, 32(%0)\n"
+ " movq %%mm0, 40(%0)\n"
+ " movq %%mm0, 48(%0)\n"
+ " movq %%mm0, 56(%0)\n"
+ " movq %%mm0, 64(%0)\n"
+ " movq %%mm0, 72(%0)\n"
+ " movq %%mm0, 80(%0)\n"
+ " movq %%mm0, 88(%0)\n"
+ " movq %%mm0, 96(%0)\n"
+ " movq %%mm0, 104(%0)\n"
+ " movq %%mm0, 112(%0)\n"
+ " movq %%mm0, 120(%0)\n"
+ : : "r" (page) : "memory"
+ );
+ page+=128;
+ }
+
+ kernel_fpu_end();
+}
+
+void copy_page_mmx(void *to, void *from)
+{
+ int i;
+
+
+ kernel_fpu_begin();
+
+ __asm__ __volatile__ (
+ "1: prefetch (%0)\n"
+ " prefetch 64(%0)\n"
+ " prefetch 128(%0)\n"
+ " prefetch 192(%0)\n"
+ " prefetch 256(%0)\n"
+ "2:\n"
+ ".section .fixup, \"ax\"\n"
+ "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
+ " jmp 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b, 3b\n"
+ ".previous"
+ : : "r" (from)
+ );
+
+ for(i = 0; i < PAGE_SIZE/64; i++)
+ {
+ __asm__ __volatile__ (
+ "1: prefetch 320(%0)\n"
+ "2: movq (%0), %%mm0\n"
+ " movq 8(%0), %%mm1\n"
+ " movq 16(%0), %%mm2\n"
+ " movq 24(%0), %%mm3\n"
+ " movq %%mm0, (%1)\n"
+ " movq %%mm1, 8(%1)\n"
+ " movq %%mm2, 16(%1)\n"
+ " movq %%mm3, 24(%1)\n"
+ " movq 32(%0), %%mm0\n"
+ " movq 40(%0), %%mm1\n"
+ " movq 48(%0), %%mm2\n"
+ " movq 56(%0), %%mm3\n"
+ " movq %%mm0, 32(%1)\n"
+ " movq %%mm1, 40(%1)\n"
+ " movq %%mm2, 48(%1)\n"
+ " movq %%mm3, 56(%1)\n"
+ ".section .fixup, \"ax\"\n"
+ "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
+ " jmp 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b, 3b\n"
+ ".previous"
+ : : "r" (from), "r" (to) : "memory"
+ );
+ from+=64;
+ to+=64;
+ }
+ kernel_fpu_end();
+}
diff -urpN linux-2.6.11.src/arch/i386/lib/page_ops.c linux-2.6.11-nt.src/arch/i386/lib/page_ops.c
--- linux-2.6.11.src/arch/i386/lib/page_ops.c Thu Jan 1 03:00:00 1970
+++ linux-2.6.11-nt.src/arch/i386/lib/page_ops.c Fri Mar 18 11:30:51 2005
@@ -0,0 +1,108 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sysctl.h>
+
+#include <asm/hardirq.h>
+
+void zero_page_mmx(void*);
+void copy_page_mmx(void*, const void*);
+void zero_page_3dnow(void*);
+void copy_page_3dnow(void*, const void*);
+void zero_page_sse(void*);
+void copy_page_sse(void*, const void*);
+
+static void zero_page_slow(void * page)
+{
+ int d0, d1;
+ __asm__ __volatile__(
+ " cld\n"
+ " rep ; stosl\n"
+ : "=&c" (d0), "=&D" (d1)
+ :"a" (0),"1" (page),"0" (PAGE_SIZE/4)
+ :"memory"
+ );
+}
+
+static void copy_page_slow(void *to, const void *from)
+{
+ int d0, d1, d2;
+ __asm__ __volatile__(
+ " cld\n"
+ " rep ; movsl\n"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ : "0" (PAGE_SIZE/4),"1" ((long) to),"2" ((long) from)
+ : "memory"
+ );
+}
+
+int change_pageops = 0;
+
+static void (*zero_f)(void *) = zero_page_slow;
+static void (*copy_f)(void *, const void*) = copy_page_slow;
+
+#define SW_TO(a) do { \
+ zero_f = zero_page_##a; \
+ copy_f = copy_page_##a; \
+ printk("Switched to " #a " clear/copy page ops\n"); \
+} while(0)
+
+static void change_ops(void)
+{
+ switch(change_pageops) {
+ case 1: SW_TO(slow); break;
+ case 2: SW_TO(mmx); break;
+ case 3: SW_TO(3dnow); break;
+ case 4: SW_TO(sse); break;
+ default:
+ printk("unimplemented!\n");
+ }
+ change_pageops = 0;
+}
+
+void clear_page(void *page)
+{
+ if(unlikely(in_interrupt())) {
+ zero_page_slow(page);
+ return;
+ }
+ if(!change_pageops) {
+ zero_f(page);
+ return;
+ }
+ change_ops();
+ zero_f(page);
+}
+
+void copy_page(void *to, const void *from)
+{
+ if(unlikely(in_interrupt())) {
+ copy_page_slow(to, from);
+ return;
+ }
+ if(!change_pageops) {
+ copy_f(to, from);
+ return;
+ }
+ change_ops();
+ copy_f(to, from);
+}
+
+static struct ctl_table pageop_table[] = {
+ {
+ .ctl_name = 19847, /* I typed random number */
+ .procname = "pageop",
+ .data = &change_pageops,
+ .maxlen = sizeof(change_pageops),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ { .ctl_name = 0 }
+};
+
+static int __init pageops_init(void)
+{
+ register_sysctl_table(pageop_table, 1);
+ return 0;
+}
+
+module_init(pageops_init)
diff -urpN linux-2.6.11.src/arch/i386/lib/sse_page.c linux-2.6.11-nt.src/arch/i386/lib/sse_page.c
--- linux-2.6.11.src/arch/i386/lib/sse_page.c Thu Jan 1 03:00:00 1970
+++ linux-2.6.11-nt.src/arch/i386/lib/sse_page.c Fri Mar 18 11:30:51 2005
@@ -0,0 +1,112 @@
+/*
+* linux/arch/i386/lib/sse.c
+*
+* Copyright 2004 Jens Maurer
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+* Send feedback to <Jens.Maurer@gmx.net>
+*/
+
+#include <linux/preempt.h> /* preempt_disable */
+#include <asm/page.h> /* PAGE_SIZE */
+#include <asm/system.h> /* cr0 ops */
+
+
+/*
+* SSE library helper functions
+*/
+
+#define SSE_START(cr0) do { \
+ preempt_disable(); \
+ cr0 = read_cr0(); \
+ clts(); \
+ } while(0)
+
+
+#define SSE_END(cr0) do { \
+ write_cr0(cr0); \
+ preempt_enable(); \
+ } while(0)
+
+void zero_page_sse(void * page)
+{
+ unsigned char xmm_save[16];
+ unsigned int cr0;
+ int i;
+
+ SSE_START(cr0);
+ asm volatile(
+ " movups %%xmm0, (%0)\n"
+ " xorps %%xmm0, %%xmm0\n"
+ : : "r" (xmm_save)
+ );
+ for(i = 0; i < PAGE_SIZE/16/4; i++) {
+ asm volatile(
+ " movntps %%xmm0, (%0)\n"
+ " movntps %%xmm0, 16(%0)\n"
+ " movntps %%xmm0, 32(%0)\n"
+ " movntps %%xmm0, 48(%0)\n"
+ : : "r"(page) : "memory"
+ );
+ page += 16*4;
+ }
+ asm volatile(
+ " movups (%0), %%xmm0\n"
+ " sfence\n"
+ : : "r" (xmm_save) : "memory"
+ );
+ SSE_END(cr0);
+}
+
+void copy_page_sse(void *to, void *from)
+{
+ unsigned char xmm_save[16*4];
+ unsigned int cr0;
+ int i;
+
+ SSE_START(cr0);
+ asm volatile(
+ " movups %%xmm0, (%0)\n"
+ " movups %%xmm1, 16(%0)\n"
+ " movups %%xmm2, 32(%0)\n"
+ " movups %%xmm3, 48(%0)\n"
+ : : "r" (xmm_save)
+ );
+ for(i = 0; i < PAGE_SIZE/16/4; i++) {
+ asm volatile(
+ " movaps (%0), %%xmm0\n"
+ " movaps 16(%0), %%xmm1\n"
+ " movaps 32(%0), %%xmm2\n"
+ " movaps 48(%0), %%xmm3\n"
+ " movntps %%xmm0, (%1)\n"
+ " movntps %%xmm1, 16(%1)\n"
+ " movntps %%xmm2, 32(%1)\n"
+ " movntps %%xmm3, 48(%1)\n"
+ : : "r" (from), "r" (to) : "memory"
+ );
+ from += 16*4;
+ to += 16*4;
+ }
+ asm volatile(
+ " movups (%0), %%xmm0\n"
+ " movups 16(%0), %%xmm1\n"
+ " movups 32(%0), %%xmm2\n"
+ " movups 48(%0), %%xmm3\n"
+ " sfence\n"
+ : : "r" (xmm_save) : "memory"
+ );
+ SSE_END(cr0);
+}
diff -urpN linux-2.6.11.src/include/asm-i386/page.h linux-2.6.11-nt.src/include/asm-i386/page.h
--- linux-2.6.11.src/include/asm-i386/page.h Thu Mar 3 09:31:08 2005
+++ linux-2.6.11-nt.src/include/asm-i386/page.h Fri Mar 18 11:30:51 2005
@@ -12,26 +12,8 @@
#ifdef __KERNEL__
#ifndef __ASSEMBLY__

-#include <linux/config.h>
-
-#ifdef CONFIG_X86_USE_3DNOW
-
-#include <asm/mmx.h>
-
-#define clear_page(page) mmx_clear_page((void *)(page))
-#define copy_page(to,from) mmx_copy_page(to,from)
-
-#else
-
-/*
- * On older X86 processors it's not a win to use MMX here it seems.
- * Maybe the K6-III ?
- */
-
-#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
-#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
-
-#endif
+extern void clear_page(void*);
+extern void copy_page(void*, const void*);

#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)

[Attachment: microbenchmark tarball (application/x-tbz), not reproduced here]