Subject: csum_partial_copy_fromuser patch, #2

OK, this patch is for real. No checksumming oopses should happen; if they do,
please tell me ...

The patch is against 2.1.22. It should go into 2.1.24 too (haven't tried). Not
all checksumming calls are properly converted yet, but oopses shouldn't happen
anyway.
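
For reference, converting one of the remaining call sites looks roughly like
this (a sketch only, not part of this patch; 'skb', 'from', 'dst' and 'copy'
stand for whatever the call site already has):

	int err = 0;

	/*
	 * old way -- a bad user-space pointer oopses inside the copy:
	 *
	 *	skb->csum = csum_partial_copy_fromuser(from, dst, copy, 0);
	 *
	 * new way -- the fault is reported through 'err' instead:
	 */
	skb->csum = csum_partial_copy_from_user(&err, from, dst, copy, 0);
	if (err)
		return err;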

comments welcome.

-- mingo

--- /pdl/linux/linux-2.1.22/net/ipv4/tcp.c Sun Jan 19 14:47:28 1997
+++ linux/net/ipv4/tcp.c Thu Jan 30 20:41:47 1997
@@ -823,6 +823,7 @@
int tcp_do_sendmsg(struct sock *sk, int iovlen, struct iovec *iov,
int len, int flags)
{
+ int err = 0;
int copied = 0;
struct tcp_opt *tp=&(sk->tp_pinfo.af_tcp);

@@ -873,6 +874,8 @@
int tmp;
struct sk_buff *skb;

+ if (err)
+ return (err);
/*
* Stop on errors
*/
@@ -1054,7 +1057,7 @@
skb->h.th->urg_ptr = ntohs(copy);
}

- skb->csum = csum_partial_copy_fromuser(from,
+ skb->csum = csum_partial_copy_from_user(&err, from,
skb_put(skb, copy), copy, 0);

from += copy;
@@ -1071,6 +1074,9 @@

sk->err = 0;

+ if (err)
+ return (err);
+
return copied;
}

--- /pdl/linux/linux-2.1.22/include/asm-i386/checksum.h Sun Nov 3 10:04:41 1996
+++ linux/include/asm-i386/checksum.h Thu Jan 30 20:54:56 1997
@@ -17,20 +17,39 @@

/*
* the same as csum_partial, but copies from src while it
- * checksums
+ * checksums, and handles user-space pointer exceptions correctly, when needed.
*
* here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/

-unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum);
+unsigned int csum_partial_copy_from_user( int * err, const char *src,
+ char *dst, int len, int sum);
+
+/*
+ * I hope GCC will optimize 'dummy' away ...
+ */
+
+unsigned int csum_partial_copy_nocheck_generic( int * err, const char *src, char *dst,
+ int len, int sum);

+extern __inline__ unsigned int csum_partial_copy_nocheck ( const char *src, char *dst,
+ int len, int sum)
+{
+ int dummy;
+
+ return csum_partial_copy_nocheck_generic ( &dummy, src, dst, len, sum);
+}

/*
- * the same as csum_partial, but copies from user space (but on the x86
- * we have just one address space, so this is identical to the above)
+ * These are the 'old' way of doing checksums; a warning message will be
+ * printed if they are used and an exception occurs.
+ *
+ * These functions should go away after some time.
*/
+
#define csum_partial_copy_fromuser csum_partial_copy
+unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum);

/*
* This is a version of ip_compute_csum() optimized for IP headers,
--- /pdl/linux/linux-2.1.22/arch/i386/lib/checksum.c Thu Dec 12 15:51:08 1996
+++ linux/arch/i386/lib/checksum.c Thu Jan 30 21:04:46 1997
@@ -11,6 +11,9 @@
* Lots of code moved from tcp.c and ip.c; see those files
* for more names.
*
+ * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception
+ * handling.
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
@@ -86,7 +89,7 @@
shll $16,%%ecx
5: movb (%%esi),%%cl
6: addl %%ecx,%%eax
- adcl $0, %%eax
+ adcl $0, %%eax
7: "
: "=a"(sum)
: "0"(sum), "c"(len), "S"(buff)
@@ -94,96 +97,228 @@
return(sum);
}

+/*
+ * Copy from ds while checksumming, otherwise like csum_partial
+ *
+ * The macros SRC and DST specify whether there should be exception handling
+ * for the source and/or the destination addresses.
+ *
+ * FIXME: could someone double-check whether I haven't mixed up some SRC and
+ * DST definitions? It's damn hard to trigger all the cases; I hope I got
+ * them all, but there's no guarantee ...
+ */

+#define csum_partial_copy_type(type) \
+unsigned int csum_partial_copy ##type (int * __csum_err, const char *src, char *dst, \
+ int len, int sum) { \
+ __asm__( \
+" testl $2, %%edi # Check alignment. \n" \
+" jz 2f # Jump if alignment is ok. \n" \
+" subl $2, %%ecx # Alignment uses up two bytes. \n" \
+" jae 1f # Jump if we had at least two bytes. \n" \
+" addl $2, %%ecx # ecx was < 2. Deal with it. \n" \
+" jmp 4f \n" \
+" 1000: \n" \
+" 1: movw (%%esi), %%bx \n" \
+" addl $2, %%esi \n" \
+" 1001: \n" \
+" movw %%bx, (%%edi) \n" \
+" addl $2, %%edi \n" \
+" addw %%bx, %%ax \n" \
+" adcl $0, %%eax \n" \
+" 2: \n" \
+" pushl %%ecx \n" \
+" shrl $5, %%ecx \n" \
+" jz 2f \n" \
+" testl %%esi, %%esi \n" \
+" 1002: \n" \
+" 1: movl (%%esi), %%ebx \n" \
+" 1003: \n" \
+" movl 4(%%esi), %%edx \n" \
+" adcl %%ebx, %%eax \n" \
+" 1004: \n" \
+" movl %%ebx, (%%edi) \n" \
+" adcl %%edx, %%eax \n" \
+" 1005: \n" \
+" movl %%edx, 4(%%edi) \n" \
+" \n" \
+" 1006: \n" \
+" movl 8(%%esi), %%ebx \n" \
+" 1007: \n" \
+" movl 12(%%esi), %%edx \n" \
+" adcl %%ebx, %%eax \n" \
+" 1008: \n" \
+" movl %%ebx, 8(%%edi) \n" \
+" adcl %%edx, %%eax \n" \
+" 1009: \n" \
+" movl %%edx, 12(%%edi) \n" \
+" \n" \
+" 1010: \n" \
+" movl 16(%%esi), %%ebx \n" \
+" 1011: \n" \
+" movl 20(%%esi), %%edx \n" \
+" adcl %%ebx, %%eax \n" \
+" 1012: \n" \
+" movl %%ebx, 16(%%edi) \n" \
+" adcl %%edx, %%eax \n" \
+" 1013: \n" \
+" movl %%edx, 20(%%edi) \n" \
+" \n" \
+" 1014: \n" \
+" movl 24(%%esi), %%ebx \n" \
+" 1015: \n" \
+" movl 28(%%esi), %%edx \n" \
+" adcl %%ebx, %%eax \n" \
+" 1016: \n" \
+" movl %%ebx, 24(%%edi) \n" \
+" adcl %%edx, %%eax \n" \
+" 1017: \n" \
+" movl %%edx, 28(%%edi) \n" \
+" \n" \
+" 1018: \n" \
+" lea 32(%%esi), %%esi \n" \
+" 1019: \n" \
+" lea 32(%%edi), %%edi \n" \
+" dec %%ecx \n" \
+" jne 1b \n" \
+" adcl $0, %%eax \n" \
+" 2: popl %%edx \n" \
+" movl %%edx, %%ecx \n" \
+" andl $0x1c, %%edx \n" \
+" je 4f \n" \
+" shrl $2, %%edx # This clears CF \n" \
+" 1020: \n" \
+" 3: movl (%%esi), %%ebx \n" \
+" adcl %%ebx, %%eax \n" \
+" 1021: \n" \
+" movl %%ebx, (%%edi) \n" \
+" 1022: \n" \
+" lea 4(%%esi), %%esi \n" \
+" 1023: \n" \
+" lea 4(%%edi), %%edi \n" \
+" dec %%edx \n" \
+" jne 3b \n" \
+" adcl $0, %%eax \n" \
+" 4: andl $3, %%ecx \n" \
+" jz 7f \n" \
+" cmpl $2, %%ecx \n" \
+" jb 5f \n" \
+" 1024: \n" \
+" movw (%%esi), %%cx \n" \
+" 1025: \n" \
+" leal 2(%%esi), %%esi \n" \
+" 1026: \n" \
+" movw %%cx, (%%edi) \n" \
+" 1027: \n" \
+" leal 2(%%edi), %%edi \n" \
+" je 6f \n" \
+" shll $16,%%ecx \n" \
+" 1028: \n" \
+" 5: movb (%%esi), %%cl \n" \
+" 1029: \n" \
+" movb %%cl, (%%edi) \n" \
+" 6: addl %%ecx, %%eax \n" \
+" adcl $0, %%eax \n" \
+" 7: \n" \
+" 2000: \n" \
+" .section .fixup,\"ax\" \n" \
+" 3000: movl %7,%1 \n" \
+/* FIXME: zero out the rest of the buffer here !!!!!! */ \
+" jmp 2000b \n" \
+" .previous \n" \
+" .section __ex_table,\"a\" \n" \
+" .align 4 \n" \
+" \n" \
+SRC( " .long 1000b,3000b \n " ) \
+DST( " .long 1001b,3000b \n " ) \
+SRC( " .long 1002b,3000b \n " ) \
+SRC( " .long 1003b,3000b \n " ) \
+DST( " .long 1004b,3000b \n " ) \
+DST( " .long 1005b,3000b \n " ) \
+SRC( " .long 1006b,3000b \n " ) \
+SRC( " .long 1007b,3000b \n " ) \
+DST( " .long 1008b,3000b \n " ) \
+DST( " .long 1009b,3000b \n " ) \
+SRC( " .long 1010b,3000b \n " ) \
+SRC( " .long 1011b,3000b \n " ) \
+DST( " .long 1012b,3000b \n " ) \
+DST( " .long 1013b,3000b \n " ) \
+SRC( " .long 1014b,3000b \n " ) \
+SRC( " .long 1015b,3000b \n " ) \
+DST( " .long 1016b,3000b \n " ) \
+DST( " .long 1017b,3000b \n " ) \
+SRC( " .long 1018b,3000b \n " ) \
+DST( " .long 1019b,3000b \n " ) \
+SRC( " .long 1020b,3000b \n " ) \
+DST( " .long 1021b,3000b \n " ) \
+SRC( " .long 1022b,3000b \n " ) \
+DST( " .long 1023b,3000b \n " ) \
+SRC( " .long 1024b,3000b \n " ) \
+SRC( " .long 1025b,3000b \n " ) \
+DST( " .long 1026b,3000b \n " ) \
+DST( " .long 1027b,3000b \n " ) \
+SRC( " .long 1028b,3000b \n " ) \
+DST( " .long 1029b,3000b \n " ) \
+" .previous \n " \
+ : "=a" (sum), "=r" (*__csum_err) \
+ : "0" (sum), "c" (len), "S" (src), "D" (dst), \
+ "1" (*__csum_err), "i" (-EFAULT) \
+ : "bx", "cx", "dx", "si", "di" ); \
+ \
+ return(sum); \
+}

/*
- * copy from ds while checksumming, otherwise like csum_partial
+ * Currently we need only 2 out of the 4 possible type combinations:
*/

-unsigned int csum_partial_copy(const char *src, char *dst,
- int len, int sum) {
- __asm__("
- testl $2, %%edi # Check alignment.
- jz 2f # Jump if alignment is ok.
- subl $2, %%ecx # Alignment uses up two bytes.
- jae 1f # Jump if we had at least two bytes.
- addl $2, %%ecx # ecx was < 2. Deal with it.
- jmp 4f
-1: movw (%%esi), %%bx
- addl $2, %%esi
- movw %%bx, (%%edi)
- addl $2, %%edi
- addw %%bx, %%ax
- adcl $0, %%eax
-2:
- pushl %%ecx
- shrl $5, %%ecx
- jz 2f
- testl %%esi, %%esi
-1: movl (%%esi), %%ebx
- movl 4(%%esi), %%edx
- adcl %%ebx, %%eax
- movl %%ebx, (%%edi)
- adcl %%edx, %%eax
- movl %%edx, 4(%%edi)
-
- movl 8(%%esi), %%ebx
- movl 12(%%esi), %%edx
- adcl %%ebx, %%eax
- movl %%ebx, 8(%%edi)
- adcl %%edx, %%eax
- movl %%edx, 12(%%edi)
-
- movl 16(%%esi), %%ebx
- movl 20(%%esi), %%edx
- adcl %%ebx, %%eax
- movl %%ebx, 16(%%edi)
- adcl %%edx, %%eax
- movl %%edx, 20(%%edi)
-
- movl 24(%%esi), %%ebx
- movl 28(%%esi), %%edx
- adcl %%ebx, %%eax
- movl %%ebx, 24(%%edi)
- adcl %%edx, %%eax
- movl %%edx, 28(%%edi)
-
- lea 32(%%esi), %%esi
- lea 32(%%edi), %%edi
- dec %%ecx
- jne 1b
- adcl $0, %%eax
-2: popl %%edx
- movl %%edx, %%ecx
- andl $0x1c, %%edx
- je 4f
- shrl $2, %%edx # This clears CF
-3: movl (%%esi), %%ebx
- adcl %%ebx, %%eax
- movl %%ebx, (%%edi)
- lea 4(%%esi), %%esi
- lea 4(%%edi), %%edi
- dec %%edx
- jne 3b
- adcl $0, %%eax
-4: andl $3, %%ecx
- jz 7f
- cmpl $2, %%ecx
- jb 5f
- movw (%%esi), %%cx
- leal 2(%%esi), %%esi
- movw %%cx, (%%edi)
- leal 2(%%edi), %%edi
- je 6f
- shll $16,%%ecx
-5: movb (%%esi), %%cl
- movb %%cl, (%%edi)
-6: addl %%ecx, %%eax
- adcl $0, %%eax
-7:
- "
- : "=a" (sum)
- : "0"(sum), "c"(len), "S"(src), "D" (dst)
- : "bx", "cx", "dx", "si", "di" );
- return(sum);
+/*
+ * Generate 'csum_partial_copy_from_user()', we need to do exception
+ * handling for source addresses.
+ */
+
+#define SRC(x) x
+#define DST(x)
+csum_partial_copy_type(_from_user)
+#undef SRC
+#undef DST
+
+/*
+ * Generate 'csum_partial_copy_nocheck()', no need to do exception
+ * handling.
+ */
+
+#define SRC(x)
+#define DST(x)
+csum_partial_copy_type(_nocheck_generic)
+#undef SRC
+#undef DST
+
+/*
+ * Generate 'csum_partial_copy_old()', old and slow compatibility stuff,
+ * full checking.
+ *
+ * Tell us if you see something printk-ing about this. This function will be
+ * removed soon.
+ */
+
+#define SRC(x) x
+#define DST(x) x
+csum_partial_copy_type(_old)
+#undef SRC
+#undef DST
+
+unsigned int csum_partial_copy ( const char *src, char *dst,
+ int len, int sum)
+{
+ int ret;
+ int error = 0;
+
+ ret = csum_partial_copy_old (&error, src, dst, len, sum);
+
+ if (error)
+ printk("csum_partial_copy_old(): tell mingo to convert me!\n");
+
+ return ret;
}
+

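For anyone not yet familiar with the 2.1 exception-table mechanism the new
code relies on: every load (and, for the checking variants, store) that may
fault gets an entry in the __ex_table section pairing its address with a
fixup label, and on a page fault the kernel jumps to the fixup instead of
oopsing. A stripped-down sketch of the same pattern (purely illustrative,
not from this patch; the real uaccess helpers are more involved):

#include <linux/errno.h>

/*
 * Read one byte through a possibly bad user pointer.
 * Returns 0 on success, -EFAULT if the access faulted.
 */
static __inline__ int get_user_byte_checked(unsigned char *val, const char *src)
{
	int err = 0;

	__asm__ __volatile__(
		"1:	movb (%2),%b1\n"	/* may fault              */
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	movb $0,%b1\n"		/* fixup: clear result,   */
		"	movl %3,%0\n"		/* flag -EFAULT,          */
		"	jmp 2b\n"		/* resume after 2:        */
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.align 4\n"
		"	.long 1b,3b\n"		/* faulting insn -> fixup */
		".previous"
		: "=r" (err), "=q" (*val)
		: "r" (src), "i" (-EFAULT), "0" (err));

	return err;
}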