Subject: Small Cyrix patch for 2.1.65+
Hello,

For those who like to live on the cutting edge, here is a version of my
small Cyrix patch for kernels >= 2.1.65, contributed by Stephane Casset
<sept@renass3.u-strasbg.fr>. Thanks Stephane! :-)

The usual warnings apply: it _must_ be used with set6x86, RTFD, YMMV,
etc...

Cheers,

========================================================
Andrew D. Balsa
Home Page: http://www.tux.org/~balsa
andrewbalsa@usa.net
========================================================
diff -u -r linux-2.1.65/arch/i386/kernel/head.S linux/arch/i386/kernel/head.S
--- linux-2.1.65/arch/i386/kernel/head.S Sat Nov 15 01:52:12 1997
+++ linux/arch/i386/kernel/head.S Thu Nov 20 16:58:32 1997
@@ -141,7 +141,72 @@
* apply at our cpl of 0 and the stack ought to be aligned already, and
* we don't need to preserve eflags.
*/
- movl $3, SYMBOL_NAME(x86)
+ /*
+ * A Cyrix/IBM 6x86(L) preserves flags after dividing 5 by 2
+ * (and it _must_ be 5 divided by 2) while other CPUs change
+ * them in undefined ways. We need to know this since we may
+ * need to enable the CPUID instruction at least.
+ */
+ xor %ax,%ax
+ sahf
+ movb $5,%al
+ movb $2,%bl
+ div %bl
+ lahf
+ cmpb $2,%ah
+ jne ncyrix
+
+ /*
+ * It behaves like a Cyrix/IBM 6x86(L) so put "Cyrix" in the
+ * vendor id field. It may be overwritten later with the
+ * real thing if CPUID works.
+ */
+ movl $0x69727943,SYMBOL_NAME(x86_vendor_id) # low 4 chars
+ movl $0x00000078,SYMBOL_NAME(x86_vendor_id)+4 # next 4 chars
+
+ /*
+ * N.B. The pattern of accesses to 0x22 and 0x23 is *essential*
+ * so do not try to "optimize" it! For the same reason we
+ * do all this with interrupts off.
+ */
+#define setCx86(reg, val) \
+ movb reg,%al; \
+ outb %al,$0x22; \
+ movb val,%al; \
+ outb %al,$0x23
+
+#define getCx86(reg) \
+ movb reg,%al; \
+ outb %al,$0x22; \
+ inb $0x23,%al
+
+ cli
+ getCx86($0xc3) # get CCR3
+ movb %al,%cl # Save old value
+ movb %al,%bl
+ andb $0x0f,%bl # Enable access to all config registers
+ orb $0x10,%bl # by setting bit 4
+ setCx86($0xc3,%bl)
+
+ getCx86($0xe8) # now we can get CCR4
+ orb $0x80,%al # and set bit 7 (CPUIDEN)
+ movb %al,%bl # to enable CPUID execution
+ setCx86($0xe8,%bl)
+
+ getCx86($0xfe) # DIR0 : let's check this is a 6x86(L)
+ andb $0xf0,%al # should be 3xh
+ cmpb $0x30,%al #
+ jne n6x86
+ getCx86($0xe9) # CCR5 : we reset the SLOP bit
+ andb $0xfd,%al # so that udelay calculation
+ movb %al,%bl # is correct on 6x86(L) CPUs
+ setCx86($0xe9,%bl)
+
+n6x86: setCx86($0xc3,%cl) # Restore old CCR3
+ sti
+
+ncyrix: movl $3, SYMBOL_NAME(x86)
+
pushfl # push EFLAGS
popl %eax # get EFLAGS
movl %eax,%ecx # save original EFLAGS
@@ -180,8 +245,20 @@
andb $0x0f, %cl # mask mask revision
movb %cl,SYMBOL_NAME(x86_mask)
movl %edx,SYMBOL_NAME(x86_capability)
+
+ xor %ax,%ax # test again for Cyrix CPU
+ sahf
+ movb $5,%al
+ movb $2,%bl
+ div %bl
+ lahf
+ cmpb $2,%ah
+ jne ncyrx2 # skip if not Cyrix CPU
+ getCx86($0xff) # DIR1 : let's check the stepping
+ movb %al,SYMBOL_NAME(x86_mask)
+
/* get vendor info */
- xorl %eax, %eax # call CPUID with 0 -> return vendor ID
+ncyrx2: xorl %eax, %eax # call CPUID with 0 -> return vendor ID
.byte 0x0f, 0xa2 # CPUID
movl %ebx,SYMBOL_NAME(x86_vendor_id) # lo 4 chars
movl %edx,SYMBOL_NAME(x86_vendor_id)+4 # next 4 chars
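
For reference, the divide-5-by-2 trick in the head.S hunk above can be
sketched in C with inline assembly. This is only a rough user-space
illustration with a made-up helper name, not part of the patch; the real
test runs in early boot assembly with interrupts off. A Cyrix/IBM 6x86(L)
leaves the arithmetic flags untouched by DIV, so after clearing them the
LAHF result is still 0x02 (just the always-set bit 1 of EFLAGS):

/* Hypothetical user-space sketch of the Cyrix detection test above. */
static int flags_preserved_by_div(void)
{
	unsigned char ah;

	__asm__ __volatile__(
		"xorb %%ah,%%ah\n\t"	/* AH = 0 */
		"sahf\n\t"		/* clear SF, ZF, AF, PF, CF */
		"movb $5,%%al\n\t"
		"movb $2,%%bl\n\t"
		"divb %%bl\n\t"		/* 5 / 2: a 6x86(L) keeps the flags */
		"lahf\n\t"		/* read the flags back into AH */
		"movb %%ah,%0"
		: "=q" (ah)
		:
		: "eax", "ebx", "cc");

	return ah == 0x02;	/* only the always-set bit 1 remains */
}
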
diff -u -r linux-2.1.65/arch/i386/kernel/setup.c linux/arch/i386/kernel/setup.c
--- linux-2.1.65/arch/i386/kernel/setup.c Sat Nov 15 02:54:43 1997
+++ linux/arch/i386/kernel/setup.c Thu Nov 20 17:47:52 1997
@@ -47,6 +47,11 @@

char x86_vendor_id[13] = "unknown";

+unsigned char Cx86_step = 0;
+static const char *Cx86_type[] = {
+ "unknown", "1.3", "1.4", "2.4", "2.5", "2.6", "2.7 or 3.7", "4.2"
+ };
+
char ignore_irq13 = 0; /* set if exception 16 works */
char wp_works_ok = -1; /* set if paging hardware honours WP */
char hlt_works_ok = 1; /* set if the "hlt" instruction works */
@@ -266,6 +271,50 @@
return NULL;
}

+static const char * Cx86model(void)
+{
+ unsigned char nr6x86 = 0;
+ static const char *model[] = {
+ "unknown", "6x86", "6x86L", "6x86MX", "6x86MXi"
+ };
+ switch (x86) {
+ case 5:
+ nr6x86 = ((x86_capability & (1 << 8)) ? 2 : 1); /* cx8 flag only on 6x86L */
+ break;
+ case 6:
+ nr6x86 = 3;
+ break;
+ default:
+ nr6x86 = 0;
+ }
+ switch (x86_mask) {
+ case 0x03:
+ Cx86_step = 1; /* 6x86MX Rev 1.3 */
+ break;
+ case 0x04:
+ Cx86_step = 2; /* 6x86MX Rev 1.4 */
+ break;
+ case 0x14:
+ Cx86_step = 3; /* 6x86 Rev 2.4 */
+ break;
+ case 0x15:
+ Cx86_step = 4; /* 6x86 Rev 2.5 */
+ break;
+ case 0x16:
+ Cx86_step = 5; /* 6x86 Rev 2.6 */
+ break;
+ case 0x17:
+ Cx86_step = 6; /* 6x86 Rev 2.7 or 3.7 */
+ break;
+ case 0x22:
+ Cx86_step = 7; /* 6x86L Rev 4.2 */
+ break;
+ default:
+ Cx86_step = 0;
+ }
+ return model[nr6x86];
+}
+
static const char * i686model(unsigned int nr)
{
static const char *model[] = {
@@ -280,20 +329,24 @@
{
const char *p = NULL;
static char nbuf[12];
- switch (x86) {
- case 4:
- p = i486model(model);
- break;
- case 5:
- if(strcmp(x86_vendor_id, "AuthenticAMD") == 0){
- p = k5model(model);
- } else {
- p = i586model(model);
- }
- break;
- case 6:
- p = i686model(model);
- break;
+ if (strncmp(x86_vendor_id, "Cyrix", 5) == 0)
+ p = Cx86model();
+ else {
+ switch (x86) {
+ case 4:
+ p = i486model(model);
+ break;
+ case 5:
+ if(strcmp(x86_vendor_id, "AuthenticAMD") == 0){
+ p = k5model(model);
+ } else {
+ p = i586model(model);
+ }
+ break;
+ case 6:
+ p = i686model(model);
+ break;
+ }
}
if (p)
return p;
@@ -342,9 +395,16 @@
CD(x86_vendor_id));

if (CD(x86_mask))
- len += sprintf(buffer+len,
- "stepping\t: %d\n",
- CD(x86_mask));
+ if (strncmp(x86_vendor_id, "Cyrix", 5) != 0) {
+ len += sprintf(buffer+len,
+ "stepping\t: %d\n",
+ CD(x86_mask));
+ }
+ else { /* we have a Cyrix */
+ len += sprintf(buffer+len,
+ "stepping\t: %s\n",
+ Cx86_type[Cx86_step]);
+ }
else
len += sprintf(buffer+len,
"stepping\t: unknown\n");
diff -u -r linux-2.1.65/arch/i386/kernel/time.c linux/arch/i386/kernel/time.c
--- linux-2.1.65/arch/i386/kernel/time.c Mon Jun 16 23:35:53 1997
+++ linux/arch/i386/kernel/time.c Thu Nov 20 17:09:13 1997
@@ -528,30 +528,31 @@
/* Don't use them if a suspend/resume could
corrupt the timer value. This problem
needs more debugging. */
- if (x86_capability & 16) {
- do_gettimeoffset = do_fast_gettimeoffset;
+ if (x86_capability & 16)
+ if (strncmp(x86_vendor_id, "Cyrix", 5) != 0) {
+ do_gettimeoffset = do_fast_gettimeoffset;
do_get_fast_time = do_x86_get_fast_time;

- if( strcmp( x86_vendor_id, "AuthenticAMD" ) == 0 ) {
- if( x86 == 5 ) {
- if( x86_model == 0 ) {
- /* turn on cycle counters during power down */
- __asm__ __volatile__ (" movl $0x83, %%ecx \n \
- .byte 0x0f,0x32 \n \
- orl $1,%%eax \n \
- .byte 0x0f,0x30 \n "
- : : : "ax", "cx", "dx" );
- udelay(500);
+ if( strcmp( x86_vendor_id, "AuthenticAMD" ) == 0 ) {
+ if( x86 == 5 ) {
+ if( x86_model == 0 ) {
+ /* turn on cycle counters during power down */
+ __asm__ __volatile__ (" movl $0x83, %%ecx \n \
+ .byte 0x0f,0x32 \n \
+ orl $1,%%eax \n \
+ .byte 0x0f,0x30 \n "
+ : : : "ax", "cx", "dx" );
+ udelay(500);
+ }
}
- }
- }
+ }

- /* read Pentium cycle counter */
- __asm__(".byte 0x0f,0x31"
- :"=a" (init_timer_cc.low),
- "=d" (init_timer_cc.high));
- irq0.handler = pentium_timer_interrupt;
- }
+ /* read Pentium cycle counter */
+ __asm__(".byte 0x0f,0x31"
+ :"=a" (init_timer_cc.low),
+ "=d" (init_timer_cc.high));
+ irq0.handler = pentium_timer_interrupt;
+ }
#endif
setup_x86_irq(0, &irq0);
}
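
With all three pieces applied, /proc/cpuinfo on a Cyrix box should name
the CPU via Cx86model() and print the revision-style stepping from the
table above; a 6x86L with DIR1 = 0x22, for example, should show up
roughly as (exact field layout per your kernel version):

	model		: 6x86L
	stepping	: 4.2

while the time.c hunk simply keeps such a machine off the TSC-based
do_fast_gettimeoffset() path.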