Date:	Tue, 5 Nov 1996 22:33:29 -0500 (EST)
From:	Damm <>
Subject:	Re: Cyrix 6x86 Patch..
> Damm wrote:
> >
> > Where can i get the Cyrix 6x86 Patch for Linux Kernel 2.1.5+ I used to
> > have the one for 2.0.x but it didn't like 2.1.5 obviously, can someone
> > point me to a web site WITH the new one. or a ftp site or something
> >
> > or mail works too, it's just www.ecsnet.com or whatever only has the old
> > one, it mentions the NEW one, but well... it's spiffy to know it exists
> > and not able to get it.
>
> You asked, you got it.. The author posted this a week or two ago, but
> did not specify what kernel it was against (please folks, be specific
> about the target when posting patches to the newsgroup?). He would not
> reply to (multiple) E-Mail queries, but I did deduce that it was for the
> 2.1.x series..
>
> Steve
Thanks for the patch, but I only have a step 1 revision 4 chip, and with the patch my machine hangs, or reboots, or throws lots of exception errors... so I dumped the kernel patch. It was nice to have it say i586, but that's all it did.

'Cept for fouling up my computer.
diff -u -r1.1.2.2 Configure.help
--- Configure.help	1996/10/09 20:58:03	1.1.2.2
+++ linux/Documentation/Configure.help	1996/10/24 19:38:35
@@ -3785,6 +3785,27 @@
   Documentation/isdn/README and Documentation/isdn/README.pcbit for
   more information.
 
+Support for Cyrix processors
+CONFIG_CYRIX
+  This enables recognition of Cyrix processors. Without it /proc/cpuinfo
+  will list your processor as an unknown model of Cyrix. With it
+  it will list the correct details. If you have a 6x86 this will also
+  enable the use of the variable size paging mechanism (VSPM) for the
+  kernel memory mappings. This should give slightly improved MMU
+  performance. It should be safe to say Y here regardless of what
+  processor you actually have.
+
+Support for Cyrix VSPM enhancments
+CONFIG_CYRIX_VSPM
+  Variable sized paging mechanism (VSPM) is a feature of the Cyrix
+  6x86 family of processors that allows large regions of memory
+  to be mapped in one go. This significantly reduces the amount
+  of work the MMU has to do compared with traditional paging hence
+  this option is normally on. However VSPM appears to have some
+  problems. Work arounds are in place for step 1 rev 5 and 6 chips
+  but if you have a 6x86 and your kernel will not boot try disabling
+  this option.
+
 Support for AP1000 multicomputer
 CONFIG_AP1000
   This enables support for a sparc based parallel multi-computer
@@ -3879,4 +3900,4 @@
 # LocalWords:  mgetty sendfax gert greenie muc lowlevel Lasermate LanManager io
 # LocalWords:  OOPSes trackball binghamton mobileip ncr IOMAPPED settags ns ser
 # LocalWords:  setsync NEGO MPARITY autotuning prefetch PIIX cdwrite utils rc
-# LocalWords:  PCWATCHDOG berkprod bitgate
+# LocalWords:  PCWATCHDOG berkprod bitgate Cyrix
diff -u -r1.1.2.1 config.in
--- config.in	1996/10/09 20:16:01	1.1.2.1
+++ linux/arch/i386/config.in	1996/10/24 19:29:44
@@ -44,6 +44,11 @@
 	 PPro		CONFIG_M686" Pentium
 endmenu
 
+bool 'Cyrix processor recognition' CONFIG_CYRIX
+if [ "$CONFIG_CYRIX" = "y" ]; then
+  bool 'Cyrix 6x86 VSPM enhancements' CONFIG_CYRIX_VSPM
+fi
+
 source drivers/block/Config.in
 
 if [ "$CONFIG_NET" = "y" ]; then
diff -u -r1.1.2.2 defconfig
--- defconfig	1996/10/18 20:21:49	1.1.2.2
+++ linux/arch/i386/defconfig	1996/10/24 19:29:59
@@ -28,6 +28,8 @@
 # CONFIG_M486 is not set
 CONFIG_M586=y
 # CONFIG_M686 is not set
+CONFIG_CYRIX=y
+CONFIG_CYRIX_VSPM=y
 
 #
 # Floppy, IDE, and other block devices
diff -u -r1.1.2.1 head.S
--- head.S	1996/10/09 20:16:15	1.1.2.1
+++ linux/arch/i386/kernel/head.S	1996/10/21 21:52:32
@@ -125,7 +125,68 @@
  * apply at our cpl of 0 and the stack ought to be aligned already, and
  * we don't need to preserve eflags.
  */
-	movl $3, SYMBOL_NAME(x86)
+	/*
+	 * A Cyrix preserves flags in cases where other CPUs change
+	 * them in undefined ways. We need to know this since we may
+	 * need to enable the CPUID instruction at least.
+	 */
+	xor %ax,%ax
+	sahf
+	movb $5,%ax
+	movb $2,%bx
+	div %bl
+	lahf
+	cmpb $2,%ah
+	jne ncyrix
+
+	/*
+	 * It behaves like a Cyrix so put "Cyrix" in the vendor id
+	 * field. It may be overwritten later with the real thing
+	 * if CPUID works.
+	 */
+	movl $0x69727943,SYMBOL_NAME(x86_vendor_id)	# low 4 chars
+	movl $0x00000078,SYMBOL_NAME(x86_vendor_id)+4	# next 4 chars
+
+#ifdef CONFIG_CYRIX
+	/*
+	 * N.B. The pattern of accesses to 0x22 and 0x23 is *important*
+	 * so do not try and "optimise" it! For the same reason we
+	 * do all this with interrupts off just to be sure.
+	 */
+#define setCx86(reg, val) \
+	movb reg,%ax; \
+	outb %ax,$0x22; \
+	movb val,%ax; \
+	outb %ax,$0x23
+
+#define getCx86(reg) \
+	movb reg,%ax; \
+	outb %ax,$0x22; \
+	inb $0x23,%ax
+
+	cli
+	getCx86($0xc3)		# get CCR3
+	movb %ax,%cx		# Save old value
+	movb %ax,%bx
+	andb $0x0f,%bx		# Enable all config registers (for CCR4 access)
+	orb $0x10,%bx
+	setCx86($0xc3,%bx)
+
+	getCx86($0xc2)		# CCR2 |= SUSP_HLT
+	orb $8,%ax
+	movb %ax,%bx
+	setCx86($0xc2,%bx)
+
+	getCx86($0xe8)		# CCR4 |= CPUID | DTE_EN
+	orb $0x90,%ax
+	movb %ax,%bx
+	setCx86($0xe8,%bx)
+
+	setCx86($0xc3,%cx)	# Restore old CCR3
+	sti
+#endif /* CONFIG_CYRIX */
+
+ncyrix:	movl $3, SYMBOL_NAME(x86)
 	pushfl			# push EFLAGS
 	popl %eax		# get EFLAGS
 	movl %eax,%ecx		# save original EFLAGS
@@ -137,6 +198,7 @@
 	xorl %ecx,%eax		# change in flags
 	andl $0x40000,%eax	# check if AC bit changed
 	je is386
 
+	movl $4,SYMBOL_NAME(x86)
 	movl %ecx,%eax
 	xorl $0x200000,%eax	# check ID flag
@@ -145,11 +207,23 @@
 	pushfl			# 487SX we can't change it
 	popl %eax
 	xorl %ecx,%eax
-	andl $0x200000,%eax
-	je is486
-isnew:	pushl %ecx		# restore original EFLAGS
+	pushl %ecx		# restore original EFLAGS
 	popfl
 
+	andl $0x200000,%eax
+	je nocpuid
+	incl SYMBOL_NAME(have_cpuid)	# we have CPUID
+
+	/* get vendor info */
+	xorl %eax, %eax		# call CPUID with 0 -> return vendor ID
+	.byte 0x0f, 0xa2	# CPUID
+	movl %ebx,SYMBOL_NAME(x86_vendor_id)	# lo 4 chars
+	movl %edx,SYMBOL_NAME(x86_vendor_id)+4	# next 4 chars
+	movl %ecx,SYMBOL_NAME(x86_vendor_id)+8	# last 4 chars
+
+	cmpl $0,%eax		# do we have processor info as well?
+	je nocpuid
+
 	/* get processor type */
 	movl $1, %eax		# Use the CPUID instruction to
 	.byte 0x0f, 0xa2	# check the processor type
@@ -162,23 +236,48 @@
 	andb $0x0f, %cl		# mask mask revision
 	movb %cl,SYMBOL_NAME(x86_mask)
 	movl %edx,SYMBOL_NAME(x86_capability)
 
-	/* get vendor info */
-	xorl %eax, %eax		# call CPUID with 0 -> return vendor ID
-	.byte 0x0f, 0xa2	# CPUID
-	movl %ebx,SYMBOL_NAME(x86_vendor_id)	# lo 4 chars
-	movl %edx,SYMBOL_NAME(x86_vendor_id)+4	# next 4 chars
-	movl %ecx,SYMBOL_NAME(x86_vendor_id)+8	# last 4 chars
-	movl %cr0,%eax		# 486+
-	andl $0x80000011,%eax	# Save PG,PE,ET
-	orl $0x50022,%eax	# set AM, WP, NE and MP
-	jmp 2f
-is486:	pushl %ecx		# restore original EFLAGS
-	popfl
-	movl %cr0,%eax		# 486
+nocpuid:
+	/*
+	 * Even if we had CPUID Cyrix tries to look compatible with
+	 * Intel so we have to go elsewhere for the nitty gritty.
+	 */
+	cmpl $0x69727943,SYMBOL_NAME(x86_vendor_id)	# "Cyri[x.*]"?
+	jne chkdevid		# maybe not...
+
+	orb $0x10,SYMBOL_NAME(x86)		# Flag as Cyrix
+	movb $0xfe,SYMBOL_NAME(x86_model)	# Generic Cx486?
+	movb $0,SYMBOL_NAME(x86_mask)
+
+chkdevid:
+#ifdef CONFIG_CYRIX
+	cli			# Test for DEVID
+	getCx86($0xc3)		# by writing CCR3
+	movb %ax,%cx
+	movb %ax,%bx
+	orb $0x80,%bx
+	setCx86($0xc3,%bx)
+	getCx86($0xc0)		# dummy to change bus
+	getCx86($0xc3)
+	sti
+	cmp %ax,%cx
+	je is486		# not writable == no DEVID
+
+	cli
+	setCx86($0xc3,%cx)	# restore CCR3
+
+	getCx86($0xfe)		# get DEVID in preference to any CPUID
+	movb %al,SYMBOL_NAME(x86_model)
+	getCx86($0xff)
+	movb %al,SYMBOL_NAME(x86_mask)
+	sti
+#endif /* CONFIG_CYRIX */
+
+is486:	movl %cr0,%eax		# 486 or better
 	andl $0x80000011,%eax	# Save PG,PE,ET
 	orl $0x50022,%eax	# set AM, WP, NE and MP
 	jmp 2f
+
 is386:	pushl %ecx		# restore original EFLAGS
 	popfl
 	movl %cr0,%eax		# 386
diff -u -r1.1.2.1 setup.c
--- setup.c	1996/10/09 20:16:16	1.1.2.1
+++ linux/arch/i386/kernel/setup.c	1996/10/20 13:57:16
@@ -235,11 +235,54 @@
 	return NULL;
 }
 
+static const char * cyrixmodel(unsigned int nr)
+{
+	static char nbuf[32];
+
+	if (nr < 0x20 || nr == 0xfe) {
+		/* An abridged list. The values for the Cx486 series do
+		 * not seem to follow much of a pattern.
+		 */
+		switch (nr) {
+		case 0x00:
+			return "Cx486 SLC";
+		case 0x01:
+			return "Cx486 DLC";
+		case 0x02:
+			return "Cx486 SLC2";
+		case 0x03:
+			return "Cx486 DLC2";
+		case 0x1a:
+			return "Cx486DX";
+		case 0x1b:
+			return "Cx486DX2";
+		case 0x1f:
+			return "Cx486DX4";
+		case 0xfe:
+			return ("Unknown");
+		default:
+			return "Cx486";
+		}
+	} else if ((nr >= 0x28 && nr <= 0x2f)
+		   || (nr >= 0x30 && nr <= 0x36)) {
+		sprintf(nbuf, "%cx86 %cx Core/Bus Clock",
+			nr >= 0x30 ? '6' : '5',
+			"12??43"[nr & 0x05]);
+		return nbuf;
+	}
+	return NULL;
+}
+
 static const char * getmodel(int x86, int model)
 {
 	const char *p = NULL;
 	static char nbuf[12];
-	switch (x86) {
+	if ((x86 & 0xf0) == 0x10) {
+		p = cyrixmodel(model);
+	} else switch (x86) {
+	case 0:
+		p = "unknown";
+		break;
 	case 4:
 		p = i486model(model);
 		break;
@@ -289,17 +332,21 @@
 		"model\t\t: %s\n"
 		"vendor_id\t: %s\n",
 		CPUN,
-		CD(x86)+'0',
-		CD(have_cpuid) ?
-			getmodel(CD(x86), CD(x86_model)) :
-			"unknown",
+		(CD(x86) & 0x0f)+'0',
+		getmodel(CD(x86), CD(x86_model)),
 		CD(x86_vendor_id));
 
-	if (CD(x86_mask))
-		len += sprintf(buffer+len,
-			"stepping\t: %d\n",
-			CD(x86_mask));
-	else
+	if (CD(x86_mask)) {
+		if ((CD(x86) & 0xf0) == 0x10)
+			len += sprintf(buffer+len,
+				"stepping\t: %d rev %d\n",
+				CD(x86_mask) >> 4,
+				CD(x86_mask) & 0x0f);
+		else
+			len += sprintf(buffer+len,
+				"stepping\t: %d\n",
+				CD(x86_mask));
+	} else
 		len += sprintf(buffer+len,
 			"stepping\t: unknown\n");
 
diff -u -r1.1.2.3 init.c
--- init.c	1996/10/20 16:28:19	1.1.2.3
+++ linux/arch/i386/mm/init.c	1996/10/24 22:09:13
@@ -110,6 +110,82 @@
 	unsigned long tmp;
 	unsigned long address;
+#if defined(CONFIG_CYRIX) && defined(CONFIG_CYRIX_VSPM) && !defined(__SMP__)
+	unsigned long vspm_max = __va(0);
+
+	/* If this is a Cyrix 6x86 we use the variable size paging
+	 * mechanism (VSPM) to map physical memory at 0xc0000000.
+	 * Note that VSPM pages are stored on the CPU only so this
+	 * needs to be done for each processor in a multi-processor
+	 * system. If we have a mixture of processors we would also
+	 * need to set up the traditional page tables for them.
+	 * Note also that VSPM pages will be global to all memory
+	 * spaces since they are not stored in the normal page
+	 * directories.
+	 *
+	 * By experiment:
+	 *	VSPM pages must be power of two sizes. A single 24MB
+	 *	page fails.
+	 *	Documentation suggests there are 8 VSPM slots (3 bit
+	 *	index) but tests show the upper four slots mirror the
+	 *	lower four.
+	 *	With a 16MB page followed by an 8MB page I always get
+	 *	a write fault on the last 4k of the 8MB page. With 8MB
+	 *	plus 4MB I can't even boot. If we have such a memory
+	 *	size we map the first power of two with VSPM and use
+	 *	traditional paging for the rest.
+	 *	VSPM pages override traditional pages so we cannot
+	 *	overlap the start of the vmalloc region.
+	 *	Do not try and create a mapping with dirty and accessed
+	 *	flags clear - a step 1 rev 5 chip will crash and burn.
+	 */
+	if ((x86 & 0xf0) == 0x10 || (x86_model & 0xf0) == 0x30) {
+		int vspm_index = 0;
+
+		do {
+			unsigned long mem_size;
+
+			mem_size = 4096;
+			while (vspm_max+mem_size < end_mem)
+				mem_size <<= 1;
+			if (vspm_max+mem_size > end_mem)
+				if ((mem_size >>= 1) < 4096)
+					break;
+
+			asm( "movl %0,%%eax\n"
+			     "movl %%eax,%%tr7\n"
+			     "movl $0x00000004,%%eax\n"
+			     "movl %%eax,%%tr6\n"
+			     "movl %1,%%eax\n"
+			     "movl %%eax,%%tr7\n"
+			     "movl %2,%%eax\n"
+			     "movl %%eax,%%tr6\n"
+			     : /* no outputs */
+			     : "g" ((((mem_size-1) & 0xfffff000)) | (vspm_index<<7)),
+			       "g" ((__pa(vspm_max) & 0xfffff000) | (vspm_index<<7)),
+			       "g" ((vspm_max & 0xfffff000) | 0x00000cd6)
+			     : "eax", "cc"
+			);
+
+			vspm_max += mem_size;
+
+#if 1
+			/* Just use one VSPM page for now, anything
+			 * over will be traditionally paged.
+			 */
+			break;
+#endif
+		} while (vspm_max < end_mem && vspm_index++ < 4);
+
+		/* Write protect does work correctly but the test
+		 * will fail because we can't map just page 0 read
+		 * only from under the VSPM big page. If we didn't
+		 * know before we do now.
+		 */
+		wp_works_ok = 1;
+	}
+#endif
+
 	/*
 	 * Physical page 0 is special; it's not touched by Linux since BIOS
 	 * and SMM (for laptops with [34]86/SL chips) may need it. It is read
@@ -178,6 +254,15 @@
 			continue;
 		}
 #endif
+#if defined(CONFIG_CYRIX) && defined(CONFIG_CYRIX_VSPM) && !defined(__SMP__)
+		if ((x86 & 0xf0) == 0x10			/* Cyrix */
+		    && (x86_model & 0xf0) == 0x30		/* 6x86 */
+		    && (address + PAGE_SIZE*PTRS_PER_PTE) < vspm_max) {	/* within VSPM mappings */
+			address += PAGE_SIZE * PTRS_PER_PTE;
+			continue;
+		}
+#endif
+
 		/* map the memory at virtual addr 0xC0000000 */
 		/* pg_table is physical at this point */
 		pg_table = (pte_t *) (PAGE_MASK & pgd_val(pg_dir[768]));
@@ -188,12 +273,18 @@
 		pgd_val(pg_dir[768]) = _PAGE_TABLE | (unsigned long) pg_table;
 		pg_dir++;
+
 		/* now change pg_table to kernel virtual addresses */
 		pg_table = (pte_t *) __va(pg_table);
 		for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
 			pte_t pte = mk_pte(address, PAGE_KERNEL);
+#if defined(CONFIG_CYRIX) && defined(CONFIG_CYRIX_VSPM) && !defined(__SMP__)
+			if (address < vspm_max || address >= end_mem)
+				pte_val(pte) = 0;
+#else
 			if (address >= end_mem)
 				pte_val(pte) = 0;
+#endif
 			set_pte(pg_table, pte);
 			address += PAGE_SIZE;
 		}
diff -u -r1.1.2.2 bugs.h
--- bugs.h	1996/10/09 20:59:53	1.1.2.2
+++ linux/include/asm-i386/bugs.h	1996/10/22 19:15:57
@@ -130,5 +130,5 @@
 	check_tlb();
 	check_fpu();
 	check_hlt();
-	system_utsname.machine[1] = '0' + x86;
+	system_utsname.machine[1] = '0' + (x86 & 0x0f);
 }
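For anyone trying to follow the head.S hunk above, here is roughly the same sequence written out as kernel-style C. This is a sketch only, not part of the patch: the helper names getCx86()/setCx86() and cyrix_52div_test() are illustrative, while the port 0x22/0x23 access pattern, the 5-divided-by-2 flags test, and the CCR3/CCR4 bit values are taken straight from the patch.

/* Cyrix configuration registers: write the register index to port 0x22,
 * then read or write the data through port 0x23.  Every 0x23 access must
 * be preceded by a 0x22 write, which is why the patch warns not to
 * "optimise" the sequence. */
#include <asm/io.h>

static inline unsigned char getCx86(unsigned char reg)
{
	outb(reg, 0x22);
	return inb(0x23);
}

static inline void setCx86(unsigned char reg, unsigned char data)
{
	outb(reg, 0x22);
	outb(data, 0x23);
}

/* The detection trick: SAHF clears the arithmetic flags (eax = 0x0005,
 * so AH = 0), then 5 is divided by 2.  A Cyrix leaves the flags untouched
 * across DIV, so LAHF reads back 0x02 (bit 1 of FLAGS is always set);
 * other CPUs change the flags in undefined ways. */
static int cyrix_52div_test(void)
{
	unsigned int test;

	__asm__ __volatile__(
		"sahf\n\t"		/* flags <- AH (AH = 0) */
		"div %b2\n\t"		/* 5 / 2 */
		"lahf"			/* AH <- flags */
		: "=a" (test)
		: "0" (5), "q" (2)
		: "cc");

	return (unsigned char)(test >> 8) == 0x02;	/* still 0x02 => Cyrix */
}

/* Same CCR dance as the head.S hunk: unlock the extended configuration
 * registers through CCR3's MAPEN field, set CPUID | DTE_EN in CCR4,
 * then put CCR3 back the way it was. */
static void cyrix_enable_cpuid(void)
{
	unsigned char ccr3 = getCx86(0xc3);

	setCx86(0xc3, (ccr3 & 0x0f) | 0x10);	/* MAPEN on */
	setCx86(0xe8, getCx86(0xe8) | 0x90);	/* CCR4 |= CPUID | DTE_EN */
	setCx86(0xc3, ccr3);			/* MAPEN back off */
}

All of this has to run with interrupts off and in kernel context, since the 0x22/0x23 accesses must not be interleaved with anything else touching those ports.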
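And a small illustration, again not from the patch, of how the DEVID registers end up in /proc/cpuinfo once the setup.c changes are applied: DIR0 (register 0xfe) becomes x86_model and selects the model string, DIR1 (register 0xff) becomes x86_mask and is printed as "stepping ... rev ...". The function name and the standalone printf form are just for illustration.

#include <stdio.h>

/* dir0/dir1 are what getCx86(0xfe) and getCx86(0xff) return */
static void show_cyrix_id(unsigned char dir0, unsigned char dir1)
{
	if (dir0 >= 0x28 && dir0 <= 0x36)	/* 0x2x = 5x86, 0x3x = 6x86 */
		printf("model\t\t: %cx86 %cx Core/Bus Clock\n",
		       dir0 >= 0x30 ? '6' : '5',
		       "12??43"[dir0 & 0x05]);	/* clock multiplier code */
	else
		printf("model\t\t: Cx486\n");	/* abridged, as in cyrixmodel() */

	/* high nibble of DIR1 is the stepping, low nibble the revision */
	printf("stepping\t: %d rev %d\n", dir1 >> 4, dir1 & 0x0f);
}

So a "step 1 rev 4" 6x86 like the one above reports DIR1 = 0x14, which is exactly the combination the Configure.help text says has no workaround.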