diff -Nur linux-2.6.0-test2/arch/i386/Kconfig linux-2.6.0-test2-i486emu/arch/i386/Kconfig
--- linux-2.6.0-test2/arch/i386/Kconfig	Sun Jul 27 18:57:48 2003
+++ linux-2.6.0-test2-i486emu/arch/i386/Kconfig	Fri Aug  1 13:27:02 2003
@@ -461,6 +461,21 @@
 	  Say Y here if you are building a kernel for a desktop, embedded
 	  or real-time system.  Say N if you are unsure.
 
+config CPU_EMU486
+	bool "Emulate i486 opcodes on i386 CPU"
+	depends on M386 && !SMP && !PREEMPT
+	---help---
+	  This option installs an emulator for certain i486 instructions
+	  which may be used by some optimized userspace binaries. This
+	  effectively enables you to run e.g. Debian on an old i386 CPU.
+	  If you really have an i386 CPU, you will probably want to say Y here.
+
+config CPU_EMU486_DEBUG
+	bool "Debug i486 emulator"
+	depends on CPU_EMU486
+	---help---
+	  This will give you some extra warnings if unhandled opcodes arise.
+
 config X86_UP_APIC
 	bool "Local APIC support on uniprocessors" if !SMP
 	depends on !(X86_VISWS || X86_VOYAGER)
diff -Nur linux-2.6.0-test2/arch/i386/kernel/Makefile linux-2.6.0-test2-i486emu/arch/i386/kernel/Makefile
--- linux-2.6.0-test2/arch/i386/kernel/Makefile	Sun Jul 27 18:57:50 2003
+++ linux-2.6.0-test2-i486emu/arch/i386/kernel/Makefile	Fri Aug  1 12:55:21 2003
@@ -29,6 +29,7 @@
 obj-$(CONFIG_X86_SUMMIT)	+= summit.o
 obj-$(CONFIG_EDD)             	+= edd.o
 obj-$(CONFIG_MODULES)		+= module.o
+obj-$(CONFIG_CPU_EMU486)	+= emu.o
 obj-y				+= sysenter.o vsyscall.o
 obj-$(CONFIG_ACPI_SRAT) 	+= srat.o
 
diff -Nur linux-2.6.0-test2/arch/i386/kernel/cpu/proc.c linux-2.6.0-test2-i486emu/arch/i386/kernel/cpu/proc.c
--- linux-2.6.0-test2/arch/i386/kernel/cpu/proc.c	Sun Jul 27 19:08:30 2003
+++ linux-2.6.0-test2-i486emu/arch/i386/kernel/cpu/proc.c	Mon Aug  4 15:10:25 2003
@@ -101,6 +101,7 @@
 			"hlt_bug\t\t: %s\n"
 			"f00f_bug\t: %s\n"
 			"coma_bug\t: %s\n"
+			"popad_bug\t: %s\n"
 			"fpu\t\t: %s\n"
 			"fpu_exception\t: %s\n"
 			"cpuid level\t: %d\n"
@@ -110,6 +111,7 @@
 		     c->hlt_works_ok ? "no" : "yes",
 		     c->f00f_bug ? "yes" : "no",
 		     c->coma_bug ? "yes" : "no",
+		     c->popad_bug ? "yes" : "no",
 		     c->hard_math ? "yes" : "no",
 		     fpu_exception ? "yes" : "no",
 		     c->cpuid_level,
diff -Nur linux-2.6.0-test2/arch/i386/kernel/emu.c linux-2.6.0-test2-i486emu/arch/i386/kernel/emu.c
--- linux-2.6.0-test2/arch/i386/kernel/emu.c	Thu Jan  1 01:00:00 1970
+++ linux-2.6.0-test2-i486emu/arch/i386/kernel/emu.c	Fri Aug  1 14:52:23 2003
@@ -0,0 +1,425 @@
+/*
+ *  linux/arch/i386/kernel/emu.c
+ *
+ *  Copyright (C) 2002  Willy Tarreau
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/linkage.h>
+#include <linux/ptrace.h>
+#include <linux/types.h>
+
+#include <asm/uaccess.h>
+#include <asm/segment.h>
+
+asmlinkage void do_general_protection(struct pt_regs *regs, long error_code);
+asmlinkage void do_invalid_op(struct pt_regs *regs, long error_code);
+
+/* gives the address of any register member in a struct pt_regs */
+static const int reg_ofs[8] = {
+	(int)&((struct pt_regs *)0)->eax,
+	(int)&((struct pt_regs *)0)->ecx,
+	(int)&((struct pt_regs *)0)->edx,
+	(int)&((struct pt_regs *)0)->ebx,
+	(int)&((struct pt_regs *)0)->esp,
+	(int)&((struct pt_regs *)0)->ebp,
+	(int)&((struct pt_regs *)0)->esi,
+	(int)&((struct pt_regs *)0)->edi
+};
+
+#define REG_PTR(regs, reg) ((unsigned long *)(((void *)(regs)) + reg_ofs[reg]))
+
+/* This code can be used to allow old 386's to hopefully correctly execute some
+ * code which was originally compiled for a 486, and to allow CMOV-disabled
+ * processors to emulate CMOV instructions. In user space, only 3 instructions
+ * have been added between the 386 and the 486:
+ *    - BSWAP reg		performs exactly htonl()
+ *    - CMPXCHG reg/mem, reg	used for mutex locking
+ *    - XADD reg/mem, reg	not encountered yet.
+ *
+ * Warning: this will NEVER allow a kernel compiled for a 486 to boot on a 386,
+ * neither will it allow a CMOV-optimized kernel to run on a processor without
+ * CMOV ! It will only help to port programs, or save you on a rescue disk, but
+ * for performance's sake, it's far better to recompile.
+ *
+ * Test patterns have been submitted to this code on a 386, and it now seems
+ * OK. If you think you've found a bug, please report it to
+ * Willy Tarreau <willy@meta-x.org>.
+ */
+
+/* [modrm_address] returns a pointer to a user-space location by decoding the
+ * mod/rm byte and the bytes at <from>, which point to the mod/reg/rm byte.
+ * This must only be called if modrm indicates memory and not register. The
+ * <from> parameter is updated when bytes are read.
+ * NOTE: this code has some ugly lines, which produce a better assembler output
+ * than the "cleaner" version.
+ */
+static void *modrm_address(struct pt_regs *regs, u8 **from,
+                           int bit32, int modrm)
+{
+	u32 offset = 0;
+	u8 sib, mod, rm;
+
+	/* better optimization to compute them here, even
+	 * if rm is not always used
+	 */
+	rm = modrm & 7;
+	mod = modrm & 0xC0;
+
+	if (bit32) {  /* 32-bits addressing mode (default) */
+		if (mod == 0 && rm == 5) /* 32 bits offset and nothing more */
+			return (void *)*((u32*)*from)++;
+		
+		if (rm == 4) {
+			/* SIB byte is present and must be used */
+			sib = *(*from)++; /* SS(7-6) IDX(5-3) BASE(2-0) */
+
+			/* index * scale */
+			if (((sib >> 3) & 7) != 4)
+				offset += *REG_PTR(regs, (sib >> 3) & 7) << (sib >> 6);
+
+			rm = (sib & 7); /* base replaces rm from now */
+			if (mod == 0 && rm == 5) /* base off32 + scaled index */
+				return (void *)offset + *((u32*)*from)++;
+		}
+
+		/* base register */
+		offset += *REG_PTR(regs, rm);
+	
+		if (mod) {
+			if (mod & 0x80) /* 32 bits unsigned offset */
+				offset += *((u32*)*from)++;
+			else  /* 0x40: 8 bits signed offset */
+				offset += *((s8*)*from)++;
+		}
+
+		return (void *)offset;
+
+	} else { /* 16-bits addressing mode */
+		/* handle special case now */
+		if (mod == 0 && rm == 6) /* 16 bits offset */
+			return (void *)(u32)*((u16*)*from)++;
+
+		if ((rm & 4) == 0)
+			offset += (rm & 2) ? regs->ebp : regs->ebx;
+		if (rm < 6)
+			offset += (rm & 1) ? regs->edi : regs->esi;
+		else if (rm == 6)  /* bp */
+			offset += regs->ebp;
+		else if (rm == 7)  /* bx */
+			offset += regs->ebx;
+
+		/* now, let's include 8/16 bits offset */
+		if (mod) {
+			if (mod & 0x80) /* 16 bits unsigned offset */
+				offset += *((u16*)*from)++;
+			else  /* 0x40: 8 bits signed offset */
+				offset += *((s8*)*from)++;
+		}
+		return (void *)(offset & 0xFFFF);
+	}
+}
+
+
+/*
+ * skip_modrm() computes the EIP value of next instruction from the
+ * pointer <from> which points to the first byte after the mod/rm byte.
+ * Its purpose is to implement a fast alternative to modrm_address()
+ * when offset value is not needed.
+ */
+static inline void *skip_modrm(u8 *from, int bit32, int modrm)
+{
+    	u8 mod,rm;
+
+	/* better optimization to compute them here, even
+	 * if rm is not always used
+	 */
+	rm = modrm & 7;
+	mod = modrm & 0xC0;
+
+	/* most common case first : registers */
+	if (mod == 0xC0)
+		return from;
+
+	if (bit32) { /* 32 bits addressing mode (default) */
+		if (rm == 4)	/* SIB byte : rm becomes base */
+			rm = (*from++ & 7);
+		if (mod == 0x00) {
+			if (rm == 5)  /* 32 bits offset and nothing more */
+				return from + 4;
+			else
+				return from;
+		}
+	}
+	else { /* 16 bits mode */
+		if (mod == 0x00) {
+			if (rm == 6)  /* 16 bits offset and nothing more */
+				return from + 2;
+			else
+				return from;
+		}
+	}
+
+	if (mod & 0x80)
+		return from + (2 * (bit32 + 1)); /* + 2 or 4 bytes */
+	else
+		return from + 1;
+}
+
+
+/* [reg_address] returns a pointer to a register in the regs struct, depending
+ * on <w> (byte/word) and reg. Since the caller knows about <w>, it's
+ * responsible for understanding the result as a byte, word or dword pointer.
+ * Only the 3 lower bits of <reg> are meaningful, higher ones are ignored.
+ */
+static inline void *reg_address(struct pt_regs *regs, char w, u8 reg)
+{
+	if (w)
+		/* 16/32 bits mode */
+		return REG_PTR(regs, reg & 7);
+	else
+		/* 8 bits mode : al,cl,dl,bl,ah,ch,dh,bh */
+		return ((reg & 4) >> 2) + (u8*)REG_PTR(regs, reg & 3);
+
+	/* this is set just to prevent the compiler from complaining */
+	return NULL;
+}
+
+/* [do_emu] is called by exception 6 after an invalid opcode has been
+ * encountered. It will decode the prefixes and the instruction code, to try
+ * to emulate it, and will send a SIGILL or SIGSEGV to the process if not
+ * possible.
+ * REP/REPN prefixes are not supported anymore because it didn't make sense
+ * to emulate instructions prefixed with such opcodes since no arch-specific
+ * instructions start with one of them. At most, they will be the start of newer
+ * arch-specific instructions (SSE ?).
+ */
+asmlinkage void do_emu_op(struct pt_regs *regs, long error_code)
+{
+	enum {
+		PREFIX_ES   = 1,
+		PREFIX_CS   = 2,
+		PREFIX_SS   = 4,
+		PREFIX_DS   = 8,
+		PREFIX_FS   = 16,
+		PREFIX_GS   = 32,
+		PREFIX_SEG  = 63,  /* any seg */
+		PREFIX_D32  = 64,
+		PREFIX_A32  = 128,
+		PREFIX_LOCK = 256,
+	} prefixes = 0;
+
+	u32 *src, *dst;
+	u8 *eip = (u8*)regs->eip;
+
+#ifdef BENCH_CPU_EXCEPTION_BUT_NOT_THE_CODE
+	regs->eip += 3;
+	return;
+#endif
+	/* we'll first read all known opcode prefixes, and discard obviously
+	   invalid combinations.*/
+	while (1) {
+		/* prefix for CMOV, BSWAP, CMPXCHG, XADD */
+		if (*eip == 0x0F) {
+			eip++;
+
+			/* we'll verify if this is a BSWAP opcode, main source of SIGILL on 386's */
+			if ((*eip & 0xF8) == 0xC8) {  /* BSWAP */
+				u8 w, reg, modrm;
+				
+				reg = *eip++ & 0x07;
+				src = reg_address(regs, 1, reg);
+				
+				__asm__ __volatile__ (
+						      "xchgb %%al, %%ah\n\t"
+						      "roll $16, %%eax\n\t"
+						      "xchgb %%al, %%ah\n\t"
+						      : "=a" (*(u32*)src)
+						      : "a" (*(u32*)src));
+				regs->eip = (u32)eip;
+				return;
+			}
+
+
+			/* we'll also try to emulate the CMPXCHG instruction (used in mutex locks).
+			   This instruction is often locked, but it's not possible to put a lock
+			   here. Anyway, I don't believe that there are lots of multiprocessors
+			   386 out there ...
+			*/
+			if ((*eip & 0xFE) == 0xB0) {  /* CMPXCHG */
+				u8 w, reg, modrm;
+
+				w = *eip & 1;
+				modrm = *(eip + 1);
+				eip += 2; /* skips all the opcodes */
+
+				reg = (modrm >> 3) & 7;
+				
+				dst = reg_address(regs, w, reg);
+				if ((modrm & 0xC0) == 0xC0) /* register to register */
+					src = reg_address(regs, w, modrm);
+				else {
+					src = modrm_address(regs, &eip, !(prefixes & PREFIX_A32), modrm);
+					/* we must verify that src is valid for this task */
+					if ((prefixes & (PREFIX_FS | PREFIX_GS)) ||
+					    verify_area(VERIFY_WRITE, (void *)src, (w?((prefixes & PREFIX_D32)?2:4):1))) {
+						do_general_protection(regs, error_code);
+					    return;
+					}
+				}
+				
+				if (!w) { /* 8 bits operands */
+					if ((u8)regs->eax == *(u8*)src) {
+						*(u8*)src = *(u8*)dst;
+						regs->eflags |= X86_EFLAGS_ZF;  /* set Zero Flag */
+					}
+					else {
+						*(u8*)&(regs->eax) = *(u8*)src;
+						regs->eflags &= ~X86_EFLAGS_ZF;  /* clear Zero Flag */
+					}
+				}
+				else if (!(prefixes & PREFIX_D32)) { /* 32 bits operands */
+					if ((u32)regs->eax == *(u32*)src) {
+						*(u32*)src = *(u32*)dst;
+						regs->eflags |= X86_EFLAGS_ZF;  /* set Zero Flag */
+					}
+					else {
+						regs->eax = *(u32*)src;
+						regs->eflags &= ~X86_EFLAGS_ZF;  /* clear Zero Flag */
+					}
+				}
+				else { /* 16 bits operands */
+					if ((u16)regs->eax == *(u16*)src) {
+						*(u16*)src = *(u16*)dst;
+						regs->eflags |= X86_EFLAGS_ZF;  /* set Zero Flag */
+					}
+					else {
+						*(u16*)&regs->eax = *(u16*)src;
+						regs->eflags &= ~X86_EFLAGS_ZF;  /* clear Zero Flag */
+					}
+				}
+				regs->eip = (u32)eip;
+				return;
+			}
+
+			/* we'll also try to emulate the XADD instruction (not very common) */
+			if ((*eip & 0xFE) == 0xC0) {  /* XADD */
+				u8 w, reg, modrm;
+				u32 op1, op2;
+
+				w = *eip & 1;
+				modrm = *(eip + 1);
+				eip += 2; /* skips all the opcodes */
+
+				reg = (modrm >> 3) & 7;
+				
+				dst = reg_address(regs, w, reg);
+				if ((modrm & 0xC0) == 0xC0) /* register to register */
+					src = reg_address(regs, w, modrm);
+				else {
+					src = modrm_address(regs, &eip, !(prefixes & PREFIX_A32), modrm);
+					/* we must verify that src is valid for this task */
+					if ((prefixes & (PREFIX_FS | PREFIX_GS)) ||
+					    verify_area(VERIFY_WRITE, (void *)src, (w?((prefixes & PREFIX_D32)?2:4):1))) {
+						do_general_protection(regs, error_code);
+						return;
+					}
+				}
+				
+				if (!w) { /* 8 bits operands */
+					op1 = *(u8*)src;
+					op2 = *(u8*)dst;
+					*(u8*)src = op1 + op2;
+					*(u8*)dst = op1;
+				}
+				else if (!(prefixes & PREFIX_D32)) { /* 32 bits operands */
+					op1 = *(u32*)src;
+					op2 = *(u32*)dst;
+					*(u32*)src = op1 + op2;
+					*(u32*)dst = op1;
+				}
+				else { /* 16 bits operands */
+					op1 = *(u16*)src;
+					op2 = *(u16*)dst;
+					*(u16*)src = op1 + op2;
+					*(u16*)dst = op1;
+				}
+				regs->eip = (u32)eip;
+				return;
+			}
+		} /* if (*eip == 0x0F) */
+		else if ((*eip & 0xfc) == 0x64) {
+			switch (*eip) {
+			case 0x66: /* Operand switches 16/32 bits */
+				if (prefixes & PREFIX_D32)
+					goto invalid_opcode;
+				prefixes |= PREFIX_D32;
+				eip++;
+				continue;
+			case 0x67: /* Address switches 16/32 bits */
+				if (prefixes & PREFIX_A32)
+					goto invalid_opcode;
+				prefixes |= PREFIX_A32;
+				eip++;
+				continue;
+			case 0x64: /* FS: */
+				if (prefixes & PREFIX_SEG)
+					goto invalid_opcode;
+				prefixes |= PREFIX_FS;
+				eip++;
+				continue;
+			case 0x65: /* GS: */
+				if (prefixes & PREFIX_SEG)
+					goto invalid_opcode;
+				prefixes |= PREFIX_GS;
+				eip++;
+				continue;
+			}
+		}
+		else if (*eip == 0xf0) { /* lock */
+			if (prefixes & PREFIX_LOCK)
+				goto invalid_opcode;
+			prefixes |= PREFIX_LOCK;
+				eip++;
+				continue;
+		}
+		else if ((*eip & 0xe7) == 0x26) {
+			switch (*eip) {
+			case 0x26: /* ES: */
+				if (prefixes & PREFIX_SEG)
+					goto invalid_opcode;
+				prefixes |= PREFIX_ES;
+				eip++;
+				continue;
+			case 0x2E: /* CS: */
+				if (prefixes & PREFIX_SEG)
+					goto invalid_opcode;
+				prefixes |= PREFIX_CS;
+				eip++;
+				continue;
+			case 0x36: /* SS: */
+				if (prefixes & PREFIX_SEG)
+					goto invalid_opcode;
+				prefixes |= PREFIX_SS;
+				eip++;
+				continue;
+			case 0x3E: /* DS: */
+				if (prefixes & PREFIX_SEG)
+					goto invalid_opcode;
+				prefixes |= PREFIX_DS;
+				eip++;
+				continue;
+			}
+		}
+		/* if this opcode has not been processed, it's not a prefix. */
+		break;
+	}
+
+	/* it's a case we can't handle. Unknown opcode or too many prefixes. */
+invalid_opcode:
+#ifdef CONFIG_CPU_EMU486_DEBUG
+	printk(KERN_DEBUG "do_emu() : invalid opcode detected @%p : %02x %02x ...\n", eip, eip[0], eip[1]);
+#endif
+	do_invalid_op(regs, error_code);
+}
diff -Nur linux-2.6.0-test2/arch/i386/kernel/entry.S linux-2.6.0-test2-i486emu/arch/i386/kernel/entry.S
--- linux-2.6.0-test2/arch/i386/kernel/entry.S	Sun Jul 27 18:59:34 2003
+++ linux-2.6.0-test2-i486emu/arch/i386/kernel/entry.S	Fri Aug  1 14:52:55 2003
@@ -558,10 +558,14 @@
 	pushl $do_bounds
 	jmp error_code
 
 ENTRY(invalid_op)
 	pushl $0
+#ifdef CONFIG_CPU_EMU486
+	pushl $do_emu_op
+#else
 	pushl $do_invalid_op
+#endif
 	jmp error_code
 
 ENTRY(coprocessor_segment_overrun)
 	pushl $0
diff -Nur linux-2.6.0-test2/arch/i386/kernel/traps.c linux-2.6.0-test2-i486emu/arch/i386/kernel/traps.c
--- linux-2.6.0-test2/arch/i386/kernel/traps.c	Sun Jul 27 18:57:49 2003
+++ linux-2.6.0-test2-i486emu/arch/i386/kernel/traps.c	Fri Aug  1 14:55:37 2003
@@ -89,6 +89,7 @@
 asmlinkage void alignment_check(void);
 asmlinkage void spurious_interrupt_bug(void);
 asmlinkage void machine_check(void);
+asmlinkage void do_emu_op(struct pt_regs * regs, long error_code);
 
 static int kstack_depth_to_print = 24;
 
diff -Nur linux-2.6.0-test2/include/asm-i386/bugs.h linux-2.6.0-test2-i486emu/include/asm-i386/bugs.h
--- linux-2.6.0-test2/include/asm-i386/bugs.h	Sun Jul 27 18:57:49 2003
+++ linux-2.6.0-test2-i486emu/include/asm-i386/bugs.h	Mon Aug  4 15:12:52 2003
@@ -141,6 +141,7 @@
 	/* If this fails, it means that any user program may lock the CPU hard. Too bad. */
 	if (res != 12345678) printk( "Buggy.\n" );
 		        else printk( "OK.\n" );
+	boot_cpu_data.popad_bug = (res != 12345678); /* record actual POPAD test result */
 #endif
 }
 
diff -Nur linux-2.6.0-test2/include/asm-i386/param.h linux-2.6.0-test2-i486emu/include/asm-i386/param.h
--- linux-2.6.0-test2/include/asm-i386/param.h	Sun Jul 27 18:57:50 2003
+++ linux-2.6.0-test2-i486emu/include/asm-i386/param.h	Fri Aug  1 22:49:33 2003
@@ -2,7 +2,7 @@
 #define _ASMi386_PARAM_H
 
 #ifdef __KERNEL__
-# define HZ		1000		/* Internal kernel timer frequency */
+# define HZ		80		/* Internal kernel timer frequency */
 # define USER_HZ	100		/* .. some user interfaces are in "ticks" */
 # define CLOCKS_PER_SEC	(USER_HZ)	/* like times() */
 #endif
diff -Nur linux-2.6.0-test2/include/asm-i386/processor.h linux-2.6.0-test2-i486emu/include/asm-i386/processor.h
--- linux-2.6.0-test2/include/asm-i386/processor.h	Sun Jul 27 18:56:58 2003
+++ linux-2.6.0-test2-i486emu/include/asm-i386/processor.h	Mon Aug  4 15:10:58 2003
@@ -62,6 +62,7 @@
 	int	fdiv_bug;
 	int	f00f_bug;
 	int	coma_bug;
+	int	popad_bug;
 	unsigned long loops_per_jiffy;
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
