List:       bochs-cvs
Subject:    [Bochs-cvs] CVS: bochs/cpu cpu.h,1.226,1.227 ctrl_xfer_pro.cc,1.45,1.46 proc_ctrl.cc,1.109,1.110 segment_ctrl_pro.cc,1.40,1.41
From:       Stanislav Shwartsman <sshwarts@users.sourceforge.net>
Date:       2005-07-29 6:30:00
Message-ID: E1DyONc-0006Ib-JS@sc8-pr-cvs1.sourceforge.net

Update of /cvsroot/bochs/bochs/cpu
In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv24119

Modified Files:
	cpu.h ctrl_xfer_pro.cc proc_ctrl.cc segment_ctrl_pro.cc 
Log Message:
Fixed code duplication, added canonical address checking for RETF in long mode
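
A note on the canonical check being added here: in x86-64, a 64-bit linear
address is canonical when bits 63 down to 47 are all identical, i.e. the bits
above bit 47 are a sign-extension of bit 47 (BX_CANONICAL_BITS is 48, see the
cpu.h diff below). The snippet that follows is a minimal standalone sketch of
such a check, not the Bochs implementation; the helper name and types are
illustrative only.

#include <cstdint>

static const int CANONICAL_BITS = 48;  // mirrors BX_CANONICAL_BITS in cpu.h

// An address is canonical when shifting the low 48 bits up to the top of the
// word and then arithmetic-shifting them back down reproduces the original
// value; that is exactly "bits 63..48 all equal bit 47".
static bool is_canonical(uint64_t addr)
{
  int64_t sign_extended =
    (int64_t)(addr << (64 - CANONICAL_BITS)) >> (64 - CANONICAL_BITS);
  return (uint64_t)sign_extended == addr;
}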


Index: cpu.h
===================================================================
RCS file: /cvsroot/bochs/bochs/cpu/cpu.h,v
retrieving revision 1.226
retrieving revision 1.227
diff -u -d -r1.226 -r1.227
--- cpu.h	25 Jul 2005 04:18:10 -0000	1.226
+++ cpu.h	29 Jul 2005 06:29:57 -0000	1.227
@@ -328,11 +328,11 @@
 #define BX_MSR_KERNELGSBASE     0xc0000102
 #endif
 
-#define BX_MODE_IA32_REAL       0x0   // CR0.PE=0
-#define BX_MODE_IA32_V8086      0x1   // CR0.PE=1, EFLAGS.VM=1
-#define BX_MODE_IA32_PROTECTED  0x2   // CR0.PE=1, EFLAGS.VM=0
-#define BX_MODE_LONG_COMPAT     0x3   // EFER.LMA = 0, EFER.LME = 1, CR0.PE=1
-#define BX_MODE_LONG_64         0x4   // EFER.LMA = 1, EFER.LME = 1, CR0.PE=1
+#define BX_MODE_IA32_REAL       0x0   // CR0.PE=0                |
+#define BX_MODE_IA32_V8086      0x1   // CR0.PE=1, EFLAGS.VM=1   | EFER.LMA=0
+#define BX_MODE_IA32_PROTECTED  0x2   // CR0.PE=1, EFLAGS.VM=0   | 
+#define BX_MODE_LONG_COMPAT     0x3   // EFER.LMA = EFER.LME = 1, CR0.PE=1, CS.L=0
+#define BX_MODE_LONG_64         0x4   // EFER.LMA = EFER.LME = 1, CR0.PE=1, CS.L=1
 
 #define BX_CANONICAL_BITS   (48)
 
@@ -2665,6 +2665,8 @@
 #define Write_RMW_virtual_qword(val64) write_RMW_virtual_qword(val64)
 
   BX_SMF void branch_near32(Bit32u new_eip) BX_CPP_AttrRegparmN(1);
+  BX_SMF void branch_far(bx_selector_t *selector, 
+       bx_descriptor_t *descriptor, bx_address rip, Bit8u cpl);
 #if BX_SUPPORT_X86_64
   BX_SMF void branch_near64(bxInstruction_c *i) BX_CPP_AttrRegparmN(1);
 #endif
@@ -2686,9 +2688,9 @@
 #endif
 
   BX_SMF void access_linear(bx_address address, unsigned length, unsigned pl,
-                     unsigned rw, void *data) BX_CPP_AttrRegparmN(3);
+       unsigned rw, void *data) BX_CPP_AttrRegparmN(3);
   BX_SMF Bit32u  translate_linear(bx_address laddr, 
-     unsigned pl, unsigned rw, unsigned access_type) BX_CPP_AttrRegparmN(3);
+       unsigned pl, unsigned rw, unsigned access_type) BX_CPP_AttrRegparmN(3);
   BX_SMF Bit32u itranslate_linear(bx_address laddr, unsigned pl) BX_CPP_AttrRegparmN(2);
   BX_SMF Bit32u dtranslate_linear(bx_address laddr, unsigned pl, unsigned rw) BX_CPP_AttrRegparmN(3);
   BX_SMF void TLB_flush(bx_bool invalidateGlobal);
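
As an aside on the BX_MODE_* comments updated above: the operating mode is
fully determined by CR0.PE, EFLAGS.VM, EFER.LMA and CS.L. Below is a hedged
sketch of that decision; the function and parameter names are illustrative,
not the Bochs API.

// BX_MODE_* constants as defined in cpu.h above:
// BX_MODE_IA32_REAL=0, BX_MODE_IA32_V8086=1, BX_MODE_IA32_PROTECTED=2,
// BX_MODE_LONG_COMPAT=3, BX_MODE_LONG_64=4
unsigned get_cpu_mode(bool cr0_pe, bool eflags_vm, bool efer_lma, bool cs_l)
{
  if (! cr0_pe)  return BX_MODE_IA32_REAL;        // CR0.PE=0
  if (efer_lma)  return cs_l ? BX_MODE_LONG_64    // long mode, 64-bit code segment
                             : BX_MODE_LONG_COMPAT;
  if (eflags_vm) return BX_MODE_IA32_V8086;       // virtual-8086 mode
  return BX_MODE_IA32_PROTECTED;                  // legacy protected mode
}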

Index: ctrl_xfer_pro.cc
===================================================================
RCS file: /cvsroot/bochs/bochs/cpu/ctrl_xfer_pro.cc,v
retrieving revision 1.45
retrieving revision 1.46
diff -u -d -r1.45 -r1.46
--- ctrl_xfer_pro.cc	22 Jul 2005 05:00:40 -0000	1.45
+++ ctrl_xfer_pro.cc	29 Jul 2005 06:29:57 -0000	1.46
@@ -99,21 +99,8 @@
       return;
     }
 
-    /* instruction pointer must be in code segment limit else #GP(0) */
-    if (! IS_LONG64_SEGMENT(descriptor))
-    {
-      if (dispBig > descriptor.u.segment.limit_scaled) {
-        BX_ERROR(("jump_protected: EIP > limit"));
-        exception(BX_GP_EXCEPTION, 0, 0);
-        return;
-      }
-    }
+    branch_far(&selector, &descriptor, dispBig, CPL);
 
-    /* Load CS:IP from destination pointer */
-    /* Load CS-cache with new segment descriptor */
-    /* CPL does not change for conforming code segment */
-    load_cs(&selector, &descriptor, CPL);
-    RIP = dispBig;
     return;
   }
   else {
@@ -170,7 +157,6 @@
         if (EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
           BX_ERROR(("jump_protected: EIP not within CS limits"));
           exception(BX_GP_EXCEPTION, 0, 0);
-          return;
         }
         return;
 
@@ -1087,27 +1073,7 @@
       return_RIP = return_IP;
     }
 
-    // EIP must be in code segment limit, else #GP(0)
-#if BX_SUPPORT_X86_64
-    if (IsLongMode()) {
-      if (! IsCanonical(return_RIP)) {
-        BX_ERROR(("branch_near64: canonical RIP violation"));
-        exception(BX_GP_EXCEPTION, 0, 0);
-      }
-    }
-    else
-#endif
-    {
-      if (return_RIP > cs_descriptor.u.segment.limit_scaled) {
-        BX_ERROR(("return_protected: return RIP > CS.limit"));
-        exception(BX_GP_EXCEPTION, 0, 0);
-      }
-    }
-
-    // load CS:EIP from stack
-    // load CS register with descriptor
-    load_cs(&cs_selector, &cs_descriptor, CPL);
-    RIP = return_RIP;
+    branch_far(&cs_selector, &cs_descriptor, return_RIP, CPL);
 
     // increment eSP
 #if BX_SUPPORT_X86_64
@@ -1293,29 +1259,7 @@
       return;
     }
 
-    /* EIP must be in code segment limit, else #GP(0) */
-#if BX_SUPPORT_X86_64
-    if (IsLongMode()) {
-      if (! IsCanonical(return_RIP)) {
-        BX_ERROR(("branch_near64: canonical RIP violation"));
-        exception(BX_GP_EXCEPTION, 0, 0);
-      }
-    }
-    else
-#endif
-    {
-      if (return_RIP > cs_descriptor.u.segment.limit_scaled) {
-        BX_ERROR(("return_protected: EIP > CS.limit"));
-        exception(BX_GP_EXCEPTION, 0, 0);
-      }
-    }
-
-    /* set CPL to RPL of return CS selector */
-    /* load CS:IP from stack */
-    /* set CS RPL to CPL */
-    /* load the CS-cache with return CS descriptor */
-    load_cs(&cs_selector, &cs_descriptor, cs_selector.rpl);
-    RIP = return_RIP;
+    branch_far(&cs_selector, &cs_descriptor, return_RIP, cs_selector.rpl);
 
     /* load SS:SP from stack */
     /* load SS-cache with return SS descriptor */
@@ -1607,19 +1551,10 @@
       access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 24,
         8, 0, BX_READ, &new_rsp);
 
-      /* RIP must be in code segment limit, else #GP(0) */
-      if (cs_descriptor.u.segment.l == 0 && new_rip > cs_descriptor.u.segment.limit_scaled ) {
-        BX_ERROR(("iret: IP > descriptor limit"));
-        exception(BX_GP_EXCEPTION, 0, 0);
-        return;
-      }
+      prev_cpl = CPL; /* previous CPL */
 
-      /* load CS:RIP from stack */
-      /* load the CS-cache with CS descriptor */
       /* set CPL to the RPL of the return CS selector */
-      prev_cpl = CPL; /* previous CPL */
-      load_cs(&cs_selector, &cs_descriptor, cs_selector.rpl);
-      BX_CPU_THIS_PTR rip = new_rip;
+      branch_far(&cs_selector, &cs_descriptor, new_rip, cs_selector.rpl);
 
       /* load flags from stack */
       // perhaps I should always write_eflags(), thus zeroing
@@ -1958,6 +1893,38 @@
   revalidate_prefetch_q();
 }
 
+void BX_CPU_C::branch_far(bx_selector_t *selector, 
+           bx_descriptor_t *descriptor, bx_address rip, Bit8u cpl)
+{
+#if BX_SUPPORT_X86_64
+  if (descriptor->u.segment.l)
+  {
+    if (! BX_CPU_THIS_PTR msr.lma)
+      BX_PANIC(("branch_far: attempt to enter x86-64 LONG mode without enabling EFER.LMA !"));
+
+    if (! IsCanonical(rip)) {
+      BX_ERROR(("branch_far: canonical RIP violation"));
+      exception(BX_GP_EXCEPTION, 0, 0);
+    }
+  }
+  else
+#endif
+  {
+    /* instruction pointer must be in code segment limit else #GP(0) */
+    if (rip > descriptor->u.segment.limit_scaled) {
+      BX_ERROR(("branch_far: EIP > limit"));
+      exception(BX_GP_EXCEPTION, 0, 0);
+    }
+  }
+
+  /* Load CS:IP from destination pointer */
+  /* Load CS-cache with new segment descriptor */
+  load_cs(selector, descriptor, cpl);
+
+  /* Change the RIP value */
+  RIP = rip;
+}
+
 #if BX_SUPPORT_X86_64
 void BX_CPP_AttrRegparmN(1) BX_CPU_C::branch_near64(bxInstruction_c *i)
 {

Index: proc_ctrl.cc
===================================================================
RCS file: /cvsroot/bochs/bochs/cpu/proc_ctrl.cc,v
retrieving revision 1.109
retrieving revision 1.110
diff -u -d -r1.109 -r1.110
--- proc_ctrl.cc	7 Jul 2005 18:40:33 -0000	1.109
+++ proc_ctrl.cc	29 Jul 2005 06:29:57 -0000	1.110
@@ -1339,7 +1339,7 @@
   if (prev_pg==0 && BX_CPU_THIS_PTR cr0.pg) {
     if (BX_CPU_THIS_PTR msr.lme) {
       if (!BX_CPU_THIS_PTR cr4.get_PAE()) {
-        BX_PANIC(("SetCR0: attempt to enter x86-64 LONG mode without enabling CR4.PAE !!!"));
+        BX_ERROR(("SetCR0: attempt to enter x86-64 LONG mode without enabling CR4.PAE !"));
         exception(BX_GP_EXCEPTION, 0, 0);
       }
       BX_CPU_THIS_PTR msr.lma = 1;

Index: segment_ctrl_pro.cc
===================================================================
RCS file: /cvsroot/bochs/bochs/cpu/segment_ctrl_pro.cc,v
retrieving revision 1.40
retrieving revision 1.41
diff -u -d -r1.40 -r1.41
--- segment_ctrl_pro.cc	20 Jul 2005 01:26:46 -0000	1.40
+++ segment_ctrl_pro.cc	29 Jul 2005 06:29:57 -0000	1.41
@@ -381,7 +381,7 @@
   // Load a segment register in long-mode with nominal values,
   // so descriptor cache values are compatible with existing checks.
   seg->cache.u.segment.base = base;
-  // (KPL) I doubt we need limit_scaled.  If we do, it should be
+  // I doubt we need limit_scaled.  If we do, it should be
   // of type bx_addr and be maxed to 64bits, not 32.
   seg->cache.u.segment.limit_scaled = 0xffffffff;
   seg->cache.valid = 1;
@@ -544,13 +544,13 @@
 BX_CPU_C::load_cs(bx_selector_t *selector, bx_descriptor_t *descriptor,
            Bit8u cpl)
 {
-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector     = *selector;
-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache        = *descriptor;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector = *selector;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache    = *descriptor;
 
   /* caller may request different CPL then in selector */
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.rpl = cpl;
-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = 1; /* ??? */
-  // (BW) Added cpl to the selector value.
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid  = 1;
+  // Added cpl to the selector value.
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value =
     (0xfffc & BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value) | cpl;
 


