
List:       xen-ia64-devel
Subject:    [Xen-ia64-devel][PATCH] Fix a physical mode bug
From:       "Xu, Anthony" <anthony.xu () intel ! com>
Date:       2006-09-29 9:16:37
Message-ID: 51CFAB8CB6883745AE7B93B3E084EBE207DC58 () pdsmsx412 ! ccr ! corp ! intel ! com

Fix a physical mode bug: when the guest writes a region register (rr) while in
physical mode and the register is rr0 or rr4, Xen must not write the value into
the machine rr. The value is only saved, and is loaded into the machine rr when
the guest switches back to virtual mode.
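
The idea of the fix: while the guest runs in physical (metaphysical) mode,
the machine rr0/rr4 must keep Xen's metaphysical values, so a guest write to
rr0 or rr4 is only recorded and applied later. Below is a minimal sketch of
that rule, using identifiers from the patch (vrrtomrr(), is_physical_mode(),
ia64_set_rr(), ia64_srlz_d(), metaphysical_saved_rr0/4); vrn_of() and the two
function names are hypothetical stand-ins, not the actual Xen code:

    /* Sketch only; the real logic is vmx_vcpu_set_rr() and
     * switch_to_virtual_rid() in the patch below. */
    void guest_set_rr_sketch(VCPU *vcpu, u64 reg, u64 val)
    {
        u64 rrval = vrrtomrr(vcpu, val);   /* guest rr -> machine rr format */

        switch (vrn_of(reg)) {             /* stand-in for reg >> VRN_SHIFT */
        case VRN0:
            vcpu->arch.metaphysical_saved_rr0 = rrval;  /* record only */
            if (!is_physical_mode(vcpu))   /* defer while in physical mode */
                ia64_set_rr(reg, rrval);
            break;
        case VRN4:
            vcpu->arch.metaphysical_saved_rr4 = rrval;
            if (!is_physical_mode(vcpu))
                ia64_set_rr(reg, rrval);
            break;
        default:
            /* regions 1-3, 5, 6 update at once; rr7 handling omitted */
            ia64_set_rr(reg, rrval);
            break;
        }
    }

    /* When the guest leaves physical mode, the recorded values are
     * finally loaded into the machine region registers. */
    void leave_physical_mode_sketch(VCPU *vcpu)
    {
        ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
        ia64_srlz_d();
        ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
        ia64_srlz_d();
    }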



Signed-off-by: Xuefei Xu <anthony.xu@intel.com>

Thanks,
Anthony

["physical_mode_fix.patch" (application/octet-stream)]

Fix a physical mode bug: when the guest writes rr0 or rr4 in physical mode,
Xen must not write the value into the machine rr; the value is saved and
loaded when the guest returns to virtual mode.


Signed-off-by: Xuefei Xu <anthony.xu@intel.com>

diff -r f34e37d0742d -r c2bacbb92f9b xen/arch/ia64/vmx/vmx_phy_mode.c
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c	Tue Sep 26 19:11:33 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c	Fri Sep 29 13:34:53 2006 +0800
@@ -126,10 +126,16 @@ vmx_init_all_rr(VCPU *vcpu)
 vmx_init_all_rr(VCPU *vcpu)
 {
 	VMX(vcpu, vrr[VRN0]) = 0x38;
+	// enable VHPT in guest physical mode
+	vcpu->arch.metaphysical_rr0 |= 1;
+	vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(vcpu, 0x38);
 	VMX(vcpu, vrr[VRN1]) = 0x38;
 	VMX(vcpu, vrr[VRN2]) = 0x38;
 	VMX(vcpu, vrr[VRN3]) = 0x38;
 	VMX(vcpu, vrr[VRN4]) = 0x38;
+	// enable VHPT in guest physical mode
+	vcpu->arch.metaphysical_rr4 |= 1;
+	vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(vcpu, 0x38);
 	VMX(vcpu, vrr[VRN5]) = 0x38;
 	VMX(vcpu, vrr[VRN6]) = 0x38;
 	VMX(vcpu, vrr[VRN7]) = 0x738;
@@ -141,10 +147,8 @@ vmx_load_all_rr(VCPU *vcpu)
 vmx_load_all_rr(VCPU *vcpu)
 {
 	unsigned long psr;
-	ia64_rr phy_rr;
 
 	local_irq_save(psr);
-
 
 	/* WARNING: not allow co-exist of both virtual mode and physical
 	 * mode in same region
@@ -154,24 +158,14 @@ vmx_load_all_rr(VCPU *vcpu)
 			panic_domain(vcpu_regs(vcpu),
 			             "Unexpected domain switch in phy emul\n");
 		}
-		phy_rr.rrval = vcpu->arch.metaphysical_rr0;
-		//phy_rr.ps = PAGE_SHIFT;
-		phy_rr.ve = 1;
-
-		ia64_set_rr((VRN0 << VRN_SHIFT), phy_rr.rrval);
-		ia64_dv_serialize_data();
-		phy_rr.rrval = vcpu->arch.metaphysical_rr4;
-		//phy_rr.ps = PAGE_SHIFT;
-		phy_rr.ve = 1;
-
-		ia64_set_rr((VRN4 << VRN_SHIFT), phy_rr.rrval);
+		ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
+		ia64_dv_serialize_data();
+		ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
 		ia64_dv_serialize_data();
 	} else {
-		ia64_set_rr((VRN0 << VRN_SHIFT),
-			     vrrtomrr(vcpu, VMX(vcpu, vrr[VRN0])));
-		ia64_dv_serialize_data();
-		ia64_set_rr((VRN4 << VRN_SHIFT),
-			     vrrtomrr(vcpu, VMX(vcpu, vrr[VRN4])));
+		ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_saved_rr0);
+		ia64_dv_serialize_data();
+		ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_saved_rr4);
 		ia64_dv_serialize_data();
 	}
 
@@ -209,21 +203,11 @@ switch_to_physical_rid(VCPU *vcpu)
 switch_to_physical_rid(VCPU *vcpu)
 {
     UINT64 psr;
-    ia64_rr phy_rr, mrr;
-
     /* Save original virtual mode rr[0] and rr[4] */
     psr=ia64_clear_ic();
-    phy_rr.rrval = vcpu->domain->arch.metaphysical_rr0;
-    mrr.rrval = ia64_get_rr(VRN0 << VRN_SHIFT);
-    phy_rr.ps = mrr.ps;
-    phy_rr.ve = 1;
-    ia64_set_rr(VRN0<<VRN_SHIFT, phy_rr.rrval);
-    ia64_srlz_d();
-    phy_rr.rrval = vcpu->domain->arch.metaphysical_rr4;
-    mrr.rrval = ia64_get_rr(VRN4 << VRN_SHIFT);
-    phy_rr.ps = mrr.ps;
-    phy_rr.ve = 1;
-    ia64_set_rr(VRN4<<VRN_SHIFT, phy_rr.rrval);
+    ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
+    ia64_srlz_d();
+    ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
     ia64_srlz_d();
 
     ia64_set_psr(psr);
@@ -236,15 +220,10 @@ switch_to_virtual_rid(VCPU *vcpu)
 switch_to_virtual_rid(VCPU *vcpu)
 {
     UINT64 psr;
-    ia64_rr mrr;
-
     psr=ia64_clear_ic();
-
-    vcpu_get_rr(vcpu,VRN0<<VRN_SHIFT,&mrr.rrval);
-    ia64_set_rr(VRN0<<VRN_SHIFT, vrrtomrr(vcpu, mrr.rrval));
-    ia64_srlz_d();
-    vcpu_get_rr(vcpu,VRN4<<VRN_SHIFT,&mrr.rrval);
-    ia64_set_rr(VRN4<<VRN_SHIFT, vrrtomrr(vcpu, mrr.rrval));
+    ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
+    ia64_srlz_d();
+    ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
     ia64_srlz_d();
     ia64_set_psr(psr);
     ia64_srlz_i();
diff -r f34e37d0742d -r c2bacbb92f9b xen/arch/ia64/vmx/vmx_vcpu.c
--- a/xen/arch/ia64/vmx/vmx_vcpu.c	Tue Sep 26 19:11:33 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c	Fri Sep 29 13:34:53 2006 +0800
@@ -212,19 +212,31 @@ IA64FAULT vmx_vcpu_set_rr(VCPU *vcpu, UI
 {
     ia64_rr oldrr,newrr;
     extern void * pal_vaddr;
-
+    u64 rrval;
     vcpu_get_rr(vcpu, reg, &oldrr.rrval);
     newrr.rrval=val;
     if (newrr.rid >= (1 << vcpu->domain->arch.rid_bits))
         panic_domain (NULL, "use of invalid rid %x\n", newrr.rid);
 
-    VMX(vcpu,vrr[reg>>61]) = val;
-    switch((u64)(reg>>61)) {
+    VMX(vcpu,vrr[reg>>VRN_SHIFT]) = val;
+    switch((u64)(reg>>VRN_SHIFT)) {
     case VRN7:
         vmx_switch_rr7(vrrtomrr(vcpu,val),vcpu->domain->shared_info,
         (void *)vcpu->arch.privregs,
         (void *)vcpu->arch.vhpt.hash, pal_vaddr );
        break;
+    case VRN4:
+        rrval = vrrtomrr(vcpu, val);
+        vcpu->arch.metaphysical_saved_rr4 = rrval;
+        if (!is_physical_mode(vcpu))
+            ia64_set_rr(reg, rrval);
+        break;
+    case VRN0:
+        rrval = vrrtomrr(vcpu, val);
+        vcpu->arch.metaphysical_saved_rr0 = rrval;
+        if (!is_physical_mode(vcpu))
+            ia64_set_rr(reg, rrval);
+        break;
     default:
         ia64_set_rr(reg,vrrtomrr(vcpu,val));
         break;


_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@lists.xensource.com
http://lists.xensource.com/xen-ia64-devel
