List:       xen-ia64-devel
Subject:    [Xen-ia64-devel] [PATCH 6/7] libxc: make xc_ia64_copy_memmap aware of sequence lock
From:       Isaku Yamahata <yamahata@valinux.co.jp>
Date:       2008-09-29 12:50:02
Message-ID: 20080929125002.GA32257@nm.localhost.valinux.co.jp

[IA64] libxc: make xc_ia64_copy_memmap aware of sequence lock.

A guest domain's memory map may be updated concurrently, so it is
protected by a sequence lock.
This patch makes xc_ia64_copy_memmap() aware of that sequence lock.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
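
For reference, the reader-side helpers introduced below pair with the
usual seqlock writer convention on the hypervisor side: the writer bumps
shared_info->arch.memmap_sequence to an odd value before updating the
memmap pages and bumps it back to an even value when done. A minimal
illustrative sketch of that convention (the helper names and the
xen_wmb() placement here are assumptions for illustration, not part of
this patch):

static void memmap_write_begin(shared_info_t *shinfo)
{
    shinfo->arch.memmap_sequence++;   /* now odd: update in progress */
    xen_wmb();                        /* order the bump before the updates */
}

static void memmap_write_end(shared_info_t *shinfo)
{
    xen_wmb();                        /* order the updates before the bump */
    shinfo->arch.memmap_sequence++;   /* even again: update complete */
}

A reader that sees an even sequence number before copying and the same
number afterwards knows its copy is not torn; otherwise it retries.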

diff -r af0969e7e408 tools/libxc/ia64/xc_ia64_stubs.c
--- a/tools/libxc/ia64/xc_ia64_stubs.c	Mon Sep 29 21:11:25 2008 +0900
+++ b/tools/libxc/ia64/xc_ia64_stubs.c	Mon Sep 29 21:12:53 2008 +0900
@@ -60,46 +60,120 @@
             ? -1 : domctl.u.getdomaininfo.max_pages);
 }
 
-int
-xc_ia64_copy_memmap(int xc_handle, uint32_t domid, shared_info_t *live_shinfo,
-                    xen_ia64_memmap_info_t **memmap_info_p,
-                    unsigned long *memmap_info_num_pages_p)
+/*
+ * Copy memmap info from guest domain pages into local buffers.
+ * The memmap info can be updated concurrently, so
+ *  - copy it into local buffers and use the local copy in order to
+ *    avoid seeing an inconsistent state.
+ *  - protect it with the seqlock shared_info->arch.memmap_sequence.
+ *  - d->arch.convmem_end (which can be obtained via XENMEM_maximum_gpfn)
+ *    is also updated concurrently. Its value only ever increases.
+ */
+/*#define cpu_relax()     ia64_hint(ia64_hint_pause)*/
+#define cpu_relax()     sched_yield()
+#define unlikely(x)     __builtin_expect((x),0)
+
+static unsigned long
+xc_ia64_mi_read_seqbegin(const shared_info_t *live_shinfo)
 {
-    unsigned int memmap_info_num_pages;
+    unsigned long ret;
+
+ repeat:
+    ret = live_shinfo->arch.memmap_sequence;
+    xen_rmb();
+    if (unlikely(ret & 1)) {
+        cpu_relax();
+        goto repeat;
+    }
+
+    return ret;
+}
+
+static int
+xc_ia64_mi_read_seqretry(const shared_info_t *live_shinfo, unsigned long start)
+{
+    xen_rmb();
+
+    return (live_shinfo->arch.memmap_sequence != start);
+}
+
+/* copy before use in case someone is updating it */
+static int
+__xc_ia64_copy_memmap(int xc_handle, uint32_t domid,
+                      shared_info_t *live_shinfo,
+                      xen_ia64_memmap_info_t **memmap_info,
+                      unsigned long *memmap_info_num_pages)
+{
     unsigned long memmap_info_pfn;
     unsigned long memmap_size;
+    xen_ia64_memmap_info_t *memmap_info_live;
 
-    xen_ia64_memmap_info_t *memmap_info_live;
-    xen_ia64_memmap_info_t *memmap_info;
-
-    /* copy before use in case someone updating them */
-    memmap_info_num_pages = live_shinfo->arch.memmap_info_num_pages;
+    *memmap_info_num_pages = live_shinfo->arch.memmap_info_num_pages;
     memmap_info_pfn = live_shinfo->arch.memmap_info_pfn;
-    if (memmap_info_num_pages == 0 || memmap_info_pfn == 0) {
+    if (*memmap_info_num_pages == 0 || memmap_info_pfn == 0) {
         ERROR("memmap_info_num_pages 0x%x memmap_info_pfn 0x%lx",
-              memmap_info_num_pages, memmap_info_pfn);
+              *memmap_info_num_pages, memmap_info_pfn);
         return -1;
     }
 
-    memmap_size = memmap_info_num_pages << PAGE_SHIFT;
+    memmap_size = *memmap_info_num_pages << PAGE_SHIFT;
     memmap_info_live = xc_map_foreign_range(xc_handle, domid, memmap_size,
                                             PROT_READ, memmap_info_pfn);
     if (memmap_info_live == NULL) {
         PERROR("Could not map memmap info.");
         return -1;
     }
-    memmap_info = malloc(memmap_size);
-    if (memmap_info == NULL) {
+    *memmap_info = malloc(memmap_size);
+    if (*memmap_info == NULL) {
         munmap(memmap_info_live, memmap_size);
         return -1;
     }
-    memcpy(memmap_info, memmap_info_live, memmap_size); /* copy before use */
+    memcpy(*memmap_info, memmap_info_live, memmap_size); /* copy before use */
     munmap(memmap_info_live, memmap_size);
+    return 0;
+}
+
+int
+xc_ia64_copy_memmap(int xc_handle, uint32_t domid, shared_info_t *live_shinfo,
+                    xen_ia64_memmap_info_t **memmap_info_p,
+                    unsigned long *memmap_info_num_pages_p)
+{
+    unsigned long gpfn_max_prev;
+    unsigned long gpfn_max_post;
+    unsigned long seq;
+
+    unsigned long memmap_info_num_pages;
+    xen_ia64_memmap_info_t *memmap_info;
+
+    gpfn_max_prev = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &domid);
+    if (gpfn_max_prev < 0)
+        return -1;
+
+ again:
+    do {
+        seq = xc_ia64_mi_read_seqbegin(live_shinfo);
+        if (__xc_ia64_copy_memmap(xc_handle, domid, live_shinfo,
+                                  &memmap_info, &memmap_info_num_pages)) {
+            return -1;
+        }
+        if (!xc_ia64_mi_read_seqretry(live_shinfo, seq))
+            break;
+        free(memmap_info);  /* torn snapshot; copy it again */
+    } while (1);
+
+    gpfn_max_post = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &domid);
+    if (gpfn_max_post < 0) {
+        free(memmap_info);
+        return -1;
+    }
+    if (gpfn_max_post > gpfn_max_prev) {
+        free(memmap_info);
+        gpfn_max_prev = gpfn_max_post;
+        goto again;
+    }
 
     /* reject unknown memmap */
     if (memmap_info->efi_memdesc_size != sizeof(efi_memory_desc_t) ||
         (memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size) == 0 ||
-        memmap_info->efi_memmap_size > memmap_size - sizeof(memmap_info) ||
+        memmap_info->efi_memmap_size >
+        (memmap_info_num_pages << PAGE_SHIFT) - sizeof(*memmap_info) ||
         memmap_info->efi_memdesc_version != EFI_MEMORY_DESCRIPTOR_VERSION) {
         PERROR("unknown memmap header. defaulting to compat mode.");
         free(memmap_info);

["memmap-info-tools-stack-seqlock.patch" (text/x-diff)]

[IA64] libxc: make xc_ia64_copy_memmap aware of sequence lock.

Guest domain's memory map may be updated concurrently so that
it is protected sequence lock.
This patch makes xc_ia64_copy_memmap() aware of sequence lock.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>

diff -r af0969e7e408 tools/libxc/ia64/xc_ia64_stubs.c
--- a/tools/libxc/ia64/xc_ia64_stubs.c	Mon Sep 29 21:11:25 2008 +0900
+++ b/tools/libxc/ia64/xc_ia64_stubs.c	Mon Sep 29 21:12:53 2008 +0900
@@ -60,46 +60,120 @@
             ? -1 : domctl.u.getdomaininfo.max_pages);
 }
 
-int
-xc_ia64_copy_memmap(int xc_handle, uint32_t domid, shared_info_t *live_shinfo,
-                    xen_ia64_memmap_info_t **memmap_info_p,
-                    unsigned long *memmap_info_num_pages_p)
+/*
+ * Copy memmap info from guest domain pages into local buffers.
+ * memmap info can be updated concurrently. So
+ *  - copy them into local buffers and use local copy in order to
+ *    avoie inconsistent state.
+ *  - protect them by seqlock. shared_info->arch.memmap_sequence.
+ *  - d->arch.convmem_end (which can be got by XENMEM_maximum_gpfn) is
+ *    also updated concurrently. The valuable is increased only.
+ */
+/*#define cpu_relax()     ia64_hint(ia64_hint_pause)*/
+#define cpu_relax()     sched_yield()
+#define unlikely(x)     __builtin_expect((x),0)
+
+static unsigned long
+xc_ia64_mi_read_seqbegin(const shared_info_t *live_shinfo)
 {
-    unsigned int memmap_info_num_pages;
+    unsigned long ret;
+
+ repeat:
+    ret = live_shinfo->arch.memmap_sequence;
+    xen_rmb();
+    if (unlikely(ret & 1)) {
+        cpu_relax();
+        goto repeat;
+    }
+
+    return ret;
+}
+
+static int
+xc_ia64_mi_read_seqretry(const shared_info_t *live_shinfo, unsigned long start)
+{
+    xen_rmb();
+
+    return (live_shinfo->arch.memmap_sequence != start);
+}
+
+/* copy before use in case someone updating them */
+static int
+__xc_ia64_copy_memmap(int xc_handle, uint32_t domid,
+                      shared_info_t *live_shinfo,
+                      xen_ia64_memmap_info_t **memmap_info,
+                      unsigned long *memmap_info_num_pages)
+{
     unsigned long memmap_info_pfn;
     unsigned long memmap_size;
+    xen_ia64_memmap_info_t *memmap_info_live;
 
-    xen_ia64_memmap_info_t *memmap_info_live;
-    xen_ia64_memmap_info_t *memmap_info;
-
-    /* copy before use in case someone updating them */
-    memmap_info_num_pages = live_shinfo->arch.memmap_info_num_pages;
+    *memmap_info_num_pages = live_shinfo->arch.memmap_info_num_pages;
     memmap_info_pfn = live_shinfo->arch.memmap_info_pfn;
-    if (memmap_info_num_pages == 0 || memmap_info_pfn == 0) {
+    if (*memmap_info_num_pages == 0 || memmap_info_pfn == 0) {
         ERROR("memmap_info_num_pages 0x%x memmap_info_pfn 0x%lx",
-              memmap_info_num_pages, memmap_info_pfn);
+              *memmap_info_num_pages, memmap_info_pfn);
         return -1;
     }
 
-    memmap_size = memmap_info_num_pages << PAGE_SHIFT;
+    memmap_size = *memmap_info_num_pages << PAGE_SHIFT;
     memmap_info_live = xc_map_foreign_range(xc_handle, domid, memmap_size,
                                             PROT_READ, memmap_info_pfn);
     if (memmap_info_live == NULL) {
         PERROR("Could not map memmap info.");
         return -1;
     }
-    memmap_info = malloc(memmap_size);
-    if (memmap_info == NULL) {
+    *memmap_info = malloc(memmap_size);
+    if (*memmap_info == NULL) {
         munmap(memmap_info_live, memmap_size);
         return -1;
     }
-    memcpy(memmap_info, memmap_info_live, memmap_size); /* copy before use */
+    memcpy(*memmap_info, memmap_info_live, memmap_size); /* copy before use */
     munmap(memmap_info_live, memmap_size);
+    return 0;
+}
+
+int
+xc_ia64_copy_memmap(int xc_handle, uint32_t domid, shared_info_t *live_shinfo,
+                    xen_ia64_memmap_info_t **memmap_info_p,
+                    unsigned long *memmap_info_num_pages_p)
+{
+    unsigned long gpfn_max_prev;
+    unsigned long gpfn_max_post;
+    unsigned long seq;
+
+    unsigned long memmap_info_num_pages;
+    xen_ia64_memmap_info_t *memmap_info;
+
+    gpfn_max_prev = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &domid);
+    if (gpfn_max_prev < 0)
+        return -1;
+
+ again:
+    do {
+        seq = xc_ia64_mi_read_seqbegin(live_shinfo);
+        if(__xc_ia64_copy_memmap(xc_handle, domid, live_shinfo,
+                                 &memmap_info, &memmap_info_num_pages)) {
+            return -1;
+        }
+    } while (xc_ia64_mi_read_seqretry(live_shinfo, seq));
+
+    gpfn_max_post = xc_memory_op(xc_handle, XENMEM_maximum_gpfn, &domid);
+    if (gpfn_max_prev < 0) {
+        free(memmap_info);
+        return -1;
+    }
+    if (gpfn_max_post > gpfn_max_prev) {
+        free(memmap_info);
+        gpfn_max_prev = gpfn_max_post;
+        goto again;
+    }
 
     /* reject unknown memmap */
     if (memmap_info->efi_memdesc_size != sizeof(efi_memory_desc_t) ||
         (memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size) == 0 ||
-        memmap_info->efi_memmap_size > memmap_size - sizeof(memmap_info) ||
+        memmap_info->efi_memmap_size >
+        (memmap_info_num_pages << PAGE_SHIFT) - sizeof(memmap_info) ||
         memmap_info->efi_memdesc_version != EFI_MEMORY_DESCRIPTOR_VERSION) {
         PERROR("unknown memmap header. defaulting to compat mode.");
         free(memmap_info);


_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@lists.xensource.com
http://lists.xensource.com/xen-ia64-devel
